{"repo_name": "lsfg-vk", "file_name": "/lsfg-vk/framegen/src/core/commandpool.cpp", "inference_info": {"prefix_code": "#include \n#include \n\n#include \"core/commandpool.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\n", "suffix_code": "\n", "middle_code": "CommandPool::CommandPool(const Core::Device& device) {\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = device.getComputeFamilyIdx()\n };\n VkCommandPool commandPoolHandle{};\n auto res = vkCreateCommandPool(device.handle(), &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device.handle()](VkCommandPool* commandPoolHandle) {\n vkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "cpp", "sub_task_type": null}, "context_code": [["/lsfg-vk/src/mini/commandpool.cpp", "#include \"mini/commandpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n\nusing namespace Mini;\n\nCommandPool::CommandPool(VkDevice device, uint32_t graphicsFamilyIdx) {\n // create command pool\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = graphicsFamilyIdx\n };\n VkCommandPool commandPoolHandle{};\n auto res = Layer::ovkCreateCommandPool(device, &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n\n // store command pool in shared ptr\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device](VkCommandPool* commandPoolHandle) {\n 
Layer::ovkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/commandbuffer.cpp", "#include \n#include \n\n#include \"core/commandbuffer.hpp\"\n#include \"core/device.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/fence.hpp\"\n#include \"core/semaphore.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nCommandBuffer::CommandBuffer(const Core::Device& device, const CommandPool& pool) {\n // create command buffer\n const VkCommandBufferAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n .commandPool = pool.handle(),\n .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n .commandBufferCount = 1\n };\n VkCommandBuffer commandBufferHandle{};\n auto res = vkAllocateCommandBuffers(device.handle(), &desc, &commandBufferHandle);\n if (res != VK_SUCCESS || commandBufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate command buffer\");\n\n // store command buffer in shared ptr\n this->state = std::make_shared(CommandBufferState::Empty);\n this->commandBuffer = std::shared_ptr(\n new VkCommandBuffer(commandBufferHandle),\n [dev = device.handle(), pool = pool.handle()](VkCommandBuffer* cmdBuffer) {\n vkFreeCommandBuffers(dev, pool, 1, cmdBuffer);\n }\n );\n}\n\nvoid CommandBuffer::begin() {\n if (*this->state != CommandBufferState::Empty)\n throw std::logic_error(\"Command buffer is not in Empty state\");\n\n const VkCommandBufferBeginInfo beginInfo = {\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT\n };\n auto res = vkBeginCommandBuffer(*this->commandBuffer, &beginInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to begin command buffer\");\n\n *this->state = CommandBufferState::Recording;\n}\n\nvoid CommandBuffer::dispatch(uint32_t x, uint32_t y, uint32_t z) const {\n if (*this->state != 
CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n vkCmdDispatch(*this->commandBuffer, x, y, z);\n}\n\nvoid CommandBuffer::end() {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n auto res = vkEndCommandBuffer(*this->commandBuffer);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to end command buffer\");\n\n *this->state = CommandBufferState::Full;\n}\n\nvoid CommandBuffer::submit(VkQueue queue, std::optional fence,\n const std::vector& waitSemaphores,\n std::optional> waitSemaphoreValues,\n const std::vector& signalSemaphores,\n std::optional> signalSemaphoreValues) {\n if (*this->state != CommandBufferState::Full)\n throw std::logic_error(\"Command buffer is not in Full state\");\n\n const std::vector waitStages(waitSemaphores.size(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);\n VkTimelineSemaphoreSubmitInfo timelineInfo{\n .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,\n };\n if (waitSemaphoreValues.has_value()) {\n timelineInfo.waitSemaphoreValueCount =\n static_cast(waitSemaphoreValues->size());\n timelineInfo.pWaitSemaphoreValues = waitSemaphoreValues->data();\n }\n if (signalSemaphoreValues.has_value()) {\n timelineInfo.signalSemaphoreValueCount =\n static_cast(signalSemaphoreValues->size());\n timelineInfo.pSignalSemaphoreValues = signalSemaphoreValues->data();\n }\n\n std::vector waitSemaphoresHandles;\n waitSemaphoresHandles.reserve(waitSemaphores.size());\n for (const auto& semaphore : waitSemaphores)\n waitSemaphoresHandles.push_back(semaphore.handle());\n std::vector signalSemaphoresHandles;\n signalSemaphoresHandles.reserve(signalSemaphores.size());\n for (const auto& semaphore : signalSemaphores)\n signalSemaphoresHandles.push_back(semaphore.handle());\n\n const VkSubmitInfo submitInfo{\n .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,\n .pNext = (waitSemaphoreValues.has_value() || 
signalSemaphoreValues.has_value())\n ? &timelineInfo : nullptr,\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphoresHandles.data(),\n .pWaitDstStageMask = waitStages.data(),\n .commandBufferCount = 1,\n .pCommandBuffers = &(*this->commandBuffer),\n .signalSemaphoreCount = static_cast(signalSemaphores.size()),\n .pSignalSemaphores = signalSemaphoresHandles.data()\n };\n auto res = vkQueueSubmit(queue, 1, &submitInfo, fence ? fence->handle() : VK_NULL_HANDLE);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to submit command buffer\");\n\n *this->state = CommandBufferState::Submitted;\n}\n"], ["/lsfg-vk/src/mini/commandbuffer.cpp", "#include \"mini/commandbuffer.hpp\"\n#include \"mini/commandpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n\nusing namespace Mini;\n\nCommandBuffer::CommandBuffer(VkDevice device, const CommandPool& pool) {\n // create command buffer\n const VkCommandBufferAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n .commandPool = pool.handle(),\n .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n .commandBufferCount = 1\n };\n VkCommandBuffer commandBufferHandle{};\n auto res = Layer::ovkAllocateCommandBuffers(device, &desc, &commandBufferHandle);\n if (res != VK_SUCCESS || commandBufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate command buffer\");\n res = Layer::ovkSetDeviceLoaderData(device, commandBufferHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to set device loader data for command buffer\");\n\n // store command buffer in shared ptr\n this->state = std::make_shared(CommandBufferState::Empty);\n this->commandBuffer = std::shared_ptr(\n new VkCommandBuffer(commandBufferHandle),\n [dev = device, pool = pool.handle()](VkCommandBuffer* cmdBuffer) {\n Layer::ovkFreeCommandBuffers(dev, pool, 1, cmdBuffer);\n }\n 
);\n}\n\nvoid CommandBuffer::begin() {\n if (*this->state != CommandBufferState::Empty)\n throw std::logic_error(\"Command buffer is not in Empty state\");\n\n const VkCommandBufferBeginInfo beginInfo = {\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT\n };\n auto res = Layer::ovkBeginCommandBuffer(*this->commandBuffer, &beginInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to begin command buffer\");\n\n *this->state = CommandBufferState::Recording;\n}\n\nvoid CommandBuffer::end() {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n auto res = Layer::ovkEndCommandBuffer(*this->commandBuffer);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to end command buffer\");\n\n *this->state = CommandBufferState::Full;\n}\n\nvoid CommandBuffer::submit(VkQueue queue,\n const std::vector& waitSemaphores,\n const std::vector& signalSemaphores) {\n if (*this->state != CommandBufferState::Full)\n throw std::logic_error(\"Command buffer is not in Full state\");\n\n const std::vector waitStages(waitSemaphores.size(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);\n\n const VkSubmitInfo submitInfo{\n .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphores.data(),\n .pWaitDstStageMask = waitStages.data(),\n .commandBufferCount = 1,\n .pCommandBuffers = &(*this->commandBuffer),\n .signalSemaphoreCount = static_cast(signalSemaphores.size()),\n .pSignalSemaphores = signalSemaphores.data()\n };\n auto res = Layer::ovkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to submit command buffer\");\n\n *this->state = CommandBufferState::Submitted;\n}\n"], ["/lsfg-vk/framegen/src/core/descriptorpool.cpp", "#include \n#include \n\n#include \"core/descriptorpool.hpp\"\n#include 
\"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nDescriptorPool::DescriptorPool(const Core::Device& device) {\n // create descriptor pool\n const std::array pools{{ // arbitrary limits\n { .type = VK_DESCRIPTOR_TYPE_SAMPLER, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 4096 }\n }};\n const VkDescriptorPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,\n .flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,\n .maxSets = 16384,\n .poolSizeCount = static_cast(pools.size()),\n .pPoolSizes = pools.data()\n };\n VkDescriptorPool poolHandle{};\n auto res = vkCreateDescriptorPool(device.handle(), &desc, nullptr, &poolHandle);\n if (res != VK_SUCCESS || poolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create descriptor pool\");\n\n // store pool in shared ptr\n this->descriptorPool = std::shared_ptr(\n new VkDescriptorPool(poolHandle),\n [dev = device.handle()](VkDescriptorPool* poolHandle) {\n vkDestroyDescriptorPool(dev, *poolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/descriptorset.cpp", "#include \n#include \n\n#include \"core/descriptorset.hpp\"\n#include \"core/device.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/pipeline.hpp\"\n#include \"core/image.hpp\"\n#include \"core/sampler.hpp\"\n#include \"core/buffer.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n\nusing namespace LSFG::Core;\n\nDescriptorSet::DescriptorSet(const Core::Device& device,\n const DescriptorPool& pool, const ShaderModule& shaderModule) {\n // create descriptor set\n VkDescriptorSetLayout layout = shaderModule.getLayout();\n const 
VkDescriptorSetAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,\n .descriptorPool = pool.handle(),\n .descriptorSetCount = 1,\n .pSetLayouts = &layout\n };\n VkDescriptorSet descriptorSetHandle{};\n auto res = vkAllocateDescriptorSets(device.handle(), &desc, &descriptorSetHandle);\n if (res != VK_SUCCESS || descriptorSetHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate descriptor set\");\n\n /// store set in shared ptr\n this->descriptorSet = std::shared_ptr(\n new VkDescriptorSet(descriptorSetHandle),\n [dev = device.handle(), pool = pool](VkDescriptorSet* setHandle) {\n vkFreeDescriptorSets(dev, pool.handle(), 1, setHandle);\n }\n );\n}\n\nDescriptorSetUpdateBuilder DescriptorSet::update(const Core::Device& device) const {\n return { *this, device };\n}\n\nvoid DescriptorSet::bind(const CommandBuffer& commandBuffer, const Pipeline& pipeline) const {\n VkDescriptorSet descriptorSetHandle = this->handle();\n vkCmdBindDescriptorSets(commandBuffer.handle(),\n VK_PIPELINE_BIND_POINT_COMPUTE, pipeline.getLayout(),\n 0, 1, &descriptorSetHandle, 0, nullptr);\n}\n\n// updater class\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Image& image) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n .imageView = image.getView(),\n .imageLayout = VK_IMAGE_LAYOUT_GENERAL\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Sampler& sampler) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n 
.pImageInfo = new VkDescriptorImageInfo {\n .sampler = sampler.handle(),\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Buffer& buffer) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = nullptr,\n .pBufferInfo = new VkDescriptorBufferInfo {\n .buffer = buffer.handle(),\n .range = buffer.getSize()\n }\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nvoid DescriptorSetUpdateBuilder::build() {\n vkUpdateDescriptorSets(this->device->handle(),\n static_cast(this->entries.size()),\n this->entries.data(), 0, nullptr);\n\n // NOLINTBEGIN\n for (const auto& entry : this->entries) {\n delete entry.pImageInfo;\n delete entry.pBufferInfo;\n }\n // NOLINTEND\n}\n"], ["/lsfg-vk/framegen/src/core/pipeline.cpp", "#include \n#include \n\n#include \"core/pipeline.hpp\"\n#include \"core/device.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nPipeline::Pipeline(const Core::Device& device, const ShaderModule& shader) {\n // create pipeline layout\n VkDescriptorSetLayout shaderLayout = shader.getLayout();\n const VkPipelineLayoutCreateInfo layoutDesc{\n .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,\n .setLayoutCount = 1,\n .pSetLayouts = &shaderLayout,\n };\n VkPipelineLayout layoutHandle{};\n auto 
res = vkCreatePipelineLayout(device.handle(), &layoutDesc, nullptr, &layoutHandle);\n if (res != VK_SUCCESS || !layoutHandle)\n throw LSFG::vulkan_error(res, \"Failed to create pipeline layout\");\n\n // create pipeline\n const VkPipelineShaderStageCreateInfo shaderStageInfo{\n .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n .stage = VK_SHADER_STAGE_COMPUTE_BIT,\n .module = shader.handle(),\n .pName = \"main\",\n };\n const VkComputePipelineCreateInfo pipelineDesc{\n .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,\n .stage = shaderStageInfo,\n .layout = layoutHandle,\n };\n VkPipeline pipelineHandle{};\n res = vkCreateComputePipelines(device.handle(),\n VK_NULL_HANDLE, 1, &pipelineDesc, nullptr, &pipelineHandle);\n if (res != VK_SUCCESS || !pipelineHandle)\n throw LSFG::vulkan_error(res, \"Failed to create compute pipeline\");\n\n // store layout and pipeline in shared ptr\n this->layout = std::shared_ptr(\n new VkPipelineLayout(layoutHandle),\n [dev = device.handle()](VkPipelineLayout* layout) {\n vkDestroyPipelineLayout(dev, *layout, nullptr);\n }\n );\n this->pipeline = std::shared_ptr(\n new VkPipeline(pipelineHandle),\n [dev = device.handle()](VkPipeline* pipeline) {\n vkDestroyPipeline(dev, *pipeline, nullptr);\n }\n );\n}\n\nvoid Pipeline::bind(const CommandBuffer& commandBuffer) const {\n vkCmdBindPipeline(commandBuffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, *this->pipeline);\n}\n"], ["/lsfg-vk/framegen/src/core/image.cpp", "#include \n#include \n\n#include \"core/image.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nImage::Image(const Core::Device& device, VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .imageType = 
VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = vkCreateImage(device.handle(), &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetImageMemoryRequirements(device.handle(), imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = vkBindImageMemory(device.handle(), imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // create image view\n const VkImageViewCreateInfo viewDesc{\n .sType = 
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n .image = imageHandle,\n .viewType = VK_IMAGE_VIEW_TYPE_2D,\n .format = format,\n .components = {\n .r = VK_COMPONENT_SWIZZLE_IDENTITY,\n .g = VK_COMPONENT_SWIZZLE_IDENTITY,\n .b = VK_COMPONENT_SWIZZLE_IDENTITY,\n .a = VK_COMPONENT_SWIZZLE_IDENTITY\n },\n .subresourceRange = {\n .aspectMask = aspectFlags,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n\n VkImageView viewHandle{};\n res = vkCreateImageView(device.handle(), &viewDesc, nullptr, &viewHandle);\n if (res != VK_SUCCESS || viewHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create image view\");\n\n // store objects in shared ptr\n this->layout = std::make_shared(VK_IMAGE_LAYOUT_UNDEFINED);\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device.handle()](VkImage* img) {\n vkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n this->view = std::shared_ptr(\n new VkImageView(viewHandle),\n [dev = device.handle()](VkImageView* imgView) {\n vkDestroyImageView(dev, *imgView, nullptr);\n }\n );\n}\n\n// shared memory constructor\n\nImage::Image(const Core::Device& device, VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags, int fd)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkExternalMemoryImageCreateInfo externalInfo{\n .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .pNext = &externalInfo,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n 
.sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = vkCreateImage(device.handle(), &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetImageMemoryRequirements(device.handle(), imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // ~~allocate~~ and bind memory\n const VkMemoryDedicatedAllocateInfoKHR dedicatedInfo2{\n .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,\n .image = imageHandle,\n };\n const VkImportMemoryFdInfoKHR importInfo{\n .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,\n .pNext = &dedicatedInfo2,\n .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,\n .fd = fd // closes the fd\n };\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .pNext = fd == -1 ? 
nullptr : &importInfo,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = vkBindImageMemory(device.handle(), imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // create image view\n const VkImageViewCreateInfo viewDesc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n .image = imageHandle,\n .viewType = VK_IMAGE_VIEW_TYPE_2D,\n .format = format,\n .components = {\n .r = VK_COMPONENT_SWIZZLE_IDENTITY,\n .g = VK_COMPONENT_SWIZZLE_IDENTITY,\n .b = VK_COMPONENT_SWIZZLE_IDENTITY,\n .a = VK_COMPONENT_SWIZZLE_IDENTITY\n },\n .subresourceRange = {\n .aspectMask = aspectFlags,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n\n VkImageView viewHandle{};\n res = vkCreateImageView(device.handle(), &viewDesc, nullptr, &viewHandle);\n if (res != VK_SUCCESS || viewHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create image view\");\n\n // store objects in shared ptr\n this->layout = std::make_shared(VK_IMAGE_LAYOUT_UNDEFINED);\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device.handle()](VkImage* img) {\n vkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n this->view = std::shared_ptr(\n new VkImageView(viewHandle),\n [dev = device.handle()](VkImageView* imgView) {\n vkDestroyImageView(dev, *imgView, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/semaphore.cpp", "#include \n#include \n\n#include \"core/semaphore.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include 
\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nSemaphore::Semaphore(const Core::Device& device, std::optional initial) {\n // create semaphore\n const VkSemaphoreTypeCreateInfo typeInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,\n .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,\n .initialValue = initial.value_or(0)\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = initial.has_value() ? &typeInfo : nullptr,\n };\n VkSemaphore semaphoreHandle{};\n auto res = vkCreateSemaphore(device.handle(), &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // store semaphore in shared ptr\n this->isTimeline = initial.has_value();\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device.handle()](VkSemaphore* semaphoreHandle) {\n vkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nSemaphore::Semaphore(const Core::Device& device, int fd) {\n // create semaphore\n const VkExportSemaphoreCreateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = &exportInfo\n };\n VkSemaphore semaphoreHandle{};\n auto res = vkCreateSemaphore(device.handle(), &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // import semaphore from fd\n auto vkImportSemaphoreFdKHR = reinterpret_cast(\n vkGetDeviceProcAddr(device.handle(), \"vkImportSemaphoreFdKHR\"));\n\n const VkImportSemaphoreFdInfoKHR importInfo{\n .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,\n .semaphore = semaphoreHandle,\n .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,\n 
.fd = fd // closes the fd\n };\n res = vkImportSemaphoreFdKHR(device.handle(), &importInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to import semaphore from fd\");\n\n // store semaphore in shared ptr\n this->isTimeline = false;\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device.handle()](VkSemaphore* semaphoreHandle) {\n vkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nvoid Semaphore::signal(const Core::Device& device, uint64_t value) const {\n if (!this->isTimeline)\n throw std::logic_error(\"Invalid timeline semaphore\");\n\n const VkSemaphoreSignalInfo signalInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,\n .semaphore = this->handle(),\n .value = value\n };\n auto res = vkSignalSemaphore(device.handle(), &signalInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to signal semaphore\");\n}\n\nbool Semaphore::wait(const Core::Device& device, uint64_t value, uint64_t timeout) const {\n if (!this->isTimeline)\n throw std::logic_error(\"Invalid timeline semaphore\");\n\n VkSemaphore semaphore = this->handle();\n const VkSemaphoreWaitInfo waitInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,\n .semaphoreCount = 1,\n .pSemaphores = &semaphore,\n .pValues = &value\n };\n auto res = vkWaitSemaphores(device.handle(), &waitInfo, timeout);\n if (res != VK_SUCCESS && res != VK_TIMEOUT)\n throw LSFG::vulkan_error(res, \"Unable to wait for semaphore\");\n\n return res == VK_SUCCESS;\n}\n"], ["/lsfg-vk/framegen/src/core/buffer.cpp", "#include \n#include \n\n#include \"core/buffer.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nvoid Buffer::construct(const Core::Device& device, const void* data, VkBufferUsageFlags usage) {\n // create buffer\n const VkBufferCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n .size = this->size,\n .usage = 
usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkBuffer bufferHandle{};\n auto res = vkCreateBuffer(device.handle(), &desc, nullptr, &bufferHandle);\n if (res != VK_SUCCESS || bufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan buffer\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetBufferMemoryRequirements(device.handle(), bufferHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags &\n (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for buffer\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan buffer\");\n\n res = vkBindBufferMemory(device.handle(), bufferHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan buffer\");\n\n // upload data to buffer\n uint8_t* buf{};\n res = vkMapMemory(device.handle(), memoryHandle, 0, this->size, 0, reinterpret_cast(&buf));\n if (res != VK_SUCCESS || buf == nullptr)\n throw LSFG::vulkan_error(res, \"Failed to map memory for Vulkan 
buffer\");\n std::copy_n(reinterpret_cast(data), this->size, buf);\n vkUnmapMemory(device.handle(), memoryHandle);\n\n // store buffer and memory in shared ptr\n this->buffer = std::shared_ptr(\n new VkBuffer(bufferHandle),\n [dev = device.handle()](VkBuffer* img) {\n vkDestroyBuffer(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/fence.cpp", "#include \n#include \n\n#include \"core/fence.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n\nusing namespace LSFG::Core;\n\nFence::Fence(const Core::Device& device) {\n // create fence\n const VkFenceCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO\n };\n VkFence fenceHandle{};\n auto res = vkCreateFence(device.handle(), &desc, nullptr, &fenceHandle);\n if (res != VK_SUCCESS || fenceHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create fence\");\n\n // store fence in shared ptr\n this->fence = std::shared_ptr(\n new VkFence(fenceHandle),\n [dev = device.handle()](VkFence* fenceHandle) {\n vkDestroyFence(dev, *fenceHandle, nullptr);\n }\n );\n}\n\nvoid Fence::reset(const Core::Device& device) const {\n VkFence fenceHandle = this->handle();\n auto res = vkResetFences(device.handle(), 1, &fenceHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to reset fence\");\n}\n\nbool Fence::wait(const Core::Device& device, uint64_t timeout) const {\n VkFence fenceHandle = this->handle();\n auto res = vkWaitForFences(device.handle(), 1, &fenceHandle, VK_TRUE, timeout);\n if (res != VK_SUCCESS && res != VK_TIMEOUT)\n throw LSFG::vulkan_error(res, \"Unable to wait for fence\");\n\n return res == VK_SUCCESS;\n}\n"], ["/lsfg-vk/framegen/src/core/device.cpp", "#include \n#include \n\n#include \"core/device.hpp\"\n#include \"core/instance.hpp\"\n#include 
\"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nconst std::vector requiredExtensions = {\n \"VK_KHR_external_memory_fd\",\n \"VK_KHR_external_semaphore_fd\",\n \"VK_EXT_robustness2\",\n};\n\nDevice::Device(const Instance& instance, uint64_t deviceUUID) {\n // get all physical devices\n uint32_t deviceCount{};\n auto res = vkEnumeratePhysicalDevices(instance.handle(), &deviceCount, nullptr);\n if (res != VK_SUCCESS || deviceCount == 0)\n throw LSFG::vulkan_error(res, \"Failed to enumerate physical devices\");\n\n std::vector devices(deviceCount);\n res = vkEnumeratePhysicalDevices(instance.handle(), &deviceCount, devices.data());\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get physical devices\");\n\n // get device by uuid\n std::optional physicalDevice;\n for (const auto& device : devices) {\n VkPhysicalDeviceProperties properties;\n vkGetPhysicalDeviceProperties(device, &properties);\n\n const uint64_t uuid =\n static_cast(properties.vendorID) << 32 | properties.deviceID;\n if (deviceUUID == uuid || deviceUUID == 0x1463ABAC) {\n physicalDevice = device;\n break;\n }\n }\n if (!physicalDevice)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Could not find physical device with UUID\");\n\n // find queue family indices\n uint32_t familyCount{};\n vkGetPhysicalDeviceQueueFamilyProperties(*physicalDevice, &familyCount, nullptr);\n\n std::vector queueFamilies(familyCount);\n vkGetPhysicalDeviceQueueFamilyProperties(*physicalDevice, &familyCount, queueFamilies.data());\n\n std::optional computeFamilyIdx;\n for (uint32_t i = 0; i < familyCount; ++i) {\n if (queueFamilies[i].queueFlags & VK_QUEUE_COMPUTE_BIT)\n computeFamilyIdx = i;\n }\n if (!computeFamilyIdx)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"No compute queue family found\");\n\n // create logical device\n const float queuePriority{1.0F}; // highest priority\n VkPhysicalDeviceRobustness2FeaturesEXT 
robustness2{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT,\n .nullDescriptor = VK_TRUE,\n };\n VkPhysicalDeviceVulkan13Features features13{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES,\n .pNext = &robustness2,\n .synchronization2 = VK_TRUE\n };\n const VkPhysicalDeviceVulkan12Features features12{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,\n .pNext = &features13,\n .timelineSemaphore = VK_TRUE,\n .vulkanMemoryModel = VK_TRUE\n };\n const VkDeviceQueueCreateInfo computeQueueDesc{\n .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,\n .queueFamilyIndex = *computeFamilyIdx,\n .queueCount = 1,\n .pQueuePriorities = &queuePriority\n };\n const VkDeviceCreateInfo deviceCreateInfo{\n .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,\n .pNext = &features12,\n .queueCreateInfoCount = 1,\n .pQueueCreateInfos = &computeQueueDesc,\n .enabledExtensionCount = static_cast(requiredExtensions.size()),\n .ppEnabledExtensionNames = requiredExtensions.data()\n };\n VkDevice deviceHandle{};\n res = vkCreateDevice(*physicalDevice, &deviceCreateInfo, nullptr, &deviceHandle);\n if (res != VK_SUCCESS | deviceHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create logical device\");\n\n volkLoadDevice(deviceHandle);\n\n // get compute queue\n VkQueue queueHandle{};\n vkGetDeviceQueue(deviceHandle, *computeFamilyIdx, 0, &queueHandle);\n\n // store in shared ptr\n this->computeQueue = queueHandle;\n this->computeFamilyIdx = *computeFamilyIdx;\n this->physicalDevice = *physicalDevice;\n this->device = std::shared_ptr(\n new VkDevice(deviceHandle),\n [](VkDevice* device) {\n vkDestroyDevice(*device, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/common/utils.cpp", "#include \n#include \n\n#include \"common/utils.hpp\"\n#include \"core/buffer.hpp\"\n#include \"core/image.hpp\"\n#include \"core/device.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/fence.hpp\"\n#include 
\"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Utils;\n\nBarrierBuilder& BarrierBuilder::addR2W(Core::Image& image) {\n this->barriers.emplace_back(VkImageMemoryBarrier2 {\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .srcStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .srcAccessMask = VK_ACCESS_2_SHADER_READ_BIT,\n .dstStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .dstAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_GENERAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n });\n image.setLayout(VK_IMAGE_LAYOUT_GENERAL);\n\n return *this;\n}\n\nBarrierBuilder& BarrierBuilder::addW2R(Core::Image& image) {\n this->barriers.emplace_back(VkImageMemoryBarrier2 {\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .srcStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .srcAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT,\n .dstStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .dstAccessMask = VK_ACCESS_2_SHADER_READ_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_GENERAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n });\n image.setLayout(VK_IMAGE_LAYOUT_GENERAL);\n\n return *this;\n}\n\nvoid BarrierBuilder::build() const {\n const VkDependencyInfo dependencyInfo = {\n .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n .imageMemoryBarrierCount = static_cast(this->barriers.size()),\n .pImageMemoryBarriers = this->barriers.data()\n };\n vkCmdPipelineBarrier2(this->commandBuffer->handle(), &dependencyInfo);\n}\n\nvoid Utils::uploadImage(const Core::Device& device, const Core::CommandPool& commandPool,\n Core::Image& image, const std::string& path) {\n // read image bytecode\n std::ifstream 
file(path.data(), std::ios::binary | std::ios::ate);\n if (!file.is_open())\n throw std::system_error(errno, std::generic_category(), \"Failed to open image: \" + path);\n\n std::streamsize size = file.tellg();\n size -= 124 + 4; // dds header and magic bytes\n std::vector code(static_cast(size));\n\n file.seekg(124 + 4, std::ios::beg);\n if (!file.read(code.data(), size))\n throw std::system_error(errno, std::generic_category(), \"Failed to read image: \" + path);\n\n file.close();\n\n // copy data to buffer\n const Core::Buffer stagingBuffer(\n device, code.data(), static_cast(code.size()),\n VK_BUFFER_USAGE_TRANSFER_SRC_BIT\n );\n\n // perform the upload\n Core::CommandBuffer commandBuffer(device, commandPool);\n commandBuffer.begin();\n\n const VkImageMemoryBarrier barrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .srcAccessMask = VK_ACCESS_NONE,\n .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n };\n image.setLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n vkCmdPipelineBarrier(\n commandBuffer.handle(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,\n 0, 0, nullptr, 0, nullptr, 1, &barrier\n );\n\n auto extent = image.getExtent();\n const VkBufferImageCopy region{\n .bufferImageHeight = 0,\n .imageSubresource = {\n .aspectMask = image.getAspectFlags(),\n .layerCount = 1\n },\n .imageExtent = { extent.width, extent.height, 1 }\n };\n vkCmdCopyBufferToImage(\n commandBuffer.handle(),\n stagingBuffer.handle(), image.handle(),\n VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion\n );\n\n commandBuffer.end();\n\n Core::Fence fence(device);\n commandBuffer.submit(device.getComputeQueue(), fence);\n\n // wait for the upload to complete\n if (!fence.wait(device))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Upload operation 
timed out\");\n}\n\nvoid Utils::clearImage(const Core::Device& device, Core::Image& image, bool white) {\n Core::Fence fence(device);\n const Core::CommandPool cmdPool(device);\n Core::CommandBuffer cmdBuf(device, cmdPool);\n cmdBuf.begin();\n\n const VkImageMemoryBarrier2 barrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,\n .dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const VkDependencyInfo dependencyInfo = {\n .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n .imageMemoryBarrierCount = 1,\n .pImageMemoryBarriers = &barrier\n };\n image.setLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n vkCmdPipelineBarrier2(cmdBuf.handle(), &dependencyInfo);\n\n const float clearValue = white ? 1.0F : 0.0F;\n const VkClearColorValue clearColor = {{ clearValue, clearValue, clearValue, clearValue }};\n const VkImageSubresourceRange subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n };\n vkCmdClearColorImage(cmdBuf.handle(),\n image.handle(), image.getLayout(),\n &clearColor,\n 1, &subresourceRange);\n\n cmdBuf.end();\n\n cmdBuf.submit(device.getComputeQueue(), fence);\n if (!fence.wait(device))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Failed to wait for clearing fence.\");\n}\n"], ["/lsfg-vk/framegen/src/core/shadermodule.cpp", "#include \n#include \n\n#include \"core/shadermodule.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nShaderModule::ShaderModule(const Core::Device& device, const std::vector& code,\n const std::vector>& descriptorTypes) {\n // create shader module\n const uint8_t* data_ptr = code.data();\n const 
VkShaderModuleCreateInfo createInfo{\n .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,\n .codeSize = code.size(),\n .pCode = reinterpret_cast(data_ptr)\n };\n VkShaderModule shaderModuleHandle{};\n auto res = vkCreateShaderModule(device.handle(), &createInfo, nullptr, &shaderModuleHandle);\n if (res != VK_SUCCESS || !shaderModuleHandle)\n throw LSFG::vulkan_error(res, \"Failed to create shader module\");\n\n // create descriptor set layout\n std::vector layoutBindings;\n size_t bindIdx = 0;\n for (const auto &[count, type] : descriptorTypes)\n for (size_t i = 0; i < count; i++, bindIdx++)\n layoutBindings.emplace_back(VkDescriptorSetLayoutBinding {\n .binding = static_cast(bindIdx),\n .descriptorType = type,\n .descriptorCount = 1,\n .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT\n });\n\n const VkDescriptorSetLayoutCreateInfo layoutDesc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,\n .bindingCount = static_cast(layoutBindings.size()),\n .pBindings = layoutBindings.data()\n };\n VkDescriptorSetLayout descriptorSetLayout{};\n res = vkCreateDescriptorSetLayout(device.handle(), &layoutDesc, nullptr, &descriptorSetLayout);\n if (res != VK_SUCCESS || !descriptorSetLayout)\n throw LSFG::vulkan_error(res, \"Failed to create descriptor set layout\");\n\n // store module and layout in shared ptr\n this->shaderModule = std::shared_ptr(\n new VkShaderModule(shaderModuleHandle),\n [dev = device.handle()](VkShaderModule* shaderModuleHandle) {\n vkDestroyShaderModule(dev, *shaderModuleHandle, nullptr);\n }\n );\n this->descriptorSetLayout = std::shared_ptr(\n new VkDescriptorSetLayout(descriptorSetLayout),\n [dev = device.handle()](VkDescriptorSetLayout* layout) {\n vkDestroyDescriptorSetLayout(dev, *layout, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/layer.cpp", "#include \"layer.hpp\"\n#include \"common/exception.hpp\"\n#include \"config/config.hpp\"\n#include \"hooks.hpp\"\n\n#include \n#include \n\n#include \n#include \n#include \n#include \n#include 
\n\nnamespace {\n PFN_vkCreateInstance next_vkCreateInstance{};\n PFN_vkDestroyInstance next_vkDestroyInstance{};\n\n PFN_vkCreateDevice next_vkCreateDevice{};\n PFN_vkDestroyDevice next_vkDestroyDevice{};\n\n PFN_vkSetDeviceLoaderData next_vSetDeviceLoaderData{};\n\n PFN_vkGetInstanceProcAddr next_vkGetInstanceProcAddr{};\n PFN_vkGetDeviceProcAddr next_vkGetDeviceProcAddr{};\n\n PFN_vkGetPhysicalDeviceQueueFamilyProperties next_vkGetPhysicalDeviceQueueFamilyProperties{};\n PFN_vkGetPhysicalDeviceMemoryProperties next_vkGetPhysicalDeviceMemoryProperties{};\n PFN_vkGetPhysicalDeviceProperties next_vkGetPhysicalDeviceProperties{};\n PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR{};\n\n PFN_vkCreateSwapchainKHR next_vkCreateSwapchainKHR{};\n PFN_vkQueuePresentKHR next_vkQueuePresentKHR{};\n PFN_vkDestroySwapchainKHR next_vkDestroySwapchainKHR{};\n PFN_vkGetSwapchainImagesKHR next_vkGetSwapchainImagesKHR{};\n PFN_vkAllocateCommandBuffers next_vkAllocateCommandBuffers{};\n PFN_vkFreeCommandBuffers next_vkFreeCommandBuffers{};\n PFN_vkBeginCommandBuffer next_vkBeginCommandBuffer{};\n PFN_vkEndCommandBuffer next_vkEndCommandBuffer{};\n PFN_vkCreateCommandPool next_vkCreateCommandPool{};\n PFN_vkDestroyCommandPool next_vkDestroyCommandPool{};\n PFN_vkCreateImage next_vkCreateImage{};\n PFN_vkDestroyImage next_vkDestroyImage{};\n PFN_vkGetImageMemoryRequirements next_vkGetImageMemoryRequirements{};\n PFN_vkBindImageMemory next_vkBindImageMemory{};\n PFN_vkAllocateMemory next_vkAllocateMemory{};\n PFN_vkFreeMemory next_vkFreeMemory{};\n PFN_vkCreateSemaphore next_vkCreateSemaphore{};\n PFN_vkDestroySemaphore next_vkDestroySemaphore{};\n PFN_vkGetMemoryFdKHR next_vkGetMemoryFdKHR{};\n PFN_vkGetSemaphoreFdKHR next_vkGetSemaphoreFdKHR{};\n PFN_vkGetDeviceQueue next_vkGetDeviceQueue{};\n PFN_vkQueueSubmit next_vkQueueSubmit{};\n PFN_vkCmdPipelineBarrier next_vkCmdPipelineBarrier{};\n PFN_vkCmdBlitImage next_vkCmdBlitImage{};\n 
PFN_vkAcquireNextImageKHR next_vkAcquireNextImageKHR{};\n\n template\n bool initInstanceFunc(VkInstance instance, const char* name, T* func) {\n *func = reinterpret_cast(next_vkGetInstanceProcAddr(instance, name));\n if (!*func) {\n std::cerr << \"(no function pointer for \" << name << \")\\n\";\n return false;\n }\n return true;\n }\n\n template\n bool initDeviceFunc(VkDevice device, const char* name, T* func) {\n *func = reinterpret_cast(next_vkGetDeviceProcAddr(device, name));\n if (!*func) {\n std::cerr << \"(no function pointer for \" << name << \")\\n\";\n return false;\n }\n return true;\n }\n}\n\nnamespace {\n VkResult layer_vkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n try {\n // prepare layer | NOLINTBEGIN\n auto* layerDesc = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc && (layerDesc->sType != VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO\n || layerDesc->function != VK_LAYER_LINK_INFO)) {\n layerDesc = const_cast(\n reinterpret_cast(layerDesc->pNext));\n }\n if (!layerDesc)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer creation info found in pNext chain\");\n\n next_vkGetInstanceProcAddr = layerDesc->u.pLayerInfo->pfnNextGetInstanceProcAddr;\n layerDesc->u.pLayerInfo = layerDesc->u.pLayerInfo->pNext;\n\n bool success = initInstanceFunc(nullptr, \"vkCreateInstance\", &next_vkCreateInstance);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get instance function pointer for vkCreateInstance\");\n\n // NOLINTEND | skip initialization if the layer is disabled\n if (!Config::activeConf.enable) {\n auto res = next_vkCreateInstance(pCreateInfo, pAllocator, pInstance);\n initInstanceFunc(*pInstance, \"vkCreateDevice\", &next_vkCreateDevice);\n return res;\n }\n\n // create instance\n try {\n auto* createInstanceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateInstance\"]);\n auto res = 
createInstanceHook(pCreateInfo, pAllocator, pInstance);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Failed to create Vulkan instance\", e);\n }\n\n // get relevant function pointers from the next layer\n success = true;\n success &= initInstanceFunc(*pInstance,\n \"vkDestroyInstance\", &next_vkDestroyInstance);\n success &= initInstanceFunc(*pInstance,\n \"vkCreateDevice\", &next_vkCreateDevice); // workaround mesa bug\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceQueueFamilyProperties\", &next_vkGetPhysicalDeviceQueueFamilyProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceMemoryProperties\", &next_vkGetPhysicalDeviceMemoryProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceProperties\", &next_vkGetPhysicalDeviceProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceSurfaceCapabilitiesKHR\", &next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get instance function pointers\");\n\n std::cerr << \"lsfg-vk: Vulkan instance layer initialized successfully.\\n\";\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while initializing the Vulkan instance layer:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return VK_SUCCESS;\n }\n\n VkResult layer_vkCreateDevice( // NOLINTBEGIN\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n try {\n // prepare layer | NOLINTBEGIN\n auto* layerDesc = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc && (layerDesc->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO\n || layerDesc->function != VK_LAYER_LINK_INFO)) {\n layerDesc = const_cast(\n 
reinterpret_cast(layerDesc->pNext));\n }\n if (!layerDesc)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer creation info found in pNext chain\");\n\n next_vkGetDeviceProcAddr = layerDesc->u.pLayerInfo->pfnNextGetDeviceProcAddr;\n layerDesc->u.pLayerInfo = layerDesc->u.pLayerInfo->pNext;\n\n auto* layerDesc2 = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc2 && (layerDesc2->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO\n || layerDesc2->function != VK_LOADER_DATA_CALLBACK)) {\n layerDesc2 = const_cast(\n reinterpret_cast(layerDesc2->pNext));\n }\n if (!layerDesc2)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer device loader data found in pNext chain\");\n\n next_vSetDeviceLoaderData = layerDesc2->u.pfnSetDeviceLoaderData;\n\n // NOLINTEND | skip initialization if the layer is disabled\n if (!Config::activeConf.enable)\n return next_vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);\n\n // create device\n try {\n auto* createDeviceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateDevicePre\"]);\n auto res = createDeviceHook(physicalDevice, pCreateInfo, pAllocator, pDevice);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Failed to create Vulkan device\", e);\n }\n\n // get relevant function pointers from the next layer\n bool success = true;\n success &= initDeviceFunc(*pDevice, \"vkDestroyDevice\", &next_vkDestroyDevice);\n success &= initDeviceFunc(*pDevice, \"vkCreateSwapchainKHR\", &next_vkCreateSwapchainKHR);\n success &= initDeviceFunc(*pDevice, \"vkQueuePresentKHR\", &next_vkQueuePresentKHR);\n success &= initDeviceFunc(*pDevice, \"vkDestroySwapchainKHR\", &next_vkDestroySwapchainKHR);\n success &= initDeviceFunc(*pDevice, \"vkGetSwapchainImagesKHR\", &next_vkGetSwapchainImagesKHR);\n success &= initDeviceFunc(*pDevice, \"vkAllocateCommandBuffers\", 
&next_vkAllocateCommandBuffers);\n success &= initDeviceFunc(*pDevice, \"vkFreeCommandBuffers\", &next_vkFreeCommandBuffers);\n success &= initDeviceFunc(*pDevice, \"vkBeginCommandBuffer\", &next_vkBeginCommandBuffer);\n success &= initDeviceFunc(*pDevice, \"vkEndCommandBuffer\", &next_vkEndCommandBuffer);\n success &= initDeviceFunc(*pDevice, \"vkCreateCommandPool\", &next_vkCreateCommandPool);\n success &= initDeviceFunc(*pDevice, \"vkDestroyCommandPool\", &next_vkDestroyCommandPool);\n success &= initDeviceFunc(*pDevice, \"vkCreateImage\", &next_vkCreateImage);\n success &= initDeviceFunc(*pDevice, \"vkDestroyImage\", &next_vkDestroyImage);\n success &= initDeviceFunc(*pDevice, \"vkGetImageMemoryRequirements\", &next_vkGetImageMemoryRequirements);\n success &= initDeviceFunc(*pDevice, \"vkBindImageMemory\", &next_vkBindImageMemory);\n success &= initDeviceFunc(*pDevice, \"vkGetMemoryFdKHR\", &next_vkGetMemoryFdKHR);\n success &= initDeviceFunc(*pDevice, \"vkAllocateMemory\", &next_vkAllocateMemory);\n success &= initDeviceFunc(*pDevice, \"vkFreeMemory\", &next_vkFreeMemory);\n success &= initDeviceFunc(*pDevice, \"vkCreateSemaphore\", &next_vkCreateSemaphore);\n success &= initDeviceFunc(*pDevice, \"vkDestroySemaphore\", &next_vkDestroySemaphore);\n success &= initDeviceFunc(*pDevice, \"vkGetSemaphoreFdKHR\", &next_vkGetSemaphoreFdKHR);\n success &= initDeviceFunc(*pDevice, \"vkGetDeviceQueue\", &next_vkGetDeviceQueue);\n success &= initDeviceFunc(*pDevice, \"vkQueueSubmit\", &next_vkQueueSubmit);\n success &= initDeviceFunc(*pDevice, \"vkCmdPipelineBarrier\", &next_vkCmdPipelineBarrier);\n success &= initDeviceFunc(*pDevice, \"vkCmdBlitImage\", &next_vkCmdBlitImage);\n success &= initDeviceFunc(*pDevice, \"vkAcquireNextImageKHR\", &next_vkAcquireNextImageKHR);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get device function pointers\");\n\n auto postCreateDeviceHook = reinterpret_cast(\n 
Hooks::hooks[\"vkCreateDevicePost\"]);\n auto res = postCreateDeviceHook(physicalDevice, pCreateInfo, pAllocator, pDevice);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n\n std::cerr << \"lsfg-vk: Vulkan device layer initialized successfully.\\n\";\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while initializing the Vulkan device layer:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return VK_SUCCESS;\n } // NOLINTEND\n}\n\nconst std::unordered_map layerFunctions = {\n { \"vkCreateInstance\",\n reinterpret_cast(&layer_vkCreateInstance) },\n { \"vkCreateDevice\",\n reinterpret_cast(&layer_vkCreateDevice) },\n { \"vkGetInstanceProcAddr\",\n reinterpret_cast(&layer_vkGetInstanceProcAddr) },\n { \"vkGetDeviceProcAddr\",\n reinterpret_cast(&layer_vkGetDeviceProcAddr) },\n};\n\nPFN_vkVoidFunction layer_vkGetInstanceProcAddr(VkInstance instance, const char* pName) {\n const std::string name(pName);\n auto it = layerFunctions.find(name);\n if (it != layerFunctions.end())\n return it->second;\n\n it = Hooks::hooks.find(name);\n if (it != Hooks::hooks.end() && Config::activeConf.enable)\n return it->second;\n\n return next_vkGetInstanceProcAddr(instance, pName);\n}\n\nPFN_vkVoidFunction layer_vkGetDeviceProcAddr(VkDevice device, const char* pName) {\n const std::string name(pName);\n auto it = layerFunctions.find(name);\n if (it != layerFunctions.end())\n return it->second;\n\n it = Hooks::hooks.find(name);\n if (it != Hooks::hooks.end() && Config::activeConf.enable)\n return it->second;\n\n return next_vkGetDeviceProcAddr(device, pName);\n}\n\n// original functions\nnamespace Layer {\n VkResult ovkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n return next_vkCreateInstance(pCreateInfo, pAllocator, pInstance);\n }\n void ovkDestroyInstance(\n VkInstance instance,\n const 
VkAllocationCallbacks* pAllocator) {\n next_vkDestroyInstance(instance, pAllocator);\n }\n\n VkResult ovkCreateDevice(\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n return next_vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);\n }\n void ovkDestroyDevice(\n VkDevice device,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyDevice(device, pAllocator);\n }\n\n VkResult ovkSetDeviceLoaderData(VkDevice device, void* object) {\n return next_vSetDeviceLoaderData(device, object);\n }\n\n PFN_vkVoidFunction ovkGetInstanceProcAddr(\n VkInstance instance,\n const char* pName) {\n return next_vkGetInstanceProcAddr(instance, pName);\n }\n PFN_vkVoidFunction ovkGetDeviceProcAddr(\n VkDevice device,\n const char* pName) {\n return next_vkGetDeviceProcAddr(device, pName);\n }\n\n void ovkGetPhysicalDeviceQueueFamilyProperties(\n VkPhysicalDevice physicalDevice,\n uint32_t* pQueueFamilyPropertyCount,\n VkQueueFamilyProperties* pQueueFamilyProperties) {\n next_vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);\n }\n void ovkGetPhysicalDeviceMemoryProperties(\n VkPhysicalDevice physicalDevice,\n VkPhysicalDeviceMemoryProperties* pMemoryProperties) {\n next_vkGetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);\n }\n void ovkGetPhysicalDeviceProperties(\n VkPhysicalDevice physicalDevice,\n VkPhysicalDeviceProperties* pProperties) {\n next_vkGetPhysicalDeviceProperties(physicalDevice, pProperties);\n }\n VkResult ovkGetPhysicalDeviceSurfaceCapabilitiesKHR(\n VkPhysicalDevice physicalDevice,\n VkSurfaceKHR surface,\n VkSurfaceCapabilitiesKHR* pSurfaceCapabilities) {\n return next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);\n }\n\n VkResult ovkCreateSwapchainKHR(\n VkDevice device,\n const VkSwapchainCreateInfoKHR* pCreateInfo,\n const 
VkAllocationCallbacks* pAllocator,\n VkSwapchainKHR* pSwapchain) {\n return next_vkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);\n }\n VkResult ovkQueuePresentKHR(\n VkQueue queue,\n const VkPresentInfoKHR* pPresentInfo) {\n return next_vkQueuePresentKHR(queue, pPresentInfo);\n }\n void ovkDestroySwapchainKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroySwapchainKHR(device, swapchain, pAllocator);\n }\n\n VkResult ovkGetSwapchainImagesKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n uint32_t* pSwapchainImageCount,\n VkImage* pSwapchainImages) {\n return next_vkGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);\n }\n\n VkResult ovkAllocateCommandBuffers(\n VkDevice device,\n const VkCommandBufferAllocateInfo* pAllocateInfo,\n VkCommandBuffer* pCommandBuffers) {\n return next_vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);\n }\n void ovkFreeCommandBuffers(\n VkDevice device,\n VkCommandPool commandPool,\n uint32_t commandBufferCount,\n const VkCommandBuffer* pCommandBuffers) {\n next_vkFreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);\n }\n\n VkResult ovkBeginCommandBuffer(\n VkCommandBuffer commandBuffer,\n const VkCommandBufferBeginInfo* pBeginInfo) {\n return next_vkBeginCommandBuffer(commandBuffer, pBeginInfo);\n }\n VkResult ovkEndCommandBuffer(\n VkCommandBuffer commandBuffer) {\n return next_vkEndCommandBuffer(commandBuffer);\n }\n\n VkResult ovkCreateCommandPool(\n VkDevice device,\n const VkCommandPoolCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkCommandPool* pCommandPool) {\n return next_vkCreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);\n }\n void ovkDestroyCommandPool(\n VkDevice device,\n VkCommandPool commandPool,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyCommandPool(device, commandPool, pAllocator);\n }\n\n VkResult 
ovkCreateImage(\n VkDevice device,\n const VkImageCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkImage* pImage) {\n return next_vkCreateImage(device, pCreateInfo, pAllocator, pImage);\n }\n void ovkDestroyImage(\n VkDevice device,\n VkImage image,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyImage(device, image, pAllocator);\n }\n\n void ovkGetImageMemoryRequirements(\n VkDevice device,\n VkImage image,\n VkMemoryRequirements* pMemoryRequirements) {\n next_vkGetImageMemoryRequirements(device, image, pMemoryRequirements);\n }\n VkResult ovkBindImageMemory(\n VkDevice device,\n VkImage image,\n VkDeviceMemory memory,\n VkDeviceSize memoryOffset) {\n return next_vkBindImageMemory(device, image, memory, memoryOffset);\n }\n\n VkResult ovkAllocateMemory(\n VkDevice device,\n const VkMemoryAllocateInfo* pAllocateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDeviceMemory* pMemory) {\n return next_vkAllocateMemory(device, pAllocateInfo, pAllocator, pMemory);\n }\n void ovkFreeMemory(\n VkDevice device,\n VkDeviceMemory memory,\n const VkAllocationCallbacks* pAllocator) {\n next_vkFreeMemory(device, memory, pAllocator);\n }\n\n VkResult ovkCreateSemaphore(\n VkDevice device,\n const VkSemaphoreCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSemaphore* pSemaphore) {\n return next_vkCreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);\n }\n void ovkDestroySemaphore(\n VkDevice device,\n VkSemaphore semaphore,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroySemaphore(device, semaphore, pAllocator);\n }\n\n VkResult ovkGetMemoryFdKHR(\n VkDevice device,\n const VkMemoryGetFdInfoKHR* pGetFdInfo,\n int* pFd) {\n return next_vkGetMemoryFdKHR(device, pGetFdInfo, pFd);\n }\n VkResult ovkGetSemaphoreFdKHR(\n VkDevice device,\n const VkSemaphoreGetFdInfoKHR* pGetFdInfo,\n int* pFd) {\n return next_vkGetSemaphoreFdKHR(device, pGetFdInfo, pFd);\n }\n\n void ovkGetDeviceQueue(\n VkDevice 
device,\n uint32_t queueFamilyIndex,\n uint32_t queueIndex,\n VkQueue* pQueue) {\n next_vkGetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);\n }\n VkResult ovkQueueSubmit(\n VkQueue queue,\n uint32_t submitCount,\n const VkSubmitInfo* pSubmits,\n VkFence fence) {\n return next_vkQueueSubmit(queue, submitCount, pSubmits, fence);\n }\n\n void ovkCmdPipelineBarrier(\n VkCommandBuffer commandBuffer,\n VkPipelineStageFlags srcStageMask,\n VkPipelineStageFlags dstStageMask,\n VkDependencyFlags dependencyFlags,\n uint32_t memoryBarrierCount,\n const VkMemoryBarrier* pMemoryBarriers,\n uint32_t bufferMemoryBarrierCount,\n const VkBufferMemoryBarrier* pBufferMemoryBarriers,\n uint32_t imageMemoryBarrierCount,\n const VkImageMemoryBarrier* pImageMemoryBarriers) {\n next_vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,\n memoryBarrierCount, pMemoryBarriers,\n bufferMemoryBarrierCount, pBufferMemoryBarriers,\n imageMemoryBarrierCount, pImageMemoryBarriers);\n }\n void ovkCmdBlitImage(\n VkCommandBuffer commandBuffer,\n VkImage srcImage,\n VkImageLayout srcImageLayout,\n VkImage dstImage,\n VkImageLayout dstImageLayout,\n uint32_t regionCount,\n const VkImageBlit* pRegions,\n VkFilter filter) {\n next_vkCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);\n }\n\n VkResult ovkAcquireNextImageKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n uint64_t timeout,\n VkSemaphore semaphore,\n VkFence fence,\n uint32_t* pImageIndex) {\n return next_vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);\n }\n}\n"], ["/lsfg-vk/framegen/src/core/sampler.cpp", "#include \n#include \n\n#include \"core/sampler.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nSampler::Sampler(const Core::Device& device,\n VkSamplerAddressMode mode,\n VkCompareOp compare,\n bool isWhite) {\n // create sampler\n 
const VkSamplerCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,\n .magFilter = VK_FILTER_LINEAR,\n .minFilter = VK_FILTER_LINEAR,\n .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,\n .addressModeU = mode,\n .addressModeV = mode,\n .addressModeW = mode,\n .compareOp = compare,\n .maxLod = VK_LOD_CLAMP_NONE,\n .borderColor =\n isWhite ? VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE\n : VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK\n };\n VkSampler samplerHandle{};\n auto res = vkCreateSampler(device.handle(), &desc, nullptr, &samplerHandle);\n if (res != VK_SUCCESS || samplerHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create sampler\");\n\n // store sampler in shared ptr\n this->sampler = std::shared_ptr(\n new VkSampler(samplerHandle),\n [dev = device.handle()](VkSampler* samplerHandle) {\n vkDestroySampler(dev, *samplerHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/mini/semaphore.cpp", "#include \"mini/semaphore.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n\nusing namespace Mini;\n\nSemaphore::Semaphore(VkDevice device) {\n // create semaphore\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO\n };\n VkSemaphore semaphoreHandle{};\n auto res = Layer::ovkCreateSemaphore(device, &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // store semaphore in shared ptr\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device](VkSemaphore* semaphoreHandle) {\n Layer::ovkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nSemaphore::Semaphore(VkDevice device, int* fd) {\n // create semaphore\n const VkExportSemaphoreCreateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n const VkSemaphoreCreateInfo desc{\n .sType = 
VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = &exportInfo\n };\n VkSemaphore semaphoreHandle{};\n auto res = Layer::ovkCreateSemaphore(device, &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // export semaphore to fd\n const VkSemaphoreGetFdInfoKHR fdInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,\n .semaphore = semaphoreHandle,\n .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n res = Layer::ovkGetSemaphoreFdKHR(device, &fdInfo, fd);\n if (res != VK_SUCCESS || *fd < 0)\n throw LSFG::vulkan_error(res, \"Unable to export semaphore to fd\");\n\n // store semaphore in shared ptr\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device](VkSemaphore* semaphoreHandle) {\n Layer::ovkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/mini/image.cpp", "#include \"mini/image.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n\nusing namespace Mini;\n\nImage::Image(VkDevice device, VkPhysicalDevice physicalDevice,\n VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags, int* fd)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkExternalMemoryImageCreateInfo externalInfo{\n .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .pNext = &externalInfo,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = 
Layer::ovkCreateImage(device, &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n Layer::ovkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);\n\n VkMemoryRequirements memReqs;\n Layer::ovkGetImageMemoryRequirements(device, imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryDedicatedAllocateInfoKHR dedicatedInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,\n .image = imageHandle,\n };\n const VkExportMemoryAllocateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,\n .pNext = &dedicatedInfo,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .pNext = &exportInfo,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = Layer::ovkAllocateMemory(device, &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = Layer::ovkBindImageMemory(device, imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // obtain the sharing fd\n const 
VkMemoryGetFdInfoKHR fdInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,\n .memory = memoryHandle,\n .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,\n };\n res = Layer::ovkGetMemoryFdKHR(device, &fdInfo, fd);\n if (res != VK_SUCCESS || *fd < 0)\n throw LSFG::vulkan_error(res, \"Failed to obtain sharing fd for Vulkan image\");\n\n // store objects in shared ptr\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device](VkImage* img) {\n Layer::ovkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device](VkDeviceMemory* mem) {\n Layer::ovkFreeMemory(dev, *mem, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/context.cpp", "#include \"context.hpp\"\n#include \"config/config.hpp\"\n#include \"common/exception.hpp\"\n#include \"extract/extract.hpp\"\n#include \"extract/trans.hpp\"\n#include \"utils/utils.hpp\"\n#include \"hooks.hpp\"\n#include \"layer.hpp\"\n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nLsContext::LsContext(const Hooks::DeviceInfo& info, VkSwapchainKHR swapchain,\n VkExtent2D extent, const std::vector& swapchainImages)\n : swapchain(swapchain), swapchainImages(swapchainImages),\n extent(extent) {\n // get updated configuration\n auto& conf = Config::activeConf;\n if (!conf.config_file.empty()\n && (\n !std::filesystem::exists(conf.config_file)\n || conf.timestamp != std::filesystem::last_write_time(conf.config_file)\n )) {\n std::cerr << \"lsfg-vk: Rereading configuration, as it is no longer valid.\\n\";\n std::this_thread::sleep_for(std::chrono::milliseconds(100));\n\n // reread configuration\n const std::string file = Utils::getConfigFile();\n const auto name = Utils::getProcessName();\n try {\n Config::updateConfig(file);\n conf = Config::getConfig(name);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: Failed to 
update configuration, continuing using old:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n }\n\n LSFG_3_1P::finalize();\n LSFG_3_1::finalize();\n\n // print config\n std::cerr << \"lsfg-vk: Reloaded configuration for \" << name.second << \":\\n\";\n if (!conf.dll.empty()) std::cerr << \" Using DLL from: \" << conf.dll << '\\n';\n std::cerr << \" Multiplier: \" << conf.multiplier << '\\n';\n std::cerr << \" Flow Scale: \" << conf.flowScale << '\\n';\n std::cerr << \" Performance Mode: \" << (conf.performance ? \"Enabled\" : \"Disabled\") << '\\n';\n std::cerr << \" HDR Mode: \" << (conf.hdr ? \"Enabled\" : \"Disabled\") << '\\n';\n if (conf.e_present != 2) std::cerr << \" ! Present Mode: \" << conf.e_present << '\\n';\n\n if (conf.multiplier <= 1) return;\n }\n // we could take the format from the swapchain,\n // but honestly this is safer.\n const VkFormat format = conf.hdr\n ? VK_FORMAT_R8G8B8A8_UNORM\n : VK_FORMAT_R16G16B16A16_SFLOAT;\n\n // prepare textures for lsfg\n std::array fds{};\n this->frame_0 = Mini::Image(info.device, info.physicalDevice,\n extent, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &fds.at(0));\n this->frame_1 = Mini::Image(info.device, info.physicalDevice,\n extent, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &fds.at(1));\n\n std::vector outFds(conf.multiplier - 1);\n for (size_t i = 0; i < (conf.multiplier - 1); ++i)\n this->out_n.emplace_back(info.device, info.physicalDevice,\n extent, format,\n VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &outFds.at(i));\n\n // initialize lsfg\n auto* lsfgInitialize = LSFG_3_1::initialize;\n auto* lsfgCreateContext = LSFG_3_1::createContext;\n auto* lsfgDeleteContext = LSFG_3_1::deleteContext;\n if (conf.performance) {\n lsfgInitialize = LSFG_3_1P::initialize;\n lsfgCreateContext = LSFG_3_1P::createContext;\n lsfgDeleteContext = LSFG_3_1P::deleteContext;\n }\n\n setenv(\"DISABLE_LSFG\", \"1\", 1); // NOLINT\n\n lsfgInitialize(\n 
Utils::getDeviceUUID(info.physicalDevice),\n conf.hdr, 1.0F / conf.flowScale, conf.multiplier - 1,\n [](const std::string& name) {\n auto dxbc = Extract::getShader(name);\n auto spirv = Extract::translateShader(dxbc);\n return spirv;\n }\n );\n\n this->lsfgCtxId = std::shared_ptr(\n new int32_t(lsfgCreateContext(fds.at(0), fds.at(1), outFds, extent, format)),\n [lsfgDeleteContext = lsfgDeleteContext](const int32_t* id) {\n lsfgDeleteContext(*id);\n }\n );\n\n unsetenv(\"DISABLE_LSFG\"); // NOLINT\n\n // prepare render passes\n this->cmdPool = Mini::CommandPool(info.device, info.queue.first);\n for (size_t i = 0; i < 8; i++) {\n auto& pass = this->passInfos.at(i);\n pass.renderSemaphores.resize(conf.multiplier - 1);\n pass.acquireSemaphores.resize(conf.multiplier - 1);\n pass.postCopyBufs.resize(conf.multiplier - 1);\n pass.postCopySemaphores.resize(conf.multiplier - 1);\n pass.prevPostCopySemaphores.resize(conf.multiplier - 1);\n }\n}\n\nVkResult LsContext::present(const Hooks::DeviceInfo& info, const void* pNext, VkQueue queue,\n const std::vector& gameRenderSemaphores, uint32_t presentIdx) {\n const auto& conf = Config::activeConf;\n auto& pass = this->passInfos.at(this->frameIdx % 8);\n\n // 1. copy swapchain image to frame_0/frame_1\n int preCopySemaphoreFd{};\n pass.preCopySemaphores.at(0) = Mini::Semaphore(info.device, &preCopySemaphoreFd);\n pass.preCopySemaphores.at(1) = Mini::Semaphore(info.device);\n pass.preCopyBuf = Mini::CommandBuffer(info.device, this->cmdPool);\n pass.preCopyBuf.begin();\n\n Utils::copyImage(pass.preCopyBuf.handle(),\n this->swapchainImages.at(presentIdx),\n this->frameIdx % 2 == 0 ? 
this->frame_0.handle() : this->frame_1.handle(),\n this->extent.width, this->extent.height,\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n true, false);\n\n pass.preCopyBuf.end();\n\n std::vector gameRenderSemaphores2 = gameRenderSemaphores;\n if (this->frameIdx > 0)\n gameRenderSemaphores2.emplace_back(this->passInfos.at((this->frameIdx - 1) % 8)\n .preCopySemaphores.at(1).handle());\n pass.preCopyBuf.submit(info.queue.second,\n gameRenderSemaphores2,\n { pass.preCopySemaphores.at(0).handle(),\n pass.preCopySemaphores.at(1).handle() });\n\n // 2. render intermediary frames\n std::vector renderSemaphoreFds(conf.multiplier - 1);\n for (size_t i = 0; i < (conf.multiplier - 1); ++i)\n pass.renderSemaphores.at(i) = Mini::Semaphore(info.device, &renderSemaphoreFds.at(i));\n\n if (conf.performance)\n LSFG_3_1P::presentContext(*this->lsfgCtxId,\n preCopySemaphoreFd,\n renderSemaphoreFds);\n else\n LSFG_3_1::presentContext(*this->lsfgCtxId,\n preCopySemaphoreFd,\n renderSemaphoreFds);\n\n for (size_t i = 0; i < (conf.multiplier - 1); i++) {\n // 3. acquire next swapchain image\n pass.acquireSemaphores.at(i) = Mini::Semaphore(info.device);\n uint32_t imageIdx{};\n auto res = Layer::ovkAcquireNextImageKHR(info.device, this->swapchain, UINT64_MAX,\n pass.acquireSemaphores.at(i).handle(), VK_NULL_HANDLE, &imageIdx);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to acquire next swapchain image\");\n\n // 4. 
copy output image to swapchain image\n pass.postCopySemaphores.at(i) = Mini::Semaphore(info.device);\n pass.prevPostCopySemaphores.at(i) = Mini::Semaphore(info.device);\n pass.postCopyBufs.at(i) = Mini::CommandBuffer(info.device, this->cmdPool);\n pass.postCopyBufs.at(i).begin();\n\n Utils::copyImage(pass.postCopyBufs.at(i).handle(),\n this->out_n.at(i).handle(),\n this->swapchainImages.at(imageIdx),\n this->extent.width, this->extent.height,\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n false, true);\n\n pass.postCopyBufs.at(i).end();\n pass.postCopyBufs.at(i).submit(info.queue.second,\n { pass.acquireSemaphores.at(i).handle(),\n pass.renderSemaphores.at(i).handle() },\n { pass.postCopySemaphores.at(i).handle(),\n pass.prevPostCopySemaphores.at(i).handle() });\n\n // 5. present swapchain image\n std::vector waitSemaphores{ pass.postCopySemaphores.at(i).handle() };\n if (i != 0) waitSemaphores.emplace_back(pass.prevPostCopySemaphores.at(i - 1).handle());\n\n const VkPresentInfoKHR presentInfo{\n .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,\n .pNext = i == 0 ? pNext : nullptr, // only set on first present\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphores.data(),\n .swapchainCount = 1,\n .pSwapchains = &this->swapchain,\n .pImageIndices = &imageIdx,\n };\n res = Layer::ovkQueuePresentKHR(queue, &presentInfo);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to present swapchain image\");\n }\n\n // 6. 
present actual next frame\n VkSemaphore lastPrevPostCopySemaphore =\n pass.prevPostCopySemaphores.at(conf.multiplier - 1 - 1).handle();\n const VkPresentInfoKHR presentInfo{\n .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,\n .waitSemaphoreCount = 1,\n .pWaitSemaphores = &lastPrevPostCopySemaphore,\n .swapchainCount = 1,\n .pSwapchains = &this->swapchain,\n .pImageIndices = &presentIdx,\n };\n auto res = Layer::ovkQueuePresentKHR(queue, &presentInfo);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to present swapchain image\");\n\n this->frameIdx++;\n return res;\n}\n"], ["/lsfg-vk/framegen/v3.1_src/lsfg.cpp", "#include \n#include \n\n#include \"lsfg_3_1.hpp\"\n#include \"v3_1/context.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/instance.hpp\"\n#include \"pool/shaderpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"common/utils.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1;\n\nnamespace {\n std::optional instance;\n std::optional device;\n std::unordered_map contexts;\n}\n\nvoid LSFG_3_1::initialize(uint64_t deviceUUID,\n bool isHdr, float flowScale, uint64_t generationCount,\n const std::function(const std::string&)>& loader) {\n if (instance.has_value() || device.has_value())\n return;\n\n instance.emplace();\n device.emplace(Vulkan {\n .device{*instance, deviceUUID},\n .generationCount = generationCount,\n .flowScale = flowScale,\n .isHdr = isHdr\n });\n contexts = std::unordered_map();\n\n device->commandPool = Core::CommandPool(device->device);\n device->descriptorPool = Core::DescriptorPool(device->device);\n\n device->resources = Pool::ResourcePool(device->isHdr, device->flowScale);\n device->shaders = Pool::ShaderPool(loader);\n\n std::srand(static_cast(std::time(nullptr)));\n}\n\nint32_t LSFG_3_1::createContext(\n int in0, int in1, const std::vector& 
outN,\n VkExtent2D extent, VkFormat format) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n const int32_t id = std::rand();\n contexts.emplace(id, Context(*device, in0, in1, outN, extent, format));\n return id;\n}\n\nvoid LSFG_3_1::presentContext(int32_t id, int inSem, const std::vector& outSem) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Context not found\");\n\n it->second.present(*device, inSem, outSem);\n}\n\nvoid LSFG_3_1::deleteContext(int32_t id) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_DEVICE_LOST, \"No such context\");\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.erase(it);\n}\n\nvoid LSFG_3_1::finalize() {\n if (!instance.has_value() || !device.has_value())\n return;\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.clear();\n device.reset();\n instance.reset();\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/lsfg.cpp", "#include \n#include \n\n#include \"lsfg_3_1p.hpp\"\n#include \"v3_1p/context.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/instance.hpp\"\n#include \"pool/shaderpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"common/utils.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1P;\n\nnamespace {\n std::optional instance;\n std::optional device;\n std::unordered_map contexts;\n}\n\nvoid LSFG_3_1P::initialize(uint64_t deviceUUID,\n bool isHdr, float flowScale, uint64_t generationCount,\n const 
std::function(const std::string&)>& loader) {\n if (instance.has_value() || device.has_value())\n return;\n\n instance.emplace();\n device.emplace(Vulkan {\n .device{*instance, deviceUUID},\n .generationCount = generationCount,\n .flowScale = flowScale,\n .isHdr = isHdr\n });\n contexts = std::unordered_map();\n\n device->commandPool = Core::CommandPool(device->device);\n device->descriptorPool = Core::DescriptorPool(device->device);\n\n device->resources = Pool::ResourcePool(device->isHdr, device->flowScale);\n device->shaders = Pool::ShaderPool(loader);\n\n std::srand(static_cast(std::time(nullptr)));\n}\n\nint32_t LSFG_3_1P::createContext(\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n const int32_t id = std::rand();\n contexts.emplace(id, Context(*device, in0, in1, outN, extent, format));\n return id;\n}\n\nvoid LSFG_3_1P::presentContext(int32_t id, int inSem, const std::vector& outSem) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Context not found\");\n\n it->second.present(*device, inSem, outSem);\n}\n\nvoid LSFG_3_1P::deleteContext(int32_t id) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_DEVICE_LOST, \"No such context\");\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.erase(it);\n}\n\nvoid LSFG_3_1P::finalize() {\n if (!instance.has_value() || !device.has_value())\n return;\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.clear();\n device.reset();\n 
instance.reset();\n}\n"], ["/lsfg-vk/framegen/src/core/instance.cpp", "#include \n#include \n\n#include \"core/instance.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nconst std::vector requiredExtensions = {\n\n};\n\nInstance::Instance() {\n volkInitialize();\n\n // create Vulkan instance\n const VkApplicationInfo appInfo{\n .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,\n .pApplicationName = \"lsfg-vk-base\",\n .applicationVersion = VK_MAKE_VERSION(0, 0, 1),\n .pEngineName = \"lsfg-vk-base\",\n .engineVersion = VK_MAKE_VERSION(0, 0, 1),\n .apiVersion = VK_API_VERSION_1_3\n };\n const VkInstanceCreateInfo createInfo{\n .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,\n .pApplicationInfo = &appInfo,\n .enabledExtensionCount = static_cast(requiredExtensions.size()),\n .ppEnabledExtensionNames = requiredExtensions.data()\n };\n VkInstance instanceHandle{};\n auto res = vkCreateInstance(&createInfo, nullptr, &instanceHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan instance\");\n\n volkLoadInstance(instanceHandle);\n\n // store in shared ptr\n this->instance = std::shared_ptr(\n new VkInstance(instanceHandle),\n [](VkInstance* instance) {\n vkDestroyInstance(*instance, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/utils/utils.cpp", "#include \"utils/utils.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n#include \n#include // NOLINT\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Utils;\n\nstd::pair Utils::findQueue(VkDevice device, VkPhysicalDevice physicalDevice,\n VkDeviceCreateInfo* desc, VkQueueFlags flags) {\n std::vector enabledQueues(desc->queueCreateInfoCount);\n std::copy_n(desc->pQueueCreateInfos, enabledQueues.size(), enabledQueues.data());\n\n uint32_t familyCount{};\n 
Layer::ovkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount, nullptr);\n std::vector families(familyCount);\n Layer::ovkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount,\n families.data());\n\n std::optional idx;\n for (const auto& queueInfo : enabledQueues) {\n if ((queueInfo.queueFamilyIndex < families.size()) &&\n (families[queueInfo.queueFamilyIndex].queueFlags & flags)) {\n idx = queueInfo.queueFamilyIndex;\n break;\n }\n }\n if (!idx.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"No suitable queue found\");\n\n VkQueue queue{};\n Layer::ovkGetDeviceQueue(device, *idx, 0, &queue);\n\n auto res = Layer::ovkSetDeviceLoaderData(device, queue);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to set device loader data for queue\");\n\n return { *idx, queue };\n}\n\nuint64_t Utils::getDeviceUUID(VkPhysicalDevice physicalDevice) {\n VkPhysicalDeviceProperties properties{};\n Layer::ovkGetPhysicalDeviceProperties(physicalDevice, &properties);\n\n return static_cast(properties.vendorID) << 32 | properties.deviceID;\n}\n\nuint32_t Utils::getMaxImageCount(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface) {\n VkSurfaceCapabilitiesKHR capabilities{};\n auto res = Layer::ovkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice,\n surface, &capabilities);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get surface capabilities\");\n if (capabilities.maxImageCount == 0)\n return 999; // :3\n return capabilities.maxImageCount;\n}\n\nstd::vector Utils::addExtensions(const char* const* extensions, size_t count,\n const std::vector& requiredExtensions) {\n std::vector ext(count);\n std::copy_n(extensions, count, ext.data());\n\n for (const auto& e : requiredExtensions) {\n auto it = std::ranges::find_if(ext,\n [e](const char* extName) {\n return std::string(extName) == std::string(e);\n });\n if (it == ext.end())\n ext.push_back(e);\n }\n\n return ext;\n}\n\nvoid 
Utils::copyImage(VkCommandBuffer buf,\n VkImage src, VkImage dst,\n uint32_t width, uint32_t height,\n VkPipelineStageFlags pre, VkPipelineStageFlags post,\n bool makeSrcPresentable, bool makeDstPresentable) {\n const VkImageMemoryBarrier srcBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,\n .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n .image = src,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const VkImageMemoryBarrier dstBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = dst,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const std::vector barriers = { srcBarrier, dstBarrier };\n Layer::ovkCmdPipelineBarrier(buf,\n pre, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,\n 0, nullptr, 0, nullptr,\n static_cast(barriers.size()), barriers.data());\n\n const VkImageBlit imageBlit{\n .srcSubresource = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .layerCount = 1\n },\n .srcOffsets = {\n { 0, 0, 0 },\n { static_cast(width), static_cast(height), 1 }\n },\n .dstSubresource = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .layerCount = 1\n },\n .dstOffsets = {\n { 0, 0, 0 },\n { static_cast(width), static_cast(height), 1 }\n }\n };\n Layer::ovkCmdBlitImage(\n buf,\n src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n 1, &imageBlit,\n VK_FILTER_NEAREST\n );\n\n if (makeSrcPresentable) {\n const VkImageMemoryBarrier presentBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .image = src,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n 
.layerCount = 1\n }\n };\n Layer::ovkCmdPipelineBarrier(buf,\n VK_PIPELINE_STAGE_TRANSFER_BIT, post, 0,\n 0, nullptr, 0, nullptr,\n 1, &presentBarrier);\n }\n\n if (makeDstPresentable) {\n const VkImageMemoryBarrier presentBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT,\n .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .image = dst,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n Layer::ovkCmdPipelineBarrier(buf,\n VK_PIPELINE_STAGE_TRANSFER_BIT, post, 0,\n 0, nullptr, 0, nullptr,\n 1, &presentBarrier);\n }\n}\n\nnamespace {\n auto& logCounts() {\n static std::unordered_map map;\n return map;\n }\n}\n\nvoid Utils::logLimitN(const std::string& id, size_t n, const std::string& message) {\n auto& count = logCounts()[id];\n if (count <= n)\n std::cerr << \"lsfg-vk: \" << message << '\\n';\n if (count == n)\n std::cerr << \"(above message has been repeated \" << n << \" times, suppressing further)\\n\";\n count++;\n}\n\nvoid Utils::resetLimitN(const std::string& id) noexcept {\n logCounts().erase(id);\n}\n\nstd::pair Utils::getProcessName() {\n const char* process_name = std::getenv(\"LSFG_PROCESS\");\n if (process_name && *process_name != '\\0')\n return { process_name, process_name };\n\n const char* benchmark_flag = std::getenv(\"LSFG_BENCHMARK\");\n if (benchmark_flag)\n return { \"benchmark\", \"benchmark\" };\n std::array exe{};\n\n const ssize_t exe_len = readlink(\"/proc/self/exe\", exe.data(), exe.size() - 1);\n if (exe_len <= 0)\n return { \"Unknown Process\", \"unknown\" };\n exe.at(static_cast(exe_len)) = '\\0';\n\n std::ifstream comm_file(\"/proc/self/comm\");\n if (!comm_file.is_open())\n return { std::string(exe.data()), \"unknown\" };\n std::array comm{};\n comm_file.read(comm.data(), 256);\n 
comm.at(static_cast(comm_file.gcount())) = '\\0';\n std::string comm_str(comm.data());\n if (comm_str.back() == '\\n')\n comm_str.pop_back();\n\n return{ std::string(exe.data()), comm_str };\n}\n\nstd::string Utils::getConfigFile() {\n const char* configFile = std::getenv(\"LSFG_CONFIG\");\n if (configFile && *configFile != '\\0')\n return{configFile};\n const char* xdgPath = std::getenv(\"XDG_CONFIG_HOME\");\n if (xdgPath && *xdgPath != '\\0')\n return std::string(xdgPath) + \"/lsfg-vk/conf.toml\";\n const char* homePath = std::getenv(\"HOME\");\n if (homePath && *homePath != '\\0')\n return std::string(homePath) + \"/.config/lsfg-vk/conf.toml\";\n return \"/etc/lsfg-vk/conf.toml\";\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/context.cpp", "#include \n#include \n\n#include \"v3_1p/context.hpp\"\n#include \"common/utils.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1P;\n\nContext::Context(Vulkan& vk,\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n // import input images\n this->inImg_0 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in0);\n this->inImg_1 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in1);\n\n // prepare render data\n for (size_t i = 0; i < 8; i++) {\n auto& data = this->data.at(i);\n data.internalSemaphores.resize(vk.generationCount);\n data.outSemaphores.resize(vk.generationCount);\n data.completionFences.resize(vk.generationCount);\n data.cmdBuffers2.resize(vk.generationCount);\n }\n\n // create shader chains\n this->mipmaps = Shaders::Mipmaps(vk, this->inImg_0, this->inImg_1);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(i) = Shaders::Alpha(vk, this->mipmaps.getOutImages().at(i));\n this->beta = Shaders::Beta(vk, 
this->alpha.at(0).getOutImages());\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i) = Shaders::Gamma(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(std::min(6 - i, 5)),\n (i == 0) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()));\n if (i < 4) continue;\n\n this->delta.at(i - 4) = Shaders::Delta(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(6 - i),\n (i == 4) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage1()));\n }\n this->generate = Shaders::Generate(vk,\n this->inImg_0, this->inImg_1,\n this->gamma.at(6).getOutImage(),\n this->delta.at(2).getOutImage1(),\n this->delta.at(2).getOutImage2(),\n outN, format);\n}\n\nvoid Context::present(Vulkan& vk,\n int inSem, const std::vector& outSem) {\n auto& data = this->data.at(this->frameIdx % 8);\n\n // 3. wait for completion of previous frame in this slot\n if (data.shouldWait)\n for (auto& fence : data.completionFences)\n if (!fence.wait(vk.device, UINT64_MAX))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Fence wait timed out\");\n data.shouldWait = true;\n\n // 1. create mipmaps and process input image\n if (inSem >= 0) data.inSemaphore = Core::Semaphore(vk.device, inSem);\n for (size_t i = 0; i < vk.generationCount; i++)\n data.internalSemaphores.at(i) = Core::Semaphore(vk.device);\n\n data.cmdBuffer1 = Core::CommandBuffer(vk.device, vk.commandPool);\n data.cmdBuffer1.begin();\n\n this->mipmaps.Dispatch(data.cmdBuffer1, this->frameIdx);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(6 - i).Dispatch(data.cmdBuffer1, this->frameIdx);\n this->beta.Dispatch(data.cmdBuffer1, this->frameIdx);\n\n data.cmdBuffer1.end();\n std::vector waits = { data.inSemaphore };\n if (inSem < 0) waits.clear();\n data.cmdBuffer1.submit(vk.device.getComputeQueue(), std::nullopt,\n waits, std::nullopt,\n data.internalSemaphores, std::nullopt);\n\n // 2. 
generate intermediary frames\n for (size_t pass = 0; pass < vk.generationCount; pass++) {\n auto& internalSemaphore = data.internalSemaphores.at(pass);\n auto& outSemaphore = data.outSemaphores.at(pass);\n if (inSem >= 0) outSemaphore = Core::Semaphore(vk.device, outSem.empty() ? -1 : outSem.at(pass));\n auto& completionFence = data.completionFences.at(pass);\n completionFence = Core::Fence(vk.device);\n\n auto& buf2 = data.cmdBuffers2.at(pass);\n buf2 = Core::CommandBuffer(vk.device, vk.commandPool);\n buf2.begin();\n\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i).Dispatch(buf2, this->frameIdx, pass);\n if (i >= 4)\n this->delta.at(i - 4).Dispatch(buf2, this->frameIdx, pass, i == 6);\n }\n this->generate.Dispatch(buf2, this->frameIdx, pass);\n\n buf2.end();\n std::vector signals = { outSemaphore };\n if (inSem < 0) signals.clear();\n buf2.submit(vk.device.getComputeQueue(), completionFence,\n { internalSemaphore }, std::nullopt,\n signals, std::nullopt);\n }\n\n this->frameIdx++;\n}\n"], ["/lsfg-vk/framegen/v3.1_src/context.cpp", "#include \n#include \n\n#include \"v3_1/context.hpp\"\n#include \"common/utils.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1;\n\nContext::Context(Vulkan& vk,\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n // import input images\n this->inImg_0 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in0);\n this->inImg_1 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in1);\n\n // prepare render data\n for (size_t i = 0; i < 8; i++) {\n auto& data = this->data.at(i);\n data.internalSemaphores.resize(vk.generationCount);\n data.outSemaphores.resize(vk.generationCount);\n data.completionFences.resize(vk.generationCount);\n 
data.cmdBuffers2.resize(vk.generationCount);\n }\n\n // create shader chains\n this->mipmaps = Shaders::Mipmaps(vk, this->inImg_0, this->inImg_1);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(i) = Shaders::Alpha(vk, this->mipmaps.getOutImages().at(i));\n this->beta = Shaders::Beta(vk, this->alpha.at(0).getOutImages());\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i) = Shaders::Gamma(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(std::min(6 - i, 5)),\n (i == 0) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()));\n if (i < 4) continue;\n\n this->delta.at(i - 4) = Shaders::Delta(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(6 - i),\n (i == 4) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage1()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage2()));\n }\n this->generate = Shaders::Generate(vk,\n this->inImg_0, this->inImg_1,\n this->gamma.at(6).getOutImage(),\n this->delta.at(2).getOutImage1(),\n this->delta.at(2).getOutImage2(),\n outN, format);\n}\n\nvoid Context::present(Vulkan& vk,\n int inSem, const std::vector& outSem) {\n auto& data = this->data.at(this->frameIdx % 8);\n\n // 3. wait for completion of previous frame in this slot\n if (data.shouldWait)\n for (auto& fence : data.completionFences)\n if (!fence.wait(vk.device, UINT64_MAX))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Fence wait timed out\");\n data.shouldWait = true;\n\n // 1. 
create mipmaps and process input image\n if (inSem >= 0) data.inSemaphore = Core::Semaphore(vk.device, inSem);\n for (size_t i = 0; i < vk.generationCount; i++)\n data.internalSemaphores.at(i) = Core::Semaphore(vk.device);\n\n data.cmdBuffer1 = Core::CommandBuffer(vk.device, vk.commandPool);\n data.cmdBuffer1.begin();\n\n this->mipmaps.Dispatch(data.cmdBuffer1, this->frameIdx);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(6 - i).Dispatch(data.cmdBuffer1, this->frameIdx);\n this->beta.Dispatch(data.cmdBuffer1, this->frameIdx);\n\n data.cmdBuffer1.end();\n std::vector waits = { data.inSemaphore };\n if (inSem < 0) waits.clear();\n data.cmdBuffer1.submit(vk.device.getComputeQueue(), std::nullopt,\n waits, std::nullopt,\n data.internalSemaphores, std::nullopt);\n\n // 2. generate intermediary frames\n for (size_t pass = 0; pass < vk.generationCount; pass++) {\n auto& internalSemaphore = data.internalSemaphores.at(pass);\n auto& outSemaphore = data.outSemaphores.at(pass);\n if (inSem >= 0) outSemaphore = Core::Semaphore(vk.device, outSem.empty() ? 
-1 : outSem.at(pass));\n auto& completionFence = data.completionFences.at(pass);\n completionFence = Core::Fence(vk.device);\n\n auto& buf2 = data.cmdBuffers2.at(pass);\n buf2 = Core::CommandBuffer(vk.device, vk.commandPool);\n buf2.begin();\n\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i).Dispatch(buf2, this->frameIdx, pass);\n if (i >= 4)\n this->delta.at(i - 4).Dispatch(buf2, this->frameIdx, pass);\n }\n this->generate.Dispatch(buf2, this->frameIdx, pass);\n\n buf2.end();\n std::vector signals = { outSemaphore };\n if (inSem < 0) signals.clear();\n buf2.submit(vk.device.getComputeQueue(), completionFence,\n { internalSemaphore }, std::nullopt,\n signals, std::nullopt);\n }\n\n this->frameIdx++;\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/mipmaps.cpp", "#include \n#include \n\n#include \"v3_1/shaders/mipmaps.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/image.hpp\"\n#include \"core/commandbuffer.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nMipmaps::Mipmaps(Vulkan& vk,\n Core::Image inImg_0, Core::Image inImg_1)\n : inImg_0(std::move(inImg_0)), inImg_1(std::move(inImg_1)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"mipmaps\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 7, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"mipmaps\");\n this->buffer = vk.resources.getBuffer(vk.device);\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 2; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModule);\n\n // create outputs\n const VkExtent2D flowExtent{\n .width = static_cast(\n static_cast(this->inImg_0.getExtent().width) / vk.flowScale),\n .height = static_cast(\n static_cast(this->inImg_0.getExtent().height) / vk.flowScale)\n };\n for (size_t i = 0; i < 7; i++)\n 
this->outImgs.at(i) = Core::Image(vk.device,\n { flowExtent.width >> i, flowExtent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t fc = 0; fc < 2; fc++)\n this->descriptorSets.at(fc).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, (fc % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Mipmaps::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto flowExtent = this->outImgs.at(0).getExtent();\n const uint32_t threadsX = (flowExtent.width + 63) >> 6;\n const uint32_t threadsY = (flowExtent.height + 63) >> 6;\n\n Utils::BarrierBuilder(buf)\n .addW2R((frameCount % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .addR2W(this->outImgs)\n .build();\n\n this->pipeline.bind(buf);\n this->descriptorSets.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/mipmaps.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/mipmaps.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/image.hpp\"\n#include \"core/commandbuffer.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nMipmaps::Mipmaps(Vulkan& vk,\n Core::Image inImg_0, Core::Image inImg_1)\n : inImg_0(std::move(inImg_0)), inImg_1(std::move(inImg_1)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"p_mipmaps\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 7, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"p_mipmaps\");\n this->buffer = vk.resources.getBuffer(vk.device);\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 2; i++)\n this->descriptorSets.at(i) = 
Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModule);\n\n // create outputs\n const VkExtent2D flowExtent{\n .width = static_cast(\n static_cast(this->inImg_0.getExtent().width) / vk.flowScale),\n .height = static_cast(\n static_cast(this->inImg_0.getExtent().height) / vk.flowScale)\n };\n for (size_t i = 0; i < 7; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { flowExtent.width >> i, flowExtent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t fc = 0; fc < 2; fc++)\n this->descriptorSets.at(fc).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, (fc % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Mipmaps::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto flowExtent = this->outImgs.at(0).getExtent();\n const uint32_t threadsX = (flowExtent.width + 63) >> 6;\n const uint32_t threadsY = (flowExtent.height + 63) >> 6;\n\n Utils::BarrierBuilder(buf)\n .addW2R((frameCount % 2 == 0) ? 
this->inImg_0 : this->inImg_1)\n .addR2W(this->outImgs)\n .build();\n\n this->pipeline.bind(buf);\n this->descriptorSets.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/src/hooks.cpp", "#include \"hooks.hpp\"\n#include \"common/exception.hpp\"\n#include \"config/config.hpp\"\n#include \"utils/utils.hpp\"\n#include \"context.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Hooks;\n\nnamespace {\n\n ///\n /// Add extensions to the instance create info.\n ///\n VkResult myvkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n auto extensions = Utils::addExtensions(\n pCreateInfo->ppEnabledExtensionNames,\n pCreateInfo->enabledExtensionCount,\n {\n \"VK_KHR_get_physical_device_properties2\",\n \"VK_KHR_external_memory_capabilities\",\n \"VK_KHR_external_semaphore_capabilities\"\n }\n );\n VkInstanceCreateInfo createInfo = *pCreateInfo;\n createInfo.enabledExtensionCount = static_cast(extensions.size());\n createInfo.ppEnabledExtensionNames = extensions.data();\n auto res = Layer::ovkCreateInstance(&createInfo, pAllocator, pInstance);\n if (res == VK_ERROR_EXTENSION_NOT_PRESENT)\n throw std::runtime_error(\n \"Required Vulkan instance extensions are not present.\"\n \"Your GPU driver is not supported.\");\n return res;\n }\n\n /// Map of devices to related information.\n std::unordered_map deviceToInfo;\n\n ///\n /// Add extensions to the device create info.\n /// (function pointers are not initialized yet)\n ///\n VkResult myvkCreateDevicePre(\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n // add extensions\n auto extensions = Utils::addExtensions(\n pCreateInfo->ppEnabledExtensionNames,\n 
pCreateInfo->enabledExtensionCount,\n {\n \"VK_KHR_external_memory\",\n \"VK_KHR_external_memory_fd\",\n \"VK_KHR_external_semaphore\",\n \"VK_KHR_external_semaphore_fd\"\n }\n );\n VkDeviceCreateInfo createInfo = *pCreateInfo;\n createInfo.enabledExtensionCount = static_cast(extensions.size());\n createInfo.ppEnabledExtensionNames = extensions.data();\n auto res = Layer::ovkCreateDevice(physicalDevice, &createInfo, pAllocator, pDevice);\n if (res == VK_ERROR_EXTENSION_NOT_PRESENT)\n throw std::runtime_error(\n \"Required Vulkan device extensions are not present.\"\n \"Your GPU driver is not supported.\");\n return res;\n }\n\n ///\n /// Add related device information after the device is created.\n ///\n VkResult myvkCreateDevicePost(\n VkPhysicalDevice physicalDevice,\n VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks*,\n VkDevice* pDevice) {\n deviceToInfo.emplace(*pDevice, DeviceInfo {\n .device = *pDevice,\n .physicalDevice = physicalDevice,\n .queue = Utils::findQueue(*pDevice, physicalDevice, pCreateInfo, VK_QUEUE_GRAPHICS_BIT)\n });\n return VK_SUCCESS;\n }\n\n /// Erase the device information when the device is destroyed.\n void myvkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator) noexcept {\n deviceToInfo.erase(device);\n Layer::ovkDestroyDevice(device, pAllocator);\n }\n\n std::unordered_map swapchains;\n std::unordered_map swapchainToDeviceTable;\n std::unordered_map swapchainToPresent;\n\n ///\n /// Adjust swapchain creation parameters and create a swapchain context.\n ///\n VkResult myvkCreateSwapchainKHR(\n VkDevice device,\n const VkSwapchainCreateInfoKHR* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSwapchainKHR* pSwapchain) noexcept {\n // find device\n auto it = deviceToInfo.find(device);\n if (it == deviceToInfo.end()) {\n Utils::logLimitN(\"swapMap\", 5, \"Device not found in map\");\n return Layer::ovkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);\n }\n 
Utils::resetLimitN(\"swapMap\");\n auto& deviceInfo = it->second;\n\n // increase amount of images in swapchain\n VkSwapchainCreateInfoKHR createInfo = *pCreateInfo;\n const auto maxImages = Utils::getMaxImageCount(\n deviceInfo.physicalDevice, pCreateInfo->surface);\n createInfo.minImageCount = createInfo.minImageCount + 1\n + static_cast(deviceInfo.queue.first);\n if (createInfo.minImageCount > maxImages) {\n createInfo.minImageCount = maxImages;\n Utils::logLimitN(\"swapCount\", 10,\n \"Requested image count (\" +\n std::to_string(pCreateInfo->minImageCount) + \") \"\n \"exceeds maximum allowed (\" +\n std::to_string(maxImages) + \"). \"\n \"Continuing with maximum allowed image count. \"\n \"This might lead to performance degradation.\");\n } else {\n Utils::resetLimitN(\"swapCount\");\n }\n\n // allow copy operations on swapchain images\n createInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;\n createInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;\n\n // enforce present mode\n createInfo.presentMode = Config::activeConf.e_present;\n\n // retire potential old swapchain\n if (pCreateInfo->oldSwapchain) {\n swapchains.erase(pCreateInfo->oldSwapchain);\n swapchainToDeviceTable.erase(pCreateInfo->oldSwapchain);\n }\n\n // create swapchain\n auto res = Layer::ovkCreateSwapchainKHR(device, &createInfo, pAllocator, pSwapchain);\n if (res != VK_SUCCESS)\n return res; // can't be caused by lsfg-vk (yet)\n\n try {\n swapchainToPresent.emplace(*pSwapchain, createInfo.presentMode);\n\n // get all swapchain images\n uint32_t imageCount{};\n res = Layer::ovkGetSwapchainImagesKHR(device, *pSwapchain, &imageCount, nullptr);\n if (res != VK_SUCCESS || imageCount == 0)\n throw LSFG::vulkan_error(res, \"Failed to get swapchain image count\");\n\n std::vector swapchainImages(imageCount);\n res = Layer::ovkGetSwapchainImagesKHR(device, *pSwapchain,\n &imageCount, swapchainImages.data());\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get swapchain 
images\");\n\n // create swapchain context\n swapchainToDeviceTable.emplace(*pSwapchain, device);\n swapchains.emplace(*pSwapchain, LsContext(\n deviceInfo, *pSwapchain, pCreateInfo->imageExtent,\n swapchainImages\n ));\n\n std::cerr << \"lsfg-vk: Swapchain context \" <<\n (createInfo.oldSwapchain ? \"recreated\" : \"created\")\n << \" (using \" << imageCount << \" images).\\n\";\n\n Utils::resetLimitN(\"swapCtxCreate\");\n } catch (const std::exception& e) {\n Utils::logLimitN(\"swapCtxCreate\", 5,\n \"An error occurred while creating the swapchain wrapper:\\n\"\n \"- \" + std::string(e.what()));\n return VK_SUCCESS; // swapchain is still valid\n }\n return VK_SUCCESS;\n }\n\n ///\n /// Update presentation parameters and present the next frame(s).\n ///\n VkResult myvkQueuePresentKHR(\n VkQueue queue,\n const VkPresentInfoKHR* pPresentInfo) noexcept {\n // find swapchain device\n auto it = swapchainToDeviceTable.find(*pPresentInfo->pSwapchains);\n if (it == swapchainToDeviceTable.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n\n // find device info\n auto it2 = deviceToInfo.find(it->second);\n if (it2 == deviceToInfo.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Device not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& deviceInfo = it2->second;\n\n // find swapchain context\n auto it3 = swapchains.find(*pPresentInfo->pSwapchains);\n if (it3 == swapchains.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain context not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& swapchain = it3->second;\n\n // find present mode\n auto it4 = swapchainToPresent.find(*pPresentInfo->pSwapchains);\n if (it4 == swapchainToPresent.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain present mode not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& present = 
it4->second;\n\n // enforce present mode | NOLINTBEGIN\n #pragma clang diagnostic push\n #pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n const VkSwapchainPresentModeInfoEXT* presentModeInfo =\n reinterpret_cast(pPresentInfo->pNext);\n while (presentModeInfo) {\n if (presentModeInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODE_INFO_EXT) {\n for (size_t i = 0; i < presentModeInfo->swapchainCount; i++)\n const_cast(presentModeInfo->pPresentModes)[i] =\n present;\n }\n presentModeInfo =\n reinterpret_cast(presentModeInfo->pNext);\n }\n #pragma clang diagnostic pop\n\n // NOLINTEND | present the next frame\n VkResult res{}; // might return VK_SUBOPTIMAL_KHR\n try {\n // ensure config is valid\n auto& conf = Config::activeConf;\n if (!conf.config_file.empty()\n && (\n !std::filesystem::exists(conf.config_file)\n || conf.timestamp != std::filesystem::last_write_time(conf.config_file)\n )) {\n Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n return VK_ERROR_OUT_OF_DATE_KHR;\n }\n\n // ensure present mode is still valid\n if (present != conf.e_present) {\n Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n return VK_ERROR_OUT_OF_DATE_KHR;\n }\n\n // skip if disabled\n if (conf.multiplier <= 1)\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n\n // present the swapchain\n std::vector semaphores(pPresentInfo->waitSemaphoreCount);\n std::copy_n(pPresentInfo->pWaitSemaphores, semaphores.size(), semaphores.data());\n\n res = swapchain.present(deviceInfo, pPresentInfo->pNext,\n queue, semaphores, *pPresentInfo->pImageIndices);\n\n Utils::resetLimitN(\"swapPresent\");\n } catch (const std::exception& e) {\n Utils::logLimitN(\"swapPresent\", 5,\n \"An error occurred while presenting the swapchain:\\n\"\n \"- \" + std::string(e.what()));\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return res;\n }\n\n /// Erase the swapchain context and mapping when the swapchain is destroyed.\n void myvkDestroySwapchainKHR(\n VkDevice device,\n VkSwapchainKHR 
swapchain,\n const VkAllocationCallbacks* pAllocator) noexcept {\n swapchains.erase(swapchain);\n swapchainToDeviceTable.erase(swapchain);\n swapchainToPresent.erase(swapchain);\n Layer::ovkDestroySwapchainKHR(device, swapchain, pAllocator);\n }\n}\n\nstd::unordered_map Hooks::hooks = {\n // instance hooks\n {\"vkCreateInstance\", reinterpret_cast(myvkCreateInstance)},\n\n // device hooks\n {\"vkCreateDevicePre\", reinterpret_cast(myvkCreateDevicePre)},\n {\"vkCreateDevicePost\", reinterpret_cast(myvkCreateDevicePost)},\n {\"vkDestroyDevice\", reinterpret_cast(myvkDestroyDevice)},\n\n // swapchain hooks\n {\"vkCreateSwapchainKHR\", reinterpret_cast(myvkCreateSwapchainKHR)},\n {\"vkQueuePresentKHR\", reinterpret_cast(myvkQueuePresentKHR)},\n {\"vkDestroySwapchainKHR\", reinterpret_cast(myvkDestroySwapchainKHR)}\n};\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/generate.cpp", "#include \n#include \n\n#include \"v3_1/shaders/generate.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nGenerate::Generate(Vulkan& vk,\n Core::Image inImg1, Core::Image inImg2,\n Core::Image inImg3, Core::Image inImg4, Core::Image inImg5,\n const std::vector& fds, VkFormat format)\n : inImg1(std::move(inImg1)), inImg2(std::move(inImg2)),\n inImg3(std::move(inImg3)), inImg4(std::move(inImg4)),\n inImg5(std::move(inImg5)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"generate\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"generate\");\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS);\n\n // create internal 
images/outputs\n const VkExtent2D extent = this->inImg1.getExtent();\n for (size_t i = 0; i < vk.generationCount; i++)\n this->outImgs.emplace_back(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, fds.empty() ? -1 : fds.at(i));\n\n // hook up shaders\n for (size_t i = 0; i < vk.generationCount; i++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(i + 1) / static_cast(vk.generationCount + 1));\n for (size_t j = 0; j < 2; j++) {\n pass.descriptorSet.at(j) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModule);\n pass.descriptorSet.at(j).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg2 : this->inImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg1 : this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg3)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg4)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg5)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n }\n }\n}\n\nvoid Generate::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first pass\n const auto extent = this->inImg1.getExtent();\n const uint32_t threadsX = (extent.width + 15) >> 4;\n const uint32_t threadsY = (extent.height + 15) >> 4;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg1)\n .addW2R(this->inImg2)\n .addW2R(this->inImg3)\n .addW2R(this->inImg4)\n .addW2R(this->inImg5)\n .addR2W(this->outImgs.at(pass_idx))\n .build();\n\n this->pipeline.bind(buf);\n pass.descriptorSet.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/generate.cpp", "#include \n#include \n\n#include 
\"v3_1p/shaders/generate.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nGenerate::Generate(Vulkan& vk,\n Core::Image inImg1, Core::Image inImg2,\n Core::Image inImg3, Core::Image inImg4, Core::Image inImg5,\n const std::vector& fds, VkFormat format)\n : inImg1(std::move(inImg1)), inImg2(std::move(inImg2)),\n inImg3(std::move(inImg3)), inImg4(std::move(inImg4)),\n inImg5(std::move(inImg5)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"p_generate\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"p_generate\");\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg1.getExtent();\n for (size_t i = 0; i < vk.generationCount; i++)\n this->outImgs.emplace_back(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, fds.empty() ? -1 : fds.at(i));\n\n // hook up shaders\n for (size_t i = 0; i < vk.generationCount; i++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(i + 1) / static_cast(vk.generationCount + 1));\n for (size_t j = 0; j < 2; j++) {\n pass.descriptorSet.at(j) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModule);\n pass.descriptorSet.at(j).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? 
this->inImg2 : this->inImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg1 : this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg3)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg4)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg5)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n }\n }\n}\n\nvoid Generate::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first pass\n const auto extent = this->inImg1.getExtent();\n const uint32_t threadsX = (extent.width + 15) >> 4;\n const uint32_t threadsY = (extent.height + 15) >> 4;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg1)\n .addW2R(this->inImg2)\n .addW2R(this->inImg3)\n .addW2R(this->inImg4)\n .addW2R(this->inImg5)\n .addR2W(this->outImgs.at(pass_idx))\n .build();\n\n this->pipeline.bind(buf);\n pass.descriptorSet.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/src/pool/resourcepool.cpp", "#include \"pool/resourcepool.hpp\"\n#include \"core/buffer.hpp\"\n#include \"core/device.hpp\"\n#include \"core/sampler.hpp\"\n\n#include \n\n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Pool;\n\nstruct ConstantBuffer {\n std::array inputOffset;\n uint32_t firstIter;\n uint32_t firstIterS;\n uint32_t advancedColorKind;\n uint32_t hdrSupport;\n float resolutionInvScale;\n float timestamp;\n float uiThreshold;\n std::array pad;\n};\n\nCore::Buffer ResourcePool::getBuffer(\n const Core::Device& device,\n float timestamp, bool firstIter, bool firstIterS) {\n uint64_t hash = 0;\n const union { float f; uint32_t i; } u{\n .f = timestamp };\n hash |= u.i;\n hash |= static_cast(firstIter) << 32;\n hash |= static_cast(firstIterS) << 33;\n\n auto it = buffers.find(hash);\n if (it != buffers.end())\n return it->second;\n\n // create the buffer\n const ConstantBuffer data{\n .inputOffset = { 0, 0 },\n 
.advancedColorKind = this->isHdr ? 2U : 0U,\n .hdrSupport = this->isHdr,\n .resolutionInvScale = this->flowScale,\n .timestamp = timestamp,\n .uiThreshold = 0.5F,\n };\n Core::Buffer buffer(device, data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);\n buffers[hash] = buffer;\n return buffer;\n}\n\nCore::Sampler ResourcePool::getSampler(\n const Core::Device& device,\n VkSamplerAddressMode type,\n VkCompareOp compare,\n bool isWhite) {\n uint64_t hash = 0;\n hash |= static_cast(type) << 0;\n hash |= static_cast(compare) << 8;\n hash |= static_cast(isWhite) << 16;\n\n auto it = samplers.find(hash);\n if (it != samplers.end())\n return it->second;\n\n // create the sampler\n Core::Sampler sampler(device, type, compare, isWhite);\n samplers[hash] = sampler;\n return sampler;\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/alpha.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/alpha.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nAlpha::Alpha(Vulkan& vk, Core::Image inImg) : inImg(std::move(inImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_alpha[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_alpha[0]\"),\n 
vk.shaders.getPipeline(vk.device, \"p_alpha[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[3]\")\n }};\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 3; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i));\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(3));\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg.getExtent();\n const VkExtent2D halfExtent = {\n .width = (extent.width + 1) >> 1,\n .height = (extent.height + 1) >> 1\n };\n this->tempImg1 = Core::Image(vk.device, halfExtent);\n this->tempImg2 = Core::Image(vk.device, halfExtent);\n\n const VkExtent2D quarterExtent = {\n .width = (halfExtent.width + 1) >> 1,\n .height = (halfExtent.height + 1) >> 1\n };\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs3.at(i) = Core::Image(vk.device, quarterExtent);\n for (size_t j = 0; j < 3; j++)\n this->outImgs.at(j).at(i) = Core::Image(vk.device, quarterExtent);\n }\n\n // hook up shaders\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImg1)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImg1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImg2)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs3)\n .build();\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n}\n\nvoid Alpha::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto halfExtent = this->tempImg1.getExtent();\n uint32_t threadsX = (halfExtent.width + 7) >> 3;\n uint32_t threadsY = (halfExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg)\n .addR2W(this->tempImg1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImg1)\n .addR2W(this->tempImg2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n const auto quarterExtent = this->tempImgs3.at(0).getExtent();\n threadsX = (quarterExtent.width + 7) >> 3;\n threadsY = (quarterExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImg2)\n .addR2W(this->tempImgs3)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs3)\n .addR2W(this->outImgs.at(frameCount % 3))\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->lastDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/gamma.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/gamma.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nGamma::Gamma(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg)\n : 
inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg(std::move(optImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_gamma[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_gamma[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 3; i++)\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n for (size_t i = 0; i < 2; i++)\n this->tempImgs2.at(i) = 
Core::Image(vk.device, extent);\n\n this->outImg = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n !this->optImg.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg)\n .build();\n }\n}\n\nvoid Gamma::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n 
buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg)\n .addW2R(this->inImg2)\n .addR2W(this->outImg)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/alpha.cpp", "#include \n#include \n\n#include \"v3_1/shaders/alpha.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nAlpha::Alpha(Vulkan& vk, Core::Image inImg) : inImg(std::move(inImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"alpha[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"alpha[0]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[1]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[2]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[3]\")\n }};\n this->sampler = 
vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 3; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i));\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(3));\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg.getExtent();\n const VkExtent2D halfExtent = {\n .width = (extent.width + 1) >> 1,\n .height = (extent.height + 1) >> 1\n };\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, halfExtent);\n this->tempImgs2.at(i) = Core::Image(vk.device, halfExtent);\n }\n\n const VkExtent2D quarterExtent = {\n .width = (halfExtent.width + 1) >> 1,\n .height = (halfExtent.height + 1) >> 1\n };\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs3.at(i) = Core::Image(vk.device, quarterExtent);\n for (size_t j = 0; j < 3; j++)\n this->outImgs.at(j).at(i) = Core::Image(vk.device, quarterExtent);\n }\n\n // hook up shaders\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs3)\n .build();\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n}\n\nvoid 
Alpha::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto halfExtent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (halfExtent.width + 7) >> 3;\n uint32_t threadsY = (halfExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n const auto quarterExtent = this->tempImgs3.at(0).getExtent();\n threadsX = (quarterExtent.width + 7) >> 3;\n threadsY = (quarterExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs3)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs3)\n .addR2W(this->outImgs.at(frameCount % 3))\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->lastDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/gamma.cpp", "#include \n#include \n\n#include \"v3_1/shaders/gamma.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nGamma::Gamma(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg(std::move(optImg)) {\n // create resources\n this->shaderModules = {{\n 
vk.shaders.getShader(vk.device, \"gamma[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 9, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"gamma[0]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[1]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[2]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[3]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n this->outImg = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; 
pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n !this->optImg.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(2))\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(2))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg)\n .build();\n }\n}\n\nvoid Gamma::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .addR2W(this->tempImgs1.at(2))\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->tempImgs1.at(2))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n 
pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg)\n .addW2R(this->inImg2)\n .addR2W(this->outImg)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/src/pool/shaderpool.cpp", "#include \"pool/shaderpool.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/device.hpp\"\n#include \"core/pipeline.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Pool;\n\nCore::ShaderModule ShaderPool::getShader(\n const Core::Device& device, const std::string& name,\n const std::vector>& types) {\n auto it = shaders.find(name);\n if (it != shaders.end())\n return it->second;\n\n // grab the shader\n auto bytecode = this->source(name);\n if (bytecode.empty())\n throw std::runtime_error(\"Shader code is empty: \" + name);\n\n // create the shader module\n Core::ShaderModule shader(device, bytecode, types);\n shaders[name] = shader;\n return shader;\n}\n\nCore::Pipeline ShaderPool::getPipeline(\n const Core::Device& device, const std::string& name) {\n auto it = pipelines.find(name);\n if (it != pipelines.end())\n return it->second;\n\n // grab the shader module\n auto shader = this->getShader(device, name, {});\n\n // create the pipeline\n Core::Pipeline pipeline(device, shader);\n pipelines[name] = pipeline;\n return pipeline;\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/beta.cpp", "#include \n#include \n\n#include \"v3_1/shaders/beta.hpp\"\n#include \"common/utils.hpp\"\n#include 
\"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nBeta::Beta(Vulkan& vk, std::array, 3> inImgs)\n : inImgs(std::move(inImgs)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"beta[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 12, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 6, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"beta[0]\"),\n vk.shaders.getPipeline(vk.device, \"beta[1]\"),\n vk.shaders.getPipeline(vk.device, \"beta[2]\"),\n vk.shaders.getPipeline(vk.device, \"beta[3]\"),\n vk.shaders.getPipeline(vk.device, \"beta[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n for (size_t i = 0; i < 3; i++)\n this->firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(0));\n for (size_t i = 0; i < 4; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i + 1));\n this->buffer = 
vk.resources.getBuffer(vk.device, 0.5F);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs.at(0).at(0).getExtent();\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n for (size_t i = 0; i < 6; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { extent.width >> i, extent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t i = 0; i < 3; i++) {\n this->firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 1) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Beta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto extent = 
this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (extent.width + 7) >> 3;\n uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs.at(0))\n .addW2R(this->inImgs.at(1))\n .addW2R(this->inImgs.at(2))\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth pass\n threadsX = (extent.width + 31) >> 5;\n threadsY = (extent.height + 31) >> 5;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->outImgs)\n .build();\n\n this->pipelines.at(4).bind(buf);\n this->descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/beta.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/beta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nBeta::Beta(Vulkan& vk, std::array, 3> inImgs)\n : inImgs(std::move(inImgs)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, 
\"p_beta[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 6, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_beta[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n for (size_t i = 0; i < 3; i++)\n this->firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(0));\n for (size_t i = 0; i < 4; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i + 1));\n this->buffer = vk.resources.getBuffer(vk.device, 0.5F);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs.at(0).at(0).getExtent();\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n for 
(size_t i = 0; i < 6; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { extent.width >> i, extent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t i = 0; i < 3; i++) {\n this->firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 1) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Beta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto extent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (extent.width + 7) >> 3;\n uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs.at(0))\n .addW2R(this->inImgs.at(1))\n .addW2R(this->inImgs.at(2))\n .addR2W(this->tempImgs1)\n .build();\n\n 
this->pipelines.at(0).bind(buf);\n this->firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth pass\n threadsX = (extent.width + 31) >> 5;\n threadsY = (extent.height + 31) >> 5;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->outImgs)\n .build();\n\n this->pipelines.at(4).bind(buf);\n this->descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/delta.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/delta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nDelta::Delta(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg1,\n std::optional optImg2)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg1(std::move(optImg1)), optImg2(std::move(optImg2)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_delta[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, 
VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[5]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[6]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[7]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[8]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[9]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_delta[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[1]\"),\n vk.shaders.getPipeline(vk.device, 
\"p_delta[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[4]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[5]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[6]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[7]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[8]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[9]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 3; i++)\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n for (size_t i = 0; i < 2; i++)\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n\n this->outImg1 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n this->outImg2 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n false, !this->optImg1.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg1)\n .build();\n for (size_t i = 0; i < 3; i++) {\n pass.sixthDescriptorSet.at(i) = 
Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(5));\n pass.sixthDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .build();\n }\n pass.descriptorSets.at(4) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(6));\n pass.descriptorSets.at(4).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .build();\n pass.descriptorSets.at(5) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(7));\n pass.descriptorSets.at(5).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .build();\n pass.descriptorSets.at(6) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(8));\n pass.descriptorSets.at(6).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .build();\n pass.descriptorSets.at(7) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(9));\n pass.descriptorSets.at(7).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, 
this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg2)\n .build();\n }\n}\n\nvoid Delta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx,\n bool last) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg1)\n .addW2R(this->inImg2)\n .addR2W(this->outImg1)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n\n // sixth shader\n Utils::BarrierBuilder(buf)\n 
.addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addW2R(this->optImg2)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(5).bind(buf);\n pass.sixthDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(5));\n buf.dispatch(threadsX, threadsY, 1);\n\n if (!last)\n return;\n\n // seventh shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(6).bind(buf);\n pass.descriptorSets.at(4).bind(buf, this->pipelines.at(6));\n buf.dispatch(threadsX, threadsY, 1);\n\n // eighth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2)\n .build();\n this->pipelines.at(7).bind(buf);\n pass.descriptorSets.at(5).bind(buf, this->pipelines.at(7));\n buf.dispatch(threadsX, threadsY, 1);\n\n // ninth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(8).bind(buf);\n pass.descriptorSets.at(6).bind(buf, this->pipelines.at(8));\n buf.dispatch(threadsX, threadsY, 1);\n\n // tenth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->outImg2)\n .build();\n\n this->pipelines.at(9).bind(buf);\n pass.descriptorSets.at(7).bind(buf, this->pipelines.at(9));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/delta.cpp", "#include \n#include \n\n#include \"v3_1/shaders/delta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nDelta::Delta(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg1,\n std::optional optImg2,\n std::optional optImg3)\n : 
inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg1(std::move(optImg1)), optImg2(std::move(optImg2)),\n optImg3(std::move(optImg3)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"delta[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 9, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[5]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 10, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[6]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[7]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[8]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[9]\",\n { { 1, 
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"delta[0]\"),\n vk.shaders.getPipeline(vk.device, \"delta[1]\"),\n vk.shaders.getPipeline(vk.device, \"delta[2]\"),\n vk.shaders.getPipeline(vk.device, \"delta[3]\"),\n vk.shaders.getPipeline(vk.device, \"delta[4]\"),\n vk.shaders.getPipeline(vk.device, \"delta[5]\"),\n vk.shaders.getPipeline(vk.device, \"delta[6]\"),\n vk.shaders.getPipeline(vk.device, \"delta[7]\"),\n vk.shaders.getPipeline(vk.device, \"delta[8]\"),\n vk.shaders.getPipeline(vk.device, \"delta[9]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n this->outImg1 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n this->outImg2 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n false, !this->optImg1.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n 
pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(2))\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(2))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg1)\n .build();\n for (size_t i = 0; i < 3; i++) {\n pass.sixthDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(5));\n pass.sixthDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(1))\n .build();\n }\n pass.descriptorSets.at(4) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(6));\n pass.descriptorSets.at(4).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(5) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(7));\n pass.descriptorSets.at(5).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n 
.add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(1))\n .build();\n pass.descriptorSets.at(6) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(8));\n pass.descriptorSets.at(6).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(7) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(9));\n pass.descriptorSets.at(7).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg2)\n .build();\n }\n}\n\nvoid Delta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .addR2W(this->tempImgs1.at(2))\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n 
.addW2R(this->tempImgs1.at(2))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg1)\n .addW2R(this->inImg2)\n .addR2W(this->outImg1)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n\n // sixth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addW2R(this->optImg2)\n .addR2W(this->tempImgs2.at(0))\n .addR2W(this->tempImgs2.at(1))\n .build();\n\n this->pipelines.at(5).bind(buf);\n pass.sixthDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(5));\n buf.dispatch(threadsX, threadsY, 1);\n\n // seventh shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2.at(0))\n .addW2R(this->tempImgs2.at(1))\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(6).bind(buf);\n pass.descriptorSets.at(4).bind(buf, this->pipelines.at(6));\n buf.dispatch(threadsX, threadsY, 1);\n\n // eighth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2.at(0))\n .addR2W(this->tempImgs2.at(1))\n .build();\n 
this->pipelines.at(7).bind(buf);\n pass.descriptorSets.at(5).bind(buf, this->pipelines.at(7));\n buf.dispatch(threadsX, threadsY, 1);\n\n // ninth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2.at(0))\n .addW2R(this->tempImgs2.at(1))\n .addW2R(this->optImg3)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(8).bind(buf);\n pass.descriptorSets.at(6).bind(buf, this->pipelines.at(8));\n buf.dispatch(threadsX, threadsY, 1);\n\n // tenth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->optImg3)\n .addR2W(this->outImg2)\n .build();\n\n this->pipelines.at(9).bind(buf);\n pass.descriptorSets.at(7).bind(buf, this->pipelines.at(9));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/thirdparty/volk/volk.h", "/**\n * volk\n *\n * Copyright (C) 2018-2025, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)\n * Report bugs and download new versions at https://github.com/zeux/volk\n *\n * This library is distributed under the MIT License. 
See notice at the end of this file.\n */\n/* clang-format off */\n#ifndef VOLK_H_\n#define VOLK_H_\n\n#if defined(VULKAN_H_) && !defined(VK_NO_PROTOTYPES)\n#\terror To use volk, you need to define VK_NO_PROTOTYPES before including vulkan.h\n#endif\n\n/* VOLK_GENERATE_VERSION_DEFINE */\n#define VOLK_HEADER_VERSION 323\n/* VOLK_GENERATE_VERSION_DEFINE */\n\n#ifndef VK_NO_PROTOTYPES\n#\tdefine VK_NO_PROTOTYPES\n#endif\n\n#ifndef VULKAN_H_\n#\tifdef VOLK_VULKAN_H_PATH\n#\t\tinclude VOLK_VULKAN_H_PATH\n#\telse /* Platform headers included below */\n#\t\tinclude \n#\t\tinclude \n#\tendif\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nstruct VolkDeviceTable;\n\n/**\n * Initialize library by loading Vulkan loader; call this function before creating the Vulkan instance.\n *\n * Returns VK_SUCCESS on success and VK_ERROR_INITIALIZATION_FAILED otherwise.\n */\nVkResult volkInitialize(void);\n\n/**\n * Initialize library by providing a custom handler to load global symbols.\n *\n * This function can be used instead of volkInitialize.\n * The handler function pointer will be asked to load global Vulkan symbols which require no instance\n * (such as vkCreateInstance, vkEnumerateInstance* and vkEnumerateInstanceVersion if available).\n */\nvoid volkInitializeCustom(PFN_vkGetInstanceProcAddr handler);\n\n/**\n * Finalize library by unloading Vulkan loader and resetting global symbols to NULL.\n *\n * This function does not need to be called on process exit (as loader will be unloaded automatically) or if volkInitialize failed.\n * In general this function is optional to call but may be useful in rare cases eg if volk needs to be reinitialized multiple times.\n */\nvoid volkFinalize(void);\n\n/**\n * Get Vulkan instance version supported by the Vulkan loader, or 0 if Vulkan isn't supported\n *\n * Returns 0 if volkInitialize wasn't called or failed.\n */\nuint32_t volkGetInstanceVersion(void);\n\n/**\n * Load global function pointers using application-created VkInstance; 
call this function after creating the Vulkan instance.\n */\nvoid volkLoadInstance(VkInstance instance);\n\n/**\n * Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.\n * Skips loading device-based function pointers, requires usage of volkLoadDevice afterwards.\n */\nvoid volkLoadInstanceOnly(VkInstance instance);\n\n/**\n * Load global function pointers using application-created VkDevice; call this function after creating the Vulkan device.\n *\n * Note: this is not suitable for applications that want to use multiple VkDevice objects concurrently.\n */\nvoid volkLoadDevice(VkDevice device);\n\n/**\n * Return last VkInstance for which global function pointers have been loaded via volkLoadInstance(),\n * or VK_NULL_HANDLE if volkLoadInstance() has not been called.\n */\nVkInstance volkGetLoadedInstance(void);\n\n/**\n * Return last VkDevice for which global function pointers have been loaded via volkLoadDevice(),\n * or VK_NULL_HANDLE if volkLoadDevice() has not been called.\n */\nVkDevice volkGetLoadedDevice(void);\n\n/**\n * Load function pointers using application-created VkDevice into a table.\n * Application should use function pointers from that table instead of using global function pointers.\n */\nvoid volkLoadDeviceTable(struct VolkDeviceTable* table, VkDevice device);\n\n#ifdef __cplusplus\n}\n#endif\n\n/* Instead of directly including vulkan.h, we include platform-specific parts of the SDK manually\n * This is necessary to avoid including platform headers in some cases (which vulkan.h does unconditionally)\n * and replace them with forward declarations, which makes build times faster and avoids macro conflicts.\n *\n * Note that we only replace platform-specific headers when the headers are known to be problematic: very large\n * or slow to compile (Windows), or introducing unprefixed macros which can cause conflicts (Windows, Xlib).\n */\n#if !defined(VULKAN_H_) && 
!defined(VOLK_VULKAN_H_PATH)\n\n#ifdef VK_USE_PLATFORM_ANDROID_KHR\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_FUCHSIA\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_IOS_MVK\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_MACOS_MVK\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_METAL_EXT\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_VI_NN\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_WAYLAND_KHR\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_WIN32_KHR\ntypedef unsigned long DWORD;\ntypedef const wchar_t* LPCWSTR;\ntypedef void* HANDLE;\ntypedef struct HINSTANCE__* HINSTANCE;\ntypedef struct HWND__* HWND;\ntypedef struct HMONITOR__* HMONITOR;\ntypedef struct _SECURITY_ATTRIBUTES SECURITY_ATTRIBUTES;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XCB_KHR\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XLIB_KHR\ntypedef struct _XDisplay Display;\ntypedef unsigned long Window;\ntypedef unsigned long VisualID;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_DIRECTFB_EXT\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT\ntypedef struct _XDisplay Display;\ntypedef unsigned long RROutput;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_GGP\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_SCREEN_QNX\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_SCI\n#include \n#include \n#include \n#endif\n\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n#include \n#endif\n\n#endif\n\n/**\n * Device-specific function pointer table\n */\nstruct VolkDeviceTable\n{\n\t/* VOLK_GENERATE_DEVICE_TABLE */\n#if defined(VK_VERSION_1_0)\n\tPFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\n\tPFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\n\tPFN_vkAllocateMemory vkAllocateMemory;\n\tPFN_vkBeginCommandBuffer vkBeginCommandBuffer;\n\tPFN_vkBindBufferMemory vkBindBufferMemory;\n\tPFN_vkBindImageMemory vkBindImageMemory;\n\tPFN_vkCmdBeginQuery vkCmdBeginQuery;\n\tPFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\n\tPFN_vkCmdBindDescriptorSets 
vkCmdBindDescriptorSets;\n\tPFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\n\tPFN_vkCmdBindPipeline vkCmdBindPipeline;\n\tPFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\n\tPFN_vkCmdBlitImage vkCmdBlitImage;\n\tPFN_vkCmdClearAttachments vkCmdClearAttachments;\n\tPFN_vkCmdClearColorImage vkCmdClearColorImage;\n\tPFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\n\tPFN_vkCmdCopyBuffer vkCmdCopyBuffer;\n\tPFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\n\tPFN_vkCmdCopyImage vkCmdCopyImage;\n\tPFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\n\tPFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\n\tPFN_vkCmdDispatch vkCmdDispatch;\n\tPFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\n\tPFN_vkCmdDraw vkCmdDraw;\n\tPFN_vkCmdDrawIndexed vkCmdDrawIndexed;\n\tPFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;\n\tPFN_vkCmdDrawIndirect vkCmdDrawIndirect;\n\tPFN_vkCmdEndQuery vkCmdEndQuery;\n\tPFN_vkCmdEndRenderPass vkCmdEndRenderPass;\n\tPFN_vkCmdExecuteCommands vkCmdExecuteCommands;\n\tPFN_vkCmdFillBuffer vkCmdFillBuffer;\n\tPFN_vkCmdNextSubpass vkCmdNextSubpass;\n\tPFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\n\tPFN_vkCmdPushConstants vkCmdPushConstants;\n\tPFN_vkCmdResetEvent vkCmdResetEvent;\n\tPFN_vkCmdResetQueryPool vkCmdResetQueryPool;\n\tPFN_vkCmdResolveImage vkCmdResolveImage;\n\tPFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\n\tPFN_vkCmdSetDepthBias vkCmdSetDepthBias;\n\tPFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\n\tPFN_vkCmdSetEvent vkCmdSetEvent;\n\tPFN_vkCmdSetLineWidth vkCmdSetLineWidth;\n\tPFN_vkCmdSetScissor vkCmdSetScissor;\n\tPFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\n\tPFN_vkCmdSetStencilReference vkCmdSetStencilReference;\n\tPFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\n\tPFN_vkCmdSetViewport vkCmdSetViewport;\n\tPFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\n\tPFN_vkCmdWaitEvents vkCmdWaitEvents;\n\tPFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\n\tPFN_vkCreateBuffer 
vkCreateBuffer;\n\tPFN_vkCreateBufferView vkCreateBufferView;\n\tPFN_vkCreateCommandPool vkCreateCommandPool;\n\tPFN_vkCreateComputePipelines vkCreateComputePipelines;\n\tPFN_vkCreateDescriptorPool vkCreateDescriptorPool;\n\tPFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\n\tPFN_vkCreateEvent vkCreateEvent;\n\tPFN_vkCreateFence vkCreateFence;\n\tPFN_vkCreateFramebuffer vkCreateFramebuffer;\n\tPFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\n\tPFN_vkCreateImage vkCreateImage;\n\tPFN_vkCreateImageView vkCreateImageView;\n\tPFN_vkCreatePipelineCache vkCreatePipelineCache;\n\tPFN_vkCreatePipelineLayout vkCreatePipelineLayout;\n\tPFN_vkCreateQueryPool vkCreateQueryPool;\n\tPFN_vkCreateRenderPass vkCreateRenderPass;\n\tPFN_vkCreateSampler vkCreateSampler;\n\tPFN_vkCreateSemaphore vkCreateSemaphore;\n\tPFN_vkCreateShaderModule vkCreateShaderModule;\n\tPFN_vkDestroyBuffer vkDestroyBuffer;\n\tPFN_vkDestroyBufferView vkDestroyBufferView;\n\tPFN_vkDestroyCommandPool vkDestroyCommandPool;\n\tPFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\n\tPFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\n\tPFN_vkDestroyDevice vkDestroyDevice;\n\tPFN_vkDestroyEvent vkDestroyEvent;\n\tPFN_vkDestroyFence vkDestroyFence;\n\tPFN_vkDestroyFramebuffer vkDestroyFramebuffer;\n\tPFN_vkDestroyImage vkDestroyImage;\n\tPFN_vkDestroyImageView vkDestroyImageView;\n\tPFN_vkDestroyPipeline vkDestroyPipeline;\n\tPFN_vkDestroyPipelineCache vkDestroyPipelineCache;\n\tPFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\n\tPFN_vkDestroyQueryPool vkDestroyQueryPool;\n\tPFN_vkDestroyRenderPass vkDestroyRenderPass;\n\tPFN_vkDestroySampler vkDestroySampler;\n\tPFN_vkDestroySemaphore vkDestroySemaphore;\n\tPFN_vkDestroyShaderModule vkDestroyShaderModule;\n\tPFN_vkDeviceWaitIdle vkDeviceWaitIdle;\n\tPFN_vkEndCommandBuffer vkEndCommandBuffer;\n\tPFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\n\tPFN_vkFreeCommandBuffers vkFreeCommandBuffers;\n\tPFN_vkFreeDescriptorSets 
vkFreeDescriptorSets;\n\tPFN_vkFreeMemory vkFreeMemory;\n\tPFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\n\tPFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\n\tPFN_vkGetDeviceQueue vkGetDeviceQueue;\n\tPFN_vkGetEventStatus vkGetEventStatus;\n\tPFN_vkGetFenceStatus vkGetFenceStatus;\n\tPFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\n\tPFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\n\tPFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\n\tPFN_vkGetPipelineCacheData vkGetPipelineCacheData;\n\tPFN_vkGetQueryPoolResults vkGetQueryPoolResults;\n\tPFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\n\tPFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\n\tPFN_vkMapMemory vkMapMemory;\n\tPFN_vkMergePipelineCaches vkMergePipelineCaches;\n\tPFN_vkQueueBindSparse vkQueueBindSparse;\n\tPFN_vkQueueSubmit vkQueueSubmit;\n\tPFN_vkQueueWaitIdle vkQueueWaitIdle;\n\tPFN_vkResetCommandBuffer vkResetCommandBuffer;\n\tPFN_vkResetCommandPool vkResetCommandPool;\n\tPFN_vkResetDescriptorPool vkResetDescriptorPool;\n\tPFN_vkResetEvent vkResetEvent;\n\tPFN_vkResetFences vkResetFences;\n\tPFN_vkSetEvent vkSetEvent;\n\tPFN_vkUnmapMemory vkUnmapMemory;\n\tPFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\n\tPFN_vkWaitForFences vkWaitForFences;\n#else\n\tPFN_vkVoidFunction padding_6ce80d51[120];\n#endif /* defined(VK_VERSION_1_0) */\n#if defined(VK_VERSION_1_1)\n\tPFN_vkBindBufferMemory2 vkBindBufferMemory2;\n\tPFN_vkBindImageMemory2 vkBindImageMemory2;\n\tPFN_vkCmdDispatchBase vkCmdDispatchBase;\n\tPFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;\n\tPFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;\n\tPFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;\n\tPFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;\n\tPFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;\n\tPFN_vkGetBufferMemoryRequirements2 
vkGetBufferMemoryRequirements2;\n\tPFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;\n\tPFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;\n\tPFN_vkGetDeviceQueue2 vkGetDeviceQueue2;\n\tPFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\n\tPFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;\n\tPFN_vkTrimCommandPool vkTrimCommandPool;\n\tPFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;\n#else\n\tPFN_vkVoidFunction padding_1ec56847[16];\n#endif /* defined(VK_VERSION_1_1) */\n#if defined(VK_VERSION_1_2)\n\tPFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;\n\tPFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;\n\tPFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;\n\tPFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;\n\tPFN_vkCmdNextSubpass2 vkCmdNextSubpass2;\n\tPFN_vkCreateRenderPass2 vkCreateRenderPass2;\n\tPFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;\n\tPFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;\n\tPFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;\n\tPFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;\n\tPFN_vkResetQueryPool vkResetQueryPool;\n\tPFN_vkSignalSemaphore vkSignalSemaphore;\n\tPFN_vkWaitSemaphores vkWaitSemaphores;\n#else\n\tPFN_vkVoidFunction padding_a3e00662[13];\n#endif /* defined(VK_VERSION_1_2) */\n#if defined(VK_VERSION_1_3)\n\tPFN_vkCmdBeginRendering vkCmdBeginRendering;\n\tPFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;\n\tPFN_vkCmdBlitImage2 vkCmdBlitImage2;\n\tPFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;\n\tPFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;\n\tPFN_vkCmdCopyImage2 vkCmdCopyImage2;\n\tPFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;\n\tPFN_vkCmdEndRendering vkCmdEndRendering;\n\tPFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\n\tPFN_vkCmdResetEvent2 vkCmdResetEvent2;\n\tPFN_vkCmdResolveImage2 vkCmdResolveImage2;\n\tPFN_vkCmdSetCullMode 
vkCmdSetCullMode;\n\tPFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;\n\tPFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;\n\tPFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;\n\tPFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;\n\tPFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;\n\tPFN_vkCmdSetEvent2 vkCmdSetEvent2;\n\tPFN_vkCmdSetFrontFace vkCmdSetFrontFace;\n\tPFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;\n\tPFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;\n\tPFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;\n\tPFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;\n\tPFN_vkCmdSetStencilOp vkCmdSetStencilOp;\n\tPFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;\n\tPFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;\n\tPFN_vkCmdWaitEvents2 vkCmdWaitEvents2;\n\tPFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;\n\tPFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;\n\tPFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;\n\tPFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;\n\tPFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;\n\tPFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;\n\tPFN_vkGetPrivateData vkGetPrivateData;\n\tPFN_vkQueueSubmit2 vkQueueSubmit2;\n\tPFN_vkSetPrivateData vkSetPrivateData;\n#else\n\tPFN_vkVoidFunction padding_ee798a88[36];\n#endif /* defined(VK_VERSION_1_3) */\n#if defined(VK_VERSION_1_4)\n\tPFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2;\n\tPFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2;\n\tPFN_vkCmdPushConstants2 vkCmdPushConstants2;\n\tPFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet;\n\tPFN_vkCmdPushDescriptorSet2 vkCmdPushDescriptorSet2;\n\tPFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate;\n\tPFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2;\n\tPFN_vkCmdSetLineStipple 
vkCmdSetLineStipple;\n\tPFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations;\n\tPFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices;\n\tPFN_vkCopyImageToImage vkCopyImageToImage;\n\tPFN_vkCopyImageToMemory vkCopyImageToMemory;\n\tPFN_vkCopyMemoryToImage vkCopyMemoryToImage;\n\tPFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout;\n\tPFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2;\n\tPFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity;\n\tPFN_vkMapMemory2 vkMapMemory2;\n\tPFN_vkTransitionImageLayout vkTransitionImageLayout;\n\tPFN_vkUnmapMemory2 vkUnmapMemory2;\n#else\n\tPFN_vkVoidFunction padding_82585fa3[19];\n#endif /* defined(VK_VERSION_1_4) */\n#if defined(VK_AMDX_shader_enqueue)\n\tPFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;\n\tPFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;\n\tPFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;\n\tPFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;\n\tPFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;\n\tPFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;\n\tPFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;\n#else\n\tPFN_vkVoidFunction padding_9d3e2bba[7];\n#endif /* defined(VK_AMDX_shader_enqueue) */\n#if defined(VK_AMD_anti_lag)\n\tPFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD;\n#else\n\tPFN_vkVoidFunction padding_cf792fb4[1];\n#endif /* defined(VK_AMD_anti_lag) */\n#if defined(VK_AMD_buffer_marker)\n\tPFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;\n#else\n\tPFN_vkVoidFunction padding_7836e92f[1];\n#endif /* defined(VK_AMD_buffer_marker) */\n#if defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\n\tPFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;\n#else\n\tPFN_vkVoidFunction 
padding_bbf9b7bb[1];\n#endif /* defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_AMD_display_native_hdr)\n\tPFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;\n#else\n\tPFN_vkVoidFunction padding_6b81b2fb[1];\n#endif /* defined(VK_AMD_display_native_hdr) */\n#if defined(VK_AMD_draw_indirect_count)\n\tPFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;\n\tPFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;\n#else\n\tPFN_vkVoidFunction padding_fbfa9964[2];\n#endif /* defined(VK_AMD_draw_indirect_count) */\n#if defined(VK_AMD_shader_info)\n\tPFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;\n#else\n\tPFN_vkVoidFunction padding_bfb754b[1];\n#endif /* defined(VK_AMD_shader_info) */\n#if defined(VK_ANDROID_external_memory_android_hardware_buffer)\n\tPFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;\n\tPFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;\n#else\n\tPFN_vkVoidFunction padding_c67b1beb[2];\n#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */\n#if defined(VK_ARM_data_graph)\n\tPFN_vkBindDataGraphPipelineSessionMemoryARM vkBindDataGraphPipelineSessionMemoryARM;\n\tPFN_vkCmdDispatchDataGraphARM vkCmdDispatchDataGraphARM;\n\tPFN_vkCreateDataGraphPipelineSessionARM vkCreateDataGraphPipelineSessionARM;\n\tPFN_vkCreateDataGraphPipelinesARM vkCreateDataGraphPipelinesARM;\n\tPFN_vkDestroyDataGraphPipelineSessionARM vkDestroyDataGraphPipelineSessionARM;\n\tPFN_vkGetDataGraphPipelineAvailablePropertiesARM vkGetDataGraphPipelineAvailablePropertiesARM;\n\tPFN_vkGetDataGraphPipelinePropertiesARM vkGetDataGraphPipelinePropertiesARM;\n\tPFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM vkGetDataGraphPipelineSessionBindPointRequirementsARM;\n\tPFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM vkGetDataGraphPipelineSessionMemoryRequirementsARM;\n#else\n\tPFN_vkVoidFunction 
padding_894d85d8[9];\n#endif /* defined(VK_ARM_data_graph) */\n#if defined(VK_ARM_tensors)\n\tPFN_vkBindTensorMemoryARM vkBindTensorMemoryARM;\n\tPFN_vkCmdCopyTensorARM vkCmdCopyTensorARM;\n\tPFN_vkCreateTensorARM vkCreateTensorARM;\n\tPFN_vkCreateTensorViewARM vkCreateTensorViewARM;\n\tPFN_vkDestroyTensorARM vkDestroyTensorARM;\n\tPFN_vkDestroyTensorViewARM vkDestroyTensorViewARM;\n\tPFN_vkGetDeviceTensorMemoryRequirementsARM vkGetDeviceTensorMemoryRequirementsARM;\n\tPFN_vkGetTensorMemoryRequirementsARM vkGetTensorMemoryRequirementsARM;\n#else\n\tPFN_vkVoidFunction padding_df67a729[8];\n#endif /* defined(VK_ARM_tensors) */\n#if defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer)\n\tPFN_vkGetTensorOpaqueCaptureDescriptorDataARM vkGetTensorOpaqueCaptureDescriptorDataARM;\n\tPFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM vkGetTensorViewOpaqueCaptureDescriptorDataARM;\n#else\n\tPFN_vkVoidFunction padding_9483bf7e[2];\n#endif /* defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)\n\tPFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_760a41f5[1];\n#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */\n#if defined(VK_EXT_buffer_device_address)\n\tPFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;\n#else\n\tPFN_vkVoidFunction padding_3b69d885[1];\n#endif /* defined(VK_EXT_buffer_device_address) */\n#if defined(VK_EXT_calibrated_timestamps)\n\tPFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;\n#else\n\tPFN_vkVoidFunction padding_d0981c89[1];\n#endif /* defined(VK_EXT_calibrated_timestamps) */\n#if defined(VK_EXT_color_write_enable)\n\tPFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_d301ecc3[1];\n#endif /* defined(VK_EXT_color_write_enable) */\n#if defined(VK_EXT_conditional_rendering)\n\tPFN_vkCmdBeginConditionalRenderingEXT 
vkCmdBeginConditionalRenderingEXT;\n\tPFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;\n#else\n\tPFN_vkVoidFunction padding_ab532c18[2];\n#endif /* defined(VK_EXT_conditional_rendering) */\n#if defined(VK_EXT_debug_marker)\n\tPFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;\n\tPFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;\n\tPFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;\n\tPFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;\n\tPFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;\n#else\n\tPFN_vkVoidFunction padding_89986968[5];\n#endif /* defined(VK_EXT_debug_marker) */\n#if defined(VK_EXT_depth_bias_control)\n\tPFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;\n#else\n\tPFN_vkVoidFunction padding_bcddab4d[1];\n#endif /* defined(VK_EXT_depth_bias_control) */\n#if defined(VK_EXT_descriptor_buffer)\n\tPFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;\n\tPFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;\n\tPFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;\n\tPFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetDescriptorEXT vkGetDescriptorEXT;\n\tPFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;\n\tPFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;\n\tPFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;\n#else\n\tPFN_vkVoidFunction padding_80aa973c[10];\n#endif /* defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))\n\tPFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT 
vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;\n#else\n\tPFN_vkVoidFunction padding_98d0fb33[1];\n#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */\n#if defined(VK_EXT_device_fault)\n\tPFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;\n#else\n\tPFN_vkVoidFunction padding_55095419[1];\n#endif /* defined(VK_EXT_device_fault) */\n#if defined(VK_EXT_device_generated_commands)\n\tPFN_vkCmdExecuteGeneratedCommandsEXT vkCmdExecuteGeneratedCommandsEXT;\n\tPFN_vkCmdPreprocessGeneratedCommandsEXT vkCmdPreprocessGeneratedCommandsEXT;\n\tPFN_vkCreateIndirectCommandsLayoutEXT vkCreateIndirectCommandsLayoutEXT;\n\tPFN_vkCreateIndirectExecutionSetEXT vkCreateIndirectExecutionSetEXT;\n\tPFN_vkDestroyIndirectCommandsLayoutEXT vkDestroyIndirectCommandsLayoutEXT;\n\tPFN_vkDestroyIndirectExecutionSetEXT vkDestroyIndirectExecutionSetEXT;\n\tPFN_vkGetGeneratedCommandsMemoryRequirementsEXT vkGetGeneratedCommandsMemoryRequirementsEXT;\n\tPFN_vkUpdateIndirectExecutionSetPipelineEXT vkUpdateIndirectExecutionSetPipelineEXT;\n\tPFN_vkUpdateIndirectExecutionSetShaderEXT vkUpdateIndirectExecutionSetShaderEXT;\n#else\n\tPFN_vkVoidFunction padding_7ba7ebaa[9];\n#endif /* defined(VK_EXT_device_generated_commands) */\n#if defined(VK_EXT_discard_rectangles)\n\tPFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;\n#else\n\tPFN_vkVoidFunction padding_d6355c2[1];\n#endif /* defined(VK_EXT_discard_rectangles) */\n#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2\n\tPFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;\n\tPFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;\n#else\n\tPFN_vkVoidFunction padding_7bb44f77[2];\n#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */\n#if defined(VK_EXT_display_control)\n\tPFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;\n\tPFN_vkGetSwapchainCounterEXT 
vkGetSwapchainCounterEXT;\n\tPFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT;\n\tPFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;\n#else\n\tPFN_vkVoidFunction padding_d30dfaaf[4];\n#endif /* defined(VK_EXT_display_control) */\n#if defined(VK_EXT_external_memory_host)\n\tPFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_357656e9[1];\n#endif /* defined(VK_EXT_external_memory_host) */\n#if defined(VK_EXT_external_memory_metal)\n\tPFN_vkGetMemoryMetalHandleEXT vkGetMemoryMetalHandleEXT;\n\tPFN_vkGetMemoryMetalHandlePropertiesEXT vkGetMemoryMetalHandlePropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_37d43fb[2];\n#endif /* defined(VK_EXT_external_memory_metal) */\n#if defined(VK_EXT_fragment_density_map_offset)\n\tPFN_vkCmdEndRendering2EXT vkCmdEndRendering2EXT;\n#else\n\tPFN_vkVoidFunction padding_9c90cf11[1];\n#endif /* defined(VK_EXT_fragment_density_map_offset) */\n#if defined(VK_EXT_full_screen_exclusive)\n\tPFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;\n\tPFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;\n#else\n\tPFN_vkVoidFunction padding_3859df46[2];\n#endif /* defined(VK_EXT_full_screen_exclusive) */\n#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1))\n\tPFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;\n#else\n\tPFN_vkVoidFunction padding_e5b48b5b[1];\n#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */\n#if defined(VK_EXT_hdr_metadata)\n\tPFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;\n#else\n\tPFN_vkVoidFunction padding_ca6d733c[1];\n#endif /* defined(VK_EXT_hdr_metadata) */\n#if defined(VK_EXT_host_image_copy)\n\tPFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;\n\tPFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;\n\tPFN_vkCopyMemoryToImageEXT 
vkCopyMemoryToImageEXT;\n\tPFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;\n#else\n\tPFN_vkVoidFunction padding_dd6d9b61[4];\n#endif /* defined(VK_EXT_host_image_copy) */\n#if defined(VK_EXT_host_query_reset)\n\tPFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;\n#else\n\tPFN_vkVoidFunction padding_34e58bd3[1];\n#endif /* defined(VK_EXT_host_query_reset) */\n#if defined(VK_EXT_image_drm_format_modifier)\n\tPFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_eb50dc14[1];\n#endif /* defined(VK_EXT_image_drm_format_modifier) */\n#if defined(VK_EXT_line_rasterization)\n\tPFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;\n#else\n\tPFN_vkVoidFunction padding_8a212c37[1];\n#endif /* defined(VK_EXT_line_rasterization) */\n#if defined(VK_EXT_mesh_shader)\n\tPFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;\n\tPFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;\n#else\n\tPFN_vkVoidFunction padding_f65e838[2];\n#endif /* defined(VK_EXT_mesh_shader) */\n#if defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\n\tPFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;\n#else\n\tPFN_vkVoidFunction padding_dcbaac2f[1];\n#endif /* defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_EXT_metal_objects)\n\tPFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;\n#else\n\tPFN_vkVoidFunction padding_df21f735[1];\n#endif /* defined(VK_EXT_metal_objects) */\n#if defined(VK_EXT_multi_draw)\n\tPFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;\n\tPFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;\n#else\n\tPFN_vkVoidFunction padding_ce8b93b6[2];\n#endif /* defined(VK_EXT_multi_draw) */\n#if defined(VK_EXT_opacity_micromap)\n\tPFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;\n\tPFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;\n\tPFN_vkCmdCopyMemoryToMicromapEXT 
vkCmdCopyMemoryToMicromapEXT;\n\tPFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;\n\tPFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT;\n\tPFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;\n\tPFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;\n\tPFN_vkCopyMicromapEXT vkCopyMicromapEXT;\n\tPFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;\n\tPFN_vkCreateMicromapEXT vkCreateMicromapEXT;\n\tPFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;\n\tPFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;\n\tPFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;\n\tPFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_fa41e53c[14];\n#endif /* defined(VK_EXT_opacity_micromap) */\n#if defined(VK_EXT_pageable_device_local_memory)\n\tPFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;\n#else\n\tPFN_vkVoidFunction padding_b2d2c2d7[1];\n#endif /* defined(VK_EXT_pageable_device_local_memory) */\n#if defined(VK_EXT_pipeline_properties)\n\tPFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_11313020[1];\n#endif /* defined(VK_EXT_pipeline_properties) */\n#if defined(VK_EXT_private_data)\n\tPFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;\n\tPFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;\n\tPFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;\n\tPFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;\n#else\n\tPFN_vkVoidFunction padding_108010f[4];\n#endif /* defined(VK_EXT_private_data) */\n#if defined(VK_EXT_sample_locations)\n\tPFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;\n#else\n\tPFN_vkVoidFunction padding_26f9079f[1];\n#endif /* defined(VK_EXT_sample_locations) */\n#if defined(VK_EXT_shader_module_identifier)\n\tPFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;\n\tPFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;\n#else\n\tPFN_vkVoidFunction 
padding_e10c8f86[2];\n#endif /* defined(VK_EXT_shader_module_identifier) */\n#if defined(VK_EXT_shader_object)\n\tPFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;\n\tPFN_vkCreateShadersEXT vkCreateShadersEXT;\n\tPFN_vkDestroyShaderEXT vkDestroyShaderEXT;\n\tPFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;\n#else\n\tPFN_vkVoidFunction padding_374f3e18[4];\n#endif /* defined(VK_EXT_shader_object) */\n#if defined(VK_EXT_swapchain_maintenance1)\n\tPFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;\n#else\n\tPFN_vkVoidFunction padding_ea55bf74[1];\n#endif /* defined(VK_EXT_swapchain_maintenance1) */\n#if defined(VK_EXT_transform_feedback)\n\tPFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;\n\tPFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;\n\tPFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;\n\tPFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;\n\tPFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;\n\tPFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;\n#else\n\tPFN_vkVoidFunction padding_36980658[6];\n#endif /* defined(VK_EXT_transform_feedback) */\n#if defined(VK_EXT_validation_cache)\n\tPFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;\n\tPFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;\n\tPFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;\n\tPFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;\n#else\n\tPFN_vkVoidFunction padding_b4f2df29[4];\n#endif /* defined(VK_EXT_validation_cache) */\n#if defined(VK_FUCHSIA_buffer_collection)\n\tPFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;\n\tPFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;\n\tPFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;\n\tPFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;\n\tPFN_vkSetBufferCollectionImageConstraintsFUCHSIA 
vkSetBufferCollectionImageConstraintsFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_8eaa27bc[5];\n#endif /* defined(VK_FUCHSIA_buffer_collection) */\n#if defined(VK_FUCHSIA_external_memory)\n\tPFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;\n\tPFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_e3cb8a67[2];\n#endif /* defined(VK_FUCHSIA_external_memory) */\n#if defined(VK_FUCHSIA_external_semaphore)\n\tPFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;\n\tPFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_3df6f656[2];\n#endif /* defined(VK_FUCHSIA_external_semaphore) */\n#if defined(VK_GOOGLE_display_timing)\n\tPFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;\n\tPFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;\n#else\n\tPFN_vkVoidFunction padding_2a6f50cd[2];\n#endif /* defined(VK_GOOGLE_display_timing) */\n#if defined(VK_HUAWEI_cluster_culling_shader)\n\tPFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;\n\tPFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_75b97be6[2];\n#endif /* defined(VK_HUAWEI_cluster_culling_shader) */\n#if defined(VK_HUAWEI_invocation_mask)\n\tPFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_c3a4569f[1];\n#endif /* defined(VK_HUAWEI_invocation_mask) */\n#if defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2\n\tPFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_2e923f32[1];\n#endif /* defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2 */\n#if defined(VK_HUAWEI_subpass_shading)\n\tPFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;\n#else\n\tPFN_vkVoidFunction 
padding_f766fdf5[1];\n#endif /* defined(VK_HUAWEI_subpass_shading) */\n#if defined(VK_INTEL_performance_query)\n\tPFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;\n\tPFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;\n\tPFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;\n\tPFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;\n\tPFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;\n\tPFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;\n\tPFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;\n\tPFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;\n\tPFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;\n#else\n\tPFN_vkVoidFunction padding_495a0a0b[9];\n#endif /* defined(VK_INTEL_performance_query) */\n#if defined(VK_KHR_acceleration_structure)\n\tPFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;\n\tPFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;\n\tPFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;\n\tPFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;\n\tPFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;\n\tPFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;\n\tPFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;\n\tPFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;\n\tPFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;\n\tPFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;\n\tPFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;\n\tPFN_vkDestroyAccelerationStructureKHR 
vkDestroyAccelerationStructureKHR;\n\tPFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;\n\tPFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;\n\tPFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;\n\tPFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_5a999b78[16];\n#endif /* defined(VK_KHR_acceleration_structure) */\n#if defined(VK_KHR_bind_memory2)\n\tPFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;\n\tPFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;\n#else\n\tPFN_vkVoidFunction padding_ed8481f5[2];\n#endif /* defined(VK_KHR_bind_memory2) */\n#if defined(VK_KHR_buffer_device_address)\n\tPFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;\n\tPFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;\n\tPFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;\n#else\n\tPFN_vkVoidFunction padding_178fdf81[3];\n#endif /* defined(VK_KHR_buffer_device_address) */\n#if defined(VK_KHR_calibrated_timestamps)\n\tPFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR;\n#else\n\tPFN_vkVoidFunction padding_8fd6f40d[1];\n#endif /* defined(VK_KHR_calibrated_timestamps) */\n#if defined(VK_KHR_copy_commands2)\n\tPFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;\n\tPFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;\n\tPFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;\n\tPFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;\n\tPFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;\n\tPFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;\n#else\n\tPFN_vkVoidFunction padding_4c841ff2[6];\n#endif /* defined(VK_KHR_copy_commands2) */\n#if defined(VK_KHR_create_renderpass2)\n\tPFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;\n\tPFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;\n\tPFN_vkCmdNextSubpass2KHR 
vkCmdNextSubpass2KHR;\n\tPFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;\n#else\n\tPFN_vkVoidFunction padding_2a0a8727[4];\n#endif /* defined(VK_KHR_create_renderpass2) */\n#if defined(VK_KHR_deferred_host_operations)\n\tPFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;\n\tPFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;\n\tPFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;\n\tPFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;\n\tPFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;\n#else\n\tPFN_vkVoidFunction padding_346287bb[5];\n#endif /* defined(VK_KHR_deferred_host_operations) */\n#if defined(VK_KHR_descriptor_update_template)\n\tPFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;\n\tPFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;\n\tPFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;\n#else\n\tPFN_vkVoidFunction padding_3d63aec0[3];\n#endif /* defined(VK_KHR_descriptor_update_template) */\n#if defined(VK_KHR_device_group)\n\tPFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;\n\tPFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;\n\tPFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;\n#else\n\tPFN_vkVoidFunction padding_5ebe16bd[3];\n#endif /* defined(VK_KHR_device_group) */\n#if defined(VK_KHR_display_swapchain)\n\tPFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;\n#else\n\tPFN_vkVoidFunction padding_12099367[1];\n#endif /* defined(VK_KHR_display_swapchain) */\n#if defined(VK_KHR_draw_indirect_count)\n\tPFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;\n\tPFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;\n#else\n\tPFN_vkVoidFunction padding_7b5bc4c1[2];\n#endif /* defined(VK_KHR_draw_indirect_count) */\n#if defined(VK_KHR_dynamic_rendering)\n\tPFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;\n\tPFN_vkCmdEndRenderingKHR 
vkCmdEndRenderingKHR;\n#else\n\tPFN_vkVoidFunction padding_b80f75a5[2];\n#endif /* defined(VK_KHR_dynamic_rendering) */\n#if defined(VK_KHR_dynamic_rendering_local_read)\n\tPFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR;\n\tPFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR;\n#else\n\tPFN_vkVoidFunction padding_b1510532[2];\n#endif /* defined(VK_KHR_dynamic_rendering_local_read) */\n#if defined(VK_KHR_external_fence_fd)\n\tPFN_vkGetFenceFdKHR vkGetFenceFdKHR;\n\tPFN_vkImportFenceFdKHR vkImportFenceFdKHR;\n#else\n\tPFN_vkVoidFunction padding_a2c787d5[2];\n#endif /* defined(VK_KHR_external_fence_fd) */\n#if defined(VK_KHR_external_fence_win32)\n\tPFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;\n\tPFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;\n#else\n\tPFN_vkVoidFunction padding_55d8e6a9[2];\n#endif /* defined(VK_KHR_external_fence_win32) */\n#if defined(VK_KHR_external_memory_fd)\n\tPFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;\n\tPFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_982d9e19[2];\n#endif /* defined(VK_KHR_external_memory_fd) */\n#if defined(VK_KHR_external_memory_win32)\n\tPFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;\n\tPFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_4af9e25a[2];\n#endif /* defined(VK_KHR_external_memory_win32) */\n#if defined(VK_KHR_external_semaphore_fd)\n\tPFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;\n\tPFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;\n#else\n\tPFN_vkVoidFunction padding_2237b7cf[2];\n#endif /* defined(VK_KHR_external_semaphore_fd) */\n#if defined(VK_KHR_external_semaphore_win32)\n\tPFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;\n\tPFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;\n#else\n\tPFN_vkVoidFunction padding_c18dea52[2];\n#endif /* 
defined(VK_KHR_external_semaphore_win32) */\n#if defined(VK_KHR_fragment_shading_rate)\n\tPFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;\n#else\n\tPFN_vkVoidFunction padding_f91b0a90[1];\n#endif /* defined(VK_KHR_fragment_shading_rate) */\n#if defined(VK_KHR_get_memory_requirements2)\n\tPFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;\n\tPFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;\n\tPFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;\n#else\n\tPFN_vkVoidFunction padding_79d9c5c4[3];\n#endif /* defined(VK_KHR_get_memory_requirements2) */\n#if defined(VK_KHR_line_rasterization)\n\tPFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR;\n#else\n\tPFN_vkVoidFunction padding_83c2939[1];\n#endif /* defined(VK_KHR_line_rasterization) */\n#if defined(VK_KHR_maintenance1)\n\tPFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;\n#else\n\tPFN_vkVoidFunction padding_4b372c56[1];\n#endif /* defined(VK_KHR_maintenance1) */\n#if defined(VK_KHR_maintenance3)\n\tPFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;\n#else\n\tPFN_vkVoidFunction padding_5ea7858d[1];\n#endif /* defined(VK_KHR_maintenance3) */\n#if defined(VK_KHR_maintenance4)\n\tPFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;\n\tPFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;\n\tPFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR;\n#else\n\tPFN_vkVoidFunction padding_8e2d4198[3];\n#endif /* defined(VK_KHR_maintenance4) */\n#if defined(VK_KHR_maintenance5)\n\tPFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;\n\tPFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;\n\tPFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;\n\tPFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;\n#else\n\tPFN_vkVoidFunction padding_37040339[4];\n#endif /* 
defined(VK_KHR_maintenance5) */\n#if defined(VK_KHR_maintenance6)\n\tPFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR;\n\tPFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR;\n#else\n\tPFN_vkVoidFunction padding_442955d8[2];\n#endif /* defined(VK_KHR_maintenance6) */\n#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor)\n\tPFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR;\n\tPFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR;\n#else\n\tPFN_vkVoidFunction padding_80e8513f[2];\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer)\n\tPFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT;\n\tPFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT;\n#else\n\tPFN_vkVoidFunction padding_2816b9cd[2];\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_KHR_map_memory2)\n\tPFN_vkMapMemory2KHR vkMapMemory2KHR;\n\tPFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;\n#else\n\tPFN_vkVoidFunction padding_5a6d8986[2];\n#endif /* defined(VK_KHR_map_memory2) */\n#if defined(VK_KHR_performance_query)\n\tPFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;\n\tPFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;\n#else\n\tPFN_vkVoidFunction padding_76f2673b[2];\n#endif /* defined(VK_KHR_performance_query) */\n#if defined(VK_KHR_pipeline_binary)\n\tPFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR;\n\tPFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR;\n\tPFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR;\n\tPFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR;\n\tPFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR;\n#else\n\tPFN_vkVoidFunction padding_65232810[5];\n#endif /* defined(VK_KHR_pipeline_binary) */\n#if 
defined(VK_KHR_pipeline_executable_properties)\n\tPFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;\n\tPFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;\n\tPFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;\n#else\n\tPFN_vkVoidFunction padding_f7629b1e[3];\n#endif /* defined(VK_KHR_pipeline_executable_properties) */\n#if defined(VK_KHR_present_wait)\n\tPFN_vkWaitForPresentKHR vkWaitForPresentKHR;\n#else\n\tPFN_vkVoidFunction padding_b16cbe03[1];\n#endif /* defined(VK_KHR_present_wait) */\n#if defined(VK_KHR_present_wait2)\n\tPFN_vkWaitForPresent2KHR vkWaitForPresent2KHR;\n#else\n\tPFN_vkVoidFunction padding_7401483a[1];\n#endif /* defined(VK_KHR_present_wait2) */\n#if defined(VK_KHR_push_descriptor)\n\tPFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;\n#else\n\tPFN_vkVoidFunction padding_8f7712ad[1];\n#endif /* defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)\n\tPFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;\n#else\n\tPFN_vkVoidFunction padding_dd5f9b4a[1];\n#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_ray_tracing_pipeline)\n\tPFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;\n\tPFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;\n\tPFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;\n\tPFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;\n\tPFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;\n\tPFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;\n\tPFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;\n#else\n\tPFN_vkVoidFunction padding_af99aedc[7];\n#endif /* defined(VK_KHR_ray_tracing_pipeline) */\n#if 
defined(VK_KHR_sampler_ycbcr_conversion)\n\tPFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;\n\tPFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;\n#else\n\tPFN_vkVoidFunction padding_88e61b30[2];\n#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */\n#if defined(VK_KHR_shared_presentable_image)\n\tPFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;\n#else\n\tPFN_vkVoidFunction padding_1ff3379[1];\n#endif /* defined(VK_KHR_shared_presentable_image) */\n#if defined(VK_KHR_swapchain)\n\tPFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\n\tPFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\n\tPFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\n\tPFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\n\tPFN_vkQueuePresentKHR vkQueuePresentKHR;\n#else\n\tPFN_vkVoidFunction padding_a1de893b[5];\n#endif /* defined(VK_KHR_swapchain) */\n#if defined(VK_KHR_swapchain_maintenance1)\n\tPFN_vkReleaseSwapchainImagesKHR vkReleaseSwapchainImagesKHR;\n#else\n\tPFN_vkVoidFunction padding_e032d5c4[1];\n#endif /* defined(VK_KHR_swapchain_maintenance1) */\n#if defined(VK_KHR_synchronization2)\n\tPFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;\n\tPFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;\n\tPFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;\n\tPFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;\n\tPFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;\n\tPFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;\n#else\n\tPFN_vkVoidFunction padding_e85bf128[6];\n#endif /* defined(VK_KHR_synchronization2) */\n#if defined(VK_KHR_timeline_semaphore)\n\tPFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;\n\tPFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;\n\tPFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;\n#else\n\tPFN_vkVoidFunction padding_c799d931[3];\n#endif /* defined(VK_KHR_timeline_semaphore) */\n#if defined(VK_KHR_video_decode_queue)\n\tPFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;\n#else\n\tPFN_vkVoidFunction padding_7a7cc7ad[1];\n#endif /* 
defined(VK_KHR_video_decode_queue) */\n#if defined(VK_KHR_video_encode_queue)\n\tPFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;\n\tPFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;\n#else\n\tPFN_vkVoidFunction padding_f2997fb4[2];\n#endif /* defined(VK_KHR_video_encode_queue) */\n#if defined(VK_KHR_video_queue)\n\tPFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;\n\tPFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;\n\tPFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;\n\tPFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;\n\tPFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;\n\tPFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;\n\tPFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;\n\tPFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;\n\tPFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;\n\tPFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;\n#else\n\tPFN_vkVoidFunction padding_98fb7016[10];\n#endif /* defined(VK_KHR_video_queue) */\n#if defined(VK_NVX_binary_import)\n\tPFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;\n\tPFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;\n\tPFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;\n\tPFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;\n\tPFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;\n#else\n\tPFN_vkVoidFunction padding_eb54309b[5];\n#endif /* defined(VK_NVX_binary_import) */\n#if defined(VK_NVX_image_view_handle)\n\tPFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;\n#else\n\tPFN_vkVoidFunction padding_887f6736[1];\n#endif /* defined(VK_NVX_image_view_handle) */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3\n\tPFN_vkGetImageViewHandle64NVX vkGetImageViewHandle64NVX;\n#else\n\tPFN_vkVoidFunction padding_64ad40e2[1];\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3 */\n#if 
defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2\n\tPFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;\n#else\n\tPFN_vkVoidFunction padding_d290479a[1];\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_clip_space_w_scaling)\n\tPFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;\n#else\n\tPFN_vkVoidFunction padding_88d7eb2e[1];\n#endif /* defined(VK_NV_clip_space_w_scaling) */\n#if defined(VK_NV_cluster_acceleration_structure)\n\tPFN_vkCmdBuildClusterAccelerationStructureIndirectNV vkCmdBuildClusterAccelerationStructureIndirectNV;\n\tPFN_vkGetClusterAccelerationStructureBuildSizesNV vkGetClusterAccelerationStructureBuildSizesNV;\n#else\n\tPFN_vkVoidFunction padding_60e35395[2];\n#endif /* defined(VK_NV_cluster_acceleration_structure) */\n#if defined(VK_NV_cooperative_vector)\n\tPFN_vkCmdConvertCooperativeVectorMatrixNV vkCmdConvertCooperativeVectorMatrixNV;\n\tPFN_vkConvertCooperativeVectorMatrixNV vkConvertCooperativeVectorMatrixNV;\n#else\n\tPFN_vkVoidFunction padding_f4a887d0[2];\n#endif /* defined(VK_NV_cooperative_vector) */\n#if defined(VK_NV_copy_memory_indirect)\n\tPFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;\n\tPFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;\n#else\n\tPFN_vkVoidFunction padding_9536230e[2];\n#endif /* defined(VK_NV_copy_memory_indirect) */\n#if defined(VK_NV_cuda_kernel_launch)\n\tPFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;\n\tPFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;\n\tPFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;\n\tPFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;\n\tPFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;\n\tPFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;\n#else\n\tPFN_vkVoidFunction padding_2eabdf3b[6];\n#endif /* defined(VK_NV_cuda_kernel_launch) */\n#if defined(VK_NV_device_diagnostic_checkpoints)\n\tPFN_vkCmdSetCheckpointNV 
vkCmdSetCheckpointNV;\n\tPFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;\n#else\n\tPFN_vkVoidFunction padding_adaa5a21[2];\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) */\n#if defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\n\tPFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;\n#else\n\tPFN_vkVoidFunction padding_c776633d[1];\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_NV_device_generated_commands)\n\tPFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;\n\tPFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;\n\tPFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;\n\tPFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;\n\tPFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;\n\tPFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;\n#else\n\tPFN_vkVoidFunction padding_4c7e4395[6];\n#endif /* defined(VK_NV_device_generated_commands) */\n#if defined(VK_NV_device_generated_commands_compute)\n\tPFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;\n\tPFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV;\n\tPFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;\n#else\n\tPFN_vkVoidFunction padding_5195094c[3];\n#endif /* defined(VK_NV_device_generated_commands_compute) */\n#if defined(VK_NV_external_compute_queue)\n\tPFN_vkCreateExternalComputeQueueNV vkCreateExternalComputeQueueNV;\n\tPFN_vkDestroyExternalComputeQueueNV vkDestroyExternalComputeQueueNV;\n\tPFN_vkGetExternalComputeQueueDataNV vkGetExternalComputeQueueDataNV;\n#else\n\tPFN_vkVoidFunction padding_4f947e0b[3];\n#endif /* defined(VK_NV_external_compute_queue) */\n#if 
defined(VK_NV_external_memory_rdma)\n\tPFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;\n#else\n\tPFN_vkVoidFunction padding_920e405[1];\n#endif /* defined(VK_NV_external_memory_rdma) */\n#if defined(VK_NV_external_memory_win32)\n\tPFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;\n#else\n\tPFN_vkVoidFunction padding_c13d6f3a[1];\n#endif /* defined(VK_NV_external_memory_win32) */\n#if defined(VK_NV_fragment_shading_rate_enums)\n\tPFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;\n#else\n\tPFN_vkVoidFunction padding_4979ca14[1];\n#endif /* defined(VK_NV_fragment_shading_rate_enums) */\n#if defined(VK_NV_low_latency2)\n\tPFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;\n\tPFN_vkLatencySleepNV vkLatencySleepNV;\n\tPFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;\n\tPFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;\n\tPFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;\n#else\n\tPFN_vkVoidFunction padding_fabf8b19[5];\n#endif /* defined(VK_NV_low_latency2) */\n#if defined(VK_NV_memory_decompression)\n\tPFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;\n\tPFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;\n#else\n\tPFN_vkVoidFunction padding_706009[2];\n#endif /* defined(VK_NV_memory_decompression) */\n#if defined(VK_NV_mesh_shader)\n\tPFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;\n\tPFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV;\n#else\n\tPFN_vkVoidFunction padding_ac232758[2];\n#endif /* defined(VK_NV_mesh_shader) */\n#if defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\n\tPFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;\n#else\n\tPFN_vkVoidFunction padding_53495be7[1];\n#endif /* defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_NV_optical_flow)\n\tPFN_vkBindOpticalFlowSessionImageNV 
vkBindOpticalFlowSessionImageNV;\n\tPFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;\n\tPFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;\n\tPFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;\n#else\n\tPFN_vkVoidFunction padding_f67571eb[4];\n#endif /* defined(VK_NV_optical_flow) */\n#if defined(VK_NV_partitioned_acceleration_structure)\n\tPFN_vkCmdBuildPartitionedAccelerationStructuresNV vkCmdBuildPartitionedAccelerationStructuresNV;\n\tPFN_vkGetPartitionedAccelerationStructuresBuildSizesNV vkGetPartitionedAccelerationStructuresBuildSizesNV;\n#else\n\tPFN_vkVoidFunction padding_d27c8c6d[2];\n#endif /* defined(VK_NV_partitioned_acceleration_structure) */\n#if defined(VK_NV_ray_tracing)\n\tPFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;\n\tPFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;\n\tPFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;\n\tPFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;\n\tPFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;\n\tPFN_vkCompileDeferredNV vkCompileDeferredNV;\n\tPFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;\n\tPFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;\n\tPFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;\n\tPFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;\n\tPFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;\n\tPFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;\n#else\n\tPFN_vkVoidFunction padding_feefbeac[12];\n#endif /* defined(VK_NV_ray_tracing) */\n#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2\n\tPFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;\n#else\n\tPFN_vkVoidFunction padding_e3c24f80[1];\n#endif /* defined(VK_NV_scissor_exclusive) && 
VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_scissor_exclusive)\n\tPFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;\n#else\n\tPFN_vkVoidFunction padding_8e88d86c[1];\n#endif /* defined(VK_NV_scissor_exclusive) */\n#if defined(VK_NV_shading_rate_image)\n\tPFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;\n\tPFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;\n\tPFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;\n#else\n\tPFN_vkVoidFunction padding_92a0767f[3];\n#endif /* defined(VK_NV_shading_rate_image) */\n#if defined(VK_QCOM_tile_memory_heap)\n\tPFN_vkCmdBindTileMemoryQCOM vkCmdBindTileMemoryQCOM;\n#else\n\tPFN_vkVoidFunction padding_e2d55d04[1];\n#endif /* defined(VK_QCOM_tile_memory_heap) */\n#if defined(VK_QCOM_tile_properties)\n\tPFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;\n\tPFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;\n#else\n\tPFN_vkVoidFunction padding_be12e32[2];\n#endif /* defined(VK_QCOM_tile_properties) */\n#if defined(VK_QCOM_tile_shading)\n\tPFN_vkCmdBeginPerTileExecutionQCOM vkCmdBeginPerTileExecutionQCOM;\n\tPFN_vkCmdDispatchTileQCOM vkCmdDispatchTileQCOM;\n\tPFN_vkCmdEndPerTileExecutionQCOM vkCmdEndPerTileExecutionQCOM;\n#else\n\tPFN_vkVoidFunction padding_fcd9e1df[3];\n#endif /* defined(VK_QCOM_tile_shading) */\n#if defined(VK_QNX_external_memory_screen_buffer)\n\tPFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;\n#else\n\tPFN_vkVoidFunction padding_1c27735d[1];\n#endif /* defined(VK_QNX_external_memory_screen_buffer) */\n#if defined(VK_VALVE_descriptor_set_host_mapping)\n\tPFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;\n\tPFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;\n#else\n\tPFN_vkVoidFunction padding_fd71e4c6[2];\n#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */\n#if 
(defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control))\n\tPFN_vkCmdSetDepthClampRangeEXT vkCmdSetDepthClampRangeEXT;\n#else\n\tPFN_vkVoidFunction padding_faa18a61[1];\n#endif /* (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control)) */\n#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;\n\tPFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;\n\tPFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;\n\tPFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;\n\tPFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;\n\tPFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;\n\tPFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;\n\tPFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;\n\tPFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;\n\tPFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;\n\tPFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;\n\tPFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;\n#else\n\tPFN_vkVoidFunction padding_3e8c720f[12];\n#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;\n\tPFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;\n\tPFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;\n\tPFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;\n\tPFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_b93e02a6[5];\n#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetAlphaToCoverageEnableEXT 
vkCmdSetAlphaToCoverageEnableEXT;\n\tPFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;\n\tPFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;\n\tPFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;\n\tPFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;\n\tPFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;\n\tPFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;\n\tPFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;\n\tPFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;\n\tPFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;\n#else\n\tPFN_vkVoidFunction padding_ab566e7e[10];\n#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;\n#else\n\tPFN_vkVoidFunction padding_6730ed0c[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback))\n\tPFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;\n#else\n\tPFN_vkVoidFunction padding_d3ebb335[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization))\n\tPFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;\n\tPFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;\n#else\n\tPFN_vkVoidFunction 
padding_a21758f4[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable))\n\tPFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_a498a838[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations))\n\tPFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_67db38de[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced))\n\tPFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;\n#else\n\tPFN_vkVoidFunction padding_fbea7481[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex))\n\tPFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;\n#else\n\tPFN_vkVoidFunction padding_3a8ec90e[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */\n#if 
(defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization))\n\tPFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;\n\tPFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_29cdb756[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control))\n\tPFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;\n#else\n\tPFN_vkVoidFunction padding_815a7240[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))\n\tPFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;\n#else\n\tPFN_vkVoidFunction padding_d1f00511[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))\n\tPFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;\n#else\n\tPFN_vkVoidFunction padding_7a73d553[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_fragment_coverage_to_color))\n\tPFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV;\n\tPFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;\n#else\n\tPFN_vkVoidFunction padding_6045fb8c[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))\n\tPFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;\n\tPFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;\n\tPFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;\n#else\n\tPFN_vkVoidFunction padding_bdc35c80[3];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))\n\tPFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;\n#else\n\tPFN_vkVoidFunction padding_9a5cd6e8[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))\n\tPFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;\n#else\n\tPFN_vkVoidFunction padding_3ee17e96[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_representative_fragment_test)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))\n\tPFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV;\n#else\n\tPFN_vkVoidFunction padding_263d525a[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */\n#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))\n\tPFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;\n#else\n\tPFN_vkVoidFunction padding_ecddace1[1];\n#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */\n#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))\n\tPFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;\n#else\n\tPFN_vkVoidFunction padding_d83e1de1[1];\n#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */\n#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))\n\tPFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;\n#else\n\tPFN_vkVoidFunction padding_60f8358a[1];\n#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\n\tPFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;\n\tPFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;\n#else\n\tPFN_vkVoidFunction padding_460290c6[2];\n#endif /* 
(defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\n\tPFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;\n#else\n\tPFN_vkVoidFunction padding_cffc198[1];\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n\t/* VOLK_GENERATE_DEVICE_TABLE */\n};\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/* VOLK_GENERATE_PROTOTYPES_H */\n#if defined(VK_VERSION_1_0)\nextern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\nextern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\nextern PFN_vkAllocateMemory vkAllocateMemory;\nextern PFN_vkBeginCommandBuffer vkBeginCommandBuffer;\nextern PFN_vkBindBufferMemory vkBindBufferMemory;\nextern PFN_vkBindImageMemory vkBindImageMemory;\nextern PFN_vkCmdBeginQuery vkCmdBeginQuery;\nextern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\nextern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;\nextern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\nextern PFN_vkCmdBindPipeline vkCmdBindPipeline;\nextern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\nextern PFN_vkCmdBlitImage vkCmdBlitImage;\nextern PFN_vkCmdClearAttachments vkCmdClearAttachments;\nextern PFN_vkCmdClearColorImage vkCmdClearColorImage;\nextern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\nextern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;\nextern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\nextern PFN_vkCmdCopyImage vkCmdCopyImage;\nextern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\nextern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\nextern PFN_vkCmdDispatch vkCmdDispatch;\nextern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\nextern PFN_vkCmdDraw vkCmdDraw;\nextern PFN_vkCmdDrawIndexed vkCmdDrawIndexed;\nextern PFN_vkCmdDrawIndexedIndirect 
vkCmdDrawIndexedIndirect;\nextern PFN_vkCmdDrawIndirect vkCmdDrawIndirect;\nextern PFN_vkCmdEndQuery vkCmdEndQuery;\nextern PFN_vkCmdEndRenderPass vkCmdEndRenderPass;\nextern PFN_vkCmdExecuteCommands vkCmdExecuteCommands;\nextern PFN_vkCmdFillBuffer vkCmdFillBuffer;\nextern PFN_vkCmdNextSubpass vkCmdNextSubpass;\nextern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\nextern PFN_vkCmdPushConstants vkCmdPushConstants;\nextern PFN_vkCmdResetEvent vkCmdResetEvent;\nextern PFN_vkCmdResetQueryPool vkCmdResetQueryPool;\nextern PFN_vkCmdResolveImage vkCmdResolveImage;\nextern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\nextern PFN_vkCmdSetDepthBias vkCmdSetDepthBias;\nextern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\nextern PFN_vkCmdSetEvent vkCmdSetEvent;\nextern PFN_vkCmdSetLineWidth vkCmdSetLineWidth;\nextern PFN_vkCmdSetScissor vkCmdSetScissor;\nextern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\nextern PFN_vkCmdSetStencilReference vkCmdSetStencilReference;\nextern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\nextern PFN_vkCmdSetViewport vkCmdSetViewport;\nextern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\nextern PFN_vkCmdWaitEvents vkCmdWaitEvents;\nextern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\nextern PFN_vkCreateBuffer vkCreateBuffer;\nextern PFN_vkCreateBufferView vkCreateBufferView;\nextern PFN_vkCreateCommandPool vkCreateCommandPool;\nextern PFN_vkCreateComputePipelines vkCreateComputePipelines;\nextern PFN_vkCreateDescriptorPool vkCreateDescriptorPool;\nextern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\nextern PFN_vkCreateDevice vkCreateDevice;\nextern PFN_vkCreateEvent vkCreateEvent;\nextern PFN_vkCreateFence vkCreateFence;\nextern PFN_vkCreateFramebuffer vkCreateFramebuffer;\nextern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\nextern PFN_vkCreateImage vkCreateImage;\nextern PFN_vkCreateImageView vkCreateImageView;\nextern PFN_vkCreateInstance vkCreateInstance;\nextern 
PFN_vkCreatePipelineCache vkCreatePipelineCache;\nextern PFN_vkCreatePipelineLayout vkCreatePipelineLayout;\nextern PFN_vkCreateQueryPool vkCreateQueryPool;\nextern PFN_vkCreateRenderPass vkCreateRenderPass;\nextern PFN_vkCreateSampler vkCreateSampler;\nextern PFN_vkCreateSemaphore vkCreateSemaphore;\nextern PFN_vkCreateShaderModule vkCreateShaderModule;\nextern PFN_vkDestroyBuffer vkDestroyBuffer;\nextern PFN_vkDestroyBufferView vkDestroyBufferView;\nextern PFN_vkDestroyCommandPool vkDestroyCommandPool;\nextern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\nextern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\nextern PFN_vkDestroyDevice vkDestroyDevice;\nextern PFN_vkDestroyEvent vkDestroyEvent;\nextern PFN_vkDestroyFence vkDestroyFence;\nextern PFN_vkDestroyFramebuffer vkDestroyFramebuffer;\nextern PFN_vkDestroyImage vkDestroyImage;\nextern PFN_vkDestroyImageView vkDestroyImageView;\nextern PFN_vkDestroyInstance vkDestroyInstance;\nextern PFN_vkDestroyPipeline vkDestroyPipeline;\nextern PFN_vkDestroyPipelineCache vkDestroyPipelineCache;\nextern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\nextern PFN_vkDestroyQueryPool vkDestroyQueryPool;\nextern PFN_vkDestroyRenderPass vkDestroyRenderPass;\nextern PFN_vkDestroySampler vkDestroySampler;\nextern PFN_vkDestroySemaphore vkDestroySemaphore;\nextern PFN_vkDestroyShaderModule vkDestroyShaderModule;\nextern PFN_vkDeviceWaitIdle vkDeviceWaitIdle;\nextern PFN_vkEndCommandBuffer vkEndCommandBuffer;\nextern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;\nextern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties;\nextern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;\nextern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties;\nextern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;\nextern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\nextern 
PFN_vkFreeCommandBuffers vkFreeCommandBuffers;\nextern PFN_vkFreeDescriptorSets vkFreeDescriptorSets;\nextern PFN_vkFreeMemory vkFreeMemory;\nextern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\nextern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\nextern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;\nextern PFN_vkGetDeviceQueue vkGetDeviceQueue;\nextern PFN_vkGetEventStatus vkGetEventStatus;\nextern PFN_vkGetFenceStatus vkGetFenceStatus;\nextern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\nextern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\nextern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\nextern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;\nextern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;\nextern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties;\nextern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;\nextern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties;\nextern PFN_vkGetPipelineCacheData vkGetPipelineCacheData;\nextern PFN_vkGetQueryPoolResults vkGetQueryPoolResults;\nextern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\nextern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\nextern PFN_vkMapMemory vkMapMemory;\nextern PFN_vkMergePipelineCaches vkMergePipelineCaches;\nextern PFN_vkQueueBindSparse vkQueueBindSparse;\nextern PFN_vkQueueSubmit vkQueueSubmit;\nextern PFN_vkQueueWaitIdle vkQueueWaitIdle;\nextern PFN_vkResetCommandBuffer vkResetCommandBuffer;\nextern PFN_vkResetCommandPool vkResetCommandPool;\nextern PFN_vkResetDescriptorPool 
vkResetDescriptorPool;\nextern PFN_vkResetEvent vkResetEvent;\nextern PFN_vkResetFences vkResetFences;\nextern PFN_vkSetEvent vkSetEvent;\nextern PFN_vkUnmapMemory vkUnmapMemory;\nextern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\nextern PFN_vkWaitForFences vkWaitForFences;\n#endif /* defined(VK_VERSION_1_0) */\n#if defined(VK_VERSION_1_1)\nextern PFN_vkBindBufferMemory2 vkBindBufferMemory2;\nextern PFN_vkBindImageMemory2 vkBindImageMemory2;\nextern PFN_vkCmdDispatchBase vkCmdDispatchBase;\nextern PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;\nextern PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;\nextern PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;\nextern PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;\nextern PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;\nextern PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;\nextern PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups;\nextern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;\nextern PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;\nextern PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;\nextern PFN_vkGetDeviceQueue2 vkGetDeviceQueue2;\nextern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\nextern PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;\nextern PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties;\nextern PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties;\nextern PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties;\nextern PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2;\nextern PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties2 
vkGetPhysicalDeviceImageFormatProperties2;\nextern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;\nextern PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2;\nextern PFN_vkTrimCommandPool vkTrimCommandPool;\nextern PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;\n#endif /* defined(VK_VERSION_1_1) */\n#if defined(VK_VERSION_1_2)\nextern PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;\nextern PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;\nextern PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;\nextern PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;\nextern PFN_vkCmdNextSubpass2 vkCmdNextSubpass2;\nextern PFN_vkCreateRenderPass2 vkCreateRenderPass2;\nextern PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;\nextern PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;\nextern PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;\nextern PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;\nextern PFN_vkResetQueryPool vkResetQueryPool;\nextern PFN_vkSignalSemaphore vkSignalSemaphore;\nextern PFN_vkWaitSemaphores vkWaitSemaphores;\n#endif /* defined(VK_VERSION_1_2) */\n#if defined(VK_VERSION_1_3)\nextern PFN_vkCmdBeginRendering vkCmdBeginRendering;\nextern PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;\nextern PFN_vkCmdBlitImage2 vkCmdBlitImage2;\nextern PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;\nextern PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;\nextern PFN_vkCmdCopyImage2 vkCmdCopyImage2;\nextern PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;\nextern PFN_vkCmdEndRendering vkCmdEndRendering;\nextern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\nextern PFN_vkCmdResetEvent2 vkCmdResetEvent2;\nextern 
PFN_vkCmdResolveImage2 vkCmdResolveImage2;\nextern PFN_vkCmdSetCullMode vkCmdSetCullMode;\nextern PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;\nextern PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;\nextern PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;\nextern PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;\nextern PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;\nextern PFN_vkCmdSetEvent2 vkCmdSetEvent2;\nextern PFN_vkCmdSetFrontFace vkCmdSetFrontFace;\nextern PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;\nextern PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;\nextern PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;\nextern PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;\nextern PFN_vkCmdSetStencilOp vkCmdSetStencilOp;\nextern PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;\nextern PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;\nextern PFN_vkCmdWaitEvents2 vkCmdWaitEvents2;\nextern PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;\nextern PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;\nextern PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;\nextern PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;\nextern PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;\nextern PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;\nextern PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties;\nextern PFN_vkGetPrivateData vkGetPrivateData;\nextern PFN_vkQueueSubmit2 vkQueueSubmit2;\nextern PFN_vkSetPrivateData vkSetPrivateData;\n#endif /* defined(VK_VERSION_1_3) */\n#if defined(VK_VERSION_1_4)\nextern PFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2;\nextern PFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2;\nextern PFN_vkCmdPushConstants2 vkCmdPushConstants2;\nextern PFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet;\nextern PFN_vkCmdPushDescriptorSet2 
vkCmdPushDescriptorSet2;\nextern PFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate;\nextern PFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2;\nextern PFN_vkCmdSetLineStipple vkCmdSetLineStipple;\nextern PFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations;\nextern PFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices;\nextern PFN_vkCopyImageToImage vkCopyImageToImage;\nextern PFN_vkCopyImageToMemory vkCopyImageToMemory;\nextern PFN_vkCopyMemoryToImage vkCopyMemoryToImage;\nextern PFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout;\nextern PFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2;\nextern PFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity;\nextern PFN_vkMapMemory2 vkMapMemory2;\nextern PFN_vkTransitionImageLayout vkTransitionImageLayout;\nextern PFN_vkUnmapMemory2 vkUnmapMemory2;\n#endif /* defined(VK_VERSION_1_4) */\n#if defined(VK_AMDX_shader_enqueue)\nextern PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;\nextern PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;\nextern PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;\nextern PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;\nextern PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;\nextern PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;\nextern PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;\n#endif /* defined(VK_AMDX_shader_enqueue) */\n#if defined(VK_AMD_anti_lag)\nextern PFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD;\n#endif /* defined(VK_AMD_anti_lag) */\n#if defined(VK_AMD_buffer_marker)\nextern PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;\n#endif /* defined(VK_AMD_buffer_marker) */\n#if defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || 
defined(VK_KHR_synchronization2))\nextern PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;\n#endif /* defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_AMD_display_native_hdr)\nextern PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;\n#endif /* defined(VK_AMD_display_native_hdr) */\n#if defined(VK_AMD_draw_indirect_count)\nextern PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;\nextern PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;\n#endif /* defined(VK_AMD_draw_indirect_count) */\n#if defined(VK_AMD_shader_info)\nextern PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;\n#endif /* defined(VK_AMD_shader_info) */\n#if defined(VK_ANDROID_external_memory_android_hardware_buffer)\nextern PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;\nextern PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;\n#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */\n#if defined(VK_ARM_data_graph)\nextern PFN_vkBindDataGraphPipelineSessionMemoryARM vkBindDataGraphPipelineSessionMemoryARM;\nextern PFN_vkCmdDispatchDataGraphARM vkCmdDispatchDataGraphARM;\nextern PFN_vkCreateDataGraphPipelineSessionARM vkCreateDataGraphPipelineSessionARM;\nextern PFN_vkCreateDataGraphPipelinesARM vkCreateDataGraphPipelinesARM;\nextern PFN_vkDestroyDataGraphPipelineSessionARM vkDestroyDataGraphPipelineSessionARM;\nextern PFN_vkGetDataGraphPipelineAvailablePropertiesARM vkGetDataGraphPipelineAvailablePropertiesARM;\nextern PFN_vkGetDataGraphPipelinePropertiesARM vkGetDataGraphPipelinePropertiesARM;\nextern PFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM vkGetDataGraphPipelineSessionBindPointRequirementsARM;\nextern PFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM vkGetDataGraphPipelineSessionMemoryRequirementsARM;\nextern PFN_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM 
vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM;\nextern PFN_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM;\n#endif /* defined(VK_ARM_data_graph) */\n#if defined(VK_ARM_tensors)\nextern PFN_vkBindTensorMemoryARM vkBindTensorMemoryARM;\nextern PFN_vkCmdCopyTensorARM vkCmdCopyTensorARM;\nextern PFN_vkCreateTensorARM vkCreateTensorARM;\nextern PFN_vkCreateTensorViewARM vkCreateTensorViewARM;\nextern PFN_vkDestroyTensorARM vkDestroyTensorARM;\nextern PFN_vkDestroyTensorViewARM vkDestroyTensorViewARM;\nextern PFN_vkGetDeviceTensorMemoryRequirementsARM vkGetDeviceTensorMemoryRequirementsARM;\nextern PFN_vkGetPhysicalDeviceExternalTensorPropertiesARM vkGetPhysicalDeviceExternalTensorPropertiesARM;\nextern PFN_vkGetTensorMemoryRequirementsARM vkGetTensorMemoryRequirementsARM;\n#endif /* defined(VK_ARM_tensors) */\n#if defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer)\nextern PFN_vkGetTensorOpaqueCaptureDescriptorDataARM vkGetTensorOpaqueCaptureDescriptorDataARM;\nextern PFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM vkGetTensorViewOpaqueCaptureDescriptorDataARM;\n#endif /* defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_acquire_drm_display)\nextern PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT;\nextern PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT;\n#endif /* defined(VK_EXT_acquire_drm_display) */\n#if defined(VK_EXT_acquire_xlib_display)\nextern PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT;\nextern PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT;\n#endif /* defined(VK_EXT_acquire_xlib_display) */\n#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)\nextern PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;\n#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */\n#if defined(VK_EXT_buffer_device_address)\nextern PFN_vkGetBufferDeviceAddressEXT 
vkGetBufferDeviceAddressEXT;\n#endif /* defined(VK_EXT_buffer_device_address) */\n#if defined(VK_EXT_calibrated_timestamps)\nextern PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;\nextern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT;\n#endif /* defined(VK_EXT_calibrated_timestamps) */\n#if defined(VK_EXT_color_write_enable)\nextern PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;\n#endif /* defined(VK_EXT_color_write_enable) */\n#if defined(VK_EXT_conditional_rendering)\nextern PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;\nextern PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;\n#endif /* defined(VK_EXT_conditional_rendering) */\n#if defined(VK_EXT_debug_marker)\nextern PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;\nextern PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;\nextern PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;\nextern PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;\nextern PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;\n#endif /* defined(VK_EXT_debug_marker) */\n#if defined(VK_EXT_debug_report)\nextern PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT;\nextern PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT;\nextern PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT;\n#endif /* defined(VK_EXT_debug_report) */\n#if defined(VK_EXT_debug_utils)\nextern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;\nextern PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;\nextern PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT;\nextern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;\nextern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT;\nextern PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT;\nextern PFN_vkQueueEndDebugUtilsLabelEXT 
vkQueueEndDebugUtilsLabelEXT;\nextern PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT;\nextern PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT;\nextern PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT;\nextern PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT;\n#endif /* defined(VK_EXT_debug_utils) */\n#if defined(VK_EXT_depth_bias_control)\nextern PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;\n#endif /* defined(VK_EXT_depth_bias_control) */\n#if defined(VK_EXT_descriptor_buffer)\nextern PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;\nextern PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;\nextern PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;\nextern PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetDescriptorEXT vkGetDescriptorEXT;\nextern PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;\nextern PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;\nextern PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;\n#endif /* defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))\nextern PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;\n#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */\n#if defined(VK_EXT_device_fault)\nextern PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;\n#endif /* defined(VK_EXT_device_fault) */\n#if 
defined(VK_EXT_device_generated_commands)\nextern PFN_vkCmdExecuteGeneratedCommandsEXT vkCmdExecuteGeneratedCommandsEXT;\nextern PFN_vkCmdPreprocessGeneratedCommandsEXT vkCmdPreprocessGeneratedCommandsEXT;\nextern PFN_vkCreateIndirectCommandsLayoutEXT vkCreateIndirectCommandsLayoutEXT;\nextern PFN_vkCreateIndirectExecutionSetEXT vkCreateIndirectExecutionSetEXT;\nextern PFN_vkDestroyIndirectCommandsLayoutEXT vkDestroyIndirectCommandsLayoutEXT;\nextern PFN_vkDestroyIndirectExecutionSetEXT vkDestroyIndirectExecutionSetEXT;\nextern PFN_vkGetGeneratedCommandsMemoryRequirementsEXT vkGetGeneratedCommandsMemoryRequirementsEXT;\nextern PFN_vkUpdateIndirectExecutionSetPipelineEXT vkUpdateIndirectExecutionSetPipelineEXT;\nextern PFN_vkUpdateIndirectExecutionSetShaderEXT vkUpdateIndirectExecutionSetShaderEXT;\n#endif /* defined(VK_EXT_device_generated_commands) */\n#if defined(VK_EXT_direct_mode_display)\nextern PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT;\n#endif /* defined(VK_EXT_direct_mode_display) */\n#if defined(VK_EXT_directfb_surface)\nextern PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT;\nextern PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT;\n#endif /* defined(VK_EXT_directfb_surface) */\n#if defined(VK_EXT_discard_rectangles)\nextern PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;\n#endif /* defined(VK_EXT_discard_rectangles) */\n#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2\nextern PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;\nextern PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;\n#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */\n#if defined(VK_EXT_display_control)\nextern PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;\nextern PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT;\nextern PFN_vkRegisterDeviceEventEXT 
vkRegisterDeviceEventEXT;\nextern PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;\n#endif /* defined(VK_EXT_display_control) */\n#if defined(VK_EXT_display_surface_counter)\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT;\n#endif /* defined(VK_EXT_display_surface_counter) */\n#if defined(VK_EXT_external_memory_host)\nextern PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;\n#endif /* defined(VK_EXT_external_memory_host) */\n#if defined(VK_EXT_external_memory_metal)\nextern PFN_vkGetMemoryMetalHandleEXT vkGetMemoryMetalHandleEXT;\nextern PFN_vkGetMemoryMetalHandlePropertiesEXT vkGetMemoryMetalHandlePropertiesEXT;\n#endif /* defined(VK_EXT_external_memory_metal) */\n#if defined(VK_EXT_fragment_density_map_offset)\nextern PFN_vkCmdEndRendering2EXT vkCmdEndRendering2EXT;\n#endif /* defined(VK_EXT_fragment_density_map_offset) */\n#if defined(VK_EXT_full_screen_exclusive)\nextern PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT;\nextern PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;\n#endif /* defined(VK_EXT_full_screen_exclusive) */\n#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1))\nextern PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;\n#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */\n#if defined(VK_EXT_hdr_metadata)\nextern PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;\n#endif /* defined(VK_EXT_hdr_metadata) */\n#if defined(VK_EXT_headless_surface)\nextern PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT;\n#endif /* defined(VK_EXT_headless_surface) */\n#if defined(VK_EXT_host_image_copy)\nextern PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;\nextern 
PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;\nextern PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT;\nextern PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;\n#endif /* defined(VK_EXT_host_image_copy) */\n#if defined(VK_EXT_host_query_reset)\nextern PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;\n#endif /* defined(VK_EXT_host_query_reset) */\n#if defined(VK_EXT_image_drm_format_modifier)\nextern PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;\n#endif /* defined(VK_EXT_image_drm_format_modifier) */\n#if defined(VK_EXT_line_rasterization)\nextern PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;\n#endif /* defined(VK_EXT_line_rasterization) */\n#if defined(VK_EXT_mesh_shader)\nextern PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;\nextern PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;\n#endif /* defined(VK_EXT_mesh_shader) */\n#if defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\nextern PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;\n#endif /* defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_EXT_metal_objects)\nextern PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;\n#endif /* defined(VK_EXT_metal_objects) */\n#if defined(VK_EXT_metal_surface)\nextern PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT;\n#endif /* defined(VK_EXT_metal_surface) */\n#if defined(VK_EXT_multi_draw)\nextern PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;\nextern PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;\n#endif /* defined(VK_EXT_multi_draw) */\n#if defined(VK_EXT_opacity_micromap)\nextern PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;\nextern PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;\nextern PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT;\nextern PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;\nextern PFN_vkCmdCopyMicromapToMemoryEXT 
vkCmdCopyMicromapToMemoryEXT;\nextern PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;\nextern PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;\nextern PFN_vkCopyMicromapEXT vkCopyMicromapEXT;\nextern PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;\nextern PFN_vkCreateMicromapEXT vkCreateMicromapEXT;\nextern PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;\nextern PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;\nextern PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;\nextern PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;\n#endif /* defined(VK_EXT_opacity_micromap) */\n#if defined(VK_EXT_pageable_device_local_memory)\nextern PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;\n#endif /* defined(VK_EXT_pageable_device_local_memory) */\n#if defined(VK_EXT_pipeline_properties)\nextern PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;\n#endif /* defined(VK_EXT_pipeline_properties) */\n#if defined(VK_EXT_private_data)\nextern PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;\nextern PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;\nextern PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;\nextern PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;\n#endif /* defined(VK_EXT_private_data) */\n#if defined(VK_EXT_sample_locations)\nextern PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;\nextern PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT;\n#endif /* defined(VK_EXT_sample_locations) */\n#if defined(VK_EXT_shader_module_identifier)\nextern PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;\nextern PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;\n#endif /* defined(VK_EXT_shader_module_identifier) */\n#if defined(VK_EXT_shader_object)\nextern PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;\nextern PFN_vkCreateShadersEXT vkCreateShadersEXT;\nextern 
PFN_vkDestroyShaderEXT vkDestroyShaderEXT;\nextern PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;\n#endif /* defined(VK_EXT_shader_object) */\n#if defined(VK_EXT_swapchain_maintenance1)\nextern PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;\n#endif /* defined(VK_EXT_swapchain_maintenance1) */\n#if defined(VK_EXT_tooling_info)\nextern PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT;\n#endif /* defined(VK_EXT_tooling_info) */\n#if defined(VK_EXT_transform_feedback)\nextern PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;\nextern PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;\nextern PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;\nextern PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;\nextern PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;\nextern PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;\n#endif /* defined(VK_EXT_transform_feedback) */\n#if defined(VK_EXT_validation_cache)\nextern PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;\nextern PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;\nextern PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;\nextern PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;\n#endif /* defined(VK_EXT_validation_cache) */\n#if defined(VK_FUCHSIA_buffer_collection)\nextern PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;\nextern PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;\nextern PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;\nextern PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;\nextern PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA;\n#endif /* defined(VK_FUCHSIA_buffer_collection) */\n#if defined(VK_FUCHSIA_external_memory)\nextern PFN_vkGetMemoryZirconHandleFUCHSIA 
vkGetMemoryZirconHandleFUCHSIA;\nextern PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;\n#endif /* defined(VK_FUCHSIA_external_memory) */\n#if defined(VK_FUCHSIA_external_semaphore)\nextern PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;\nextern PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;\n#endif /* defined(VK_FUCHSIA_external_semaphore) */\n#if defined(VK_FUCHSIA_imagepipe_surface)\nextern PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA;\n#endif /* defined(VK_FUCHSIA_imagepipe_surface) */\n#if defined(VK_GGP_stream_descriptor_surface)\nextern PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP;\n#endif /* defined(VK_GGP_stream_descriptor_surface) */\n#if defined(VK_GOOGLE_display_timing)\nextern PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;\nextern PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;\n#endif /* defined(VK_GOOGLE_display_timing) */\n#if defined(VK_HUAWEI_cluster_culling_shader)\nextern PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;\nextern PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;\n#endif /* defined(VK_HUAWEI_cluster_culling_shader) */\n#if defined(VK_HUAWEI_invocation_mask)\nextern PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;\n#endif /* defined(VK_HUAWEI_invocation_mask) */\n#if defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2\nextern PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;\n#endif /* defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2 */\n#if defined(VK_HUAWEI_subpass_shading)\nextern PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;\n#endif /* defined(VK_HUAWEI_subpass_shading) */\n#if defined(VK_INTEL_performance_query)\nextern PFN_vkAcquirePerformanceConfigurationINTEL 
vkAcquirePerformanceConfigurationINTEL;\nextern PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;\nextern PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;\nextern PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;\nextern PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;\nextern PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;\nextern PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;\nextern PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;\nextern PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;\n#endif /* defined(VK_INTEL_performance_query) */\n#if defined(VK_KHR_acceleration_structure)\nextern PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;\nextern PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;\nextern PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;\nextern PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;\nextern PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;\nextern PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;\nextern PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;\nextern PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;\nextern PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;\nextern PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;\nextern PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;\nextern PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;\nextern PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;\nextern 
PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;\nextern PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;\nextern PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;\n#endif /* defined(VK_KHR_acceleration_structure) */\n#if defined(VK_KHR_android_surface)\nextern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;\n#endif /* defined(VK_KHR_android_surface) */\n#if defined(VK_KHR_bind_memory2)\nextern PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;\nextern PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;\n#endif /* defined(VK_KHR_bind_memory2) */\n#if defined(VK_KHR_buffer_device_address)\nextern PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;\nextern PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;\nextern PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;\n#endif /* defined(VK_KHR_buffer_device_address) */\n#if defined(VK_KHR_calibrated_timestamps)\nextern PFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR;\nextern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR vkGetPhysicalDeviceCalibrateableTimeDomainsKHR;\n#endif /* defined(VK_KHR_calibrated_timestamps) */\n#if defined(VK_KHR_cooperative_matrix)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR;\n#endif /* defined(VK_KHR_cooperative_matrix) */\n#if defined(VK_KHR_copy_commands2)\nextern PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;\nextern PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;\nextern PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;\nextern PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;\nextern PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;\nextern PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;\n#endif /* defined(VK_KHR_copy_commands2) */\n#if defined(VK_KHR_create_renderpass2)\nextern 
PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;\nextern PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;\nextern PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;\nextern PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;\n#endif /* defined(VK_KHR_create_renderpass2) */\n#if defined(VK_KHR_deferred_host_operations)\nextern PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;\nextern PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;\nextern PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;\nextern PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;\nextern PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;\n#endif /* defined(VK_KHR_deferred_host_operations) */\n#if defined(VK_KHR_descriptor_update_template)\nextern PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;\nextern PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;\nextern PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;\n#endif /* defined(VK_KHR_descriptor_update_template) */\n#if defined(VK_KHR_device_group)\nextern PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;\nextern PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;\nextern PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;\n#endif /* defined(VK_KHR_device_group) */\n#if defined(VK_KHR_device_group_creation)\nextern PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR;\n#endif /* defined(VK_KHR_device_group_creation) */\n#if defined(VK_KHR_display)\nextern PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR;\nextern PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR;\nextern PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR;\nextern PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR;\nextern PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR;\nextern 
PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR;\nextern PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR;\n#endif /* defined(VK_KHR_display) */\n#if defined(VK_KHR_display_swapchain)\nextern PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;\n#endif /* defined(VK_KHR_display_swapchain) */\n#if defined(VK_KHR_draw_indirect_count)\nextern PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;\nextern PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;\n#endif /* defined(VK_KHR_draw_indirect_count) */\n#if defined(VK_KHR_dynamic_rendering)\nextern PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;\nextern PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR;\n#endif /* defined(VK_KHR_dynamic_rendering) */\n#if defined(VK_KHR_dynamic_rendering_local_read)\nextern PFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR;\nextern PFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR;\n#endif /* defined(VK_KHR_dynamic_rendering_local_read) */\n#if defined(VK_KHR_external_fence_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR;\n#endif /* defined(VK_KHR_external_fence_capabilities) */\n#if defined(VK_KHR_external_fence_fd)\nextern PFN_vkGetFenceFdKHR vkGetFenceFdKHR;\nextern PFN_vkImportFenceFdKHR vkImportFenceFdKHR;\n#endif /* defined(VK_KHR_external_fence_fd) */\n#if defined(VK_KHR_external_fence_win32)\nextern PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;\nextern PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;\n#endif /* defined(VK_KHR_external_fence_win32) */\n#if defined(VK_KHR_external_memory_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_capabilities) */\n#if 
defined(VK_KHR_external_memory_fd)\nextern PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;\nextern PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_fd) */\n#if defined(VK_KHR_external_memory_win32)\nextern PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;\nextern PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_win32) */\n#if defined(VK_KHR_external_semaphore_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR;\n#endif /* defined(VK_KHR_external_semaphore_capabilities) */\n#if defined(VK_KHR_external_semaphore_fd)\nextern PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;\nextern PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;\n#endif /* defined(VK_KHR_external_semaphore_fd) */\n#if defined(VK_KHR_external_semaphore_win32)\nextern PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;\nextern PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;\n#endif /* defined(VK_KHR_external_semaphore_win32) */\n#if defined(VK_KHR_fragment_shading_rate)\nextern PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;\nextern PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR;\n#endif /* defined(VK_KHR_fragment_shading_rate) */\n#if defined(VK_KHR_get_display_properties2)\nextern PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR;\nextern PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR;\nextern PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR vkGetPhysicalDeviceDisplayPlaneProperties2KHR;\nextern PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR;\n#endif /* defined(VK_KHR_get_display_properties2) */\n#if defined(VK_KHR_get_memory_requirements2)\nextern PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;\nextern 
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;\nextern PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;\n#endif /* defined(VK_KHR_get_memory_requirements2) */\n#if defined(VK_KHR_get_physical_device_properties2)\nextern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;\nextern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR;\nextern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;\nextern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR;\n#endif /* defined(VK_KHR_get_physical_device_properties2) */\n#if defined(VK_KHR_get_surface_capabilities2)\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR;\n#endif /* defined(VK_KHR_get_surface_capabilities2) */\n#if defined(VK_KHR_line_rasterization)\nextern PFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR;\n#endif /* defined(VK_KHR_line_rasterization) */\n#if defined(VK_KHR_maintenance1)\nextern PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;\n#endif /* defined(VK_KHR_maintenance1) */\n#if defined(VK_KHR_maintenance3)\nextern PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;\n#endif /* defined(VK_KHR_maintenance3) */\n#if defined(VK_KHR_maintenance4)\nextern PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;\nextern PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;\nextern PFN_vkGetDeviceImageSparseMemoryRequirementsKHR 
vkGetDeviceImageSparseMemoryRequirementsKHR;\n#endif /* defined(VK_KHR_maintenance4) */\n#if defined(VK_KHR_maintenance5)\nextern PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;\nextern PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;\nextern PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;\nextern PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;\n#endif /* defined(VK_KHR_maintenance5) */\n#if defined(VK_KHR_maintenance6)\nextern PFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR;\nextern PFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR;\n#endif /* defined(VK_KHR_maintenance6) */\n#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor)\nextern PFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR;\nextern PFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR;\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer)\nextern PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT;\nextern PFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT;\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_KHR_map_memory2)\nextern PFN_vkMapMemory2KHR vkMapMemory2KHR;\nextern PFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;\n#endif /* defined(VK_KHR_map_memory2) */\n#if defined(VK_KHR_performance_query)\nextern PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;\nextern PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR;\nextern PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR;\nextern PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;\n#endif /* defined(VK_KHR_performance_query) */\n#if 
defined(VK_KHR_pipeline_binary)\nextern PFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR;\nextern PFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR;\nextern PFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR;\nextern PFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR;\nextern PFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR;\n#endif /* defined(VK_KHR_pipeline_binary) */\n#if defined(VK_KHR_pipeline_executable_properties)\nextern PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;\nextern PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;\nextern PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;\n#endif /* defined(VK_KHR_pipeline_executable_properties) */\n#if defined(VK_KHR_present_wait)\nextern PFN_vkWaitForPresentKHR vkWaitForPresentKHR;\n#endif /* defined(VK_KHR_present_wait) */\n#if defined(VK_KHR_present_wait2)\nextern PFN_vkWaitForPresent2KHR vkWaitForPresent2KHR;\n#endif /* defined(VK_KHR_present_wait2) */\n#if defined(VK_KHR_push_descriptor)\nextern PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;\n#endif /* defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)\nextern PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;\n#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_ray_tracing_pipeline)\nextern PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;\nextern PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;\nextern PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;\nextern PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;\nextern PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;\nextern PFN_vkGetRayTracingShaderGroupHandlesKHR 
vkGetRayTracingShaderGroupHandlesKHR;\nextern PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;\n#endif /* defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_sampler_ycbcr_conversion)\nextern PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;\nextern PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;\n#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */\n#if defined(VK_KHR_shared_presentable_image)\nextern PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;\n#endif /* defined(VK_KHR_shared_presentable_image) */\n#if defined(VK_KHR_surface)\nextern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;\n#endif /* defined(VK_KHR_surface) */\n#if defined(VK_KHR_swapchain)\nextern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\nextern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\nextern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\nextern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\nextern PFN_vkQueuePresentKHR vkQueuePresentKHR;\n#endif /* defined(VK_KHR_swapchain) */\n#if defined(VK_KHR_swapchain_maintenance1)\nextern PFN_vkReleaseSwapchainImagesKHR vkReleaseSwapchainImagesKHR;\n#endif /* defined(VK_KHR_swapchain_maintenance1) */\n#if defined(VK_KHR_synchronization2)\nextern PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;\nextern PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;\nextern PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;\nextern PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;\nextern PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;\nextern PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;\n#endif /* 
defined(VK_KHR_synchronization2) */\n#if defined(VK_KHR_timeline_semaphore)\nextern PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;\nextern PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;\nextern PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;\n#endif /* defined(VK_KHR_timeline_semaphore) */\n#if defined(VK_KHR_video_decode_queue)\nextern PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;\n#endif /* defined(VK_KHR_video_decode_queue) */\n#if defined(VK_KHR_video_encode_queue)\nextern PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;\nextern PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;\nextern PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR;\n#endif /* defined(VK_KHR_video_encode_queue) */\n#if defined(VK_KHR_video_queue)\nextern PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;\nextern PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;\nextern PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;\nextern PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;\nextern PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;\nextern PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;\nextern PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;\nextern PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;\nextern PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR;\nextern PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR;\nextern PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;\nextern PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;\n#endif /* defined(VK_KHR_video_queue) */\n#if defined(VK_KHR_wayland_surface)\nextern PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR 
vkGetPhysicalDeviceWaylandPresentationSupportKHR;\n#endif /* defined(VK_KHR_wayland_surface) */\n#if defined(VK_KHR_win32_surface)\nextern PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR;\nextern PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR;\n#endif /* defined(VK_KHR_win32_surface) */\n#if defined(VK_KHR_xcb_surface)\nextern PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR;\n#endif /* defined(VK_KHR_xcb_surface) */\n#if defined(VK_KHR_xlib_surface)\nextern PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR;\n#endif /* defined(VK_KHR_xlib_surface) */\n#if defined(VK_MVK_ios_surface)\nextern PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK;\n#endif /* defined(VK_MVK_ios_surface) */\n#if defined(VK_MVK_macos_surface)\nextern PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK;\n#endif /* defined(VK_MVK_macos_surface) */\n#if defined(VK_NN_vi_surface)\nextern PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN;\n#endif /* defined(VK_NN_vi_surface) */\n#if defined(VK_NVX_binary_import)\nextern PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;\nextern PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;\nextern PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;\nextern PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;\nextern PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;\n#endif /* defined(VK_NVX_binary_import) */\n#if defined(VK_NVX_image_view_handle)\nextern PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;\n#endif /* defined(VK_NVX_image_view_handle) */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3\nextern PFN_vkGetImageViewHandle64NVX vkGetImageViewHandle64NVX;\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3 */\n#if 
defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2\nextern PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_acquire_winrt_display)\nextern PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV;\nextern PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV;\n#endif /* defined(VK_NV_acquire_winrt_display) */\n#if defined(VK_NV_clip_space_w_scaling)\nextern PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;\n#endif /* defined(VK_NV_clip_space_w_scaling) */\n#if defined(VK_NV_cluster_acceleration_structure)\nextern PFN_vkCmdBuildClusterAccelerationStructureIndirectNV vkCmdBuildClusterAccelerationStructureIndirectNV;\nextern PFN_vkGetClusterAccelerationStructureBuildSizesNV vkGetClusterAccelerationStructureBuildSizesNV;\n#endif /* defined(VK_NV_cluster_acceleration_structure) */\n#if defined(VK_NV_cooperative_matrix)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV;\n#endif /* defined(VK_NV_cooperative_matrix) */\n#if defined(VK_NV_cooperative_matrix2)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV;\n#endif /* defined(VK_NV_cooperative_matrix2) */\n#if defined(VK_NV_cooperative_vector)\nextern PFN_vkCmdConvertCooperativeVectorMatrixNV vkCmdConvertCooperativeVectorMatrixNV;\nextern PFN_vkConvertCooperativeVectorMatrixNV vkConvertCooperativeVectorMatrixNV;\nextern PFN_vkGetPhysicalDeviceCooperativeVectorPropertiesNV vkGetPhysicalDeviceCooperativeVectorPropertiesNV;\n#endif /* defined(VK_NV_cooperative_vector) */\n#if defined(VK_NV_copy_memory_indirect)\nextern PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;\nextern PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;\n#endif /* defined(VK_NV_copy_memory_indirect) */\n#if 
defined(VK_NV_coverage_reduction_mode)\nextern PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV;\n#endif /* defined(VK_NV_coverage_reduction_mode) */\n#if defined(VK_NV_cuda_kernel_launch)\nextern PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;\nextern PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;\nextern PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;\nextern PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;\nextern PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;\nextern PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;\n#endif /* defined(VK_NV_cuda_kernel_launch) */\n#if defined(VK_NV_device_diagnostic_checkpoints)\nextern PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;\nextern PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) */\n#if defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\nextern PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_NV_device_generated_commands)\nextern PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;\nextern PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;\nextern PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;\nextern PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;\nextern PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;\nextern PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;\n#endif /* defined(VK_NV_device_generated_commands) */\n#if defined(VK_NV_device_generated_commands_compute)\nextern PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;\nextern PFN_vkGetPipelineIndirectDeviceAddressNV 
vkGetPipelineIndirectDeviceAddressNV;\nextern PFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;\n#endif /* defined(VK_NV_device_generated_commands_compute) */\n#if defined(VK_NV_external_compute_queue)\nextern PFN_vkCreateExternalComputeQueueNV vkCreateExternalComputeQueueNV;\nextern PFN_vkDestroyExternalComputeQueueNV vkDestroyExternalComputeQueueNV;\nextern PFN_vkGetExternalComputeQueueDataNV vkGetExternalComputeQueueDataNV;\n#endif /* defined(VK_NV_external_compute_queue) */\n#if defined(VK_NV_external_memory_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV;\n#endif /* defined(VK_NV_external_memory_capabilities) */\n#if defined(VK_NV_external_memory_rdma)\nextern PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;\n#endif /* defined(VK_NV_external_memory_rdma) */\n#if defined(VK_NV_external_memory_win32)\nextern PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;\n#endif /* defined(VK_NV_external_memory_win32) */\n#if defined(VK_NV_fragment_shading_rate_enums)\nextern PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;\n#endif /* defined(VK_NV_fragment_shading_rate_enums) */\n#if defined(VK_NV_low_latency2)\nextern PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;\nextern PFN_vkLatencySleepNV vkLatencySleepNV;\nextern PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;\nextern PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;\nextern PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;\n#endif /* defined(VK_NV_low_latency2) */\n#if defined(VK_NV_memory_decompression)\nextern PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;\nextern PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;\n#endif /* defined(VK_NV_memory_decompression) */\n#if defined(VK_NV_mesh_shader)\nextern PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;\nextern PFN_vkCmdDrawMeshTasksNV 
vkCmdDrawMeshTasksNV;\n#endif /* defined(VK_NV_mesh_shader) */\n#if defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\nextern PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;\n#endif /* defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_NV_optical_flow)\nextern PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV;\nextern PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;\nextern PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;\nextern PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;\nextern PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV vkGetPhysicalDeviceOpticalFlowImageFormatsNV;\n#endif /* defined(VK_NV_optical_flow) */\n#if defined(VK_NV_partitioned_acceleration_structure)\nextern PFN_vkCmdBuildPartitionedAccelerationStructuresNV vkCmdBuildPartitionedAccelerationStructuresNV;\nextern PFN_vkGetPartitionedAccelerationStructuresBuildSizesNV vkGetPartitionedAccelerationStructuresBuildSizesNV;\n#endif /* defined(VK_NV_partitioned_acceleration_structure) */\n#if defined(VK_NV_ray_tracing)\nextern PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;\nextern PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;\nextern PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;\nextern PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;\nextern PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;\nextern PFN_vkCompileDeferredNV vkCompileDeferredNV;\nextern PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;\nextern PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;\nextern PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;\nextern PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;\nextern 
PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;\nextern PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;\n#endif /* defined(VK_NV_ray_tracing) */\n#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2\nextern PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;\n#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_scissor_exclusive)\nextern PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;\n#endif /* defined(VK_NV_scissor_exclusive) */\n#if defined(VK_NV_shading_rate_image)\nextern PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;\nextern PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;\nextern PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;\n#endif /* defined(VK_NV_shading_rate_image) */\n#if defined(VK_OHOS_surface)\nextern PFN_vkCreateSurfaceOHOS vkCreateSurfaceOHOS;\n#endif /* defined(VK_OHOS_surface) */\n#if defined(VK_QCOM_tile_memory_heap)\nextern PFN_vkCmdBindTileMemoryQCOM vkCmdBindTileMemoryQCOM;\n#endif /* defined(VK_QCOM_tile_memory_heap) */\n#if defined(VK_QCOM_tile_properties)\nextern PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;\nextern PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;\n#endif /* defined(VK_QCOM_tile_properties) */\n#if defined(VK_QCOM_tile_shading)\nextern PFN_vkCmdBeginPerTileExecutionQCOM vkCmdBeginPerTileExecutionQCOM;\nextern PFN_vkCmdDispatchTileQCOM vkCmdDispatchTileQCOM;\nextern PFN_vkCmdEndPerTileExecutionQCOM vkCmdEndPerTileExecutionQCOM;\n#endif /* defined(VK_QCOM_tile_shading) */\n#if defined(VK_QNX_external_memory_screen_buffer)\nextern PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;\n#endif /* defined(VK_QNX_external_memory_screen_buffer) */\n#if defined(VK_QNX_screen_surface)\nextern 
PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX;\nextern PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX;\n#endif /* defined(VK_QNX_screen_surface) */\n#if defined(VK_VALVE_descriptor_set_host_mapping)\nextern PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;\nextern PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;\n#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */\n#if (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control))\nextern PFN_vkCmdSetDepthClampRangeEXT vkCmdSetDepthClampRangeEXT;\n#endif /* (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control)) */\n#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;\nextern PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;\nextern PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;\nextern PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;\nextern PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;\nextern PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;\nextern PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;\nextern PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;\nextern PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;\nextern PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;\nextern PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;\nextern PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;\nextern PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;\nextern 
PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;\nextern PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;\nextern PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT;\nextern PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;\nextern PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;\nextern PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;\nextern PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;\nextern PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;\nextern PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;\nextern PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;\nextern PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;\nextern PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback))\nextern PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */\n#if (defined(VK_EXT_extended_dynamic_state3) 
&& defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization))\nextern PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;\nextern PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable))\nextern PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations))\nextern PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced))\nextern PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex))\nextern PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && 
defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization))\nextern PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;\nextern PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control))\nextern PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))\nextern PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))\nextern PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color))\nextern PFN_vkCmdSetCoverageToColorEnableNV 
vkCmdSetCoverageToColorEnableNV;\nextern PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))\nextern PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;\nextern PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;\nextern PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))\nextern PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))\nextern PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))\nextern PFN_vkCmdSetCoverageReductionModeNV 
vkCmdSetCoverageReductionModeNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */\n#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))\nextern PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;\n#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */\n#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))\nextern PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;\n#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */\n#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))\nextern PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;\n#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\nextern PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;\nextern PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR;\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\nextern PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || 
(defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n/* VOLK_GENERATE_PROTOTYPES_H */\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n\n#ifdef VOLK_IMPLEMENTATION\n#undef VOLK_IMPLEMENTATION\n/* Prevent tools like dependency checkers from detecting a cyclic dependency */\n#define VOLK_SOURCE \"volk.c\"\n#include VOLK_SOURCE\n#endif\n\n/**\n * Copyright (c) 2018-2025 Arseny Kapoulkine\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n*/\n/* clang-format on */\n"], ["/lsfg-vk/src/config/config.cpp", "#include \"config/config.hpp\"\n#include \"common/exception.hpp\"\n\n#include \"config/default_conf.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Config;\n\nnamespace {\n Configuration globalConf{};\n std::optional> gameConfs;\n}\n\nConfiguration Config::activeConf{};\n\nnamespace {\n /// Turn a string into a VkPresentModeKHR enum value.\n VkPresentModeKHR into_present(const std::string& mode) {\n if (mode == \"fifo\" || mode == \"vsync\")\n return VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR;\n if (mode == \"mailbox\")\n return VkPresentModeKHR::VK_PRESENT_MODE_MAILBOX_KHR;\n if (mode == \"immediate\")\n return VkPresentModeKHR::VK_PRESENT_MODE_IMMEDIATE_KHR;\n return VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR;\n }\n}\n\nvoid Config::updateConfig(const std::string& file) {\n if (!std::filesystem::exists(file)) {\n std::cerr << \"lsfg-vk: Placing default configuration file at \" << file << '\\n';\n const auto parent = std::filesystem::path(file).parent_path();\n if (!std::filesystem::exists(parent))\n if (!std::filesystem::create_directories(parent))\n throw std::runtime_error(\"Unable to create configuration directory at \" + parent.string());\n\n std::ofstream out(file);\n if (!out.is_open())\n throw std::runtime_error(\"Unable to create configuration file at \" + file);\n out << DEFAULT_CONFIG;\n out.close();\n }\n\n // parse config file\n std::optional parsed;\n try {\n parsed.emplace(toml::parse(file));\n if (!parsed->contains(\"version\"))\n throw 
std::runtime_error(\"Configuration file is missing 'version' field\");\n if (parsed->at(\"version\").as_integer() != 1)\n throw std::runtime_error(\"Configuration file version is not supported, expected 1\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Unable to parse configuration file\", e);\n }\n auto& toml = *parsed;\n\n // parse global configuration\n const toml::value globalTable = toml::find_or_default(toml, \"global\");\n const Configuration global{\n .dll = toml::find_or(globalTable, \"dll\", std::string()),\n .config_file = file,\n .timestamp = std::filesystem::last_write_time(file)\n };\n\n // validate global configuration\n if (global.multiplier < 2)\n throw std::runtime_error(\"Global Multiplier cannot be less than 2\");\n if (global.flowScale < 0.25F || global.flowScale > 1.0F)\n throw std::runtime_error(\"Flow scale must be between 0.25 and 1.0\");\n\n // parse game-specific configuration\n std::unordered_map games;\n const toml::value gamesList = toml::find_or_default(toml, \"game\");\n for (const auto& gameTable : gamesList.as_array()) {\n if (!gameTable.is_table())\n throw std::runtime_error(\"Invalid game configuration entry\");\n if (!gameTable.contains(\"exe\"))\n throw std::runtime_error(\"Game override missing 'exe' field\");\n\n const std::string exe = toml::find(gameTable, \"exe\");\n Configuration game{\n .enable = true,\n .dll = global.dll,\n .multiplier = toml::find_or(gameTable, \"multiplier\", 2U),\n .flowScale = toml::find_or(gameTable, \"flow_scale\", 1.0F),\n .performance = toml::find_or(gameTable, \"performance_mode\", false),\n .hdr = toml::find_or(gameTable, \"hdr_mode\", false),\n .e_present = into_present(toml::find_or(gameTable, \"experimental_present_mode\", \"\")),\n .config_file = file,\n .timestamp = global.timestamp\n };\n\n // validate the configuration\n if (game.multiplier < 1)\n throw std::runtime_error(\"Multiplier cannot be less than 1\");\n if (game.flowScale < 0.25F || game.flowScale > 
1.0F)\n throw std::runtime_error(\"Flow scale must be between 0.25 and 1.0\");\n games[exe] = std::move(game);\n }\n\n // store configurations\n globalConf = global;\n gameConfs = std::move(games);\n}\n\nConfiguration Config::getConfig(const std::pair& name) {\n // process legacy environment variables\n if (std::getenv(\"LSFG_LEGACY\")) {\n Configuration conf{\n .enable = true,\n .multiplier = 2,\n .flowScale = 1.0F,\n .e_present = VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR\n };\n\n const char* dll = std::getenv(\"LSFG_DLL_PATH\");\n if (dll) conf.dll = std::string(dll);\n const char* multiplier = std::getenv(\"LSFG_MULTIPLIER\");\n if (multiplier) conf.multiplier = std::stoul(multiplier);\n const char* flow_scale = std::getenv(\"LSFG_FLOW_SCALE\");\n if (flow_scale) conf.flowScale = std::stof(flow_scale);\n const char* performance = std::getenv(\"LSFG_PERFORMANCE_MODE\");\n if (performance) conf.performance = std::string(performance) == \"1\";\n const char* hdr = std::getenv(\"LSFG_HDR_MODE\");\n if (hdr) conf.hdr = std::string(hdr) == \"1\";\n const char* e_present = std::getenv(\"LSFG_EXPERIMENTAL_PRESENT_MODE\");\n if (e_present) conf.e_present = into_present(std::string(e_present));\n\n return conf;\n }\n\n // process new configuration system\n if (!gameConfs.has_value())\n return globalConf;\n\n const auto& games = *gameConfs;\n auto it = std::ranges::find_if(games, [&name](const auto& pair) {\n return name.first.ends_with(pair.first) || (name.second == pair.first);\n });\n if (it != games.end())\n return it->second;\n\n return globalConf;\n}\n"], ["/lsfg-vk/src/utils/benchmark.cpp", "#include \"utils/benchmark.hpp\"\n#include \"config/config.hpp\"\n#include \"extract/extract.hpp\"\n#include \"extract/trans.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Benchmark;\n\nvoid Benchmark::run(uint32_t width, uint32_t height) {\n const auto& conf = 
Config::activeConf;\n\n auto* lsfgInitialize = LSFG_3_1::initialize;\n auto* lsfgCreateContext = LSFG_3_1::createContext;\n auto* lsfgPresentContext = LSFG_3_1::presentContext;\n if (conf.performance) {\n lsfgInitialize = LSFG_3_1P::initialize;\n lsfgCreateContext = LSFG_3_1P::createContext;\n lsfgPresentContext = LSFG_3_1P::presentContext;\n }\n\n // create the benchmark context\n const char* lsfgDeviceUUID = std::getenv(\"LSFG_DEVICE_UUID\");\n const uint64_t deviceUUID = lsfgDeviceUUID\n ? std::stoull(std::string(lsfgDeviceUUID), nullptr, 16) : 0x1463ABAC;\n\n setenv(\"DISABLE_LSFG\", \"1\", 1); // NOLINT\n\n Extract::extractShaders();\n lsfgInitialize(\n deviceUUID, // some magic number if not given\n conf.hdr, 1.0F / conf.flowScale, conf.multiplier - 1,\n [](const std::string& name) -> std::vector {\n auto dxbc = Extract::getShader(name);\n auto spirv = Extract::translateShader(dxbc);\n return spirv;\n }\n );\n const int32_t ctx = lsfgCreateContext(-1, -1, {},\n { .width = width, .height = height },\n conf.hdr ? 
VK_FORMAT_R16G16B16A16_SFLOAT : VK_FORMAT_R8G8B8A8_UNORM\n );\n\n unsetenv(\"DISABLE_LSFG\"); // NOLINT\n\n // run the benchmark (run 8*n + 1 so the fences are waited on)\n const auto now = std::chrono::high_resolution_clock::now();\n const uint64_t iterations = 8 * 500UL;\n\n std::cerr << \"lsfg-vk: Benchmark started, running \" << iterations << \" iterations...\\n\";\n for (uint64_t count = 0; count < iterations + 1; count++) {\n lsfgPresentContext(ctx, -1, {});\n\n if (count % 50 == 0 && count > 0)\n std::cerr << \"lsfg-vk: \"\n << std::setprecision(2) << std::fixed\n << static_cast(count) / static_cast(iterations) * 100.0F\n << \"% done (\" << count + 1 << \"/\" << iterations << \")\\r\";\n }\n const auto then = std::chrono::high_resolution_clock::now();\n\n // print results\n const auto ms = std::chrono::duration_cast(then - now).count();\n\n const auto perIteration = static_cast(ms) / static_cast(iterations);\n\n const uint64_t totalGen = (conf.multiplier - 1) * iterations;\n const auto genFps = static_cast(totalGen) / (static_cast(ms) / 1000.0F);\n\n const uint64_t totalFrames = iterations * conf.multiplier;\n const auto totalFps = static_cast(totalFrames) / (static_cast(ms) / 1000.0F);\n\n std::cerr << \"lsfg-vk: Benchmark completed in \" << ms << \" ms\\n\";\n std::cerr << \" Time taken per real frame: \"\n << std::setprecision(2) << std::fixed << perIteration << \" ms\\n\";\n std::cerr << \" Generated \" << totalGen << \" frames in total at \"\n << std::setprecision(2) << std::fixed << genFps << \" FPS\\n\";\n std::cerr << \" Total of \" << totalFrames << \" frames presented at \"\n << std::setprecision(2) << std::fixed << totalFps << \" FPS\\n\";\n\n // sleep for a second, then exit\n std::this_thread::sleep_for(std::chrono::seconds(1));\n _exit(0);\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_compiler.h", "class DxbcCompilerHsPhase {\n public:\n DxbcCompiler(\n const std::string& fileName,\n const DxbcModuleInfo& moduleInfo,\n const 
DxbcProgramInfo& programInfo,\n const Rc& isgn,\n const Rc& osgn,\n const Rc& psgn,\n const DxbcAnalysisInfo& analysis) {\n // Declare an entry point ID. We'll need it during the\n // initialization phase where the execution mode is set.\n m_entryPointId = m_module.allocateId();\n \n // Set the shader name so that we recognize it in renderdoc\n m_module.setDebugSource(\n spv::SourceLanguageUnknown, 0,\n m_module.addDebugString(fileName.c_str()),\n nullptr);\n\n // Set the memory model. This is the same for all shaders.\n m_module.enableCapability(\n spv::CapabilityVulkanMemoryModel);\n\n m_module.setMemoryModel(\n spv::AddressingModelLogical,\n spv::MemoryModelVulkan);\n \n // Make sure our interface registers are clear\n for (uint32_t i = 0; i < DxbcMaxInterfaceRegs; i++) {\n m_vRegs.at(i) = DxbcRegisterPointer { };\n m_oRegs.at(i) = DxbcRegisterPointer { };\n }\n \n this->emitInit();\n }\n ~DxbcCompiler() {\n \n }\n void processInstruction(\n const DxbcShaderInstruction& ins) {\n m_lastOp = m_currOp;\n m_currOp = ins.op;\n\n switch (ins.opClass) {\n case DxbcInstClass::Declaration:\n return this->emitDcl(ins);\n \n case DxbcInstClass::CustomData:\n return this->emitCustomData(ins);\n \n case DxbcInstClass::Atomic:\n return this->emitAtomic(ins);\n \n case DxbcInstClass::AtomicCounter:\n return this->emitAtomicCounter(ins);\n \n case DxbcInstClass::Barrier:\n return this->emitBarrier(ins);\n \n case DxbcInstClass::BitExtract:\n return this->emitBitExtract(ins);\n \n case DxbcInstClass::BitInsert:\n return this->emitBitInsert(ins);\n \n case DxbcInstClass::BitScan:\n return this->emitBitScan(ins);\n \n case DxbcInstClass::BufferQuery:\n return this->emitBufferQuery(ins);\n \n case DxbcInstClass::BufferLoad:\n return this->emitBufferLoad(ins);\n \n case DxbcInstClass::BufferStore:\n return this->emitBufferStore(ins);\n \n case DxbcInstClass::ConvertFloat16:\n return this->emitConvertFloat16(ins);\n \n case DxbcInstClass::ConvertFloat64:\n return 
this->emitConvertFloat64(ins);\n \n case DxbcInstClass::ControlFlow:\n return this->emitControlFlow(ins);\n \n case DxbcInstClass::GeometryEmit:\n return this->emitGeometryEmit(ins);\n \n case DxbcInstClass::HullShaderPhase:\n return this->emitHullShaderPhase(ins);\n \n case DxbcInstClass::HullShaderInstCnt:\n return this->emitHullShaderInstCnt(ins);\n \n case DxbcInstClass::Interpolate:\n return this->emitInterpolate(ins);\n \n case DxbcInstClass::NoOperation:\n return;\n\n case DxbcInstClass::SparseCheckAccess:\n return this->emitSparseCheckAccess(ins);\n\n case DxbcInstClass::TextureQuery:\n return this->emitTextureQuery(ins);\n \n case DxbcInstClass::TextureQueryLod:\n return this->emitTextureQueryLod(ins);\n \n case DxbcInstClass::TextureQueryMs:\n return this->emitTextureQueryMs(ins);\n \n case DxbcInstClass::TextureQueryMsPos:\n return this->emitTextureQueryMsPos(ins);\n \n case DxbcInstClass::TextureFetch:\n return this->emitTextureFetch(ins);\n \n case DxbcInstClass::TextureGather:\n return this->emitTextureGather(ins);\n \n case DxbcInstClass::TextureSample:\n return this->emitTextureSample(ins);\n \n case DxbcInstClass::TypedUavLoad:\n return this->emitTypedUavLoad(ins);\n \n case DxbcInstClass::TypedUavStore:\n return this->emitTypedUavStore(ins);\n \n case DxbcInstClass::VectorAlu:\n return this->emitVectorAlu(ins);\n \n case DxbcInstClass::VectorCmov:\n return this->emitVectorCmov(ins);\n \n case DxbcInstClass::VectorCmp:\n return this->emitVectorCmp(ins);\n \n case DxbcInstClass::VectorDeriv:\n return this->emitVectorDeriv(ins);\n \n case DxbcInstClass::VectorDot:\n return this->emitVectorDot(ins);\n \n case DxbcInstClass::VectorIdiv:\n return this->emitVectorIdiv(ins);\n \n case DxbcInstClass::VectorImul:\n return this->emitVectorImul(ins);\n \n case DxbcInstClass::VectorMsad:\n return this->emitVectorMsad(ins);\n \n case DxbcInstClass::VectorShift:\n return this->emitVectorShift(ins);\n \n case DxbcInstClass::VectorSinCos:\n return 
this->emitVectorSinCos(ins);\n \n default:\n Logger::warn(\n str::format(\"DxbcCompiler: Unhandled opcode class: \",\n ins.op));\n }\n }\n void processXfbPassthrough() {\n m_module.setExecutionMode (m_entryPointId, spv::ExecutionModeInputPoints);\n m_module.setExecutionMode (m_entryPointId, spv::ExecutionModeOutputPoints);\n m_module.setOutputVertices(m_entryPointId, 1);\n\n for (auto e = m_isgn->begin(); e != m_isgn->end(); e++) {\n emitDclInput(e->registerId, 1,\n e->componentMask, DxbcSystemValue::None,\n DxbcInterpolationMode::Undefined);\n }\n\n // Figure out which streams to enable\n uint32_t streamMask = 0;\n\n for (size_t i = 0; i < m_xfbVars.size(); i++)\n streamMask |= 1u << m_xfbVars[i].streamId;\n \n for (uint32_t streamId : bit::BitMask(streamMask)) {\n emitXfbOutputSetup(streamId, true);\n m_module.opEmitVertex(m_module.constu32(streamId));\n }\n\n // End the main function\n emitFunctionEnd();\n\n // For pass-through we always assume points\n m_inputTopology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;\n }\n SpirvCodeBuffer finalize() {\n // Depending on the shader type, this will prepare\n // input registers, call various shader functions\n // and write back the output registers.\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: this->emitVsFinalize(); break;\n case DxbcProgramType::HullShader: this->emitHsFinalize(); break;\n case DxbcProgramType::DomainShader: this->emitDsFinalize(); break;\n case DxbcProgramType::GeometryShader: this->emitGsFinalize(); break;\n case DxbcProgramType::PixelShader: this->emitPsFinalize(); break;\n case DxbcProgramType::ComputeShader: this->emitCsFinalize(); break;\n default: throw DxvkError(\"Invalid shader stage\");\n }\n\n // Emit float control mode if the extension is supported\n this->emitFloatControl();\n \n // Declare the entry point, we now have all the\n // information we need, including the interfaces\n m_module.addEntryPoint(m_entryPointId,\n m_programInfo.executionModel(), \"main\");\n 
m_module.setDebugName(m_entryPointId, \"main\");\n\n return m_module.compile();\n }\n private:\n DxbcModuleInfo m_moduleInfo;\n DxbcProgramInfo m_programInfo;\n SpirvModule m_module;\n Rc m_isgn;\n Rc m_osgn;\n Rc m_psgn;\n const DxbcAnalysisInfo* m_analysis;\n std::vector m_bindings;\n std::vector m_rRegs;\n std::vector m_xRegs;\n std::vector m_gRegs;\n std::array<\n DxbcRegisterPointer,\n DxbcMaxInterfaceRegs> m_vRegs;\n std::vector m_vMappings;\n std::array<\n DxbcRegisterPointer,\n DxbcMaxInterfaceRegs> m_oRegs;\n std::vector m_oMappings;\n std::vector m_xfbVars;\n std::vector m_indexRanges = { };\n std::array m_constantBuffers;\n std::array m_samplers;\n std::array m_textures;\n std::array m_uavs;\n bool m_hasGloballyCoherentUav = false;\n bool m_hasRasterizerOrderedUav = false;\n std::vector m_controlFlowBlocks;\n bool m_topLevelIsUniform = true;\n uint64_t m_uavRdMask = 0u;\n uint64_t m_uavWrMask = 0u;\n bool m_insideFunction = false;\n uint32_t m_vArrayLength = 0;\n uint32_t m_vArrayLengthId = 0;\n uint32_t m_vArray = 0;\n uint32_t m_positionIn = 0;\n uint32_t m_positionOut = 0;\n uint32_t m_clipDistances = 0;\n uint32_t m_cullDistances = 0;\n uint32_t m_primitiveIdIn = 0;\n uint32_t m_primitiveIdOut = 0;\n uint32_t m_icbArray = 0;\n std::vector m_icbData;\n uint32_t m_icbComponents = 0u;\n uint32_t m_icbSize = 0u;\n uint32_t m_samplePositions = 0;\n uint32_t m_uavCtrStructType = 0;\n uint32_t m_uavCtrPointerType = 0;\n std::unordered_map m_subroutines;\n uint32_t m_entryPointId = 0;\n bool m_hasRawAccessChains = false;\n uint32_t m_inputMask = 0u;\n uint32_t m_outputMask = 0u;\n DxbcCompilerVsPart m_vs;\n DxbcCompilerHsPart m_hs;\n DxbcCompilerDsPart m_ds;\n DxbcCompilerGsPart m_gs;\n DxbcCompilerPsPart m_ps;\n DxbcCompilerCsPart m_cs;\n bool m_precise = true;\n DxbcOpcode m_lastOp = DxbcOpcode::Nop;\n DxbcOpcode m_currOp = DxbcOpcode::Nop;\n VkPrimitiveTopology m_inputTopology = VK_PRIMITIVE_TOPOLOGY_MAX_ENUM;\n VkPrimitiveTopology m_outputTopology = 
VK_PRIMITIVE_TOPOLOGY_MAX_ENUM;\n void emitDcl(\n const DxbcShaderInstruction& ins) {\n switch (ins.op) {\n case DxbcOpcode::DclGlobalFlags:\n return this->emitDclGlobalFlags(ins);\n \n case DxbcOpcode::DclIndexRange:\n return this->emitDclIndexRange(ins);\n \n case DxbcOpcode::DclTemps:\n return this->emitDclTemps(ins);\n \n case DxbcOpcode::DclIndexableTemp:\n return this->emitDclIndexableTemp(ins);\n \n case DxbcOpcode::DclInput:\n case DxbcOpcode::DclInputSgv:\n case DxbcOpcode::DclInputSiv:\n case DxbcOpcode::DclInputPs:\n case DxbcOpcode::DclInputPsSgv:\n case DxbcOpcode::DclInputPsSiv:\n case DxbcOpcode::DclOutput:\n case DxbcOpcode::DclOutputSgv:\n case DxbcOpcode::DclOutputSiv:\n return this->emitDclInterfaceReg(ins);\n \n case DxbcOpcode::DclConstantBuffer:\n return this->emitDclConstantBuffer(ins);\n \n case DxbcOpcode::DclSampler:\n return this->emitDclSampler(ins);\n \n case DxbcOpcode::DclStream:\n return this->emitDclStream(ins);\n \n case DxbcOpcode::DclUavTyped:\n case DxbcOpcode::DclResource:\n return this->emitDclResourceTyped(ins);\n \n case DxbcOpcode::DclUavRaw:\n case DxbcOpcode::DclResourceRaw:\n case DxbcOpcode::DclUavStructured:\n case DxbcOpcode::DclResourceStructured:\n return this->emitDclResourceRawStructured(ins);\n \n case DxbcOpcode::DclThreadGroupSharedMemoryRaw:\n case DxbcOpcode::DclThreadGroupSharedMemoryStructured:\n return this->emitDclThreadGroupSharedMemory(ins);\n \n case DxbcOpcode::DclGsInputPrimitive:\n return this->emitDclGsInputPrimitive(ins);\n \n case DxbcOpcode::DclGsOutputPrimitiveTopology:\n return this->emitDclGsOutputTopology(ins);\n \n case DxbcOpcode::DclMaxOutputVertexCount:\n return this->emitDclMaxOutputVertexCount(ins);\n \n case DxbcOpcode::DclInputControlPointCount:\n return this->emitDclInputControlPointCount(ins);\n \n case DxbcOpcode::DclOutputControlPointCount:\n return this->emitDclOutputControlPointCount(ins);\n \n case DxbcOpcode::DclHsMaxTessFactor:\n return this->emitDclHsMaxTessFactor(ins);\n 
\n case DxbcOpcode::DclTessDomain:\n return this->emitDclTessDomain(ins);\n \n case DxbcOpcode::DclTessPartitioning:\n return this->emitDclTessPartitioning(ins);\n \n case DxbcOpcode::DclTessOutputPrimitive:\n return this->emitDclTessOutputPrimitive(ins);\n \n case DxbcOpcode::DclThreadGroup:\n return this->emitDclThreadGroup(ins);\n \n case DxbcOpcode::DclGsInstanceCount:\n return this->emitDclGsInstanceCount(ins);\n \n default:\n Logger::warn(\n str::format(\"DxbcCompiler: Unhandled opcode: \",\n ins.op));\n }\n }\n void emitDclGlobalFlags(\n const DxbcShaderInstruction& ins) {\n const DxbcGlobalFlags flags = ins.controls.globalFlags();\n \n if (flags.test(DxbcGlobalFlag::RefactoringAllowed))\n m_precise = false;\n\n if (flags.test(DxbcGlobalFlag::EarlyFragmentTests))\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeEarlyFragmentTests);\n }\n void emitDclIndexRange(\n const DxbcShaderInstruction& ins) {\n // dcl_index_range has one operand:\n // (0) Range start, either an input or output register\n // (1) Range end\n uint32_t index = ins.dst[0].idxDim - 1u;\n\n DxbcIndexRange range = { };\n range.type = ins.dst[0].type;\n range.start = ins.dst[0].idx[index].offset;\n range.length = ins.imm[0].u32;\n\n m_indexRanges.push_back(range);\n }\n void emitDclTemps(\n const DxbcShaderInstruction& ins) {\n // dcl_temps has one operand:\n // (imm0) Number of temp registers\n\n // Ignore this and declare temps on demand.\n }\n void emitDclIndexableTemp(\n const DxbcShaderInstruction& ins) {\n // dcl_indexable_temps has three operands:\n // (imm0) Array register index (x#)\n // (imm1) Number of vectors stored in the array\n // (imm2) Component count of each individual vector. 
This is\n // always 4 in fxc-generated binaries and therefore useless.\n const uint32_t regId = ins.imm[0].u32;\n\n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = m_analysis->xRegMasks.at(regId).minComponents();\n info.type.alength = ins.imm[1].u32;\n info.sclass = spv::StorageClassPrivate;\n\n if (regId >= m_xRegs.size())\n m_xRegs.resize(regId + 1);\n \n m_xRegs.at(regId).ccount = info.type.ccount;\n m_xRegs.at(regId).alength = info.type.alength;\n m_xRegs.at(regId).varId = emitNewVariable(info);\n \n m_module.setDebugName(m_xRegs.at(regId).varId,\n str::format(\"x\", regId).c_str());\n }\n void emitDclInterfaceReg(\n const DxbcShaderInstruction& ins) {\n switch (ins.dst[0].type) {\n case DxbcOperandType::InputControlPoint:\n if (m_programInfo.type() != DxbcProgramType::HullShader)\n break;\n [[fallthrough]];\n\n case DxbcOperandType::Input:\n case DxbcOperandType::Output: {\n // dcl_input and dcl_output instructions\n // have the following operands:\n // (dst0) The register to declare\n // (imm0) The system value (optional)\n uint32_t regDim = 0;\n uint32_t regIdx = 0;\n \n // In the vertex and fragment shader stage, the\n // operand indices will have the following format:\n // (0) Register index\n // \n // In other stages, the input and output registers\n // may be declared as arrays of a fixed size:\n // (0) Array length\n // (1) Register index\n if (ins.dst[0].idxDim == 2) {\n regDim = ins.dst[0].idx[0].offset;\n regIdx = ins.dst[0].idx[1].offset;\n } else if (ins.dst[0].idxDim == 1) {\n regIdx = ins.dst[0].idx[0].offset;\n } else {\n Logger::err(str::format(\n \"DxbcCompiler: \", ins.op,\n \": Invalid index dimension\"));\n return;\n }\n \n // This declaration may map an output register to a system\n // value. 
If that is the case, the system value type will\n // be stored in the second operand.\n const bool hasSv =\n ins.op == DxbcOpcode::DclInputSgv\n || ins.op == DxbcOpcode::DclInputSiv\n || ins.op == DxbcOpcode::DclInputPsSgv\n || ins.op == DxbcOpcode::DclInputPsSiv\n || ins.op == DxbcOpcode::DclOutputSgv\n || ins.op == DxbcOpcode::DclOutputSiv;\n \n DxbcSystemValue sv = DxbcSystemValue::None;\n \n if (hasSv)\n sv = static_cast(ins.imm[0].u32);\n \n // In the pixel shader, inputs are declared with an\n // interpolation mode that is part of the op token.\n const bool hasInterpolationMode =\n ins.op == DxbcOpcode::DclInputPs\n || ins.op == DxbcOpcode::DclInputPsSiv;\n \n DxbcInterpolationMode im = DxbcInterpolationMode::Undefined;\n \n if (hasInterpolationMode)\n im = ins.controls.interpolation();\n \n // Declare the actual input/output variable\n switch (ins.op) {\n case DxbcOpcode::DclInput:\n case DxbcOpcode::DclInputSgv:\n case DxbcOpcode::DclInputSiv:\n case DxbcOpcode::DclInputPs:\n case DxbcOpcode::DclInputPsSgv:\n case DxbcOpcode::DclInputPsSiv:\n this->emitDclInput(regIdx, regDim, ins.dst[0].mask, sv, im);\n break;\n \n case DxbcOpcode::DclOutput:\n case DxbcOpcode::DclOutputSgv:\n case DxbcOpcode::DclOutputSiv:\n this->emitDclOutput(regIdx, regDim, ins.dst[0].mask, sv, im);\n break;\n \n default:\n Logger::err(str::format(\n \"DxbcCompiler: Unexpected opcode: \",\n ins.op));\n }\n } break;\n \n case DxbcOperandType::InputThreadId: {\n m_cs.builtinGlobalInvocationId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInGlobalInvocationId,\n \"vThreadId\");\n } break;\n \n case DxbcOperandType::InputThreadGroupId: {\n m_cs.builtinWorkgroupId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInWorkgroupId,\n \"vThreadGroupId\");\n } break;\n \n case DxbcOperandType::InputThreadIdInGroup: {\n m_cs.builtinLocalInvocationId = emitNewBuiltinVariable({\n { 
          DxbcScalarType::Uint32, 3, 0 },
          spv::StorageClassInput },
          spv::BuiltInLocalInvocationId,
          "vThreadIdInGroup");
      } break;
      
      case DxbcOperandType::InputThreadIndexInGroup: {
        m_cs.builtinLocalInvocationIndex = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 0 },
          spv::StorageClassInput },
          spv::BuiltInLocalInvocationIndex,
          "vThreadIndexInGroup");
      } break;
      
      case DxbcOperandType::InputCoverageMask: {
        m_ps.builtinSampleMaskIn = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 1 },
          spv::StorageClassInput },
          spv::BuiltInSampleMask,
          "vCoverage");
      } break;
      
      case DxbcOperandType::OutputCoverageMask: {
        m_ps.builtinSampleMaskOut = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 1 },
          spv::StorageClassOutput },
          spv::BuiltInSampleMask,
          "oMask");
      } break;
      
      case DxbcOperandType::OutputDepth: {
        // Writing depth requires the DepthReplacing execution mode
        m_module.setExecutionMode(m_entryPointId,
          spv::ExecutionModeDepthReplacing);
        m_ps.builtinDepth = emitNewBuiltinVariable({
          { DxbcScalarType::Float32, 1, 0 },
          spv::StorageClassOutput },
          spv::BuiltInFragDepth,
          "oDepth");
      } break;
      
      case DxbcOperandType::OutputStencilRef: {
        // Stencil export is an EXT extension and needs
        // the corresponding capability and execution mode
        m_module.enableExtension("SPV_EXT_shader_stencil_export");
        m_module.enableCapability(spv::CapabilityStencilExportEXT);
        m_module.setExecutionMode(m_entryPointId,
          spv::ExecutionModeStencilRefReplacingEXT);
        m_ps.builtinStencilRef = emitNewBuiltinVariable({
          { DxbcScalarType::Sint32, 1, 0 },
          spv::StorageClassOutput },
          spv::BuiltInFragStencilRefEXT,
          "oStencilRef");
      } break;

      case DxbcOperandType::OutputDepthGe: {
        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthReplacing);
        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthGreater);
        m_ps.builtinDepth = emitNewBuiltinVariable({
          { DxbcScalarType::Float32, 1, 0 },
          spv::StorageClassOutput },
          spv::BuiltInFragDepth,
          "oDepthGe");
      } break;
      
      case DxbcOperandType::OutputDepthLe: {
        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthReplacing);
        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthLess);
        m_ps.builtinDepth = emitNewBuiltinVariable({
          { DxbcScalarType::Float32, 1, 0 },
          spv::StorageClassOutput },
          spv::BuiltInFragDepth,
          "oDepthLe");
      } break;
      
      case DxbcOperandType::InputPrimitiveId: {
        m_primitiveIdIn = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 0 },
          spv::StorageClassInput },
          spv::BuiltInPrimitiveId,
          "vPrim");
      } break;
      
      case DxbcOperandType::InputDomainPoint: {
        m_ds.builtinTessCoord = emitNewBuiltinVariable({
          { DxbcScalarType::Float32, 3, 0 },
          spv::StorageClassInput },
          spv::BuiltInTessCoord,
          "vDomain");
      } break;
      
      case DxbcOperandType::InputForkInstanceId:
      case DxbcOperandType::InputJoinInstanceId: {
        // Store the phase instance ID in a function-local
        // variable so it can be read like a regular input
        auto phase = this->getCurrentHsForkJoinPhase();
        
        phase->instanceIdPtr = m_module.newVar(
          m_module.defPointerType(
            m_module.defIntType(32, 0),
            spv::StorageClassFunction),
          spv::StorageClassFunction);
        
        m_module.opStore(phase->instanceIdPtr, phase->instanceId);
        m_module.setDebugName(phase->instanceIdPtr,
          ins.dst[0].type == DxbcOperandType::InputForkInstanceId
            ? "vForkInstanceId" : "vJoinInstanceId");
      } break;
      
      case DxbcOperandType::OutputControlPointId: {
        // This system value maps to the invocation
        // ID, which has been declared already.
      } break;
      
      case DxbcOperandType::InputPatchConstant:
      case DxbcOperandType::OutputControlPoint: {
        // These have been declared as global input and
        // output arrays, so there's nothing left to do.
      } break;
      
      case DxbcOperandType::InputGsInstanceId: {
        m_gs.builtinInvocationId = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 0 },
          spv::StorageClassInput },
          spv::BuiltInInvocationId,
          "vInstanceID");
      } break;
      
      case DxbcOperandType::InputInnerCoverage: {
        m_module.enableExtension("SPV_EXT_fragment_fully_covered");
        m_module.enableCapability(spv::CapabilityFragmentFullyCoveredEXT);

        // This is bool in SPIR-V but uint32 in DXBC. A bool value of
        // false must be 0, and bit 1 must be set to represent true.
        uint32_t builtinId = emitNewBuiltinVariable({
          { DxbcScalarType::Bool, 1, 0 },
          spv::StorageClassInput },
          spv::BuiltInFullyCoveredEXT,
          nullptr);

        m_ps.builtinInnerCoverageId = emitNewVariable({
          { DxbcScalarType::Uint32, 1, 0 },
          spv::StorageClassPrivate });

        m_module.setDebugName(m_ps.builtinInnerCoverageId, "vInnerCoverage");

        uint32_t boolTypeId = m_module.defBoolType();
        uint32_t uintTypeId = m_module.defIntType(32, 0);

        // Convert the bool built-in to the uint representation
        m_module.opStore(m_ps.builtinInnerCoverageId,
          m_module.opSelect(uintTypeId,
            m_module.opLoad(boolTypeId, builtinId),
            m_module.constu32(1),
            m_module.constu32(0)));
      } break;

      default:
        Logger::err(str::format(
          "DxbcCompiler: Unsupported operand type declaration: ",
          ins.dst[0].type));
        
    }
  }
  // Declares a single input register, or records a system value
  // mapping if the register is tied to a system value.
  void emitDclInput(
    uint32_t regIdx,
    uint32_t regDim,
    DxbcRegMask regMask,
    DxbcSystemValue sv,
    DxbcInterpolationMode im) {
    // Avoid declaring the same variable multiple times.
    // This may happen when multiple system values are
    // mapped to different parts of the same register.
    if (m_vRegs.at(regIdx).id == 0 && sv == DxbcSystemValue::None) {
      const DxbcVectorType regType = getInputRegType(regIdx);
      
      DxbcRegisterInfo info;
      info.type.ctype   = regType.ctype;
      info.type.ccount  = regType.ccount;
      info.type.alength = regDim;
      info.sclass = spv::StorageClassInput;
      
      const uint32_t varId = emitNewVariable(info);
      
      m_module.decorateLocation(varId, regIdx);
      m_module.setDebugName(varId, str::format("v", regIdx).c_str());
      
      m_vRegs.at(regIdx) = { regType, varId };
      
      // Interpolation mode, used in pixel shaders
      if (im == DxbcInterpolationMode::Constant)
        m_module.decorate(varId, spv::DecorationFlat);
      
      if (im == DxbcInterpolationMode::LinearCentroid
       || im == DxbcInterpolationMode::LinearNoPerspectiveCentroid)
        m_module.decorate(varId, spv::DecorationCentroid);
      
      if (im == DxbcInterpolationMode::LinearNoPerspective
       || im == DxbcInterpolationMode::LinearNoPerspectiveCentroid
       || im == DxbcInterpolationMode::LinearNoPerspectiveSample)
        m_module.decorate(varId, spv::DecorationNoPerspective);
      
      if (im == DxbcInterpolationMode::LinearSample
       || im == DxbcInterpolationMode::LinearNoPerspectiveSample) {
        m_module.enableCapability(spv::CapabilitySampleRateShading);
        m_module.decorate(varId, spv::DecorationSample);
      }

      // Optionally promote plain linear inputs to sample rate
      if (m_moduleInfo.options.forceSampleRateShading) {
        if (im == DxbcInterpolationMode::Linear
         || im == DxbcInterpolationMode::LinearNoPerspective) {
          m_module.enableCapability(spv::CapabilitySampleRateShading);
          m_module.decorate(varId, spv::DecorationSample);
        }
      }

      // Declare the input slot as defined
      m_inputMask |= 1u << regIdx;
      m_vArrayLength = std::max(m_vArrayLength, regIdx + 1);
    } else if (sv != DxbcSystemValue::None) {
      // Add a new system value mapping if needed
      bool skipSv = sv == DxbcSystemValue::ClipDistance
                 || sv == DxbcSystemValue::CullDistance;
      
      if (!skipSv)
        m_vMappings.push_back({ regIdx, regMask, sv });
    }
  }
  void emitDclOutput(
    uint32_t
    regIdx,
    uint32_t regDim,
    DxbcRegMask regMask,
    DxbcSystemValue sv,
    DxbcInterpolationMode im) {
    // Add a new system value mapping if needed. Clip
    // and cull distances are handled separately.
    if (sv != DxbcSystemValue::None
     && sv != DxbcSystemValue::ClipDistance
     && sv != DxbcSystemValue::CullDistance)
      m_oMappings.push_back({ regIdx, regMask, sv });
    
    if (m_programInfo.type() == DxbcProgramType::HullShader) {
      // Hull shaders don't use standard outputs
      if (getCurrentHsForkJoinPhase() != nullptr)
        m_hs.outputPerPatchMask |= 1 << regIdx;
    } else if (m_oRegs.at(regIdx).id == 0) {
      // Avoid declaring the same variable multiple times.
      // This may happen when multiple system values are
      // mapped to different parts of the same register.
      const DxbcVectorType regType = getOutputRegType(regIdx);
      
      DxbcRegisterInfo info;
      info.type.ctype   = regType.ctype;
      info.type.ccount  = regType.ccount;
      info.type.alength = regDim;
      info.sclass = spv::StorageClassOutput;

      // In xfb mode, we set up the actual
      // output vars when emitting a vertex
      if (m_moduleInfo.xfb != nullptr)
        info.sclass = spv::StorageClassPrivate;
      
      // In geometry shaders, don't duplicate system value outputs
      // to stay within device limits. The pixel shader will read
      // all GS system value outputs as system value inputs.
      if (m_programInfo.type() == DxbcProgramType::GeometryShader && sv != DxbcSystemValue::None)
        info.sclass = spv::StorageClassPrivate;

      const uint32_t varId = this->emitNewVariable(info);
      m_module.setDebugName(varId, str::format("o", regIdx).c_str());
      
      if (info.sclass == spv::StorageClassOutput) {
        m_module.decorateLocation(varId, regIdx);

        // Add index decoration for potential dual-source blending
        if (m_programInfo.type() == DxbcProgramType::PixelShader)
          m_module.decorateIndex(varId, 0);

        // Declare vertex positions in all stages as invariant, even if
        // this is not the last stage, to help with potential Z fighting.
        if (sv == DxbcSystemValue::Position && m_moduleInfo.options.invariantPosition)
          m_module.decorate(varId, spv::DecorationInvariant);
      }
      
      m_oRegs.at(regIdx) = { regType, varId };
      
      // Declare the output slot as defined
      m_outputMask |= 1u << regIdx;
    }
  }
  void emitDclConstantBuffer(
    const DxbcShaderInstruction& ins) {
    // dcl_constant_buffer has one operand with two indices:
    // (0) Constant buffer register ID (cb#)
    // (1) Number of constants in the buffer
    uint32_t bufferId = ins.dst[0].idx[0].offset;
    uint32_t elementCount = ins.dst[0].idx[1].offset;

    // With dynamic indexing, games will often index constant buffers
    // out of bounds. Declare an upper bound to stay within spec.
    if (ins.controls.accessType() == DxbcConstantBufferAccessType::DynamicallyIndexed)
      elementCount = 4096;

    this->emitDclConstantBufferVar(bufferId, elementCount, 4u,
      str::format("cb", bufferId).c_str());
  }
  // Declares the actual SPIR-V uniform buffer variable and the
  // corresponding descriptor binding for a constant buffer.
  void emitDclConstantBufferVar(
    uint32_t regIdx,
    uint32_t numConstants,
    uint32_t numComponents,
    const char* name) {
    // Uniform buffer data is stored as a fixed-size array
    // of 4x32-bit vectors. SPIR-V requires explicit strides.
    const uint32_t arrayType = m_module.defArrayTypeUnique(
      getVectorTypeId({ DxbcScalarType::Float32, numComponents }),
      m_module.constu32(numConstants));
    m_module.decorateArrayStride(arrayType, sizeof(uint32_t) * numComponents);
    
    // SPIR-V requires us to put that array into a
    // struct and decorate that struct as a block.
    const uint32_t structType = m_module.defStructTypeUnique(1, &arrayType);
    
    m_module.decorate(structType, spv::DecorationBlock);
    m_module.memberDecorateOffset(structType, 0, 0);
    
    m_module.setDebugName (structType, str::format(name, "_t").c_str());
    m_module.setDebugMemberName (structType, 0, "m");
    
    // Variable that we'll use to access the buffer
    const uint32_t varId = m_module.newVar(
      m_module.defPointerType(structType, spv::StorageClassUniform),
      spv::StorageClassUniform);
    
    m_module.setDebugName(varId, name);
    
    // Compute the DXVK binding slot index for the buffer.
    // D3D11 needs to bind the actual buffers to this slot.
    uint32_t bindingId = computeConstantBufferBinding(
      m_programInfo.type(), regIdx);
    
    m_module.decorateDescriptorSet(varId, 0);
    m_module.decorateBinding(varId, bindingId);

    DxbcConstantBuffer buf;
    buf.varId = varId;
    buf.size = numConstants;
    m_constantBuffers.at(regIdx) = buf;
    
    // Store descriptor info for the shader interface
    DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER };
    binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;
    binding.access = VK_ACCESS_UNIFORM_READ_BIT;
    binding.resourceBinding = bindingId;
    binding.uboSet = true;
    m_bindings.push_back(binding);
  }
  void emitDclSampler(
    const DxbcShaderInstruction& ins) {
    // dclSampler takes one operand:
    // (dst0) The sampler register to declare
    const uint32_t samplerId = ins.dst[0].idx[0].offset;
    
    // The sampler type is opaque, but we still have to
    // define a pointer and a variable in order to use it
    const uint32_t samplerType =
m_module.defSamplerType();\n const uint32_t samplerPtrType = m_module.defPointerType(\n samplerType, spv::StorageClassUniformConstant);\n \n // Define the sampler variable\n const uint32_t varId = m_module.newVar(samplerPtrType,\n spv::StorageClassUniformConstant);\n m_module.setDebugName(varId,\n str::format(\"s\", samplerId).c_str());\n \n m_samplers.at(samplerId).varId = varId;\n m_samplers.at(samplerId).typeId = samplerType;\n \n // Compute binding slot index for the sampler\n uint32_t bindingId = computeSamplerBinding(\n m_programInfo.type(), samplerId);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_SAMPLER };\n binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.resourceBinding = bindingId;\n m_bindings.push_back(binding);\n }\n void emitDclStream(\n const DxbcShaderInstruction& ins) {\n if (ins.dst[0].idx[0].offset != 0 && m_moduleInfo.xfb == nullptr)\n Logger::err(\"Dxbc: Multiple streams not supported\");\n }\n void emitDclResourceTyped(\n const DxbcShaderInstruction& ins) {\n // dclResource takes two operands:\n // (dst0) The resource register ID\n // (imm0) The resource return type\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n \n // We also handle unordered access views here\n const bool isUav = ins.op == DxbcOpcode::DclUavTyped;\n \n if (isUav) {\n if (m_moduleInfo.options.supportsTypedUavLoadR32)\n m_module.enableCapability(spv::CapabilityStorageImageReadWithoutFormat);\n m_module.enableCapability(spv::CapabilityStorageImageWriteWithoutFormat);\n }\n \n // Defines the type of the resource (texture2D, ...)\n const DxbcResourceDim resourceType = ins.controls.resourceDim();\n \n // Defines the type of a read operation. 
DXBC has the ability\n // to define four different types whereas SPIR-V only allows\n // one, but in practice this should not be much of a problem.\n auto xType = static_cast(\n bit::extract(ins.imm[0].u32, 0, 3));\n auto yType = static_cast(\n bit::extract(ins.imm[0].u32, 4, 7));\n auto zType = static_cast(\n bit::extract(ins.imm[0].u32, 8, 11));\n auto wType = static_cast(\n bit::extract(ins.imm[0].u32, 12, 15));\n \n if ((xType != yType) || (xType != zType) || (xType != wType))\n Logger::warn(\"DxbcCompiler: dcl_resource: Ignoring resource return types\");\n \n // Declare the actual sampled type\n const DxbcScalarType sampledType = [xType] {\n switch (xType) {\n // FIXME is this correct? There's no documentation about it\n case DxbcResourceReturnType::Mixed: return DxbcScalarType::Uint32;\n // FIXME do we have to manually clamp writes to SNORM/UNORM resources?\n case DxbcResourceReturnType::Snorm: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Unorm: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Float: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Sint: return DxbcScalarType::Sint32;\n case DxbcResourceReturnType::Uint: return DxbcScalarType::Uint32;\n default: throw DxvkError(str::format(\"DxbcCompiler: Invalid sampled type: \", xType));\n }\n }();\n \n // Declare the resource type\n const uint32_t sampledTypeId = getScalarTypeId(sampledType);\n const DxbcImageInfo typeInfo = getResourceType(resourceType, isUav); \n \n // Declare additional capabilities if necessary\n switch (resourceType) {\n case DxbcResourceDim::Buffer:\n m_module.enableCapability(isUav\n ? spv::CapabilityImageBuffer\n : spv::CapabilitySampledBuffer);\n break;\n \n case DxbcResourceDim::Texture1D:\n case DxbcResourceDim::Texture1DArr:\n m_module.enableCapability(isUav\n ? 
spv::CapabilityImage1D\n : spv::CapabilitySampled1D);\n break;\n \n case DxbcResourceDim::TextureCubeArr:\n m_module.enableCapability(\n spv::CapabilitySampledCubeArray);\n break;\n \n default:\n // No additional capabilities required\n break;\n }\n \n // If the read-without-format capability is not set and this\n // image is access via a typed load, or if atomic operations\n // are used,, we must define the image format explicitly.\n spv::ImageFormat imageFormat = spv::ImageFormatUnknown;\n \n if (isUav) {\n if ((m_analysis->uavInfos[registerId].accessAtomicOp)\n || (m_analysis->uavInfos[registerId].accessTypedLoad\n && !m_moduleInfo.options.supportsTypedUavLoadR32))\n imageFormat = getScalarImageFormat(sampledType);\n }\n \n // We do not know whether the image is going to be used as\n // a color image or a depth image yet, but we can pick the\n // correct type when creating a sampled image object.\n const uint32_t imageTypeId = m_module.defImageType(sampledTypeId,\n typeInfo.dim, 0, typeInfo.array, typeInfo.ms, typeInfo.sampled,\n imageFormat);\n \n // We'll declare the texture variable with the color type\n // and decide which one to use when the texture is sampled.\n const uint32_t resourcePtrType = m_module.defPointerType(\n imageTypeId, spv::StorageClassUniformConstant);\n \n const uint32_t varId = m_module.newVar(resourcePtrType,\n spv::StorageClassUniformConstant);\n \n m_module.setDebugName(varId,\n str::format(isUav ? \"u\" : \"t\", registerId).c_str());\n \n // Compute the DXVK binding slot index for the resource.\n // D3D11 needs to bind the actual resource to this slot.\n uint32_t bindingId = isUav\n ? 
computeUavBinding(m_programInfo.type(), registerId)\n : computeSrvBinding(m_programInfo.type(), registerId);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n // Declare a specialization constant which will\n // store whether or not the resource is bound.\n if (isUav) {\n DxbcUav uav;\n uav.type = DxbcResourceType::Typed;\n uav.imageInfo = typeInfo;\n uav.varId = varId;\n uav.ctrId = 0;\n uav.sampledType = sampledType;\n uav.sampledTypeId = sampledTypeId;\n uav.imageTypeId = imageTypeId;\n uav.structStride = 0;\n uav.coherence = getUavCoherence(registerId, ins.controls.uavFlags());\n uav.isRawSsbo = false;\n m_uavs.at(registerId) = uav;\n } else {\n DxbcShaderResource res;\n res.type = DxbcResourceType::Typed;\n res.imageInfo = typeInfo;\n res.varId = varId;\n res.sampledType = sampledType;\n res.sampledTypeId = sampledTypeId;\n res.imageTypeId = imageTypeId;\n res.colorTypeId = imageTypeId;\n res.depthTypeId = 0;\n res.structStride = 0;\n res.isRawSsbo = false;\n \n if ((sampledType == DxbcScalarType::Float32)\n && (resourceType == DxbcResourceDim::Texture1D\n || resourceType == DxbcResourceDim::Texture1DArr\n || resourceType == DxbcResourceDim::Texture2D\n || resourceType == DxbcResourceDim::Texture2DArr\n || resourceType == DxbcResourceDim::TextureCube\n || resourceType == DxbcResourceDim::TextureCubeArr)) {\n res.depthTypeId = m_module.defImageType(sampledTypeId,\n typeInfo.dim, 1, typeInfo.array, typeInfo.ms, typeInfo.sampled,\n spv::ImageFormatUnknown);\n }\n \n m_textures.at(registerId) = res;\n }\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { };\n binding.viewType = typeInfo.vtype;\n binding.resourceBinding = bindingId;\n binding.isMultisampled = typeInfo.ms;\n\n if (isUav) {\n binding.descriptorType = resourceType == DxbcResourceDim::Buffer\n ? 
VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER\n : VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;\n binding.access = m_analysis->uavInfos[registerId].accessFlags;\n\n if (!m_analysis->uavInfos[registerId].nonInvariantAccess)\n binding.accessOp = m_analysis->uavInfos[registerId].accessOp;\n\n if (!(binding.access & VK_ACCESS_SHADER_WRITE_BIT))\n m_module.decorate(varId, spv::DecorationNonWritable);\n if (!(binding.access & VK_ACCESS_SHADER_READ_BIT))\n m_module.decorate(varId, spv::DecorationNonReadable);\n } else {\n binding.descriptorType = resourceType == DxbcResourceDim::Buffer\n ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER\n : VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;\n binding.access = VK_ACCESS_SHADER_READ_BIT;\n }\n\n m_bindings.push_back(binding);\n }\n void emitDclResourceRawStructured(\n const DxbcShaderInstruction& ins) {\n // dcl_resource_raw and dcl_uav_raw take one argument:\n // (dst0) The resource register ID\n // dcl_resource_structured and dcl_uav_structured take two arguments:\n // (dst0) The resource register ID\n // (imm0) Structure stride, in bytes\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n \n const bool isUav = ins.op == DxbcOpcode::DclUavRaw\n || ins.op == DxbcOpcode::DclUavStructured;\n \n const bool isStructured = ins.op == DxbcOpcode::DclUavStructured\n || ins.op == DxbcOpcode::DclResourceStructured;\n \n const DxbcScalarType sampledType = DxbcScalarType::Uint32;\n const uint32_t sampledTypeId = getScalarTypeId(sampledType);\n \n const DxbcImageInfo typeInfo = { spv::DimBuffer, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_MAX_ENUM };\n \n // Declare the resource type\n uint32_t resTypeId = 0;\n uint32_t varId = 0;\n \n // Write back resource info\n DxbcResourceType resType = isStructured\n ? DxbcResourceType::Structured\n : DxbcResourceType::Raw;\n \n uint32_t resStride = isStructured\n ? ins.imm[0].u32\n : 0;\n \n uint32_t resAlign = isStructured\n ? 
      (resStride & -resStride)
      : 16;
    
    // Compute the DXVK binding slot index for the resource.
    uint32_t bindingId = isUav
      ? computeUavBinding(m_programInfo.type(), registerId)
      : computeSrvBinding(m_programInfo.type(), registerId);
    
    // Test whether we should use a raw SSBO for this resource
    bool hasSparseFeedback = isUav
      ? m_analysis->uavInfos[registerId].sparseFeedback
      : m_analysis->srvInfos[registerId].sparseFeedback;

    bool useRawSsbo = m_moduleInfo.options.minSsboAlignment <= resAlign && !hasSparseFeedback;
    
    if (useRawSsbo) {
      // Raw SSBO path: runtime array of uints inside a block struct
      uint32_t elemType   = getScalarTypeId(DxbcScalarType::Uint32);
      uint32_t arrayType  = m_module.defRuntimeArrayTypeUnique(elemType);
      uint32_t structType = m_module.defStructTypeUnique(1, &arrayType);
      uint32_t ptrType    = m_module.defPointerType(structType, spv::StorageClassStorageBuffer);

      resTypeId = m_module.defPointerType(elemType, spv::StorageClassStorageBuffer);
      varId     = m_module.newVar(ptrType, spv::StorageClassStorageBuffer);
      
      m_module.decorateArrayStride(arrayType, sizeof(uint32_t));
      m_module.decorate(structType, spv::DecorationBlock);
      m_module.memberDecorateOffset(structType, 0, 0);

      m_module.setDebugName(structType,
        str::format(isUav ? "u" : "t", registerId, "_t").c_str());
      m_module.setDebugMemberName(structType, 0, "m");
    } else {
      // Structured and raw buffers are represented as
      // texel buffers consisting of 32-bit integers.
      m_module.enableCapability(isUav
        ? spv::CapabilityImageBuffer
        : spv::CapabilitySampledBuffer);
      
      resTypeId = m_module.defImageType(sampledTypeId,
        typeInfo.dim, 0, typeInfo.array, typeInfo.ms, typeInfo.sampled,
        spv::ImageFormatR32ui);
      
      varId = m_module.newVar(
        m_module.defPointerType(resTypeId, spv::StorageClassUniformConstant),
        spv::StorageClassUniformConstant);
    }

    m_module.setDebugName(varId,
      str::format(isUav ? "u" : "t", registerId).c_str());
    
    m_module.decorateDescriptorSet(varId, 0);
    m_module.decorateBinding(varId, bindingId);
    
    // Record resource info for later load/store code generation
    if (isUav) {
      DxbcUav uav;
      uav.type          = resType;
      uav.imageInfo     = typeInfo;
      uav.varId         = varId;
      uav.ctrId         = 0;
      uav.sampledType   = sampledType;
      uav.sampledTypeId = sampledTypeId;
      uav.imageTypeId   = resTypeId;
      uav.structStride  = resStride;
      uav.coherence     = getUavCoherence(registerId, ins.controls.uavFlags());
      uav.isRawSsbo     = useRawSsbo;
      m_uavs.at(registerId) = uav;
    } else {
      DxbcShaderResource res;
      res.type          = resType;
      res.imageInfo     = typeInfo;
      res.varId         = varId;
      res.sampledType   = sampledType;
      res.sampledTypeId = sampledTypeId;
      res.imageTypeId   = resTypeId;
      res.colorTypeId   = resTypeId;
      res.depthTypeId   = 0;
      res.structStride  = resStride;
      res.isRawSsbo     = useRawSsbo;
      m_textures.at(registerId) = res;
    }
    
    // Store descriptor info for the shader interface
    DxvkBindingInfo binding = { };
    binding.descriptorType = useRawSsbo
      ? VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
      : (isUav ? VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
    binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;
    binding.resourceBinding = bindingId;
    binding.access = VK_ACCESS_SHADER_READ_BIT;

    if (isUav) {
      binding.access = m_analysis->uavInfos[registerId].accessFlags;

      if (!m_analysis->uavInfos[registerId].nonInvariantAccess)
        binding.accessOp = m_analysis->uavInfos[registerId].accessOp;
    }

    if (useRawSsbo || isUav) {
      if (!(binding.access & VK_ACCESS_SHADER_WRITE_BIT))
        m_module.decorate(varId, spv::DecorationNonWritable);
      if (!(binding.access & VK_ACCESS_SHADER_READ_BIT))
        m_module.decorate(varId, spv::DecorationNonReadable);
    }

    m_bindings.push_back(binding);

    // If supported, we'll be using raw access chains to access this
    if (!m_hasRawAccessChains && m_moduleInfo.options.supportsRawAccessChains) {
      m_module.enableExtension("SPV_NV_raw_access_chains");
      m_module.enableCapability(spv::CapabilityRawAccessChainsNV);

      m_hasRawAccessChains = true;
    }
  }
  // Declares thread-group shared memory as a workgroup-storage
  // array of 32-bit uints.
  void emitDclThreadGroupSharedMemory(
    const DxbcShaderInstruction& ins) {
    // dcl_tgsm_raw takes two arguments:
    // (dst0) The resource register ID
    // (imm0) Block size, in bytes
    // dcl_tgsm_structured takes three arguments:
    // (dst0) The resource register ID
    // (imm0) Structure stride, in bytes
    // (imm1) Structure count
    const bool isStructured = ins.op == DxbcOpcode::DclThreadGroupSharedMemoryStructured;
    
    const uint32_t regId = ins.dst[0].idx[0].offset;
    
    // Grow the g# register array on demand
    if (regId >= m_gRegs.size())
      m_gRegs.resize(regId + 1);
    
    const uint32_t elementStride = isStructured ? ins.imm[0].u32 : 0;
    const uint32_t elementCount = isStructured ? ins.imm[1].u32 : ins.imm[0].u32;
    
    DxbcRegisterInfo varInfo;
    varInfo.type.ctype = DxbcScalarType::Uint32;
    varInfo.type.ccount = 1;
    varInfo.type.alength = isStructured
      ?
      elementCount * elementStride / 4
      : elementCount / 4;
    varInfo.sclass = spv::StorageClassWorkgroup;
    
    m_gRegs[regId].type = isStructured
      ? DxbcResourceType::Structured
      : DxbcResourceType::Raw;
    m_gRegs[regId].elementStride = elementStride;
    m_gRegs[regId].elementCount = elementCount;
    m_gRegs[regId].varId = emitNewVariable(varInfo);
    
    m_module.setDebugName(m_gRegs[regId].varId,
      str::format("g", regId).c_str());
  }
  void emitDclGsInputPrimitive(
    const DxbcShaderInstruction& ins) {
    // The input primitive type is stored within the
    // control bits of the opcode token. In SPIR-V, we
    // have to define an execution mode.
    const auto mode = [&] {
      switch (ins.controls.primitive()) {
        case DxbcPrimitive::Point:       return std::make_pair(VK_PRIMITIVE_TOPOLOGY_POINT_LIST, spv::ExecutionModeInputPoints);
        case DxbcPrimitive::Line:        return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeInputLines);
        case DxbcPrimitive::Triangle:    return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeTriangles);
        case DxbcPrimitive::LineAdj:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY, spv::ExecutionModeInputLinesAdjacency);
        case DxbcPrimitive::TriangleAdj: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY, spv::ExecutionModeInputTrianglesAdjacency);
        default: throw DxvkError("DxbcCompiler: Unsupported primitive type");
      }
    }();

    m_gs.inputPrimitive = ins.controls.primitive();
    m_module.setExecutionMode(m_entryPointId, mode.second);
    m_inputTopology = mode.first;
    
    // Input registers are arrays sized by the vertex count
    emitDclInputArray(primitiveVertexCount(m_gs.inputPrimitive));
  }
  void emitDclGsOutputTopology(
    const DxbcShaderInstruction& ins) {
    // The output primitive topology is stored within the
    // control bits of the opcode token. In SPIR-V, we have
    // to define an execution mode.
    auto mode = [&] {
      switch (ins.controls.primitiveTopology()) {
        case DxbcPrimitiveTopology::PointList:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_POINT_LIST, spv::ExecutionModeOutputPoints);
        case DxbcPrimitiveTopology::LineStrip:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeOutputLineStrip);
        case DxbcPrimitiveTopology::TriangleStrip: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeOutputTriangleStrip);
        default: throw DxvkError("DxbcCompiler: Unsupported primitive topology");
      }
    }();
    
    m_outputTopology = mode.first;
    m_module.setExecutionMode(m_entryPointId, mode.second);
  }
  void emitDclMaxOutputVertexCount(
    const DxbcShaderInstruction& ins) {
    // dcl_max_output_vertex_count has one operand:
    // (imm0) The maximum number of vertices
    m_gs.outputVertexCount = ins.imm[0].u32;
    
    m_module.setOutputVertices(m_entryPointId, m_gs.outputVertexCount);
  }
  void emitDclInputControlPointCount(
    const DxbcShaderInstruction& ins) {
    // dcl_input_control_points has the control point
    // count embedded within the opcode token.
    if (m_programInfo.type() == DxbcProgramType::HullShader) {
      m_hs.vertexCountIn = ins.controls.controlPointCount();
      
      emitDclInputArray(m_hs.vertexCountIn);
    } else {
      // Domain shader: declare per-patch and per-vertex inputs
      m_ds.vertexCountIn = ins.controls.controlPointCount();
      
      m_ds.inputPerPatch  = emitTessInterfacePerPatch (spv::StorageClassInput);
      m_ds.inputPerVertex = emitTessInterfacePerVertex(spv::StorageClassInput, m_ds.vertexCountIn);
    }
  }
  void emitDclOutputControlPointCount(
    const DxbcShaderInstruction& ins) {
    // dcl_output_control_points has the control point
    // count embedded within the opcode token.
    m_hs.vertexCountOut = ins.controls.controlPointCount();
    
    m_hs.outputPerPatch  = emitTessInterfacePerPatch(spv::StorageClassPrivate);
    m_hs.outputPerVertex = emitTessInterfacePerVertex(spv::StorageClassOutput, m_hs.vertexCountOut);
\n m_module.setOutputVertices(m_entryPointId, m_hs.vertexCountOut);\n }\n void emitDclHsMaxTessFactor(\n const DxbcShaderInstruction& ins) {\n m_hs.maxTessFactor = ins.imm[0].f32;\n }\n void emitDclTessDomain(\n const DxbcShaderInstruction& ins) {\n auto mode = [&] {\n switch (ins.controls.tessDomain()) {\n case DxbcTessDomain::Isolines: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeIsolines);\n case DxbcTessDomain::Triangles: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeTriangles);\n case DxbcTessDomain::Quads: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeQuads);\n default: throw DxvkError(\"Dxbc: Invalid tess domain\");\n }\n }();\n \n m_outputTopology = mode.first;\n m_module.setExecutionMode(m_entryPointId, mode.second);\n }\n void emitDclTessPartitioning(\n const DxbcShaderInstruction& ins) {\n const spv::ExecutionMode executionMode = [&] {\n switch (ins.controls.tessPartitioning()) {\n case DxbcTessPartitioning::Pow2:\n case DxbcTessPartitioning::Integer: return spv::ExecutionModeSpacingEqual;\n case DxbcTessPartitioning::FractOdd: return spv::ExecutionModeSpacingFractionalOdd;\n case DxbcTessPartitioning::FractEven: return spv::ExecutionModeSpacingFractionalEven;\n default: throw DxvkError(\"Dxbc: Invalid tess partitioning\");\n }\n }();\n \n m_module.setExecutionMode(m_entryPointId, executionMode);\n }\n void emitDclTessOutputPrimitive(\n const DxbcShaderInstruction& ins) {\n switch (ins.controls.tessOutputPrimitive()) {\n case DxbcTessOutputPrimitive::Point:\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModePointMode);\n break;\n \n case DxbcTessOutputPrimitive::Line:\n break;\n \n case DxbcTessOutputPrimitive::TriangleCw:\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeVertexOrderCw);\n break;\n \n case DxbcTessOutputPrimitive::TriangleCcw:\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeVertexOrderCcw);\n break;\n \n 
      default:
        throw DxvkError("Dxbc: Invalid tess output primitive");
    }
  }

  // Handles dcl_thread_group (compute shaders).
  void emitDclThreadGroup(
    const DxbcShaderInstruction& ins) {
    // dcl_thread_group has three operands:
    // (imm0) Number of threads in X dimension
    // (imm1) Number of threads in Y dimension
    // (imm2) Number of threads in Z dimension
    m_cs.workgroupSizeX = ins.imm[0].u32;
    m_cs.workgroupSizeY = ins.imm[1].u32;
    m_cs.workgroupSizeZ = ins.imm[2].u32;

    m_module.setLocalSize(m_entryPointId,
      ins.imm[0].u32, ins.imm[1].u32, ins.imm[2].u32);
  }

  // Handles dcl_gs_instance_count (geometry shader instancing).
  void emitDclGsInstanceCount(
    const DxbcShaderInstruction& ins) {
    // dcl_gs_instance_count has one operand:
    // (imm0) Number of geometry shader invocations
    m_module.setInvocations(m_entryPointId, ins.imm[0].u32);
    m_gs.invocationCount = ins.imm[0].u32;
  }

  // Declares the storage buffer that backs the atomic counter of UAV
  // register regId, lazily creating the shared struct type on first use.
  // Returns the SPIR-V variable id of the counter buffer.
  uint32_t emitDclUavCounter(
    uint32_t regId) {
    // Declare a structure type which holds the UAV counter
    if (m_uavCtrStructType == 0) {
      const uint32_t t_u32 = m_module.defIntType(32, 0);
      const uint32_t t_struct = m_module.defStructTypeUnique(1, &t_u32);

      m_module.decorate(t_struct, spv::DecorationBlock);
      m_module.memberDecorateOffset(t_struct, 0, 0);

      m_module.setDebugName (t_struct, "uav_meta");
      m_module.setDebugMemberName(t_struct, 0, "ctr");

      m_uavCtrStructType = t_struct;
      m_uavCtrPointerType = m_module.defPointerType(
        t_struct, spv::StorageClassStorageBuffer);
    }

    // Declare the buffer variable
    const uint32_t varId = m_module.newVar(
      m_uavCtrPointerType, spv::StorageClassStorageBuffer);

    m_module.setDebugName(varId,
      str::format("u", regId, "_meta").c_str());

    uint32_t bindingId = computeUavCounterBinding(
      m_programInfo.type(), regId);

    m_module.decorateDescriptorSet(varId, 0);
    m_module.decorateBinding(varId, bindingId);

    // Declare the storage buffer binding
    DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER };
    binding.resourceBinding = bindingId;
    binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;
    binding.access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
    m_bindings.push_back(binding);

    return varId;
  }

  // Handles the immediate-constant-buffer custom data block: determines
  // the narrowest usable component count, then either bakes the data into
  // the module or emits it as a uniform buffer depending on its size.
  void emitDclImmediateConstantBuffer(
    const DxbcShaderInstruction& ins) {
    if (m_icbArray)
      throw DxvkError("DxbcCompiler: Immediate constant buffer already declared");

    if ((ins.customDataSize & 0x3) != 0)
      throw DxvkError("DxbcCompiler: Immediate constant buffer size not a multiple of four DWORDs");

    // A lot of the time we'll be dealing with a scalar or vec2
    // array here, there's no reason to emit all those zeroes.
    uint32_t componentCount = 1u;

    for (uint32_t i = 0; i < ins.customDataSize; i += 4u) {
      for (uint32_t c = componentCount; c < 4u; c++) {
        if (ins.customData[i + c])
          componentCount = c + 1u;
      }

      if (componentCount == 4u)
        break;
    }

    uint32_t vectorCount = (ins.customDataSize / 4u);
    uint32_t dwordCount = vectorCount * componentCount;

    if (dwordCount <= Icb_MaxBakedDwords) {
      this->emitDclImmediateConstantBufferBaked(
        ins.customDataSize, ins.customData, componentCount);
    } else {
      this->emitDclImmediateConstantBufferUbo(
        ins.customDataSize, ins.customData, componentCount);
    }
  }

  // Bakes the immediate constant buffer into the module as a private
  // constant array of (componentCount)-wide uint vectors.
  void emitDclImmediateConstantBufferBaked(
    uint32_t dwordCount,
    const uint32_t* dwordArray,
    uint32_t componentCount) {
    // Declare individual vector constants as 4x32-bit vectors
    // NOTE(review): container template arguments appear to have been
    // stripped from this dump (small_vector<...>, std::array<...>) —
    // restore the element types from upstream before compiling.
    small_vector vectorIds;

    DxbcVectorType vecType;
    vecType.ctype = DxbcScalarType::Uint32;
    vecType.ccount = componentCount;

    uint32_t vectorTypeId = getVectorTypeId(vecType);

    for (uint32_t i = 0; i < dwordCount; i += 4u) {
      std::array scalarIds = { };

      for (uint32_t c = 0; c < componentCount; c++)
        scalarIds[c] = m_module.constu32(dwordArray[i + c]);

      uint32_t id = scalarIds[0];

      if (componentCount > 1u)
        id = m_module.constComposite(vectorTypeId, componentCount, scalarIds.data());

      vectorIds.push_back(id);
    }

    // Pad array with one entry of zeroes so
// that we can
    // handle out-of-bounds accesses more conveniently.
    vectorIds.push_back(emitBuildZeroVector(vecType).id);

    // Declare the array that contains all the vectors
    DxbcArrayType arrInfo;
    arrInfo.ctype = DxbcScalarType::Uint32;
    arrInfo.ccount = componentCount;
    arrInfo.alength = vectorIds.size();

    uint32_t arrayTypeId = getArrayTypeId(arrInfo);
    uint32_t arrayId = m_module.constComposite(
      arrayTypeId, vectorIds.size(), vectorIds.data());

    // Declare the variable that will hold the constant
    // data and initialize it with the constant array.
    uint32_t pointerTypeId = m_module.defPointerType(
      arrayTypeId, spv::StorageClassPrivate);

    m_icbArray = m_module.newVarInit(
      pointerTypeId, spv::StorageClassPrivate,
      arrayId);

    m_module.setDebugName(m_icbArray, "icb");
    m_module.decorate(m_icbArray, spv::DecorationNonWritable);

    m_icbComponents = componentCount;
    m_icbSize = dwordCount / 4u;
  }

  // Emits the immediate constant buffer as a constant-buffer binding
  // when the data is too large to bake into the SPIR-V module.
  void emitDclImmediateConstantBufferUbo(
    uint32_t dwordCount,
    const uint32_t* dwordArray,
    uint32_t componentCount) {
    uint32_t vectorCount = dwordCount / 4u;

    // Tightly pack vec2 or scalar arrays if possible. Don't bother with
    // vec3 since we'd rather have properly vectorized loads in that case.
    if (m_moduleInfo.options.supportsTightIcbPacking && componentCount <= 2u)
      m_icbComponents = componentCount;
    else
      m_icbComponents = 4u;

    // Immediate constant buffer can be read out of bounds, declare
    // it with the maximum possible size and rely on robustness.
    this->emitDclConstantBufferVar(Icb_BindingSlotId, 4096u, m_icbComponents, "icb");

    m_icbData.reserve(vectorCount * componentCount);

    for (uint32_t i = 0; i < dwordCount; i += 4u) {
      for (uint32_t c = 0; c < m_icbComponents; c++)
        m_icbData.push_back(dwordArray[i + c]);
    }

    m_icbSize = vectorCount;
  }

  // Dispatches custom-data blocks; only immediate constant buffers
  // are currently handled, everything else is logged and skipped.
  void emitCustomData(
    const DxbcShaderInstruction& ins) {
    switch (ins.customDataType) {
      case DxbcCustomDataClass::ImmConstBuf:
        return emitDclImmediateConstantBuffer(ins);

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unsupported custom data block: ",
          ins.customDataType));
    }
  }

  // Generic handler for vector ALU instructions (mov/add/mul/...),
  // including the 64-bit (double) opcode variants.
  void emitVectorAlu(
    const DxbcShaderInstruction& ins) {
    // NOTE(review): std::array template arguments stripped in this dump.
    std::array src;

    for (uint32_t i = 0; i < ins.srcCount; i++)
      src.at(i) = emitRegisterLoad(ins.src[i], ins.dst[0].mask);

    DxbcRegisterValue dst;
    dst.type.ctype = ins.dst[0].dataType;
    dst.type.ccount = ins.dst[0].mask.popCount();

    // Doubles take two write-mask components per value
    if (isDoubleType(ins.dst[0].dataType))
      dst.type.ccount /= 2;

    const uint32_t typeId = getVectorTypeId(dst.type);

    switch (ins.op) {
      /////////////////////
      // Move instructions
      case DxbcOpcode::Mov:
      case DxbcOpcode::DMov:
        dst.id = src.at(0).id;
        break;

      /////////////////////////////////////
      // ALU operations on float32 numbers
      case DxbcOpcode::Add:
      case DxbcOpcode::DAdd:
        dst.id = m_module.opFAdd(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Div:
      case DxbcOpcode::DDiv:
        dst.id = m_module.opFDiv(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Exp:
        dst.id = m_module.opExp2(
          typeId, src.at(0).id);
        break;

case DxbcOpcode::Frc:
        dst.id = m_module.opFract(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Log:
        dst.id = m_module.opLog2(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Mad:
      case DxbcOpcode::DFma:
        if (ins.controls.precise()) {
          // FXC only emits precise mad if the shader explicitly uses
          // the HLSL mad()/fma() intrinsics, let's preserve that.
          dst.id = m_module.opFFma(typeId,
            src.at(0).id, src.at(1).id, src.at(2).id);
        } else {
          dst.id = m_module.opFMul(typeId, src.at(0).id, src.at(1).id);
          dst.id = m_module.opFAdd(typeId, dst.id, src.at(2).id);
        }
        break;

      case DxbcOpcode::Max:
      case DxbcOpcode::DMax:
        dst.id = m_module.opNMax(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Min:
      case DxbcOpcode::DMin:
        dst.id = m_module.opNMin(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Mul:
      case DxbcOpcode::DMul:
        dst.id = m_module.opFMul(typeId,
          src.at(0).id, src.at(1).id);
        break;

      // rcp is lowered to 1.0 / x
      case DxbcOpcode::Rcp:
        dst.id = m_module.opFDiv(typeId,
          emitBuildConstVecf32(
            1.0f, 1.0f, 1.0f, 1.0f,
            ins.dst[0].mask).id,
          src.at(0).id);
        break;

      case DxbcOpcode::DRcp:
        dst.id = m_module.opFDiv(typeId,
          emitBuildConstVecf64(1.0, 1.0,
            ins.dst[0].mask).id,
          src.at(0).id);
        break;

      case DxbcOpcode::RoundNe:
        dst.id = m_module.opRoundEven(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::RoundNi:
        dst.id = m_module.opFloor(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::RoundPi:
        dst.id = m_module.opCeil(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::RoundZ:
        dst.id = m_module.opTrunc(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Rsq:
        dst.id = m_module.opInverseSqrt(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Sqrt:
        dst.id = m_module.opSqrt(
          typeId, src.at(0).id);
        break;

      /////////////////////////////////////
      // ALU operations on signed integers
      case DxbcOpcode::IAdd:
        dst.id = m_module.opIAdd(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::IMad:
      case DxbcOpcode::UMad:
        dst.id = m_module.opIAdd(typeId,
          m_module.opIMul(typeId,
            src.at(0).id, src.at(1).id),
          src.at(2).id);
        break;

      case DxbcOpcode::IMax:
        dst.id = m_module.opSMax(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::IMin:
        dst.id = m_module.opSMin(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::INeg:
        dst.id = m_module.opSNegate(
          typeId, src.at(0).id);
        break;

      ///////////////////////////////////////
      // ALU operations on unsigned integers
      case DxbcOpcode::UMax:
        dst.id = m_module.opUMax(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::UMin:
        dst.id = m_module.opUMin(typeId,
          src.at(0).id, src.at(1).id);
        break;

      ///////////////////////////////////////
      // Bit operations on unsigned integers
      case DxbcOpcode::And:
        dst.id = m_module.opBitwiseAnd(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Not:
        dst.id = m_module.opNot(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Or:
        dst.id = m_module.opBitwiseOr(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Xor:
        dst.id = m_module.opBitwiseXor(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::CountBits:
        dst.id = m_module.opBitCount(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::BfRev:
        dst.id = m_module.opBitReverse(
          typeId, src.at(0).id);
        break;

      ///////////////////////////
      // Conversion instructions
      case DxbcOpcode::ItoF:
        dst.id = m_module.opConvertStoF(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::UtoF:
        dst.id = m_module.opConvertUtoF(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::FtoI:
        dst.id = m_module.opConvertFtoS(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::FtoU:
        dst.id = m_module.opConvertFtoU(
          typeId, src.at(0).id);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    if (ins.controls.precise() || m_precise)
      m_module.decorate(dst.id, spv::DecorationNoContraction);

    // Store computed value
    dst = emitDstOperandModifiers(dst, ins.modifiers);
    emitRegisterStore(ins.dst[0], dst);
  }

  // Handles movc and swapc (conditional select / conditional swap).
  void emitVectorCmov(
    const DxbcShaderInstruction& ins) {
    // movc and swapc have the following operands:
    // (dst0) The first destination register
    // (dst1) The second destination register (swapc only)
    // (src0) The condition vector
    // (src1) Vector to select from if the condition is not 0
    // (src2) Vector to select from if the condition is 0
    DxbcRegMask condMask = ins.dst[0].mask;

    // Doubles occupy two mask components per value, so fold the mask
    if (ins.dst[0].dataType == DxbcScalarType::Float64) {
      condMask = DxbcRegMask(
        condMask[0] && condMask[1],
        condMask[2] && condMask[3],
        false, false);
    }

    const DxbcRegisterValue condition = emitRegisterLoad(ins.src[0], condMask);
    const DxbcRegisterValue selectTrue = emitRegisterLoad(ins.src[1], ins.dst[0].mask);
    const DxbcRegisterValue selectFalse = emitRegisterLoad(ins.src[2], ins.dst[0].mask);

    uint32_t componentCount = condMask.popCount();

    // We'll compare against a vector of zeroes to generate a
    // boolean vector, which in turn will be used by OpSelect
    uint32_t zeroType = m_module.defIntType(32, 0);
    uint32_t boolType = m_module.defBoolType();

    uint32_t zero = m_module.constu32(0);

    if (componentCount > 1) {
      zeroType = m_module.defVectorType(zeroType, componentCount);
      boolType = m_module.defVectorType(boolType, componentCount);

      // NOTE(review): std::array template arguments stripped in this dump.
      const std::array zeroVec = { zero, zero, zero, zero };
      zero = m_module.constComposite(zeroType, componentCount, zeroVec.data());
    }

    // In case of swapc, the second destination operand receives
    // the output that a cmov instruction would normally get
    const uint32_t trueIndex = ins.op == DxbcOpcode::Swapc ?
1 : 0;

    for (uint32_t i = 0; i < ins.dstCount; i++) {
      DxbcRegisterValue result;
      result.type.ctype = ins.dst[i].dataType;
      result.type.ccount = componentCount;
      result.id = m_module.opSelect(
        getVectorTypeId(result.type),
        m_module.opINotEqual(boolType, condition.id, zero),
        i == trueIndex ? selectTrue.id : selectFalse.id,
        i != trueIndex ? selectTrue.id : selectFalse.id);

      result = emitDstOperandModifiers(result, ins.modifiers);
      emitRegisterStore(ins.dst[i], result);
    }
  }

  // Handles comparison instructions; writes ~0u (true) or 0u (false)
  // per enabled component of the destination.
  void emitVectorCmp(
    const DxbcShaderInstruction& ins) {
    // Compare instructions have three operands:
    // (dst0) The destination register
    // (src0) The first vector to compare
    // (src1) The second vector to compare
    uint32_t componentCount = ins.dst[0].mask.popCount();

    // For 64-bit operations, we'll return a 32-bit
    // vector, so we have to adjust the read mask
    DxbcRegMask srcMask = ins.dst[0].mask;

    if (isDoubleType(ins.src[0].dataType)) {
      srcMask = DxbcRegMask(
        componentCount > 0, componentCount > 0,
        componentCount > 1, componentCount > 1);
    }

    // NOTE(review): std::array template arguments stripped in this dump.
    const std::array src = {
      emitRegisterLoad(ins.src[0], srcMask),
      emitRegisterLoad(ins.src[1], srcMask),
    };

    // Condition, which is a boolean vector used
    // to select between the ~0u and 0u vectors.
    uint32_t condition = 0;
    uint32_t conditionType = m_module.defBoolType();

    if (componentCount > 1)
      conditionType = m_module.defVectorType(conditionType, componentCount);

    bool invert = false;

    switch (ins.op) {
      // Ne is implemented as an inverted ordered Eq so NaN handling
      // matches D3D semantics.
      case DxbcOpcode::Ne:
      case DxbcOpcode::DNe:
        invert = true;
        [[fallthrough]];

      case DxbcOpcode::Eq:
      case DxbcOpcode::DEq:
        condition = m_module.opFOrdEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Ge:
      case DxbcOpcode::DGe:
        condition = m_module.opFOrdGreaterThanEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Lt:
      case DxbcOpcode::DLt:
        condition = m_module.opFOrdLessThan(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::IEq:
        condition = m_module.opIEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::IGe:
        condition = m_module.opSGreaterThanEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::ILt:
        condition = m_module.opSLessThan(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::INe:
        condition = m_module.opINotEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::UGe:
        condition = m_module.opUGreaterThanEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::ULt:
        condition = m_module.opULessThan(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    // Generate constant vectors for selection
    uint32_t sFalse = m_module.constu32( 0u);
    uint32_t sTrue = m_module.constu32(~0u);

    DxbcRegisterValue result;
    result.type.ctype = DxbcScalarType::Uint32;
    result.type.ccount = componentCount;

    const uint32_t typeId = getVectorTypeId(result.type);

    if (componentCount > 1) {
      const std::array vFalse = { sFalse, sFalse, sFalse, sFalse };
      const std::array vTrue = { sTrue, sTrue, sTrue, sTrue };

      sFalse = m_module.constComposite(typeId, componentCount, vFalse.data());
      sTrue = m_module.constComposite(typeId, componentCount, vTrue .data());
    }

    if (invert)
      std::swap(sFalse, sTrue);

    // Perform component-wise mask selection
    // based on the condition evaluated above.
    result.id = m_module.opSelect(
      typeId, condition, sTrue, sFalse);

    emitRegisterStore(ins.dst[0], result);
  }

  // Handles screen-space derivative instructions (pixel shaders).
  void emitVectorDeriv(
    const DxbcShaderInstruction& ins) {
    // Derivative instructions have two operands:
    // (dst0) Destination register for the derivative
    // (src0) The operand to compute the derivative of
    DxbcRegisterValue value = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
    const uint32_t typeId = getVectorTypeId(value.type);

    switch (ins.op) {
      case DxbcOpcode::DerivRtx:
        value.id = m_module.opDpdx(typeId, value.id);
        break;

      case DxbcOpcode::DerivRty:
        value.id = m_module.opDpdy(typeId, value.id);
        break;

      case DxbcOpcode::DerivRtxCoarse:
        value.id = m_module.opDpdxCoarse(typeId, value.id);
        break;

      case DxbcOpcode::DerivRtyCoarse:
        value.id = m_module.opDpdyCoarse(typeId, value.id);
        break;

      case DxbcOpcode::DerivRtxFine:
        value.id = m_module.opDpdxFine(typeId, value.id);
        break;

      case DxbcOpcode::DerivRtyFine:
        value.id = m_module.opDpdyFine(typeId, value.id);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    value = emitDstOperandModifiers(value, ins.modifiers);
    emitRegisterStore(ins.dst[0], value);
  }

  // Handles dp2/dp3/dp4 as an unrolled multiply-add chain.
  void emitVectorDot(
    const DxbcShaderInstruction& ins) {
    // Source mask width depends on the opcode (dp2 reads .xy, etc.)
    const DxbcRegMask srcMask(true,
      ins.op >= DxbcOpcode::Dp2,
      ins.op >= DxbcOpcode::Dp3,
      ins.op >= DxbcOpcode::Dp4);

    const std::array src = {
      emitRegisterLoad(ins.src[0], srcMask),
      emitRegisterLoad(ins.src[1], srcMask),
    };

    DxbcRegisterValue dst;
    dst.type.ctype = ins.dst[0].dataType;
    dst.type.ccount = 1;
    dst.id = 0;

    uint32_t componentType = getVectorTypeId(dst.type);
    uint32_t componentCount = srcMask.popCount();

    for (uint32_t i = 0; i < componentCount; i++) {
      if (dst.id) {
        dst.id = m_module.opFFma(componentType,
          m_module.opCompositeExtract(componentType, src.at(0).id, 1, &i),
          m_module.opCompositeExtract(componentType, src.at(1).id, 1, &i),
          dst.id);
      } else {
        dst.id = m_module.opFMul(componentType,
          m_module.opCompositeExtract(componentType, src.at(0).id, 1, &i),
          m_module.opCompositeExtract(componentType, src.at(1).id, 1, &i));
      }

      // Unconditionally mark as precise since the exact order of operation
      // matters for some games, even if the
// instruction itself is not marked
      // as precise.
      m_module.decorate(dst.id, spv::DecorationNoContraction);
    }

    dst = emitDstOperandModifiers(dst, ins.modifiers);
    emitRegisterStore(ins.dst[0], dst);
  }

  // Handles udiv, which writes quotient and remainder to two destinations.
  void emitVectorIdiv(
    const DxbcShaderInstruction& ins) {
    // udiv has four operands:
    // (dst0) Quotient destination register
    // (dst1) Remainder destination register
    // (src0) The first vector to compare
    // (src1) The second vector to compare
    if (ins.dst[0].type == DxbcOperandType::Null
     && ins.dst[1].type == DxbcOperandType::Null)
      return;

    // FIXME support this if applications require it
    if (ins.dst[0].type != DxbcOperandType::Null
     && ins.dst[1].type != DxbcOperandType::Null
     && ins.dst[0].mask != ins.dst[1].mask) {
      Logger::warn("DxbcCompiler: Idiv with different destination masks not supported");
      return;
    }

    // Load source operands as integers with the
    // mask of one non-NULL destination operand
    const DxbcRegMask srcMask =
      ins.dst[0].type != DxbcOperandType::Null
        ? ins.dst[0].mask
        : ins.dst[1].mask;

    // NOTE(review): std::array template arguments stripped in this dump.
    const std::array src = {
      emitRegisterLoad(ins.src[0], srcMask),
      emitRegisterLoad(ins.src[1], srcMask),
    };

    // Division by zero will return 0xffffffff for both results
    auto bvecId = getVectorTypeId({ DxbcScalarType::Bool, srcMask.popCount() });

    DxbcRegisterValue const0 = emitBuildConstVecu32( 0u, 0u, 0u, 0u, srcMask);
    DxbcRegisterValue constff = emitBuildConstVecu32(~0u, ~0u, ~0u, ~0u, srcMask);

    uint32_t cmpValue = m_module.opINotEqual(bvecId, src.at(1).id, const0.id);

    // Compute results only if the destination
    // operands are not NULL.
    if (ins.dst[0].type != DxbcOperandType::Null) {
      DxbcRegisterValue quotient;
      quotient.type.ctype = ins.dst[0].dataType;
      quotient.type.ccount = ins.dst[0].mask.popCount();

      quotient.id = m_module.opUDiv(
        getVectorTypeId(quotient.type),
        src.at(0).id, src.at(1).id);

      quotient.id = m_module.opSelect(
        getVectorTypeId(quotient.type),
        cmpValue, quotient.id, constff.id);

      quotient = emitDstOperandModifiers(quotient, ins.modifiers);
      emitRegisterStore(ins.dst[0], quotient);
    }

    if (ins.dst[1].type != DxbcOperandType::Null) {
      DxbcRegisterValue remainder;
      remainder.type.ctype = ins.dst[1].dataType;
      remainder.type.ccount = ins.dst[1].mask.popCount();

      remainder.id = m_module.opUMod(
        getVectorTypeId(remainder.type),
        src.at(0).id, src.at(1).id);

      remainder.id = m_module.opSelect(
        getVectorTypeId(remainder.type),
        cmpValue, remainder.id, constff.id);

      remainder = emitDstOperandModifiers(remainder, ins.modifiers);
      emitRegisterStore(ins.dst[1], remainder);
    }
  }

  // Handles imul/umul; only the low-half destination is implemented.
  void emitVectorImul(
    const DxbcShaderInstruction& ins) {
    // imul and umul have four operands:
    // (dst0) High destination register
    // (dst1) Low destination register
    // (src0) The first vector to compare
    // (src1) The second vector to compare
    if (ins.dst[0].type == DxbcOperandType::Null) {
      if (ins.dst[1].type == DxbcOperandType::Null)
        return;

      // If dst0 is NULL, this instruction behaves just
      // like any other three-operand ALU instruction
      const std::array src = {
        emitRegisterLoad(ins.src[0], ins.dst[1].mask),
        emitRegisterLoad(ins.src[1], ins.dst[1].mask),
      };

      DxbcRegisterValue result;
      result.type.ctype = ins.dst[1].dataType;
      result.type.ccount = ins.dst[1].mask.popCount();
      result.id = m_module.opIMul(
        getVectorTypeId(result.type),
        src.at(0).id, src.at(1).id);

      result = emitDstOperandModifiers(result, ins.modifiers);
      emitRegisterStore(ins.dst[1], result);
    } else {
      // TODO implement this
      Logger::warn("DxbcCompiler: Extended Imul not yet supported");
    }
  }

  // Handles msad (masked sum of absolute differences over packed uint8).
  void emitVectorMsad(
    const DxbcShaderInstruction& ins) {
    // msad has four operands:
    // (dst0) Destination
    // (src0) Reference (packed uint8)
    // (src1) Source (packed uint8)
    // (src2) Accumulator
    DxbcRegisterValue refReg = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
    DxbcRegisterValue srcReg = emitRegisterLoad(ins.src[1], ins.dst[0].mask);
    DxbcRegisterValue result = emitRegisterLoad(ins.src[2], ins.dst[0].mask);

    auto typeId = getVectorTypeId(result.type);
    auto bvecId = getVectorTypeId({ DxbcScalarType::Bool, result.type.ccount });

    // Process one byte lane per iteration; lanes whose reference
    // byte is zero are masked out of the accumulated sum.
    for (uint32_t i = 0; i < 4; i++) {
      auto shift = m_module.constu32(8 * i);
      auto count = m_module.constu32(8);

      auto ref = m_module.opBitFieldUExtract(typeId, refReg.id, shift, count);
      auto src = m_module.opBitFieldUExtract(typeId, srcReg.id, shift, count);

      auto zero = emitBuildConstVecu32(0, 0, 0, 0, ins.dst[0].mask);
      auto mask = m_module.opINotEqual(bvecId, ref, zero.id);

      auto diff = m_module.opSAbs(typeId, m_module.opISub(typeId, ref, src));
      result.id = m_module.opSelect(typeId, mask, m_module.opIAdd(typeId, result.id, diff), result.id);
    }

    result = emitDstOperandModifiers(result, ins.modifiers);
    emitRegisterStore(ins.dst[0], result);
  }

  // Handles shift instructions (ishl, ishr, ushr).
  void emitVectorShift(
    const DxbcShaderInstruction& ins) {
    // Shift operations have three operands:
    // (dst0) The destination register
    // (src0) The register to shift
    // (src1) The shift amount (scalar)
    DxbcRegisterValue shiftReg = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
    DxbcRegisterValue countReg = emitRegisterLoad(ins.src[1], ins.dst[0].mask);

    // Only the low five bits of the shift amount are used
    if (ins.src[1].type != DxbcOperandType::Imm32)
      countReg = emitRegisterMaskBits(countReg, 0x1F);

    if (countReg.type.ccount == 1)
      countReg = emitRegisterExtend(countReg, shiftReg.type.ccount);

    DxbcRegisterValue result;
    result.type.ctype = ins.dst[0].dataType;
    result.type.ccount = ins.dst[0].mask.popCount();

    switch (ins.op) {
      case DxbcOpcode::IShl:
        result.id = m_module.opShiftLeftLogical(
          getVectorTypeId(result.type),
          shiftReg.id, countReg.id);
        break;

      case DxbcOpcode::IShr:
        result.id = m_module.opShiftRightArithmetic(
          getVectorTypeId(result.type),
          shiftReg.id, countReg.id);
        break;

      case DxbcOpcode::UShr:
        result.id = m_module.opShiftRightLogical(
          getVectorTypeId(result.type),
          shiftReg.id, countReg.id);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    result = emitDstOperandModifiers(result, ins.modifiers);
    emitRegisterStore(ins.dst[0], result);
  }

  // Handles sincos, which writes sin(x) and cos(x) to two destinations.
  void emitVectorSinCos(
    const DxbcShaderInstruction& ins) {
    // sincos has three operands:
    // (dst0) Destination register for sin(x)
    // (dst1) Destination register for cos(x)
    // (src0) Source operand x

    // Load source operand as 32-bit float vector.
    const DxbcRegisterValue srcValue = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, true, true, true));

    uint32_t typeId = getScalarTypeId(srcValue.type.ctype);

    DxbcRegisterValue sinVector = { };
    sinVector.type.ctype = DxbcScalarType::Float32;

    DxbcRegisterValue cosVector = { };
    cosVector.type.ctype = DxbcScalarType::Float32;

    // Only compute sincos for enabled components
    // NOTE(review): std::array template arguments stripped in this dump.
    std::array sinIds = { };
    std::array cosIds = { };

    for
(uint32_t i = 0; i < 4; i++) {
      const uint32_t sinIndex = 0u;
      const uint32_t cosIndex = 1u;

      if (ins.dst[0].mask[i] || ins.dst[1].mask[i]) {
        uint32_t sincosId = m_module.opSinCos(m_module.opCompositeExtract(typeId, srcValue.id, 1u, &i), !m_moduleInfo.options.sincosEmulation);

        if (ins.dst[0].type != DxbcOperandType::Null && ins.dst[0].mask[i])
          sinIds[sinVector.type.ccount++] = m_module.opCompositeExtract(typeId, sincosId, 1u, &sinIndex);

        if (ins.dst[1].type != DxbcOperandType::Null && ins.dst[1].mask[i])
          cosIds[cosVector.type.ccount++] = m_module.opCompositeExtract(typeId, sincosId, 1u, &cosIndex);
      }
    }

    if (sinVector.type.ccount) {
      sinVector.id = sinVector.type.ccount > 1u
        ? m_module.opCompositeConstruct(getVectorTypeId(sinVector.type), sinVector.type.ccount, sinIds.data())
        : sinIds[0];

      emitRegisterStore(ins.dst[0], sinVector);
    }

    if (cosVector.type.ccount) {
      cosVector.id = cosVector.type.ccount > 1u
        ? m_module.opCompositeConstruct(getVectorTypeId(cosVector.type), cosVector.type.ccount, cosIds.data())
        : cosIds[0];

      emitRegisterStore(ins.dst[1], cosVector);
    }
  }

  // Handles emit/cut and their *_stream variants (geometry shaders).
  void emitGeometryEmit(
    const DxbcShaderInstruction& ins) {
    // In xfb mode we might have multiple streams, so
    // we have to figure out which stream to write to
    uint32_t streamId = 0;
    uint32_t streamVar = 0;

    if (m_moduleInfo.xfb != nullptr) {
      streamId = ins.dstCount > 0 ? ins.dst[0].idx[0].offset : 0;
      streamVar = m_module.constu32(streamId);
    }

    // Checking the negation is easier for EmitThenCut/EmitThenCutStream
    bool doEmit = ins.op != DxbcOpcode::Cut && ins.op != DxbcOpcode::CutStream;
    bool doCut = ins.op != DxbcOpcode::Emit && ins.op != DxbcOpcode::EmitStream;

    if (doEmit) {
      if (m_gs.needsOutputSetup)
        emitOutputSetup();
      emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);
      emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);
      emitXfbOutputSetup(streamId, false);
      m_module.opEmitVertex(streamVar);
    }

    if (doCut)
      m_module.opEndPrimitive(streamVar);
  }

  // Handles atomic_* and imm_atomic_* on UAVs and group-shared memory.
  void emitAtomic(
    const DxbcShaderInstruction& ins) {
    // atomic_* operations have the following operands:
    // (dst0) Destination u# or g# register
    // (src0) Index into the texture or buffer
    // (src1) The source value for the operation
    // (src2) Second source operand (optional)
    // imm_atomic_* operations have the following operands:
    // (dst0) Register that receives the result
    // (dst1) Destination u# or g# register
    // (srcX) As above
    const DxbcBufferInfo bufferInfo = getBufferInfo(ins.dst[ins.dstCount - 1]);

    bool isImm = ins.dstCount == 2;
    bool isUav = ins.dst[ins.dstCount - 1].type == DxbcOperandType::UnorderedAccessView;
    bool isSsbo = bufferInfo.isSsbo;

    // Retrieve destination pointer for the atomic operation
    const DxbcRegisterPointer pointer = emitGetAtomicPointer(
      ins.dst[ins.dstCount - 1], ins.src[0]);

    // Load source values
    // NOTE(review): std::array template arguments stripped in this dump.
    std::array src;

    for (uint32_t i = 1; i < ins.srcCount; i++) {
      src[i - 1] = emitRegisterBitcast(
        emitRegisterLoad(ins.src[i], DxbcRegMask(true, false, false, false)),
        pointer.type.ctype);
    }

    // Define memory scope and semantics based on the operands
    uint32_t scope = 0;
    uint32_t semantics = 0;

    if (isUav) {
      scope = spv::ScopeQueueFamily;
      semantics = spv::MemorySemanticsAcquireReleaseMask;

      semantics |= isSsbo
        ? spv::MemorySemanticsUniformMemoryMask
        : spv::MemorySemanticsImageMemoryMask;
    } else {
      scope = spv::ScopeWorkgroup;
      semantics = spv::MemorySemanticsWorkgroupMemoryMask
        | spv::MemorySemanticsAcquireReleaseMask;
    }

    const uint32_t scopeId = m_module.constu32(scope);
    const uint32_t semanticsId = m_module.constu32(semantics);

    // Perform the atomic operation on the given pointer
    DxbcRegisterValue value;
    value.type = pointer.type;
    value.id = 0;

    // The result type, which is a scalar integer
    const uint32_t typeId = getVectorTypeId(value.type);

    switch (ins.op) {
      case DxbcOpcode::AtomicCmpStore:
      case DxbcOpcode::ImmAtomicCmpExch:
        value.id = m_module.opAtomicCompareExchange(
          typeId, pointer.id, scopeId, semanticsId,
          m_module.constu32(spv::MemorySemanticsMaskNone),
          src[1].id, src[0].id);
        break;

      case DxbcOpcode::ImmAtomicExch:
        value.id = m_module.opAtomicExchange(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicIAdd:
      case DxbcOpcode::ImmAtomicIAdd:
        value.id = m_module.opAtomicIAdd(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicAnd:
      case DxbcOpcode::ImmAtomicAnd:
        value.id = m_module.opAtomicAnd(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicOr:
      case DxbcOpcode::ImmAtomicOr:
        value.id = m_module.opAtomicOr(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicXor:
      case DxbcOpcode::ImmAtomicXor:
        value.id = m_module.opAtomicXor(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicIMin:
      case DxbcOpcode::ImmAtomicIMin:
        value.id = m_module.opAtomicSMin(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicIMax:
      case DxbcOpcode::ImmAtomicIMax:
        value.id = m_module.opAtomicSMax(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicUMin:
      case DxbcOpcode::ImmAtomicUMin:
        value.id = m_module.opAtomicUMin(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicUMax:
      case DxbcOpcode::ImmAtomicUMax:
        value.id = m_module.opAtomicUMax(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    // Write back the result to the destination
    // register if this is an imm_atomic_* opcode.
    if (isImm)
      emitRegisterStore(ins.dst[0], value);
  }

  // Handles imm_atomic_alloc / imm_atomic_consume on UAV counters.
  void emitAtomicCounter(
    const DxbcShaderInstruction& ins) {
    // imm_atomic_alloc and imm_atomic_consume have the following operands:
    // (dst0) The register that will hold the old counter value
    // (dst1) The UAV whose counter is going to be modified
    const uint32_t registerId = ins.dst[1].idx[0].offset;

    // Lazily create the counter buffer on first use
    if (m_uavs.at(registerId).ctrId == 0)
      m_uavs.at(registerId).ctrId = emitDclUavCounter(registerId);

    // Get a pointer to the atomic counter in question
    DxbcRegisterInfo ptrType;
    ptrType.type.ctype = DxbcScalarType::Uint32;
    ptrType.type.ccount = 1;
    ptrType.type.alength = 0;
    ptrType.sclass = spv::StorageClassStorageBuffer;

    uint32_t zeroId = m_module.consti32(0);
    uint32_t ptrId = m_module.opAccessChain(
      getPointerTypeId(ptrType),
      m_uavs.at(registerId).ctrId,
      1, &zeroId);

    // Define memory scope and semantics based on the operands
    uint32_t scope = spv::ScopeQueueFamily;
    uint32_t semantics = spv::MemorySemanticsUniformMemoryMask
      | spv::MemorySemanticsAcquireReleaseMask;

    uint32_t scopeId = m_module.constu32(scope);
    uint32_t semanticsId = m_module.constu32(semantics);

    // Compute the result value
    DxbcRegisterValue value;
    value.type.ctype = DxbcScalarType::Uint32;
    value.type.ccount = 1;

    uint32_t typeId = getVectorTypeId(value.type);

    switch (ins.op) {
      case DxbcOpcode::ImmAtomicAlloc:
        value.id = m_module.opAtomicIAdd(typeId, ptrId,
          scopeId, semanticsId, m_module.constu32(1));
        break;

      case DxbcOpcode::ImmAtomicConsume:
        // Consume returns the post-decrement value, hence the extra subtract
        value.id = m_module.opAtomicISub(typeId, ptrId,
          scopeId, semanticsId, m_module.constu32(1));
        value.id = m_module.opISub(typeId, value.id,
          m_module.constu32(1));
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    // Store the result
    emitRegisterStore(ins.dst[0], value);
  }

  // Handles sync (barrier) instructions. NOTE(review): this definition
  // continues past the end of this chunk.
  void emitBarrier(
    const DxbcShaderInstruction& ins) {
    // sync takes no operands. Instead, the synchronization
    // scope is defined by the operand control bits.
    const DxbcSyncFlags flags = ins.controls.syncFlags();

    uint32_t executionScope = spv::ScopeInvocation;
    uint32_t memoryScope = spv::ScopeInvocation;
    uint32_t memorySemantics = 0;

    if (flags.test(DxbcSyncFlag::ThreadsInGroup))
      executionScope = spv::ScopeWorkgroup;

    if (flags.test(DxbcSyncFlag::ThreadGroupSharedMemory)) {
      memoryScope = spv::ScopeWorkgroup;
      memorySemantics |= spv::MemorySemanticsWorkgroupMemoryMask
        | spv::MemorySemanticsAcquireReleaseMask
        | spv::MemorySemanticsMakeAvailableMask
        | spv::MemorySemanticsMakeVisibleMask;
    }

    if (flags.test(DxbcSyncFlag::UavMemoryGroup)) {
      memoryScope = spv::ScopeWorkgroup;
      memorySemantics |= spv::MemorySemanticsImageMemoryMask
        | spv::MemorySemanticsUniformMemoryMask
        | spv::MemorySemanticsAcquireReleaseMask
        | spv::MemorySemanticsMakeAvailableMask
        | spv::MemorySemanticsMakeVisibleMask;
    }

    if (flags.test(DxbcSyncFlag::UavMemoryGlobal)) {
      memoryScope = spv::ScopeQueueFamily;

      // Compute shaders without globally coherent UAVs can get away
      // with workgroup scope
      if (m_programInfo.type() == DxbcProgramType::ComputeShader && !m_hasGloballyCoherentUav)
        memoryScope = spv::ScopeWorkgroup;

      memorySemantics |= spv::MemorySemanticsImageMemoryMask
        | spv::MemorySemanticsUniformMemoryMask
        | spv::MemorySemanticsAcquireReleaseMask
        | spv::MemorySemanticsMakeAvailableMask
        | spv::MemorySemanticsMakeVisibleMask;
    }

    // Emit a control barrier when an execution scope is requested,
    // otherwise fall back to a plain memory barrier
    if (executionScope != spv::ScopeInvocation) {
      m_module.opControlBarrier(
        m_module.constu32(executionScope),
        m_module.constu32(memoryScope),
        m_module.constu32(memorySemantics));
    } else if (memoryScope != spv::ScopeInvocation) {
      m_module.opMemoryBarrier(
        m_module.constu32(memoryScope),
        m_module.constu32(memorySemantics));
    } else {
      Logger::warn("DxbcCompiler: sync instruction has no effect");
    }
  }
  // Implements ibfe/ubfe via per-component SPIR-V bitfield extracts.
  void emitBitExtract(
    const DxbcShaderInstruction& ins) {
    // ibfe and ubfe take the following arguments:
    // (dst0) The destination register
    // (src0) Number of bits to extract
    // (src1) Offset of the bits to extract
    // (src2) Register to extract bits from
    const bool isSigned = ins.op == DxbcOpcode::IBfe;

    DxbcRegisterValue bitCnt = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
    DxbcRegisterValue bitOfs = emitRegisterLoad(ins.src[1], ins.dst[0].mask);

    // Dynamic count/offset operands must be masked to 0..31
    if (ins.src[0].type != DxbcOperandType::Imm32)
      bitCnt = emitRegisterMaskBits(bitCnt, 0x1F);

    if (ins.src[1].type != DxbcOperandType::Imm32)
      bitOfs = emitRegisterMaskBits(bitOfs, 0x1F);

    const DxbcRegisterValue src = emitRegisterLoad(ins.src[2], ins.dst[0].mask);

    const uint32_t componentCount = src.type.ccount;
    // NOTE(review): std::array template arguments appear stripped in this
    // copy (likely <uint32_t, 4>) — confirm against the upstream source.
    std::array componentIds = {{ 0, 0, 0, 0 }};

    for (uint32_t i = 0; i < componentCount; i++) {
      const DxbcRegisterValue currBitCnt = emitRegisterExtract(bitCnt, DxbcRegMask::select(i));
      const DxbcRegisterValue currBitOfs = emitRegisterExtract(bitOfs, DxbcRegMask::select(i));
      const DxbcRegisterValue currSrc    = emitRegisterExtract(src, DxbcRegMask::select(i));

      const uint32_t typeId = getVectorTypeId(currSrc.type);

      componentIds[i] = isSigned
        ? m_module.opBitFieldSExtract(typeId, currSrc.id, currBitOfs.id, currBitCnt.id)
        : m_module.opBitFieldUExtract(typeId, currSrc.id, currBitOfs.id, currBitCnt.id);
    }

    DxbcRegisterValue result;
    result.type = src.type;
    result.id   = componentCount > 1
      ? m_module.opCompositeConstruct(
          getVectorTypeId(result.type),
          componentCount, componentIds.data())
      : componentIds[0];
    emitRegisterStore(ins.dst[0], result);
  }
  // Implements bit field insert via per-component SPIR-V OpBitFieldInsert.
  void emitBitInsert(
    const DxbcShaderInstruction& ins) {
    // bfi takes the following arguments:
    // (dst0) The destination register
    // (src0) Number of bits to insert
    // (src1) Offset of the bits to insert
    // (src2) Register to take bits from
    // (src3) Register to replace bits in
    DxbcRegisterValue bitCnt = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
    DxbcRegisterValue bitOfs = emitRegisterLoad(ins.src[1], ins.dst[0].mask);

    // Dynamic count/offset operands must be masked to 0..31
    if (ins.src[0].type != DxbcOperandType::Imm32)
      bitCnt = emitRegisterMaskBits(bitCnt, 0x1F);

    if (ins.src[1].type != DxbcOperandType::Imm32)
      bitOfs = emitRegisterMaskBits(bitOfs, 0x1F);

    const DxbcRegisterValue insert = emitRegisterLoad(ins.src[2], ins.dst[0].mask);
    const DxbcRegisterValue base   = emitRegisterLoad(ins.src[3], ins.dst[0].mask);

    const uint32_t componentCount = base.type.ccount;
    std::array componentIds = {{ 0, 0, 0, 0 }};

    for (uint32_t i = 0; i < componentCount; i++) {
      const DxbcRegisterValue currBitCnt = emitRegisterExtract(bitCnt, DxbcRegMask::select(i));
      const DxbcRegisterValue currBitOfs = emitRegisterExtract(bitOfs, DxbcRegMask::select(i));
      const DxbcRegisterValue currInsert = emitRegisterExtract(insert, DxbcRegMask::select(i));
      const DxbcRegisterValue currBase   = emitRegisterExtract(base, DxbcRegMask::select(i));

      componentIds[i] = m_module.opBitFieldInsert(
        getVectorTypeId(currBase.type),
        currBase.id, currInsert.id,
        currBitOfs.id, currBitCnt.id);
    }

    DxbcRegisterValue result;
    result.type = base.type;
    result.id   = componentCount > 1
      ? m_module.opCompositeConstruct(
          getVectorTypeId(result.type),
          componentCount, componentIds.data())
      : componentIds[0];
    emitRegisterStore(ins.dst[0], result);
  }
  // Implements firstbit_lo / firstbit_hi / firstbit_shi.
  void emitBitScan(
    const DxbcShaderInstruction& ins) {
    // firstbit(lo|hi|shi) have two operands:
    // (dst0) The destination operand
    // (src0) Source operand to scan
    DxbcRegisterValue src = emitRegisterLoad(ins.src[0], ins.dst[0].mask);

    DxbcRegisterValue dst;
    dst.type.ctype  = ins.dst[0].dataType;
    dst.type.ccount = ins.dst[0].mask.popCount();

    // Result type, should be an unsigned integer
    const uint32_t typeId = getVectorTypeId(dst.type);

    switch (ins.op) {
      case DxbcOpcode::FirstBitLo:  dst.id = m_module.opFindILsb(typeId, src.id); break;
      case DxbcOpcode::FirstBitHi:  dst.id = m_module.opFindUMsb(typeId, src.id); break;
      case DxbcOpcode::FirstBitShi: dst.id = m_module.opFindSMsb(typeId, src.id); break;
      default: Logger::warn(str::format("DxbcCompiler: Unhandled instruction: ", ins.op)); return;
    }

    // The 'Hi' variants are counted from the MSB in DXBC
    // rather than the LSB, so we have to invert the number,
    // while preserving the all-ones 'not found' result
    if (ins.op == DxbcOpcode::FirstBitHi || ins.op == DxbcOpcode::FirstBitShi) {
      uint32_t boolTypeId = m_module.defBoolType();

      if (dst.type.ccount > 1)
        boolTypeId = m_module.defVectorType(boolTypeId, dst.type.ccount);

      DxbcRegisterValue const31 = emitBuildConstVecu32(31u, 31u, 31u, 31u, ins.dst[0].mask);
      DxbcRegisterValue constff = emitBuildConstVecu32(~0u, ~0u, ~0u, ~0u, ins.dst[0].mask);

      dst.id = m_module.opSelect(typeId,
        m_module.opINotEqual(boolTypeId, dst.id, constff.id),
        m_module.opISub(typeId, const31.id, dst.id),
        constff.id);
    }

    // No modifiers are supported
    emitRegisterStore(ins.dst[0], dst);
  }
  // Implements bufinfo: queries the element count of a buffer resource.
  void emitBufferQuery(
    const DxbcShaderInstruction& ins) {
    // bufinfo takes two arguments
    // (dst0) The destination register
    // (src0) The buffer register to query
    const DxbcBufferInfo bufferInfo =
      getBufferInfo(ins.src[0]);
    bool isSsbo = bufferInfo.isSsbo;

    // We'll store this as a scalar unsigned integer
    DxbcRegisterValue result = isSsbo
      ? emitQueryBufferSize(ins.src[0])
      : emitQueryTexelBufferSize(ins.src[0]);

    uint32_t typeId = getVectorTypeId(result.type);

    // Adjust returned size if this is a raw or structured
    // buffer, as emitQueryTexelBufferSize only returns the
    // number of typed elements in the buffer.
    if (bufferInfo.type == DxbcResourceType::Raw) {
      // Raw buffers report size in bytes (4 per typed element)
      result.id = m_module.opIMul(typeId,
        result.id, m_module.constu32(4));
    } else if (bufferInfo.type == DxbcResourceType::Structured) {
      // Structured buffers report size in structure counts
      result.id = m_module.opUDiv(typeId, result.id,
        m_module.constu32(bufferInfo.stride / 4));
    }

    // Store the result. The scalar will be extended to a
    // vector if the write mask consists of more than one
    // component, which is the desired behaviour.
    emitRegisterStore(ins.dst[0], result);
  }
  // Implements ld_raw / ld_structured (and their sparse variants) for
  // TGSM, SSBO-backed and texel-buffer-backed resources.
  void emitBufferLoad(
    const DxbcShaderInstruction& ins) {
    // ld_raw takes three arguments:
    // (dst0) Destination register
    // (src0) Byte offset
    // (src1) Source register
    // ld_structured takes four arguments:
    // (dst0) Destination register
    // (src0) Structure index
    // (src1) Byte offset
    // (src2) Source register
    const bool isStructured = ins.op == DxbcOpcode::LdStructured
      || ins.op == DxbcOpcode::LdStructuredS;

    // Source register. The exact way we access
    // the data depends on the register type.
    const DxbcRegister& dstReg = ins.dst[0];
    const DxbcRegister& srcReg = isStructured ? ins.src[2] : ins.src[1];

    if (dstReg.type == DxbcOperandType::UnorderedAccessView)
      emitUavBarrier(uint64_t(1u) << srcReg.idx[0].offset, 0u);

    // Retrieve common info about the buffer
    const DxbcBufferInfo bufferInfo = getBufferInfo(srcReg);

    // Shared memory is the only type of buffer that
    // is not accessed through a texel buffer view
    bool isTgsm = srcReg.type == DxbcOperandType::ThreadGroupSharedMemory;
    bool isSsbo = bufferInfo.isSsbo;

    // Common types and IDs used while loading the data
    uint32_t bufferId = isTgsm || isSsbo ? 0 : m_module.opLoad(bufferInfo.typeId, bufferInfo.varId);

    uint32_t vectorTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 4 });
    uint32_t scalarTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 1 });

    // Since all data is represented as a sequence of 32-bit
    // integers, we have to load each component individually.
    // NOTE(review): std::array template arguments appear stripped in this
    // copy (likely <uint32_t, 4>) — confirm against the upstream source.
    std::array ccomps = { 0, 0, 0, 0 };
    std::array scomps = { 0, 0, 0, 0 };
    uint32_t scount = 0;

    // The sparse feedback ID will be non-zero for sparse
    // instructions on input. We need to reset it to 0.
    SpirvMemoryOperands memoryOperands;
    SpirvImageOperands imageOperands;
    imageOperands.sparse = ins.dstCount == 2;

    uint32_t coherence = bufferInfo.coherence;

    if (isTgsm && m_moduleInfo.options.forceVolatileTgsmAccess) {
      memoryOperands.flags |= spv::MemoryAccessVolatileMask;
      coherence = spv::ScopeWorkgroup;
    }

    if (coherence) {
      memoryOperands.flags |= spv::MemoryAccessNonPrivatePointerMask;

      if (coherence != spv::ScopeInvocation) {
        memoryOperands.flags |= spv::MemoryAccessMakePointerVisibleMask;
        memoryOperands.makeVisible = m_module.constu32(coherence);

        imageOperands.flags = spv::ImageOperandsNonPrivateTexelMask
          | spv::ImageOperandsMakeTexelVisibleMask;
        imageOperands.makeVisible = m_module.constu32(coherence);
      }
    }

    uint32_t sparseFeedbackId = 0;

    // NV raw access chains cannot be combined with sparse feedback
    bool useRawAccessChains = m_hasRawAccessChains && isSsbo && !imageOperands.sparse;

    DxbcRegisterValue index = emitRegisterLoad(ins.src[0], DxbcRegMask(true, false, false, false));
    DxbcRegisterValue offset = index;

    if (isStructured)
      offset = emitRegisterLoad(ins.src[1], DxbcRegMask(true, false, false, false));

    DxbcRegisterValue elementIndex = { };

    uint32_t baseAlignment = sizeof(uint32_t);

    if (useRawAccessChains) {
      memoryOperands.flags |= spv::MemoryAccessAlignedMask;

      if (isStructured && ins.src[1].type == DxbcOperandType::Imm32) {
        // Lowest set bit of (stride | immediate offset) bounds the alignment
        baseAlignment = bufferInfo.stride | ins.src[1].imm.u32_1;
        baseAlignment = baseAlignment & -baseAlignment;
        baseAlignment = std::min(baseAlignment, uint32_t(m_moduleInfo.options.minSsboAlignment));
      }
    } else {
      elementIndex = isStructured
        ? emitCalcBufferIndexStructured(index, offset, bufferInfo.stride)
        : emitCalcBufferIndexRaw(offset);
    }

    // Collect which source components actually need to be read,
    // taking the source swizzle into account
    uint32_t readMask = 0u;

    for (uint32_t i = 0; i < 4; i++) {
      if (dstReg.mask[i])
        readMask |= 1u << srcReg.swizzle[i];
    }

    while (readMask) {
      uint32_t sindex = bit::tzcnt(readMask);
      // NOTE: this inner scount (contiguous run length) shadows the
      // outer component counter declared above
      uint32_t scount = bit::tzcnt(~(readMask >> sindex));
      uint32_t zero = 0;

      if (useRawAccessChains) {
        uint32_t alignment = baseAlignment;
        uint32_t offsetId = offset.id;

        if (sindex) {
          offsetId = m_module.opIAdd(scalarTypeId,
            offsetId, m_module.constu32(sizeof(uint32_t) * sindex));
          alignment |= sizeof(uint32_t) * sindex;
        }

        DxbcRegisterInfo storeInfo;
        storeInfo.type.ctype = DxbcScalarType::Uint32;
        storeInfo.type.ccount = scount;
        storeInfo.type.alength = 0;
        storeInfo.sclass = spv::StorageClassStorageBuffer;

        uint32_t loadTypeId = getArrayTypeId(storeInfo.type);
        uint32_t ptrTypeId = getPointerTypeId(storeInfo);

        uint32_t accessChain = isStructured
          ? m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
              m_module.constu32(bufferInfo.stride), index.id, offsetId,
              spv::RawAccessChainOperandsRobustnessPerElementNVMask)
          : m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
              m_module.constu32(0), m_module.constu32(0), offsetId,
              spv::RawAccessChainOperandsRobustnessPerComponentNVMask);

        memoryOperands.alignment = alignment & -alignment;

        uint32_t vectorId = m_module.opLoad(loadTypeId, accessChain, memoryOperands);

        for (uint32_t i = 0; i < scount; i++) {
          ccomps[sindex + i] = vectorId;

          if (scount > 1) {
            ccomps[sindex + i] = m_module.opCompositeExtract(
              scalarTypeId, vectorId, 1, &i);
          }
        }

        readMask &= ~(((1u << scount) - 1u) << sindex);
      } else {
        uint32_t elementIndexAdjusted = m_module.opIAdd(
          getVectorTypeId(elementIndex.type), elementIndex.id,
          m_module.consti32(sindex));

        if (isTgsm) {
          ccomps[sindex] = m_module.opLoad(scalarTypeId,
            m_module.opAccessChain(bufferInfo.typeId,
              bufferInfo.varId, 1, &elementIndexAdjusted),
            memoryOperands);
        } else if (isSsbo) {
          uint32_t indices[2] = { m_module.constu32(0), elementIndexAdjusted };
          ccomps[sindex] = m_module.opLoad(scalarTypeId,
            m_module.opAccessChain(bufferInfo.typeId,
              bufferInfo.varId, 2, indices),
            memoryOperands);
        } else {
          uint32_t resultTypeId = vectorTypeId;
          uint32_t resultId = 0;

          if (imageOperands.sparse)
            resultTypeId = getSparseResultTypeId(vectorTypeId);

          if (srcReg.type == DxbcOperandType::Resource) {
            resultId = m_module.opImageFetch(resultTypeId,
              bufferId, elementIndexAdjusted, imageOperands);
          } else if (srcReg.type == DxbcOperandType::UnorderedAccessView) {
            resultId = m_module.opImageRead(resultTypeId,
              bufferId, elementIndexAdjusted, imageOperands);
          } else {
            throw DxvkError("DxbcCompiler: Invalid operand type for strucured/raw load");
          }

          // Only read sparse feedback once. This may be somewhat inaccurate
          // for reads that straddle pages, but we can't easily emulate this.
          if (imageOperands.sparse) {
            imageOperands.sparse = false;
            sparseFeedbackId = resultId;

            resultId = emitExtractSparseTexel(vectorTypeId, resultId);
          }

          ccomps[sindex] = m_module.opCompositeExtract(scalarTypeId, resultId, 1, &zero);
        }

        readMask &= readMask - 1;
      }
    }

    // Apply the source swizzle and destination write mask
    // to assemble the final component list
    for (uint32_t i = 0; i < 4; i++) {
      uint32_t sindex = srcReg.swizzle[i];

      if (dstReg.mask[i])
        scomps[scount++] = ccomps[sindex];
    }

    DxbcRegisterValue result = { };
    result.type.ctype = DxbcScalarType::Uint32;
    result.type.ccount = scount;
    result.id = scomps[0];

    if (scount > 1) {
      result.id = m_module.opCompositeConstruct(
        getVectorTypeId(result.type),
        scount, scomps.data());
    }

    emitRegisterStore(dstReg, result);

    if (sparseFeedbackId)
      emitStoreSparseFeedback(ins.dst[1], sparseFeedbackId);
  }
  // Implements store_raw / store_structured for TGSM, SSBO-backed
  // and texel-buffer-backed resources.
  void emitBufferStore(
    const DxbcShaderInstruction& ins) {
    // store_raw takes three arguments:
    // (dst0) Destination register
    // (src0) Byte offset
    // (src1) Source register
    // store_structured takes four arguments:
    // (dst0) Destination register
    // (src0) Structure index
    // (src1) Byte offset
    // (src2) Source register
    const bool isStructured = ins.op == DxbcOpcode::StoreStructured;

    // Source register. The exact way we access
    // the data depends on the register type.
    const DxbcRegister& dstReg = ins.dst[0];
    const DxbcRegister& srcReg = isStructured ? ins.src[2] : ins.src[1];

    if (dstReg.type == DxbcOperandType::UnorderedAccessView)
      emitUavBarrier(0u, uint64_t(1u) << dstReg.idx[0].offset);

    // All data is written as 32-bit integer components
    DxbcRegisterValue value = emitRegisterLoad(srcReg, dstReg.mask);
    value = emitRegisterBitcast(value, DxbcScalarType::Uint32);

    // Retrieve common info about the buffer
    const DxbcBufferInfo bufferInfo = getBufferInfo(dstReg);

    // Thread Group Shared Memory is not accessed through a texel buffer view
    bool isTgsm = dstReg.type == DxbcOperandType::ThreadGroupSharedMemory;
    bool isSsbo = bufferInfo.isSsbo;

    uint32_t bufferId = isTgsm || isSsbo ? 0 : m_module.opLoad(bufferInfo.typeId, bufferInfo.varId);

    uint32_t scalarTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 1 });
    uint32_t vectorTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 4 });

    // Set memory operands according to resource properties
    SpirvMemoryOperands memoryOperands;
    SpirvImageOperands imageOperands;

    uint32_t coherence = bufferInfo.coherence;

    if (isTgsm && m_moduleInfo.options.forceVolatileTgsmAccess) {
      memoryOperands.flags |= spv::MemoryAccessVolatileMask;
      coherence = spv::ScopeWorkgroup;
    }

    if (coherence) {
      memoryOperands.flags |= spv::MemoryAccessNonPrivatePointerMask;

      if (coherence != spv::ScopeInvocation) {
        memoryOperands.flags |= spv::MemoryAccessMakePointerAvailableMask;
        memoryOperands.makeAvailable = m_module.constu32(coherence);

        imageOperands.flags = spv::ImageOperandsNonPrivateTexelMask
          | spv::ImageOperandsMakeTexelAvailableMask;
        imageOperands.makeAvailable = m_module.constu32(coherence);
      }
    }

    // Compute flat element index as necessary
    bool useRawAccessChains = isSsbo && m_hasRawAccessChains;

    DxbcRegisterValue index = emitRegisterLoad(ins.src[0], DxbcRegMask(true, false, false, false));
    DxbcRegisterValue offset = index;

    if (isStructured)
      offset = emitRegisterLoad(ins.src[1], DxbcRegMask(true, false, false, false));

    DxbcRegisterValue elementIndex = { };

    uint32_t baseAlignment = sizeof(uint32_t);

    if (useRawAccessChains) {
      memoryOperands.flags |= spv::MemoryAccessAlignedMask;

      if (isStructured && ins.src[1].type == DxbcOperandType::Imm32) {
        // Lowest set bit of (stride | immediate offset) bounds the alignment
        baseAlignment = bufferInfo.stride | ins.src[1].imm.u32_1;
        baseAlignment = baseAlignment & -baseAlignment;
        baseAlignment = std::min(baseAlignment, uint32_t(m_moduleInfo.options.minSsboAlignment));
      }
    } else {
      elementIndex = isStructured
        ? emitCalcBufferIndexStructured(index, offset, bufferInfo.stride)
        : emitCalcBufferIndexRaw(offset);
    }

    uint32_t writeMask = dstReg.mask.raw();

    while (writeMask) {
      // sindex = first component to write, scount = length of the
      // contiguous run of set mask bits starting there
      uint32_t sindex = bit::tzcnt(writeMask);
      uint32_t scount = bit::tzcnt(~(writeMask >> sindex));

      if (useRawAccessChains) {
        uint32_t alignment = baseAlignment;
        uint32_t offsetId = offset.id;

        if (sindex) {
          offsetId = m_module.opIAdd(scalarTypeId,
            offsetId, m_module.constu32(sizeof(uint32_t) * sindex));
          alignment = alignment | (sizeof(uint32_t) * sindex);
        }

        DxbcRegisterInfo storeInfo;
        storeInfo.type.ctype = DxbcScalarType::Uint32;
        storeInfo.type.ccount = scount;
        storeInfo.type.alength = 0;
        storeInfo.sclass = spv::StorageClassStorageBuffer;

        uint32_t storeTypeId = getArrayTypeId(storeInfo.type);
        uint32_t ptrTypeId = getPointerTypeId(storeInfo);

        uint32_t accessChain = isStructured
          ? m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
              m_module.constu32(bufferInfo.stride), index.id, offsetId,
              spv::RawAccessChainOperandsRobustnessPerElementNVMask)
          : m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
              m_module.constu32(0), m_module.constu32(0), offsetId,
              spv::RawAccessChainOperandsRobustnessPerComponentNVMask);

        uint32_t valueId = value.id;

        if (scount < value.type.ccount) {
          if (scount == 1) {
            valueId = m_module.opCompositeExtract(storeTypeId, value.id, 1, &sindex);
          } else {
            // NOTE(review): std::array template arguments appear stripped
            // in this copy (likely <uint32_t, 4>) — confirm upstream.
            std::array indices = { sindex, sindex + 1u, sindex + 2u, sindex + 3u };
            valueId = m_module.opVectorShuffle(storeTypeId, value.id, value.id, scount, indices.data());
          }
        }

        memoryOperands.alignment = alignment & -alignment;
        m_module.opStore(accessChain, valueId, memoryOperands);

        writeMask &= ~(((1u << scount) - 1u) << sindex);
      } else {
        uint32_t srcComponentId = value.type.ccount > 1
          ? m_module.opCompositeExtract(scalarTypeId,
              value.id, 1, &sindex)
          : value.id;

        uint32_t elementIndexAdjusted = sindex != 0
          ? m_module.opIAdd(getVectorTypeId(elementIndex.type),
              elementIndex.id, m_module.consti32(sindex))
          : elementIndex.id;

        if (isTgsm) {
          m_module.opStore(
            m_module.opAccessChain(bufferInfo.typeId,
              bufferInfo.varId, 1, &elementIndexAdjusted),
            srcComponentId, memoryOperands);
        } else if (isSsbo) {
          uint32_t indices[2] = { m_module.constu32(0), elementIndexAdjusted };
          m_module.opStore(
            m_module.opAccessChain(bufferInfo.typeId,
              bufferInfo.varId, 2, indices),
            srcComponentId, memoryOperands);
        } else if (dstReg.type == DxbcOperandType::UnorderedAccessView) {
          // Image writes always take a full four-component vector,
          // so replicate the scalar into all lanes
          const std::array srcVectorIds = {
            srcComponentId, srcComponentId,
            srcComponentId, srcComponentId,
          };

          m_module.opImageWrite(
            bufferId, elementIndexAdjusted,
            m_module.opCompositeConstruct(vectorTypeId,
              4, srcVectorIds.data()),
            imageOperands);
        } else {
          throw DxvkError("DxbcCompiler: Invalid operand type for strucured/raw store");
        }

        writeMask &= writeMask - 1u;
      }
    }
  }
  // Implements f32tof16 / f16tof32 via SPIR-V pack/unpack instructions.
  void emitConvertFloat16(
    const DxbcShaderInstruction& ins) {
    // f32tof16 takes two operands:
    // (dst0) Destination register as a uint32 vector
    // (src0) Source register as a float32 vector
    // f16tof32 takes two operands:
    // (dst0) Destination register as a float32 vector
    // (src0) Source register as a uint32 vector
    const DxbcRegisterValue src = emitRegisterLoad(ins.src[0], ins.dst[0].mask);

    // We handle both packing and unpacking here
    const bool isPack = ins.op == DxbcOpcode::F32toF16;

    // The conversion instructions do not map very well to the
    // SPIR-V pack instructions, which operate on 2D vectors.
    std::array scalarIds = {{ 0, 0, 0, 0 }};

    const uint32_t componentCount = src.type.ccount;

    // These types are used in both pack and unpack operations
    const uint32_t t_u32   = getVectorTypeId({ DxbcScalarType::Uint32, 1 });
    const uint32_t t_f32   = getVectorTypeId({ DxbcScalarType::Float32, 1 });
    const uint32_t t_f32v2 = getVectorTypeId({ DxbcScalarType::Float32,
      2 });

    // Constant zero-bit pattern, used for packing
    const uint32_t zerof32 = isPack ? m_module.constf32(0.0f) : 0;

    for (uint32_t i = 0; i < componentCount; i++) {
      const DxbcRegisterValue componentValue
        = emitRegisterExtract(src, DxbcRegMask::select(i));

      if (isPack) { // f32tof16
        // Pack the component into the low half; the high half is zero
        const std::array packIds =
          {{ componentValue.id, zerof32 }};

        scalarIds[i] = m_module.opPackHalf2x16(t_u32,
          m_module.opCompositeConstruct(t_f32v2, packIds.size(), packIds.data()));
      } else { // f16tof32
        // Only the low 16 bits of each component are converted
        const uint32_t zeroIndex = 0;

        scalarIds[i] = m_module.opCompositeExtract(t_f32,
          m_module.opUnpackHalf2x16(t_f32v2, componentValue.id),
          1, &zeroIndex);
      }
    }

    DxbcRegisterValue result;
    result.type.ctype  = ins.dst[0].dataType;
    result.type.ccount = componentCount;

    uint32_t typeId = getVectorTypeId(result.type);
    result.id = componentCount > 1
      ? m_module.opCompositeConstruct(typeId,
          componentCount, scalarIds.data())
      : scalarIds[0];

    if (isPack) {
      // Some drivers return infinity if the input value is above a certain
      // threshold, but D3D wants us to return infinity only if the input is
      // actually infinite. Fix this up to return the maximum representable
      // 16-bit floating point number instead, but preserve input infinity.
      uint32_t t_bvec = getVectorTypeId({ DxbcScalarType::Bool, componentCount });
      uint32_t f16Infinity = m_module.constuReplicant(0x7C00, componentCount);
      uint32_t f16Unsigned = m_module.constuReplicant(0x7FFF, componentCount);

      uint32_t isInputInf = m_module.opIsInf(t_bvec, src.id);
      uint32_t isValueInf = m_module.opIEqual(t_bvec, f16Infinity,
        m_module.opBitwiseAnd(typeId, result.id, f16Unsigned));

      // Subtracting 1 from the infinity bit pattern yields f16 max
      result.id = m_module.opSelect(getVectorTypeId(result.type),
        m_module.opLogicalAnd(t_bvec, isValueInf, m_module.opLogicalNot(t_bvec, isInputInf)),
        m_module.opISub(typeId, result.id, m_module.constuReplicant(1, componentCount)),
        result.id);
    }

    // Store result in the destination register
    emitRegisterStore(ins.dst[0], result);
  }
  // Implements double <-> float/int conversions (ftod, dtof, dtoi, dtou,
  // itod, utod).
  void emitConvertFloat64(
    const DxbcShaderInstruction& ins) {
    // ftod and dtof take the following operands:
    // (dst0) Destination operand
    // (src0) Number to convert
    uint32_t dstBits = ins.dst[0].mask.popCount();

    // Doubles occupy two 32-bit destination components each,
    // so the source mask depends on the destination data type
    DxbcRegMask srcMask = isDoubleType(ins.dst[0].dataType)
      ? DxbcRegMask(dstBits >= 2, dstBits >= 4, false, false)
      : DxbcRegMask(dstBits >= 1, dstBits >= 1, dstBits >= 2, dstBits >= 2);

    // Perform actual conversion, destination modifiers are not applied
    DxbcRegisterValue val = emitRegisterLoad(ins.src[0], srcMask);

    DxbcRegisterValue result;
    result.type.ctype  = ins.dst[0].dataType;
    result.type.ccount = val.type.ccount;

    switch (ins.op) {
      case DxbcOpcode::DtoF:
      case DxbcOpcode::FtoD:
        result.id = m_module.opFConvert(
          getVectorTypeId(result.type), val.id);
        break;

      case DxbcOpcode::DtoI:
        result.id = m_module.opConvertFtoS(
          getVectorTypeId(result.type), val.id);
        break;

      case DxbcOpcode::DtoU:
        result.id = m_module.opConvertFtoU(
          getVectorTypeId(result.type), val.id);
        break;

      case DxbcOpcode::ItoD:
        result.id = m_module.opConvertStoF(
          getVectorTypeId(result.type), val.id);
        break;

      case DxbcOpcode::UtoD:
        result.id = m_module.opConvertUtoF(
          getVectorTypeId(result.type), val.id);
        break;

      default:
        Logger::warn(str::format("DxbcCompiler: Unhandled instruction: ", ins.op));
        return;
    }

    emitRegisterStore(ins.dst[0], result);
  }
  // Tracks hull-shader phase transitions (hs_decls, control point,
  // fork and join phases) and creates the per-phase functions.
  void emitHullShaderPhase(
    const DxbcShaderInstruction& ins) {
    switch (ins.op) {
      case DxbcOpcode::HsDecls: {
        if (m_hs.currPhaseType != DxbcCompilerHsPhase::None)
          Logger::err("DXBC: HsDecls not the first phase in hull shader");

        m_hs.currPhaseType = DxbcCompilerHsPhase::Decl;
      } break;

      case DxbcOpcode::HsControlPointPhase: {
        m_hs.cpPhase = this->emitNewHullShaderControlPointPhase();

        m_hs.currPhaseType = DxbcCompilerHsPhase::ControlPoint;
        m_hs.currPhaseId = 0;

        m_module.setDebugName(m_hs.cpPhase.functionId, "hs_control_point");
      } break;

      case DxbcOpcode::HsForkPhase: {
        auto phase = this->emitNewHullShaderForkJoinPhase();
        m_hs.forkPhases.push_back(phase);

        m_hs.currPhaseType = DxbcCompilerHsPhase::Fork;
        m_hs.currPhaseId = m_hs.forkPhases.size() - 1;

m_module.setDebugName(phase.functionId,\n str::format(\"hs_fork_\", m_hs.currPhaseId).c_str());\n } break;\n \n case DxbcOpcode::HsJoinPhase: {\n auto phase = this->emitNewHullShaderForkJoinPhase();\n m_hs.joinPhases.push_back(phase);\n \n m_hs.currPhaseType = DxbcCompilerHsPhase::Join;\n m_hs.currPhaseId = m_hs.joinPhases.size() - 1;\n \n m_module.setDebugName(phase.functionId,\n str::format(\"hs_join_\", m_hs.currPhaseId).c_str());\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n }\n }\n void emitHullShaderInstCnt(\n const DxbcShaderInstruction& ins) {\n this->getCurrentHsForkJoinPhase()->instanceCount = ins.imm[0].u32;\n }\n void emitInterpolate(\n const DxbcShaderInstruction& ins) {\n m_module.enableCapability(spv::CapabilityInterpolationFunction);\n\n // The SPIR-V instructions operate on input variable pointers,\n // which are all declared as four-component float vectors.\n uint32_t registerId = ins.src[0].idx[0].offset;\n \n DxbcRegisterValue result;\n result.type = getInputRegType(registerId);\n \n switch (ins.op) {\n case DxbcOpcode::EvalCentroid: {\n result.id = m_module.opInterpolateAtCentroid(\n getVectorTypeId(result.type),\n m_vRegs.at(registerId).id);\n } break;\n \n case DxbcOpcode::EvalSampleIndex: {\n const DxbcRegisterValue sampleIndex = emitRegisterLoad(\n ins.src[1], DxbcRegMask(true, false, false, false));\n \n result.id = m_module.opInterpolateAtSample(\n getVectorTypeId(result.type),\n m_vRegs.at(registerId).id,\n sampleIndex.id);\n } break;\n \n case DxbcOpcode::EvalSnapped: {\n // The offset is encoded as a 4-bit fixed point value\n DxbcRegisterValue offset = emitRegisterLoad(\n ins.src[1], DxbcRegMask(true, true, false, false));\n offset.id = m_module.opBitFieldSExtract(\n getVectorTypeId(offset.type), offset.id,\n m_module.consti32(0), m_module.consti32(4));\n\n offset.type.ctype = DxbcScalarType::Float32;\n offset.id = m_module.opConvertStoF(\n getVectorTypeId(offset.type), 
offset.id);\n\n offset.id = m_module.opFMul(\n getVectorTypeId(offset.type), offset.id,\n m_module.constvec2f32(1.0f / 16.0f, 1.0f / 16.0f));\n\n result.id = m_module.opInterpolateAtOffset(\n getVectorTypeId(result.type),\n m_vRegs.at(registerId).id,\n offset.id);\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n \n result = emitRegisterSwizzle(result,\n ins.src[0].swizzle, ins.dst[0].mask);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitSparseCheckAccess(\n const DxbcShaderInstruction& ins) {\n // check_access_mapped has two operands:\n // (dst0) The destination register\n // (src0) The residency code\n m_module.enableCapability(spv::CapabilitySparseResidency);\n\n DxbcRegisterValue srcValue = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n\n uint32_t boolId = m_module.opImageSparseTexelsResident(\n m_module.defBoolType(), srcValue.id);\n\n DxbcRegisterValue dstValue;\n dstValue.type = { DxbcScalarType::Uint32, 1 };\n dstValue.id = m_module.opSelect(getScalarTypeId(DxbcScalarType::Uint32),\n boolId, m_module.constu32(~0u), m_module.constu32(0));\n\n emitRegisterStore(ins.dst[0], dstValue);\n }\n void emitTextureQuery(\n const DxbcShaderInstruction& ins) {\n // resinfo has three operands:\n // (dst0) The destination register\n // (src0) Resource LOD to query\n // (src1) Resource to query\n const DxbcBufferInfo resourceInfo = getBufferInfo(ins.src[1]);\n const DxbcResinfoType resinfoType = ins.controls.resinfoType();\n \n // Read the exact LOD for the image query\n const DxbcRegisterValue mipLod = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n const DxbcScalarType returnType = resinfoType == DxbcResinfoType::Uint\n ? DxbcScalarType::Uint32 : DxbcScalarType::Float32;\n \n // Query the size of the selected mip level, as well as the\n // total number of mip levels. 
    // We will have to combine the
    // result into a four-component vector later.
    DxbcRegisterValue imageSize = emitQueryTextureSize(ins.src[1], mipLod);
    DxbcRegisterValue imageLevels = emitQueryTextureLods(ins.src[1]);

    // If the mip level is out of bounds, D3D requires us to return
    // zero before applying modifiers, whereas SPIR-V is undefined,
    // so we need to fix it up manually here.
    imageSize.id = m_module.opSelect(getVectorTypeId(imageSize.type),
      m_module.opULessThan(m_module.defBoolType(), mipLod.id, imageLevels.id),
      imageSize.id, emitBuildZeroVector(imageSize.type).id);

    // Convert intermediates to the requested type
    if (returnType == DxbcScalarType::Float32) {
      imageSize.type.ctype = DxbcScalarType::Float32;
      imageSize.id = m_module.opConvertUtoF(
        getVectorTypeId(imageSize.type),
        imageSize.id);

      imageLevels.type.ctype = DxbcScalarType::Float32;
      imageLevels.id = m_module.opConvertUtoF(
        getVectorTypeId(imageLevels.type),
        imageLevels.id);
    }

    // If the selected return type is rcpFloat, we need
    // to compute the reciprocal of the image dimensions,
    // but not the array size, so we need to separate it.
    const uint32_t imageCoordDim = imageSize.type.ccount;

    DxbcRegisterValue imageLayers;
    imageLayers.type = imageSize.type;
    imageLayers.id = 0;

    if (resinfoType == DxbcResinfoType::RcpFloat && resourceInfo.image.array) {
      // Split the layer count (last component) off the size vector
      imageLayers = emitRegisterExtract(imageSize, DxbcRegMask::select(imageCoordDim - 1));
      imageSize = emitRegisterExtract(imageSize, DxbcRegMask::firstN(imageCoordDim - 1));
    }

    if (resinfoType == DxbcResinfoType::RcpFloat) {
      imageSize.id = m_module.opFDiv(
        getVectorTypeId(imageSize.type),
        emitBuildConstVecf32(1.0f, 1.0f, 1.0f, 1.0f,
          DxbcRegMask::firstN(imageSize.type.ccount)).id,
        imageSize.id);
    }

    // Concatenate result vectors and scalars to form a
    // 4D vector.
    // Unused components will be set to zero.
    // NOTE(review): the template argument list appears to have been
    // lost in extraction here (likely std::array<uint32_t, 4>) —
    // verify against the upstream source.
    std::array vectorIds = { imageSize.id, 0, 0, 0 };
    uint32_t numVectorIds = 1;

    if (imageLayers.id != 0)
      vectorIds[numVectorIds++] = imageLayers.id;

    if (imageCoordDim < 3) {
      // Pad unused size components with zeroes of the requested type
      const uint32_t zero = returnType == DxbcScalarType::Uint32
        ? m_module.constu32(0)
        : m_module.constf32(0.0f);

      for (uint32_t i = imageCoordDim; i < 3; i++)
        vectorIds[numVectorIds++] = zero;
    }

    vectorIds[numVectorIds++] = imageLevels.id;

    // Create the actual result vector
    DxbcRegisterValue result;
    result.type.ctype = returnType;
    result.type.ccount = 4;
    result.id = m_module.opCompositeConstruct(
      getVectorTypeId(result.type),
      numVectorIds, vectorIds.data());

    // Swizzle components using the resource swizzle
    // and the destination operand's write mask
    result = emitRegisterSwizzle(result,
      ins.src[1].swizzle, ins.dst[0].mask);
    emitRegisterStore(ins.dst[0], result);
  }

  // Translates lod: queries the LOD that would be used for an
  // implicit-LOD sample at the given coordinates.
  void emitTextureQueryLod(
    const DxbcShaderInstruction& ins) {
    // All sample instructions have at least these operands:
    // (dst0) The destination register
    // (src0) Texture coordinates
    // (src1) The texture itself
    // (src2) The sampler object
    const DxbcRegister& texCoordReg = ins.src[0];
    const DxbcRegister& textureReg = ins.src[1];
    const DxbcRegister& samplerReg = ins.src[2];

    // Texture and sampler register IDs
    const auto& texture = m_textures.at(textureReg.idx[0].offset);
    const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);

    // Load texture coordinates
    const DxbcRegisterValue coord = emitRegisterLoad(texCoordReg,
      DxbcRegMask::firstN(getTexLayerDim(texture.imageInfo)));

    // Query the LOD.
    // The result is a two-dimensional float32
    // vector containing the mip level and virtual LOD numbers.
    const uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, false);
    const uint32_t queriedLodId = m_module.opImageQueryLod(
      getVectorTypeId({ DxbcScalarType::Float32, 2 }),
      sampledImageId, coord.id);

    // Build the result array vector by filling up
    // the remaining two components with zeroes.
    // NOTE(review): template arguments appear stripped by extraction
    // on this std::array declaration — verify against upstream.
    const uint32_t zero = m_module.constf32(0.0f);
    const std::array resultIds
      = {{ queriedLodId, zero, zero }};

    DxbcRegisterValue result;
    result.type = DxbcVectorType { DxbcScalarType::Float32, 4 };
    result.id = m_module.opCompositeConstruct(
      getVectorTypeId(result.type),
      resultIds.size(), resultIds.data());

    result = emitRegisterSwizzle(result, ins.src[1].swizzle, ins.dst[0].mask);
    emitRegisterStore(ins.dst[0], result);
  }

  // Translates sampleinfo: returns the sample count of a resource,
  // converted to float unless a uint return type was requested.
  void emitTextureQueryMs(
    const DxbcShaderInstruction& ins) {
    // sampleinfo has two operands:
    // (dst0) The destination register
    // (src0) Resource to query
    DxbcRegisterValue sampleCount = emitQueryTextureSamples(ins.src[0]);

    if (ins.controls.returnType() != DxbcInstructionReturnType::Uint) {
      sampleCount.type = { DxbcScalarType::Float32, 1 };
      sampleCount.id = m_module.opConvertUtoF(
        getVectorTypeId(sampleCount.type),
        sampleCount.id);
    }

    emitRegisterStore(ins.dst[0], sampleCount);
  }

  // Translates samplepos: looks up a standard sample position from
  // a lazily created private array of sample positions.
  void emitTextureQueryMsPos(
    const DxbcShaderInstruction& ins) {
    // samplepos has three operands:
    // (dst0) The destination register
    // (src0) Resource to query
    // (src1) Sample index
    if (m_samplePositions == 0)
      m_samplePositions = emitSamplePosArray();

    // The lookup index is equal to the sample count plus the
    // sample index, or 0 if the resource cannot be queried.
    DxbcRegisterValue sampleCount = emitQueryTextureSamples(ins.src[0]);
    DxbcRegisterValue sampleIndex = emitRegisterLoad(
      ins.src[1], DxbcRegMask(true, false, false, false));

    uint32_t lookupIndex =
      m_module.opIAdd(
      getVectorTypeId(sampleCount.type),
      sampleCount.id, sampleIndex.id);

    // Validate the parameters
    uint32_t sampleCountValid = m_module.opULessThanEqual(
      m_module.defBoolType(),
      sampleCount.id,
      m_module.constu32(16));

    uint32_t sampleIndexValid = m_module.opULessThan(
      m_module.defBoolType(),
      sampleIndex.id,
      sampleCount.id);

    // If the lookup cannot be performed, set the lookup
    // index to zero, which will return a zero vector.
    lookupIndex = m_module.opSelect(
      getVectorTypeId(sampleCount.type),
      m_module.opLogicalAnd(
        m_module.defBoolType(),
        sampleCountValid,
        sampleIndexValid),
      lookupIndex,
      m_module.constu32(0));

    // Load sample pos vector and write the masked
    // components to the destination register.
    DxbcRegisterPointer samplePos;
    samplePos.type.ctype = DxbcScalarType::Float32;
    samplePos.type.ccount = 2;
    samplePos.id = m_module.opAccessChain(
      m_module.defPointerType(
        getVectorTypeId(samplePos.type),
        spv::StorageClassPrivate),
      m_samplePositions, 1, &lookupIndex);

    // Expand to vec4 by appending zeroes
    DxbcRegisterValue result = emitValueLoad(samplePos);

    DxbcRegisterValue zero;
    zero.type.ctype = DxbcScalarType::Float32;
    zero.type.ccount = 2;
    zero.id = m_module.constvec2f32(0.0f, 0.0f);

    result = emitRegisterConcat(result, zero);

    emitRegisterStore(ins.dst[0],
      emitRegisterSwizzle(result,
        ins.src[0].swizzle,
        ins.dst[0].mask));
  }

  // Translates ld / ld2dms (and their sparse variants) to an
  // OpImageFetch with the appropriate LOD / sample image operands.
  void emitTextureFetch(
    const DxbcShaderInstruction& ins) {
    // ld has three operands:
    // (dst0) The destination register
    // (src0) Source address
    // (src1) Source texture
    // ld2dms has four operands:
    // (dst0) The destination register
    // (src0) Source address
    // (src1) Source texture
    // (src2) Sample number
    const auto& texture = m_textures.at(ins.src[1].idx[0].offset);
    const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);

    bool isMultisampled = ins.op == DxbcOpcode::LdMs
      || ins.op ==
        DxbcOpcode::LdMsS;

    // Load the texture coordinates. The last component
    // contains the LOD if the resource is an image.
    const DxbcRegisterValue address = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, true, true, true));

    // Additional image operands. This will store
    // the LOD and the address offset if present.
    SpirvImageOperands imageOperands;
    imageOperands.sparse = ins.dstCount == 2;

    if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {
      // Immediate texel offsets from the instruction's sample controls.
      // NOTE(review): template arguments appear stripped by extraction
      // on this std::array declaration — verify against upstream.
      const std::array offsetIds = {
        imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,
        imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,
        imageLayerDim >= 3 ? m_module.consti32(ins.sampleControls.w) : 0,
      };

      imageOperands.flags |= spv::ImageOperandsConstOffsetMask;
      imageOperands.sConstOffset = offsetIds[0];

      if (imageLayerDim > 1) {
        imageOperands.sConstOffset = m_module.constComposite(
          getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),
          imageLayerDim, offsetIds.data());
      }
    }

    // The LOD is not present when reading from
    // a buffer or from a multisample texture.
    if (texture.imageInfo.dim != spv::DimBuffer && texture.imageInfo.ms == 0) {
      DxbcRegisterValue imageLod;

      if (!isMultisampled) {
        imageLod = emitRegisterExtract(
          address, DxbcRegMask(false, false, false, true));
      } else {
        // If we force-disabled MSAA, fetch from LOD 0
        imageLod.type = { DxbcScalarType::Uint32, 1 };
        imageLod.id = m_module.constu32(0);
      }

      imageOperands.flags |= spv::ImageOperandsLodMask;
      imageOperands.sLod = imageLod.id;
    }

    // The ld2dms instruction has a sample index, but we
    // are only allowed to set it for multisample views
    if (isMultisampled && texture.imageInfo.ms == 1) {
      DxbcRegisterValue sampleId = emitRegisterLoad(
        ins.src[2], DxbcRegMask(true, false, false, false));

      imageOperands.flags |= spv::ImageOperandsSampleMask;
      imageOperands.sSampleId = sampleId.id;
    }

    // Extract coordinates from address
    const DxbcRegisterValue coord = emitCalcTexCoord(address, texture.imageInfo);

    // Reading a typed image or buffer view
    // always returns a four-component vector.
    const uint32_t imageId = m_module.opLoad(texture.imageTypeId, texture.varId);

    DxbcVectorType texelType;
    texelType.ctype = texture.sampledType;
    texelType.ccount = 4;

    uint32_t texelTypeId = getVectorTypeId(texelType);
    uint32_t resultTypeId = texelTypeId;
    uint32_t resultId = 0;

    // Sparse fetches return a (residency code, texel) struct
    if (imageOperands.sparse)
      resultTypeId = getSparseResultTypeId(texelTypeId);

    resultId = m_module.opImageFetch(resultTypeId,
      imageId, coord.id, imageOperands);

    DxbcRegisterValue result;
    result.type = texelType;
    result.id = imageOperands.sparse
      ? emitExtractSparseTexel(texelTypeId, resultId)
      : resultId;

    // Swizzle components using the texture swizzle
    // and the destination operand's write mask
    result = emitRegisterSwizzle(result,
      ins.src[1].swizzle, ins.dst[0].mask);

    emitRegisterStore(ins.dst[0], result);

    if (imageOperands.sparse)
      emitStoreSparseFeedback(ins.dst[1], resultId);
  }

  // Translates the gather4 family (plain, depth-compare, extended
  // offset, sparse) to OpImageGather / OpImageDrefGather.
  void emitTextureGather(
    const DxbcShaderInstruction& ins) {
    // Gather4 takes the following operands:
    // (dst0) The destination register
    // (dst1) The residency code for sparse ops
    // (src0) Texture coordinates
    // (src1) The texture itself
    // (src2) The sampler, with a component selector
    // Gather4C takes the following additional operand:
    // (src3) The depth reference value
    // The Gather4Po variants take an additional operand
    // which defines an extended constant offset.
    // TODO reduce code duplication by moving some common code
    // in both sample() and gather() into separate methods
    const bool isExtendedGather = ins.op == DxbcOpcode::Gather4Po
      || ins.op == DxbcOpcode::Gather4PoC
      || ins.op == DxbcOpcode::Gather4PoS
      || ins.op == DxbcOpcode::Gather4PoCS;

    const DxbcRegister& texCoordReg = ins.src[0];
    const DxbcRegister&
      textureReg = ins.src[1 + isExtendedGather];
    const DxbcRegister& samplerReg = ins.src[2 + isExtendedGather];

    // Texture and sampler register IDs
    const auto& texture = m_textures.at(textureReg.idx[0].offset);
    const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);

    // Image type, which stores the image dimensions etc.
    const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);

    // Load the texture coordinates. SPIR-V allows these
    // to be float4 even if not all components are used.
    DxbcRegisterValue coord = emitLoadTexCoord(texCoordReg, texture.imageInfo);

    // Load reference value for depth-compare operations
    const bool isDepthCompare = ins.op == DxbcOpcode::Gather4C
      || ins.op == DxbcOpcode::Gather4PoC
      || ins.op == DxbcOpcode::Gather4CS
      || ins.op == DxbcOpcode::Gather4PoCS;

    const DxbcRegisterValue referenceValue = isDepthCompare
      ? emitRegisterLoad(ins.src[3 + isExtendedGather],
          DxbcRegMask(true, false, false, false))
      : DxbcRegisterValue();

    // Accumulate additional image operands.
    SpirvImageOperands imageOperands;
    imageOperands.sparse = ins.dstCount == 2;

    if (isExtendedGather) {
      // Programmable offset variant: offset comes from a register
      m_module.enableCapability(spv::CapabilityImageGatherExtended);

      DxbcRegisterValue gatherOffset = emitRegisterLoad(
        ins.src[1], DxbcRegMask::firstN(imageLayerDim));

      imageOperands.flags |= spv::ImageOperandsOffsetMask;
      imageOperands.gOffset = gatherOffset.id;
    } else if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {
      // Immediate offset variant, taken from the sample controls.
      // NOTE(review): template arguments appear stripped by extraction
      // on this std::array declaration — verify against upstream.
      const std::array offsetIds = {
        imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,
        imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,
        imageLayerDim >= 3 ?
          m_module.consti32(ins.sampleControls.w) : 0,
      };

      imageOperands.flags |= spv::ImageOperandsConstOffsetMask;
      imageOperands.sConstOffset = offsetIds[0];

      if (imageLayerDim > 1) {
        imageOperands.sConstOffset = m_module.constComposite(
          getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),
          imageLayerDim, offsetIds.data());
      }
    }

    // Gathering texels always returns a four-component
    // vector, even for the depth-compare variants.
    uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, isDepthCompare);

    DxbcVectorType texelType;
    texelType.ctype = texture.sampledType;
    texelType.ccount = 4;

    uint32_t texelTypeId = getVectorTypeId(texelType);
    uint32_t resultTypeId = texelTypeId;
    uint32_t resultId = 0;

    if (imageOperands.sparse)
      resultTypeId = getSparseResultTypeId(texelTypeId);

    if (sampledImageId) {
      switch (ins.op) {
        // Simple image gather operation
        case DxbcOpcode::Gather4:
        case DxbcOpcode::Gather4S:
        case DxbcOpcode::Gather4Po:
        case DxbcOpcode::Gather4PoS: {
          // The sampler swizzle's first component selects
          // which texel component gets gathered
          resultId = m_module.opImageGather(
            resultTypeId, sampledImageId, coord.id,
            m_module.consti32(samplerReg.swizzle[0]),
            imageOperands);
        } break;

        // Depth-compare operation
        case DxbcOpcode::Gather4C:
        case DxbcOpcode::Gather4CS:
        case DxbcOpcode::Gather4PoC:
        case DxbcOpcode::Gather4PoCS: {
          resultId = m_module.opImageDrefGather(
            resultTypeId, sampledImageId, coord.id,
            referenceValue.id, imageOperands);
        } break;

        default:
          Logger::warn(str::format(
            "DxbcCompiler: Unhandled instruction: ",
            ins.op));
          return;
      }
    } else {
      Logger::warn(str::format("DxbcCompiler: ", ins.op, ": Unsupported image type"));
      resultId = m_module.constNull(resultTypeId);
    }

    // If necessary, deal with the sparse result
    DxbcRegisterValue result;
    result.type = texelType;
    result.id = imageOperands.sparse
      ?
        emitExtractSparseTexel(texelTypeId, resultId)
      : resultId;

    // Swizzle components using the texture swizzle
    // and the destination operand's write mask
    result = emitRegisterSwizzle(result,
      textureReg.swizzle, ins.dst[0].mask);

    emitRegisterStore(ins.dst[0], result);

    if (imageOperands.sparse)
      emitStoreSparseFeedback(ins.dst[1], resultId);
  }

  // Translates the sample instruction family (sample, sample_c,
  // sample_c_lz, sample_d, sample_l, sample_b and their sparse /
  // clamped variants) to the matching SPIR-V sample opcodes.
  void emitTextureSample(
    const DxbcShaderInstruction& ins) {
    // All sample instructions have at least these operands:
    // (dst0) The destination register
    // (src0) Texture coordinates
    // (src1) The texture itself
    // (src2) The sampler object
    const DxbcRegister& texCoordReg = ins.src[0];
    const DxbcRegister& textureReg = ins.src[1];
    const DxbcRegister& samplerReg = ins.src[2];

    // Texture and sampler register IDs
    const auto& texture = m_textures.at(textureReg.idx[0].offset);
    const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);
    const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);

    // Load the texture coordinates. SPIR-V allows these
    // to be float4 even if not all components are used.
    DxbcRegisterValue coord = emitLoadTexCoord(texCoordReg, texture.imageInfo);

    // Load reference value for depth-compare operations
    const bool isDepthCompare = ins.op == DxbcOpcode::SampleC
      || ins.op == DxbcOpcode::SampleClz
      || ins.op == DxbcOpcode::SampleCClampS
      || ins.op == DxbcOpcode::SampleClzS;

    const DxbcRegisterValue referenceValue = isDepthCompare
      ? emitRegisterLoad(ins.src[3], DxbcRegMask(true, false, false, false))
      : DxbcRegisterValue();

    // Load explicit gradients for sample operations that require them
    const bool hasExplicitGradients = ins.op == DxbcOpcode::SampleD
      || ins.op == DxbcOpcode::SampleDClampS;

    const DxbcRegisterValue explicitGradientX = hasExplicitGradients
      ?
        emitRegisterLoad(ins.src[3], DxbcRegMask::firstN(imageLayerDim))
      : DxbcRegisterValue();

    const DxbcRegisterValue explicitGradientY = hasExplicitGradients
      ? emitRegisterLoad(ins.src[4], DxbcRegMask::firstN(imageLayerDim))
      : DxbcRegisterValue();

    // LOD for certain sample operations
    const bool hasLod = ins.op == DxbcOpcode::SampleL
      || ins.op == DxbcOpcode::SampleLS
      || ins.op == DxbcOpcode::SampleB
      || ins.op == DxbcOpcode::SampleBClampS;

    const DxbcRegisterValue lod = hasLod
      ? emitRegisterLoad(ins.src[3], DxbcRegMask(true, false, false, false))
      : DxbcRegisterValue();

    // Min LOD for certain sparse operations
    const bool hasMinLod = ins.op == DxbcOpcode::SampleClampS
      || ins.op == DxbcOpcode::SampleBClampS
      || ins.op == DxbcOpcode::SampleDClampS
      || ins.op == DxbcOpcode::SampleCClampS;

    // The min-LOD operand is the last source and may be null
    const DxbcRegisterValue minLod = hasMinLod && ins.src[ins.srcCount - 1].type != DxbcOperandType::Null
      ? emitRegisterLoad(ins.src[ins.srcCount - 1], DxbcRegMask(true, false, false, false))
      : DxbcRegisterValue();

    // Accumulate additional image operands. These are
    // not part of the actual operand token in SPIR-V.
    SpirvImageOperands imageOperands;
    imageOperands.sparse = ins.dstCount == 2;

    if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {
      // Immediate texel offsets from the instruction's sample controls.
      // NOTE(review): template arguments appear stripped by extraction
      // on this std::array declaration — verify against upstream.
      const std::array offsetIds = {
        imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,
        imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,
        imageLayerDim >= 3 ?
          m_module.consti32(ins.sampleControls.w) : 0,
      };

      imageOperands.flags |= spv::ImageOperandsConstOffsetMask;
      imageOperands.sConstOffset = offsetIds[0];

      if (imageLayerDim > 1) {
        imageOperands.sConstOffset = m_module.constComposite(
          getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),
          imageLayerDim, offsetIds.data());
      }
    }

    if (hasMinLod) {
      m_module.enableCapability(spv::CapabilityMinLod);

      imageOperands.flags |= spv::ImageOperandsMinLodMask;
      imageOperands.sMinLod = minLod.id;
    }

    // Combine the texture and the sampler into a sampled image
    uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, isDepthCompare);

    // Sampling an image always returns a four-component
    // vector, whereas depth-compare ops return a scalar.
    DxbcVectorType texelType;
    texelType.ctype = texture.sampledType;
    texelType.ccount = isDepthCompare ? 1 : 4;

    uint32_t texelTypeId = getVectorTypeId(texelType);
    uint32_t resultTypeId = texelTypeId;
    uint32_t resultId = 0;

    if (imageOperands.sparse)
      resultTypeId = getSparseResultTypeId(texelTypeId);

    if (sampledImageId) {
      switch (ins.op) {
        // Simple image sample operation
        case DxbcOpcode::Sample:
        case DxbcOpcode::SampleClampS: {
          resultId = m_module.opImageSampleImplicitLod(
            resultTypeId, sampledImageId, coord.id,
            imageOperands);
        } break;

        // Depth-compare operation
        case DxbcOpcode::SampleC:
        case DxbcOpcode::SampleCClampS: {
          resultId = m_module.opImageSampleDrefImplicitLod(
            resultTypeId, sampledImageId, coord.id,
            referenceValue.id, imageOperands);
        } break;

        // Depth-compare operation on mip level zero
        case DxbcOpcode::SampleClz:
        case DxbcOpcode::SampleClzS: {
          imageOperands.flags |= spv::ImageOperandsLodMask;
          imageOperands.sLod = m_module.constf32(0.0f);

          resultId = m_module.opImageSampleDrefExplicitLod(
            resultTypeId, sampledImageId, coord.id,
            referenceValue.id, imageOperands);
        } break;

        // Sample operation with explicit gradients
        case DxbcOpcode::SampleD:
        case DxbcOpcode::SampleDClampS: {
          imageOperands.flags |= spv::ImageOperandsGradMask;
          imageOperands.sGradX = explicitGradientX.id;
          imageOperands.sGradY = explicitGradientY.id;

          resultId = m_module.opImageSampleExplicitLod(
            resultTypeId, sampledImageId, coord.id,
            imageOperands);
        } break;

        // Sample operation with explicit LOD
        case DxbcOpcode::SampleL:
        case DxbcOpcode::SampleLS: {
          imageOperands.flags |= spv::ImageOperandsLodMask;
          imageOperands.sLod = lod.id;

          resultId = m_module.opImageSampleExplicitLod(
            resultTypeId, sampledImageId, coord.id,
            imageOperands);
        } break;

        // Sample operation with LOD bias
        case DxbcOpcode::SampleB:
        case DxbcOpcode::SampleBClampS: {
          imageOperands.flags |= spv::ImageOperandsBiasMask;
          imageOperands.sLodBias = lod.id;

          resultId = m_module.opImageSampleImplicitLod(
            resultTypeId, sampledImageId, coord.id,
            imageOperands);
        } break;

        default:
          Logger::warn(str::format(
            "DxbcCompiler: Unhandled instruction: ",
            ins.op));
          return;
      }
    } else {
      // Unsupported image type: return a null constant instead
      Logger::warn(str::format("DxbcCompiler: ", ins.op, ": Unsupported image type"));
      resultId = m_module.constNull(resultTypeId);
    }

    DxbcRegisterValue result;
    result.type = texelType;
    result.id = imageOperands.sparse
      ?
        emitExtractSparseTexel(texelTypeId, resultId)
      : resultId;

    // Swizzle components using the texture swizzle
    // and the destination operand's write mask
    if (result.type.ccount != 1) {
      result = emitRegisterSwizzle(result,
        textureReg.swizzle, ins.dst[0].mask);
    }

    emitRegisterStore(ins.dst[0], result);

    if (imageOperands.sparse)
      emitStoreSparseFeedback(ins.dst[1], resultId);
  }

  // Translates ld_uav_typed: reads a four-component texel from a
  // typed UAV via OpImageRead, with coherence operands if needed.
  void emitTypedUavLoad(
    const DxbcShaderInstruction& ins) {
    // load_uav_typed has three operands:
    // (dst0) The destination register
    // (src0) The texture or buffer coordinates
    // (src1) The UAV to load from
    const uint32_t registerId = ins.src[1].idx[0].offset;
    const DxbcUav uavInfo = m_uavs.at(registerId);

    // Order this read against prior writes to the same UAV
    emitUavBarrier(uint64_t(1u) << registerId, 0u);

    // Load texture coordinates
    DxbcRegisterValue texCoord = emitLoadTexCoord(
      ins.src[0], uavInfo.imageInfo);

    SpirvImageOperands imageOperands;
    imageOperands.sparse = ins.dstCount == 2;

    if (uavInfo.coherence) {
      imageOperands.flags |= spv::ImageOperandsNonPrivateTexelMask
        | spv::ImageOperandsMakeTexelVisibleMask;
      imageOperands.makeVisible = m_module.constu32(uavInfo.coherence);
    }

    DxbcVectorType texelType;
    texelType.ctype = uavInfo.sampledType;
    texelType.ccount = 4;

    uint32_t texelTypeId = getVectorTypeId(texelType);
    uint32_t resultTypeId = texelTypeId;
    uint32_t resultId = 0;

    if (imageOperands.sparse)
      resultTypeId = getSparseResultTypeId(texelTypeId);

    // Load source value from the UAV
    resultId = m_module.opImageRead(resultTypeId,
      m_module.opLoad(uavInfo.imageTypeId, uavInfo.varId),
      texCoord.id, imageOperands);

    // Apply component swizzle and mask
    DxbcRegisterValue uavValue;
    uavValue.type = texelType;
    uavValue.id = imageOperands.sparse
      ?
        emitExtractSparseTexel(texelTypeId, resultId)
      : resultId;

    uavValue = emitRegisterSwizzle(uavValue,
      ins.src[1].swizzle, ins.dst[0].mask);

    emitRegisterStore(ins.dst[0], uavValue);

    if (imageOperands.sparse)
      emitStoreSparseFeedback(ins.dst[1], resultId);
  }

  // Translates store_uav_typed: writes a value to a typed UAV via
  // OpImageWrite, bitcast to the image's component type.
  void emitTypedUavStore(
    const DxbcShaderInstruction& ins) {
    // store_uav_typed has three operands:
    // (dst0) The destination UAV
    // (src0) The texture or buffer coordinates
    // (src1) The value to store
    const DxbcBufferInfo uavInfo = getBufferInfo(ins.dst[0]);
    // Order this write against prior accesses to the same UAV
    emitUavBarrier(0u, uint64_t(1u) << ins.dst[0].idx[0].offset);

    // Set image operands for coherent access if necessary
    SpirvImageOperands imageOperands;

    if (uavInfo.coherence) {
      imageOperands.flags |= spv::ImageOperandsNonPrivateTexelMask
        | spv::ImageOperandsMakeTexelAvailableMask;
      imageOperands.makeAvailable = m_module.constu32(uavInfo.coherence);
    }

    // Load texture coordinates
    DxbcRegisterValue texCoord = emitLoadTexCoord(ins.src[0], uavInfo.image);

    // Load the value that will be written to the image. We'll
    // have to cast it to the component type of the image.
    const DxbcRegisterValue texValue = emitRegisterBitcast(
      emitRegisterLoad(ins.src[1], DxbcRegMask(true, true, true, true)),
      uavInfo.stype);

    // Write the given value to the image
    m_module.opImageWrite(
      m_module.opLoad(uavInfo.typeId, uavInfo.varId),
      texCoord.id, texValue.id, imageOperands);
  }

  // Opens a structured 'if' construct. The actual branch and
  // selection-merge instructions are written later by EndIf, once
  // we know whether an 'else' block exists.
  void emitControlFlowIf(
    const DxbcShaderInstruction& ins) {
    // Load the first component of the condition
    // operand and perform a zero test on it.
    const DxbcRegisterValue condition = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, false, false, false));

    // Declare the 'if' block.
    // We do not know if there
    // will be an 'else' block or not, so we'll assume
    // that there is one and leave it empty otherwise.
    DxbcCfgBlock block;
    block.type = DxbcCfgBlockType::If;
    block.b_if.ztestId = emitRegisterZeroTest(condition, ins.controls.zeroTest()).id;
    block.b_if.labelIf = m_module.allocateId();
    block.b_if.labelElse = 0;
    block.b_if.labelEnd = m_module.allocateId();
    block.b_if.headerPtr = m_module.getInsertionPtr();
    m_controlFlowBlocks.push_back(block);

    // We'll insert the branch instruction when closing
    // the block, since we don't know whether or not an
    // else block is needed right now.
    m_module.opLabel(block.b_if.labelIf);
  }

  // Handles 'else': closes the 'if' body and opens the else block.
  void emitControlFlowElse(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() == 0
     || m_controlFlowBlocks.back().type != DxbcCfgBlockType::If
     || m_controlFlowBlocks.back().b_if.labelElse != 0)
      throw DxvkError("DxbcCompiler: 'Else' without 'If' found");

    // Set the 'Else' flag so that we do
    // not insert a dummy block on 'EndIf'
    DxbcCfgBlock& block = m_controlFlowBlocks.back();
    block.b_if.labelElse = m_module.allocateId();

    // Close the 'If' block by branching to
    // the merge block we declared earlier
    m_module.opBranch(block.b_if.labelEnd);
    m_module.opLabel (block.b_if.labelElse);
  }

  // Handles 'endif': retroactively writes the selection-merge
  // header at the recorded insertion point, then emits the merge
  // block label.
  void emitControlFlowEndIf(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() == 0
     || m_controlFlowBlocks.back().type != DxbcCfgBlockType::If)
      throw DxvkError("DxbcCompiler: 'EndIf' without 'If' found");

    // Remove the block from the stack, it's closed
    DxbcCfgBlock block = m_controlFlowBlocks.back();
    m_controlFlowBlocks.pop_back();

    // Write out the 'if' header
    m_module.beginInsertion(block.b_if.headerPtr);

    m_module.opSelectionMerge(
      block.b_if.labelEnd,
      spv::SelectionControlMaskNone);

    m_module.opBranchConditional(
      block.b_if.ztestId,
      block.b_if.labelIf,
      block.b_if.labelElse != 0
        ?
          block.b_if.labelElse
        : block.b_if.labelEnd);

    m_module.endInsertion();

    // End the active 'if' or 'else' block
    m_module.opBranch(block.b_if.labelEnd);
    m_module.opLabel (block.b_if.labelEnd);
  }

  // Opens a structured 'switch' construct. The OpSwitch itself is
  // inserted retroactively by EndSwitch, once all cases are known.
  void emitControlFlowSwitch(
    const DxbcShaderInstruction& ins) {
    // Load the selector as a scalar unsigned integer
    const DxbcRegisterValue selector = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, false, false, false));

    // Declare switch block. We cannot insert the switch
    // instruction itself yet because the number of case
    // statements and blocks is unknown at this point.
    DxbcCfgBlock block;
    block.type = DxbcCfgBlockType::Switch;
    block.b_switch.insertPtr = m_module.getInsertionPtr();
    block.b_switch.selectorId = selector.id;
    block.b_switch.labelBreak = m_module.allocateId();
    block.b_switch.labelCase = m_module.allocateId();
    block.b_switch.labelDefault = 0;
    block.b_switch.labelCases = nullptr;
    m_controlFlowBlocks.push_back(block);

    // Define the first 'case' label
    m_module.opLabel(block.b_switch.labelCase);
  }

  // Handles 'case': records a literal/label pair for the pending
  // OpSwitch, allocating a new block unless this case falls
  // through from the previous one.
  void emitControlFlowCase(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() == 0
     || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)
      throw DxvkError("DxbcCompiler: 'Case' without 'Switch' found");

    // The source operand must be a 32-bit immediate.
    if (ins.src[0].type != DxbcOperandType::Imm32)
      throw DxvkError("DxbcCompiler: Invalid operand type for 'Case'");

    // Use the last label allocated for 'case'.
    DxbcCfgBlockSwitch* block = &m_controlFlowBlocks.back().b_switch;

    if (caseBlockIsFallthrough()) {
      block->labelCase = m_module.allocateId();

      m_module.opBranch(block->labelCase);
      m_module.opLabel (block->labelCase);
    }

    // Prepend to the singly-linked list of case labels;
    // EndSwitch restores the original order later.
    DxbcSwitchLabel label;
    label.desc.literal = ins.src[0].imm.u32_1;
    label.desc.labelId = block->labelCase;
    label.next = block->labelCases;
    block->labelCases = new DxbcSwitchLabel(label);
  }

  void
  // Handles 'default': marks the current (or a fresh, if falling
  // through) case block as the switch's default target.
  emitControlFlowDefault(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() == 0
     || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)
      throw DxvkError("DxbcCompiler: 'Default' without 'Switch' found");

    DxbcCfgBlockSwitch* block = &m_controlFlowBlocks.back().b_switch;

    if (caseBlockIsFallthrough()) {
      block->labelCase = m_module.allocateId();

      m_module.opBranch(block->labelCase);
      m_module.opLabel (block->labelCase);
    }

    // Set the last label allocated for 'case' as the default label.
    block->labelDefault = block->labelCase;
  }

  // Handles 'endswitch': retroactively inserts the OpSwitch with
  // all gathered case labels and opens the break/merge block.
  void emitControlFlowEndSwitch(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() == 0
     || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)
      throw DxvkError("DxbcCompiler: 'EndSwitch' without 'Switch' found");

    // Remove the block from the stack, it's closed
    DxbcCfgBlock block = m_controlFlowBlocks.back();
    m_controlFlowBlocks.pop_back();

    // Without an explicit default, fall back to either the break
    // block or the last open case block, depending on fallthrough
    if (!block.b_switch.labelDefault) {
      block.b_switch.labelDefault = caseBlockIsFallthrough()
        ? block.b_switch.labelBreak
        : block.b_switch.labelCase;
    }

    // Close the current 'case' block
    m_module.opBranch(block.b_switch.labelBreak);

    // Insert the 'switch' statement.
    // For that, we need to
    // gather all the literal-label pairs for the construct.
    m_module.beginInsertion(block.b_switch.insertPtr);
    m_module.opSelectionMerge(
      block.b_switch.labelBreak,
      spv::SelectionControlMaskNone);

    // We'll restore the original order of the case labels here
    // NOTE(review): the template argument of this std::vector appears
    // to have been lost in extraction — verify against upstream.
    std::vector jumpTargets;
    for (auto i = block.b_switch.labelCases; i != nullptr; i = i->next)
      jumpTargets.insert(jumpTargets.begin(), i->desc);

    m_module.opSwitch(
      block.b_switch.selectorId,
      block.b_switch.labelDefault,
      jumpTargets.size(),
      jumpTargets.data());
    m_module.endInsertion();

    // Destroy the list of case labels
    // FIXME we're leaking memory if compilation fails.
    DxbcSwitchLabel* caseLabel = block.b_switch.labelCases;

    while (caseLabel != nullptr)
      delete std::exchange(caseLabel, caseLabel->next);

    // Begin new block after switch blocks
    m_module.opLabel(block.b_switch.labelBreak);
  }

  // Opens a structured 'loop' construct: header with OpLoopMerge,
  // then the loop body block. Continue/break labels are allocated
  // up front so break/continue instructions can target them.
  void emitControlFlowLoop(
    const DxbcShaderInstruction& ins) {
    // Declare the 'loop' block
    DxbcCfgBlock block;
    block.type = DxbcCfgBlockType::Loop;
    block.b_loop.labelHeader = m_module.allocateId();
    block.b_loop.labelBegin = m_module.allocateId();
    block.b_loop.labelContinue = m_module.allocateId();
    block.b_loop.labelBreak = m_module.allocateId();
    m_controlFlowBlocks.push_back(block);

    m_module.opBranch(block.b_loop.labelHeader);
    m_module.opLabel (block.b_loop.labelHeader);

    m_module.opLoopMerge(
      block.b_loop.labelBreak,
      block.b_loop.labelContinue,
      spv::LoopControlMaskNone);

    m_module.opBranch(block.b_loop.labelBegin);
    m_module.opLabel (block.b_loop.labelBegin);
  }

  // Handles 'endloop': emits the continue block (branching back to
  // the loop header) and opens the break/merge block.
  void emitControlFlowEndLoop(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() == 0
     || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Loop)
      throw DxvkError("DxbcCompiler: 'EndLoop' without 'Loop' found");

    // Remove the block from the stack, it's closed
    const DxbcCfgBlock block = m_controlFlowBlocks.back();
m_controlFlowBlocks.pop_back();\n \n // Declare the continue block\n m_module.opBranch(block.b_loop.labelContinue);\n m_module.opLabel (block.b_loop.labelContinue);\n \n // Declare the merge block\n m_module.opBranch(block.b_loop.labelHeader);\n m_module.opLabel (block.b_loop.labelBreak);\n }\n void emitControlFlowBreak(\n const DxbcShaderInstruction& ins) {\n const bool isBreak = ins.op == DxbcOpcode::Break;\n \n DxbcCfgBlock* cfgBlock = isBreak\n ? cfgFindBlock({ DxbcCfgBlockType::Loop, DxbcCfgBlockType::Switch })\n : cfgFindBlock({ DxbcCfgBlockType::Loop });\n \n if (cfgBlock == nullptr)\n throw DxvkError(\"DxbcCompiler: 'Break' or 'Continue' outside 'Loop' or 'Switch' found\");\n \n if (cfgBlock->type == DxbcCfgBlockType::Loop) {\n m_module.opBranch(isBreak\n ? cfgBlock->b_loop.labelBreak\n : cfgBlock->b_loop.labelContinue);\n } else /* if (cfgBlock->type == DxbcCfgBlockType::Switch) */ {\n m_module.opBranch(cfgBlock->b_switch.labelBreak);\n }\n \n // Subsequent instructions assume that there is an open block\n const uint32_t labelId = m_module.allocateId();\n m_module.opLabel(labelId);\n \n // If this is on the same level as a switch-case construct,\n // rather than being nested inside an 'if' statement, close\n // the current 'case' block.\n if (m_controlFlowBlocks.back().type == DxbcCfgBlockType::Switch)\n cfgBlock->b_switch.labelCase = labelId;\n }\n void emitControlFlowBreakc(\n const DxbcShaderInstruction& ins) {\n const bool isBreak = ins.op == DxbcOpcode::Breakc;\n \n DxbcCfgBlock* cfgBlock = isBreak\n ? 
cfgFindBlock({ DxbcCfgBlockType::Loop, DxbcCfgBlockType::Switch })
        : cfgFindBlock({ DxbcCfgBlockType::Loop });
      
      if (cfgBlock == nullptr)
        throw DxvkError("DxbcCompiler: 'Breakc' or 'Continuec' outside 'Loop' or 'Switch' found");
      
      // Perform zero test on the first component of the condition
      const DxbcRegisterValue condition = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));
      
      const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
        condition, ins.controls.zeroTest());
      
      // We basically have to wrap this into an 'if' block
      const uint32_t breakBlock = m_module.allocateId();
      const uint32_t mergeBlock = m_module.allocateId();
      
      m_module.opSelectionMerge(mergeBlock,
        spv::SelectionControlMaskNone);
      
      m_module.opBranchConditional(
        zeroTest.id, breakBlock, mergeBlock);
      
      m_module.opLabel(breakBlock);
      
      // Branch target depends on construct kind, same as for
      // the unconditional break/continue path
      if (cfgBlock->type == DxbcCfgBlockType::Loop) {
        m_module.opBranch(isBreak
          ? cfgBlock->b_loop.labelBreak
          : cfgBlock->b_loop.labelContinue);
      } else /* if (cfgBlock->type == DxbcCfgBlockType::Switch) */ {
        m_module.opBranch(cfgBlock->b_switch.labelBreak);
      }
      
      m_module.opLabel(mergeBlock);
    }
    
    
    // Handles 'ret'. Inside structured control flow this emits an
    // OpReturn and re-opens a block; at the top level it ends the
    // current function instead.
    void emitControlFlowRet(
    const DxbcShaderInstruction& ins) {
      if (m_controlFlowBlocks.size() != 0) {
        uint32_t labelId = m_module.allocateId();
        
        m_module.opReturn();
        m_module.opLabel(labelId);

        // return can be used in place of break to terminate a case block
        if (m_controlFlowBlocks.back().type == DxbcCfgBlockType::Switch)
          m_controlFlowBlocks.back().b_switch.labelCase = labelId;

        // Returning from nested control flow makes the remainder
        // of the top level potentially non-uniform
        m_topLevelIsUniform = false;
      } else {
        // Last instruction in the current function
        this->emitFunctionEnd();
      }
    }
    
    
    // Handles conditional 'retc': wraps OpReturn in a selection
    // construct on the zero-tested condition.
    void emitControlFlowRetc(
    const DxbcShaderInstruction& ins) {
      // Perform zero test on the first component of the condition
      const DxbcRegisterValue condition = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));
      
      const DxbcRegisterValue zeroTest = 
emitRegisterZeroTest(
        condition, ins.controls.zeroTest());
      
      // We basically have to wrap this into an 'if' block
      const uint32_t returnLabel   = m_module.allocateId();
      const uint32_t continueLabel = m_module.allocateId();
      
      m_module.opSelectionMerge(continueLabel,
        spv::SelectionControlMaskNone);
      
      m_module.opBranchConditional(
        zeroTest.id, returnLabel, continueLabel);
      
      m_module.opLabel(returnLabel);
      m_module.opReturn();

      m_module.opLabel(continueLabel);

      // The return condition may be non-uniform
      m_topLevelIsUniform = false;
    }
    
    
    // Handles 'discard' in pixel shaders via demote-to-helper, which
    // keeps derivatives valid unlike OpKill.
    void emitControlFlowDiscard(
    const DxbcShaderInstruction& ins) {
      // Discard actually has an operand that determines
      // whether or not the fragment should be discarded
      const DxbcRegisterValue condition = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));
      
      const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
        condition, ins.controls.zeroTest());
      
      DxbcConditional cond;
      cond.labelIf  = m_module.allocateId();
      cond.labelEnd = m_module.allocateId();
      
      m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);
      m_module.opBranchConditional(zeroTest.id, cond.labelIf, cond.labelEnd);
      
      m_module.opLabel(cond.labelIf);
      m_module.opDemoteToHelperInvocation();
      m_module.opBranch(cond.labelEnd);
      
      m_module.opLabel(cond.labelEnd);

      m_module.enableCapability(spv::CapabilityDemoteToHelperInvocation);

      // Discard is just retc in a trenchcoat
      m_topLevelIsUniform = false;
    }
    
    
    // Handles 'label': begins a new void() subroutine for the given
    // function number, to be invoked by 'call'/'callc'.
    void emitControlFlowLabel(
    const DxbcShaderInstruction& ins) {
      uint32_t functionNr = ins.dst[0].idx[0].offset;
      uint32_t functionId = getFunctionId(functionNr);
      
      this->emitFunctionBegin(
        functionId,
        m_module.defVoidType(),
        m_module.defFunctionType(
          m_module.defVoidType(), 0, nullptr));
      
      m_module.opLabel(m_module.allocateId());
      m_module.setDebugName(functionId, str::format("label", functionNr).c_str());
      
      m_insideFunction = true;

      // We have to
// assume that this function gets
      // called from non-uniform control flow
      m_topLevelIsUniform = false;
    }
    
    
    // Handles 'call': unconditional subroutine invocation.
    void emitControlFlowCall(
    const DxbcShaderInstruction& ins) {
      uint32_t functionNr = ins.src[0].idx[0].offset;
      uint32_t functionId = getFunctionId(functionNr);

      m_module.opFunctionCall(
        m_module.defVoidType(),
        functionId, 0, nullptr);
    }
    
    
    // Handles 'callc': subroutine invocation guarded by a
    // zero-tested condition, wrapped in a selection construct.
    void emitControlFlowCallc(
    const DxbcShaderInstruction& ins) {
      uint32_t functionNr = ins.src[1].idx[0].offset;
      uint32_t functionId = getFunctionId(functionNr);

      // Perform zero test on the first component of the condition
      const DxbcRegisterValue condition = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));
      
      const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
        condition, ins.controls.zeroTest());
      
      // We basically have to wrap this into an 'if' block
      const uint32_t callLabel = m_module.allocateId();
      const uint32_t skipLabel = m_module.allocateId();
      
      m_module.opSelectionMerge(skipLabel,
        spv::SelectionControlMaskNone);
      
      m_module.opBranchConditional(
        zeroTest.id, callLabel, skipLabel);
      
      m_module.opLabel(callLabel);
      m_module.opFunctionCall(
        m_module.defVoidType(),
        functionId, 0, nullptr);

      m_module.opBranch(skipLabel);
      m_module.opLabel(skipLabel);
    }
    
    
    // Dispatches a control-flow instruction to its handler; UAV
    // barriers are inserted around constructs that need them.
    void emitControlFlow(
    const DxbcShaderInstruction& ins) {
      switch (ins.op) {
        case DxbcOpcode::If:
          this->emitUavBarrier(0, 0);
          this->emitControlFlowIf(ins);
          break;
        
        case DxbcOpcode::Else:
          this->emitControlFlowElse(ins);
          break;
        
        case DxbcOpcode::EndIf:
          this->emitControlFlowEndIf(ins);
          this->emitUavBarrier(0, 0);
          break;
        
        case DxbcOpcode::Switch:
          this->emitUavBarrier(0, 0);
          this->emitControlFlowSwitch(ins);
          break;
        
        case DxbcOpcode::Case:
          this->emitControlFlowCase(ins);
          break;
        
        case DxbcOpcode::Default:
          this->emitControlFlowDefault(ins);
          break;
        
        case DxbcOpcode::EndSwitch:
          this->emitControlFlowEndSwitch(ins);
this->emitUavBarrier(0, 0);\n break;\n\n case DxbcOpcode::Loop:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowLoop(ins);\n break;\n \n case DxbcOpcode::EndLoop:\n this->emitControlFlowEndLoop(ins);\n this->emitUavBarrier(0, 0);\n break;\n\n case DxbcOpcode::Break:\n case DxbcOpcode::Continue:\n this->emitControlFlowBreak(ins);\n break;\n \n case DxbcOpcode::Breakc:\n case DxbcOpcode::Continuec:\n this->emitControlFlowBreakc(ins);\n break;\n\n case DxbcOpcode::Ret:\n this->emitControlFlowRet(ins);\n break;\n\n case DxbcOpcode::Retc:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowRetc(ins);\n break;\n \n case DxbcOpcode::Discard:\n this->emitControlFlowDiscard(ins);\n break;\n \n case DxbcOpcode::Label:\n this->emitControlFlowLabel(ins);\n break;\n\n case DxbcOpcode::Call:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowCall(ins);\n this->emitUavBarrier(-1, -1);\n break;\n\n case DxbcOpcode::Callc:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowCallc(ins);\n this->emitUavBarrier(-1, -1);\n break;\n\n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n }\n }\n DxbcRegisterValue emitBuildConstVecf32(\n float x,\n float y,\n float z,\n float w,\n const DxbcRegMask& writeMask) {\n // TODO refactor these functions into one single template\n std::array ids = { 0, 0, 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0]) ids[componentIndex++] = m_module.constf32(x);\n if (writeMask[1]) ids[componentIndex++] = m_module.constf32(y);\n if (writeMask[2]) ids[componentIndex++] = m_module.constf32(z);\n if (writeMask[3]) ids[componentIndex++] = m_module.constf32(w);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? 
m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildConstVecu32(\n uint32_t x,\n uint32_t y,\n uint32_t z,\n uint32_t w,\n const DxbcRegMask& writeMask) {\n std::array ids = { 0, 0, 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0]) ids[componentIndex++] = m_module.constu32(x);\n if (writeMask[1]) ids[componentIndex++] = m_module.constu32(y);\n if (writeMask[2]) ids[componentIndex++] = m_module.constu32(z);\n if (writeMask[3]) ids[componentIndex++] = m_module.constu32(w);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildConstVeci32(\n int32_t x,\n int32_t y,\n int32_t z,\n int32_t w,\n const DxbcRegMask& writeMask) {\n std::array ids = { 0, 0, 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0]) ids[componentIndex++] = m_module.consti32(x);\n if (writeMask[1]) ids[componentIndex++] = m_module.consti32(y);\n if (writeMask[2]) ids[componentIndex++] = m_module.consti32(z);\n if (writeMask[3]) ids[componentIndex++] = m_module.consti32(w);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? 
m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildConstVecf64(\n double xy,\n double zw,\n const DxbcRegMask& writeMask) {\n std::array ids = { 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0] && writeMask[1]) ids[componentIndex++] = m_module.constf64(xy);\n if (writeMask[2] && writeMask[3]) ids[componentIndex++] = m_module.constf64(zw);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Float64;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildVector(\n DxbcRegisterValue scalar,\n uint32_t count) {\n if (count == 1)\n return scalar;\n\n std::array scalarIds =\n { scalar.id, scalar.id, scalar.id, scalar.id };\n\n DxbcRegisterValue result;\n result.type.ctype = scalar.type.ctype;\n result.type.ccount = count;\n result.id = m_module.constComposite(\n getVectorTypeId(result.type),\n count, scalarIds.data());\n return result;\n }\n DxbcRegisterValue emitBuildZeroVector(\n DxbcVectorType type) {\n DxbcRegisterValue result;\n result.type.ctype = type.ctype;\n result.type.ccount = 1;\n\n switch (type.ctype) {\n case DxbcScalarType::Float32: result.id = m_module.constf32(0.0f); break;\n case DxbcScalarType::Uint32: result.id = m_module.constu32(0u); break;\n case DxbcScalarType::Sint32: result.id = m_module.consti32(0); break;\n default: throw DxvkError(\"DxbcCompiler: Invalid scalar type\");\n }\n\n return emitBuildVector(result, type.ccount);\n }\n DxbcRegisterValue emitRegisterBitcast(\n DxbcRegisterValue srcValue,\n DxbcScalarType dstType) {\n DxbcScalarType srcType = srcValue.type.ctype;\n\n if (srcType == dstType)\n return srcValue;\n \n DxbcRegisterValue result;\n result.type.ctype = dstType;\n result.type.ccount = srcValue.type.ccount;\n\n if (isDoubleType(srcType)) 
result.type.ccount *= 2;\n if (isDoubleType(dstType)) result.type.ccount /= 2;\n\n result.id = m_module.opBitcast(\n getVectorTypeId(result.type),\n srcValue.id);\n return result;\n }\n DxbcRegisterValue emitRegisterSwizzle(\n DxbcRegisterValue value,\n DxbcRegSwizzle swizzle,\n DxbcRegMask writeMask) {\n if (value.type.ccount == 1)\n return emitRegisterExtend(value, writeMask.popCount());\n \n std::array indices;\n \n uint32_t dstIndex = 0;\n \n for (uint32_t i = 0; i < 4; i++) {\n if (writeMask[i])\n indices[dstIndex++] = swizzle[i];\n }\n \n // If the swizzle combined with the mask can be reduced\n // to a no-op, we don't need to insert any instructions.\n bool isIdentitySwizzle = dstIndex == value.type.ccount;\n \n for (uint32_t i = 0; i < dstIndex && isIdentitySwizzle; i++)\n isIdentitySwizzle &= indices[i] == i;\n \n if (isIdentitySwizzle)\n return value;\n \n // Use OpCompositeExtract if the resulting vector contains\n // only one component, and OpVectorShuffle if it is a vector.\n DxbcRegisterValue result;\n result.type.ctype = value.type.ctype;\n result.type.ccount = dstIndex;\n \n const uint32_t typeId = getVectorTypeId(result.type);\n \n if (dstIndex == 1) {\n result.id = m_module.opCompositeExtract(\n typeId, value.id, 1, indices.data());\n } else {\n result.id = m_module.opVectorShuffle(\n typeId, value.id, value.id,\n dstIndex, indices.data());\n }\n \n return result;\n }\n DxbcRegisterValue emitRegisterExtract(\n DxbcRegisterValue value,\n DxbcRegMask mask) {\n return emitRegisterSwizzle(value,\n DxbcRegSwizzle(0, 1, 2, 3), mask);\n }\n DxbcRegisterValue emitRegisterInsert(\n DxbcRegisterValue dstValue,\n DxbcRegisterValue srcValue,\n DxbcRegMask srcMask) {\n DxbcRegisterValue result;\n result.type = dstValue.type;\n \n const uint32_t typeId = getVectorTypeId(result.type);\n \n if (srcMask.popCount() == 0) {\n // Nothing to do if the insertion mask is empty\n result.id = dstValue.id;\n } else if (dstValue.type.ccount == 1) {\n // Both values are 
scalar, so the first component\n // of the write mask decides which one to take.\n result.id = srcMask[0] ? srcValue.id : dstValue.id;\n } else if (srcValue.type.ccount == 1) {\n // The source value is scalar. Since OpVectorShuffle\n // requires both arguments to be vectors, we have to\n // use OpCompositeInsert to modify the vector instead.\n const uint32_t componentId = srcMask.firstSet();\n \n result.id = m_module.opCompositeInsert(typeId,\n srcValue.id, dstValue.id, 1, &componentId);\n } else {\n // Both arguments are vectors. We can determine which\n // components to take from which vector and use the\n // OpVectorShuffle instruction.\n std::array components;\n uint32_t srcComponentId = dstValue.type.ccount;\n \n for (uint32_t i = 0; i < dstValue.type.ccount; i++)\n components.at(i) = srcMask[i] ? srcComponentId++ : i;\n \n result.id = m_module.opVectorShuffle(\n typeId, dstValue.id, srcValue.id,\n dstValue.type.ccount, components.data());\n }\n \n return result;\n }\n DxbcRegisterValue emitRegisterConcat(\n DxbcRegisterValue value1,\n DxbcRegisterValue value2) {\n std::array ids =\n {{ value1.id, value2.id }};\n \n DxbcRegisterValue result;\n result.type.ctype = value1.type.ctype;\n result.type.ccount = value1.type.ccount + value2.type.ccount;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n ids.size(), ids.data());\n return result;\n }\n DxbcRegisterValue emitRegisterExtend(\n DxbcRegisterValue value,\n uint32_t size) {\n if (size == 1)\n return value;\n \n std::array ids = {{\n value.id, value.id,\n value.id, value.id, \n }};\n \n DxbcRegisterValue result;\n result.type.ctype = value.type.ctype;\n result.type.ccount = size;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n size, ids.data());\n return result;\n }\n DxbcRegisterValue emitRegisterAbsolute(\n DxbcRegisterValue value) {\n const uint32_t typeId = getVectorTypeId(value.type);\n \n switch (value.type.ctype) {\n case DxbcScalarType::Float32: 
value.id = m_module.opFAbs(typeId, value.id); break;
        case DxbcScalarType::Float64: value.id = m_module.opFAbs(typeId, value.id); break;
        case DxbcScalarType::Sint32:  value.id = m_module.opSAbs(typeId, value.id); break;
        case DxbcScalarType::Sint64:  value.id = m_module.opSAbs(typeId, value.id); break;
        default: Logger::warn("DxbcCompiler: Cannot get absolute value for given type");
      }
      
      return value;
    }
    
    
    // Negates a value component-wise; unsupported types are passed
    // through unchanged with a warning.
    DxbcRegisterValue emitRegisterNegate(
          DxbcRegisterValue       value) {
      const uint32_t typeId = getVectorTypeId(value.type);
      
      switch (value.type.ctype) {
        case DxbcScalarType::Float32: value.id = m_module.opFNegate(typeId, value.id); break;
        case DxbcScalarType::Float64: value.id = m_module.opFNegate(typeId, value.id); break;
        case DxbcScalarType::Sint32:  value.id = m_module.opSNegate(typeId, value.id); break;
        case DxbcScalarType::Sint64:  value.id = m_module.opSNegate(typeId, value.id); break;
        default: Logger::warn("DxbcCompiler: Cannot negate given type");
      }
      
      return value;
    }
    
    
    // Compares a scalar against zero per the DXBC zero-test mode
    // (z / nz) and yields a single boolean.
    DxbcRegisterValue emitRegisterZeroTest(
          DxbcRegisterValue       value,
          DxbcZeroTest            test) {
      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Bool;
      result.type.ccount = 1;
      
      const uint32_t zeroId = m_module.constu32(0u);
      const uint32_t typeId = getVectorTypeId(result.type);
      
      result.id = test == DxbcZeroTest::TestZ
        ? 
m_module.opIEqual (typeId, value.id, zeroId)
        : m_module.opINotEqual(typeId, value.id, zeroId);
      return result;
    }
    
    
    // Masks each component of a value with a constant bit mask.
    DxbcRegisterValue emitRegisterMaskBits(
          DxbcRegisterValue       value,
          uint32_t                mask) {
      DxbcRegisterValue maskVector = emitBuildConstVecu32(
        mask, mask, mask, mask, DxbcRegMask::firstN(value.type.ccount));
      
      DxbcRegisterValue result;
      result.type = value.type;
      result.id = m_module.opBitwiseAnd(
        getVectorTypeId(result.type),
        value.id, maskVector.id);
      return result;
    }
    
    
    // Applies DXBC source operand modifiers (abs before neg, so
    // '-|x|' works as expected).
    DxbcRegisterValue emitSrcOperandModifiers(
          DxbcRegisterValue       value,
          DxbcRegModifiers        modifiers) {
      if (modifiers.test(DxbcRegModifier::Abs))
        value = emitRegisterAbsolute(value);
      
      if (modifiers.test(DxbcRegModifier::Neg))
        value = emitRegisterNegate(value);
      return value;
    }
    
    
    // Applies DXBC destination operand modifiers; currently only
    // saturation (clamp to [0,1]) for float types.
    DxbcRegisterValue emitDstOperandModifiers(
          DxbcRegisterValue       value,
          DxbcOpModifiers         modifiers) {
      const uint32_t typeId = getVectorTypeId(value.type);
      
      if (modifiers.saturate) {
        DxbcRegMask       mask;
        DxbcRegisterValue vec0, vec1;

        if (value.type.ctype == DxbcScalarType::Float32) {
          mask = DxbcRegMask::firstN(value.type.ccount);
          vec0 = emitBuildConstVecf32(0.0f, 0.0f, 0.0f, 0.0f, mask);
          vec1 = emitBuildConstVecf32(1.0f, 1.0f, 1.0f, 1.0f, mask);
        } else if (value.type.ctype == DxbcScalarType::Float64) {
          // Doubles use two mask bits per component
          mask = DxbcRegMask::firstN(value.type.ccount * 2);
          vec0 = emitBuildConstVecf64(0.0, 0.0, mask);
          vec1 = emitBuildConstVecf64(1.0, 1.0, mask);
        }

        // Non-float types leave the mask empty and are not clamped
        if (mask)
          value.id = m_module.opNClamp(typeId, value.id, vec0.id, vec1.id);
      }
      
      return value;
    }
    
    
    // Extracts the texel component (index 1) from a sparse-feedback
    // result struct.
    uint32_t emitExtractSparseTexel(
          uint32_t          texelTypeId,
          uint32_t          resultId) {
      uint32_t index = 1;

      return m_module.opCompositeExtract(
        texelTypeId, resultId, 1, &index);
    }
    
    
    // Stores the residency code (index 0) of a sparse-feedback result
    // into the given feedback register, if one is bound.
    void emitStoreSparseFeedback(
    const DxbcRegister&     feedbackRegister,
          uint32_t          resultId) {
      if (feedbackRegister.type != DxbcOperandType::Null) {
        uint32_t index = 0;

        DxbcRegisterValue result;
        result.type = { 
DxbcScalarType::Uint32, 1 };
        result.id = m_module.opCompositeExtract(
          getScalarTypeId(DxbcScalarType::Uint32),
          resultId, 1, &index);

        emitRegisterStore(feedbackRegister, result);
      }
    }
    
    
    // Builds an access chain into an array-typed pointer with a
    // single dynamic index.
    DxbcRegisterPointer emitArrayAccess(
          DxbcRegisterPointer     pointer,
          spv::StorageClass       sclass,
          uint32_t                index) {
      uint32_t ptrTypeId = m_module.defPointerType(
        getVectorTypeId(pointer.type), sclass);
      
      DxbcRegisterPointer result;
      result.type = pointer.type;
      result.id = m_module.opAccessChain(
        ptrTypeId, pointer.id, 1, &index);
      return result;
    }
    
    
    // Combines a texture and a sampler into an OpSampledImage value;
    // returns 0 if the required image type was not declared.
    uint32_t emitLoadSampledImage(
    const DxbcShaderResource&     textureResource,
    const DxbcSampler&            samplerResource,
          bool                    isDepthCompare) {
      uint32_t baseId = isDepthCompare
        ? textureResource.depthTypeId
        : textureResource.colorTypeId;

      if (!baseId)
        return 0;

      uint32_t sampledImageType = m_module.defSampledImageType(baseId);

      return m_module.opSampledImage(sampledImageType,
        m_module.opLoad(textureResource.imageTypeId, textureResource.varId),
        m_module.opLoad(samplerResource.typeId, samplerResource.varId));
    }
    
    
    // Returns a pointer to a temp register r#, lazily declaring the
    // private variable on first use.
    DxbcRegisterPointer emitGetTempPtr(
    const DxbcRegister&           operand) {
      // r# regs are indexed as follows:
      //    (0) register index (immediate)
      uint32_t regIdx = operand.idx[0].offset;

      if (regIdx >= m_rRegs.size())
        m_rRegs.resize(regIdx + 1, 0u);

      if (!m_rRegs.at(regIdx)) {
        // Declare the register variable on demand
        DxbcRegisterInfo info;
        info.type.ctype   = DxbcScalarType::Float32;
        info.type.ccount  = 4;
        info.type.alength = 0;
        info.sclass = spv::StorageClassPrivate;

        uint32_t varId = emitNewVariable(info);
        m_rRegs.at(regIdx) = varId;

        m_module.setDebugName(varId,
          str::format("r", regIdx).c_str());
      }

      DxbcRegisterPointer result;
      result.type.ctype  = DxbcScalarType::Float32;
      result.type.ccount = 4;
      result.id = m_rRegs.at(regIdx);
      return result;
    }
    
    
    // Returns a pointer into an indexable temp array x#[n], with the
    // element index loaded from the second operand index.
    DxbcRegisterPointer emitGetIndexableTempPtr(
    const DxbcRegister&           operand) {
      return getIndexableTempPtr(operand, 
emitIndexLoad(operand.idx[1]));\n }\n DxbcRegisterPointer emitGetInputPtr(\n const DxbcRegister& operand) {\n // In the vertex and pixel stages,\n // v# regs are indexed as follows:\n // (0) register index (relative)\n // \n // In the tessellation and geometry\n // stages, the index has two dimensions:\n // (0) vertex index (relative)\n // (1) register index (relative)\n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = 4;\n \n std::array indices = {{ 0, 0 }};\n \n for (uint32_t i = 0; i < operand.idxDim; i++)\n indices.at(i) = emitIndexLoad(operand.idx[i]).id;\n \n // Pick the input array depending on\n // the program type and operand type\n struct InputArray {\n uint32_t id;\n spv::StorageClass sclass;\n };\n \n const InputArray array = [&] () -> InputArray {\n switch (operand.type) {\n case DxbcOperandType::InputControlPoint:\n return m_programInfo.type() == DxbcProgramType::HullShader\n ? InputArray { m_vArray, spv::StorageClassPrivate }\n : InputArray { m_ds.inputPerVertex, spv::StorageClassInput };\n case DxbcOperandType::InputPatchConstant:\n return m_programInfo.type() == DxbcProgramType::HullShader\n ? 
InputArray { m_hs.outputPerPatch, spv::StorageClassPrivate }\n : InputArray { m_ds.inputPerPatch, spv::StorageClassInput };\n case DxbcOperandType::OutputControlPoint:\n return InputArray { m_hs.outputPerVertex, spv::StorageClassOutput };\n default:\n return { m_vArray, spv::StorageClassPrivate };\n }\n }();\n \n DxbcRegisterInfo info;\n info.type.ctype = result.type.ctype;\n info.type.ccount = result.type.ccount;\n info.type.alength = 0;\n info.sclass = array.sclass;\n \n result.id = m_module.opAccessChain(\n getPointerTypeId(info), array.id,\n operand.idxDim, indices.data());\n \n return result;\n }\n DxbcRegisterPointer emitGetOutputPtr(\n const DxbcRegister& operand) {\n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n // Hull shaders are special in that they have two sets of\n // output registers, one for per-patch values and one for\n // per-vertex values.\n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = 4;\n \n uint32_t registerId = emitIndexLoad(operand.idx[0]).id;\n\n if (m_hs.currPhaseType == DxbcCompilerHsPhase::ControlPoint) {\n std::array indices = {{\n m_module.opLoad(m_module.defIntType(32, 0), m_hs.builtinInvocationId),\n registerId,\n }};\n \n uint32_t ptrTypeId = m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassOutput);\n \n result.id = m_module.opAccessChain(\n ptrTypeId, m_hs.outputPerVertex,\n indices.size(), indices.data());\n } else {\n uint32_t ptrTypeId = m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassPrivate);\n \n result.id = m_module.opAccessChain(\n ptrTypeId, m_hs.outputPerPatch,\n 1, ®isterId);\n }\n\n return result;\n } else {\n // Regular shaders have their output\n // registers set up at declaration time\n return m_oRegs.at(operand.idx[0].offset);\n }\n }\n DxbcRegisterPointer emitGetConstBufPtr(\n const DxbcRegister& operand);\n DxbcRegisterPointer emitGetImmConstBufPtr(\n const DxbcRegister& operand) {\n 
DxbcRegisterValue constId = emitIndexLoad(operand.idx[0]);\n\n if (m_icbArray) {\n // We pad the icb array with an extra zero vector, so we can\n // clamp the index and get correct robustness behaviour.\n constId.id = m_module.opUMin(getVectorTypeId(constId.type),\n constId.id, m_module.constu32(m_icbSize));\n\n DxbcRegisterInfo ptrInfo;\n ptrInfo.type.ctype = DxbcScalarType::Uint32;\n ptrInfo.type.ccount = m_icbComponents;\n ptrInfo.type.alength = 0;\n ptrInfo.sclass = spv::StorageClassPrivate;\n\n DxbcRegisterPointer result;\n result.type.ctype = ptrInfo.type.ctype;\n result.type.ccount = ptrInfo.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(ptrInfo),\n m_icbArray, 1, &constId.id);\n return result;\n } else if (m_constantBuffers.at(Icb_BindingSlotId).varId != 0) {\n const std::array indices =\n {{ m_module.consti32(0), constId.id }};\n \n DxbcRegisterInfo ptrInfo;\n ptrInfo.type.ctype = DxbcScalarType::Float32;\n ptrInfo.type.ccount = m_icbComponents;\n ptrInfo.type.alength = 0;\n ptrInfo.sclass = spv::StorageClassUniform;\n\n DxbcRegisterPointer result;\n result.type.ctype = ptrInfo.type.ctype;\n result.type.ccount = ptrInfo.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(ptrInfo),\n m_constantBuffers.at(Icb_BindingSlotId).varId,\n indices.size(), indices.data());\n return result;\n } else {\n throw DxvkError(\"DxbcCompiler: Immediate constant buffer not defined\");\n }\n }\n DxbcRegisterPointer emitGetOperandPtr(\n const DxbcRegister& operand) {\n switch (operand.type) {\n case DxbcOperandType::Temp:\n return emitGetTempPtr(operand);\n \n case DxbcOperandType::IndexableTemp:\n return emitGetIndexableTempPtr(operand);\n \n case DxbcOperandType::Input:\n case DxbcOperandType::InputControlPoint:\n case DxbcOperandType::InputPatchConstant:\n case DxbcOperandType::OutputControlPoint:\n return emitGetInputPtr(operand);\n \n case DxbcOperandType::Output:\n return emitGetOutputPtr(operand);\n \n case 
DxbcOperandType::ImmediateConstantBuffer:\n return emitGetImmConstBufPtr(operand);\n\n case DxbcOperandType::InputThreadId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinGlobalInvocationId };\n \n case DxbcOperandType::InputThreadGroupId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinWorkgroupId };\n \n case DxbcOperandType::InputThreadIdInGroup:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinLocalInvocationId };\n \n case DxbcOperandType::InputThreadIndexInGroup:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_cs.builtinLocalInvocationIndex };\n \n case DxbcOperandType::InputCoverageMask: {\n const std::array indices\n = {{ m_module.constu32(0) }};\n \n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassInput),\n m_ps.builtinSampleMaskIn,\n indices.size(), indices.data());\n return result;\n }\n \n case DxbcOperandType::OutputCoverageMask: {\n const std::array indices\n = {{ m_module.constu32(0) }};\n \n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassOutput),\n m_ps.builtinSampleMaskOut,\n indices.size(), indices.data());\n return result;\n }\n \n case DxbcOperandType::OutputDepth:\n case DxbcOperandType::OutputDepthGe:\n case DxbcOperandType::OutputDepthLe:\n return DxbcRegisterPointer {\n { DxbcScalarType::Float32, 1 },\n m_ps.builtinDepth };\n \n case DxbcOperandType::OutputStencilRef:\n return DxbcRegisterPointer {\n { DxbcScalarType::Sint32, 1 },\n m_ps.builtinStencilRef };\n\n case DxbcOperandType::InputPrimitiveId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_primitiveIdIn };\n \n case 
DxbcOperandType::InputDomainPoint:
          return DxbcRegisterPointer {
            { DxbcScalarType::Float32, 3 },
            m_ds.builtinTessCoord };
        
        case DxbcOperandType::OutputControlPointId:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 1 },
            m_hs.builtinInvocationId };
        
        case DxbcOperandType::InputForkInstanceId:
        case DxbcOperandType::InputJoinInstanceId:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 1 },
            getCurrentHsForkJoinPhase()->instanceIdPtr };
        
        case DxbcOperandType::InputGsInstanceId:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 1 },
            m_gs.builtinInvocationId };
        
        case DxbcOperandType::InputInnerCoverage:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 1 },
            m_ps.builtinInnerCoverageId };
        
        default:
          throw DxvkError(str::format(
            "DxbcCompiler: Unhandled operand type: ",
            operand.type));
      }
    }
    
    
    // Computes a pointer suitable for atomic operations on a UAV,
    // SSBO or thread-group shared memory resource at the given address.
    DxbcRegisterPointer emitGetAtomicPointer(
    const DxbcRegister&           operand,
    const DxbcRegister&           address) {
      // Query information about the resource itself
      const uint32_t registerId = operand.idx[0].offset;
      const DxbcBufferInfo resourceInfo = getBufferInfo(operand);
      
      // For UAVs and shared memory, different methods
      // of obtaining the final pointer are used.
      bool isTgsm = operand.type == DxbcOperandType::ThreadGroupSharedMemory;
      bool isSsbo = resourceInfo.isSsbo;
      
      // Compute the actual address into the resource
      const DxbcRegisterValue addressValue = [&] {
        switch (resourceInfo.type) {
          case DxbcResourceType::Raw:
            // Raw buffers take a single byte offset
            return emitCalcBufferIndexRaw(emitRegisterLoad(
              address, DxbcRegMask(true, false, false, false)));
          
          case DxbcResourceType::Structured: {
            // Structured buffers take (structure index, byte offset)
            const DxbcRegisterValue addressComponents = emitRegisterLoad(
              address, DxbcRegMask(true, true, false, false));
            
            return emitCalcBufferIndexStructured(
              emitRegisterExtract(addressComponents, DxbcRegMask(true, false, false, false)),
              emitRegisterExtract(addressComponents, DxbcRegMask(false, true, false, false)),
              
resourceInfo.stride);
          };
          
          case DxbcResourceType::Typed: {
            if (isTgsm)
              throw DxvkError("DxbcCompiler: TGSM cannot be typed");
            
            return emitLoadTexCoord(address,
              m_uavs.at(registerId).imageInfo);
          }
          
          default:
            throw DxvkError("DxbcCompiler: Unhandled resource type");
        }
      }();
      
      // Compute the actual pointer
      DxbcRegisterPointer result;
      result.type.ctype  = resourceInfo.stype;
      result.type.ccount = 1;

      if (isTgsm) {
        // Shared memory: plain access chain into the workgroup array
        result.id = m_module.opAccessChain(resourceInfo.typeId,
          resourceInfo.varId, 1, &addressValue.id);
      } else if (isSsbo) {
        // SSBO: first index selects the runtime array member
        uint32_t indices[2] = { m_module.constu32(0), addressValue.id };
        result.id = m_module.opAccessChain(resourceInfo.typeId,
          resourceInfo.varId, 2, indices);
      } else {
        // Typed image: use OpImageTexelPointer for atomic access
        result.id = m_module.opImageTexelPointer(
          m_module.defPointerType(getVectorTypeId(result.type), spv::StorageClassImage),
          resourceInfo.varId, addressValue.id, m_module.constu32(0));
      }

      return result;
    }
    
    
    // Queries the length of the runtime array of an SSBO-backed
    // resource, in elements.
    DxbcRegisterValue emitQueryBufferSize(
    const DxbcRegister&           resource) {
      const DxbcBufferInfo bufferInfo = getBufferInfo(resource);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = 1;
      result.id = m_module.opArrayLength(
        getVectorTypeId(result.type),
        bufferInfo.varId, 0);

      return result;
    }
    
    
    // Queries the size of a texel buffer view via OpImageQuerySize.
    DxbcRegisterValue emitQueryTexelBufferSize(
    const DxbcRegister&           resource) {
      // Load the texel buffer object. 
This cannot be used with\n // constant buffers or any other type of resource.\n const DxbcBufferInfo bufferInfo = getBufferInfo(resource);\n \n const uint32_t bufferId = m_module.opLoad(\n bufferInfo.typeId, bufferInfo.varId);\n \n // We'll store this as a scalar unsigned integer\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opImageQuerySize(\n getVectorTypeId(result.type), bufferId);\n\n return result;\n }\n DxbcRegisterValue emitQueryTextureLods(\n const DxbcRegister& resource) {\n const DxbcBufferInfo info = getBufferInfo(resource);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n \n if (info.image.ms == 0 && info.image.sampled == 1) {\n result.id = m_module.opImageQueryLevels(\n getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId));\n } else {\n // Report one LOD in case of UAVs or multisampled images\n result.id = m_module.constu32(1);\n }\n\n return result;\n }\n DxbcRegisterValue emitQueryTextureSamples(\n const DxbcRegister& resource) {\n if (resource.type == DxbcOperandType::Rasterizer) {\n // SPIR-V has no gl_NumSamples equivalent, so we\n // have to work around it using a push constant\n if (!m_ps.pushConstantId)\n m_ps.pushConstantId = emitPushConstants();\n\n uint32_t uintTypeId = m_module.defIntType(32, 0);\n uint32_t ptrTypeId = m_module.defPointerType(uintTypeId, spv::StorageClassPushConstant);\n uint32_t index = m_module.constu32(0);\n\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opLoad(uintTypeId,\n m_module.opAccessChain(ptrTypeId, m_ps.pushConstantId, 1, &index));\n return result;\n } else {\n DxbcBufferInfo info = getBufferInfo(resource);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n\n if (info.image.ms) {\n result.id = m_module.opImageQuerySamples(\n 
getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId));\n } else {\n // OpImageQuerySamples requires MSAA images\n result.id = m_module.constu32(1);\n }\n \n return result;\n }\n }\n DxbcRegisterValue emitQueryTextureSize(\n const DxbcRegister& resource,\n DxbcRegisterValue lod) {\n const DxbcBufferInfo info = getBufferInfo(resource);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = getTexSizeDim(info.image);\n \n if (info.image.ms == 0 && info.image.sampled == 1) {\n result.id = m_module.opImageQuerySizeLod(\n getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId),\n lod.id);\n } else {\n result.id = m_module.opImageQuerySize(\n getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId));\n }\n\n return result;\n }\n DxbcRegisterValue emitCalcBufferIndexStructured(\n DxbcRegisterValue structId,\n DxbcRegisterValue structOffset,\n uint32_t structStride) {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = 1;\n \n uint32_t typeId = getVectorTypeId(result.type);\n uint32_t offset = m_module.opShiftRightLogical(typeId, structOffset.id, m_module.consti32(2));\n \n result.id = m_module.opIAdd(typeId,\n m_module.opIMul(typeId, structId.id, m_module.consti32(structStride / 4)),\n offset);\n return result;\n }\n DxbcRegisterValue emitCalcBufferIndexRaw(\n DxbcRegisterValue byteOffset) {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = 1;\n \n uint32_t typeId = getVectorTypeId(result.type);\n result.id = m_module.opShiftRightLogical(typeId, byteOffset.id, m_module.consti32(2));\n return result;\n }\n DxbcRegisterValue emitCalcTexCoord(\n DxbcRegisterValue coordVector,\n const DxbcImageInfo& imageInfo) {\n const uint32_t dim = getTexCoordDim(imageInfo);\n \n if (dim != coordVector.type.ccount) {\n coordVector = emitRegisterExtract(\n coordVector, DxbcRegMask::firstN(dim)); \n }\n \n 
return coordVector;\n }\n DxbcRegisterValue emitLoadTexCoord(\n const DxbcRegister& coordReg,\n const DxbcImageInfo& imageInfo) {\n return emitCalcTexCoord(emitRegisterLoad(coordReg,\n DxbcRegMask(true, true, true, true)), imageInfo);\n }\n DxbcRegisterValue emitIndexLoad(\n DxbcRegIndex index) {\n if (index.relReg != nullptr) {\n DxbcRegisterValue result = emitRegisterLoad(\n *index.relReg, DxbcRegMask(true, false, false, false));\n \n if (index.offset != 0) {\n result.id = m_module.opIAdd(\n getVectorTypeId(result.type), result.id,\n m_module.consti32(index.offset));\n }\n \n return result;\n } else {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = 1;\n result.id = m_module.consti32(index.offset);\n return result;\n }\n }\n DxbcRegisterValue emitValueLoad(\n DxbcRegisterPointer ptr) {\n DxbcRegisterValue result;\n result.type = ptr.type;\n result.id = m_module.opLoad(\n getVectorTypeId(result.type),\n ptr.id);\n return result;\n }\n void emitValueStore(\n DxbcRegisterPointer ptr,\n DxbcRegisterValue value,\n DxbcRegMask writeMask) {\n // If the component types are not compatible,\n // we need to bit-cast the source variable.\n if (value.type.ctype != ptr.type.ctype)\n value = emitRegisterBitcast(value, ptr.type.ctype);\n \n // If the source value consists of only one component,\n // it is stored in all components of the destination.\n if (value.type.ccount == 1)\n value = emitRegisterExtend(value, writeMask.popCount());\n \n if (ptr.type.ccount == writeMask.popCount()) {\n // Simple case: We write to the entire register\n m_module.opStore(ptr.id, value.id);\n } else {\n // We only write to part of the destination\n // register, so we need to load and modify it\n DxbcRegisterValue tmp = emitValueLoad(ptr);\n tmp = emitRegisterInsert(tmp, value, writeMask);\n \n m_module.opStore(ptr.id, tmp.id);\n }\n }\n DxbcRegisterValue emitRegisterLoadRaw(\n const DxbcRegister& reg) {\n // Try to find index range for the given 
register\n const DxbcIndexRange* indexRange = nullptr;\n\n if (reg.idxDim && reg.idx[reg.idxDim - 1u].relReg) {\n uint32_t offset = reg.idx[reg.idxDim - 1u].offset;\n\n for (const auto& range : m_indexRanges) {\n if (reg.type == range.type && offset >= range.start && offset < range.start + range.length)\n indexRange = ⦥\n }\n }\n\n if (reg.type == DxbcOperandType::IndexableTemp || indexRange) {\n bool doBoundsCheck = reg.idx[reg.idxDim - 1u].relReg != nullptr;\n\n if (doBoundsCheck) {\n DxbcRegisterValue indexId = emitIndexLoad(reg.idx[reg.idxDim - 1u]);\n uint32_t boundsCheck = 0u;\n\n if (reg.type == DxbcOperandType::IndexableTemp) {\n boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), indexId.id,\n m_module.constu32(m_xRegs.at(reg.idx[0].offset).alength));\n } else {\n uint32_t adjustedId = m_module.opISub(getVectorTypeId(indexId.type),\n indexId.id, m_module.consti32(indexRange->start));\n\n boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), adjustedId,\n m_module.constu32(indexRange->length));\n }\n\n // Kind of ugly to have an empty else block here but there's no\n // way for us to know the current block ID for the phi below\n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelElse = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n\n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(boundsCheck, cond.labelIf, cond.labelElse);\n\n m_module.opLabel(cond.labelIf);\n\n DxbcRegisterValue returnValue = emitValueLoad(emitGetOperandPtr(reg));\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelElse);\n\n DxbcRegisterValue zeroValue = emitBuildZeroVector(returnValue.type);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n\n std::array phiLabels = {{\n { returnValue.id, cond.labelIf },\n { zeroValue.id, cond.labelElse },\n }};\n\n returnValue.id = m_module.opPhi(\n getVectorTypeId(returnValue.type),\n 
phiLabels.size(), phiLabels.data());\n return returnValue;\n }\n }\n\n DxbcRegisterValue value = emitValueLoad(emitGetOperandPtr(reg));\n\n // Pad icb values to a vec4 since the app may access components that are always 0\n if (reg.type == DxbcOperandType::ImmediateConstantBuffer && value.type.ccount < 4u) {\n DxbcVectorType zeroType;\n zeroType.ctype = value.type.ctype;\n zeroType.ccount = 4u - value.type.ccount;\n\n uint32_t zeroVector = emitBuildZeroVector(zeroType).id;\n\n std::array constituents = { value.id, zeroVector };\n\n value.type.ccount = 4u;\n value.id = m_module.opCompositeConstruct(getVectorTypeId(value.type),\n constituents.size(), constituents.data());\n }\n\n return value;\n }\n DxbcRegisterValue emitConstantBufferLoad(\n const DxbcRegister& reg,\n DxbcRegMask writeMask) {\n // Constant buffers take a two-dimensional index:\n // (0) register index (immediate)\n // (1) constant offset (relative)\n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = 4;\n info.type.alength = 0;\n info.sclass = spv::StorageClassUniform;\n \n uint32_t regId = reg.idx[0].offset;\n DxbcRegisterValue constId = emitIndexLoad(reg.idx[1]);\n \n uint32_t ptrTypeId = getPointerTypeId(info);\n \n const std::array indices =\n {{ m_module.consti32(0), constId.id }};\n \n DxbcRegisterPointer ptr;\n ptr.type.ctype = info.type.ctype;\n ptr.type.ccount = info.type.ccount;\n ptr.id = m_module.opAccessChain(ptrTypeId,\n m_constantBuffers.at(regId).varId,\n indices.size(), indices.data());\n\n // Load individual components from buffer\n std::array ccomps = { 0, 0, 0, 0 };\n std::array scomps = { 0, 0, 0, 0 };\n uint32_t scount = 0;\n\n for (uint32_t i = 0; i < 4; i++) {\n uint32_t sindex = reg.swizzle[i];\n\n if (!writeMask[i] || ccomps[sindex])\n continue;\n \n uint32_t componentId = m_module.constu32(sindex);\n uint32_t componentPtr = m_module.opAccessChain(\n m_module.defPointerType(\n getScalarTypeId(DxbcScalarType::Float32),\n 
spv::StorageClassUniform),\n ptr.id, 1, &componentId);\n \n ccomps[sindex] = m_module.opLoad(\n getScalarTypeId(DxbcScalarType::Float32),\n componentPtr);\n }\n\n for (uint32_t i = 0; i < 4; i++) {\n uint32_t sindex = reg.swizzle[i];\n \n if (writeMask[i])\n scomps[scount++] = ccomps[sindex];\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = scount;\n result.id = scomps[0];\n \n if (scount > 1) {\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n scount, scomps.data());\n }\n\n // Apply any post-processing that might be necessary\n result = emitRegisterBitcast(result, reg.dataType);\n result = emitSrcOperandModifiers(result, reg.modifiers);\n return result;\n }\n DxbcRegisterValue emitRegisterLoad(\n const DxbcRegister& reg,\n DxbcRegMask writeMask) {\n if (reg.type == DxbcOperandType::Imm32\n || reg.type == DxbcOperandType::Imm64) {\n DxbcRegisterValue result;\n \n if (reg.componentCount == DxbcComponentCount::Component1) {\n // Create one single u32 constant\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.constu32(reg.imm.u32_1);\n\n result = emitRegisterExtend(result, writeMask.popCount());\n } else if (reg.componentCount == DxbcComponentCount::Component4) {\n // Create a u32 vector with as many components as needed\n std::array indices = { };\n uint32_t indexId = 0;\n \n for (uint32_t i = 0; i < indices.size(); i++) {\n if (writeMask[i]) {\n indices.at(indexId++) =\n m_module.constu32(reg.imm.u32_4[i]);\n }\n }\n \n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = writeMask.popCount();\n result.id = indices.at(0);\n \n if (indexId > 1) {\n result.id = m_module.constComposite(\n getVectorTypeId(result.type),\n result.type.ccount, indices.data());\n }\n \n } else {\n // Something went horribly wrong in the decoder or the shader is broken\n throw DxvkError(\"DxbcCompiler: Invalid component count for immediate 
operand\");\n }\n \n // Cast constants to the requested type\n return emitRegisterBitcast(result, reg.dataType);\n } else if (reg.type == DxbcOperandType::ConstantBuffer) {\n return emitConstantBufferLoad(reg, writeMask);\n } else {\n // Load operand from the operand pointer\n DxbcRegisterValue result = emitRegisterLoadRaw(reg);\n \n // Apply operand swizzle to the operand value\n result = emitRegisterSwizzle(result, reg.swizzle, writeMask);\n \n // Cast it to the requested type. We need to do\n // this after the swizzling for 64-bit types.\n result = emitRegisterBitcast(result, reg.dataType);\n \n // Apply operand modifiers\n result = emitSrcOperandModifiers(result, reg.modifiers);\n return result;\n }\n }\n void emitRegisterStore(\n const DxbcRegister& reg,\n DxbcRegisterValue value) {\n if (reg.type == DxbcOperandType::IndexableTemp) {\n bool doBoundsCheck = reg.idx[1].relReg != nullptr;\n DxbcRegisterValue vectorId = emitIndexLoad(reg.idx[1]);\n\n if (doBoundsCheck) {\n uint32_t boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), vectorId.id,\n m_module.constu32(m_xRegs.at(reg.idx[0].offset).alength));\n \n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n \n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(boundsCheck, cond.labelIf, cond.labelEnd);\n \n m_module.opLabel(cond.labelIf);\n\n emitValueStore(getIndexableTempPtr(reg, vectorId), value, reg.mask);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n } else {\n emitValueStore(getIndexableTempPtr(reg, vectorId), value, reg.mask);\n }\n } else {\n emitValueStore(emitGetOperandPtr(reg), value, reg.mask);\n }\n }\n void emitInputSetup() {\n m_module.setLateConst(m_vArrayLengthId, &m_vArrayLength);\n\n // Copy all defined v# registers into the input array. 
Note\n // that the outer index of the array is the vertex index.\n const uint32_t vecTypeId = m_module.defVectorType(m_module.defFloatType(32), 4);\n const uint32_t dstPtrTypeId = m_module.defPointerType(vecTypeId, spv::StorageClassPrivate);\n \n for (uint32_t i = 0; i < m_vRegs.size(); i++) {\n if (m_vRegs.at(i).id != 0) {\n const uint32_t registerId = m_module.consti32(i);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n std::array indices\n = {{ m_module.consti32(v), registerId }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i).id, 1, indices.data());\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n dstPtrTypeId, m_vArray, 2, indices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n }\n }\n \n // Copy all system value registers into the array,\n // preserving any previously written contents.\n for (const DxbcSvMapping& map : m_vMappings) {\n const uint32_t registerId = m_module.consti32(map.regId);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n const DxbcRegisterValue value = [&] {\n switch (m_programInfo.type()) {\n case DxbcProgramType::GeometryShader: return emitGsSystemValueLoad(map.sv, map.regMask, v);\n default: throw DxvkError(str::format(\"DxbcCompiler: Unexpected stage: \", m_programInfo.type()));\n }\n }();\n \n std::array indices = {\n m_module.consti32(v), registerId,\n };\n \n DxbcRegisterPointer inputReg;\n inputReg.type.ctype = DxbcScalarType::Float32;\n inputReg.type.ccount = 4;\n inputReg.id = m_module.opAccessChain(dstPtrTypeId,\n m_vArray, indices.size(), indices.data());\n emitValueStore(inputReg, value, map.regMask);\n }\n }\n }\n void emitInputSetup(uint32_t 
vertexCount) {\n m_module.setLateConst(m_vArrayLengthId, &m_vArrayLength);\n\n // Copy all defined v# registers into the input array. Note\n // that the outer index of the array is the vertex index.\n const uint32_t vecTypeId = m_module.defVectorType(m_module.defFloatType(32), 4);\n const uint32_t dstPtrTypeId = m_module.defPointerType(vecTypeId, spv::StorageClassPrivate);\n \n for (uint32_t i = 0; i < m_vRegs.size(); i++) {\n if (m_vRegs.at(i).id != 0) {\n const uint32_t registerId = m_module.consti32(i);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n std::array indices\n = {{ m_module.consti32(v), registerId }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i).id, 1, indices.data());\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n dstPtrTypeId, m_vArray, 2, indices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n }\n }\n \n // Copy all system value registers into the array,\n // preserving any previously written contents.\n for (const DxbcSvMapping& map : m_vMappings) {\n const uint32_t registerId = m_module.consti32(map.regId);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n const DxbcRegisterValue value = [&] {\n switch (m_programInfo.type()) {\n case DxbcProgramType::GeometryShader: return emitGsSystemValueLoad(map.sv, map.regMask, v);\n default: throw DxvkError(str::format(\"DxbcCompiler: Unexpected stage: \", m_programInfo.type()));\n }\n }();\n \n std::array indices = {\n m_module.consti32(v), registerId,\n };\n \n DxbcRegisterPointer inputReg;\n inputReg.type.ctype = DxbcScalarType::Float32;\n inputReg.type.ccount = 4;\n inputReg.id = m_module.opAccessChain(dstPtrTypeId,\n 
m_vArray, indices.size(), indices.data());\n emitValueStore(inputReg, value, map.regMask);\n }\n }\n }\n void emitOutputSetup() {\n for (const DxbcSvMapping& svMapping : m_oMappings) {\n DxbcRegisterPointer outputReg = m_oRegs.at(svMapping.regId);\n \n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n uint32_t registerIndex = m_module.constu32(svMapping.regId);\n \n outputReg.type = { DxbcScalarType::Float32, 4 };\n outputReg.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(outputReg.type),\n spv::StorageClassPrivate),\n m_hs.outputPerPatch,\n 1, ®isterIndex);\n }\n \n auto sv = svMapping.sv;\n auto mask = svMapping.regMask;\n auto value = emitValueLoad(outputReg);\n \n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: emitVsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::GeometryShader: emitGsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::HullShader: emitHsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::DomainShader: emitDsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::PixelShader: emitPsSystemValueStore(sv, mask, value); break;\n default: break;\n }\n }\n }\n void emitOutputDepthClamp() {\n // HACK: Some drivers do not clamp FragDepth to [minDepth..maxDepth]\n // before writing to the depth attachment, but we do not have acccess\n // to those. 
Clamp to [0..1] instead.\n if (m_ps.builtinDepth) {\n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Float32, 1 };\n ptr.id = m_ps.builtinDepth;\n\n DxbcRegisterValue value = emitValueLoad(ptr);\n\n value.id = m_module.opNClamp(\n getVectorTypeId(ptr.type),\n value.id,\n m_module.constf32(0.0f),\n m_module.constf32(1.0f));\n \n emitValueStore(ptr, value,\n DxbcRegMask::firstN(1));\n }\n }\n void emitInitWorkgroupMemory() {\n bool hasTgsm = false;\n\n SpirvMemoryOperands memoryOperands;\n memoryOperands.flags = spv::MemoryAccessNonPrivatePointerMask;\n\n for (uint32_t i = 0; i < m_gRegs.size(); i++) {\n if (!m_gRegs[i].varId)\n continue;\n \n if (!m_cs.builtinLocalInvocationIndex) {\n m_cs.builtinLocalInvocationIndex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationIndex,\n \"vThreadIndexInGroup\");\n }\n\n uint32_t intTypeId = getScalarTypeId(DxbcScalarType::Uint32);\n uint32_t ptrTypeId = m_module.defPointerType(\n intTypeId, spv::StorageClassWorkgroup);\n\n uint32_t numElements = m_gRegs[i].type == DxbcResourceType::Structured\n ? 
m_gRegs[i].elementCount * m_gRegs[i].elementStride / 4\n : m_gRegs[i].elementCount / 4;\n \n uint32_t numThreads = m_cs.workgroupSizeX *\n m_cs.workgroupSizeY * m_cs.workgroupSizeZ;\n \n uint32_t numElementsPerThread = numElements / numThreads;\n uint32_t numElementsRemaining = numElements % numThreads;\n\n uint32_t threadId = m_module.opLoad(\n intTypeId, m_cs.builtinLocalInvocationIndex);\n uint32_t zeroId = m_module.constu32(0);\n\n for (uint32_t e = 0; e < numElementsPerThread; e++) {\n uint32_t ofsId = m_module.opIAdd(intTypeId, threadId,\n m_module.constu32(numThreads * e));\n \n uint32_t ptrId = m_module.opAccessChain(\n ptrTypeId, m_gRegs[i].varId, 1, &ofsId);\n\n m_module.opStore(ptrId, zeroId, memoryOperands);\n }\n\n if (numElementsRemaining) {\n uint32_t condition = m_module.opULessThan(\n m_module.defBoolType(), threadId,\n m_module.constu32(numElementsRemaining));\n \n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n\n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(condition, cond.labelIf, cond.labelEnd);\n\n m_module.opLabel(cond.labelIf);\n\n uint32_t ofsId = m_module.opIAdd(intTypeId, threadId,\n m_module.constu32(numThreads * numElementsPerThread));\n \n uint32_t ptrId = m_module.opAccessChain(\n ptrTypeId, m_gRegs[i].varId, 1, &ofsId);\n \n m_module.opStore(ptrId, zeroId, memoryOperands);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n }\n\n hasTgsm = true;\n }\n\n if (hasTgsm) {\n m_module.opControlBarrier(\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask));\n }\n }\n DxbcRegisterValue emitVsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask) {\n switch (sv) {\n case 
DxbcSystemValue::VertexId: {\n const uint32_t typeId = getScalarTypeId(DxbcScalarType::Uint32);\n \n if (m_vs.builtinVertexId == 0) {\n m_vs.builtinVertexId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInVertexIndex,\n \"vs_vertex_index\");\n }\n \n if (m_vs.builtinBaseVertex == 0) {\n m_vs.builtinBaseVertex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInBaseVertex,\n \"vs_base_vertex\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opISub(typeId,\n m_module.opLoad(typeId, m_vs.builtinVertexId),\n m_module.opLoad(typeId, m_vs.builtinBaseVertex));\n return result;\n } break;\n \n case DxbcSystemValue::InstanceId: {\n const uint32_t typeId = getScalarTypeId(DxbcScalarType::Uint32);\n \n if (m_vs.builtinInstanceId == 0) {\n m_vs.builtinInstanceId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInstanceIndex,\n \"vs_instance_index\");\n }\n \n if (m_vs.builtinBaseInstance == 0) {\n m_vs.builtinBaseInstance = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInBaseInstance,\n \"vs_base_instance\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opISub(typeId,\n m_module.opLoad(typeId, m_vs.builtinInstanceId),\n m_module.opLoad(typeId, m_vs.builtinBaseInstance));\n return result;\n } break;\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled VS SV input: \", sv));\n }\n }\n DxbcRegisterValue emitGsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n uint32_t vertexId) {\n switch (sv) {\n case DxbcSystemValue::Position: {\n uint32_t arrayIndex = m_module.consti32(vertexId);\n\n if (!m_positionIn) {\n m_positionIn = emitNewBuiltinVariable({\n { 
DxbcScalarType::Float32, 4, primitiveVertexCount(m_gs.inputPrimitive) },\n spv::StorageClassInput },\n spv::BuiltInPosition,\n \"in_position\");\n }\n\n DxbcRegisterPointer ptrIn;\n ptrIn.type.ctype = DxbcScalarType::Float32;\n ptrIn.type.ccount = 4;\n ptrIn.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(ptrIn.type), spv::StorageClassInput),\n m_positionIn, 1, &arrayIndex);\n \n return emitRegisterExtract(emitValueLoad(ptrIn), mask);\n } break;\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled GS SV input: \", sv));\n }\n }\n DxbcRegisterValue emitPsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask) {\n switch (sv) {\n case DxbcSystemValue::Position: {\n if (m_ps.builtinFragCoord == 0) {\n m_ps.builtinFragCoord = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 4, 0 },\n spv::StorageClassInput },\n spv::BuiltInFragCoord,\n \"ps_frag_coord\");\n }\n \n DxbcRegisterPointer ptrIn;\n ptrIn.type = { DxbcScalarType::Float32, 4 };\n ptrIn.id = m_ps.builtinFragCoord;\n \n // The X, Y and Z components of the SV_POSITION semantic\n // are identical to Vulkan's FragCoord builtin, but we\n // need to compute the reciprocal of the W component.\n DxbcRegisterValue fragCoord = emitValueLoad(ptrIn);\n \n uint32_t componentIndex = 3;\n uint32_t t_f32 = m_module.defFloatType(32);\n uint32_t v_wComp = m_module.opCompositeExtract(t_f32, fragCoord.id, 1, &componentIndex);\n v_wComp = m_module.opFDiv(t_f32, m_module.constf32(1.0f), v_wComp);\n \n fragCoord.id = m_module.opCompositeInsert(\n getVectorTypeId(fragCoord.type),\n v_wComp, fragCoord.id,\n 1, &componentIndex);\n \n return emitRegisterExtract(fragCoord, mask);\n } break;\n \n case DxbcSystemValue::IsFrontFace: {\n if (m_ps.builtinIsFrontFace == 0) {\n m_ps.builtinIsFrontFace = emitNewBuiltinVariable({\n { DxbcScalarType::Bool, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInFrontFacing,\n \"ps_is_front_face\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = 
DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opSelect(\n getVectorTypeId(result.type),\n m_module.opLoad(\n m_module.defBoolType(),\n m_ps.builtinIsFrontFace),\n m_module.constu32(0xFFFFFFFF),\n m_module.constu32(0x00000000));\n return result;\n } break;\n \n case DxbcSystemValue::PrimitiveId: {\n if (m_primitiveIdIn == 0) {\n m_module.enableCapability(spv::CapabilityGeometry);\n \n m_primitiveIdIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInPrimitiveId,\n \"ps_primitive_id\");\n }\n \n DxbcRegisterPointer ptrIn;\n ptrIn.type = { DxbcScalarType::Uint32, 1 };\n ptrIn.id = m_primitiveIdIn;\n \n return emitValueLoad(ptrIn);\n } break;\n \n case DxbcSystemValue::SampleIndex: {\n if (m_ps.builtinSampleId == 0) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n \n m_ps.builtinSampleId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInSampleId,\n \"ps_sample_id\");\n }\n \n DxbcRegisterPointer ptrIn;\n ptrIn.type.ctype = DxbcScalarType::Uint32;\n ptrIn.type.ccount = 1;\n ptrIn.id = m_ps.builtinSampleId;\n \n return emitValueLoad(ptrIn);\n } break;\n \n case DxbcSystemValue::RenderTargetId: {\n if (m_ps.builtinLayer == 0) {\n m_module.enableCapability(spv::CapabilityGeometry);\n \n m_ps.builtinLayer = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLayer,\n \"v_layer\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Uint32;\n ptr.type.ccount = 1;\n ptr.id = m_ps.builtinLayer;\n \n return emitValueLoad(ptr);\n } break;\n \n case DxbcSystemValue::ViewportId: {\n if (m_ps.builtinViewportId == 0) {\n m_module.enableCapability(spv::CapabilityMultiViewport);\n \n m_ps.builtinViewportId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInViewportIndex,\n \"v_viewport\");\n }\n \n 
DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Uint32;\n ptr.type.ccount = 1;\n ptr.id = m_ps.builtinViewportId;\n \n return emitValueLoad(ptr);\n } break;\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled PS SV input: \", sv));\n }\n }\n void emitVsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position: {\n if (!m_positionOut) {\n m_positionOut = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 4, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPosition,\n \"out_position\");\n }\n\n DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Float32;\n ptr.type.ccount = 4;\n ptr.id = m_positionOut;\n \n emitValueStore(ptr, value, mask);\n } break;\n \n case DxbcSystemValue::RenderTargetId: {\n if (m_programInfo.type() != DxbcProgramType::GeometryShader)\n m_module.enableCapability(spv::CapabilityShaderLayer);\n\n if (m_gs.builtinLayer == 0) {\n m_module.enableCapability(spv::CapabilityGeometry);\n\n m_gs.builtinLayer = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInLayer,\n \"o_layer\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Uint32, 1 };\n ptr.id = m_gs.builtinLayer;\n \n emitValueStore(\n ptr, emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n case DxbcSystemValue::ViewportId: {\n if (m_programInfo.type() != DxbcProgramType::GeometryShader)\n m_module.enableCapability(spv::CapabilityShaderViewportIndex);\n\n if (m_gs.builtinViewportId == 0) {\n m_module.enableCapability(spv::CapabilityMultiViewport);\n \n m_gs.builtinViewportId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInViewportIndex,\n \"o_viewport\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Uint32, 1};\n ptr.id = m_gs.builtinViewportId;\n \n emitValueStore(\n ptr, 
emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled VS SV output: \", sv));\n }\n }\n void emitHsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n if (sv >= DxbcSystemValue::FinalQuadUeq0EdgeTessFactor\n && sv <= DxbcSystemValue::FinalLineDensityTessFactor) {\n struct TessFactor {\n uint32_t array = 0;\n uint32_t index = 0;\n };\n \n static const std::array s_tessFactors = {{\n { m_hs.builtinTessLevelOuter, 0 }, // FinalQuadUeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalQuadVeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 2 }, // FinalQuadUeq1EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 3 }, // FinalQuadVeq1EdgeTessFactor\n { m_hs.builtinTessLevelInner, 0 }, // FinalQuadUInsideTessFactor\n { m_hs.builtinTessLevelInner, 1 }, // FinalQuadVInsideTessFactor\n { m_hs.builtinTessLevelOuter, 0 }, // FinalTriUeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalTriVeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 2 }, // FinalTriWeq0EdgeTessFactor\n { m_hs.builtinTessLevelInner, 0 }, // FinalTriInsideTessFactor\n { m_hs.builtinTessLevelOuter, 0 }, // FinalLineDensityTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalLineDetailTessFactor\n }};\n \n const TessFactor tessFactor = s_tessFactors.at(uint32_t(sv)\n - uint32_t(DxbcSystemValue::FinalQuadUeq0EdgeTessFactor));\n \n const uint32_t tessFactorArrayIndex\n = m_module.constu32(tessFactor.index);\n \n // Apply global tess factor limit\n float maxTessFactor = m_hs.maxTessFactor;\n\n if (m_moduleInfo.tess != nullptr) {\n if (m_moduleInfo.tess->maxTessFactor < maxTessFactor)\n maxTessFactor = m_moduleInfo.tess->maxTessFactor;\n }\n\n DxbcRegisterValue tessValue = emitRegisterExtract(value, mask);\n tessValue.id = m_module.opNClamp(getVectorTypeId(tessValue.type),\n tessValue.id, m_module.constf32(0.0f),\n 
m_module.constf32(maxTessFactor));\n \n DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Float32;\n ptr.type.ccount = 1;\n ptr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(ptr.type),\n spv::StorageClassOutput),\n tessFactor.array, 1,\n &tessFactorArrayIndex);\n \n emitValueStore(ptr, tessValue,\n DxbcRegMask(true, false, false, false));\n } else {\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled HS SV output: \", sv));\n }\n }\n void emitDsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::CullDistance:\n case DxbcSystemValue::ClipDistance:\n case DxbcSystemValue::RenderTargetId:\n case DxbcSystemValue::ViewportId:\n emitVsSystemValueStore(sv, mask, value);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled DS SV output: \", sv));\n }\n }\n void emitGsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::CullDistance:\n case DxbcSystemValue::ClipDistance:\n case DxbcSystemValue::RenderTargetId:\n case DxbcSystemValue::ViewportId:\n emitVsSystemValueStore(sv, mask, value);\n break;\n \n case DxbcSystemValue::PrimitiveId: {\n if (m_primitiveIdOut == 0) {\n m_primitiveIdOut = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPrimitiveId,\n \"gs_primitive_id\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Uint32, 1};\n ptr.id = m_primitiveIdOut;\n \n emitValueStore(\n ptr, emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled GS SV output: \", sv));\n }\n }\n void emitPsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n 
Logger::warn(str::format(\n \"DxbcCompiler: Unhandled PS SV output: \", sv));\n }\n void emitClipCullStore(\n DxbcSystemValue sv,\n uint32_t dstArray) {\n uint32_t offset = 0;\n \n if (dstArray == 0)\n return;\n \n for (auto e = m_osgn->begin(); e != m_osgn->end(); e++) {\n if (e->systemValue == sv) {\n DxbcRegisterPointer srcPtr = m_oRegs.at(e->registerId);\n DxbcRegisterValue srcValue = emitValueLoad(srcPtr);\n \n for (uint32_t i = 0; i < 4; i++) {\n if (e->componentMask[i]) {\n uint32_t offsetId = m_module.consti32(offset++);\n \n DxbcRegisterValue component = emitRegisterExtract(\n srcValue, DxbcRegMask::select(i));\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 1 };\n dstPtr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(dstPtr.type),\n spv::StorageClassOutput),\n dstArray, 1, &offsetId);\n \n emitValueStore(dstPtr, component,\n DxbcRegMask(true, false, false, false));\n }\n }\n }\n }\n }\n void emitClipCullLoad(\n DxbcSystemValue sv,\n uint32_t srcArray) {\n uint32_t offset = 0;\n \n if (srcArray == 0)\n return;\n \n for (auto e = m_isgn->begin(); e != m_isgn->end(); e++) {\n if (e->systemValue == sv) {\n // Load individual components from the source array\n uint32_t componentIndex = 0;\n std::array componentIds = {{ 0, 0, 0, 0 }};\n \n for (uint32_t i = 0; i < 4; i++) {\n if (e->componentMask[i]) {\n uint32_t offsetId = m_module.consti32(offset++);\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = { DxbcScalarType::Float32, 1 };\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(srcPtr.type),\n spv::StorageClassInput),\n srcArray, 1, &offsetId);\n \n componentIds[componentIndex++]\n = emitValueLoad(srcPtr).id;\n }\n }\n \n // Put everything into one vector\n DxbcRegisterValue dstValue;\n dstValue.type = { DxbcScalarType::Float32, componentIndex };\n dstValue.id = componentIds[0];\n \n if (componentIndex > 1) {\n dstValue.id = m_module.opCompositeConstruct(\n 
getVectorTypeId(dstValue.type),\n componentIndex, componentIds.data());\n }\n \n // Store vector to the input array\n uint32_t registerId = m_module.consti32(e->registerId);\n \n DxbcRegisterPointer dstInput;\n dstInput.type = { DxbcScalarType::Float32, 4 };\n dstInput.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(dstInput.type),\n spv::StorageClassPrivate),\n m_vArray, 1, ®isterId);\n \n emitValueStore(dstInput, dstValue, e->componentMask);\n }\n }\n }\n void emitPointSizeStore() {\n if (m_moduleInfo.options.needsPointSizeExport) {\n uint32_t pointSizeId = emitNewBuiltinVariable(DxbcRegisterInfo {\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPointSize,\n \"point_size\");\n\n m_module.opStore(pointSizeId, m_module.constf32(1.0f));\n }\n }\n void emitInit() {\n // Set up common capabilities for all shaders\n m_module.enableCapability(spv::CapabilityShader);\n m_module.enableCapability(spv::CapabilityImageQuery);\n \n // Initialize the shader module with capabilities\n // etc. 
Each shader type has its own peculiarities.\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: emitVsInit(); break;\n case DxbcProgramType::HullShader: emitHsInit(); break;\n case DxbcProgramType::DomainShader: emitDsInit(); break;\n case DxbcProgramType::GeometryShader: emitGsInit(); break;\n case DxbcProgramType::PixelShader: emitPsInit(); break;\n case DxbcProgramType::ComputeShader: emitCsInit(); break;\n default: throw DxvkError(\"Invalid shader stage\");\n }\n }\n void emitFunctionBegin(\n uint32_t entryPoint,\n uint32_t returnType,\n uint32_t funcType) {\n this->emitFunctionEnd();\n \n m_module.functionBegin(\n returnType, entryPoint, funcType,\n spv::FunctionControlMaskNone);\n \n m_insideFunction = true;\n }\n void emitFunctionEnd() {\n if (m_insideFunction) {\n m_module.opReturn();\n m_module.functionEnd();\n }\n \n m_insideFunction = false;\n }\n void emitFunctionLabel() {\n m_module.opLabel(m_module.allocateId());\n }\n void emitMainFunctionBegin() {\n this->emitFunctionBegin(\n m_entryPointId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitVsInit() {\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n m_module.enableCapability(spv::CapabilityDrawParameters);\n \n // Standard input array\n emitDclInputArray(0);\n \n // Cull/clip distances as outputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassOutput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassOutput);\n \n // Main function of the vertex shader\n m_vs.functionId = m_module.allocateId();\n m_module.setDebugName(m_vs.functionId, \"vs_main\");\n \n this->emitFunctionBegin(\n m_vs.functionId,\n m_module.defVoidType(),\n 
m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitHsInit() {\n m_module.enableCapability(spv::CapabilityTessellation);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n \n m_hs.builtinInvocationId = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInvocationId,\n \"vOutputControlPointId\");\n \n m_hs.builtinTessLevelOuter = emitBuiltinTessLevelOuter(spv::StorageClassOutput);\n m_hs.builtinTessLevelInner = emitBuiltinTessLevelInner(spv::StorageClassOutput);\n }\n void emitDsInit() {\n m_module.enableCapability(spv::CapabilityTessellation);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n \n m_ds.builtinTessLevelOuter = emitBuiltinTessLevelOuter(spv::StorageClassInput);\n m_ds.builtinTessLevelInner = emitBuiltinTessLevelInner(spv::StorageClassInput);\n \n // Cull/clip distances as outputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassOutput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassOutput);\n \n // Main function of the domain shader\n m_ds.functionId = m_module.allocateId();\n m_module.setDebugName(m_ds.functionId, \"ds_main\");\n \n this->emitFunctionBegin(\n m_ds.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitGsInit() {\n m_module.enableCapability(spv::CapabilityGeometry);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n\n // Enable capabilities for xfb mode if necessary\n if (m_moduleInfo.xfb) {\n 
m_module.enableCapability(spv::CapabilityGeometryStreams);\n m_module.enableCapability(spv::CapabilityTransformFeedback);\n \n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeXfb);\n }\n\n // We only need outputs if rasterization is enabled\n m_gs.needsOutputSetup = !m_moduleInfo.xfb\n || m_moduleInfo.xfb->rasterizedStream >= 0;\n \n // Cull/clip distances as outputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassOutput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassOutput);\n \n // Emit Xfb variables if necessary\n if (m_moduleInfo.xfb)\n emitXfbOutputDeclarations();\n\n // Main function of the vertex shader\n m_gs.functionId = m_module.allocateId();\n m_module.setDebugName(m_gs.functionId, \"gs_main\");\n \n this->emitFunctionBegin(\n m_gs.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitPsInit() {\n m_module.enableCapability(spv::CapabilityDerivativeControl);\n \n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeOriginUpperLeft);\n \n // Standard input array\n emitDclInputArray(0);\n \n // Cull/clip distances as inputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullIn.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassInput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullIn.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassInput);\n \n // Main function of the pixel shader\n m_ps.functionId = m_module.allocateId();\n m_module.setDebugName(m_ps.functionId, \"ps_main\");\n \n this->emitFunctionBegin(\n m_ps.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitCsInit() {\n // Main 
function of the compute shader\n m_cs.functionId = m_module.allocateId();\n m_module.setDebugName(m_cs.functionId, \"cs_main\");\n \n this->emitFunctionBegin(\n m_cs.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitVsFinalize() {\n this->emitMainFunctionBegin();\n this->emitInputSetup();\n m_module.opFunctionCall(\n m_module.defVoidType(),\n m_vs.functionId, 0, nullptr);\n this->emitOutputSetup();\n this->emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);\n this->emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);\n this->emitPointSizeStore();\n this->emitFunctionEnd();\n }\n void emitHsFinalize() {\n if (m_hs.cpPhase.functionId == 0)\n m_hs.cpPhase = this->emitNewHullShaderPassthroughPhase();\n \n // Control point phase\n this->emitMainFunctionBegin();\n this->emitInputSetup(m_hs.vertexCountIn);\n this->emitHsControlPointPhase(m_hs.cpPhase);\n this->emitHsPhaseBarrier();\n \n // Fork-join phases and output setup\n this->emitHsInvocationBlockBegin(1);\n \n for (const auto& phase : m_hs.forkPhases)\n this->emitHsForkJoinPhase(phase);\n \n for (const auto& phase : m_hs.joinPhases)\n this->emitHsForkJoinPhase(phase);\n \n this->emitOutputSetup();\n this->emitHsOutputSetup();\n this->emitHsInvocationBlockEnd();\n this->emitFunctionEnd();\n }\n void emitDsFinalize() {\n this->emitMainFunctionBegin();\n m_module.opFunctionCall(\n m_module.defVoidType(),\n m_ds.functionId, 0, nullptr);\n this->emitOutputSetup();\n this->emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);\n this->emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);\n this->emitFunctionEnd();\n }\n void emitGsFinalize() {\n if (!m_gs.invocationCount)\n m_module.setInvocations(m_entryPointId, 1);\n\n this->emitMainFunctionBegin();\n this->emitInputSetup(\n primitiveVertexCount(m_gs.inputPrimitive));\n m_module.opFunctionCall(\n 
m_module.defVoidType(),\n m_gs.functionId, 0, nullptr);\n // No output setup at this point as that was\n // already done during the EmitVertex step\n this->emitFunctionEnd();\n }\n void emitPsFinalize() {\n this->emitMainFunctionBegin();\n this->emitInputSetup();\n this->emitClipCullLoad(DxbcSystemValue::ClipDistance, m_clipDistances);\n this->emitClipCullLoad(DxbcSystemValue::CullDistance, m_cullDistances);\n\n if (m_hasRasterizerOrderedUav) {\n // For simplicity, just lock the entire fragment shader\n // if there are any rasterizer ordered views.\n m_module.enableExtension(\"SPV_EXT_fragment_shader_interlock\");\n\n if (m_module.hasCapability(spv::CapabilitySampleRateShading)\n && m_moduleInfo.options.enableSampleShadingInterlock) {\n m_module.enableCapability(spv::CapabilityFragmentShaderSampleInterlockEXT);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSampleInterlockOrderedEXT);\n } else {\n m_module.enableCapability(spv::CapabilityFragmentShaderPixelInterlockEXT);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModePixelInterlockOrderedEXT);\n }\n\n m_module.opBeginInvocationInterlock();\n }\n\n m_module.opFunctionCall(\n m_module.defVoidType(),\n m_ps.functionId, 0, nullptr);\n\n if (m_hasRasterizerOrderedUav)\n m_module.opEndInvocationInterlock();\n\n this->emitOutputSetup();\n\n if (m_moduleInfo.options.useDepthClipWorkaround)\n this->emitOutputDepthClamp();\n \n this->emitFunctionEnd();\n }\n void emitCsFinalize() {\n this->emitMainFunctionBegin();\n\n if (m_moduleInfo.options.zeroInitWorkgroupMemory)\n this->emitInitWorkgroupMemory();\n\n m_module.opFunctionCall(\n m_module.defVoidType(),\n m_cs.functionId, 0, nullptr);\n \n this->emitFunctionEnd();\n }\n void emitXfbOutputDeclarations() {\n for (uint32_t i = 0; i < m_moduleInfo.xfb->entryCount; i++) {\n const DxbcXfbEntry* xfbEntry = m_moduleInfo.xfb->entries + i;\n const DxbcSgnEntry* sigEntry = m_osgn->find(\n xfbEntry->semanticName,\n xfbEntry->semanticIndex,\n 
xfbEntry->streamId);\n\n if (sigEntry == nullptr)\n continue;\n \n DxbcRegisterInfo varInfo;\n varInfo.type.ctype = DxbcScalarType::Float32;\n varInfo.type.ccount = xfbEntry->componentCount;\n varInfo.type.alength = 0;\n varInfo.sclass = spv::StorageClassOutput;\n \n uint32_t dstComponentMask = (1 << xfbEntry->componentCount) - 1;\n uint32_t srcComponentMask = dstComponentMask\n << sigEntry->componentMask.firstSet()\n << xfbEntry->componentIndex;\n \n DxbcXfbVar xfbVar;\n xfbVar.varId = emitNewVariable(varInfo);\n xfbVar.streamId = xfbEntry->streamId;\n xfbVar.outputId = sigEntry->registerId;\n xfbVar.srcMask = DxbcRegMask(srcComponentMask);\n xfbVar.dstMask = DxbcRegMask(dstComponentMask);\n m_xfbVars.push_back(xfbVar);\n\n m_module.setDebugName(xfbVar.varId,\n str::format(\"xfb\", i).c_str());\n \n m_module.decorateXfb(xfbVar.varId,\n xfbEntry->streamId, xfbEntry->bufferId, xfbEntry->offset,\n m_moduleInfo.xfb->strides[xfbEntry->bufferId]);\n }\n\n // TODO Compact location/component assignment\n for (uint32_t i = 0; i < m_xfbVars.size(); i++) {\n m_xfbVars[i].location = i;\n m_xfbVars[i].component = 0;\n }\n\n for (uint32_t i = 0; i < m_xfbVars.size(); i++) {\n const DxbcXfbVar* var = &m_xfbVars[i];\n\n m_module.decorateLocation (var->varId, var->location);\n m_module.decorateComponent(var->varId, var->component);\n }\n }\n void emitXfbOutputSetup(\n uint32_t streamId,\n bool passthrough) {\n for (size_t i = 0; i < m_xfbVars.size(); i++) {\n if (m_xfbVars[i].streamId == streamId) {\n DxbcRegisterPointer srcPtr = passthrough\n ? 
m_vRegs[m_xfbVars[i].outputId]\n : m_oRegs[m_xfbVars[i].outputId];\n\n if (passthrough) {\n srcPtr = emitArrayAccess(srcPtr,\n spv::StorageClassInput,\n m_module.constu32(0));\n }\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type.ctype = DxbcScalarType::Float32;\n dstPtr.type.ccount = m_xfbVars[i].dstMask.popCount();\n dstPtr.id = m_xfbVars[i].varId;\n\n DxbcRegisterValue value = emitRegisterExtract(\n emitValueLoad(srcPtr), m_xfbVars[i].srcMask);\n emitValueStore(dstPtr, value, m_xfbVars[i].dstMask);\n }\n }\n }\n void emitHsControlPointPhase(\n const DxbcCompilerHsControlPointPhase& phase) {\n m_module.opFunctionCall(\n m_module.defVoidType(),\n phase.functionId, 0, nullptr);\n }\n void emitHsForkJoinPhase(\n const DxbcCompilerHsForkJoinPhase& phase) {\n for (uint32_t i = 0; i < phase.instanceCount; i++) {\n uint32_t invocationId = m_module.constu32(i);\n \n m_module.opFunctionCall(\n m_module.defVoidType(),\n phase.functionId, 1,\n &invocationId);\n }\n }\n void emitHsPhaseBarrier() {\n uint32_t exeScopeId = m_module.constu32(spv::ScopeWorkgroup);\n uint32_t memScopeId = m_module.constu32(spv::ScopeWorkgroup);\n uint32_t semanticId = m_module.constu32(\n spv::MemorySemanticsOutputMemoryMask |\n spv::MemorySemanticsAcquireReleaseMask |\n spv::MemorySemanticsMakeAvailableMask |\n spv::MemorySemanticsMakeVisibleMask);\n \n m_module.opControlBarrier(exeScopeId, memScopeId, semanticId);\n }\n void emitHsInvocationBlockBegin(\n uint32_t count) {\n uint32_t invocationId = m_module.opLoad(\n getScalarTypeId(DxbcScalarType::Uint32),\n m_hs.builtinInvocationId);\n \n uint32_t condition = m_module.opULessThan(\n m_module.defBoolType(), invocationId,\n m_module.constu32(count));\n \n m_hs.invocationBlockBegin = m_module.allocateId();\n m_hs.invocationBlockEnd = m_module.allocateId();\n \n m_module.opSelectionMerge(\n m_hs.invocationBlockEnd,\n spv::SelectionControlMaskNone);\n \n m_module.opBranchConditional(\n condition,\n m_hs.invocationBlockBegin,\n 
m_hs.invocationBlockEnd);\n \n m_module.opLabel(\n m_hs.invocationBlockBegin);\n }\n void emitHsInvocationBlockEnd() {\n m_module.opBranch (m_hs.invocationBlockEnd);\n m_module.opLabel (m_hs.invocationBlockEnd);\n \n m_hs.invocationBlockBegin = 0;\n m_hs.invocationBlockEnd = 0;\n }\n void emitHsOutputSetup() {\n uint32_t outputPerPatch = emitTessInterfacePerPatch(spv::StorageClassOutput);\n\n if (!outputPerPatch)\n return;\n\n uint32_t vecType = getVectorTypeId({ DxbcScalarType::Float32, 4 });\n\n uint32_t srcPtrType = m_module.defPointerType(vecType, spv::StorageClassPrivate);\n uint32_t dstPtrType = m_module.defPointerType(vecType, spv::StorageClassOutput);\n\n for (uint32_t i = 0; i < 32; i++) {\n if (m_hs.outputPerPatchMask & (1 << i)) {\n uint32_t index = m_module.constu32(i);\n\n uint32_t srcPtr = m_module.opAccessChain(srcPtrType, m_hs.outputPerPatch, 1, &index);\n uint32_t dstPtr = m_module.opAccessChain(dstPtrType, outputPerPatch, 1, &index);\n\n m_module.opStore(dstPtr, m_module.opLoad(vecType, srcPtr));\n }\n }\n }\n uint32_t emitTessInterfacePerPatch(\n spv::StorageClass storageClass) {\n const char* name = \"vPatch\";\n\n if (storageClass == spv::StorageClassPrivate)\n name = \"rPatch\";\n if (storageClass == spv::StorageClassOutput)\n name = \"oPatch\";\n \n uint32_t arrLen = m_psgn != nullptr ? 
m_psgn->maxRegisterCount() : 0;\n\n if (!arrLen)\n return 0;\n\n uint32_t vecType = m_module.defVectorType (m_module.defFloatType(32), 4);\n uint32_t arrType = m_module.defArrayType (vecType, m_module.constu32(arrLen));\n uint32_t ptrType = m_module.defPointerType(arrType, storageClass);\n uint32_t varId = m_module.newVar (ptrType, storageClass);\n \n m_module.setDebugName (varId, name);\n \n if (storageClass != spv::StorageClassPrivate) {\n m_module.decorate (varId, spv::DecorationPatch);\n m_module.decorateLocation (varId, 0);\n }\n\n return varId;\n }\n uint32_t emitTessInterfacePerVertex(\n spv::StorageClass storageClass,\n uint32_t vertexCount) {\n const bool isInput = storageClass == spv::StorageClassInput;\n \n uint32_t arrLen = isInput\n ? (m_isgn != nullptr ? m_isgn->maxRegisterCount() : 0)\n : (m_osgn != nullptr ? m_osgn->maxRegisterCount() : 0);\n \n if (!arrLen)\n return 0;\n \n uint32_t locIdx = m_psgn != nullptr\n ? m_psgn->maxRegisterCount()\n : 0;\n \n uint32_t vecType = m_module.defVectorType (m_module.defFloatType(32), 4);\n uint32_t arrTypeInner = m_module.defArrayType (vecType, m_module.constu32(arrLen));\n uint32_t arrTypeOuter = m_module.defArrayType (arrTypeInner, m_module.constu32(vertexCount));\n uint32_t ptrType = m_module.defPointerType(arrTypeOuter, storageClass);\n uint32_t varId = m_module.newVar (ptrType, storageClass);\n \n m_module.setDebugName (varId, isInput ? \"vVertex\" : \"oVertex\");\n m_module.decorateLocation (varId, locIdx);\n return varId;\n }\n void emitDclInputArray(\n uint32_t vertexCount) {\n DxbcVectorType info;\n info.ctype = DxbcScalarType::Float32;\n info.ccount = 4;\n\n // Define the array type. This will be two-dimensional\n // in some shaders, with the outer index representing\n // the vertex ID within an invocation.\n m_vArrayLength = m_isgn != nullptr ? 
std::max(1u, m_isgn->maxRegisterCount()) : 1;\n m_vArrayLengthId = m_module.lateConst32(getScalarTypeId(DxbcScalarType::Uint32));\n\n uint32_t vectorTypeId = getVectorTypeId(info);\n uint32_t arrayTypeId = m_module.defArrayType(vectorTypeId, m_vArrayLengthId);\n \n if (vertexCount != 0) {\n arrayTypeId = m_module.defArrayType(\n arrayTypeId, m_module.constu32(vertexCount));\n }\n \n // Define the actual variable. Note that this is private\n // because we will copy input registers and some system\n // variables to the array during the setup phase.\n const uint32_t ptrTypeId = m_module.defPointerType(\n arrayTypeId, spv::StorageClassPrivate);\n \n const uint32_t varId = m_module.newVar(\n ptrTypeId, spv::StorageClassPrivate);\n \n m_module.setDebugName(varId, \"shader_in\");\n m_vArray = varId;\n }\n uint32_t emitDclClipCullDistanceArray(\n uint32_t length,\n spv::BuiltIn builtIn,\n spv::StorageClass storageClass) {\n if (length == 0)\n return 0;\n \n uint32_t t_f32 = m_module.defFloatType(32);\n uint32_t t_arr = m_module.defArrayType(t_f32, m_module.constu32(length));\n uint32_t t_ptr = m_module.defPointerType(t_arr, storageClass);\n uint32_t varId = m_module.newVar(t_ptr, storageClass);\n \n m_module.decorateBuiltIn(varId, builtIn);\n m_module.setDebugName(varId,\n builtIn == spv::BuiltInClipDistance\n ? 
\"clip_distances\"\n : \"cull_distances\");\n \n return varId;\n }\n DxbcCompilerHsControlPointPhase emitNewHullShaderControlPointPhase() {\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr);\n \n uint32_t funId = m_module.allocateId();\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n this->emitFunctionLabel();\n \n DxbcCompilerHsControlPointPhase result;\n result.functionId = funId;\n return result;\n }\n DxbcCompilerHsControlPointPhase emitNewHullShaderPassthroughPhase() {\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr);\n \n // Begin passthrough function\n uint32_t funId = m_module.allocateId();\n m_module.setDebugName(funId, \"hs_passthrough\");\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n this->emitFunctionLabel();\n \n // We'll basically copy each input variable to the corresponding\n // output, using the shader's invocation ID as the array index.\n const uint32_t invocationId = m_module.opLoad(\n getScalarTypeId(DxbcScalarType::Uint32),\n m_hs.builtinInvocationId);\n \n for (auto i = m_isgn->begin(); i != m_isgn->end(); i++) {\n this->emitDclInput(\n i->registerId, m_hs.vertexCountIn,\n i->componentMask,\n DxbcSystemValue::None,\n DxbcInterpolationMode::Undefined);\n \n // Vector type index\n const std::array dstIndices\n = {{ invocationId, m_module.constu32(i->registerId) }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i->registerId).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i->registerId).id, 1, &invocationId);\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n\n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(dstPtr.type), 
spv::StorageClassOutput),\n m_hs.outputPerVertex, dstIndices.size(), dstIndices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n \n // End function\n this->emitFunctionEnd();\n \n DxbcCompilerHsControlPointPhase result;\n result.functionId = funId;\n return result;\n }\n DxbcCompilerHsForkJoinPhase emitNewHullShaderForkJoinPhase() {\n uint32_t argTypeId = m_module.defIntType(32, 0);\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 1, &argTypeId);\n \n uint32_t funId = m_module.allocateId();\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n \n uint32_t argId = m_module.functionParameter(argTypeId);\n this->emitFunctionLabel();\n \n DxbcCompilerHsForkJoinPhase result;\n result.functionId = funId;\n result.instanceId = argId;\n return result;\n }\n uint32_t emitSamplePosArray() {\n const std::array samplePosVectors = {{\n // Invalid sample count / unbound resource\n m_module.constvec2f32( 0.0f, 0.0f),\n // VK_SAMPLE_COUNT_1_BIT\n m_module.constvec2f32( 0.0f, 0.0f),\n // VK_SAMPLE_COUNT_2_BIT\n m_module.constvec2f32( 0.25f, 0.25f),\n m_module.constvec2f32(-0.25f,-0.25f),\n // VK_SAMPLE_COUNT_4_BIT\n m_module.constvec2f32(-0.125f,-0.375f),\n m_module.constvec2f32( 0.375f,-0.125f),\n m_module.constvec2f32(-0.375f, 0.125f),\n m_module.constvec2f32( 0.125f, 0.375f),\n // VK_SAMPLE_COUNT_8_BIT\n m_module.constvec2f32( 0.0625f,-0.1875f),\n m_module.constvec2f32(-0.0625f, 0.1875f),\n m_module.constvec2f32( 0.3125f, 0.0625f),\n m_module.constvec2f32(-0.1875f,-0.3125f),\n m_module.constvec2f32(-0.3125f, 0.3125f),\n m_module.constvec2f32(-0.4375f,-0.0625f),\n m_module.constvec2f32( 0.1875f, 0.4375f),\n m_module.constvec2f32( 0.4375f,-0.4375f),\n // VK_SAMPLE_COUNT_16_BIT\n m_module.constvec2f32( 0.0625f, 0.0625f),\n m_module.constvec2f32(-0.0625f,-0.1875f),\n m_module.constvec2f32(-0.1875f, 0.1250f),\n m_module.constvec2f32( 0.2500f,-0.0625f),\n 
m_module.constvec2f32(-0.3125f,-0.1250f),\n m_module.constvec2f32( 0.1250f, 0.3125f),\n m_module.constvec2f32( 0.3125f, 0.1875f),\n m_module.constvec2f32( 0.1875f,-0.3125f),\n m_module.constvec2f32(-0.1250f, 0.3750f),\n m_module.constvec2f32( 0.0000f,-0.4375f),\n m_module.constvec2f32(-0.2500f,-0.3750f),\n m_module.constvec2f32(-0.3750f, 0.2500f),\n m_module.constvec2f32(-0.5000f, 0.0000f),\n m_module.constvec2f32( 0.4375f,-0.2500f),\n m_module.constvec2f32( 0.3750f, 0.4375f),\n m_module.constvec2f32(-0.4375f,-0.5000f),\n }};\n \n uint32_t arrayTypeId = getArrayTypeId({\n DxbcScalarType::Float32, 2,\n static_cast(samplePosVectors.size()) });\n \n uint32_t samplePosArray = m_module.constComposite(\n arrayTypeId,\n samplePosVectors.size(),\n samplePosVectors.data());\n \n uint32_t varId = m_module.newVarInit(\n m_module.defPointerType(arrayTypeId, spv::StorageClassPrivate),\n spv::StorageClassPrivate, samplePosArray);\n \n m_module.setDebugName(varId, \"g_sample_pos\");\n m_module.decorate(varId, spv::DecorationNonWritable);\n return varId;\n }\n void emitFloatControl() {\n DxbcFloatControlFlags flags = m_moduleInfo.options.floatControl;\n\n if (flags.isClear())\n return;\n\n const uint32_t width32 = 32;\n const uint32_t width64 = 64;\n\n if (flags.test(DxbcFloatControlFlag::DenormFlushToZero32)) {\n m_module.enableCapability(spv::CapabilityDenormFlushToZero);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDenormFlushToZero, 1, &width32);\n }\n\n if (flags.test(DxbcFloatControlFlag::PreserveNan32)) {\n m_module.enableCapability(spv::CapabilitySignedZeroInfNanPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSignedZeroInfNanPreserve, 1, &width32);\n }\n\n if (m_module.hasCapability(spv::CapabilityFloat64)) {\n if (flags.test(DxbcFloatControlFlag::DenormPreserve64)) {\n m_module.enableCapability(spv::CapabilityDenormPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDenormPreserve, 1, &width64);\n }\n\n if 
(flags.test(DxbcFloatControlFlag::PreserveNan64)) {\n m_module.enableCapability(spv::CapabilitySignedZeroInfNanPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSignedZeroInfNanPreserve, 1, &width64);\n }\n }\n }\n uint32_t emitNewVariable(\n const DxbcRegisterInfo& info) {\n const uint32_t ptrTypeId = this->getPointerTypeId(info);\n return m_module.newVar(ptrTypeId, info.sclass);\n }\n uint32_t emitNewBuiltinVariable(\n const DxbcRegisterInfo& info,\n spv::BuiltIn builtIn,\n const char* name) {\n const uint32_t varId = emitNewVariable(info);\n \n if (name)\n m_module.setDebugName(varId, name);\n\n m_module.decorateBuiltIn(varId, builtIn);\n\n if (m_programInfo.type() == DxbcProgramType::PixelShader\n && info.type.ctype != DxbcScalarType::Float32\n && info.type.ctype != DxbcScalarType::Bool\n && info.sclass == spv::StorageClassInput)\n m_module.decorate(varId, spv::DecorationFlat);\n\n return varId;\n }\n uint32_t emitBuiltinTessLevelOuter(\n spv::StorageClass storageClass) {\n uint32_t id = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Float32, 0, 4 },\n storageClass },\n spv::BuiltInTessLevelOuter,\n \"bTessLevelOuter\");\n \n m_module.decorate(id, spv::DecorationPatch);\n return id;\n }\n uint32_t emitBuiltinTessLevelInner(\n spv::StorageClass storageClass) {\n uint32_t id = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Float32, 0, 2 },\n storageClass },\n spv::BuiltInTessLevelInner,\n \"bTessLevelInner\");\n \n m_module.decorate(id, spv::DecorationPatch);\n return id;\n }\n uint32_t emitPushConstants() {\n uint32_t uintTypeId = m_module.defIntType(32, 0);\n uint32_t structTypeId = m_module.defStructTypeUnique(1, &uintTypeId);\n\n m_module.setDebugName(structTypeId, \"pc_t\");\n m_module.setDebugMemberName(structTypeId, 0, \"RasterizerSampleCount\");\n m_module.memberDecorateOffset(structTypeId, 0, 0);\n\n uint32_t ptrTypeId = m_module.defPointerType(structTypeId, spv::StorageClassPushConstant);\n 
uint32_t varId = m_module.newVar(ptrTypeId, spv::StorageClassPushConstant);\n\n m_module.setDebugName(varId, \"pc\");\n return varId;\n }\n DxbcCfgBlock* cfgFindBlock(\n const std::initializer_list& types);\n DxbcBufferInfo getBufferInfo(\n const DxbcRegister& reg) {\n const uint32_t registerId = reg.idx[0].offset;\n \n switch (reg.type) {\n case DxbcOperandType::Resource: {\n const auto& texture = m_textures.at(registerId);\n\n DxbcBufferInfo result;\n result.image = texture.imageInfo;\n result.stype = texture.sampledType;\n result.type = texture.type;\n result.typeId = texture.imageTypeId;\n result.varId = texture.varId;\n result.stride = texture.structStride;\n result.coherence = 0;\n result.isSsbo = texture.isRawSsbo;\n return result;\n } break;\n \n case DxbcOperandType::UnorderedAccessView: {\n const auto& uav = m_uavs.at(registerId);\n\n DxbcBufferInfo result;\n result.image = uav.imageInfo;\n result.stype = uav.sampledType;\n result.type = uav.type;\n result.typeId = uav.imageTypeId;\n result.varId = uav.varId;\n result.stride = uav.structStride;\n result.coherence = uav.coherence;\n result.isSsbo = uav.isRawSsbo;\n return result;\n } break;\n \n case DxbcOperandType::ThreadGroupSharedMemory: {\n DxbcBufferInfo result;\n result.image = { spv::DimBuffer, 0, 0, 0 };\n result.stype = DxbcScalarType::Uint32;\n result.type = m_gRegs.at(registerId).type;\n result.typeId = m_module.defPointerType(\n getScalarTypeId(DxbcScalarType::Uint32),\n spv::StorageClassWorkgroup);\n result.varId = m_gRegs.at(registerId).varId;\n result.stride = m_gRegs.at(registerId).elementStride;\n result.coherence = spv::ScopeInvocation;\n result.isSsbo = false;\n return result;\n } break;\n \n default:\n throw DxvkError(str::format(\"DxbcCompiler: Invalid operand type for buffer: \", reg.type));\n }\n }\n uint32_t getTexSizeDim(\n const DxbcImageInfo& imageType) const {\n switch (imageType.dim) {\n case spv::DimBuffer: return 1 + imageType.array;\n case spv::Dim1D: return 1 + 
imageType.array;\n case spv::Dim2D: return 2 + imageType.array;\n case spv::Dim3D: return 3 + imageType.array;\n case spv::DimCube: return 2 + imageType.array;\n default: throw DxvkError(\"DxbcCompiler: getTexLayerDim: Unsupported image dimension\");\n }\n }\n uint32_t getTexLayerDim(\n const DxbcImageInfo& imageType) const {\n switch (imageType.dim) {\n case spv::DimBuffer: return 1;\n case spv::Dim1D: return 1;\n case spv::Dim2D: return 2;\n case spv::Dim3D: return 3;\n case spv::DimCube: return 3;\n default: throw DxvkError(\"DxbcCompiler: getTexLayerDim: Unsupported image dimension\");\n }\n }\n uint32_t getTexCoordDim(\n const DxbcImageInfo& imageType) const {\n return getTexLayerDim(imageType) + imageType.array;\n }\n DxbcRegMask getTexCoordMask(\n const DxbcImageInfo& imageType) const {\n return DxbcRegMask::firstN(getTexCoordDim(imageType));\n }\n DxbcVectorType getInputRegType(\n uint32_t regIdx) const {\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: {\n const DxbcSgnEntry* entry = m_isgn->findByRegister(regIdx);\n \n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n \n if (entry != nullptr) {\n result.ctype = entry->componentType;\n result.ccount = entry->componentMask.popCount();\n }\n \n return result;\n }\n\n case DxbcProgramType::DomainShader: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n return result;\n }\n\n default: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n\n if (m_isgn == nullptr || !m_isgn->findByRegister(regIdx))\n return result;\n\n DxbcRegMask mask(0u);\n DxbcRegMask used(0u);\n\n for (const auto& e : *m_isgn) {\n if (e.registerId == regIdx && !ignoreInputSystemValue(e.systemValue)) {\n mask |= e.componentMask;\n used |= e.componentUsed;\n }\n }\n\n if (m_programInfo.type() == DxbcProgramType::PixelShader) {\n if ((used.raw() & mask.raw()) == used.raw())\n mask = used;\n }\n\n 
result.ccount = mask.minComponents();\n return result;\n }\n }\n }\n DxbcVectorType getOutputRegType(\n uint32_t regIdx) const {\n switch (m_programInfo.type()) {\n case DxbcProgramType::PixelShader: {\n const DxbcSgnEntry* entry = m_osgn->findByRegister(regIdx);\n\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n \n if (entry != nullptr) {\n result.ctype = entry->componentType;\n result.ccount = entry->componentMask.popCount();\n }\n\n return result;\n }\n\n case DxbcProgramType::HullShader: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n return result;\n }\n\n default: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n\n if (m_osgn->findByRegister(regIdx))\n result.ccount = m_osgn->regMask(regIdx).minComponents();\n return result;\n }\n }\n }\n DxbcImageInfo getResourceType(\n DxbcResourceDim resourceType,\n bool isUav) const {\n uint32_t ms = m_moduleInfo.options.disableMsaa ? 0 : 1;\n\n switch (resourceType) {\n case DxbcResourceDim::Buffer: return { spv::DimBuffer, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_MAX_ENUM };\n case DxbcResourceDim::Texture1D: return { spv::Dim1D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_1D };\n case DxbcResourceDim::Texture1DArr: return { spv::Dim1D, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_1D_ARRAY };\n case DxbcResourceDim::Texture2D: return { spv::Dim2D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D };\n case DxbcResourceDim::Texture2DArr: return { spv::Dim2D, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D_ARRAY };\n case DxbcResourceDim::Texture2DMs: return { spv::Dim2D, 0, ms,isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D };\n case DxbcResourceDim::Texture2DMsArr: return { spv::Dim2D, 1, ms,isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D_ARRAY };\n case DxbcResourceDim::Texture3D: return { spv::Dim3D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_3D };\n case DxbcResourceDim::TextureCube: return { spv::DimCube, 0, 0, isUav ? 
2u : 1u, VK_IMAGE_VIEW_TYPE_CUBE };\n case DxbcResourceDim::TextureCubeArr: return { spv::DimCube, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_CUBE_ARRAY };\n default: throw DxvkError(str::format(\"DxbcCompiler: Unsupported resource type: \", resourceType));\n }\n }\n spv::ImageFormat getScalarImageFormat(\n DxbcScalarType type) const {\n switch (type) {\n case DxbcScalarType::Float32: return spv::ImageFormatR32f;\n case DxbcScalarType::Sint32: return spv::ImageFormatR32i;\n case DxbcScalarType::Uint32: return spv::ImageFormatR32ui;\n default: throw DxvkError(\"DxbcCompiler: Unhandled scalar resource type\");\n }\n }\n bool isDoubleType(\n DxbcScalarType type) const {\n return type == DxbcScalarType::Sint64\n || type == DxbcScalarType::Uint64\n || type == DxbcScalarType::Float64;\n }\n DxbcRegisterPointer getIndexableTempPtr(\n const DxbcRegister& operand,\n DxbcRegisterValue vectorId) {\n // x# regs are indexed as follows:\n // (0) register index (immediate)\n // (1) element index (relative)\n const uint32_t regId = operand.idx[0].offset;\n \n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = m_xRegs[regId].ccount;\n info.type.alength = 0;\n info.sclass = spv::StorageClassPrivate;\n \n DxbcRegisterPointer result;\n result.type.ctype = info.type.ctype;\n result.type.ccount = info.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(info),\n m_xRegs.at(regId).varId,\n 1, &vectorId.id);\n\n return result;\n }\n bool caseBlockIsFallthrough() const {\n return m_lastOp != DxbcOpcode::Case\n && m_lastOp != DxbcOpcode::Default\n && m_lastOp != DxbcOpcode::Break\n && m_lastOp != DxbcOpcode::Ret;\n }\n uint32_t getUavCoherence(\n uint32_t registerId,\n DxbcUavFlags flags) {\n // For any ROV with write access, we must ensure that\n // availability operations happen within the locked scope.\n if (flags.test(DxbcUavFlag::RasterizerOrdered)\n && (m_analysis->uavInfos[registerId].accessFlags & VK_ACCESS_SHADER_WRITE_BIT)) {\n 
m_hasGloballyCoherentUav = true;\n m_hasRasterizerOrderedUav = true;\n return spv::ScopeQueueFamily;\n }\n\n // Ignore any resources that can't both be read and written in\n // the current shader, explicit availability/visibility operands\n // are not useful in that case.\n if (m_analysis->uavInfos[registerId].accessFlags != (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT))\n return 0;\n\n // If the globally coherent flag is set, the resource must be\n // coherent across multiple workgroups of the same dispatch\n if (flags.test(DxbcUavFlag::GloballyCoherent)) {\n m_hasGloballyCoherentUav = true;\n return spv::ScopeQueueFamily;\n }\n\n // In compute shaders, UAVs are implicitly workgroup coherent,\n // but we can rely on memory barrier instructions to make any\n // access available and visible to the entire workgroup.\n if (m_programInfo.type() == DxbcProgramType::ComputeShader)\n return spv::ScopeInvocation;\n\n return 0;\n }\n bool ignoreInputSystemValue(\n DxbcSystemValue sv) const {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::IsFrontFace:\n case DxbcSystemValue::SampleIndex:\n case DxbcSystemValue::PrimitiveId:\n case DxbcSystemValue::Coverage:\n return m_programInfo.type() == DxbcProgramType::PixelShader;\n\n default:\n return false;\n }\n }\n void emitUavBarrier(\n uint64_t readMask,\n uint64_t writeMask) {\n if (!m_moduleInfo.options.forceComputeUavBarriers\n || m_programInfo.type() != DxbcProgramType::ComputeShader)\n return;\n\n // If both masks are 0, emit a barrier in case at least one read-write UAV\n // has a pending unsynchronized access. Only consider read-after-write and\n // write-after-read hazards, assume that back-to-back stores are safe and\n // do not overlap in memory. 
Atomics are also completely ignored here.\n uint64_t rdMask = m_uavRdMask;\n uint64_t wrMask = m_uavWrMask;\n\n bool insertBarrier = bool(rdMask & wrMask);\n\n if (readMask || writeMask) {\n rdMask &= m_uavWrMask;\n wrMask &= m_uavRdMask;\n }\n\n for (auto uav : bit::BitMask(rdMask | wrMask)) {\n constexpr VkAccessFlags rwAccess = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n insertBarrier |= (m_analysis->uavInfos[uav].accessFlags & rwAccess) == rwAccess;\n }\n\n // Need to be in uniform top-level control flow, or otherwise\n // it is not safe to insert control barriers.\n if (insertBarrier && m_controlFlowBlocks.empty() && m_topLevelIsUniform) {\n m_module.opControlBarrier(\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(m_hasGloballyCoherentUav ? spv::ScopeQueueFamily : spv::ScopeWorkgroup),\n m_module.constu32(spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsImageMemoryMask\n | spv::MemorySemanticsUniformMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask));\n\n m_uavWrMask = 0u;\n m_uavRdMask = 0u;\n }\n\n // Mark pending accesses\n m_uavWrMask |= writeMask;\n m_uavRdMask |= readMask;\n }\n uint32_t getScalarTypeId(\n DxbcScalarType type) {\n if (type == DxbcScalarType::Float64)\n m_module.enableCapability(spv::CapabilityFloat64);\n \n if (type == DxbcScalarType::Sint64 || type == DxbcScalarType::Uint64)\n m_module.enableCapability(spv::CapabilityInt64);\n \n switch (type) {\n case DxbcScalarType::Uint32: return m_module.defIntType(32, 0);\n case DxbcScalarType::Uint64: return m_module.defIntType(64, 0);\n case DxbcScalarType::Sint32: return m_module.defIntType(32, 1);\n case DxbcScalarType::Sint64: return m_module.defIntType(64, 1);\n case DxbcScalarType::Float32: return m_module.defFloatType(32);\n case DxbcScalarType::Float64: return m_module.defFloatType(64);\n case DxbcScalarType::Bool: return m_module.defBoolType();\n }\n\n throw 
DxvkError(\"DxbcCompiler: Invalid scalar type\");\n }\n uint32_t getVectorTypeId(\n const DxbcVectorType& type) {\n uint32_t typeId = this->getScalarTypeId(type.ctype);\n \n if (type.ccount > 1)\n typeId = m_module.defVectorType(typeId, type.ccount);\n \n return typeId;\n }\n uint32_t getArrayTypeId(\n const DxbcArrayType& type) {\n DxbcVectorType vtype;\n vtype.ctype = type.ctype;\n vtype.ccount = type.ccount;\n \n uint32_t typeId = this->getVectorTypeId(vtype);\n \n if (type.alength != 0) {\n typeId = m_module.defArrayType(typeId,\n m_module.constu32(type.alength));\n }\n \n return typeId;\n }\n uint32_t getPointerTypeId(\n const DxbcRegisterInfo& type) {\n return m_module.defPointerType(\n this->getArrayTypeId(type.type),\n type.sclass);\n }\n uint32_t getSparseResultTypeId(\n uint32_t baseType) {\n m_module.enableCapability(spv::CapabilitySparseResidency);\n\n uint32_t uintType = getScalarTypeId(DxbcScalarType::Uint32);\n std::array typeIds = { uintType, baseType };\n return m_module.defStructType(typeIds.size(), typeIds.data());\n }\n uint32_t getFunctionId(\n uint32_t functionNr) {\n auto entry = m_subroutines.find(functionNr);\n if (entry != m_subroutines.end())\n return entry->second;\n \n uint32_t functionId = m_module.allocateId();\n m_subroutines.insert({ functionNr, functionId });\n return functionId;\n }\n DxbcCompilerHsForkJoinPhase* getCurrentHsForkJoinPhase();\n};"], ["/lsfg-vk/src/extract/extract.cpp", "#include \"extract/extract.hpp\"\n#include \"config/config.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Extract;\n\nconst std::unordered_map nameIdxTable = {{\n { \"mipmaps\", 255 },\n { \"alpha[0]\", 267 },\n { \"alpha[1]\", 268 },\n { \"alpha[2]\", 269 },\n { \"alpha[3]\", 270 },\n { \"beta[0]\", 275 },\n { \"beta[1]\", 276 },\n { \"beta[2]\", 277 },\n { \"beta[3]\", 278 },\n { \"beta[4]\", 279 },\n { \"gamma[0]\", 257 },\n { \"gamma[1]\", 259 },\n { \"gamma[2]\", 
260 },\n { \"gamma[3]\", 261 },\n { \"gamma[4]\", 262 },\n { \"delta[0]\", 257 },\n { \"delta[1]\", 263 },\n { \"delta[2]\", 264 },\n { \"delta[3]\", 265 },\n { \"delta[4]\", 266 },\n { \"delta[5]\", 258 },\n { \"delta[6]\", 271 },\n { \"delta[7]\", 272 },\n { \"delta[8]\", 273 },\n { \"delta[9]\", 274 },\n { \"generate\", 256 },\n { \"p_mipmaps\", 255 },\n { \"p_alpha[0]\", 290 },\n { \"p_alpha[1]\", 291 },\n { \"p_alpha[2]\", 292 },\n { \"p_alpha[3]\", 293 },\n { \"p_beta[0]\", 298 },\n { \"p_beta[1]\", 299 },\n { \"p_beta[2]\", 300 },\n { \"p_beta[3]\", 301 },\n { \"p_beta[4]\", 302 },\n { \"p_gamma[0]\", 280 },\n { \"p_gamma[1]\", 282 },\n { \"p_gamma[2]\", 283 },\n { \"p_gamma[3]\", 284 },\n { \"p_gamma[4]\", 285 },\n { \"p_delta[0]\", 280 },\n { \"p_delta[1]\", 286 },\n { \"p_delta[2]\", 287 },\n { \"p_delta[3]\", 288 },\n { \"p_delta[4]\", 289 },\n { \"p_delta[5]\", 281 },\n { \"p_delta[6]\", 294 },\n { \"p_delta[7]\", 295 },\n { \"p_delta[8]\", 296 },\n { \"p_delta[9]\", 297 },\n { \"p_generate\", 256 },\n}};\n\nnamespace {\n auto& shaders() {\n static std::unordered_map> shaderData;\n return shaderData;\n }\n\n int on_resource(void*, const peparse::resource& res) {\n if (res.type != peparse::RT_RCDATA || res.buf == nullptr || res.buf->bufLen <= 0)\n return 0;\n std::vector resource_data(res.buf->bufLen);\n std::copy_n(res.buf->buf, res.buf->bufLen, resource_data.data());\n shaders()[res.name] = resource_data;\n return 0;\n }\n\n const std::vector PATHS{{\n \".local/share/Steam/steamapps/common\",\n \".steam/steam/steamapps/common\",\n \".steam/debian-installation/steamapps/common\",\n \".var/app/com.valvesoftware.Steam/.local/share/Steam/steamapps/common\",\n \"snap/steam/common/.local/share/Steam/steamapps/common\"\n }};\n\n std::string getDllPath() {\n // overriden path\n std::string dllPath = Config::activeConf.dll;\n if (!dllPath.empty())\n return dllPath;\n // home based paths\n const char* home = getenv(\"HOME\");\n const std::string homeStr = home ? 
home : \"\";\n for (const auto& base : PATHS) {\n const std::filesystem::path path =\n std::filesystem::path(homeStr) / base / \"Lossless Scaling\" / \"Lossless.dll\";\n if (std::filesystem::exists(path))\n return path.string();\n }\n // xdg home\n const char* dataDir = getenv(\"XDG_DATA_HOME\");\n if (dataDir && *dataDir != '\\0')\n return std::string(dataDir) + \"/Steam/steamapps/common/Lossless Scaling/Lossless.dll\";\n // final fallback\n return \"Lossless.dll\";\n }\n}\n\nvoid Extract::extractShaders() {\n if (!shaders().empty())\n return;\n\n // parse the dll\n peparse::parsed_pe* dll = peparse::ParsePEFromFile(getDllPath().c_str());\n if (!dll)\n throw std::runtime_error(\"Unable to read Lossless.dll, is it installed?\");\n peparse::IterRsrc(dll, on_resource, nullptr);\n peparse::DestructParsedPE(dll);\n\n // ensure all shaders are present\n for (const auto& [name, idx] : nameIdxTable)\n if (shaders().find(idx) == shaders().end())\n throw std::runtime_error(\"Shader not found: \" + name + \".\\n- Is Lossless Scaling up to date?\");\n}\n\nstd::vector Extract::getShader(const std::string& name) {\n if (shaders().empty())\n throw std::runtime_error(\"Shaders are not loaded.\");\n\n auto hit = nameIdxTable.find(name);\n if (hit == nameIdxTable.end())\n throw std::runtime_error(\"Shader hash not found: \" + name);\n\n auto sit = shaders().find(hit->second);\n if (sit == shaders().end())\n throw std::runtime_error(\"Shader not found: \" + name);\n\n return sit->second;\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/buffer.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom 
the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n\n// keep this header above \"windows.h\" because it contains many types\n#include \n\n#ifdef _WIN32\n\n#define WIN32_LEAN_AND_MEAN\n#define VC_EXTRALEAN\n\n#include \n#include \n#else\n#include \n#include \n#include \n#include \n#endif\n\nnamespace {\n\ninline std::uint16_t byteSwapUint16(std::uint16_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_ushort(val);\n#else\n return __builtin_bswap16(val);\n#endif\n}\n\ninline std::uint32_t byteSwapUint32(std::uint32_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_ulong(val);\n#else\n return __builtin_bswap32(val);\n#endif\n}\n\ninline uint64_t byteSwapUint64(std::uint64_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_uint64(val);\n#else\n return __builtin_bswap64(val);\n#endif\n}\n\n} // anonymous namespace\n\nnamespace peparse {\n\nextern std::uint32_t err;\nextern std::string err_loc;\n\nstruct buffer_detail {\n#ifdef _WIN32\n HANDLE file;\n HANDLE sec;\n#else\n int fd;\n#endif\n};\n\nbool readByte(bounded_buffer *b, std::uint32_t offset, std::uint8_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (offset >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint8_t *tmp = (b->buf + 
offset);\n out = *tmp;\n\n return true;\n}\n\nbool readWord(bounded_buffer *b, std::uint32_t offset, std::uint16_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 1 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint16_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint16_t));\n if (b->swapBytes) {\n out = byteSwapUint16(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readDword(bounded_buffer *b, std::uint32_t offset, std::uint32_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 3 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint32_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint32_t));\n if (b->swapBytes) {\n out = byteSwapUint32(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readQword(bounded_buffer *b, std::uint32_t offset, std::uint64_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 7 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint64_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint64_t));\n if (b->swapBytes) {\n out = byteSwapUint64(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readChar16(bounded_buffer *b, std::uint32_t offset, char16_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 1 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n char16_t tmp;\n if (b->swapBytes) {\n std::uint8_t tmpBuf[2];\n tmpBuf[0] = *(b->buf + offset + 1);\n tmpBuf[1] = *(b->buf + offset);\n memcpy(&tmp, tmpBuf, sizeof(std::uint16_t));\n } else {\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint16_t));\n }\n out = tmp;\n\n return true;\n}\n\nbounded_buffer *readFileToFileBuffer(const char *filePath) {\n#ifdef _WIN32\n HANDLE h = CreateFileA(filePath,\n GENERIC_READ,\n FILE_SHARE_READ | FILE_SHARE_WRITE | 
FILE_SHARE_DELETE,\n nullptr,\n OPEN_EXISTING,\n FILE_ATTRIBUTE_NORMAL,\n nullptr);\n if (h == INVALID_HANDLE_VALUE) {\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n\n DWORD fileSize = GetFileSize(h, nullptr);\n\n if (fileSize == INVALID_FILE_SIZE) {\n CloseHandle(h);\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n\n#else\n // only where we have mmap / open / etc\n int fd = open(filePath, O_RDONLY);\n\n if (fd == -1) {\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n#endif\n\n // make a buffer object\n bounded_buffer *p = new (std::nothrow) bounded_buffer();\n if (p == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n memset(p, 0, sizeof(bounded_buffer));\n buffer_detail *d = new (std::nothrow) buffer_detail();\n\n if (d == nullptr) {\n delete p;\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n memset(d, 0, sizeof(buffer_detail));\n p->detail = d;\n\n// only where we have mmap / open / etc\n#ifdef _WIN32\n p->detail->file = h;\n\n HANDLE hMap = CreateFileMapping(h, nullptr, PAGE_READONLY, 0, 0, nullptr);\n\n if (hMap == nullptr) {\n CloseHandle(h);\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->detail->sec = hMap;\n\n LPVOID ptr = MapViewOfFile(hMap, FILE_MAP_READ, 0, 0, 0);\n\n if (ptr == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->buf = reinterpret_cast(ptr);\n p->bufLen = fileSize;\n#else\n p->detail->fd = fd;\n\n struct stat s;\n memset(&s, 0, sizeof(struct stat));\n\n if (fstat(fd, &s) != 0) {\n close(fd);\n delete d;\n delete p;\n PE_ERR(PEERR_STAT);\n return nullptr;\n }\n\n void *maddr = mmap(nullptr,\n static_cast(s.st_size),\n PROT_READ,\n MAP_SHARED,\n fd,\n 0);\n\n if (maddr == MAP_FAILED) {\n close(fd);\n delete d;\n delete p;\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->buf = reinterpret_cast(maddr);\n p->bufLen = static_cast(s.st_size);\n#endif\n p->copy = false;\n p->swapBytes = false;\n\n return p;\n}\n\nbounded_buffer *makeBufferFromPointer(std::uint8_t *data, std::uint32_t sz) {\n if (data == nullptr) {\n 
PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n bounded_buffer *p = new (std::nothrow) bounded_buffer();\n\n if (p == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->copy = true;\n p->detail = nullptr;\n p->buf = data;\n p->bufLen = sz;\n p->swapBytes = false;\n\n return p;\n}\n\n// split buffer inclusively from from to to by offset\nbounded_buffer *\nsplitBuffer(bounded_buffer *b, std::uint32_t from, std::uint32_t to) {\n if (b == nullptr) {\n return nullptr;\n }\n\n // safety checks\n if (to < from || to > b->bufLen) {\n return nullptr;\n }\n\n // make a new buffer\n auto newBuff = new (std::nothrow) bounded_buffer();\n if (newBuff == nullptr) {\n return nullptr;\n }\n\n newBuff->copy = true;\n newBuff->buf = b->buf + from;\n newBuff->bufLen = (to - from);\n\n return newBuff;\n}\n\nvoid deleteBuffer(bounded_buffer *b) {\n if (b == nullptr) {\n return;\n }\n\n if (!b->copy) {\n#ifdef _WIN32\n UnmapViewOfFile(b->buf);\n CloseHandle(b->detail->sec);\n CloseHandle(b->detail->file);\n#else\n munmap(b->buf, b->bufLen);\n close(b->detail->fd);\n#endif\n }\n\n delete b->detail;\n delete b;\n}\n\nstd::uint64_t bufLen(bounded_buffer *b) {\n return b->bufLen;\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_pipelayout.h", "#pragma once\n\n#include \n\n#include \n\n#include \"dxvk_hash.h\"\n\n#include \"util_math.h\"\n#include \"util_bit.h\"\n#include \"util_flags.h\"\n\nnamespace dxvk {\n\n class DxvkDevice;\n class DxvkPipelineManager;\n\n /**\n * \\brief Order-invariant atomic access operation\n *\n * Information used to optimize barriers when a resource\n * is accessed exlusively via order-invariant stores.\n */\n struct DxvkAccessOp {\n enum OpType : uint16_t {\n None = 0x0u,\n Or = 0x1u,\n And = 0x2u,\n Xor = 0x3u,\n Add = 0x4u,\n IMin = 0x5u,\n IMax = 0x6u,\n UMin = 0x7u,\n UMax = 0x8u,\n\n StoreF = 0xdu,\n StoreUi = 0xeu,\n StoreSi = 0xfu,\n };\n\n DxvkAccessOp() = default;\n DxvkAccessOp(OpType t)\n : op(uint16_t(t)) { }\n\n 
DxvkAccessOp(OpType t, uint16_t constant)\n : op(uint16_t(t) | (constant << 4u)) { }\n\n uint16_t op = 0u;\n\n bool operator == (const DxvkAccessOp& t) const { return op == t.op; }\n bool operator != (const DxvkAccessOp& t) const { return op != t.op; }\n\n template, bool> = true>\n explicit operator T() const { return op; }\n };\n\n static_assert(sizeof(DxvkAccessOp) == sizeof(uint16_t));\n\n /**\n * \\brief Binding info\n *\n * Stores metadata for a single binding in\n * a given shader, or for the whole pipeline.\n */\n struct DxvkBindingInfo {\n VkDescriptorType descriptorType = VK_DESCRIPTOR_TYPE_MAX_ENUM; ///< Vulkan descriptor type\n uint32_t resourceBinding = 0u; ///< API binding slot for the resource\n VkImageViewType viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM; ///< Image view type\n VkShaderStageFlagBits stage = VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM; ///< Shader stage\n VkAccessFlags access = 0u; ///< Access mask for the resource\n DxvkAccessOp accessOp = DxvkAccessOp::None; ///< Order-invariant store type, if any\n bool uboSet = false; ///< Whether to include this in the UBO set\n bool isMultisampled = false; ///< Multisampled binding\n\n /**\n * \\brief Computes descriptor set index for the given binding\n *\n * This is determines based on the shader stages that use the binding.\n * \\returns Descriptor set index\n */\n uint32_t computeSetIndex() const;\n\n /**\n * \\brief Numeric value of the binding\n *\n * Used when sorting bindings.\n * \\returns Numeric value\n */\n uint32_t value() const;\n\n /**\n * \\brief Checks for equality\n *\n * \\param [in] other Binding to compare to\n * \\returns \\c true if both bindings are equal\n */\n bool eq(const DxvkBindingInfo& other) const;\n\n /**\n * \\brief Hashes binding info\n * \\returns Binding hash\n */\n size_t hash() const;\n\n };\n\n}\n"], ["/lsfg-vk/src/main.cpp", "#include \"config/config.hpp\"\n#include \"extract/extract.hpp\"\n#include \"utils/benchmark.hpp\"\n#include \"utils/utils.hpp\"\n\n#include 
\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace {\n __attribute__((constructor)) void lsfgvk_init() {\n std::cerr << std::unitbuf;\n\n // read configuration\n const std::string file = Utils::getConfigFile();\n try {\n Config::updateConfig(file);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occured while trying to parse the configuration, IGNORING:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return; // default configuration will unload\n }\n\n const auto name = Utils::getProcessName();\n try {\n Config::activeConf = Config::getConfig(name);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: The configuration for \" << name.second << \" is invalid, IGNORING:\\n\";\n std::cerr << e.what() << '\\n';\n return; // default configuration will unload\n }\n\n // exit silently if not enabled\n auto& conf = Config::activeConf;\n if (!conf.enable && name.second != \"benchmark\")\n return; // default configuration will unload\n\n // print config\n std::cerr << \"lsfg-vk: Loaded configuration for \" << name.second << \":\\n\";\n if (!conf.dll.empty()) std::cerr << \" Using DLL from: \" << conf.dll << '\\n';\n std::cerr << \" Multiplier: \" << conf.multiplier << '\\n';\n std::cerr << \" Flow Scale: \" << conf.flowScale << '\\n';\n std::cerr << \" Performance Mode: \" << (conf.performance ? \"Enabled\" : \"Disabled\") << '\\n';\n std::cerr << \" HDR Mode: \" << (conf.hdr ? \"Enabled\" : \"Disabled\") << '\\n';\n if (conf.e_present != 2) std::cerr << \" ! 
Present Mode: \" << conf.e_present << '\\n';\n\n // remove mesa var in favor of config\n unsetenv(\"MESA_VK_WSI_PRESENT_MODE\"); // NOLINT\n\n // write latest file\n try {\n std::ofstream latest(\"/tmp/lsfg-vk_last\", std::ios::trunc);\n if (!latest.is_open())\n throw std::runtime_error(\"Failed to open /tmp/lsfg-vk_last for writing\");\n latest << \"exe: \" << name.first << '\\n';\n latest << \"comm: \" << name.second << '\\n';\n latest << \"pid: \" << getpid() << '\\n';\n if (!latest.good())\n throw std::runtime_error(\"Failed to write to /tmp/lsfg-vk_last\");\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to write the latest file, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n\n // load shaders\n try {\n Extract::extractShaders();\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to extract the shaders, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n std::cerr << \"lsfg-vk: Shaders extracted successfully.\\n\";\n\n // run benchmark if requested\n const char* benchmark_flag = std::getenv(\"LSFG_BENCHMARK\");\n if (!benchmark_flag)\n return;\n\n const std::string resolution(benchmark_flag);\n uint32_t width{};\n uint32_t height{};\n try {\n const size_t x = resolution.find('x');\n if (x == std::string::npos)\n throw std::runtime_error(\"Unable to find 'x' in benchmark string\");\n\n const std::string width_str = resolution.substr(0, x);\n const std::string height_str = resolution.substr(x + 1);\n if (width_str.empty() || height_str.empty())\n throw std::runtime_error(\"Invalid resolution\");\n\n const int32_t w = std::stoi(width_str);\n const int32_t h = std::stoi(height_str);\n if (w < 0 || h < 0)\n throw std::runtime_error(\"Resolution cannot be negative\");\n\n width = static_cast(w);\n height = static_cast(h);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred 
while trying to parse the resolution, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n\n std::thread benchmark([width, height]() {\n try {\n Benchmark::run(width, height);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred during the benchmark:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n });\n benchmark.detach();\n conf.enable = false;\n }\n}\n"], ["/lsfg-vk/thirdparty/toml11/tools/expand/main.cpp", "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nstd::optional\nis_include(const std::string& line, const std::filesystem::path& filepath)\n{\n // [ws] # [ws] include [ws] \\\".+\\\"\n auto iter = line.begin();\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != '#') {return std::nullopt;}\n\n assert(*iter == '#');\n ++iter;\n\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != 'i') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'n') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'c') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'l') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'u') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'd') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'e') {return std::nullopt;} else {++iter;}\n\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != '\"') {return std::nullopt;} else {++iter;}\n\n std::string filename;\n while(iter < line.end())\n {\n if(*iter == '\"') {break;}\n filename += *iter;\n ++iter;\n }\n if(iter == line.end() || *iter != '\"') {return 
std::nullopt;} else {++iter;}\n\n return std::filesystem::canonical(filepath.parent_path() / std::filesystem::path(filename));\n}\n\nstruct File\n{\n File() = default;\n\n explicit File(std::filesystem::path f)\n : filename(std::move(f))\n {\n std::ifstream ifs(filename);\n if( ! ifs.good())\n {\n throw std::runtime_error(\"file open error: \" + filename.string());\n }\n\n std::string line;\n while(std::getline(ifs, line))\n {\n if(const auto incl = is_include(line, filename))\n {\n includes.push_back(incl.value());\n }\n else\n {\n content.push_back(line);\n }\n }\n }\n\n File(std::filesystem::path f, std::vector c,\n std::vector i)\n : filename(std::move(f)), content(std::move(c)), includes(std::move(i))\n {}\n\n std::filesystem::path filename;\n std::vector content; // w/o include\n std::vector includes;\n};\n\nstruct Graph\n{\n struct Node\n {\n std::vector included;\n std::vector includes;\n };\n\n std::map nodes;\n};\n\nint main(int argc, char** argv)\n{\n using namespace std::literals::string_literals;\n if(argc != 2)\n {\n std::cerr << \"Usage: ./a.out path/to/toml.hpp > single_include/toml.hpp\" << std::endl;\n return 1;\n }\n\n const auto input_file = std::filesystem::path(std::string(argv[1]));\n assert(input_file.filename() == \"toml.hpp\");\n\n const auto include_path = input_file.parent_path();\n\n // -------------------------------------------------------------------------\n // load files and detect `include \"xxx.hpp\"`.\n // If the file has `_fwd` and `_impl`, expand those files first.\n\n std::set fwd_impl_files;\n for(const auto& entities : std::filesystem::directory_iterator(include_path/\"toml11\"/\"fwd\"))\n {\n if( ! 
entities.is_regular_file()) {continue;}\n std::string fname = entities.path().filename().string();\n if(fname.ends_with(\"_fwd.hpp\"))\n {\n for(const auto c : \"_fwd.hpp\"s) {fname.pop_back(); (void)c;}\n fwd_impl_files.insert(std::move(fname));\n }\n }\n for(const auto& entities : std::filesystem::directory_iterator(include_path/\"toml11\"/\"impl\"))\n {\n if( ! entities.is_regular_file()) {continue;}\n std::string fname = entities.path().filename().string();\n if(fname.ends_with(\"_impl.hpp\"))\n {\n for(const auto c : \"_impl.hpp\"s) {fname.pop_back(); (void)c;}\n // all impl files has fwd file\n assert(fwd_impl_files.contains(fname));\n }\n }\n\n const auto input = File(input_file);\n\n std::map files;\n files[input_file] = input;\n\n for(const auto& fname : input.includes)\n {\n if(fwd_impl_files.contains(fname.stem().string()))\n {\n std::cerr << \"expanding fwd/impl file of \" << fname.string() << std::endl;\n\n // expand the first include\n std::ifstream ifs(fname);\n\n std::vector content;\n std::vector includes;\n\n std::string line;\n while(std::getline(ifs, line))\n {\n // expand _fwd and _impl files first.\n const auto incl = is_include(line, fname);\n if(incl.has_value())\n {\n // if a file has _fwd/_impl files, it only includes fwd/impl files.\n assert(incl.value().string().ends_with(\"_impl.hpp\") ||\n incl.value().string().ends_with(\"_fwd.hpp\") );\n\n const File included(incl.value());\n for(const auto& l : included.content)\n {\n content.push_back(l);\n }\n for(const auto& i : included.includes)\n {\n includes.push_back(i);\n }\n }\n else\n {\n content.push_back(line);\n }\n }\n files[fname] = File(fname, std::move(content), std::move(includes));\n }\n else\n {\n files[fname] = File(fname);\n }\n std::cerr << \"file \" << fname << \" has \" << files.at(fname).content.size() << \" lines.\" << std::endl;\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"files have been read. 
next: constructing dependency graph...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // construct dependency graph\n\n Graph g;\n for(const auto& [k, v] : files)\n {\n g.nodes[k] = Graph::Node{};\n }\n\n for(const auto& [fname, file] : files)\n {\n for(auto incl : file.includes)\n {\n auto incl_stem = incl.stem().string();\n if(incl_stem.ends_with(\"_fwd\"))\n {\n for(const char c : \"_fwd\"s) {incl_stem.pop_back(); (void)c;}\n\n // include original file instaed\n incl = incl.parent_path() / \"..\" / std::filesystem::path(incl_stem + \".hpp\");\n }\n else if(incl_stem.ends_with(\"_impl\"))\n {\n for(const char c : \"_impl\"s) {incl_stem.pop_back(); (void)c;}\n\n // include original file instaed\n incl = incl.parent_path() / \"..\" / std::filesystem::path(incl_stem + \".hpp\");\n }\n incl = std::filesystem::canonical(incl);\n\n // avoid self include loop\n if(fname != incl)\n {\n std::cerr << fname << \" includes \" << incl << std::endl;\n\n g.nodes.at(fname).includes.push_back(incl);\n g.nodes.at(incl) .included.push_back(fname);\n }\n }\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"graph has been constructed. flattening...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // flatten graph by topological sort\n\n // collect files that does not include\n std::vector sources;\n for(const auto& [fname, node] : g.nodes)\n {\n if(node.includes.empty())\n {\n sources.push_back(fname);\n }\n }\n assert( ! sources.empty());\n\n std::vector sorted;\n while( ! 
sources.empty())\n {\n const auto file = sources.back();\n sorted.push_back(sources.back());\n sources.pop_back();\n\n for(const auto& included : g.nodes.at(file).included)\n {\n auto found = std::find(g.nodes.at(included).includes.begin(),\n g.nodes.at(included).includes.end(), file);\n g.nodes.at(included).includes.erase(found);\n\n if(g.nodes.at(included).includes.empty())\n {\n sources.push_back(included);\n }\n }\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"flattened. outputting...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // output all the file in the sorted order\n\n for(const auto& fname : sorted)\n {\n std::cerr << \"expanding: \" << fname << std::endl;\n for(const auto& line : files.at(fname).content)\n {\n std::cout << line << '\\n';\n }\n }\n\n return 0;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_limits.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n\n enum DxvkLimits : size_t {\n MaxNumRenderTargets = 8,\n MaxNumVertexAttributes = 32,\n MaxNumVertexBindings = 32,\n MaxNumXfbBuffers = 4,\n MaxNumXfbStreams = 4,\n MaxNumViewports = 16,\n MaxNumResourceSlots = 1216,\n MaxNumQueuedCommandBuffers = 32,\n MaxNumQueryCountPerPool = 128,\n MaxNumSpecConstants = 12,\n MaxUniformBufferSize = 65536,\n MaxVertexBindingStride = 2048,\n MaxPushConstantSize = 128,\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_bit.h", "#pragma once\n\n#if (defined(__x86_64__) && !defined(__arm64ec__)) || (defined(_M_X64) && !defined(_M_ARM64EC)) \\\n || defined(__i386__) || defined(_M_IX86) || defined(__e2k__)\n #define DXVK_ARCH_X86\n #if defined(__x86_64__) || defined(_M_X64) || defined(__e2k__)\n #define DXVK_ARCH_X86_64\n #endif\n#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)\n #define DXVK_ARCH_ARM64\n#endif\n\n#ifdef DXVK_ARCH_X86\n 
#ifndef _MSC_VER\n #if defined(_WIN32) && (defined(__AVX__) || defined(__AVX2__))\n #error \"AVX-enabled builds not supported due to stack alignment issues.\"\n #endif\n #if defined(__WINE__) && defined(__clang__)\n #pragma push_macro(\"_WIN32\")\n #undef _WIN32\n #endif\n #include \n #if defined(__WINE__) && defined(__clang__)\n #pragma pop_macro(\"_WIN32\")\n #endif\n #else\n #include \n #endif\n#endif\n\n#include \"util_likely.h\"\n#include \"util_math.h\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace dxvk::bit {\n\n template\n T cast(const J& src) {\n static_assert(sizeof(T) == sizeof(J));\n static_assert(std::is_trivially_copyable::value && std::is_trivial::value);\n\n T dst;\n std::memcpy(&dst, &src, sizeof(T));\n return dst;\n }\n \n template\n T extract(T value, uint32_t fst, uint32_t lst) {\n return (value >> fst) & ~(~T(0) << (lst - fst + 1));\n }\n\n template\n T popcnt(T n) {\n n -= ((n >> 1u) & T(0x5555555555555555ull));\n n = (n & T(0x3333333333333333ull)) + ((n >> 2u) & T(0x3333333333333333ull));\n n = (n + (n >> 4u)) & T(0x0f0f0f0f0f0f0f0full);\n n *= T(0x0101010101010101ull);\n return n >> (8u * (sizeof(T) - 1u));\n }\n\n inline uint32_t tzcnt(uint32_t n) {\n #if defined(_MSC_VER) && !defined(__clang__)\n if(n == 0)\n return 32;\n return _tzcnt_u32(n);\n #elif defined(__BMI__)\n return __tzcnt_u32(n);\n #elif defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__))\n // tzcnt is encoded as rep bsf, so we can use it on all\n // processors, but the behaviour of zero inputs differs:\n // - bsf: zf = 1, cf = ?, result = ?\n // - tzcnt: zf = 0, cf = 1, result = 32\n // We'll have to handle this case manually.\n uint32_t res;\n uint32_t tmp;\n asm (\n \"tzcnt %2, %0;\"\n \"mov $32, %1;\"\n \"test %2, %2;\"\n \"cmovz %1, %0;\"\n : \"=&r\" (res), \"=&r\" (tmp)\n : \"r\" (n)\n : \"cc\");\n return res;\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? 
__builtin_ctz(n) : 32;\n #else\n uint32_t r = 31;\n n &= -n;\n r -= (n & 0x0000FFFF) ? 16 : 0;\n r -= (n & 0x00FF00FF) ? 8 : 0;\n r -= (n & 0x0F0F0F0F) ? 4 : 0;\n r -= (n & 0x33333333) ? 2 : 0;\n r -= (n & 0x55555555) ? 1 : 0;\n return n != 0 ? r : 32;\n #endif\n }\n\n inline uint32_t tzcnt(uint64_t n) {\n #if defined(DXVK_ARCH_X86_64) && defined(_MSC_VER) && !defined(__clang__)\n if(n == 0)\n return 64;\n return (uint32_t)_tzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && defined(__BMI__)\n return __tzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && (defined(__GNUC__) || defined(__clang__))\n uint64_t res;\n uint64_t tmp;\n asm (\n \"tzcnt %2, %0;\"\n \"mov $64, %1;\"\n \"test %2, %2;\"\n \"cmovz %1, %0;\"\n : \"=&r\" (res), \"=&r\" (tmp)\n : \"r\" (n)\n : \"cc\");\n return res;\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? __builtin_ctzll(n) : 64;\n #else\n uint32_t lo = uint32_t(n);\n if (lo) {\n return tzcnt(lo);\n } else {\n uint32_t hi = uint32_t(n >> 32);\n return tzcnt(hi) + 32;\n }\n #endif\n }\n\n inline uint32_t bsf(uint32_t n) {\n #if (defined(__GNUC__) || defined(__clang__)) && !defined(__BMI__) && defined(DXVK_ARCH_X86)\n uint32_t res;\n asm (\"tzcnt %1,%0\"\n : \"=r\" (res)\n : \"r\" (n)\n : \"cc\");\n return res;\n #else\n return tzcnt(n);\n #endif\n }\n\n inline uint32_t bsf(uint64_t n) {\n #if (defined(__GNUC__) || defined(__clang__)) && !defined(__BMI__) && defined(DXVK_ARCH_X86_64)\n uint64_t res;\n asm (\"tzcnt %1,%0\"\n : \"=r\" (res)\n : \"r\" (n)\n : \"cc\");\n return res;\n #else\n return tzcnt(n);\n #endif\n }\n\n inline uint32_t lzcnt(uint32_t n) {\n #if defined(_MSC_VER) && !defined(__clang__) && !defined(__LZCNT__)\n unsigned long bsr;\n if(n == 0)\n return 32;\n _BitScanReverse(&bsr, n);\n return 31-bsr;\n #elif (defined(_MSC_VER) && !defined(__clang__)) || defined(__LZCNT__)\n return _lzcnt_u32(n);\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? 
__builtin_clz(n) : 32;\n #else\n uint32_t r = 0;\n\n if (n == 0)\treturn 32;\n\n if (n <= 0x0000FFFF) { r += 16; n <<= 16; }\n if (n <= 0x00FFFFFF) { r += 8; n <<= 8; }\n if (n <= 0x0FFFFFFF) { r += 4; n <<= 4; }\n if (n <= 0x3FFFFFFF) { r += 2; n <<= 2; }\n if (n <= 0x7FFFFFFF) { r += 1; n <<= 1; }\n\n return r;\n #endif\n }\n\n inline uint32_t lzcnt(uint64_t n) {\n #if defined(_MSC_VER) && !defined(__clang__) && !defined(__LZCNT__) && defined(DXVK_ARCH_X86_64)\n unsigned long bsr;\n if(n == 0)\n return 64;\n _BitScanReverse64(&bsr, n);\n return 63-bsr;\n #elif defined(DXVK_ARCH_X86_64) && ((defined(_MSC_VER) && !defined(__clang__)) && defined(__LZCNT__))\n return _lzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && (defined(__GNUC__) || defined(__clang__))\n return n != 0 ? __builtin_clzll(n) : 64;\n #else\n uint32_t lo = uint32_t(n);\n uint32_t hi = uint32_t(n >> 32u);\n return hi ? lzcnt(hi) : lzcnt(lo) + 32u;\n #endif\n }\n\n template\n uint32_t pack(T& dst, uint32_t& shift, T src, uint32_t count) {\n constexpr uint32_t Bits = 8 * sizeof(T);\n if (likely(shift < Bits))\n dst |= src << shift;\n shift += count;\n return shift > Bits ? shift - Bits : 0;\n }\n\n template\n uint32_t unpack(T& dst, T src, uint32_t& shift, uint32_t count) {\n constexpr uint32_t Bits = 8 * sizeof(T);\n if (likely(shift < Bits))\n dst = (src >> shift) & ((T(1) << count) - 1);\n shift += count;\n return shift > Bits ? shift - Bits : 0;\n }\n\n\n /**\n * \\brief Clears cache lines of memory\n *\n * Uses non-temporal stores. 
The memory region offset\n * and size are assumed to be aligned to 64 bytes.\n * \\param [in] mem Memory region to clear\n * \\param [in] size Number of bytes to clear\n */\n inline void bclear(void* mem, size_t size) {\n #if defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))\n auto zero = _mm_setzero_si128();\n\n #if defined(__clang__)\n #pragma nounroll\n #elif defined(__GNUC__)\n #pragma GCC unroll 0\n #endif\n for (size_t i = 0; i < size; i += 64u) {\n auto* ptr = reinterpret_cast<__m128i*>(mem) + i / sizeof(zero);\n _mm_stream_si128(ptr + 0u, zero);\n _mm_stream_si128(ptr + 1u, zero);\n _mm_stream_si128(ptr + 2u, zero);\n _mm_stream_si128(ptr + 3u, zero);\n }\n #else\n std::memset(mem, 0, size);\n #endif\n }\n\n\n /**\n * \\brief Compares two aligned structs bit by bit\n *\n * \\param [in] a First struct\n * \\param [in] b Second struct\n * \\returns \\c true if the structs are equal\n */\n template\n bool bcmpeq(const T* a, const T* b) {\n static_assert(alignof(T) >= 16);\n #if defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))\n auto ai = reinterpret_cast(a);\n auto bi = reinterpret_cast(b);\n\n size_t i = 0;\n\n #if defined(__clang__)\n #pragma nounroll\n #elif defined(__GNUC__)\n #pragma GCC unroll 0\n #endif\n\n for ( ; i < 2 * (sizeof(T) / 32); i += 2) {\n __m128i eq0 = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i),\n _mm_load_si128(bi + i));\n __m128i eq1 = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i + 1),\n _mm_load_si128(bi + i + 1));\n __m128i eq = _mm_and_si128(eq0, eq1);\n\n int mask = _mm_movemask_epi8(eq);\n if (mask != 0xFFFF)\n return false;\n }\n\n for ( ; i < sizeof(T) / 16; i++) {\n __m128i eq = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i),\n _mm_load_si128(bi + i));\n\n int mask = _mm_movemask_epi8(eq);\n if (mask != 0xFFFF)\n return false;\n }\n\n return true;\n #else\n return !std::memcmp(a, b, sizeof(T));\n #endif\n }\n\n template \n class bitset {\n static constexpr size_t 
Dwords = align(Bits, 32) / 32;\n public:\n\n constexpr bitset()\n : m_dwords() {\n\n }\n\n constexpr bool get(uint32_t idx) const {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n return m_dwords[dword] & (1u << bit);\n }\n\n constexpr void set(uint32_t idx, bool value) {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n if (value)\n m_dwords[dword] |= 1u << bit;\n else\n m_dwords[dword] &= ~(1u << bit);\n }\n\n constexpr bool exchange(uint32_t idx, bool value) {\n bool oldValue = get(idx);\n set(idx, value);\n return oldValue;\n }\n\n constexpr void flip(uint32_t idx) {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n m_dwords[dword] ^= 1u << bit;\n }\n\n constexpr void setAll() {\n if constexpr (Bits % 32 == 0) {\n for (size_t i = 0; i < Dwords; i++)\n m_dwords[i] = std::numeric_limits::max();\n }\n else {\n for (size_t i = 0; i < Dwords - 1; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n m_dwords[Dwords - 1] = (1u << (Bits % 32)) - 1;\n }\n }\n\n constexpr void clearAll() {\n for (size_t i = 0; i < Dwords; i++)\n m_dwords[i] = 0;\n }\n\n constexpr bool any() const {\n for (size_t i = 0; i < Dwords; i++) {\n if (m_dwords[i] != 0)\n return true;\n }\n\n return false;\n }\n\n constexpr uint32_t& dword(uint32_t idx) {\n return m_dwords[idx];\n }\n\n constexpr size_t bitCount() {\n return Bits;\n }\n\n constexpr size_t dwordCount() {\n return Dwords;\n }\n\n constexpr bool operator [] (uint32_t idx) const {\n return get(idx);\n }\n\n constexpr void setN(uint32_t bits) {\n uint32_t fullDwords = bits / 32;\n uint32_t offset = bits % 32;\n\n for (size_t i = 0; i < fullDwords; i++)\n m_dwords[i] = 
std::numeric_limits::max();\n \n if (offset > 0)\n m_dwords[fullDwords] = (1u << offset) - 1;\n }\n\n private:\n\n uint32_t m_dwords[Dwords];\n\n };\n\n class bitvector {\n public:\n\n bool get(uint32_t idx) const {\n uint32_t dword = idx / 32;\n uint32_t bit = idx % 32;\n\n return m_dwords[dword] & (1u << bit);\n }\n\n void ensureSize(uint32_t bitCount) {\n uint32_t dword = bitCount / 32;\n if (unlikely(dword >= m_dwords.size())) {\n m_dwords.resize(dword + 1);\n }\n m_bitCount = std::max(m_bitCount, bitCount);\n }\n\n void set(uint32_t idx, bool value) {\n ensureSize(idx + 1);\n\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n if (value)\n m_dwords[dword] |= 1u << bit;\n else\n m_dwords[dword] &= ~(1u << bit);\n }\n\n bool exchange(uint32_t idx, bool value) {\n ensureSize(idx + 1);\n\n bool oldValue = get(idx);\n set(idx, value);\n return oldValue;\n }\n\n void flip(uint32_t idx) {\n ensureSize(idx + 1);\n\n uint32_t dword = idx / 32;\n uint32_t bit = idx % 32;\n\n m_dwords[dword] ^= 1u << bit;\n }\n\n void setAll() {\n if (m_bitCount % 32 == 0) {\n for (size_t i = 0; i < m_dwords.size(); i++)\n m_dwords[i] = std::numeric_limits::max();\n }\n else {\n for (size_t i = 0; i < m_dwords.size() - 1; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n m_dwords[m_dwords.size() - 1] = (1u << (m_bitCount % 32)) - 1;\n }\n }\n\n void clearAll() {\n for (size_t i = 0; i < m_dwords.size(); i++)\n m_dwords[i] = 0;\n }\n\n bool any() const {\n for (size_t i = 0; i < m_dwords.size(); i++) {\n if (m_dwords[i] != 0)\n return true;\n }\n\n return false;\n }\n\n uint32_t& dword(uint32_t idx) {\n return m_dwords[idx];\n }\n\n size_t bitCount() const {\n return m_bitCount;\n }\n\n size_t dwordCount() const {\n return m_dwords.size();\n }\n\n bool operator [] (uint32_t idx) const {\n return get(idx);\n }\n\n void setN(uint32_t bits) {\n ensureSize(bits);\n\n uint32_t fullDwords = bits / 32;\n uint32_t offset = bits % 32;\n\n for (size_t i = 0; i < fullDwords; i++)\n m_dwords[i] = 
std::numeric_limits::max();\n\n if (offset > 0)\n m_dwords[fullDwords] = (1u << offset) - 1;\n }\n\n private:\n\n std::vector m_dwords;\n uint32_t m_bitCount = 0;\n\n };\n\n template\n class BitMask {\n\n public:\n\n class iterator {\n public:\n using iterator_category = std::input_iterator_tag;\n using value_type = T;\n using difference_type = T;\n using pointer = const T*;\n using reference = T;\n\n explicit iterator(T flags)\n : m_mask(flags) { }\n\n iterator& operator ++ () {\n m_mask &= m_mask - 1;\n return *this;\n }\n\n iterator operator ++ (int) {\n iterator retval = *this;\n m_mask &= m_mask - 1;\n return retval;\n }\n\n T operator * () const {\n return bsf(m_mask);\n }\n\n bool operator == (iterator other) const { return m_mask == other.m_mask; }\n bool operator != (iterator other) const { return m_mask != other.m_mask; }\n\n private:\n\n T m_mask;\n\n };\n\n BitMask()\n : m_mask(0) { }\n\n explicit BitMask(T n)\n : m_mask(n) { }\n\n iterator begin() {\n return iterator(m_mask);\n }\n\n iterator end() {\n return iterator(0);\n }\n\n private:\n\n T m_mask;\n\n };\n\n\n /**\n * \\brief Encodes float as fixed point\n *\n * Rounds away from zero. If this is not suitable for\n * certain use cases, implement round to nearest even.\n * \\tparam T Integer type, may be signed\n * \\tparam I Integer bits\n * \\tparam F Fractional bits\n * \\param n Float to encode\n * \\returns Encoded fixed-point value\n */\n template\n T encodeFixed(float n) {\n if (n != n)\n return 0u;\n\n n *= float(1u << F);\n\n if constexpr (std::is_signed_v) {\n n = std::max(n, -float(1u << (I + F - 1u)));\n n = std::min(n, float(1u << (I + F - 1u)) - 1.0f);\n n += n < 0.0f ? 
-0.5f : 0.5f;\n } else {\n n = std::max(n, 0.0f);\n n = std::min(n, float(1u << (I + F)) - 1.0f);\n n += 0.5f;\n }\n\n T result = T(n);\n\n if constexpr (std::is_signed_v)\n result &= ((T(1u) << (I + F)) - 1u);\n\n return result;\n }\n\n\n /**\n * \\brief Decodes fixed-point integer to float\n *\n * \\tparam T Integer type, may be signed\n * \\tparam I Integer bits\n * \\tparam F Fractional bits\n * \\param n Number to decode\n * \\returns Decoded number\n */\n template\n float decodeFixed(T n) {\n // Sign-extend as necessary\n if constexpr (std::is_signed_v)\n n -= (n & (T(1u) << (I + F - 1u))) << 1u;\n\n return float(n) / float(1u << F);\n }\n\n\n /**\n * \\brief Inserts one null bit after each bit\n */\n inline uint32_t split2(uint32_t c) {\n c = (c ^ (c << 8u)) & 0x00ff00ffu;\n c = (c ^ (c << 4u)) & 0x0f0f0f0fu;\n c = (c ^ (c << 2u)) & 0x33333333u;\n c = (c ^ (c << 1u)) & 0x55555555u;\n return c;\n }\n\n\n /**\n * \\brief Inserts two null bits after each bit\n */\n inline uint64_t split3(uint64_t c) {\n c = (c | c << 32u) & 0x001f00000000ffffull;\n c = (c | c << 16u) & 0x001f0000ff0000ffull;\n c = (c | c << 8u) & 0x100f00f00f00f00full;\n c = (c | c << 4u) & 0x10c30c30c30c30c3ull;\n c = (c | c << 2u) & 0x1249249249249249ull;\n return c;\n }\n\n\n /**\n * \\brief Interleaves bits from two integers\n *\n * Both numbers must fit into 16 bits.\n * \\param [in] x X coordinate\n * \\param [in] y Y coordinate\n * \\returns Morton code of x and y\n */\n inline uint32_t interleave(uint16_t x, uint16_t y) {\n return split2(x) | (split2(y) << 1u);\n }\n\n\n /**\n * \\brief Interleaves bits from three integers\n *\n * All three numbers must fit into 16 bits.\n */\n inline uint64_t interleave(uint16_t x, uint16_t y, uint16_t z) {\n return split3(x) | (split3(y) << 1u) | (split3(z) << 2u);\n }\n\n\n /**\n * \\brief 48-bit integer storage type\n */\n struct uint48_t {\n explicit uint48_t(uint64_t n)\n : a(uint16_t(n)), b(uint16_t(n >> 16)), c(uint16_t(n >> 32)) { }\n\n 
uint16_t a;\n uint16_t b;\n uint16_t c;\n\n explicit operator uint64_t () const {\n // GCC generates worse code if we promote to uint64 directly\n uint32_t lo = uint32_t(a) | (uint32_t(b) << 16);\n return uint64_t(lo) | (uint64_t(c) << 32);\n }\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_decoder.h", "class DxbcRegModifier {\n public:\n private:\n uint8_t m_mask = 0;\n public:\n private:\n uint8_t m_mask = 0;\n public:\n private:\n uint32_t m_bits;\n public:\n const uint32_t* ptrAt(uint32_t id) const;\n uint32_t at(uint32_t id) const {\n if (m_ptr + id >= m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return m_ptr[id];\n }\n uint32_t read() {\n if (m_ptr >= m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return *(m_ptr++);\n }\n DxbcCodeSlice take(uint32_t n) const {\n if (m_ptr + n > m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return DxbcCodeSlice(m_ptr, m_ptr + n);\n }\n DxbcCodeSlice skip(uint32_t n) const {\n if (m_ptr + n > m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return DxbcCodeSlice(m_ptr + n, m_end);\n }\n private:\n const uint32_t* m_ptr = nullptr;\n const uint32_t* m_end = nullptr;\n public:\n void decodeInstruction(DxbcCodeSlice& code) {\n const uint32_t token0 = code.at(0);\n \n // Initialize the instruction structure. 
Some of these values\n // may not get written otherwise while decoding the instruction.\n m_instruction.op = static_cast(bit::extract(token0, 0, 10));\n m_instruction.opClass = DxbcInstClass::Undefined;\n m_instruction.sampleControls = { 0, 0, 0 };\n m_instruction.dstCount = 0;\n m_instruction.srcCount = 0;\n m_instruction.immCount = 0;\n m_instruction.dst = m_dstOperands.data();\n m_instruction.src = m_srcOperands.data();\n m_instruction.imm = m_immOperands.data();\n m_instruction.customDataType = DxbcCustomDataClass::Comment;\n m_instruction.customDataSize = 0;\n m_instruction.customData = nullptr;\n \n // Reset the index pointer, which may still contain\n // a non-zero value from the previous iteration\n m_indexId = 0;\n \n // Instruction length, in DWORDs. This includes the token\n // itself and any other prefix that an instruction may have.\n uint32_t length = 0;\n \n if (m_instruction.op == DxbcOpcode::CustomData) {\n length = code.at(1);\n this->decodeCustomData(code.take(length));\n } else {\n length = bit::extract(token0, 24, 30);\n this->decodeOperation(code.take(length));\n }\n \n // Advance the caller's slice to the next token so that\n // they can make consecutive calls to decodeInstruction()\n code = code.skip(length);\n }\n private:\n DxbcShaderInstruction m_instruction;\n std::array m_dstOperands;\n std::array m_srcOperands;\n std::array m_immOperands;\n std::array m_indices;\n uint32_t m_indexId = 0;\n void decodeCustomData(DxbcCodeSlice code) {\n const uint32_t blockLength = code.at(1);\n \n if (blockLength < 2) {\n Logger::err(\"DxbcDecodeContext: Invalid custom data block\");\n return;\n }\n \n // Custom data blocks have their own instruction class\n m_instruction.op = DxbcOpcode::CustomData;\n m_instruction.opClass = DxbcInstClass::CustomData;\n \n // We'll point into the code buffer rather than making a copy\n m_instruction.customDataType = static_cast(\n bit::extract(code.at(0), 11, 31));\n m_instruction.customDataSize = blockLength - 2;\n 
m_instruction.customData = code.ptrAt(2);\n }\n void decodeOperation(DxbcCodeSlice code) {\n uint32_t token = code.read();\n \n // Result modifiers, which are applied to common ALU ops\n m_instruction.modifiers.saturate = !!bit::extract(token, 13, 13);\n m_instruction.modifiers.precise = !!bit::extract(token, 19, 22);\n \n // Opcode controls. It will depend on the\n // opcode itself which ones are valid.\n m_instruction.controls = DxbcShaderOpcodeControls(token);\n \n // Process extended opcode tokens\n while (bit::extract(token, 31, 31)) {\n token = code.read();\n \n const DxbcExtOpcode extOpcode\n = static_cast(bit::extract(token, 0, 5));\n \n switch (extOpcode) {\n case DxbcExtOpcode::SampleControls: {\n struct {\n int u : 4;\n int v : 4;\n int w : 4;\n } aoffimmi;\n \n aoffimmi.u = bit::extract(token, 9, 12);\n aoffimmi.v = bit::extract(token, 13, 16);\n aoffimmi.w = bit::extract(token, 17, 20);\n \n // Four-bit signed numbers, sign-extend them\n m_instruction.sampleControls.u = aoffimmi.u;\n m_instruction.sampleControls.v = aoffimmi.v;\n m_instruction.sampleControls.w = aoffimmi.w;\n } break;\n \n case DxbcExtOpcode::ResourceDim:\n case DxbcExtOpcode::ResourceReturnType:\n break; // part of resource description\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled extended opcode: \",\n extOpcode));\n }\n }\n \n // Retrieve the instruction format in order to parse the\n // operands. Doing this mostly automatically means that\n // the compiler can rely on the operands being valid.\n const DxbcInstFormat format = dxbcInstructionFormat(m_instruction.op);\n m_instruction.opClass = format.instructionClass;\n \n for (uint32_t i = 0; i < format.operandCount; i++)\n this->decodeOperand(code, format.operands[i]);\n }\n void decodeComponentSelection(DxbcRegister& reg, uint32_t token) {\n // Pick the correct component selection mode based on the\n // component count. 
We'll simplify this here so that the\n // compiler can assume that everything is a 4D vector.\n reg.componentCount = static_cast(bit::extract(token, 0, 1));\n \n switch (reg.componentCount) {\n // No components - used for samplers etc.\n case DxbcComponentCount::Component0:\n reg.mask = DxbcRegMask(false, false, false, false);\n reg.swizzle = DxbcRegSwizzle(0, 0, 0, 0);\n break;\n \n // One component - used for immediates\n // and a few built-in registers.\n case DxbcComponentCount::Component1:\n reg.mask = DxbcRegMask(true, false, false, false);\n reg.swizzle = DxbcRegSwizzle(0, 0, 0, 0);\n break;\n \n // Four components - everything else. This requires us\n // to actually parse the component selection mode.\n case DxbcComponentCount::Component4: {\n const DxbcRegMode componentMode =\n static_cast(bit::extract(token, 2, 3));\n \n switch (componentMode) {\n // Write mask for destination operands\n case DxbcRegMode::Mask:\n reg.mask = bit::extract(token, 4, 7);\n reg.swizzle = DxbcRegSwizzle(0, 1, 2, 3);\n break;\n \n // Swizzle for source operands (including resources)\n case DxbcRegMode::Swizzle:\n reg.mask = DxbcRegMask(true, true, true, true);\n reg.swizzle = DxbcRegSwizzle(\n bit::extract(token, 4, 5),\n bit::extract(token, 6, 7),\n bit::extract(token, 8, 9),\n bit::extract(token, 10, 11));\n break;\n \n // Selection of one component. 
We can generate both a\n // mask and a swizzle for this so that the compiler\n // won't have to deal with this case specifically.\n case DxbcRegMode::Select1: {\n const uint32_t n = bit::extract(token, 4, 5);\n reg.mask = DxbcRegMask(n == 0, n == 1, n == 2, n == 3);\n reg.swizzle = DxbcRegSwizzle(n, n, n, n);\n } break;\n \n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component selection mode\");\n }\n } break;\n \n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component count\");\n }\n }\n void decodeOperandExtensions(DxbcCodeSlice& code, DxbcRegister& reg, uint32_t token) {\n while (bit::extract(token, 31, 31)) {\n token = code.read();\n \n // Type of the extended operand token\n const DxbcOperandExt extTokenType =\n static_cast(bit::extract(token, 0, 5));\n \n switch (extTokenType) {\n // Operand modifiers, which are used to manipulate the\n // value of a source operand during the load operation\n case DxbcOperandExt::OperandModifier:\n reg.modifiers = bit::extract(token, 6, 13);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled extended operand token: \",\n extTokenType));\n }\n }\n }\n void decodeOperandImmediates(DxbcCodeSlice& code, DxbcRegister& reg) {\n if (reg.type == DxbcOperandType::Imm32\n || reg.type == DxbcOperandType::Imm64) {\n switch (reg.componentCount) {\n // This is commonly used if only one vector\n // component is involved in an operation\n case DxbcComponentCount::Component1: {\n reg.imm.u32_1 = code.read();\n } break;\n \n // Typical four-component vector\n case DxbcComponentCount::Component4: {\n reg.imm.u32_4[0] = code.read();\n reg.imm.u32_4[1] = code.read();\n reg.imm.u32_4[2] = code.read();\n reg.imm.u32_4[3] = code.read();\n } break;\n\n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component count for immediate operand\");\n }\n }\n }\n void decodeOperandIndex(DxbcCodeSlice& code, DxbcRegister& reg, uint32_t token) {\n reg.idxDim = bit::extract(token, 20, 21);\n \n for 
(uint32_t i = 0; i < reg.idxDim; i++) {\n // An index can be encoded in various different ways\n const DxbcOperandIndexRepresentation repr =\n static_cast(\n bit::extract(token, 22 + 3 * i, 24 + 3 * i));\n \n switch (repr) {\n case DxbcOperandIndexRepresentation::Imm32:\n reg.idx[i].offset = static_cast(code.read());\n reg.idx[i].relReg = nullptr;\n break;\n \n case DxbcOperandIndexRepresentation::Relative:\n reg.idx[i].offset = 0;\n reg.idx[i].relReg = &m_indices.at(m_indexId);\n \n this->decodeRegister(code,\n m_indices.at(m_indexId++),\n DxbcScalarType::Sint32);\n break;\n \n case DxbcOperandIndexRepresentation::Imm32Relative:\n reg.idx[i].offset = static_cast(code.read());\n reg.idx[i].relReg = &m_indices.at(m_indexId);\n \n this->decodeRegister(code,\n m_indices.at(m_indexId++),\n DxbcScalarType::Sint32);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled index representation: \",\n repr));\n }\n }\n }\n void decodeRegister(DxbcCodeSlice& code, DxbcRegister& reg, DxbcScalarType type) {\n const uint32_t token = code.read();\n \n reg.type = static_cast(bit::extract(token, 12, 19));\n reg.dataType = type;\n reg.modifiers = 0;\n reg.idxDim = 0;\n \n for (uint32_t i = 0; i < DxbcMaxRegIndexDim; i++) {\n reg.idx[i].relReg = nullptr;\n reg.idx[i].offset = 0;\n }\n \n this->decodeComponentSelection(reg, token);\n this->decodeOperandExtensions(code, reg, token);\n this->decodeOperandImmediates(code, reg);\n this->decodeOperandIndex(code, reg, token);\n }\n void decodeImm32(DxbcCodeSlice& code, DxbcImmediate& imm, DxbcScalarType type) {\n imm.u32 = code.read();\n }\n void decodeOperand(DxbcCodeSlice& code, const DxbcInstOperandFormat& format) {\n switch (format.kind) {\n case DxbcOperandKind::DstReg: {\n const uint32_t operandId = m_instruction.dstCount++;\n this->decodeRegister(code, m_dstOperands.at(operandId), format.type);\n } break;\n \n case DxbcOperandKind::SrcReg: {\n const uint32_t operandId = m_instruction.srcCount++;\n 
this->decodeRegister(code, m_srcOperands.at(operandId), format.type);\n } break;\n \n case DxbcOperandKind::Imm32: {\n const uint32_t operandId = m_instruction.immCount++;\n this->decodeImm32(code, m_immOperands.at(operandId), format.type);\n } break;\n \n default:\n throw DxvkError(\"DxbcDecodeContext: Invalid operand format\");\n }\n }\n};"], ["/lsfg-vk/framegen/src/common/exception.cpp", "#include \"common/exception.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\n\nvulkan_error::vulkan_error(VkResult result, const std::string& message)\n : std::runtime_error(std::format(\"{} (error {})\", message, static_cast(result))),\n result(result) {}\n\nvulkan_error::~vulkan_error() noexcept = default;\n\nrethrowable_error::rethrowable_error(const std::string& message, const std::exception& exe)\n : std::runtime_error(message) {\n this->message = std::format(\"{}\\n- {}\", message, exe.what());\n}\n\nrethrowable_error::~rethrowable_error() noexcept = default;\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_instruction.h", "#pragma once\n\n#include \"spirv_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief SPIR-V instruction\n * \n * Helps parsing a single instruction, providing\n * access to the op code, instruction length and\n * instruction arguments.\n */\n class SpirvInstruction {\n \n public:\n \n SpirvInstruction() { }\n SpirvInstruction(uint32_t* code, uint32_t offset, uint32_t length)\n : m_code(code), m_offset(offset), m_length(length) { }\n \n /**\n * \\brief SPIR-V Op code\n * \\returns The op code\n */\n spv::Op opCode() const {\n return static_cast(\n this->arg(0) & spv::OpCodeMask);\n }\n \n /**\n * \\brief Instruction length\n * \\returns Number of DWORDs\n */\n uint32_t length() const {\n return this->arg(0) >> spv::WordCountShift;\n }\n \n /**\n * \\brief Instruction offset\n * \\returns Offset in DWORDs\n */\n uint32_t offset() const {\n return m_offset;\n }\n \n /**\n * \\brief Argument value\n 
* \n * Retrieves an argument DWORD. Note that some instructions\n * take 64-bit arguments which require more than one DWORD.\n * Arguments start at index 1. Calling this method with an\n * argument ID of 0 will return the opcode token.\n * \\param [in] idx Argument index, starting at 1\n * \\returns The argument value\n */\n uint32_t arg(uint32_t idx) const {\n const uint32_t index = m_offset + idx;\n return index < m_length ? m_code[index] : 0;\n }\n\n /**\n * \\brief Argument string\n *\n * Retrieves a pointer to a UTF-8-encoded string.\n * \\param [in] idx Argument index, starting at 1\n * \\returns Pointer to the literal string\n */\n const char* chr(uint32_t idx) const {\n const uint32_t index = m_offset + idx;\n return index < m_length ? reinterpret_cast(&m_code[index]) : nullptr;\n }\n \n /**\n * \\brief Changes the value of an argument\n * \n * \\param [in] idx Argument index, starting at 1\n * \\param [in] word New argument word\n */\n void setArg(uint32_t idx, uint32_t word) const {\n if (m_offset + idx < m_length)\n m_code[m_offset + idx] = word;\n }\n \n private:\n \n uint32_t* m_code = nullptr;\n uint32_t m_offset = 0;\n uint32_t m_length = 0;\n \n };\n \n \n /**\n * \\brief SPIR-V instruction iterator\n * \n * Convenient iterator that can be used\n * to process raw SPIR-V shader code.\n */\n class SpirvInstructionIterator {\n \n public:\n \n SpirvInstructionIterator() { }\n SpirvInstructionIterator(uint32_t* code, uint32_t offset, uint32_t length)\n : m_code (length != 0 ? code : nullptr),\n m_offset(length != 0 ? 
offset : 0),\n m_length(length) {\n if ((length >= 5) && (offset == 0) && (m_code[0] == spv::MagicNumber))\n this->advance(5);\n }\n \n SpirvInstructionIterator& operator ++ () {\n this->advance(SpirvInstruction(m_code, m_offset, m_length).length());\n return *this;\n }\n \n SpirvInstructionIterator operator ++ (int) {\n SpirvInstructionIterator result = *this;\n this->advance(SpirvInstruction(m_code, m_offset, m_length).length());\n return result;\n }\n \n SpirvInstruction operator * () const {\n return SpirvInstruction(m_code, m_offset, m_length);\n }\n \n bool operator == (const SpirvInstructionIterator& other) const {\n return this->m_code == other.m_code\n && this->m_offset == other.m_offset\n && this->m_length == other.m_length;\n }\n \n bool operator != (const SpirvInstructionIterator& other) const {\n return this->m_code != other.m_code\n || this->m_offset != other.m_offset\n || this->m_length != other.m_length;\n }\n \n private:\n \n uint32_t* m_code = nullptr;\n uint32_t m_offset = 0;\n uint32_t m_length = 0;\n \n void advance(uint32_t n) {\n if (m_offset + n < m_length) {\n m_offset += n;\n } else {\n m_code = nullptr;\n m_offset = 0;\n m_length = 0;\n }\n }\n \n };\n \n}"], ["/lsfg-vk/src/extract/trans.cpp", "#include \"extract/trans.hpp\"\n\n#include \n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing namespace Extract;\n\nstruct BindingOffsets {\n uint32_t bindingIndex{};\n uint32_t bindingOffset{};\n uint32_t setIndex{};\n uint32_t setOffset{};\n};\n\nstd::vector Extract::translateShader(std::vector bytecode) {\n // compile the shader\n dxvk::DxbcReader reader(reinterpret_cast(bytecode.data()), bytecode.size());\n dxvk::DxbcModule module(reader);\n const dxvk::DxbcModuleInfo info{};\n auto code = module.compile(info, \"CS\");\n\n // find all bindings\n std::vector bindingOffsets;\n std::vector varIds;\n for (auto ins : code) {\n if (ins.opCode() == spv::OpDecorate) {\n if (ins.arg(2) == spv::DecorationBinding) 
{\n const uint32_t varId = ins.arg(1);\n bindingOffsets.resize(std::max(bindingOffsets.size(), size_t(varId + 1)));\n bindingOffsets[varId].bindingIndex = ins.arg(3);\n bindingOffsets[varId].bindingOffset = ins.offset() + 3;\n varIds.push_back(varId);\n }\n\n if (ins.arg(2) == spv::DecorationDescriptorSet) {\n const uint32_t varId = ins.arg(1);\n bindingOffsets.resize(std::max(bindingOffsets.size(), size_t(varId + 1)));\n bindingOffsets[varId].setIndex = ins.arg(3);\n bindingOffsets[varId].setOffset = ins.offset() + 3;\n }\n }\n\n if (ins.opCode() == spv::OpFunction)\n break;\n }\n\n std::vector validBindings;\n for (const auto varId : varIds) {\n auto info = bindingOffsets[varId];\n\n if (info.bindingOffset)\n validBindings.push_back(info);\n }\n\n // patch binding offset\n #pragma clang diagnostic push\n #pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n for (size_t i = 0; i < validBindings.size(); i++)\n code.data()[validBindings.at(i).bindingOffset] // NOLINT\n = static_cast(i);\n #pragma clang diagnostic pop\n\n // return the new bytecode\n std::vector spirvBytecode(code.size());\n std::copy_n(reinterpret_cast(code.data()),\n code.size(), spirvBytecode.data());\n return spirvBytecode;\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pepy/pepy.cpp", "/*\n * Copyright (c) 2013, Wesley Shields . All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * 2. 
Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n#include \n#include \n#include \n\nusing namespace peparse;\n\n/* NOTE(ww): These don't necessarily have to be the same, but currently are.\n */\n#define PEPY_VERSION PEPARSE_VERSION\n\n/* These are used to across multiple objects. 
*/\n#define PEPY_OBJECT_GET(OBJ, ATTR) \\\n static PyObject *pepy_##OBJ##_get_##ATTR(PyObject *self, void *closure) { \\\n Py_INCREF(((pepy_##OBJ *) self)->ATTR); \\\n return ((pepy_##OBJ *) self)->ATTR; \\\n }\n\n#define OBJECTGETTER(OBJ, ATTR, DOC) \\\n { \\\n (char *) #ATTR, (getter) pepy_##OBJ##_get_##ATTR, \\\n (setter) pepy_attr_not_writable, (char *) #DOC, NULL \\\n }\n\n/* 'OPTIONAL' references the fact that these are from the Optional Header */\n#define OBJECTGETTER_OPTIONAL(ATTR, DOC) \\\n { \\\n (char *) #ATTR, (getter) pepy_parsed_get_optional_##ATTR, \\\n (setter) pepy_attr_not_writable, (char *) #DOC, NULL \\\n }\n\nstatic PyObject *pepy_error;\n\nstruct pepy {\n PyObject_HEAD\n};\n\nstruct pepy_parsed {\n PyObject_HEAD parsed_pe *pe;\n};\n\nstruct pepy_section {\n PyObject_HEAD PyObject *name;\n PyObject *base;\n PyObject *length;\n PyObject *virtaddr;\n PyObject *virtsize;\n PyObject *numrelocs;\n PyObject *numlinenums;\n PyObject *characteristics;\n PyObject *data;\n};\n\nstruct pepy_resource {\n PyObject_HEAD PyObject *type_str;\n PyObject *name_str;\n PyObject *lang_str;\n PyObject *type;\n PyObject *name;\n PyObject *lang;\n PyObject *codepage;\n PyObject *RVA;\n PyObject *size;\n PyObject *data;\n};\n\nstruct pepy_import {\n PyObject_HEAD PyObject *name;\n PyObject *sym;\n PyObject *addr;\n};\n\nstruct pepy_export {\n PyObject_HEAD PyObject *mod;\n PyObject *func;\n PyObject *addr;\n};\n\nstruct pepy_relocation {\n PyObject_HEAD PyObject *type;\n PyObject *addr;\n};\n\n/* None of the attributes in these objects are writable. 
*/\nstatic int\npepy_attr_not_writable(PyObject *self, PyObject *value, void *closure) {\n PyErr_SetString(PyExc_TypeError, \"Attribute not writable.\");\n return -1;\n}\n\nstatic PyObject *\npepy_import_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_import *self;\n\n self = (pepy_import *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_import_init(pepy_import *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OOO:pepy_import_init\", &self->name, &self->sym, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_import_dealloc(pepy_import *self) {\n Py_XDECREF(self->name);\n Py_XDECREF(self->sym);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(import, name);\nPEPY_OBJECT_GET(import, sym);\nPEPY_OBJECT_GET(import, addr);\n\nstatic PyGetSetDef pepy_import_getseters[] = {\n OBJECTGETTER(import, name, \"Name\"),\n OBJECTGETTER(import, sym, \"Symbol\"),\n OBJECTGETTER(import, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_import_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.import\", /* tp_name */\n sizeof(pepy_import), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_import_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy import object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_import_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset 
*/\n (initproc) pepy_import_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_import_new /* tp_new */\n};\n\nstatic PyObject *\npepy_export_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_export *self;\n\n self = (pepy_export *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_export_init(pepy_export *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OOO:pepy_export_init\", &self->mod, &self->func, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_export_dealloc(pepy_export *self) {\n Py_XDECREF(self->mod);\n Py_XDECREF(self->func);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(export, mod);\nPEPY_OBJECT_GET(export, func);\nPEPY_OBJECT_GET(export, addr);\n\nstatic PyGetSetDef pepy_export_getseters[] = {\n OBJECTGETTER(export, mod, \"Module\"),\n OBJECTGETTER(export, func, \"Function\"),\n OBJECTGETTER(export, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_export_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.export\", /* tp_name */\n sizeof(pepy_export), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_export_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy export object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_export_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_export_init, /* tp_init */\n 0, /* tp_alloc 
*/\n pepy_export_new /* tp_new */\n};\n\nstatic PyObject *\npepy_relocation_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_relocation *self;\n\n self = (pepy_relocation *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_relocation_init(pepy_relocation *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OO:pepy_relocation_init\", &self->type, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_relocation_dealloc(pepy_relocation *self) {\n Py_XDECREF(self->type);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(relocation, type);\nPEPY_OBJECT_GET(relocation, addr);\n\nstatic PyGetSetDef pepy_relocation_getseters[] = {\n OBJECTGETTER(relocation, type, \"Type\"),\n OBJECTGETTER(relocation, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_relocation_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.relocation\", /* tp_name */\n sizeof(pepy_relocation), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_relocation_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy relocation object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_relocation_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_relocation_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_relocation_new /* tp_new */\n};\n\nstatic PyObject *\npepy_section_new(PyTypeObject 
*type, PyObject *args, PyObject *kwds) {\n pepy_section *self;\n\n self = (pepy_section *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_section_init(pepy_section *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(args,\n \"OOOOOOOOO:pepy_section_init\",\n &self->name,\n &self->base,\n &self->length,\n &self->virtaddr,\n &self->virtsize,\n &self->numrelocs,\n &self->numlinenums,\n &self->characteristics,\n &self->data))\n return -1;\n return 0;\n}\n\nstatic void pepy_section_dealloc(pepy_section *self) {\n Py_XDECREF(self->name);\n Py_XDECREF(self->base);\n Py_XDECREF(self->length);\n Py_XDECREF(self->virtaddr);\n Py_XDECREF(self->virtsize);\n Py_XDECREF(self->numrelocs);\n Py_XDECREF(self->numlinenums);\n Py_XDECREF(self->characteristics);\n Py_XDECREF(self->data);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(section, name);\nPEPY_OBJECT_GET(section, base);\nPEPY_OBJECT_GET(section, length);\nPEPY_OBJECT_GET(section, virtaddr);\nPEPY_OBJECT_GET(section, virtsize);\nPEPY_OBJECT_GET(section, numrelocs);\nPEPY_OBJECT_GET(section, numlinenums);\nPEPY_OBJECT_GET(section, characteristics);\nPEPY_OBJECT_GET(section, data);\n\nstatic PyGetSetDef pepy_section_getseters[] = {\n OBJECTGETTER(section, name, \"Name\"),\n OBJECTGETTER(section, base, \"Base address\"),\n OBJECTGETTER(section, length, \"Length\"),\n OBJECTGETTER(section, virtaddr, \"Virtual address\"),\n OBJECTGETTER(section, virtsize, \"Virtual size\"),\n OBJECTGETTER(section, numrelocs, \"Number of relocations\"),\n OBJECTGETTER(section, numlinenums, \"Number of line numbers\"),\n OBJECTGETTER(section, characteristics, \"Characteristics\"),\n OBJECTGETTER(section, data, \"Section data\"),\n {NULL}};\n\nstatic PyTypeObject pepy_section_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.section\", /* tp_name */\n sizeof(pepy_section), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_section_dealloc, /* tp_dealloc */\n 
0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy section object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_section_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_section_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_section_new /* tp_new */\n};\n\nstatic PyObject *\npepy_resource_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_resource *self;\n\n self = (pepy_resource *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_resource_init(pepy_resource *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(args,\n \"OOOOOOOOOO:pepy_resource_init\",\n &self->type_str,\n &self->name_str,\n &self->lang_str,\n &self->type,\n &self->name,\n &self->lang,\n &self->codepage,\n &self->RVA,\n &self->size,\n &self->data))\n return -1;\n\n return 0;\n}\n\nstatic void pepy_resource_dealloc(pepy_resource *self) {\n Py_XDECREF(self->type_str);\n Py_XDECREF(self->name_str);\n Py_XDECREF(self->lang_str);\n Py_XDECREF(self->type);\n Py_XDECREF(self->name);\n Py_XDECREF(self->lang);\n Py_XDECREF(self->codepage);\n Py_XDECREF(self->RVA);\n Py_XDECREF(self->size);\n Py_XDECREF(self->data);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(resource, type_str);\nPEPY_OBJECT_GET(resource, name_str);\nPEPY_OBJECT_GET(resource, lang_str);\nPEPY_OBJECT_GET(resource, type);\nPEPY_OBJECT_GET(resource, name);\nPEPY_OBJECT_GET(resource, 
lang);\nPEPY_OBJECT_GET(resource, codepage);\nPEPY_OBJECT_GET(resource, RVA);\nPEPY_OBJECT_GET(resource, size);\nPEPY_OBJECT_GET(resource, data);\n\nstatic PyObject *pepy_resource_type_as_str(PyObject *self, PyObject *args) {\n PyObject *ret;\n char *str;\n long type;\n\n type = PyLong_AsUnsignedLong(((pepy_resource *) self)->type);\n if (type == -1) {\n if (PyErr_Occurred()) {\n PyErr_PrintEx(0);\n return NULL;\n }\n }\n switch ((resource_type) type) {\n case (RT_CURSOR):\n str = (char *) \"CURSOR\";\n break;\n case (RT_BITMAP):\n str = (char *) \"BITMAP\";\n break;\n case (RT_ICON):\n str = (char *) \"ICON\";\n break;\n case (RT_MENU):\n str = (char *) \"MENU\";\n break;\n case (RT_DIALOG):\n str = (char *) \"DIALOG\";\n break;\n case (RT_STRING):\n str = (char *) \"STRING\";\n break;\n case (RT_FONTDIR):\n str = (char *) \"FONTDIR\";\n break;\n case (RT_FONT):\n str = (char *) \"FONT\";\n break;\n case (RT_ACCELERATOR):\n str = (char *) \"ACCELERATOR\";\n break;\n case (RT_RCDATA):\n str = (char *) \"RCDATA\";\n break;\n case (RT_MESSAGETABLE):\n str = (char *) \"MESSAGETABLE\";\n break;\n case (RT_GROUP_CURSOR):\n str = (char *) \"GROUP_CURSOR\";\n break;\n case (RT_GROUP_ICON):\n str = (char *) \"GROUP_ICON\";\n break;\n case (RT_VERSION):\n str = (char *) \"VERSION\";\n break;\n case (RT_DLGINCLUDE):\n str = (char *) \"DLGINCLUDE\";\n break;\n case (RT_PLUGPLAY):\n str = (char *) \"PLUGPLAY\";\n break;\n case (RT_VXD):\n str = (char *) \"VXD\";\n break;\n case (RT_ANICURSOR):\n str = (char *) \"ANICURSOR\";\n break;\n case (RT_ANIICON):\n str = (char *) \"ANIICON\";\n break;\n case (RT_HTML):\n str = (char *) \"HTML\";\n break;\n case (RT_MANIFEST):\n str = (char *) \"MANIFEST\";\n break;\n default:\n str = (char *) \"UNKNOWN\";\n break;\n }\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyMethodDef pepy_resource_methods[] = {\n 
{\"type_as_str\",\n pepy_resource_type_as_str,\n METH_NOARGS,\n \"Return the resource type as a string.\"},\n {NULL}};\n\nstatic PyGetSetDef pepy_resource_getseters[] = {\n OBJECTGETTER(resource, type_str, \"Type string\"),\n OBJECTGETTER(resource, name_str, \"Name string\"),\n OBJECTGETTER(resource, lang_str, \"Lang string\"),\n OBJECTGETTER(resource, type, \"Type\"),\n OBJECTGETTER(resource, name, \"Name\"),\n OBJECTGETTER(resource, lang, \"Language\"),\n OBJECTGETTER(resource, codepage, \"Codepage\"),\n OBJECTGETTER(resource, RVA, \"RVA\"),\n OBJECTGETTER(resource, size, \"Size (specified in RDAT)\"),\n OBJECTGETTER(resource, data, \"Resource data\"),\n {NULL}};\n\nstatic PyTypeObject pepy_resource_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.resource\", /* tp_name */\n sizeof(pepy_resource), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_resource_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy resource object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n pepy_resource_methods, /* tp_methods */\n 0, /* tp_members */\n pepy_resource_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_resource_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_resource_new /* tp_new */\n};\n\nstatic PyObject *\npepy_parsed_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_parsed *self;\n\n self = (pepy_parsed *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int 
pepy_parsed_init(pepy_parsed *self, PyObject *args, PyObject *kwds) {\n char *pe_path;\n\n if (!PyArg_ParseTuple(args, \"s:pepy_parse\", &pe_path))\n return -1;\n\n if (!pe_path)\n return -1;\n\n self->pe = ParsePEFromFile(pe_path);\n if (!self->pe) {\n return -2;\n }\n\n return 0;\n}\n\nstatic void pepy_parsed_dealloc(pepy_parsed *self) {\n DestructParsedPE(self->pe);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nstatic PyObject *pepy_parsed_get_entry_point(PyObject *self, PyObject *args) {\n VA entrypoint;\n PyObject *ret;\n\n if (!GetEntryPoint(((pepy_parsed *) self)->pe, entrypoint))\n Py_RETURN_NONE;\n\n ret = PyLong_FromUnsignedLongLong(entrypoint);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return object.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_machine_as_str(PyObject *self,\n PyObject *args) {\n PyObject *ret;\n const char *str;\n\n str = GetMachineAsString(((pepy_parsed *) self)->pe);\n if (!str)\n Py_RETURN_NONE;\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_subsystem_as_str(PyObject *self,\n PyObject *args) {\n PyObject *ret;\n const char *str;\n\n str = GetSubsystemAsString(((pepy_parsed *) self)->pe);\n if (!str)\n Py_RETURN_NONE;\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_bytes(PyObject *self, PyObject *args) {\n uint64_t start;\n Py_ssize_t len, idx;\n PyObject *ret;\n\n if (!PyArg_ParseTuple(args, \"KK:pepy_parsed_get_bytes\", &start, &len))\n return NULL;\n\n /*\n * XXX: a new implementation read all bytes in char* and use\n * PybyteArray_FromStringAndSize\n */\n\n uint8_t *buf = new (std::nothrow) uint8_t[len];\n if (!buf) {\n /* in case allocation failed */\n PyErr_SetString(pepy_error,\n 
\"Unable to create initial buffer (allocation failure).\");\n return NULL;\n }\n\n for (idx = 0; idx < len; idx++) {\n if (!ReadByteAtVA(((pepy_parsed *) self)->pe, start + idx, buf[idx]))\n break;\n }\n\n /* use idx as content length, if we get less than asked for */\n ret = PyByteArray_FromStringAndSize(reinterpret_cast(buf), idx);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new byte array.\");\n return NULL;\n }\n\n delete[] buf;\n return ret;\n}\n\n/*\n * This is used to convert bounded buffers into python byte array objects.\n * In case the buffer is NULL, return an empty bytearray.\n */\nstatic PyObject *pepy_data_converter(bounded_buffer *data) {\n PyObject *ret;\n const char *str;\n Py_ssize_t len;\n\n if (!data || !data->buf) {\n str = \"\";\n len = 0;\n } else {\n str = (const char *) data->buf;\n len = data->bufLen;\n }\n\n ret = PyByteArray_FromStringAndSize(str, len);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to convert data to byte array.\");\n return NULL;\n }\n\n return ret;\n}\n\nint section_callback(void *cbd,\n const VA &base,\n const std::string &name,\n const image_section_header &s,\n const bounded_buffer *data) {\n uint32_t buflen;\n PyObject *sect;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * I've seen some interesting binaries with a section where the\n * PointerToRawData and SizeOfRawData are invalid. The parser library\n * handles this by setting sectionData to NULL as returned by splitBuffer().\n * The sectionData (passed in to us as *data) is converted using\n * pepy_data_converter() which will return an empty string object.\n * However, we need to address the fact that we pass an invalid length\n * via data->bufLen.\n */\n if (!data) {\n buflen = 0;\n } else {\n buflen = data->bufLen;\n }\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * section type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"sKKIIHHIO&\",\n name.c_str(),\n base,\n buflen,\n s.VirtualAddress,\n s.Misc.VirtualSize,\n s.NumberOfRelocations,\n s.NumberOfLinenumbers,\n s.Characteristics,\n pepy_data_converter,\n data);\n if (!tuple)\n return 1;\n\n sect = pepy_section_new(&pepy_section_type, NULL, NULL);\n if (!sect) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_section_init((pepy_section *) sect, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, sect) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(sect);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_sections(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterSec(((pepy_parsed *) self)->pe, section_callback, ret);\n\n return ret;\n}\n\nint resource_callback(void *cbd, const resource &r) {\n PyObject *rsrc;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * section type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"s#s#s#IIIIIIO&\",\n r.type_str.c_str(),\n r.type_str.length(),\n r.name_str.c_str(),\n r.name_str.length(),\n r.lang_str.c_str(),\n r.lang_str.length(),\n r.type,\n r.name,\n r.lang,\n r.codepage,\n r.RVA,\n r.size,\n pepy_data_converter,\n r.buf);\n if (!tuple)\n return 1;\n\n rsrc = pepy_resource_new(&pepy_resource_type, NULL, NULL);\n if (!rsrc) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_resource_init((pepy_resource *) rsrc, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new resource.\");\n return 1;\n }\n\n if (PyList_Append(list, rsrc) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(rsrc);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_resources(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterRsrc(((pepy_parsed *) self)->pe, resource_callback, ret);\n\n return ret;\n}\n\nint import_callback(void *cbd,\n const VA &addr,\n const std::string &name,\n const std::string &sym) {\n PyObject *imp;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * import type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"ssI\", name.c_str(), sym.c_str(), addr);\n if (!tuple)\n return 1;\n\n imp = pepy_import_new(&pepy_import_type, NULL, NULL);\n if (!imp) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_import_init((pepy_import *) imp, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, imp) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(imp);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_imports(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterImpVAString(((pepy_parsed *) self)->pe, import_callback, ret);\n\n return ret;\n}\n\nint export_callback(void *cbd,\n const VA &addr,\n const std::string &mod,\n const std::string &func) {\n PyObject *exp;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * export type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"ssI\", mod.c_str(), func.c_str(), addr);\n if (!tuple)\n return 1;\n\n exp = pepy_export_new(&pepy_export_type, NULL, NULL);\n if (!exp) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_export_init((pepy_export *) exp, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, exp) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(exp);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_exports(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n /*\n * This could use the same callback and object as imports but the names\n * of the attributes would be slightly off.\n */\n IterExpVA(((pepy_parsed *) self)->pe, export_callback, ret);\n\n return ret;\n}\n\nint reloc_callback(void *cbd, const VA &addr, const reloc_type &type) {\n PyObject *reloc;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * relocation type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"II\", type, addr);\n if (!tuple)\n return 1;\n\n reloc = pepy_relocation_new(&pepy_relocation_type, NULL, NULL);\n if (!reloc) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_relocation_init((pepy_relocation *) reloc, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, reloc) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(reloc);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_relocations(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterRelocs(((pepy_parsed *) self)->pe, reloc_callback, ret);\n\n return ret;\n}\n\n#define PEPY_PARSED_GET(ATTR, VAL) \\\n static PyObject *pepy_parsed_get_##ATTR(PyObject *self, void *closure) { \\\n PyObject *ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n return ret; \\\n }\n\nPEPY_PARSED_GET(signature, Signature);\nPEPY_PARSED_GET(machine, FileHeader.Machine);\nPEPY_PARSED_GET(numberofsections, FileHeader.NumberOfSections);\nPEPY_PARSED_GET(timedatestamp, FileHeader.TimeDateStamp);\nPEPY_PARSED_GET(numberofsymbols, FileHeader.NumberOfSymbols);\nPEPY_PARSED_GET(characteristics, FileHeader.Characteristics);\nPEPY_PARSED_GET(magic, OptionalMagic);\n\n/*\n * This is used to get things from the optional header, which can be either\n * the PE32 or PE32+ version, depending upon the magic value. 
Technically
 * the magic is stored in the OptionalHeader, but to make life easier pe-parse
 * stores the value in nt_header_32 along with the appropriate optional header.
 * This is why "magic" is handled above, and not here.
 */
#define PEPY_PARSED_GET_OPTIONAL(ATTR, VAL) \
  static PyObject *pepy_parsed_get_optional_##ATTR(PyObject *self, \
                                                   void *closure) { \
    PyObject *ret = NULL; \
    if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic == \
        NT_OPTIONAL_32_MAGIC) { \
      ret = PyLong_FromUnsignedLongLong( \
          ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader.VAL); \
      if (!ret) \
        PyErr_SetString(PyExc_AttributeError, "Error getting attribute."); \
    } else if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic == \
               NT_OPTIONAL_64_MAGIC) { \
      ret = PyLong_FromUnsignedLongLong( \
          ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader64.VAL); \
      if (!ret) \
        PyErr_SetString(PyExc_AttributeError, "Error getting attribute."); \
    } else { \
      PyErr_SetString(pepy_error, "Bad magic value."); \
    } \
    return ret; \
  }

/*
 * One getter per optional-header field shared by PE32 and PE32+.
 * NOTE(review): "sectionalignement" is misspelled, but it is a public
 * python attribute name, so it is kept for backwards compatibility.
 */
PEPY_PARSED_GET_OPTIONAL(majorlinkerver, MajorLinkerVersion);
PEPY_PARSED_GET_OPTIONAL(minorlinkerver, MinorLinkerVersion);
PEPY_PARSED_GET_OPTIONAL(codesize, SizeOfCode);
PEPY_PARSED_GET_OPTIONAL(initdatasize, SizeOfInitializedData);
PEPY_PARSED_GET_OPTIONAL(uninitdatasize, SizeOfUninitializedData);
PEPY_PARSED_GET_OPTIONAL(entrypointaddr, AddressOfEntryPoint);
PEPY_PARSED_GET_OPTIONAL(baseofcode, BaseOfCode);
PEPY_PARSED_GET_OPTIONAL(imagebase, ImageBase);
PEPY_PARSED_GET_OPTIONAL(sectionalignement, SectionAlignment);
PEPY_PARSED_GET_OPTIONAL(filealignment, FileAlignment);
PEPY_PARSED_GET_OPTIONAL(majorosver, MajorOperatingSystemVersion);
PEPY_PARSED_GET_OPTIONAL(minorosver, MinorOperatingSystemVersion);
PEPY_PARSED_GET_OPTIONAL(win32ver, Win32VersionValue);
PEPY_PARSED_GET_OPTIONAL(imagesize, SizeOfImage);
PEPY_PARSED_GET_OPTIONAL(headersize, SizeOfHeaders);
PEPY_PARSED_GET_OPTIONAL(checksum, CheckSum);
PEPY_PARSED_GET_OPTIONAL(subsystem, Subsystem);
PEPY_PARSED_GET_OPTIONAL(dllcharacteristics, DllCharacteristics);
PEPY_PARSED_GET_OPTIONAL(stackreservesize, SizeOfStackReserve);
PEPY_PARSED_GET_OPTIONAL(stackcommitsize, SizeOfStackCommit);
PEPY_PARSED_GET_OPTIONAL(heapreservesize, SizeOfHeapReserve);
PEPY_PARSED_GET_OPTIONAL(heapcommitsize, SizeOfHeapCommit);
PEPY_PARSED_GET_OPTIONAL(loaderflags, LoaderFlags);
PEPY_PARSED_GET_OPTIONAL(rvasandsize, NumberOfRvaAndSizes);

/*
 * BaseOfData is only in PE32, not PE32+. Thus, it uses a non-standard
 * getter function compared to the other shared fields.
 */
static PyObject *pepy_parsed_get_optional_baseofdata(PyObject *self,
                                                     void *closure) {
  PyObject *ret = NULL;
  if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic ==
      NT_OPTIONAL_32_MAGIC) {
    ret = PyLong_FromUnsignedLong(
        ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader.BaseOfData);
    if (!ret)
      PyErr_SetString(PyExc_AttributeError, "Error getting attribute.");
  } else if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic ==
             NT_OPTIONAL_64_MAGIC) {
    /* PE32+ has no BaseOfData field at all. */
    PyErr_SetString(PyExc_AttributeError, "Not available on PE32+.");
  } else {
    PyErr_SetString(pepy_error, "Bad magic value.");
  }
  return ret;
}

/* Attribute table: read-only getters exposed on pepy.parsed objects. */
static PyGetSetDef pepy_parsed_getseters[] = {
    OBJECTGETTER(parsed, signature, "PE Signature"),
    OBJECTGETTER(parsed, machine, "Machine"),
    OBJECTGETTER(parsed, numberofsections, "Number of sections"),
    OBJECTGETTER(parsed, timedatestamp, "Timedate stamp"),
    OBJECTGETTER(parsed, numberofsymbols, "Number of symbols"),
    OBJECTGETTER(parsed, characteristics, "Characteristics"),
    OBJECTGETTER(parsed, magic, "Magic"),
    OBJECTGETTER_OPTIONAL(majorlinkerver, "Major linker version"),
    OBJECTGETTER_OPTIONAL(minorlinkerver, "Minor linker version"),
    OBJECTGETTER_OPTIONAL(codesize, "Size of code"),
    OBJECTGETTER_OPTIONAL(initdatasize, "Size of initialized data"),
    OBJECTGETTER_OPTIONAL(uninitdatasize, "Size of uninitialized data"),
    OBJECTGETTER_OPTIONAL(entrypointaddr, "Address of entry point"),
    OBJECTGETTER_OPTIONAL(baseofcode, "Base address of code"),
    OBJECTGETTER_OPTIONAL(imagebase, "Image base address"),
    OBJECTGETTER_OPTIONAL(sectionalignement, "Section alignment"),
    OBJECTGETTER_OPTIONAL(filealignment, "File alignment"),
    OBJECTGETTER_OPTIONAL(majorosver, "Major OS version"),
    OBJECTGETTER_OPTIONAL(minorosver, "Minor OS version"),
    OBJECTGETTER_OPTIONAL(win32ver, "Win32 version"),
    OBJECTGETTER_OPTIONAL(imagesize, "Size of image"),
    OBJECTGETTER_OPTIONAL(headersize, "Size of headers"),
    OBJECTGETTER_OPTIONAL(checksum, "Checksum"),
    OBJECTGETTER_OPTIONAL(subsystem, "Subsystem"),
    OBJECTGETTER_OPTIONAL(dllcharacteristics, "DLL characteristics"),
    OBJECTGETTER_OPTIONAL(stackreservesize, "Size of stack reserve"),
    OBJECTGETTER_OPTIONAL(stackcommitsize, "Size of stack commit"),
    OBJECTGETTER_OPTIONAL(heapreservesize, "Size of heap reserve"),
    OBJECTGETTER_OPTIONAL(heapcommitsize, "Size of heap commit"),
    OBJECTGETTER_OPTIONAL(loaderflags, "Loader flags"),
    OBJECTGETTER_OPTIONAL(rvasandsize, "Number of RVA and sizes"),
    /* Base of data is only available in PE32, not PE32+. 
*/\n {(char *) \"baseofdata\",\n (getter) pepy_parsed_get_optional_baseofdata,\n (setter) pepy_attr_not_writable,\n (char *) \"Base address of data\",\n NULL},\n {NULL}};\n\nstatic PyMethodDef pepy_parsed_methods[] = {\n {\"get_entry_point\",\n pepy_parsed_get_entry_point,\n METH_NOARGS,\n \"Return the entry point address.\"},\n {\"get_machine_as_str\",\n pepy_parsed_get_machine_as_str,\n METH_NOARGS,\n \"Return the machine as a human readable string.\"},\n {\"get_subsystem_as_str\",\n pepy_parsed_get_subsystem_as_str,\n METH_NOARGS,\n \"Return the subsystem as a human readable string.\"},\n {\"get_bytes\",\n pepy_parsed_get_bytes,\n METH_VARARGS,\n \"Return the first N bytes at a given address.\"},\n {\"get_sections\",\n pepy_parsed_get_sections,\n METH_NOARGS,\n \"Return a list of section objects.\"},\n {\"get_imports\",\n pepy_parsed_get_imports,\n METH_NOARGS,\n \"Return a list of import objects.\"},\n {\"get_exports\",\n pepy_parsed_get_exports,\n METH_NOARGS,\n \"Return a list of export objects.\"},\n {\"get_relocations\",\n pepy_parsed_get_relocations,\n METH_NOARGS,\n \"Return a list of relocation objects.\"},\n {\"get_resources\",\n pepy_parsed_get_resources,\n METH_NOARGS,\n \"Return a list of resource objects.\"},\n {NULL}};\n\nstatic PyTypeObject pepy_parsed_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.parsed\", /* tp_name */\n sizeof(pepy_parsed), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_parsed_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */\n \"pepy parsed object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset 
*/\n 0, /* tp_iter */\n 0, /* tp_iternext */\n pepy_parsed_methods, /* tp_methods */\n 0, /* tp_members */\n pepy_parsed_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_parsed_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_parsed_new /* tp_new */\n};\n\nstatic PyObject *pepy_parse(PyObject *self, PyObject *args) {\n PyObject *parsed;\n int ret;\n char *err_str = NULL;\n\n parsed = pepy_parsed_new(&pepy_parsed_type, NULL, NULL);\n if (!parsed) {\n PyErr_SetString(pepy_error, \"Unable to make new parsed object.\");\n return NULL;\n }\n\n ret = pepy_parsed_init((pepy_parsed *) parsed, args, NULL);\n if (ret < 0) {\n if (ret == -2) {\n // error (loc)\n size_t len = GetPEErrString().length() + GetPEErrLoc().length() + 4;\n err_str = (char *) malloc(len);\n if (!err_str)\n return PyErr_NoMemory();\n snprintf(err_str,\n len,\n \"%s (%s)\",\n GetPEErrString().c_str(),\n GetPEErrLoc().c_str());\n PyErr_SetString(pepy_error, err_str);\n } else\n PyErr_SetString(pepy_error, \"Unable to init new parsed object.\");\n return NULL;\n }\n\n return parsed;\n}\n\nstatic PyMethodDef pepy_methods[] = {\n {\"parse\", pepy_parse, METH_VARARGS, \"Parse PE from file.\"}, {NULL}};\n\nPyMODINIT_FUNC PyInit_pepy(void) {\n PyObject *m;\n\n if (PyType_Ready(&pepy_parsed_type) < 0 ||\n PyType_Ready(&pepy_section_type) < 0 ||\n PyType_Ready(&pepy_import_type) < 0 ||\n PyType_Ready(&pepy_export_type) < 0 ||\n PyType_Ready(&pepy_relocation_type) < 0 ||\n PyType_Ready(&pepy_resource_type) < 0)\n return NULL;\n\n static struct PyModuleDef moduledef = {\n PyModuleDef_HEAD_INIT,\n \"pepy\",\n \"Python interface to pe-parse.\",\n -1,\n pepy_methods,\n NULL,\n NULL,\n NULL,\n NULL,\n };\n\n m = PyModule_Create(&moduledef);\n if (!m)\n return NULL;\n\n pepy_error = PyErr_NewException((char *) \"pepy.error\", NULL, NULL);\n Py_INCREF(pepy_error);\n PyModule_AddObject(m, \"error\", pepy_error);\n\n 
Py_INCREF(&pepy_parsed_type);\n PyModule_AddObject(m, \"pepy_parsed\", (PyObject *) &pepy_parsed_type);\n\n Py_INCREF(&pepy_section_type);\n PyModule_AddObject(m, \"pepy_section\", (PyObject *) &pepy_section_type);\n\n Py_INCREF(&pepy_import_type);\n PyModule_AddObject(m, \"pepy_import\", (PyObject *) &pepy_import_type);\n\n Py_INCREF(&pepy_export_type);\n PyModule_AddObject(m, \"pepy_export\", (PyObject *) &pepy_export_type);\n\n Py_INCREF(&pepy_relocation_type);\n PyModule_AddObject(m, \"pepy_relocation\", (PyObject *) &pepy_relocation_type);\n\n Py_INCREF(&pepy_resource_type);\n PyModule_AddObject(m, \"pepy_resource\", (PyObject *) &pepy_resource_type);\n\n PyModule_AddStringMacro(m, PEPY_VERSION);\n PyModule_AddStringMacro(m, PEPARSE_VERSION);\n PyModule_AddStringConstant(m, \"__version__\", PEPY_VERSION);\n\n PyModule_AddIntMacro(m, MZ_MAGIC);\n PyModule_AddIntMacro(m, NT_MAGIC);\n PyModule_AddIntMacro(m, NUM_DIR_ENTRIES);\n PyModule_AddIntMacro(m, NT_OPTIONAL_32_MAGIC);\n PyModule_AddIntMacro(m, NT_SHORT_NAME_LEN);\n PyModule_AddIntMacro(m, DIR_EXPORT);\n PyModule_AddIntMacro(m, DIR_IMPORT);\n PyModule_AddIntMacro(m, DIR_RESOURCE);\n PyModule_AddIntMacro(m, DIR_EXCEPTION);\n PyModule_AddIntMacro(m, DIR_SECURITY);\n PyModule_AddIntMacro(m, DIR_BASERELOC);\n PyModule_AddIntMacro(m, DIR_DEBUG);\n PyModule_AddIntMacro(m, DIR_ARCHITECTURE);\n PyModule_AddIntMacro(m, DIR_GLOBALPTR);\n PyModule_AddIntMacro(m, DIR_TLS);\n PyModule_AddIntMacro(m, DIR_LOAD_CONFIG);\n PyModule_AddIntMacro(m, DIR_BOUND_IMPORT);\n PyModule_AddIntMacro(m, DIR_IAT);\n PyModule_AddIntMacro(m, DIR_DELAY_IMPORT);\n PyModule_AddIntMacro(m, DIR_COM_DESCRIPTOR);\n\n PyModule_AddIntMacro(m, IMAGE_SCN_TYPE_NO_PAD);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_CODE);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_INITIALIZED_DATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_UNINITIALIZED_DATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_OTHER);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_INFO);\n PyModule_AddIntMacro(m, 
IMAGE_SCN_LNK_REMOVE);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_COMDAT);\n PyModule_AddIntMacro(m, IMAGE_SCN_NO_DEFER_SPEC_EXC);\n PyModule_AddIntMacro(m, IMAGE_SCN_GPREL);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_FARDATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_PURGEABLE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_16BIT);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_LOCKED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_PRELOAD);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_1BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_2BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_4BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_8BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_16BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_32BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_64BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_128BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_256BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_512BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_1024BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_2048BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_4096BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_8192BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_MASK);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_NRELOC_OVFL);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_DISCARDABLE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_NOT_CACHED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_NOT_PAGED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_SHARED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_EXECUTE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_READ);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_WRITE);\n\n return m;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/rc/util_rc_ptr.h", "#pragma once\n\n#include \n#include \n#include \n#include \n#include \n\nnamespace dxvk {\n\n /**\n * \\brief Pointer for reference-counted objects\n * \n * This only requires the given type to implement \\c incRef\n * and \\c decRef methods that adjust the reference count.\n * \\tparam T Object type\n 
*/\n template\n class Rc {\n template\n friend class Rc;\n public:\n\n Rc() = default;\n Rc(std::nullptr_t) { }\n\n Rc(T* object)\n : m_object(object) {\n this->incRef();\n }\n\n Rc(const Rc& other)\n : m_object(other.m_object) {\n this->incRef();\n }\n\n template\n Rc(const Rc& other)\n : m_object(other.m_object) {\n this->incRef();\n }\n\n Rc(Rc&& other)\n : m_object(other.m_object) {\n other.m_object = nullptr;\n }\n\n template\n Rc(Rc&& other)\n : m_object(other.m_object) {\n other.m_object = nullptr;\n }\n\n Rc& operator = (std::nullptr_t) {\n this->decRef();\n m_object = nullptr;\n return *this;\n }\n\n Rc& operator = (const Rc& other) {\n other.incRef();\n this->decRef();\n m_object = other.m_object;\n return *this;\n }\n\n template\n Rc& operator = (const Rc& other) {\n other.incRef();\n this->decRef();\n m_object = other.m_object;\n return *this;\n }\n\n Rc& operator = (Rc&& other) {\n this->decRef();\n this->m_object = other.m_object;\n other.m_object = nullptr;\n return *this;\n }\n\n template\n Rc& operator = (Rc&& other) {\n this->decRef();\n this->m_object = other.m_object;\n other.m_object = nullptr;\n return *this;\n }\n\n ~Rc() {\n this->decRef();\n }\n\n T& operator * () const { return *m_object; }\n T* operator -> () const { return m_object; }\n T* ptr() const { return m_object; }\n\n template bool operator == (const Rc& other) const { return m_object == other.m_object; }\n template bool operator != (const Rc& other) const { return m_object != other.m_object; }\n\n template bool operator == (Tx* other) const { return m_object == other; }\n template bool operator != (Tx* other) const { return m_object != other; }\n\n bool operator == (std::nullptr_t) const { return m_object == nullptr; }\n bool operator != (std::nullptr_t) const { return m_object != nullptr; }\n \n explicit operator bool () const {\n return m_object != nullptr;\n }\n\n /**\n * \\brief Sets pointer without acquiring a reference\n *\n * Must only be use when a reference has been 
taken via\n * other means.\n * \\param [in] object Object pointer\n */\n void unsafeInsert(T* object) {\n this->decRef();\n m_object = object;\n }\n\n /**\n * \\brief Extracts raw pointer\n *\n * Sets the smart pointer to null without decrementing the\n * reference count. Must only be used when the reference\n * count is decremented in some other way.\n * \\returns Pointer to owned object\n */\n T* unsafeExtract() {\n return std::exchange(m_object, nullptr);\n }\n\n /**\n * \\brief Creates smart pointer without taking reference\n *\n * Must only be used when a refernece has been obtained via other means.\n * \\param [in] object Pointer to object to take ownership of\n */\n static Rc unsafeCreate(T* object) {\n return Rc(object, false);\n }\n\n private:\n\n T* m_object = nullptr;\n\n explicit Rc(T* object, bool)\n : m_object(object) { }\n\n force_inline void incRef() const {\n if (m_object != nullptr)\n m_object->incRef();\n }\n\n force_inline void decRef() const {\n if (m_object != nullptr) {\n if constexpr (std::is_void_vdecRef())>) {\n m_object->decRef();\n } else {\n // Deprecated, objects should manage themselves now.\n if (!m_object->decRef())\n delete m_object;\n }\n }\n }\n\n };\n\n template\n bool operator == (Tx* a, const Rc& b) { return b == a; }\n\n template\n bool operator != (Tx* a, const Rc& b) { return b != a; }\n\n struct RcHash {\n template\n size_t operator () (const Rc& rc) const {\n return reinterpret_cast(rc.ptr()) / sizeof(T);\n }\n };\n\n}\n\ntemplate\nstd::ostream& operator << (std::ostream& os, const dxvk::Rc& rc) {\n return os << rc.ptr();\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_module.h", "class DxbcAnalyzer {\n public:\n DxbcModule(DxbcReader& reader) {\n for (uint32_t i = 0; i < m_header.numChunks(); i++) {\n \n // The chunk tag is stored at the beginning of each chunk\n auto chunkReader = reader.clone(m_header.chunkOffset(i));\n auto tag = chunkReader.readTag();\n \n // The chunk size follows right after the 
four-character\n // code. This does not include the eight bytes that are\n // consumed by the FourCC and chunk length entry.\n auto chunkLength = chunkReader.readu32();\n \n chunkReader = chunkReader.clone(8);\n chunkReader = chunkReader.resize(chunkLength);\n \n if ((tag == \"SHDR\") || (tag == \"SHEX\"))\n m_shexChunk = new DxbcShex(chunkReader);\n \n if ((tag == \"ISGN\") || (tag == \"ISG1\"))\n m_isgnChunk = new DxbcIsgn(chunkReader, tag);\n \n if ((tag == \"OSGN\") || (tag == \"OSG5\") || (tag == \"OSG1\"))\n m_osgnChunk = new DxbcIsgn(chunkReader, tag);\n \n if ((tag == \"PCSG\") || (tag == \"PSG1\"))\n m_psgnChunk = new DxbcIsgn(chunkReader, tag);\n }\n }\n ~DxbcModule() {\n \n }\n SpirvCodeBuffer compile(\n const DxbcModuleInfo& moduleInfo,\n const std::string& fileName) {\n if (m_shexChunk == nullptr)\n throw DxvkError(\"DxbcModule::compile: No SHDR/SHEX chunk\");\n \n DxbcAnalysisInfo analysisInfo;\n \n DxbcAnalyzer analyzer(moduleInfo,\n m_shexChunk->programInfo(),\n m_isgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n this->runAnalyzer(analyzer, m_shexChunk->slice());\n\n m_bindings = std::make_optional(analysisInfo.bindings);\n \n DxbcCompiler compiler(\n fileName, moduleInfo,\n m_shexChunk->programInfo(),\n m_isgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n this->runCompiler(compiler, m_shexChunk->slice());\n\n m_icb = compiler.getIcbData();\n\n return compiler.finalize();\n }\n SpirvCodeBuffer compilePassthroughShader(\n const DxbcModuleInfo& moduleInfo,\n const std::string& fileName) const {\n if (m_shexChunk == nullptr)\n throw DxvkError(\"DxbcModule::compile: No SHDR/SHEX chunk\");\n \n DxbcAnalysisInfo analysisInfo;\n\n DxbcCompiler compiler(\n fileName, moduleInfo,\n DxbcProgramType::GeometryShader,\n m_osgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n compiler.processXfbPassthrough();\n return compiler.finalize();\n }\n private:\n DxbcHeader m_header;\n Rc m_isgnChunk;\n Rc m_osgnChunk;\n Rc m_psgnChunk;\n Rc 
m_shexChunk;\n std::vector m_icb;\n std::optional m_bindings;\n void runAnalyzer(\n DxbcAnalyzer& analyzer,\n DxbcCodeSlice slice) const {\n DxbcDecodeContext decoder;\n \n while (!slice.atEnd()) {\n decoder.decodeInstruction(slice);\n \n analyzer.processInstruction(\n decoder.getInstruction());\n }\n }\n void runCompiler(\n DxbcCompiler& compiler,\n DxbcCodeSlice slice) const {\n DxbcDecodeContext decoder;\n \n while (!slice.atEnd()) {\n decoder.decodeInstruction(slice);\n \n compiler.processInstruction(\n decoder.getInstruction());\n }\n }\n};"], ["/lsfg-vk/thirdparty/pe-parse/dump-pe/main.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n\n#include \"vendor/argh.h\"\n\nusing namespace peparse;\n\nint printExps(void *N,\n const VA &funcAddr,\n std::uint16_t ordinal,\n const std::string &mod,\n const std::string &func,\n const std::string &fwd) {\n static_cast(N);\n\n auto address = static_cast(funcAddr);\n\n // save default formatting\n std::ios initial(nullptr);\n initial.copyfmt(std::cout);\n\n std::cout << \"EXP #\";\n std::cout << ordinal;\n std::cout << \": \";\n std::cout << mod;\n std::cout << \"!\";\n std::cout << func;\n std::cout << \": \";\n if (!fwd.empty()) {\n std::cout << fwd;\n } else {\n std::cout << std::showbase << std::hex << address;\n }\n std::cout << \"\\n\";\n\n // restore default formatting\n std::cout.copyfmt(initial);\n return 0;\n}\n\nint printImports(void *N,\n const VA &impAddr,\n const std::string &modName,\n const std::string &symName) {\n static_cast(N);\n\n auto address = static_cast(impAddr);\n\n std::cout << \"0x\" << std::hex << address << \" \" << modName << \"!\" << symName;\n std::cout << \"\\n\";\n return 0;\n}\n\nint printRelocs(void *N, const VA &relocAddr, const reloc_type &type) {\n static_cast(N);\n\n std::cout << \"TYPE: \";\n switch (type) {\n case RELOC_ABSOLUTE:\n std::cout << \"ABSOLUTE\";\n break;\n case RELOC_HIGH:\n std::cout << \"HIGH\";\n break;\n case RELOC_LOW:\n std::cout << \"LOW\";\n break;\n case RELOC_HIGHLOW:\n std::cout << \"HIGHLOW\";\n break;\n case RELOC_HIGHADJ:\n std::cout << \"HIGHADJ\";\n break;\n case RELOC_MIPS_JMPADDR:\n std::cout << \"MIPS_JMPADDR\";\n break;\n case RELOC_MIPS_JMPADDR16:\n std::cout << \"MIPS_JMPADD16\";\n break;\n case RELOC_DIR64:\n std::cout << \"DIR64\";\n break;\n 
default:\n std::cout << \"UNKNOWN\";\n break;\n }\n\n std::cout << \" VA: 0x\" << std::hex << relocAddr << \"\\n\";\n\n return 0;\n}\n\nint printDebugs(void *N,\n const std::uint32_t &type,\n const bounded_buffer *data) {\n static_cast(N);\n\n std::cout << \"Debug Directory Type: \";\n switch (type) {\n case 0:\n std::cout << \"IMAGE_DEBUG_TYPE_UNKNOWN\";\n break;\n case 1:\n std::cout << \"IMAGE_DEBUG_TYPE_COFF\";\n break;\n case 2:\n std::cout << \"IMAGE_DEBUG_TYPE_CODEVIEW\";\n break;\n case 3:\n std::cout << \"IMAGE_DEBUG_TYPE_FPO\";\n break;\n case 4:\n std::cout << \"IMAGE_DEBUG_TYPE_MISC\";\n break;\n case 5:\n std::cout << \"IMAGE_DEBUG_TYPE_EXCEPTION\";\n break;\n case 6:\n std::cout << \"IMAGE_DEBUG_TYPE_FIXUP\";\n break;\n case 7:\n std::cout << \"IMAGE_DEBUG_TYPE_OMAP_TO_SRC\";\n break;\n case 8:\n std::cout << \"IMAGE_DEBUG_TYPE_OMAP_FROM_SRC\";\n break;\n case 9:\n std::cout << \"IMAGE_DEBUG_TYPE_BORLAND\";\n break;\n case 10:\n std::cout << \"IMAGE_DEBUG_TYPE_RESERVED10\";\n break;\n case 11:\n std::cout << \"IMAGE_DEBUG_TYPE_CLSID\";\n break;\n case 12:\n std::cout << \"IMAGE_DEBUG_TYPE_VC_FEATURE\";\n break;\n case 13:\n std::cout << \"IMAGE_DEBUG_TYPE_POGO\";\n break;\n case 14:\n std::cout << \"IMAGE_DEBUG_TYPE_ILTCG\";\n break;\n case 15:\n std::cout << \"IMAGE_DEBUG_TYPE_MPX\";\n break;\n case 16:\n std::cout << \"IMAGE_DEBUG_TYPE_REPRO\";\n break;\n case 20:\n std::cout << \"IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS\";\n break;\n default:\n std::cout << \"INVALID\";\n break;\n }\n std::cout << \"\\n\";\n std::cout << \"Debug Directory Data: \";\n for (uint32_t i = 0; i < data->bufLen; i++) {\n std::cout << \" 0x\" << std::hex << static_cast(data->buf[i]);\n }\n std::cout << \"\\n\";\n\n return 0;\n}\n\nint printSymbols(void *N,\n const std::string &strName,\n const uint32_t &value,\n const int16_t §ionNumber,\n const uint16_t &type,\n const uint8_t &storageClass,\n const uint8_t &numberOfAuxSymbols) {\n static_cast(N);\n\n std::cout << \"Symbol 
Name: \" << strName << \"\\n\";\n std::cout << \"Symbol Value: 0x\" << std::hex << value << \"\\n\";\n\n std::cout << \"Symbol Section Number: \";\n switch (sectionNumber) {\n case IMAGE_SYM_UNDEFINED:\n std::cout << \"UNDEFINED\";\n break;\n case IMAGE_SYM_ABSOLUTE:\n std::cout << \"ABSOLUTE\";\n break;\n case IMAGE_SYM_DEBUG:\n std::cout << \"DEBUG\";\n break;\n default:\n std::cout << sectionNumber;\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Type: \";\n switch (type) {\n case IMAGE_SYM_TYPE_NULL:\n std::cout << \"NULL\";\n break;\n case IMAGE_SYM_TYPE_VOID:\n std::cout << \"VOID\";\n break;\n case IMAGE_SYM_TYPE_CHAR:\n std::cout << \"CHAR\";\n break;\n case IMAGE_SYM_TYPE_SHORT:\n std::cout << \"SHORT\";\n break;\n case IMAGE_SYM_TYPE_INT:\n std::cout << \"INT\";\n break;\n case IMAGE_SYM_TYPE_LONG:\n std::cout << \"LONG\";\n break;\n case IMAGE_SYM_TYPE_FLOAT:\n std::cout << \"FLOAT\";\n break;\n case IMAGE_SYM_TYPE_DOUBLE:\n std::cout << \"DOUBLE\";\n break;\n case IMAGE_SYM_TYPE_STRUCT:\n std::cout << \"STRUCT\";\n break;\n case IMAGE_SYM_TYPE_UNION:\n std::cout << \"UNION\";\n break;\n case IMAGE_SYM_TYPE_ENUM:\n std::cout << \"ENUM\";\n break;\n case IMAGE_SYM_TYPE_MOE:\n std::cout << \"IMAGE_SYM_TYPE_MOE\";\n break;\n case IMAGE_SYM_TYPE_BYTE:\n std::cout << \"BYTE\";\n break;\n case IMAGE_SYM_TYPE_WORD:\n std::cout << \"WORD\";\n break;\n case IMAGE_SYM_TYPE_UINT:\n std::cout << \"UINT\";\n break;\n case IMAGE_SYM_TYPE_DWORD:\n std::cout << \"DWORD\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Storage Class: \";\n switch (storageClass) {\n case IMAGE_SYM_CLASS_END_OF_FUNCTION:\n std::cout << \"FUNCTION\";\n break;\n case IMAGE_SYM_CLASS_NULL:\n std::cout << \"NULL\";\n break;\n case IMAGE_SYM_CLASS_AUTOMATIC:\n std::cout << \"AUTOMATIC\";\n break;\n case IMAGE_SYM_CLASS_EXTERNAL:\n std::cout << \"EXTERNAL\";\n break;\n case IMAGE_SYM_CLASS_STATIC:\n std::cout << 
\"STATIC\";\n break;\n case IMAGE_SYM_CLASS_REGISTER:\n std::cout << \"REGISTER\";\n break;\n case IMAGE_SYM_CLASS_EXTERNAL_DEF:\n std::cout << \"EXTERNAL DEF\";\n break;\n case IMAGE_SYM_CLASS_LABEL:\n std::cout << \"LABEL\";\n break;\n case IMAGE_SYM_CLASS_UNDEFINED_LABEL:\n std::cout << \"UNDEFINED LABEL\";\n break;\n case IMAGE_SYM_CLASS_MEMBER_OF_STRUCT:\n std::cout << \"MEMBER OF STRUCT\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Number of Aux Symbols: \"\n << static_cast(numberOfAuxSymbols) << \"\\n\";\n\n return 0;\n}\n\nint printRich(void *N, const rich_entry &r) {\n static_cast(N);\n std::cout << std::dec;\n std::cout << std::setw(10) << \"ProdId:\" << std::setw(7) << r.ProductId;\n std::cout << std::setw(10) << \"Build:\" << std::setw(7) << r.BuildNumber;\n std::cout << std::setw(10) << \"Name:\" << std::setw(40)\n << GetRichProductName(r.BuildNumber) << \" \"\n << GetRichObjectType(r.ProductId);\n std::cout << std::setw(10) << \"Count:\" << std::setw(7) << r.Count << \"\\n\";\n return 0;\n}\n\nint printRsrc(void *N, const resource &r) {\n static_cast(N);\n\n if (r.type_str.length())\n std::cout << \"Type (string): \" << r.type_str << \"\\n\";\n else\n std::cout << \"Type: 0x\" << std::hex << r.type << \"\\n\";\n\n if (r.name_str.length())\n std::cout << \"Name (string): \" << r.name_str << \"\\n\";\n else\n std::cout << \"Name: 0x\" << std::hex << r.name << \"\\n\";\n\n if (r.lang_str.length())\n std::cout << \"Lang (string): \" << r.lang_str << \"\\n\";\n else\n std::cout << \"Lang: 0x\" << std::hex << r.lang << \"\\n\";\n\n std::cout << \"Codepage: 0x\" << std::hex << r.codepage << \"\\n\";\n std::cout << \"RVA: \" << std::dec << r.RVA << \"\\n\";\n std::cout << \"Size: \" << std::dec << r.size << \"\\n\";\n return 0;\n}\n\nint printSecs(void *N,\n const VA &secBase,\n const std::string &secName,\n const image_section_header &s,\n const bounded_buffer *data) {\n static_cast(N);\n 
static_cast(s);\n\n std::cout << \"Sec Name: \" << secName << \"\\n\";\n std::cout << \"Sec Base: 0x\" << std::hex << secBase << \"\\n\";\n if (data)\n std::cout << \"Sec Size: \" << std::dec << data->bufLen << \"\\n\";\n else\n std::cout << \"Sec Size: 0\"\n << \"\\n\";\n return 0;\n}\n\n#define DUMP_FIELD(x) \\\n std::cout << \"\" #x << \": 0x\"; \\\n std::cout << std::hex << static_cast(p->peHeader.x) << \"\\n\";\n#define DUMP_DEC_FIELD(x) \\\n std::cout << \"\" #x << \": \"; \\\n std::cout << std::dec << static_cast(p->peHeader.x) << \"\\n\";\n#define DUMP_BOOL_FIELD(x) \\\n std::cout << \"\" #x << \": \"; \\\n std::cout << std::boolalpha << static_cast(p->peHeader.x) << \"\\n\";\n\nint main(int argc, char *argv[]) {\n\n argh::parser cmdl(argv);\n\n if (cmdl[{\"-h\", \"--help\"}] || argc <= 1) {\n std::cout << \"dump-pe utility from Trail of Bits\\n\";\n std::cout << \"Repository: https://github.com/trailofbits/pe-parse\\n\\n\";\n std::cout << \"Usage:\\n\\tdump-pe /path/to/executable.exe\\n\";\n return 0;\n } else if (cmdl[{\"-v\", \"--version\"}]) {\n std::cout << \"dump-pe (pe-parse) version \" << PEPARSE_VERSION << \"\\n\";\n return 0;\n }\n\n parsed_pe *p = ParsePEFromFile(cmdl[1].c_str());\n\n if (p == nullptr) {\n std::cout << \"Error: \" << GetPEErr() << \" (\" << GetPEErrString() << \")\"\n << \"\\n\";\n std::cout << \"Location: \" << GetPEErrLoc() << \"\\n\";\n return 1;\n }\n\n if (p != NULL) {\n // Print DOS header\n DUMP_FIELD(dos.e_magic);\n DUMP_FIELD(dos.e_cp);\n DUMP_FIELD(dos.e_crlc);\n DUMP_FIELD(dos.e_cparhdr);\n DUMP_FIELD(dos.e_minalloc);\n DUMP_FIELD(dos.e_maxalloc);\n DUMP_FIELD(dos.e_ss);\n DUMP_FIELD(dos.e_sp);\n DUMP_FIELD(dos.e_csum);\n DUMP_FIELD(dos.e_ip);\n DUMP_FIELD(dos.e_cs);\n DUMP_FIELD(dos.e_lfarlc);\n DUMP_FIELD(dos.e_ovno);\n DUMP_FIELD(dos.e_res[0]);\n DUMP_FIELD(dos.e_res[1]);\n DUMP_FIELD(dos.e_res[2]);\n DUMP_FIELD(dos.e_res[3]);\n DUMP_FIELD(dos.e_oemid);\n DUMP_FIELD(dos.e_oeminfo);\n DUMP_FIELD(dos.e_res2[0]);\n 
DUMP_FIELD(dos.e_res2[1]);\n DUMP_FIELD(dos.e_res2[2]);\n DUMP_FIELD(dos.e_res2[3]);\n DUMP_FIELD(dos.e_res2[4]);\n DUMP_FIELD(dos.e_res2[5]);\n DUMP_FIELD(dos.e_res2[6]);\n DUMP_FIELD(dos.e_res2[7]);\n DUMP_FIELD(dos.e_res2[8]);\n DUMP_FIELD(dos.e_res2[9]);\n DUMP_FIELD(dos.e_lfanew);\n // Print Rich header info\n DUMP_BOOL_FIELD(rich.isPresent);\n if (p->peHeader.rich.isPresent) {\n DUMP_FIELD(rich.DecryptionKey);\n DUMP_FIELD(rich.Checksum);\n DUMP_BOOL_FIELD(rich.isValid);\n IterRich(p, printRich, NULL);\n }\n // print out some things\n DUMP_FIELD(nt.Signature);\n DUMP_FIELD(nt.FileHeader.Machine);\n DUMP_FIELD(nt.FileHeader.NumberOfSections);\n DUMP_DEC_FIELD(nt.FileHeader.TimeDateStamp);\n DUMP_FIELD(nt.FileHeader.PointerToSymbolTable);\n DUMP_DEC_FIELD(nt.FileHeader.NumberOfSymbols);\n DUMP_FIELD(nt.FileHeader.SizeOfOptionalHeader);\n DUMP_FIELD(nt.FileHeader.Characteristics);\n if (p->peHeader.nt.OptionalMagic == NT_OPTIONAL_32_MAGIC) {\n DUMP_FIELD(nt.OptionalHeader.Magic);\n DUMP_DEC_FIELD(nt.OptionalHeader.MajorLinkerVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.MinorLinkerVersion);\n DUMP_FIELD(nt.OptionalHeader.SizeOfCode);\n DUMP_FIELD(nt.OptionalHeader.SizeOfInitializedData);\n DUMP_FIELD(nt.OptionalHeader.SizeOfUninitializedData);\n DUMP_FIELD(nt.OptionalHeader.AddressOfEntryPoint);\n DUMP_FIELD(nt.OptionalHeader.BaseOfCode);\n DUMP_FIELD(nt.OptionalHeader.BaseOfData);\n DUMP_FIELD(nt.OptionalHeader.ImageBase);\n DUMP_FIELD(nt.OptionalHeader.SectionAlignment);\n DUMP_FIELD(nt.OptionalHeader.FileAlignment);\n DUMP_DEC_FIELD(nt.OptionalHeader.MajorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.MinorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.Win32VersionValue);\n DUMP_FIELD(nt.OptionalHeader.SizeOfImage);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeaders);\n DUMP_FIELD(nt.OptionalHeader.CheckSum);\n DUMP_FIELD(nt.OptionalHeader.Subsystem);\n DUMP_FIELD(nt.OptionalHeader.DllCharacteristics);\n 
DUMP_FIELD(nt.OptionalHeader.SizeOfStackReserve);\n DUMP_FIELD(nt.OptionalHeader.SizeOfStackCommit);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeapReserve);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeapCommit);\n DUMP_FIELD(nt.OptionalHeader.LoaderFlags);\n DUMP_DEC_FIELD(nt.OptionalHeader.NumberOfRvaAndSizes);\n } else {\n DUMP_FIELD(nt.OptionalHeader64.Magic);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MajorLinkerVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MinorLinkerVersion);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfCode);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfInitializedData);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfUninitializedData);\n DUMP_FIELD(nt.OptionalHeader64.AddressOfEntryPoint);\n DUMP_FIELD(nt.OptionalHeader64.BaseOfCode);\n DUMP_FIELD(nt.OptionalHeader64.ImageBase);\n DUMP_FIELD(nt.OptionalHeader64.SectionAlignment);\n DUMP_FIELD(nt.OptionalHeader64.FileAlignment);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MajorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MinorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.Win32VersionValue);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfImage);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeaders);\n DUMP_FIELD(nt.OptionalHeader64.CheckSum);\n DUMP_FIELD(nt.OptionalHeader64.Subsystem);\n DUMP_FIELD(nt.OptionalHeader64.DllCharacteristics);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfStackReserve);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfStackCommit);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeapReserve);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeapCommit);\n DUMP_FIELD(nt.OptionalHeader64.LoaderFlags);\n DUMP_DEC_FIELD(nt.OptionalHeader64.NumberOfRvaAndSizes);\n }\n\n#undef DUMP_FIELD\n#undef DUMP_DEC_FIELD\n\n std::cout << \"Imports: \"\n << \"\\n\";\n IterImpVAString(p, printImports, NULL);\n std::cout << \"Relocations: \"\n << \"\\n\";\n IterRelocs(p, printRelocs, NULL);\n std::cout << \"Debug Directories: \"\n << \"\\n\";\n IterDebugs(p, printDebugs, NULL);\n std::cout << \"Symbols (symbol table): \"\n << \"\\n\";\n 
IterSymbols(p, printSymbols, NULL);\n std::cout << \"Sections: \"\n << \"\\n\";\n IterSec(p, printSecs, NULL);\n std::cout << \"Exports: \"\n << \"\\n\";\n IterExpFull(p, printExps, NULL);\n\n // read the first 8 bytes from the entry point and print them\n VA entryPoint;\n if (GetEntryPoint(p, entryPoint)) {\n std::cout << \"First 8 bytes from entry point (0x\";\n std::cout << std::hex << entryPoint << \"):\"\n << \"\\n\";\n for (std::size_t i = 0; i < 8; i++) {\n std::uint8_t b;\n if (!ReadByteAtVA(p, i + entryPoint, b)) {\n std::cout << \" ERR\";\n } else {\n std::cout << \" 0x\" << std::hex << static_cast(b);\n }\n }\n\n std::cout << \"\\n\";\n }\n\n std::cout << \"Resources: \"\n << \"\\n\";\n IterRsrc(p, printRsrc, NULL);\n\n DestructParsedPE(p);\n\n return 0;\n }\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_module.h", "class generates {\n public:\n explicit SpirvModule(uint32_t version) {\n this->instImportGlsl450();\n }\n ~SpirvModule() {\n \n }\n SpirvCodeBuffer compile() {\n SpirvCodeBuffer result;\n result.putHeader(m_version, m_id);\n result.append(m_capabilities);\n result.append(m_extensions);\n result.append(m_instExt);\n result.append(m_memoryModel);\n result.append(m_entryPoints);\n result.append(m_execModeInfo);\n result.append(m_debugNames);\n result.append(m_annotations);\n result.append(m_typeConstDefs);\n result.append(m_variables);\n\n // Perform some crude dead code elimination. 
In some cases, our compilers\n // may emit invalid code, such as an unreachable block branching to a loop's\n // continue block, but those cases cannot be reasonably detected up-front.\n std::unordered_set reachableBlocks;\n std::unordered_set mergeBlocks;\n\n classifyBlocks(reachableBlocks, mergeBlocks);\n\n bool reachable = true;\n\n for (auto ins : m_code) {\n if (ins.opCode() == spv::OpFunctionEnd) {\n reachable = true;\n result.append(ins);\n } else if (ins.opCode() == spv::OpLabel) {\n uint32_t labelId = ins.arg(1);\n\n if ((reachable = reachableBlocks.find(labelId) != reachableBlocks.end())) {\n result.append(ins);\n } else if (mergeBlocks.find(labelId) != mergeBlocks.end()) {\n result.append(ins);\n result.putIns(spv::OpUnreachable, 1);\n }\n } else if (reachable) {\n result.append(ins);\n }\n }\n\n return result;\n }\n uint32_t allocateId() {\n return m_id++;\n }\n bool hasCapability(\n spv::Capability capability) {\n for (auto ins : m_capabilities) {\n if (ins.opCode() == spv::OpCapability && ins.arg(1) == capability)\n return true;\n }\n\n return false;\n }\n void enableCapability(\n spv::Capability capability) {\n // Scan the generated instructions to check\n // whether we already enabled the capability.\n if (!hasCapability(capability)) {\n m_capabilities.putIns (spv::OpCapability, 2);\n m_capabilities.putWord(capability);\n }\n }\n void enableExtension(\n const char* extensionName) {\n m_extensions.putIns (spv::OpExtension, 1 + m_extensions.strLen(extensionName));\n m_extensions.putStr (extensionName);\n }\n void addEntryPoint(\n uint32_t entryPointId,\n spv::ExecutionModel executionModel,\n const char* name) {\n m_entryPoints.putIns (spv::OpEntryPoint, 3 + m_entryPoints.strLen(name) + m_interfaceVars.size());\n m_entryPoints.putWord (executionModel);\n m_entryPoints.putWord (entryPointId);\n m_entryPoints.putStr (name);\n \n for (uint32_t varId : m_interfaceVars)\n m_entryPoints.putWord(varId);\n }\n void setMemoryModel(\n spv::AddressingModel 
addressModel,\n spv::MemoryModel memoryModel) {\n m_memoryModel.putIns (spv::OpMemoryModel, 3);\n m_memoryModel.putWord (addressModel);\n m_memoryModel.putWord (memoryModel);\n }\n void setExecutionMode(\n uint32_t entryPointId,\n spv::ExecutionMode executionMode) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 3 + argCount);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(executionMode);\n\n for (uint32_t i = 0; i < argCount; i++)\n m_execModeInfo.putWord(args[i]);\n }\n void setExecutionMode(\n uint32_t entryPointId,\n spv::ExecutionMode executionMode,\n uint32_t argCount,\n const uint32_t* args) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 3 + argCount);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(executionMode);\n\n for (uint32_t i = 0; i < argCount; i++)\n m_execModeInfo.putWord(args[i]);\n }\n void setInvocations(\n uint32_t entryPointId,\n uint32_t invocations) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 4);\n m_execModeInfo.putWord (entryPointId);\n m_execModeInfo.putWord (spv::ExecutionModeInvocations);\n m_execModeInfo.putInt32(invocations);\n }\n void setLocalSize(\n uint32_t entryPointId,\n uint32_t x,\n uint32_t y,\n uint32_t z) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 6);\n m_execModeInfo.putWord (entryPointId);\n m_execModeInfo.putWord (spv::ExecutionModeLocalSize);\n m_execModeInfo.putInt32(x);\n m_execModeInfo.putInt32(y);\n m_execModeInfo.putInt32(z);\n }\n void setOutputVertices(\n uint32_t entryPointId,\n uint32_t vertexCount) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 4);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(spv::ExecutionModeOutputVertices);\n m_execModeInfo.putWord(vertexCount);\n }\n uint32_t addDebugString(\n const char* string) {\n uint32_t resultId = this->allocateId();\n \n m_debugNames.putIns (spv::OpString,\n 2 + m_debugNames.strLen(string));\n m_debugNames.putWord(resultId);\n m_debugNames.putStr (string);\n return resultId;\n }\n void 
setDebugSource(\n spv::SourceLanguage language,\n uint32_t version,\n uint32_t file,\n const char* source) {\n uint32_t strLen = source != nullptr\n ? m_debugNames.strLen(source) : 0;\n \n m_debugNames.putIns (spv::OpSource, 4 + strLen);\n m_debugNames.putWord(language);\n m_debugNames.putWord(version);\n m_debugNames.putWord(file);\n \n if (source != nullptr)\n m_debugNames.putStr(source);\n }\n void setDebugName(\n uint32_t expressionId,\n const char* debugName) {\n m_debugNames.putIns (spv::OpName, 2 + m_debugNames.strLen(debugName));\n m_debugNames.putWord(expressionId);\n m_debugNames.putStr (debugName);\n }\n void setDebugMemberName(\n uint32_t structId,\n uint32_t memberId,\n const char* debugName) {\n m_debugNames.putIns (spv::OpMemberName, 3 + m_debugNames.strLen(debugName));\n m_debugNames.putWord(structId);\n m_debugNames.putWord(memberId);\n m_debugNames.putStr (debugName);\n }\n uint32_t constBool(\n bool v) {\n return this->defConst(v\n ? spv::OpConstantTrue\n : spv::OpConstantFalse,\n this->defBoolType(),\n 0, nullptr);\n }\n uint32_t consti32(\n int32_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(32, 1),\n data.size(),\n data.data());\n }\n uint32_t consti64(\n int64_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(64, 1),\n data.size(),\n data.data());\n }\n uint32_t constu32(\n uint32_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(32, 0),\n data.size(),\n data.data());\n }\n uint32_t constu64(\n uint64_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(64, 0),\n data.size(),\n data.data());\n }\n uint32_t constf32(\n float v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return 
this->defConst(\n spv::OpConstant,\n this->defFloatType(32),\n data.size(),\n data.data());\n }\n uint32_t constf64(\n double v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defFloatType(64),\n data.size(),\n data.data());\n }\n uint32_t constvec4i32(\n int32_t x,\n int32_t y,\n int32_t z,\n int32_t w) {\n std::array args = {{\n this->consti32(x), this->consti32(y),\n this->consti32(z), this->consti32(w),\n }};\n \n uint32_t scalarTypeId = this->defIntType(32, 1);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4b32(\n bool x,\n bool y,\n bool z,\n bool w) {\n std::array args = {{\n this->constBool(x), this->constBool(y),\n this->constBool(z), this->constBool(w),\n }};\n \n uint32_t scalarTypeId = this->defBoolType();\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4u32(\n uint32_t x,\n uint32_t y,\n uint32_t z,\n uint32_t w) {\n std::array args = {{\n this->constu32(x), this->constu32(y),\n this->constu32(z), this->constu32(w),\n }};\n \n uint32_t scalarTypeId = this->defIntType(32, 0);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec2f32(\n float x,\n float y) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 2);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec3f32(\n float x,\n float y,\n float z) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n this->constf32(z),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = 
this->defVectorType(scalarTypeId, 3);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4f32(\n float x,\n float y,\n float z,\n float w) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n this->constf32(z), this->constf32(w),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constfReplicant(\n float replicant,\n uint32_t count) {\n uint32_t value = this->constf32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constbReplicant(\n bool replicant,\n uint32_t count) {\n uint32_t value = this->constBool(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defBoolType();\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constiReplicant(\n int32_t replicant,\n uint32_t count) {\n uint32_t value = this->consti32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defIntType(32, 1);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constuReplicant(\n int32_t replicant,\n uint32_t count) {\n uint32_t value = this->constu32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar 
composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defIntType(32, 0);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constComposite(\n uint32_t typeId,\n uint32_t constCount,\n const uint32_t* constIds) {\n return this->defConst(\n spv::OpConstantComposite,\n typeId, constCount, constIds);\n }\n uint32_t constUndef(\n uint32_t typeId) {\n return this->defConst(spv::OpUndef,\n typeId, 0, nullptr);\n }\n uint32_t constNull(\n uint32_t typeId) {\n return this->defConst(spv::OpConstantNull,\n typeId, 0, nullptr);\n }\n uint32_t lateConst32(\n uint32_t typeId) {\n uint32_t resultId = this->allocateId();\n m_lateConsts.insert(resultId);\n\n m_typeConstDefs.putIns (spv::OpConstant, 4);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(0);\n return resultId;\n }\n void setLateConst(\n uint32_t constId,\n const uint32_t* argIds) {\n for (auto ins : m_typeConstDefs) {\n if (ins.opCode() != spv::OpConstant\n && ins.opCode() != spv::OpConstantComposite)\n continue;\n \n if (ins.arg(2) != constId)\n continue;\n\n for (uint32_t i = 3; i < ins.length(); i++)\n ins.setArg(i, argIds[i - 3]);\n\n return;\n }\n }\n uint32_t specConstBool(\n bool v) {\n uint32_t typeId = this->defBoolType();\n uint32_t resultId = this->allocateId();\n \n const spv::Op op = v\n ? 
spv::OpSpecConstantTrue\n : spv::OpSpecConstantFalse;\n \n m_typeConstDefs.putIns (op, 3);\n m_typeConstDefs.putWord (typeId);\n m_typeConstDefs.putWord (resultId);\n return resultId;\n }\n uint32_t specConst32(\n uint32_t typeId,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpSpecConstant, 4);\n m_typeConstDefs.putWord (typeId);\n m_typeConstDefs.putWord (resultId);\n m_typeConstDefs.putWord (value);\n return resultId;\n }\n void decorate(\n uint32_t object,\n spv::Decoration decoration) {\n m_annotations.putIns (spv::OpDecorate, 3);\n m_annotations.putWord (object);\n m_annotations.putWord (decoration);\n }\n void decorateArrayStride(\n uint32_t object,\n uint32_t stride) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationArrayStride);\n m_annotations.putInt32(stride);\n }\n void decorateBinding(\n uint32_t object,\n uint32_t binding) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBinding);\n m_annotations.putInt32(binding);\n }\n void decorateBlock(\n uint32_t object) {\n m_annotations.putIns (spv::OpDecorate, 3);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBlock);\n }\n void decorateBuiltIn(\n uint32_t object,\n spv::BuiltIn builtIn) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBuiltIn);\n m_annotations.putWord (builtIn);\n }\n void decorateComponent(\n uint32_t object,\n uint32_t location) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationComponent);\n m_annotations.putInt32(location);\n }\n void decorateDescriptorSet(\n uint32_t object,\n uint32_t set) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationDescriptorSet);\n 
m_annotations.putInt32(set);\n }\n void decorateIndex(\n uint32_t object,\n uint32_t index) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationIndex);\n m_annotations.putInt32(index);\n }\n void decorateLocation(\n uint32_t object,\n uint32_t location) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationLocation);\n m_annotations.putInt32(location);\n }\n void decorateSpecId(\n uint32_t object,\n uint32_t specId) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationSpecId);\n m_annotations.putInt32(specId);\n }\n void decorateXfb(\n uint32_t object,\n uint32_t streamId,\n uint32_t bufferId,\n uint32_t offset,\n uint32_t stride) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationStream);\n m_annotations.putInt32(streamId);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationXfbBuffer);\n m_annotations.putInt32(bufferId);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationXfbStride);\n m_annotations.putInt32(stride);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationOffset);\n m_annotations.putInt32(offset);\n }\n void memberDecorateBuiltIn(\n uint32_t structId,\n uint32_t memberId,\n spv::BuiltIn builtIn) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationBuiltIn);\n m_annotations.putWord (builtIn);\n }\n void memberDecorate(\n uint32_t structId,\n uint32_t memberId,\n spv::Decoration decoration) {\n m_annotations.putIns (spv::OpMemberDecorate, 4);\n m_annotations.putWord 
(structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (decoration);\n }\n void memberDecorateMatrixStride(\n uint32_t structId,\n uint32_t memberId,\n uint32_t stride) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationMatrixStride);\n m_annotations.putWord (stride);\n }\n void memberDecorateOffset(\n uint32_t structId,\n uint32_t memberId,\n uint32_t offset) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationOffset);\n m_annotations.putWord (offset);\n }\n uint32_t defVoidType() {\n return this->defType(spv::OpTypeVoid, 0, nullptr);\n }\n uint32_t defBoolType() {\n return this->defType(spv::OpTypeBool, 0, nullptr);\n }\n uint32_t defIntType(\n uint32_t width,\n uint32_t isSigned) {\n std::array args = {{ width, isSigned }};\n return this->defType(spv::OpTypeInt,\n args.size(), args.data());\n }\n uint32_t defFloatType(\n uint32_t width) {\n std::array args = {{ width }};\n return this->defType(spv::OpTypeFloat,\n args.size(), args.data());\n }\n uint32_t defVectorType(\n uint32_t elementType,\n uint32_t elementCount) {\n std::array args =\n {{ elementType, elementCount }};\n \n return this->defType(spv::OpTypeVector,\n args.size(), args.data());\n }\n uint32_t defMatrixType(\n uint32_t columnType,\n uint32_t columnCount) {\n std::array args =\n {{ columnType, columnCount }};\n \n return this->defType(spv::OpTypeMatrix,\n args.size(), args.data());\n }\n uint32_t defArrayType(\n uint32_t typeId,\n uint32_t length) {\n std::array args = {{ typeId, length }};\n \n return this->defType(spv::OpTypeArray,\n args.size(), args.data());\n }\n uint32_t defArrayTypeUnique(\n uint32_t typeId,\n uint32_t length) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeArray, 4);\n 
m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(length);\n return resultId;\n }\n uint32_t defRuntimeArrayType(\n uint32_t typeId) {\n std::array args = { typeId };\n \n return this->defType(spv::OpTypeRuntimeArray,\n args.size(), args.data());\n }\n uint32_t defRuntimeArrayTypeUnique(\n uint32_t typeId) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeRuntimeArray, 3);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(typeId);\n return resultId;\n }\n uint32_t defFunctionType(\n uint32_t returnType,\n uint32_t argCount,\n const uint32_t* argTypes) {\n std::vector args;\n args.push_back(returnType);\n \n for (uint32_t i = 0; i < argCount; i++)\n args.push_back(argTypes[i]);\n \n return this->defType(spv::OpTypeFunction,\n args.size(), args.data());\n }\n uint32_t defStructType(\n uint32_t memberCount,\n const uint32_t* memberTypes) {\n return this->defType(spv::OpTypeStruct,\n memberCount, memberTypes);\n }\n uint32_t defStructTypeUnique(\n uint32_t memberCount,\n const uint32_t* memberTypes) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeStruct, 2 + memberCount);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < memberCount; i++)\n m_typeConstDefs.putWord(memberTypes[i]);\n return resultId;\n }\n uint32_t defPointerType(\n uint32_t variableType,\n spv::StorageClass storageClass) {\n std::array args = {{\n static_cast(storageClass),\n variableType,\n }};\n \n return this->defType(spv::OpTypePointer,\n args.size(), args.data());\n }\n uint32_t defSamplerType() {\n return this->defType(spv::OpTypeSampler, 0, nullptr);\n }\n uint32_t defImageType(\n uint32_t sampledType,\n spv::Dim dimensionality,\n uint32_t depth,\n uint32_t arrayed,\n uint32_t multisample,\n uint32_t sampled,\n spv::ImageFormat format) {\n std::array args = {{\n sampledType,\n static_cast(dimensionality),\n depth, arrayed,\n multisample,\n sampled,\n 
static_cast(format)\n }};\n \n return this->defType(spv::OpTypeImage,\n args.size(), args.data());\n }\n uint32_t defSampledImageType(\n uint32_t imageType) {\n return this->defType(spv::OpTypeSampledImage, 1, &imageType);\n }\n uint32_t newVar(\n uint32_t pointerType,\n spv::StorageClass storageClass) {\n uint32_t resultId = this->allocateId();\n \n if (isInterfaceVar(storageClass))\n m_interfaceVars.push_back(resultId);\n\n auto& code = storageClass != spv::StorageClassFunction\n ? m_variables : m_code;\n\n code.putIns (spv::OpVariable, 4);\n code.putWord (pointerType);\n code.putWord (resultId);\n code.putWord (storageClass);\n return resultId;\n }\n uint32_t newVarInit(\n uint32_t pointerType,\n spv::StorageClass storageClass,\n uint32_t initialValue) {\n uint32_t resultId = this->allocateId();\n \n if (isInterfaceVar(storageClass))\n m_interfaceVars.push_back(resultId);\n\n auto& code = storageClass != spv::StorageClassFunction\n ? m_variables : m_code;\n \n code.putIns (spv::OpVariable, 5);\n code.putWord (pointerType);\n code.putWord (resultId);\n code.putWord (storageClass);\n code.putWord (initialValue);\n return resultId;\n }\n void functionBegin(\n uint32_t returnType,\n uint32_t functionId,\n uint32_t functionType,\n spv::FunctionControlMask functionControl) {\n m_code.putIns (spv::OpFunction, 5);\n m_code.putWord(returnType);\n m_code.putWord(functionId);\n m_code.putWord(functionControl);\n m_code.putWord(functionType);\n }\n uint32_t functionParameter(\n uint32_t parameterType) {\n uint32_t parameterId = this->allocateId();\n \n m_code.putIns (spv::OpFunctionParameter, 3);\n m_code.putWord(parameterType);\n m_code.putWord(parameterId);\n return parameterId;\n }\n void functionEnd() {\n m_code.putIns (spv::OpFunctionEnd, 1);\n }\n uint32_t opAccessChain(\n uint32_t resultType,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAccessChain, 4 + indexCount);\n 
m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(composite);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opArrayLength(\n uint32_t resultType,\n uint32_t structure,\n uint32_t memberId) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpArrayLength, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(structure);\n m_code.putWord(memberId);\n return resultId;\n }\n uint32_t opAny(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAny, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opAll(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAll, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opAtomicLoad(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicLoad, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n void opAtomicStore(\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n m_code.putIns (spv::OpAtomicStore, 5);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n }\n uint32_t opAtomicExchange(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicExchange, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n 
return resultId;\n }\n uint32_t opAtomicCompareExchange(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t equal,\n uint32_t unequal,\n uint32_t value,\n uint32_t comparator) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicCompareExchange, 9);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(equal);\n m_code.putWord(unequal);\n m_code.putWord(value);\n m_code.putWord(comparator);\n return resultId;\n }\n uint32_t opAtomicIIncrement(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIIncrement, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n uint32_t opAtomicIDecrement(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIDecrement, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n uint32_t opAtomicIAdd(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIAdd, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicISub(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicISub, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n 
m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicSMin(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicSMin, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicSMax(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicSMax, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicUMin(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicUMin, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicUMax(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicUMax, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicAnd(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicAnd, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n 
m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicOr(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicOr, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicXor(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicXor, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opBitcast(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitcast, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opBitCount(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitCount, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opBitReverse(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitReverse, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFindILsb(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FindILsb);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t 
opFindUMsb(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FindUMsb);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFindSMsb(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FindSMsb);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opBitFieldInsert(\n uint32_t resultType,\n uint32_t base,\n uint32_t insert,\n uint32_t offset,\n uint32_t count) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitFieldInsert, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(insert);\n m_code.putWord(offset);\n m_code.putWord(count);\n return resultId;\n }\n uint32_t opBitFieldSExtract(\n uint32_t resultType,\n uint32_t base,\n uint32_t offset,\n uint32_t count) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitFieldSExtract, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(offset);\n m_code.putWord(count);\n return resultId;\n }\n uint32_t opBitFieldUExtract(\n uint32_t resultType,\n uint32_t base,\n uint32_t offset,\n uint32_t count) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitFieldUExtract, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(offset);\n m_code.putWord(count);\n return resultId;\n }\n uint32_t opBitwiseAnd(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitwiseAnd, 5);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opBitwiseOr(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitwiseOr, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opBitwiseXor(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitwiseXor, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opNot(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpNot, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opShiftLeftLogical(\n uint32_t resultType,\n uint32_t base,\n uint32_t shift) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpShiftLeftLogical, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(shift);\n return resultId;\n }\n uint32_t opShiftRightArithmetic(\n uint32_t resultType,\n uint32_t base,\n uint32_t shift) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpShiftRightArithmetic, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(shift);\n return resultId;\n }\n uint32_t opShiftRightLogical(\n uint32_t resultType,\n uint32_t base,\n uint32_t shift) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpShiftRightLogical, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(shift);\n return resultId;\n }\n uint32_t opConvertFtoS(\n uint32_t resultType,\n uint32_t operand) {\n 
uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertFToS, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opConvertFtoU(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertFToU, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opConvertStoF(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertSToF, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opConvertUtoF(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertUToF, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opCompositeConstruct(\n uint32_t resultType,\n uint32_t valueCount,\n const uint32_t* valueArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpCompositeConstruct, 3 + valueCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n \n for (uint32_t i = 0; i < valueCount; i++)\n m_code.putWord(valueArray[i]);\n return resultId;\n }\n uint32_t opCompositeExtract(\n uint32_t resultType,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpCompositeExtract, 4 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(composite);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opCompositeInsert(\n uint32_t resultType,\n uint32_t object,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n 
m_code.putIns (spv::OpCompositeInsert, 5 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(object);\n m_code.putWord(composite);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opDpdx(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdx, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdy(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdy, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdxCoarse(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdxCoarse, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdyCoarse(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdyCoarse, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdxFine(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdxFine, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdyFine(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdyFine, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opVectorExtractDynamic(\n uint32_t resultType,\n uint32_t vector,\n uint32_t index) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns 
(spv::OpVectorExtractDynamic, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n m_code.putWord(index);\n return resultId;\n }\n uint32_t opVectorShuffle(\n uint32_t resultType,\n uint32_t vectorLeft,\n uint32_t vectorRight,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpVectorShuffle, 5 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vectorLeft);\n m_code.putWord(vectorRight);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opSNegate(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSNegate, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFNegate(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFNegate, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opSAbs(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450SAbs);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFAbs(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FAbs);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFSign(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FSign);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFMix(\n uint32_t resultType,\n uint32_t x,\n uint32_t y,\n uint32_t a) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FMix);\n m_code.putWord(x);\n m_code.putWord(y);\n m_code.putWord(a);\n return resultId;\n }\n uint32_t opCross(\n uint32_t resultType,\n uint32_t x,\n uint32_t y) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Cross);\n m_code.putWord(x);\n m_code.putWord(y);\n return resultId;\n }\n uint32_t opIAdd(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIAdd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opISub(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpISub, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFAdd(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFAdd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFSub(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFSub, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSDiv(\n 
uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSDiv, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUDiv(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUDiv, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSRem(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSRem, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUMod(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUMod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFDiv(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFDiv, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opIMul(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIMul, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFMul(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFMul, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opVectorTimesScalar(\n uint32_t resultType,\n uint32_t vector,\n uint32_t scalar) {\n 
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpVectorTimesScalar, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector);
    m_code.putWord(scalar);
    return resultId;
  }
  // Emits OpMatrixTimesMatrix: linear-algebra product of matrix a by matrix b.
  uint32_t opMatrixTimesMatrix(
      uint32_t resultType,
      uint32_t a,
      uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpMatrixTimesMatrix, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }
  // Emits OpMatrixTimesVector: matrix * column vector.
  uint32_t opMatrixTimesVector(
      uint32_t resultType,
      uint32_t matrix,
      uint32_t vector) {
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpMatrixTimesVector, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(matrix);
    m_code.putWord(vector);
    return resultId;
  }
  // Emits OpVectorTimesMatrix: row vector * matrix (operand order matters).
  uint32_t opVectorTimesMatrix(
      uint32_t resultType,
      uint32_t vector,
      uint32_t matrix) {
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpVectorTimesMatrix, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector);
    m_code.putWord(matrix);
    return resultId;
  }
  // Emits OpTranspose on a matrix id.
  uint32_t opTranspose(
      uint32_t resultType,
      uint32_t matrix) {
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpTranspose, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(matrix);
    return resultId;
  }
  // Matrix inverse via the GLSL.std.450 extended instruction set
  // (core SPIR-V has no inverse opcode).
  uint32_t opInverse(
      uint32_t resultType,
      uint32_t matrix) {
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpExtInst, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450MatrixInverse);
    m_code.putWord(matrix);
    return resultId;
  }
  // Fused multiply-add a * b + c via GLSL.std.450 Fma.
  uint32_t opFFma(
      uint32_t resultType,
      uint32_t a,
      uint32_t b,
      uint32_t c) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 8);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Fma);\n m_code.putWord(a);\n m_code.putWord(b);\n m_code.putWord(c);\n return resultId;\n }\n uint32_t opFMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opNMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450NMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opNMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450NMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450SMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = 
this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450SMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFClamp(\n uint32_t resultType,\n uint32_t x,\n uint32_t minVal,\n uint32_t maxVal) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FClamp);\n m_code.putWord(x);\n m_code.putWord(minVal);\n m_code.putWord(maxVal);\n return resultId;\n }\n uint32_t opNClamp(\n uint32_t resultType,\n uint32_t x,\n uint32_t minVal,\n uint32_t maxVal) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450NClamp);\n m_code.putWord(x);\n m_code.putWord(minVal);\n m_code.putWord(maxVal);\n return resultId;\n }\n uint32_t opIEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n 
m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opINotEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpINotEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSLessThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSLessThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSLessThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSLessThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSGreaterThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSGreaterThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSGreaterThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSGreaterThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opULessThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpULessThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opULessThanEqual(\n uint32_t 
resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpULessThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opUGreaterThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUGreaterThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opUGreaterThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUGreaterThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFOrdEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFUnordNotEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFUnordNotEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdLessThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFOrdLessThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdLessThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n 
m_code.putIns (spv::OpFOrdLessThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdGreaterThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFOrdGreaterThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdGreaterThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFOrdGreaterThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opLogicalEqual(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalNotEqual(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalNotEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalAnd(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalAnd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalOr(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalOr, 5);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalNot(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalNot, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDot(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDot, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSin(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Sin);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opCos(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Cos);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opSqrt(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Sqrt);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opInverseSqrt(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InverseSqrt);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opNormalize(\n 
uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Normalize);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRawAccessChain(\n uint32_t resultType,\n uint32_t base,\n uint32_t stride,\n uint32_t index,\n uint32_t offset,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpRawAccessChainNV, operand ? 8 : 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(stride);\n m_code.putWord(index);\n m_code.putWord(offset);\n\n if (operand)\n m_code.putWord(operand);\n\n return resultId;\n }\n uint32_t opReflect(\n uint32_t resultType,\n uint32_t incident,\n uint32_t normal) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Reflect);\n m_code.putWord(incident);\n m_code.putWord(normal);\n return resultId;\n }\n uint32_t opLength(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Length);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opExp2(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Exp2);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opExp(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Exp);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opLog2(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Log2);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opPow(\n uint32_t resultType,\n uint32_t base,\n uint32_t exponent) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Pow);\n m_code.putWord(base);\n m_code.putWord(exponent);\n return resultId;\n }\n uint32_t opFract(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Fract);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opCeil(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Ceil);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFloor(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Floor);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRound(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Round);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRoundEven(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450RoundEven);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opTrunc(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Trunc);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFConvert(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpFConvert, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opPackHalf2x16(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450PackHalf2x16);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opUnpackHalf2x16(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UnpackHalf2x16);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opSelect(\n uint32_t resultType,\n uint32_t condition,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSelect, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n 
m_code.putWord(condition);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opIsNan(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIsNan, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opIsInf(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIsInf, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFunctionCall(\n uint32_t resultType,\n uint32_t functionId,\n uint32_t argCount,\n const uint32_t* argIds) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFunctionCall, 4 + argCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(functionId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_code.putWord(argIds[i]);\n return resultId;\n }\n void opLabel(\n uint32_t labelId) {\n m_code.putIns (spv::OpLabel, 2);\n m_code.putWord(labelId);\n\n m_blockId = labelId;\n }\n uint32_t opLoad(\n uint32_t typeId,\n uint32_t pointerId) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLoad, 4 + getMemoryOperandWordCount(operands));\n m_code.putWord(typeId);\n m_code.putWord(resultId);\n m_code.putWord(pointerId);\n\n putMemoryOperands(operands);\n return resultId;\n }\n uint32_t opLoad(\n uint32_t typeId,\n uint32_t pointerId,\n const SpirvMemoryOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLoad, 4 + getMemoryOperandWordCount(operands));\n m_code.putWord(typeId);\n m_code.putWord(resultId);\n m_code.putWord(pointerId);\n\n putMemoryOperands(operands);\n return resultId;\n }\n void opStore(\n uint32_t pointerId,\n uint32_t valueId) {\n m_code.putIns (spv::OpStore, 3 + getMemoryOperandWordCount(operands));\n m_code.putWord(pointerId);\n 
m_code.putWord(valueId);\n\n putMemoryOperands(operands);\n }\n void opStore(\n uint32_t pointerId,\n uint32_t valueId,\n const SpirvMemoryOperands& operands) {\n m_code.putIns (spv::OpStore, 3 + getMemoryOperandWordCount(operands));\n m_code.putWord(pointerId);\n m_code.putWord(valueId);\n\n putMemoryOperands(operands);\n }\n uint32_t opInterpolateAtCentroid(\n uint32_t resultType,\n uint32_t interpolant) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtCentroid);\n m_code.putWord(interpolant);\n return resultId;\n }\n uint32_t opInterpolateAtSample(\n uint32_t resultType,\n uint32_t interpolant,\n uint32_t sample) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtSample);\n m_code.putWord(interpolant);\n m_code.putWord(sample);\n return resultId;\n }\n uint32_t opInterpolateAtOffset(\n uint32_t resultType,\n uint32_t interpolant,\n uint32_t offset) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtOffset);\n m_code.putWord(interpolant);\n m_code.putWord(offset);\n return resultId;\n }\n uint32_t opImage(\n uint32_t resultType,\n uint32_t sampledImage) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpImage, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n return resultId;\n }\n uint32_t opImageRead(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseRead\n : spv::OpImageRead;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n void opImageWrite(\n uint32_t image,\n uint32_t coordinates,\n uint32_t texel,\n const SpirvImageOperands& operands) {\n m_code.putIns (spv::OpImageWrite,\n 4 + getImageOperandWordCount(operands));\n m_code.putWord(image);\n m_code.putWord(coordinates);\n m_code.putWord(texel);\n \n putImageOperands(operands);\n }\n uint32_t opImageSparseTexelsResident(\n uint32_t resultType,\n uint32_t residentCode) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpImageSparseTexelsResident, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(residentCode);\n\n return resultId;\n }\n uint32_t opImageTexelPointer(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n uint32_t sample) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageTexelPointer, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n m_code.putWord(sample);\n return resultId;\n }\n uint32_t opSampledImage(\n uint32_t resultType,\n uint32_t image,\n uint32_t sampler) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSampledImage, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(sampler);\n return resultId;\n }\n uint32_t opImageQuerySizeLod(\n uint32_t resultType,\n uint32_t image,\n uint32_t lod) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySizeLod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(lod);\n return resultId;\n }\n uint32_t opImageQuerySize(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = 
this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySize, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageQueryLevels(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQueryLevels, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageQueryLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQueryLod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n return resultId;\n }\n uint32_t opImageQuerySamples(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySamples, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageFetch(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n\n spv::Op op = operands.sparse\n ? spv::OpImageSparseFetch\n : spv::OpImageFetch;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageGather(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t component,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseGather\n : spv::OpImageGather;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(component);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageDrefGather(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseDrefGather\n : spv::OpImageDrefGather;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleImplicitLod\n : spv::OpImageSampleImplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleExplicitLod\n : spv::OpImageSampleExplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjImplicitLod\n : spv::OpImageSampleProjImplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjExplicitLod\n : spv::OpImageSampleProjExplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleDrefImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleDrefImplicitLod\n : spv::OpImageSampleDrefImplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleDrefExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleDrefExplicitLod\n : spv::OpImageSampleDrefExplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjDrefImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjDrefImplicitLod\n : spv::OpImageSampleProjDrefImplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjDrefExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleProjDrefExplicitLod\n : spv::OpImageSampleProjDrefExplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opGroupNonUniformBallot(\n uint32_t resultType,\n uint32_t execution,\n uint32_t predicate) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBallot, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(predicate);\n return resultId;\n }\n uint32_t opGroupNonUniformBallotBitCount(\n uint32_t resultType,\n uint32_t execution,\n uint32_t operation,\n uint32_t ballot) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBallotBitCount, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(operation);\n m_code.putWord(ballot);\n return resultId;\n }\n uint32_t opGroupNonUniformElect(\n uint32_t resultType,\n uint32_t execution) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformElect, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n return resultId;\n }\n uint32_t opGroupNonUniformBroadcastFirst(\n uint32_t resultType,\n uint32_t execution,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBroadcastFirst, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(value);\n return resultId;\n }\n void opControlBarrier(\n uint32_t execution,\n uint32_t memory,\n uint32_t semantics) {\n m_code.putIns (spv::OpControlBarrier, 4);\n m_code.putWord(execution);\n m_code.putWord(memory);\n m_code.putWord(semantics);\n }\n void opMemoryBarrier(\n uint32_t memory,\n uint32_t 
semantics) {\n m_code.putIns (spv::OpMemoryBarrier, 3);\n m_code.putWord(memory);\n m_code.putWord(semantics);\n }\n void opLoopMerge(\n uint32_t mergeBlock,\n uint32_t continueTarget,\n uint32_t loopControl) {\n m_code.putIns (spv::OpLoopMerge, 4);\n m_code.putWord(mergeBlock);\n m_code.putWord(continueTarget);\n m_code.putWord(loopControl);\n }\n void opSelectionMerge(\n uint32_t mergeBlock,\n uint32_t selectionControl) {\n m_code.putIns (spv::OpSelectionMerge, 3);\n m_code.putWord(mergeBlock);\n m_code.putWord(selectionControl);\n }\n void opBranch(\n uint32_t label) {\n m_code.putIns (spv::OpBranch, 2);\n m_code.putWord(label);\n\n m_blockId = 0;\n }\n void opBranchConditional(\n uint32_t condition,\n uint32_t trueLabel,\n uint32_t falseLabel) {\n m_code.putIns (spv::OpBranchConditional, 4);\n m_code.putWord(condition);\n m_code.putWord(trueLabel);\n m_code.putWord(falseLabel);\n\n m_blockId = 0;\n }\n void opSwitch(\n uint32_t selector,\n uint32_t jumpDefault,\n uint32_t caseCount,\n const SpirvSwitchCaseLabel* caseLabels) {\n m_code.putIns (spv::OpSwitch, 3 + 2 * caseCount);\n m_code.putWord(selector);\n m_code.putWord(jumpDefault);\n \n for (uint32_t i = 0; i < caseCount; i++) {\n m_code.putWord(caseLabels[i].literal);\n m_code.putWord(caseLabels[i].labelId);\n }\n\n m_blockId = 0;\n }\n uint32_t opPhi(\n uint32_t resultType,\n uint32_t sourceCount,\n const SpirvPhiLabel* sourceLabels) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpPhi, 3 + 2 * sourceCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n \n for (uint32_t i = 0; i < sourceCount; i++) {\n m_code.putWord(sourceLabels[i].varId);\n m_code.putWord(sourceLabels[i].labelId);\n }\n \n return resultId;\n }\n void opReturn() {\n m_code.putIns (spv::OpReturn, 1);\n m_blockId = 0;\n }\n void opDemoteToHelperInvocation() {\n m_code.putIns (spv::OpDemoteToHelperInvocation, 1);\n }\n void opEmitVertex(\n uint32_t streamId) {\n if (streamId == 0) {\n m_code.putIns 
(spv::OpEmitVertex, 1);\n } else {\n m_code.putIns (spv::OpEmitStreamVertex, 2);\n m_code.putWord(streamId);\n }\n }\n void opEndPrimitive(\n uint32_t streamId) {\n if (streamId == 0) {\n m_code.putIns (spv::OpEndPrimitive, 1);\n } else {\n m_code.putIns (spv::OpEndStreamPrimitive, 2);\n m_code.putWord(streamId);\n }\n }\n void opBeginInvocationInterlock() {\n m_code.putIns(spv::OpBeginInvocationInterlockEXT, 1);\n }\n void opEndInvocationInterlock() {\n m_code.putIns(spv::OpEndInvocationInterlockEXT, 1);\n }\n uint32_t opSinCos(\n uint32_t x,\n bool useBuiltIn) {\n // We only operate on 32-bit floats here\n uint32_t floatType = defFloatType(32);\n uint32_t resultType = defVectorType(floatType, 2u);\n\n if (useBuiltIn) {\n std::array members = { opSin(floatType, x), opCos(floatType, x) };\n return opCompositeConstruct(resultType, members.size(), members.data());\n } else {\n uint32_t uintType = defIntType(32, false);\n uint32_t sintType = defIntType(32, true);\n uint32_t boolType = defBoolType();\n\n // Normalize input to multiple of pi/4\n uint32_t xNorm = opFMul(floatType, opFAbs(floatType, x), constf32(4.0 / pi));\n\n uint32_t xTrunc = opTrunc(floatType, xNorm);\n uint32_t xFract = opFSub(floatType, xNorm, xTrunc);\n\n uint32_t xInt = opConvertFtoU(uintType, xTrunc);\n\n // Mirror input along x axis as necessary\n uint32_t mirror = opINotEqual(boolType, opBitwiseAnd(uintType, xInt, constu32(1u)), constu32(0u));\n xFract = opSelect(floatType, mirror, opFSub(floatType, constf32(1.0f), xFract), xFract);\n\n // Compute taylor series for fractional part\n uint32_t xFract_2 = opFMul(floatType, xFract, xFract);\n uint32_t xFract_4 = opFMul(floatType, xFract_2, xFract_2);\n uint32_t xFract_6 = opFMul(floatType, xFract_4, xFract_2);\n\n uint32_t taylor = opFMul(floatType, xFract_6, constf32(-sincosTaylorFactor(7)));\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFFma(floatType, xFract_4, constf32(sincosTaylorFactor(5)), taylor);\n decorate(taylor, 
spv::DecorationNoContraction);\n\n taylor = opFFma(floatType, xFract_2, constf32(-sincosTaylorFactor(3)), taylor);\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFAdd(floatType, constf32(sincosTaylorFactor(1)), taylor);\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFMul(floatType, taylor, xFract);\n decorate(taylor, spv::DecorationNoContraction);\n\n // Compute co-function based on sin^2 + cos^2 = 1\n uint32_t coFunc = opSqrt(floatType, opFSub(floatType, constf32(1.0f), opFMul(floatType, taylor, taylor)));\n\n // Determine whether the taylor series was used for sine or cosine and assign the correct result\n uint32_t funcIsSin = opIEqual(boolType, opBitwiseAnd(uintType, opIAdd(uintType, xInt, constu32(1u)), constu32(2u)), constu32(0u));\n\n uint32_t sin = opSelect(floatType, funcIsSin, taylor, coFunc);\n uint32_t cos = opSelect(floatType, funcIsSin, coFunc, taylor);\n\n // Determine whether sine is negative. Interpret the input as a\n // signed integer in order to propagate signed zeroes properly.\n uint32_t inputNeg = opSLessThan(boolType, opBitcast(sintType, x), consti32(0));\n\n uint32_t sinNeg = opINotEqual(boolType, opBitwiseAnd(uintType, xInt, constu32(4u)), constu32(0u));\n sinNeg = opLogicalNotEqual(boolType, sinNeg, inputNeg);\n\n // Determine whether cosine is negative\n uint32_t cosNeg = opINotEqual(boolType, opBitwiseAnd(uintType, opIAdd(uintType, xInt, constu32(2u)), constu32(4u)), constu32(0u));\n\n sin = opSelect(floatType, sinNeg, opFNegate(floatType, sin), sin);\n cos = opSelect(floatType, cosNeg, opFNegate(floatType, cos), cos);\n\n std::array members = { sin, cos };\n return opCompositeConstruct(resultType, members.size(), members.data());\n }\n }\n private:\n uint32_t m_version;\n uint32_t m_id = 1;\n uint32_t m_instExtGlsl450 = 0;\n uint32_t m_blockId = 0;\n SpirvCodeBuffer m_capabilities;\n SpirvCodeBuffer m_extensions;\n SpirvCodeBuffer m_instExt;\n SpirvCodeBuffer m_memoryModel;\n SpirvCodeBuffer 
m_entryPoints;\n SpirvCodeBuffer m_execModeInfo;\n SpirvCodeBuffer m_debugNames;\n SpirvCodeBuffer m_annotations;\n SpirvCodeBuffer m_typeConstDefs;\n SpirvCodeBuffer m_variables;\n SpirvCodeBuffer m_code;\n std::unordered_set m_lateConsts;\n std::vector m_interfaceVars;\n uint32_t defType(\n spv::Op op, \n uint32_t argCount,\n const uint32_t* argIds) {\n // Since the type info is stored in the code buffer,\n // we can use the code buffer to look up type IDs as\n // well. Result IDs are always stored as argument 1.\n for (auto ins : m_typeConstDefs) {\n bool match = ins.opCode() == op\n && ins.length() == 2 + argCount;\n \n for (uint32_t i = 0; i < argCount && match; i++)\n match &= ins.arg(2 + i) == argIds[i];\n \n if (match)\n return ins.arg(1);\n }\n \n // Type not yet declared, create a new one.\n uint32_t resultId = this->allocateId();\n m_typeConstDefs.putIns (op, 2 + argCount);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_typeConstDefs.putWord(argIds[i]);\n return resultId;\n }\n uint32_t defConst(\n spv::Op op,\n uint32_t typeId,\n uint32_t argCount,\n const uint32_t* argIds) {\n // Avoid declaring constants multiple times\n for (auto ins : m_typeConstDefs) {\n bool match = ins.opCode() == op\n && ins.length() == 3 + argCount\n && ins.arg(1) == typeId;\n \n for (uint32_t i = 0; i < argCount && match; i++)\n match &= ins.arg(3 + i) == argIds[i];\n \n if (!match)\n continue;\n \n uint32_t id = ins.arg(2);\n\n if (m_lateConsts.find(id) == m_lateConsts.end())\n return id;\n }\n \n // Constant not yet declared, make a new one\n uint32_t resultId = this->allocateId();\n m_typeConstDefs.putIns (op, 3 + argCount);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_typeConstDefs.putWord(argIds[i]);\n return resultId;\n }\n void instImportGlsl450() {\n m_instExtGlsl450 = this->allocateId();\n const char* name = \"GLSL.std.450\";\n \n m_instExt.putIns 
(spv::OpExtInstImport, 2 + m_instExt.strLen(name));\n m_instExt.putWord(m_instExtGlsl450);\n m_instExt.putStr (name);\n }\n uint32_t getMemoryOperandWordCount(\n const SpirvMemoryOperands& op) const {\n const uint32_t result\n = ((op.flags & spv::MemoryAccessAlignedMask) ? 1 : 0)\n + ((op.flags & spv::MemoryAccessMakePointerAvailableMask) ? 1 : 0)\n + ((op.flags & spv::MemoryAccessMakePointerVisibleMask) ? 1 : 0);\n\n return op.flags ? result + 1 : 0;\n }\n void putMemoryOperands(\n const SpirvMemoryOperands& op) {\n if (op.flags) {\n m_code.putWord(op.flags);\n\n if (op.flags & spv::MemoryAccessAlignedMask)\n m_code.putWord(op.alignment);\n\n if (op.flags & spv::MemoryAccessMakePointerAvailableMask)\n m_code.putWord(op.makeAvailable);\n\n if (op.flags & spv::MemoryAccessMakePointerVisibleMask)\n m_code.putWord(op.makeVisible);\n }\n }\n uint32_t getImageOperandWordCount(\n const SpirvImageOperands& op) const {\n // Each flag may add one or more operands\n const uint32_t result\n = ((op.flags & spv::ImageOperandsBiasMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsLodMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsConstOffsetMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsGradMask) ? 2 : 0)\n + ((op.flags & spv::ImageOperandsOffsetMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsConstOffsetsMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsSampleMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMinLodMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMakeTexelAvailableMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMakeTexelVisibleMask) ? 1 : 0);\n \n // Add a DWORD for the operand mask if it is non-zero\n return op.flags ? 
result + 1 : 0;\n }\n void putImageOperands(\n const SpirvImageOperands& op) {\n if (op.flags) {\n m_code.putWord(op.flags);\n \n if (op.flags & spv::ImageOperandsBiasMask)\n m_code.putWord(op.sLodBias);\n \n if (op.flags & spv::ImageOperandsLodMask)\n m_code.putWord(op.sLod);\n\n if (op.flags & spv::ImageOperandsGradMask) {\n m_code.putWord(op.sGradX);\n m_code.putWord(op.sGradY);\n }\n\n if (op.flags & spv::ImageOperandsConstOffsetMask)\n m_code.putWord(op.sConstOffset);\n\n if (op.flags & spv::ImageOperandsOffsetMask)\n m_code.putWord(op.gOffset);\n \n if (op.flags & spv::ImageOperandsConstOffsetsMask)\n m_code.putWord(op.gConstOffsets);\n \n if (op.flags & spv::ImageOperandsSampleMask)\n m_code.putWord(op.sSampleId);\n \n if (op.flags & spv::ImageOperandsMinLodMask)\n m_code.putWord(op.sMinLod);\n\n if (op.flags & spv::ImageOperandsMakeTexelAvailableMask)\n m_code.putWord(op.makeAvailable);\n\n if (op.flags & spv::ImageOperandsMakeTexelVisibleMask)\n m_code.putWord(op.makeVisible);\n }\n }\n bool isInterfaceVar(\n spv::StorageClass sclass) const {\n if (m_version < spvVersion(1, 4)) {\n return sclass == spv::StorageClassInput\n || sclass == spv::StorageClassOutput;\n } else {\n // All global variables need to be declared\n return sclass != spv::StorageClassFunction;\n }\n }\n void classifyBlocks(\n std::unordered_set& reachableBlocks,\n std::unordered_set& mergeBlocks) {\n std::unordered_multimap branches;\n std::queue blockQueue;\n\n uint32_t blockId = 0;\n\n for (auto ins : m_code) {\n switch (ins.opCode()) {\n case spv::OpLabel: {\n uint32_t id = ins.arg(1);\n\n if (!blockId)\n branches.insert({ 0u, id });\n\n blockId = id;\n } break;\n\n case spv::OpFunction: {\n blockId = 0u;\n } break;\n\n case spv::OpBranch: {\n branches.insert({ blockId, ins.arg(1) });\n } break;\n\n case spv::OpBranchConditional: {\n branches.insert({ blockId, ins.arg(2) });\n branches.insert({ blockId, ins.arg(3) });\n } break;\n\n case spv::OpSwitch: {\n branches.insert({ blockId, 
ins.arg(2) });\n\n for (uint32_t i = 4; i < ins.length(); i += 2)\n branches.insert({ blockId, ins.arg(i) });\n } break;\n\n case spv::OpSelectionMerge: {\n mergeBlocks.insert(ins.arg(1));\n } break;\n\n case spv::OpLoopMerge: {\n mergeBlocks.insert(ins.arg(1));\n\n // It is possible for the continue block to be unreachable in\n // practice, but we still need to emit it if we are not going\n // to eliminate this loop. Since the current block dominates\n // the loop, use it to keep the continue block intact.\n branches.insert({ blockId, ins.arg(2) });\n } break;\n\n default:;\n }\n }\n\n blockQueue.push(0);\n\n while (!blockQueue.empty()) {\n uint32_t id = blockQueue.front();\n\n auto range = branches.equal_range(id);\n\n for (auto i = range.first; i != range.second; i++) {\n if (reachableBlocks.insert(i->second).second)\n blockQueue.push(i->second);\n }\n\n blockQueue.pop();\n }\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_analysis.h", "class DxbcAnalyzer {\n public:\n DxbcAnalyzer(\n const DxbcModuleInfo& moduleInfo,\n const DxbcProgramInfo& programInfo,\n const Rc& isgn,\n const Rc& osgn,\n const Rc& psgn,\n DxbcAnalysisInfo& analysis) {\n // Get number of clipping and culling planes from the\n // input and output signatures. 
We will need this to\n // declare the shader input and output interfaces.\n m_analysis->clipCullIn = getClipCullInfo(m_isgn);\n m_analysis->clipCullOut = getClipCullInfo(m_osgn);\n }\n ~DxbcAnalyzer() {\n \n }\n void processInstruction(\n const DxbcShaderInstruction& ins) {\n switch (ins.opClass) {\n case DxbcInstClass::Atomic: {\n const uint32_t operandId = ins.dstCount - 1;\n\n if (ins.dst[operandId].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.dst[operandId].idx[0].offset;\n m_analysis->uavInfos[registerId].accessAtomicOp = true;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n\n // Check whether the atomic operation is order-invariant\n DxvkAccessOp op = DxvkAccessOp::None;\n\n switch (ins.op) {\n case DxbcOpcode::AtomicAnd: op = DxvkAccessOp::And; break;\n case DxbcOpcode::AtomicOr: op = DxvkAccessOp::Or; break;\n case DxbcOpcode::AtomicXor: op = DxvkAccessOp::Xor; break;\n case DxbcOpcode::AtomicIAdd: op = DxvkAccessOp::Add; break;\n case DxbcOpcode::AtomicIMax: op = DxvkAccessOp::IMax; break;\n case DxbcOpcode::AtomicIMin: op = DxvkAccessOp::IMin; break;\n case DxbcOpcode::AtomicUMax: op = DxvkAccessOp::UMax; break;\n case DxbcOpcode::AtomicUMin: op = DxvkAccessOp::UMin; break;\n default: break;\n }\n\n setUavAccessOp(registerId, op);\n }\n } break;\n\n case DxbcInstClass::TextureSample:\n case DxbcInstClass::TextureGather:\n case DxbcInstClass::TextureQueryLod:\n case DxbcInstClass::VectorDeriv: {\n m_analysis->usesDerivatives = true;\n } break;\n\n case DxbcInstClass::ControlFlow: {\n if (ins.op == DxbcOpcode::Discard)\n m_analysis->usesKill = true;\n } break;\n\n case DxbcInstClass::BufferLoad: {\n uint32_t operandId = ins.op == DxbcOpcode::LdStructured ? 
2 : 1;\n bool sparseFeedback = ins.dstCount == 2;\n\n if (ins.src[operandId].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.src[operandId].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT;\n m_analysis->uavInfos[registerId].sparseFeedback |= sparseFeedback;\n\n setUavAccessOp(registerId, DxvkAccessOp::None);\n } else if (ins.src[operandId].type == DxbcOperandType::Resource) {\n const uint32_t registerId = ins.src[operandId].idx[0].offset;\n m_analysis->srvInfos[registerId].sparseFeedback |= sparseFeedback;\n }\n } break;\n\n case DxbcInstClass::BufferStore: {\n if (ins.dst[0].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_WRITE_BIT;\n\n setUavAccessOp(registerId, getStoreAccessOp(ins.dst[0].mask, ins.src[ins.srcCount - 1u]));\n }\n } break;\n\n case DxbcInstClass::TypedUavLoad: {\n const uint32_t registerId = ins.src[1].idx[0].offset;\n m_analysis->uavInfos[registerId].accessTypedLoad = true;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT;\n\n setUavAccessOp(registerId, DxvkAccessOp::None);\n } break;\n\n case DxbcInstClass::TypedUavStore: {\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_WRITE_BIT;\n\n // The UAV format may change between dispatches, so be conservative here\n // and only allow this optimization when the app is writing zeroes.\n DxvkAccessOp storeOp = getStoreAccessOp(DxbcRegMask(0xf), ins.src[1u]);\n\n if (storeOp != DxvkAccessOp(DxvkAccessOp::OpType::StoreUi, 0u))\n storeOp = DxvkAccessOp::None;\n\n setUavAccessOp(registerId, storeOp);\n } break;\n\n case DxbcInstClass::Declaration: {\n switch (ins.op) {\n case DxbcOpcode::DclConstantBuffer: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcConstBufBindingCount)\n 
m_analysis->bindings.cbvMask |= 1u << registerId;\n } break;\n\n case DxbcOpcode::DclSampler: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcSamplerBindingCount)\n m_analysis->bindings.samplerMask |= 1u << registerId;\n } break;\n\n case DxbcOpcode::DclResource:\n case DxbcOpcode::DclResourceRaw:\n case DxbcOpcode::DclResourceStructured: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n uint32_t idx = registerId / 64u;\n uint32_t bit = registerId % 64u;\n\n if (registerId < DxbcResourceBindingCount)\n m_analysis->bindings.srvMask[idx] |= uint64_t(1u) << bit;\n } break;\n\n case DxbcOpcode::DclUavTyped:\n case DxbcOpcode::DclUavRaw:\n case DxbcOpcode::DclUavStructured: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcUavBindingCount)\n m_analysis->bindings.uavMask |= uint64_t(1u) << registerId;\n } break;\n\n default: ;\n }\n } break;\n\n default:\n break;\n }\n\n for (uint32_t i = 0; i < ins.dstCount; i++) {\n if (ins.dst[i].type == DxbcOperandType::IndexableTemp) {\n uint32_t index = ins.dst[i].idx[0].offset;\n m_analysis->xRegMasks[index] |= ins.dst[i].mask;\n }\n }\n }\n private:\n Rc m_isgn;\n Rc m_osgn;\n Rc m_psgn;\n DxbcAnalysisInfo* m_analysis = nullptr;\n DxbcClipCullInfo getClipCullInfo(\n const Rc& sgn) const {\n DxbcClipCullInfo result;\n \n if (sgn != nullptr) {\n for (auto e = sgn->begin(); e != sgn->end(); e++) {\n const uint32_t componentCount = e->componentMask.popCount();\n \n if (e->systemValue == DxbcSystemValue::ClipDistance)\n result.numClipPlanes += componentCount;\n if (e->systemValue == DxbcSystemValue::CullDistance)\n result.numCullPlanes += componentCount;\n }\n }\n \n return result;\n }\n void setUavAccessOp(uint32_t uav, DxvkAccessOp op) {\n if (m_analysis->uavInfos[uav].accessOp == DxvkAccessOp::None)\n m_analysis->uavInfos[uav].accessOp = op;\n\n // Maintain ordering if the UAV is accessed via other operations as well\n if (op == DxvkAccessOp::None || 
m_analysis->uavInfos[uav].accessOp != op)\n m_analysis->uavInfos[uav].nonInvariantAccess = true;\n }\n static DxvkAccessOp getStoreAccessOp(DxbcRegMask writeMask, const DxbcRegister& src) {\n if (src.type != DxbcOperandType::Imm32)\n return DxvkAccessOp::None;\n\n // Trivial case, same value is written to all components\n if (src.componentCount == DxbcComponentCount::Component1)\n return getConstantStoreOp(src.imm.u32_1);\n\n if (src.componentCount != DxbcComponentCount::Component4)\n return DxvkAccessOp::None;\n\n // Otherwise, make sure that all written components are equal\n DxvkAccessOp op = DxvkAccessOp::None;\n\n for (uint32_t i = 0u; i < 4u; i++) {\n if (!writeMask[i])\n continue;\n\n // If the written value can't be represented, skip\n DxvkAccessOp scalarOp = getConstantStoreOp(src.imm.u32_4[i]);\n\n if (scalarOp == DxvkAccessOp::None)\n return DxvkAccessOp::None;\n\n // First component written\n if (op == DxvkAccessOp::None)\n op = scalarOp;\n\n // Conflicting store ops\n if (op != scalarOp)\n return DxvkAccessOp::None;\n }\n\n return op;\n }\n static DxvkAccessOp getConstantStoreOp(uint32_t value) {\n constexpr uint32_t mask = 0xfffu;\n\n uint32_t ubits = value & mask;\n uint32_t fbits = (value >> 20u);\n\n if (value == ubits)\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreUi, ubits);\n\n if (value == (ubits | ~mask))\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreSi, ubits);\n\n if (value == (fbits << 20u))\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreF, fbits);\n\n return DxvkAccessOp::None;\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_small_vector.h", "#pragma once\n\n#include \n#include \n#include \n#include \n\n#include \"util_bit.h\"\n#include \"util_likely.h\"\n\nnamespace dxvk {\n\n template\n class small_vector {\n using storage = std::aligned_storage_t;\n public:\n\n constexpr static size_t MinCapacity = N;\n\n small_vector() { }\n\n small_vector(size_t size) {\n resize(size);\n }\n\n small_vector(const small_vector& other) 
{\n reserve(other.m_size);\n\n for (size_t i = 0; i < other.m_size; i++)\n *ptr(i) = *other.ptr(i);\n\n m_size = other.m_size;\n }\n\n small_vector& operator = (const small_vector& other) {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n reserve(other.m_size);\n\n for (size_t i = 0; i < other.m_size; i++)\n *ptr(i) = *other.ptr(i);\n\n m_size = other.m_size;\n return *this;\n }\n\n small_vector(small_vector&& other) {\n if (other.m_size <= N) {\n for (size_t i = 0; i < other.m_size; i++)\n new (&u.m_data[i]) T(std::move(*other.ptr(i)));\n } else {\n u.m_ptr = other.u.m_ptr;\n m_capacity = other.m_capacity;\n\n other.u.m_ptr = nullptr;\n other.m_capacity = N;\n }\n\n m_size = other.m_size;\n other.m_size = 0;\n }\n\n small_vector& operator = (small_vector&& other) {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n\n if (other.m_size <= N) {\n m_capacity = N;\n\n for (size_t i = 0; i < other.m_size; i++)\n new (&u.m_data[i]) T(std::move(*other.ptr(i)));\n } else {\n u.m_ptr = other.u.m_ptr;\n m_capacity = other.m_capacity;\n\n other.u.m_ptr = nullptr;\n other.m_capacity = N;\n }\n\n m_size = other.m_size;\n other.m_size = 0;\n return *this;\n }\n\n ~small_vector() {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n }\n\n size_t size() const {\n return m_size;\n }\n\n void reserve(size_t n) {\n if (likely(n <= m_capacity))\n return;\n\n n = pick_capacity(n);\n\n storage* data = new storage[n];\n\n for (size_t i = 0; i < m_size; i++) {\n new (&data[i]) T(std::move(*ptr(i)));\n ptr(i)->~T();\n }\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n\n m_capacity = n;\n u.m_ptr = data;\n }\n\n const T* data() const { return ptr(0); }\n T* data() { return ptr(0); }\n\n void resize(size_t n) {\n reserve(n);\n\n for (size_t i = n; i < m_size; i++)\n ptr(i)->~T();\n\n for (size_t i = m_size; i < n; i++)\n new (ptr(i)) T();\n\n m_size = n;\n }\n\n void push_back(const T& 
object) {\n reserve(m_size + 1);\n new (ptr(m_size++)) T(object);\n }\n\n void push_back(T&& object) {\n reserve(m_size + 1);\n new (ptr(m_size++)) T(std::move(object));\n }\n\n template\n T& emplace_back(Args... args) {\n reserve(m_size + 1);\n return *(new (ptr(m_size++)) T(std::forward(args)...));\n }\n\n void erase(size_t idx) {\n ptr(idx)->~T();\n\n for (size_t i = idx; i < m_size - 1; i++) {\n new (ptr(i)) T(std::move(*ptr(i + 1)));\n ptr(i + 1)->~T();\n }\n }\n\n void pop_back() {\n ptr(--m_size)->~T();\n }\n\n void clear() {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n m_size = 0;\n }\n\n bool empty() const {\n return m_size == 0;\n }\n\n T& operator [] (size_t idx) { return *ptr(idx); }\n const T& operator [] (size_t idx) const { return *ptr(idx); }\n\n T& front() { return *ptr(0); }\n const T& front() const { return *ptr(0); }\n\n T& back() { return *ptr(m_size - 1); }\n const T& back() const { return *ptr(m_size - 1); }\n\n private:\n\n size_t m_capacity = N;\n size_t m_size = 0;\n\n union {\n storage* m_ptr;\n storage m_data[N];\n } u;\n\n size_t pick_capacity(size_t n) {\n // Pick next largest power of two for the new capacity\n return size_t(1u) << ((sizeof(n) * 8u) - bit::lzcnt(n - 1));\n }\n\n T* ptr(size_t idx) {\n return m_capacity == N\n ? reinterpret_cast(&u.m_data[idx])\n : reinterpret_cast(&u.m_ptr[idx]);\n }\n\n const T* ptr(size_t idx) const {\n return m_capacity == N\n ? 
reinterpret_cast(&u.m_data[idx])\n : reinterpret_cast(&u.m_ptr[idx]);\n }\n\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_code_buffer.h", "class for {\n public:\n SpirvCodeBuffer() {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n explicit SpirvCodeBuffer(uint32_t size) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n SpirvCodeBuffer(uint32_t size, const uint32_t* data) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n SpirvCodeBuffer(std::istream& stream) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n 
std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n template\n SpirvCodeBuffer(const uint32_t (&data)[N])\n : SpirvCodeBuffer(N, data) { }\n ~SpirvCodeBuffer() { }\n uint32_t allocId() {\n constexpr size_t BoundIdsOffset = 3;\n\n if (m_code.size() <= BoundIdsOffset)\n return 0;\n\n return m_code[BoundIdsOffset]++;\n }\n void append(const SpirvInstruction& ins) {\n if (other.size() != 0) {\n const size_t size = m_code.size();\n m_code.resize(size + other.m_code.size());\n \n uint32_t* dst = this->m_code.data();\n const uint32_t* src = other.m_code.data();\n \n std::memcpy(dst + size, src, other.size());\n m_ptr += other.m_code.size();\n }\n }\n void append(const SpirvCodeBuffer& other) {\n if (other.size() != 0) {\n const size_t size = m_code.size();\n m_code.resize(size + other.m_code.size());\n \n uint32_t* dst = this->m_code.data();\n const uint32_t* src = other.m_code.data();\n \n std::memcpy(dst + size, src, other.size());\n m_ptr += other.m_code.size();\n }\n }\n void putWord(uint32_t word) {\n m_code.insert(m_code.begin() + m_ptr, word);\n m_ptr += 1;\n }\n void putIns(spv::Op opCode, uint16_t wordCount) {\n this->putWord(\n (static_cast(opCode) << 0)\n | (static_cast(wordCount) << 16));\n }\n void putInt32(uint32_t word) {\n this->putWord(word);\n }\n void putInt64(uint64_t value) {\n this->putWord(value >> 0);\n this->putWord(value >> 32);\n }\n void putFloat32(float value) {\n uint32_t tmp;\n static_assert(sizeof(tmp) == sizeof(value));\n std::memcpy(&tmp, &value, sizeof(value));\n this->putInt32(tmp);\n }\n void putFloat64(double value) {\n uint64_t tmp;\n static_assert(sizeof(tmp) == sizeof(value));\n std::memcpy(&tmp, &value, sizeof(value));\n this->putInt64(tmp);\n }\n void putStr(const char* str) {\n uint32_t word = 0;\n uint32_t nbit = 0;\n \n for (uint32_t i = 0; str[i] != '\\0'; str++) {\n word |= (static_cast(str[i]) & 0xFF) << nbit;\n \n if ((nbit += 8) == 32) {\n 
this->putWord(word);\n word = 0;\n nbit = 0;\n }\n }\n \n // Commit current word\n this->putWord(word);\n }\n void putHeader(uint32_t version, uint32_t boundIds) {\n this->putWord(spv::MagicNumber);\n this->putWord(version);\n this->putWord(0); // Generator\n this->putWord(boundIds);\n this->putWord(0); // Schema\n }\n void erase(size_t size) {\n m_code.erase(\n m_code.begin() + m_ptr,\n m_code.begin() + m_ptr + size);\n }\n uint32_t strLen(const char* str) {\n // Null-termination plus padding\n return (std::strlen(str) + 4) / 4;\n }\n void store(std::ostream& stream) const {\n stream.write(\n reinterpret_cast(m_code.data()),\n sizeof(uint32_t) * m_code.size());\n }\n private:\n std::vector m_code;\n size_t m_ptr = 0;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_chunk_isgn.h", "class DxbcIsgn {\n public:\n DxbcIsgn(DxbcReader reader, DxbcTag tag) {\n uint32_t elementCount = reader.readu32();\n reader.skip(sizeof(uint32_t));\n \n std::array componentTypes = {\n DxbcScalarType::Uint32, DxbcScalarType::Uint32,\n DxbcScalarType::Sint32, DxbcScalarType::Float32,\n };\n\n // https://github.com/DarkStarSword/3d-fixes/blob/master/dx11shaderanalyse.py#L101\n bool hasStream = (tag == \"ISG1\") || (tag == \"OSG1\") || (tag == \"PSG1\") || (tag == \"OSG5\");\n bool hasPrecision = (tag == \"ISG1\") || (tag == \"OSG1\") || (tag == \"PSG1\");\n \n for (uint32_t i = 0; i < elementCount; i++) {\n DxbcSgnEntry entry;\n entry.streamId = hasStream ? 
reader.readu32() : 0;\n entry.semanticName = reader.clone(reader.readu32()).readString();\n entry.semanticIndex = reader.readu32();\n entry.systemValue = static_cast(reader.readu32());\n entry.componentType = componentTypes.at(reader.readu32());\n entry.registerId = reader.readu32();\n\n uint32_t mask = reader.readu32();\n\n entry.componentMask = bit::extract(mask, 0, 3);\n entry.componentUsed = bit::extract(mask, 8, 11);\n\n if (hasPrecision)\n reader.readu32();\n\n m_entries.push_back(entry);\n }\n }\n ~DxbcIsgn() {\n \n }\n const DxbcSgnEntry* findByRegister(\n uint32_t registerId) const;\n const DxbcSgnEntry* find(\n const std::string& semanticName,\n uint32_t semanticIndex,\n uint32_t streamIndex) const;\n DxbcRegMask regMask(\n uint32_t registerId) const {\n DxbcRegMask mask;\n\n for (auto e = this->begin(); e != this->end(); e++) {\n if (e->registerId == registerId)\n mask |= e->componentMask;\n }\n\n return mask;\n }\n uint32_t maxRegisterCount() const {\n uint32_t result = 0;\n for (auto e = this->begin(); e != this->end(); e++)\n result = std::max(result, e->registerId + 1);\n return result;\n }\n static bool compareSemanticNames(\n const std::string& a,\n const std::string& b) {\n if (a.size() != b.size())\n return false;\n \n for (size_t i = 0; i < a.size(); i++) {\n char ac = a[i];\n char bc = b[i];\n\n if (ac != bc) {\n if (ac >= 'A' && ac <= 'Z') ac += 'a' - 'A';\n if (bc >= 'A' && bc <= 'Z') bc += 'a' - 'A';\n\n if (ac != bc)\n return false;\n }\n }\n \n return true;\n }\n private:\n std::vector m_entries;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_options.h", "#pragma once\n\n#include \n\n#include \"util_flags.h\"\n\nnamespace dxvk {\n\n struct D3D11Options;\n\n enum class DxbcFloatControlFlag : uint32_t {\n DenormFlushToZero32,\n DenormPreserve64,\n PreserveNan32,\n PreserveNan64,\n };\n\n using DxbcFloatControlFlags = Flags;\n\n struct DxbcOptions {\n DxbcOptions() {}\n\n // Clamp oDepth in fragment shaders if the depth\n // clip device 
feature is not supported\n bool useDepthClipWorkaround = false;\n\n /// Determines whether format qualifiers\n /// on typed UAV loads are required\n bool supportsTypedUavLoadR32 = false;\n\n /// Determines whether raw access chains are supported\n bool supportsRawAccessChains = false;\n\n /// Clear thread-group shared memory to zero\n bool zeroInitWorkgroupMemory = false;\n\n /// Declare vertex positions as invariant\n bool invariantPosition = false;\n\n /// Insert memory barriers after TGSM stoes\n bool forceVolatileTgsmAccess = false;\n\n /// Try to detect hazards in UAV access and insert\n /// barriers when we know control flow is uniform.\n bool forceComputeUavBarriers = false;\n\n /// Replace ld_ms with ld\n bool disableMsaa = false;\n\n /// Force sample rate shading by using sample\n /// interpolation for fragment shader inputs\n bool forceSampleRateShading = false;\n\n // Enable per-sample interlock if supported\n bool enableSampleShadingInterlock = false;\n\n /// Use tightly packed arrays for immediate\n /// constant buffers if possible\n bool supportsTightIcbPacking = false;\n\n /// Whether exporting point size is required\n bool needsPointSizeExport = true;\n\n /// Whether to enable sincos emulation\n bool sincosEmulation = false;\n\n /// Float control flags\n DxbcFloatControlFlags floatControl;\n\n /// Minimum storage buffer alignment\n VkDeviceSize minSsboAlignment = 0;\n };\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_flags.h", "#pragma once\n\n#include \n\n#include \"util_bit.h\"\n\nnamespace dxvk {\n \n template\n class Flags {\n \n public:\n \n using IntType = std::underlying_type_t;\n \n Flags() { }\n \n Flags(IntType t)\n : m_bits(t) { }\n \n template\n Flags(T f, Tx... fx) {\n this->set(f, fx...);\n }\n \n template\n void set(Tx... fx) {\n m_bits |= bits(fx...);\n }\n \n void set(Flags flags) {\n m_bits |= flags.m_bits;\n }\n \n template\n void clr(Tx... 
fx) {\n m_bits &= ~bits(fx...);\n }\n \n void clr(Flags flags) {\n m_bits &= ~flags.m_bits;\n }\n \n template\n bool any(Tx... fx) const {\n return (m_bits & bits(fx...)) != 0;\n }\n \n template\n bool all(Tx... fx) const {\n const IntType mask = bits(fx...);\n return (m_bits & mask) == mask;\n }\n \n bool test(T f) const {\n return this->any(f);\n }\n \n bool isClear() const {\n return m_bits == 0;\n }\n \n void clrAll() {\n m_bits = 0;\n }\n \n IntType raw() const {\n return m_bits;\n }\n \n Flags operator & (const Flags& other) const {\n return Flags(m_bits & other.m_bits);\n }\n \n Flags operator | (const Flags& other) const {\n return Flags(m_bits | other.m_bits);\n }\n \n Flags operator ^ (const Flags& other) const {\n return Flags(m_bits ^ other.m_bits);\n }\n\n bool operator == (const Flags& other) const {\n return m_bits == other.m_bits;\n }\n \n bool operator != (const Flags& other) const {\n return m_bits != other.m_bits;\n }\n \n private:\n \n IntType m_bits = 0;\n \n static IntType bit(T f) {\n return IntType(1) << static_cast(f);\n }\n \n template\n static IntType bits(T f, Tx... 
fx) {\n return bit(f) | bits(fx...);\n }\n \n static IntType bits() {\n return 0;\n }\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_tag.h", "#pragma once\n\n#include \"dxbc_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Four-character tag\n * \n * Used to identify chunks in the\n * compiled DXBC file by name.\n */\n class DxbcTag {\n \n public:\n \n DxbcTag() {\n for (size_t i = 0; i < 4; i++)\n m_chars[i] = '\\0';\n }\n \n DxbcTag(const char* tag) {\n for (size_t i = 0; i < 4; i++)\n m_chars[i] = tag[i];\n }\n \n bool operator == (const DxbcTag& other) const {\n bool result = true;\n for (size_t i = 0; i < 4; i++)\n result &= m_chars[i] == other.m_chars[i];\n return result;\n }\n \n bool operator != (const DxbcTag& other) const {\n return !this->operator == (other);\n }\n \n const char* operator & () const { return m_chars; }\n char* operator & () { return m_chars; }\n \n private:\n \n char m_chars[4];\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_reader.h", "class DxbcReader {\n public:\n template\n auto readEnum() {\n using Tx = std::underlying_type_t;\n return static_cast(this->readNum());\n }\n DxbcTag readTag() {\n DxbcTag tag;\n this->read(&tag, 4);\n return tag;\n }\n std::string readString() {\n std::string result;\n \n while (m_data[m_pos] != '\\0')\n result.push_back(m_data[m_pos++]);\n \n m_pos++;\n return result;\n }\n void read(void* dst, size_t n) {\n if (m_pos + n > m_size)\n throw DxvkError(\"DxbcReader::read: Unexpected end of file\");\n std::memcpy(dst, m_data + m_pos, n);\n m_pos += n;\n }\n void skip(size_t n) {\n if (m_pos + n > m_size)\n throw DxvkError(\"DxbcReader::skip: Unexpected end of file\");\n m_pos += n;\n }\n DxbcReader clone(size_t pos) const {\n if (pos > m_size)\n throw DxvkError(\"DxbcReader::clone: Invalid offset\");\n return DxbcReader(m_data + pos, m_size - pos);\n }\n DxbcReader resize(size_t size) const {\n if (size > m_size)\n throw DxvkError(\"DxbcReader::resize: Invalid size\");\n 
return DxbcReader(m_data, size, m_pos);\n }\n void store(std::ostream&& stream) const {\n stream.write(m_data, m_size);\n }\n private:\n const char* m_data = nullptr;\n size_t m_size = 0;\n size_t m_pos = 0;\n template\n T readNum() {\n T result;\n this->read(&result, sizeof(result));\n return result;\n }\n};"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/unicode_winapi.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2020 Trail of Bits, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n\nnamespace peparse {\nstd::string from_utf16(const UCharString &u) {\n std::string result;\n std::size_t size = WideCharToMultiByte(CP_UTF8,\n 0,\n u.data(),\n static_cast(u.size()),\n nullptr,\n 0,\n nullptr,\n nullptr);\n\n if (size <= 0) {\n return result;\n }\n\n result.reserve(size);\n WideCharToMultiByte(CP_UTF8,\n 0,\n u.data(),\n static_cast(u.size()),\n &result[0],\n static_cast(result.capacity()),\n nullptr,\n nullptr);\n\n return result;\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_common.h", "class DxbcProgramType {\n public:\n VkShaderStageFlagBits shaderStage() const {\n switch (m_type) {\n case DxbcProgramType::PixelShader : return VK_SHADER_STAGE_FRAGMENT_BIT;\n case DxbcProgramType::VertexShader : return VK_SHADER_STAGE_VERTEX_BIT;\n case DxbcProgramType::GeometryShader : return VK_SHADER_STAGE_GEOMETRY_BIT;\n case DxbcProgramType::HullShader : return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;\n case DxbcProgramType::DomainShader : return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;\n case DxbcProgramType::ComputeShader : return VK_SHADER_STAGE_COMPUTE_BIT;\n default: throw DxvkError(\"DxbcProgramInfo::shaderStage: Unsupported program type\");\n }\n }\n spv::ExecutionModel executionModel() const {\n switch (m_type) {\n case DxbcProgramType::PixelShader : return spv::ExecutionModelFragment;\n case DxbcProgramType::VertexShader : return spv::ExecutionModelVertex;\n case DxbcProgramType::GeometryShader : return spv::ExecutionModelGeometry;\n case DxbcProgramType::HullShader : return spv::ExecutionModelTessellationControl;\n case DxbcProgramType::DomainShader : return spv::ExecutionModelTessellationEvaluation;\n case 
DxbcProgramType::ComputeShader : return spv::ExecutionModelGLCompute;\n default: throw DxvkError(\"DxbcProgramInfo::executionModel: Unsupported program type\");\n }\n }\n private:\n DxbcProgramType m_type = DxbcProgramType::PixelShader;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/log/log.h", "#pragma once\n\n#include \n#include \n\nnamespace dxvk {\n \n enum class LogLevel : uint32_t {\n Trace = 0,\n Debug = 1,\n Info = 2,\n Warn = 3,\n Error = 4,\n None = 5,\n };\n\n /**\n * \\brief Logger\n * \n * Logger for one DLL. Creates a text file and\n * writes all log messages to that file.\n */\n class Logger {\n \n public:\n \n Logger() {}\n Logger(const std::string& file_name) {}\n ~Logger() {}\n \n static void trace(const std::string& message) {}\n static void debug(const std::string& message) {}\n static void info (const std::string& message) {}\n static void warn (const std::string& message) {}\n static void err (const std::string& message) {}\n static void log (LogLevel level, const std::string& message) {}\n \n static LogLevel logLevel() {\n return LogLevel::Warn;\n }\n\n };\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_math.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n \n constexpr size_t CACHE_LINE_SIZE = 64;\n constexpr double pi = 3.14159265359;\n\n template\n constexpr T clamp(T n, T lo, T hi) {\n if (n < lo) return lo;\n if (n > hi) return hi;\n return n;\n }\n \n template\n constexpr T align(T what, U to) {\n return (what + to - 1) & ~(to - 1);\n }\n\n template\n constexpr T alignDown(T what, U to) {\n return (what / to) * to;\n }\n\n // Equivalent of std::clamp for use with floating point numbers\n // Handles (-){INFINITY,NAN} cases.\n // Will return min in cases of NAN, etc.\n inline float fclamp(float value, float min, float max) {\n return std::fmin(\n std::fmax(value, min), max);\n }\n\n template\n inline T divCeil(T dividend, T divisor) {\n return (dividend + divisor - 1) / divisor;\n }\n \n}\n"], 
["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_enums.h", "#pragma once\n\n#include \"dxbc_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Instruction code listing\n */\n enum class DxbcOpcode : uint32_t {\n Add = 0,\n And = 1,\n Break = 2,\n Breakc = 3,\n Call = 4,\n Callc = 5,\n Case = 6,\n Continue = 7,\n Continuec = 8,\n Cut = 9,\n Default = 10,\n DerivRtx = 11,\n DerivRty = 12,\n Discard = 13,\n Div = 14,\n Dp2 = 15,\n Dp3 = 16,\n Dp4 = 17,\n Else = 18,\n Emit = 19,\n EmitThenCut = 20,\n EndIf = 21,\n EndLoop = 22,\n EndSwitch = 23,\n Eq = 24,\n Exp = 25,\n Frc = 26,\n FtoI = 27,\n FtoU = 28,\n Ge = 29,\n IAdd = 30,\n If = 31,\n IEq = 32,\n IGe = 33,\n ILt = 34,\n IMad = 35,\n IMax = 36,\n IMin = 37,\n IMul = 38,\n INe = 39,\n INeg = 40,\n IShl = 41,\n IShr = 42,\n ItoF = 43,\n Label = 44,\n Ld = 45,\n LdMs = 46,\n Log = 47,\n Loop = 48,\n Lt = 49,\n Mad = 50,\n Min = 51,\n Max = 52,\n CustomData = 53,\n Mov = 54,\n Movc = 55,\n Mul = 56,\n Ne = 57,\n Nop = 58,\n Not = 59,\n Or = 60,\n ResInfo = 61,\n Ret = 62,\n Retc = 63,\n RoundNe = 64,\n RoundNi = 65,\n RoundPi = 66,\n RoundZ = 67,\n Rsq = 68,\n Sample = 69,\n SampleC = 70,\n SampleClz = 71,\n SampleL = 72,\n SampleD = 73,\n SampleB = 74,\n Sqrt = 75,\n Switch = 76,\n SinCos = 77,\n UDiv = 78,\n ULt = 79,\n UGe = 80,\n UMul = 81,\n UMad = 82,\n UMax = 83,\n UMin = 84,\n UShr = 85,\n UtoF = 86,\n Xor = 87,\n DclResource = 88,\n DclConstantBuffer = 89,\n DclSampler = 90,\n DclIndexRange = 91,\n DclGsOutputPrimitiveTopology = 92,\n DclGsInputPrimitive = 93,\n DclMaxOutputVertexCount = 94,\n DclInput = 95,\n DclInputSgv = 96,\n DclInputSiv = 97,\n DclInputPs = 98,\n DclInputPsSgv = 99,\n DclInputPsSiv = 100,\n DclOutput = 101,\n DclOutputSgv = 102,\n DclOutputSiv = 103,\n DclTemps = 104,\n DclIndexableTemp = 105,\n DclGlobalFlags = 106,\n Reserved0 = 107,\n Lod = 108,\n Gather4 = 109,\n SamplePos = 110,\n SampleInfo = 111,\n Reserved1 = 112,\n HsDecls = 113,\n HsControlPointPhase = 114,\n HsForkPhase = 
115,\n HsJoinPhase = 116,\n EmitStream = 117,\n CutStream = 118,\n EmitThenCutStream = 119,\n InterfaceCall = 120,\n BufInfo = 121,\n DerivRtxCoarse = 122,\n DerivRtxFine = 123,\n DerivRtyCoarse = 124,\n DerivRtyFine = 125,\n Gather4C = 126,\n Gather4Po = 127,\n Gather4PoC = 128,\n Rcp = 129,\n F32toF16 = 130,\n F16toF32 = 131,\n UAddc = 132,\n USubb = 133,\n CountBits = 134,\n FirstBitHi = 135,\n FirstBitLo = 136,\n FirstBitShi = 137,\n UBfe = 138,\n IBfe = 139,\n Bfi = 140,\n BfRev = 141,\n Swapc = 142,\n DclStream = 143,\n DclFunctionBody = 144,\n DclFunctionTable = 145,\n DclInterface = 146,\n DclInputControlPointCount = 147,\n DclOutputControlPointCount = 148,\n DclTessDomain = 149,\n DclTessPartitioning = 150,\n DclTessOutputPrimitive = 151,\n DclHsMaxTessFactor = 152,\n DclHsForkPhaseInstanceCount = 153,\n DclHsJoinPhaseInstanceCount = 154,\n DclThreadGroup = 155,\n DclUavTyped = 156,\n DclUavRaw = 157,\n DclUavStructured = 158,\n DclThreadGroupSharedMemoryRaw = 159,\n DclThreadGroupSharedMemoryStructured = 160,\n DclResourceRaw = 161,\n DclResourceStructured = 162,\n LdUavTyped = 163,\n StoreUavTyped = 164,\n LdRaw = 165,\n StoreRaw = 166,\n LdStructured = 167,\n StoreStructured = 168,\n AtomicAnd = 169,\n AtomicOr = 170,\n AtomicXor = 171,\n AtomicCmpStore = 172,\n AtomicIAdd = 173,\n AtomicIMax = 174,\n AtomicIMin = 175,\n AtomicUMax = 176,\n AtomicUMin = 177,\n ImmAtomicAlloc = 178,\n ImmAtomicConsume = 179,\n ImmAtomicIAdd = 180,\n ImmAtomicAnd = 181,\n ImmAtomicOr = 182,\n ImmAtomicXor = 183,\n ImmAtomicExch = 184,\n ImmAtomicCmpExch = 185,\n ImmAtomicIMax = 186,\n ImmAtomicIMin = 187,\n ImmAtomicUMax = 188,\n ImmAtomicUMin = 189,\n Sync = 190,\n DAdd = 191,\n DMax = 192,\n DMin = 193,\n DMul = 194,\n DEq = 195,\n DGe = 196,\n DLt = 197,\n DNe = 198,\n DMov = 199,\n DMovc = 200,\n DtoF = 201,\n FtoD = 202,\n EvalSnapped = 203,\n EvalSampleIndex = 204,\n EvalCentroid = 205,\n DclGsInstanceCount = 206,\n Abort = 207,\n DebugBreak = 208,\n 
ReservedBegin11_1 = 209,\n DDiv = 210,\n DFma = 211,\n DRcp = 212,\n Msad = 213,\n DtoI = 214,\n DtoU = 215,\n ItoD = 216,\n UtoD = 217,\n ReservedBegin11_2 = 218,\n Gather4S = 219,\n Gather4CS = 220,\n Gather4PoS = 221,\n Gather4PoCS = 222,\n LdS = 223,\n LdMsS = 224,\n LdUavTypedS = 225,\n LdRawS = 226,\n LdStructuredS = 227,\n SampleLS = 228,\n SampleClzS = 229,\n SampleClampS = 230,\n SampleBClampS = 231,\n SampleDClampS = 232,\n SampleCClampS = 233,\n CheckAccessFullyMapped = 234,\n };\n \n \n /**\n * \\brief Extended opcode\n */\n enum class DxbcExtOpcode : uint32_t {\n Empty = 0,\n SampleControls = 1,\n ResourceDim = 2,\n ResourceReturnType = 3,\n };\n \n \n /**\n * \\brief Operand type\n * \n * Selects the 'register file' from which\n * to retrieve an operand's value.\n */\n enum class DxbcOperandType : uint32_t {\n Temp = 0,\n Input = 1,\n Output = 2,\n IndexableTemp = 3,\n Imm32 = 4,\n Imm64 = 5,\n Sampler = 6,\n Resource = 7,\n ConstantBuffer = 8,\n ImmediateConstantBuffer = 9,\n Label = 10,\n InputPrimitiveId = 11,\n OutputDepth = 12,\n Null = 13,\n Rasterizer = 14,\n OutputCoverageMask = 15,\n Stream = 16,\n FunctionBody = 17,\n FunctionTable = 18,\n Interface = 19,\n FunctionInput = 20,\n FunctionOutput = 21,\n OutputControlPointId = 22,\n InputForkInstanceId = 23,\n InputJoinInstanceId = 24,\n InputControlPoint = 25,\n OutputControlPoint = 26,\n InputPatchConstant = 27,\n InputDomainPoint = 28,\n ThisPointer = 29,\n UnorderedAccessView = 30,\n ThreadGroupSharedMemory = 31,\n InputThreadId = 32,\n InputThreadGroupId = 33,\n InputThreadIdInGroup = 34,\n InputCoverageMask = 35,\n InputThreadIndexInGroup = 36,\n InputGsInstanceId = 37,\n OutputDepthGe = 38,\n OutputDepthLe = 39,\n CycleCounter = 40,\n OutputStencilRef = 41,\n InputInnerCoverage = 42,\n };\n \n \n /**\n * \\brief Number of components\n * \n * Used by operands to determine whether the\n * operand has one, four or zero components.\n */\n enum class DxbcComponentCount : uint32_t {\n 
Component0 = 0,\n Component1 = 1,\n Component4 = 2,\n };\n \n \n /**\n * \\brief Component selection mode\n * \n * When an operand has four components, the\n * component selection mode deterines which\n * components are used for the operation.\n */\n enum class DxbcRegMode : uint32_t {\n Mask = 0,\n Swizzle = 1,\n Select1 = 2,\n };\n \n \n /**\n * \\brief Index representation\n * \n * Determines how an operand\n * register index is stored.\n */\n enum class DxbcOperandIndexRepresentation : uint32_t {\n Imm32 = 0,\n Imm64 = 1,\n Relative = 2,\n Imm32Relative = 3,\n Imm64Relative = 4,\n };\n \n \n /**\n * \\brief Extended operand type\n */\n enum class DxbcOperandExt : uint32_t {\n OperandModifier = 1,\n };\n \n \n /**\n * \\brief Resource dimension\n * The type of a resource.\n */\n enum class DxbcResourceDim : uint32_t {\n Unknown = 0,\n Buffer = 1,\n Texture1D = 2,\n Texture2D = 3,\n Texture2DMs = 4,\n Texture3D = 5,\n TextureCube = 6,\n Texture1DArr = 7,\n Texture2DArr = 8,\n Texture2DMsArr = 9,\n TextureCubeArr = 10,\n RawBuffer = 11,\n StructuredBuffer = 12,\n };\n \n \n /**\n * \\brief Resource return type\n * Data type for resource read ops.\n */\n enum class DxbcResourceReturnType : uint32_t {\n Unorm = 1,\n Snorm = 2,\n Sint = 3,\n Uint = 4,\n Float = 5,\n Mixed = 6, /// ?\n Double = 7,\n Continued = 8, /// ?\n Unused = 9, /// ?\n };\n \n \n /**\n * \\brief Register component type\n * Data type of a register component.\n */\n enum class DxbcRegisterComponentType : uint32_t {\n Unknown = 0,\n Uint32 = 1,\n Sint32 = 2,\n Float32 = 3,\n };\n \n \n /**\n * \\brief Instruction return type\n */\n enum class DxbcInstructionReturnType : uint32_t {\n Float = 0,\n Uint = 1,\n };\n \n \n enum class DxbcSystemValue : uint32_t {\n None = 0,\n Position = 1,\n ClipDistance = 2,\n CullDistance = 3,\n RenderTargetId = 4,\n ViewportId = 5,\n VertexId = 6,\n PrimitiveId = 7,\n InstanceId = 8,\n IsFrontFace = 9,\n SampleIndex = 10,\n FinalQuadUeq0EdgeTessFactor = 11,\n 
FinalQuadVeq0EdgeTessFactor = 12,\n FinalQuadUeq1EdgeTessFactor = 13,\n FinalQuadVeq1EdgeTessFactor = 14,\n FinalQuadUInsideTessFactor = 15,\n FinalQuadVInsideTessFactor = 16,\n FinalTriUeq0EdgeTessFactor = 17,\n FinalTriVeq0EdgeTessFactor = 18,\n FinalTriWeq0EdgeTessFactor = 19,\n FinalTriInsideTessFactor = 20,\n FinalLineDetailTessFactor = 21,\n FinalLineDensityTessFactor = 22,\n Target = 64,\n Depth = 65,\n Coverage = 66,\n DepthGe = 67,\n DepthLe = 68\n };\n \n \n enum class DxbcInterpolationMode : uint32_t {\n Undefined = 0,\n Constant = 1,\n Linear = 2,\n LinearCentroid = 3,\n LinearNoPerspective = 4,\n LinearNoPerspectiveCentroid = 5,\n LinearSample = 6,\n LinearNoPerspectiveSample = 7,\n };\n \n \n enum class DxbcGlobalFlag : uint32_t {\n RefactoringAllowed = 0,\n DoublePrecision = 1,\n EarlyFragmentTests = 2,\n RawStructuredBuffers = 3,\n };\n \n using DxbcGlobalFlags = Flags;\n \n enum class DxbcZeroTest : uint32_t {\n TestZ = 0,\n TestNz = 1,\n };\n \n enum class DxbcResinfoType : uint32_t {\n Float = 0,\n RcpFloat = 1,\n Uint = 2,\n };\n \n enum class DxbcSyncFlag : uint32_t {\n ThreadsInGroup = 0,\n ThreadGroupSharedMemory = 1,\n UavMemoryGroup = 2,\n UavMemoryGlobal = 3,\n };\n \n using DxbcSyncFlags = Flags;\n \n \n /**\n * \\brief Geometry shader input primitive\n */\n enum class DxbcPrimitive : uint32_t {\n Undefined = 0,\n Point = 1,\n Line = 2,\n Triangle = 3,\n LineAdj = 6,\n TriangleAdj = 7,\n Patch1 = 8,\n Patch2 = 9,\n Patch3 = 10,\n Patch4 = 11,\n Patch5 = 12,\n Patch6 = 13,\n Patch7 = 14,\n Patch8 = 15,\n Patch9 = 16,\n Patch10 = 17,\n Patch11 = 18,\n Patch12 = 19,\n Patch13 = 20,\n Patch14 = 21,\n Patch15 = 22,\n Patch16 = 23,\n Patch17 = 24,\n Patch18 = 25,\n Patch19 = 26,\n Patch20 = 27,\n Patch21 = 28,\n Patch22 = 29,\n Patch23 = 30,\n Patch24 = 31,\n Patch25 = 32,\n Patch26 = 33,\n Patch27 = 34,\n Patch28 = 35,\n Patch29 = 36,\n Patch30 = 37,\n Patch31 = 38,\n Patch32 = 39,\n };\n \n \n /**\n * \\brief Geometry shader output topology\n 
*/\n enum class DxbcPrimitiveTopology : uint32_t {\n Undefined = 0,\n PointList = 1,\n LineList = 2,\n LineStrip = 3,\n TriangleList = 4,\n TriangleStrip = 5,\n LineListAdj = 10,\n LineStripAdj = 11,\n TriangleListAdj = 12,\n TriangleStripAdj = 13,\n };\n \n \n /**\n * \\brief Sampler operation mode\n */\n enum class DxbcSamplerMode : uint32_t {\n Default = 0,\n Comparison = 1,\n Mono = 2,\n };\n \n \n /**\n * \\brief Scalar value type\n * \n * Enumerates possible register component\n * types. Scalar types are represented as\n * a one-component vector type.\n */\n enum class DxbcScalarType : uint32_t {\n Uint32 = 0,\n Uint64 = 1,\n Sint32 = 2,\n Sint64 = 3,\n Float32 = 4,\n Float64 = 5,\n Bool = 6,\n };\n \n \n /**\n * \\brief Tessellator domain\n */\n enum class DxbcTessDomain : uint32_t {\n Undefined = 0,\n Isolines = 1,\n Triangles = 2,\n Quads = 3,\n };\n \n /**\n * \\brief Tessellator partitioning\n */\n enum class DxbcTessPartitioning : uint32_t {\n Undefined = 0,\n Integer = 1,\n Pow2 = 2,\n FractOdd = 3,\n FractEven = 4,\n };\n \n /**\n * \\brief UAV definition flags\n */\n enum class DxbcUavFlag : uint32_t {\n GloballyCoherent = 0,\n RasterizerOrdered = 1,\n };\n \n using DxbcUavFlags = Flags;\n \n /**\n * \\brief Tessellator output primitive\n */\n enum class DxbcTessOutputPrimitive : uint32_t {\n Undefined = 0,\n Point = 1,\n Line = 2,\n TriangleCw = 3,\n TriangleCcw = 4,\n };\n \n /**\n * \\brief Custom data class\n * \n * Stores which type of custom data is\n * referenced by the instruction.\n */\n enum class DxbcCustomDataClass : uint32_t {\n Comment = 0,\n DebugInfo = 1,\n Opaque = 2,\n ImmConstBuf = 3,\n };\n \n \n enum class DxbcResourceType : uint32_t {\n Typed = 0,\n Raw = 1,\n Structured = 2,\n };\n\n\n enum class DxbcConstantBufferAccessType : uint32_t {\n StaticallyIndexed = 0,\n DynamicallyIndexed = 1,\n };\n \n}"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/nt-headers.h", "/*\nThe MIT License (MIT)\n\nCopyright (c) 
2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#pragma once\n\n#include \n#include \n#include \n\n// need to pack these structure definitions\n\n// some constant definitions\n// clang-format off\nnamespace peparse {\nconstexpr std::uint32_t RICH_MAGIC_END = 0x68636952;\nconstexpr std::uint32_t RICH_MAGIC_START = 0x536e6144;\nconstexpr std::uint32_t RICH_OFFSET = 0x80;\nconstexpr std::uint16_t MZ_MAGIC = 0x5A4D;\nconstexpr std::uint32_t NT_MAGIC = 0x00004550;\nconstexpr std::uint16_t NUM_DIR_ENTRIES = 16;\nconstexpr std::uint16_t NT_OPTIONAL_32_MAGIC = 0x10B;\nconstexpr std::uint16_t NT_OPTIONAL_64_MAGIC = 0x20B;\nconstexpr std::uint16_t NT_SHORT_NAME_LEN = 8;\nconstexpr std::uint16_t SYMTAB_RECORD_LEN = 18;\n\n#ifndef _PEPARSE_WINDOWS_CONFLICTS\n// Machine Types\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_UNKNOWN = 0x0;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ALPHA = 0x184; // Alpha_AXP\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ALPHA64 = 0x284; // 
ALPHA64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AM33 = 0x1d3; // Matsushita AM33\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AMD64 = 0x8664; // x64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARM = 0x1c0; // ARM little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARM64 = 0xaa64; // ARM64 little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARMNT = 0x1c4; // ARM Thumb-2 little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AXP64 = 0x284; // ALPHA64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_CEE = 0xc0ee;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_CEF = 0xcef;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_EBC = 0xebc; // EFI byte code\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_I386 = 0x14c; // Intel 386 or later processors and compatible processors\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_IA64 = 0x200; // Intel Itanium processor family\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_LOONGARCH32 = 0x6232; // LoongArch 32-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_LOONGARCH64 = 0x6264; // LoongArch 64-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_M32R = 0x9041; // Mitsubishi M32R little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPS16 = 0x266; // MIPS16\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPSFPU = 0x366; // MIPS with FPU\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466; // MIPS16 with FPU\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPC = 0x1f0; // Power PC little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPCFP = 0x1f1; // Power PC with floating point support\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPCBE = 0x1f2; // Power PC big endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R3000 = 0x162; // MIPS little endian, 0x160 big-endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R4000 = 0x166; // MIPS little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R10000 = 0x168; // MIPS little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV32 = 0x5032; // 
RISC-V 32-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV64 = 0x5064; // RISC-V 64-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV128 = 0x5128; // RISC-V 128-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3 = 0x1a2; // Hitachi SH3\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3DSP = 0x1a3; // Hitachi SH3 DSP\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3E = 0x1a4; // Hitachi SH3E\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH4 = 0x1a6; // Hitachi SH4\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH5 = 0x1a8; // Hitachi SH5\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_THUMB = 0x1c2; // Thumb\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_TRICORE = 0x520; // Infineon\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169; // MIPS little-endian WCE v2\n\nconstexpr std::uint16_t IMAGE_FILE_RELOCS_STRIPPED = 0x0001;\nconstexpr std::uint16_t IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002;\nconstexpr std::uint16_t IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004;\nconstexpr std::uint16_t IMAGE_FILE_LOCAL_SYMS_STRIPPED = 0x0008;\nconstexpr std::uint16_t IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x0010;\nconstexpr std::uint16_t IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020;\nconstexpr std::uint16_t IMAGE_FILE_BYTES_REVERSED_LO = 0x0080;\nconstexpr std::uint16_t IMAGE_FILE_32BIT_MACHINE = 0x0100;\nconstexpr std::uint16_t IMAGE_FILE_DEBUG_STRIPPED = 0x0200;\nconstexpr std::uint16_t IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400;\nconstexpr std::uint16_t IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800;\nconstexpr std::uint16_t IMAGE_FILE_SYSTEM = 0x1000;\nconstexpr std::uint16_t IMAGE_FILE_DLL = 0x2000;\nconstexpr std::uint16_t IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000;\nconstexpr std::uint16_t IMAGE_FILE_BYTES_REVERSED_HI = 0x8000;\n\nconstexpr std::uint32_t IMAGE_SCN_TYPE_NO_PAD = 0x00000008;\nconstexpr std::uint32_t IMAGE_SCN_CNT_CODE = 0x00000020;\nconstexpr std::uint32_t IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040;\nconstexpr std::uint32_t 
IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080;\nconstexpr std::uint32_t IMAGE_SCN_LNK_OTHER = 0x00000100;\nconstexpr std::uint32_t IMAGE_SCN_LNK_INFO = 0x00000200;\nconstexpr std::uint32_t IMAGE_SCN_LNK_REMOVE = 0x00000800;\nconstexpr std::uint32_t IMAGE_SCN_LNK_COMDAT = 0x00001000;\nconstexpr std::uint32_t IMAGE_SCN_NO_DEFER_SPEC_EXC = 0x00004000;\nconstexpr std::uint32_t IMAGE_SCN_GPREL = 0x00008000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_FARDATA = 0x00008000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_PURGEABLE = 0x00020000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_16BIT = 0x00020000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_LOCKED = 0x00040000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_PRELOAD = 0x00080000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_1BYTES = 0x00100000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_2BYTES = 0x00200000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_4BYTES = 0x00300000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_8BYTES = 0x00400000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_16BYTES = 0x00500000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_32BYTES = 0x00600000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_64BYTES = 0x00700000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_128BYTES = 0x00800000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_256BYTES = 0x00900000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_512BYTES = 0x00A00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_MASK = 0x00F00000;\nconstexpr std::uint32_t IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_DISCARDABLE = 0x02000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_NOT_CACHED = 0x04000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_NOT_PAGED = 0x08000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_SHARED = 0x10000000;\nconstexpr 
std::uint32_t IMAGE_SCN_MEM_EXECUTE = 0x20000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_READ = 0x40000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_WRITE = 0x80000000;\n\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_UNKNOWN = 0;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_NATIVE = 1;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_GUI = 2;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_CUI = 3;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_OS2_CUI = 5;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_POSIX_CUI = 7;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_NATIVE_WINDOWS = 8;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_APPLICATION = 10;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_ROM = 13;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_XBOX = 14;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_XBOX_CODE_CATALOG = 17;\n\n// Symbol section number values\nconstexpr std::int16_t IMAGE_SYM_UNDEFINED = 0;\nconstexpr std::int16_t IMAGE_SYM_ABSOLUTE = -1;\nconstexpr std::int16_t IMAGE_SYM_DEBUG = -2;\n\n// Symbol table types\nconstexpr std::uint16_t IMAGE_SYM_TYPE_NULL = 0;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_VOID = 1;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_CHAR = 2;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_SHORT = 3;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_INT = 4;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_LONG = 5;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_FLOAT = 6;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_DOUBLE = 7;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_STRUCT = 8;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_UNION = 9;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_ENUM = 10;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_MOE = 11;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_BYTE = 12;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_WORD = 13;\nconstexpr 
std::uint16_t IMAGE_SYM_TYPE_UINT = 14;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_DWORD = 15;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_NULL = 0;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_POINTER = 1;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_FUNCTION = 2;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_ARRAY = 3;\n\n// Symbol table storage classes\nconstexpr std::uint8_t IMAGE_SYM_CLASS_END_OF_FUNCTION = static_cast(-1);\nconstexpr std::uint8_t IMAGE_SYM_CLASS_NULL = 0;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_AUTOMATIC = 1;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_EXTERNAL = 2;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_STATIC = 3;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_REGISTER = 4;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_EXTERNAL_DEF = 5;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_LABEL = 6;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_ARGUMENT = 9;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_STRUCT_TAG = 10;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNION_TAG = 12;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_TYPE_DEFINITION = 13;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_ENUM_TAG = 15;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_REGISTER_PARAM = 17;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_BIT_FIELD = 18;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_BLOCK = 100;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_FUNCTION = 101;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_END_OF_STRUCT = 102;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_FILE = 103;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_SECTION = 104;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_CLR_TOKEN = 107;\n\n// Optional header DLL characteristics\nconstexpr std::uint16_t 
IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY = 0x0080;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_ISOLATION = 0x0200;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_SEH = 0x0400;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_BIND = 0x0800;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_APPCONTAINER = 0x1000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 0x2000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_GUARD_CF = 0x4000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000;\n\n// Extended DLL characteristics\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT = 0x0001;\n#endif\n// clang-format on\n\nstruct dos_header {\n std::uint16_t e_magic;\n std::uint16_t e_cblp;\n std::uint16_t e_cp;\n std::uint16_t e_crlc;\n std::uint16_t e_cparhdr;\n std::uint16_t e_minalloc;\n std::uint16_t e_maxalloc;\n std::uint16_t e_ss;\n std::uint16_t e_sp;\n std::uint16_t e_csum;\n std::uint16_t e_ip;\n std::uint16_t e_cs;\n std::uint16_t e_lfarlc;\n std::uint16_t e_ovno;\n std::uint16_t e_res[4];\n std::uint16_t e_oemid;\n std::uint16_t e_oeminfo;\n std::uint16_t e_res2[10];\n std::uint32_t e_lfanew;\n};\n\nstruct file_header {\n std::uint16_t Machine;\n std::uint16_t NumberOfSections;\n std::uint32_t TimeDateStamp;\n std::uint32_t PointerToSymbolTable;\n std::uint32_t NumberOfSymbols;\n std::uint16_t SizeOfOptionalHeader;\n std::uint16_t Characteristics;\n};\n\nstruct data_directory {\n std::uint32_t VirtualAddress;\n std::uint32_t Size;\n};\n\nenum data_directory_kind {\n DIR_EXPORT = 0,\n DIR_IMPORT = 1,\n DIR_RESOURCE = 2,\n DIR_EXCEPTION = 3,\n DIR_SECURITY = 4,\n DIR_BASERELOC = 5,\n DIR_DEBUG = 6,\n DIR_ARCHITECTURE = 7,\n DIR_GLOBALPTR = 8,\n DIR_TLS = 9,\n DIR_LOAD_CONFIG = 
10,\n DIR_BOUND_IMPORT = 11,\n DIR_IAT = 12,\n DIR_DELAY_IMPORT = 13,\n DIR_COM_DESCRIPTOR = 14,\n DIR_RESERVED = 15,\n};\n\nstruct optional_header_32 {\n std::uint16_t Magic;\n std::uint8_t MajorLinkerVersion;\n std::uint8_t MinorLinkerVersion;\n std::uint32_t SizeOfCode;\n std::uint32_t SizeOfInitializedData;\n std::uint32_t SizeOfUninitializedData;\n std::uint32_t AddressOfEntryPoint;\n std::uint32_t BaseOfCode;\n std::uint32_t BaseOfData;\n std::uint32_t ImageBase;\n std::uint32_t SectionAlignment;\n std::uint32_t FileAlignment;\n std::uint16_t MajorOperatingSystemVersion;\n std::uint16_t MinorOperatingSystemVersion;\n std::uint16_t MajorImageVersion;\n std::uint16_t MinorImageVersion;\n std::uint16_t MajorSubsystemVersion;\n std::uint16_t MinorSubsystemVersion;\n std::uint32_t Win32VersionValue;\n std::uint32_t SizeOfImage;\n std::uint32_t SizeOfHeaders;\n std::uint32_t CheckSum;\n std::uint16_t Subsystem;\n std::uint16_t DllCharacteristics;\n std::uint32_t SizeOfStackReserve;\n std::uint32_t SizeOfStackCommit;\n std::uint32_t SizeOfHeapReserve;\n std::uint32_t SizeOfHeapCommit;\n std::uint32_t LoaderFlags;\n std::uint32_t NumberOfRvaAndSizes;\n data_directory DataDirectory[NUM_DIR_ENTRIES];\n};\n\n/*\n * This is used for PE32+ binaries. 
It is similar to optional_header_32\n * except some fields don't exist here (BaseOfData), and others are bigger.\n */\nstruct optional_header_64 {\n std::uint16_t Magic;\n std::uint8_t MajorLinkerVersion;\n std::uint8_t MinorLinkerVersion;\n std::uint32_t SizeOfCode;\n std::uint32_t SizeOfInitializedData;\n std::uint32_t SizeOfUninitializedData;\n std::uint32_t AddressOfEntryPoint;\n std::uint32_t BaseOfCode;\n std::uint64_t ImageBase;\n std::uint32_t SectionAlignment;\n std::uint32_t FileAlignment;\n std::uint16_t MajorOperatingSystemVersion;\n std::uint16_t MinorOperatingSystemVersion;\n std::uint16_t MajorImageVersion;\n std::uint16_t MinorImageVersion;\n std::uint16_t MajorSubsystemVersion;\n std::uint16_t MinorSubsystemVersion;\n std::uint32_t Win32VersionValue;\n std::uint32_t SizeOfImage;\n std::uint32_t SizeOfHeaders;\n std::uint32_t CheckSum;\n std::uint16_t Subsystem;\n std::uint16_t DllCharacteristics;\n std::uint64_t SizeOfStackReserve;\n std::uint64_t SizeOfStackCommit;\n std::uint64_t SizeOfHeapReserve;\n std::uint64_t SizeOfHeapCommit;\n std::uint32_t LoaderFlags;\n std::uint32_t NumberOfRvaAndSizes;\n data_directory DataDirectory[NUM_DIR_ENTRIES];\n};\n\nstruct nt_header_32 {\n std::uint32_t Signature;\n file_header FileHeader;\n optional_header_32 OptionalHeader;\n optional_header_64 OptionalHeader64;\n std::uint16_t OptionalMagic;\n};\n\nstruct rich_entry {\n std::uint16_t ProductId;\n std::uint16_t BuildNumber;\n std::uint32_t Count;\n};\n\nstruct rich_header {\n std::uint32_t StartSignature;\n std::vector Entries;\n std::uint32_t EndSignature;\n std::uint32_t DecryptionKey;\n std::uint32_t Checksum;\n bool isPresent;\n bool isValid;\n};\n\n/*\n * This structure is only used to know how far to move the offset\n * when parsing resources. 
The data is stored in a resource_dir_entry\n * struct but that also has extra information used in the parsing which\n * causes the size to be inaccurate.\n */\nstruct resource_dir_entry_sz {\n std::uint32_t ID;\n std::uint32_t RVA;\n};\n\nstruct resource_dir_entry {\n inline resource_dir_entry(void) : ID(0), RVA(0), type(0), name(0), lang(0) {\n }\n\n std::uint32_t ID;\n std::uint32_t RVA;\n std::uint32_t type;\n std::uint32_t name;\n std::uint32_t lang;\n std::string type_str;\n std::string name_str;\n std::string lang_str;\n};\n\nstruct resource_dir_table {\n std::uint32_t Characteristics;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint16_t NameEntries;\n std::uint16_t IDEntries;\n};\n\nstruct resource_dat_entry {\n std::uint32_t RVA;\n std::uint32_t size;\n std::uint32_t codepage;\n std::uint32_t reserved;\n};\n\nstruct image_section_header {\n std::uint8_t Name[NT_SHORT_NAME_LEN];\n union {\n std::uint32_t PhysicalAddress;\n std::uint32_t VirtualSize;\n } Misc;\n std::uint32_t VirtualAddress;\n std::uint32_t SizeOfRawData;\n std::uint32_t PointerToRawData;\n std::uint32_t PointerToRelocations;\n std::uint32_t PointerToLinenumbers;\n std::uint16_t NumberOfRelocations;\n std::uint16_t NumberOfLinenumbers;\n std::uint32_t Characteristics;\n};\n\nstruct import_dir_entry {\n std::uint32_t LookupTableRVA;\n std::uint32_t TimeStamp;\n std::uint32_t ForwarderChain;\n std::uint32_t NameRVA;\n std::uint32_t AddressRVA;\n};\n\nstruct export_dir_table {\n std::uint32_t ExportFlags;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t NameRVA;\n std::uint32_t OrdinalBase;\n std::uint32_t AddressTableEntries;\n std::uint32_t NumberOfNamePointers;\n std::uint32_t ExportAddressTableRVA;\n std::uint32_t NamePointerRVA;\n std::uint32_t OrdinalTableRVA;\n};\n\nstruct debug_dir_entry {\n std::uint32_t Characteristics;\n std::uint32_t TimeStamp;\n std::uint16_t 
MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t Type;\n std::uint32_t SizeOfData;\n std::uint32_t AddressOfRawData;\n std::uint32_t PointerToRawData;\n};\n\nenum reloc_type {\n RELOC_ABSOLUTE = 0,\n RELOC_HIGH = 1,\n RELOC_LOW = 2,\n RELOC_HIGHLOW = 3,\n RELOC_HIGHADJ = 4,\n RELOC_MIPS_JMPADDR = 5, // only valid on MIPS\n RELOC_ARM_MOV32 = 5, // only valid on ARM/Thumb\n RELOC_RISCV_HIGH20 = 5, // only valid on RISC-V\n RELOC_RESERVED = 6,\n RELOC_THUMB_MOV32 = 7, // only valid on Thumb\n RELOC_RISCV_LOW32I = 7, // only valid on RISC-V\n RELOC_RISCV_LOW12S = 8, // only valid on RISC-V\n RELOC_LOONGARCH32_MARK_LA = 8, // only valid on LoongArch 32\n RELOC_LOONGARCH64_MARK_LA = 8, // only valid on LoongArch 64\n RELOC_MIPS_JMPADDR16 = 9, // only valid on MIPS\n RELOC_IA64_IMM64 = 9,\n RELOC_DIR64 = 10\n};\n\nstruct reloc_block {\n std::uint32_t PageRVA;\n std::uint32_t BlockSize;\n};\n\nstruct image_load_config_code_integrity {\n std::uint16_t Flags;\n std::uint16_t Catalog;\n std::uint32_t CatalogOffset;\n std::uint32_t Reserved;\n};\n\nstruct image_load_config_32 {\n std::uint32_t Size;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t GlobalFlagsClear;\n std::uint32_t GlobalFlagsSet;\n std::uint32_t CriticalSectionDefaultTimeout;\n std::uint32_t DeCommitFreeBlockThreshold;\n std::uint32_t DeCommitTotalFreeThreshold;\n std::uint32_t LockPrefixTable;\n std::uint32_t MaximumAllocationSize;\n std::uint32_t VirtualMemoryThreshold;\n std::uint32_t ProcessHeapFlags;\n std::uint32_t ProcessAffinityMask;\n std::uint16_t CSDVersion;\n std::uint16_t DependentLoadFlags;\n std::uint32_t EditList;\n std::uint32_t SecurityCookie;\n std::uint32_t SEHandlerTable;\n std::uint32_t SEHandlerCount;\n std::uint32_t GuardCFCheckFunctionPointer;\n std::uint32_t GuardCFDispatchFunctionPointer;\n std::uint32_t GuardCFFunctionTable;\n std::uint32_t GuardCFFunctionCount;\n std::uint32_t GuardFlags;\n 
image_load_config_code_integrity CodeIntegrity;\n std::uint32_t GuardAddressTakenIatEntryTable;\n std::uint32_t GuardAddressTakenIatEntryCount;\n std::uint32_t GuardLongJumpTargetTable;\n std::uint32_t GuardLongJumpTargetCount;\n std::uint32_t DynamicValueRelocTable;\n std::uint32_t CHPEMetadataPointer;\n std::uint32_t GuardRFFailureRoutine;\n std::uint32_t GuardRFFailureRoutineFunctionPointer;\n std::uint32_t DynamicValueRelocTableOffset;\n std::uint16_t DynamicValueRelocTableSection;\n std::uint16_t Reserved2;\n std::uint32_t GuardRFVerifyStackPointerFunctionPointer;\n std::uint32_t HotPatchTableOffset;\n std::uint32_t Reserved3;\n std::uint32_t EnclaveConfigurationPointer;\n std::uint32_t VolatileMetadataPointer;\n};\n\nstruct image_load_config_64 {\n std::uint32_t Size;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t GlobalFlagsClear;\n std::uint32_t GlobalFlagsSet;\n std::uint32_t CriticalSectionDefaultTimeout;\n std::uint64_t DeCommitFreeBlockThreshold;\n std::uint64_t DeCommitTotalFreeThreshold;\n std::uint64_t LockPrefixTable;\n std::uint64_t MaximumAllocationSize;\n std::uint64_t VirtualMemoryThreshold;\n std::uint64_t ProcessAffinityMask;\n std::uint32_t ProcessHeapFlags;\n std::uint16_t CSDVersion;\n std::uint16_t DependentLoadFlags;\n std::uint64_t EditList;\n std::uint64_t SecurityCookie;\n std::uint64_t SEHandlerTable;\n std::uint64_t SEHandlerCount;\n std::uint64_t GuardCFCheckFunctionPointer;\n std::uint64_t GuardCFDispatchFunctionPointer;\n std::uint64_t GuardCFFunctionTable;\n std::uint64_t GuardCFFunctionCount;\n std::uint32_t GuardFlags;\n image_load_config_code_integrity CodeIntegrity;\n std::uint64_t GuardAddressTakenIatEntryTable;\n std::uint64_t GuardAddressTakenIatEntryCount;\n std::uint64_t GuardLongJumpTargetTable;\n std::uint64_t GuardLongJumpTargetCount;\n std::uint64_t DynamicValueRelocTable;\n std::uint64_t CHPEMetadataPointer;\n std::uint64_t GuardRFFailureRoutine;\n 
std::uint64_t GuardRFFailureRoutineFunctionPointer;\n std::uint32_t DynamicValueRelocTableOffset;\n std::uint16_t DynamicValueRelocTableSection;\n std::uint16_t Reserved2;\n std::uint64_t GuardRFVerifyStackPointerFunctionPointer;\n std::uint32_t HotPatchTableOffset;\n std::uint32_t Reserved3;\n std::uint64_t EnclaveConfigurationPointer;\n std::uint64_t VolatileMetadataPointer;\n};\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/log/log_debug.h", "#pragma once\n\n#include \n\n#include \"log/log.h\"\n\n#ifdef _MSC_VER\n#define METHOD_NAME __FUNCSIG__\n#else\n#define METHOD_NAME __PRETTY_FUNCTION__\n#endif\n\n#define TRACE_ENABLED\n\n#ifdef TRACE_ENABLED\n#define TRACE(...) \\\n do { dxvk::debug::trace(METHOD_NAME, ##__VA_ARGS__); } while (0)\n#else\n#define TRACE(...) \\\n do { } while (0)\n#endif\n\nnamespace dxvk::debug {\n \n std::string methodName(const std::string& prettyName);\n \n inline void traceArgs(std::stringstream& stream) { }\n \n template\n void traceArgs(std::stringstream& stream, const Arg1& arg1) {\n stream << arg1;\n }\n \n template\n void traceArgs(std::stringstream& stream, const Arg1& arg1, const Arg2& arg2, const Args&... args) {\n stream << arg1 << \",\";\n traceArgs(stream, arg2, args...);\n }\n \n template\n void trace(const std::string& funcName, const Args&... 
args) {\n std::stringstream stream;\n stream << methodName(funcName) << \"(\";\n traceArgs(stream, args...);\n stream << \")\";\n Logger::trace(stream.str());\n }\n \n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/unicode_codecvt.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2019 Trail of Bits, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n#include \n\nnamespace peparse {\n// See\n// https://stackoverflow.com/questions/38688417/utf-conversion-functions-in-c11\nstd::string from_utf16(const UCharString &u) {\n std::wstring_convert, char16_t> convert;\n return convert.to_bytes(u);\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_chunk_shex.h", "class DxbcShex {\n public:\n DxbcShex(DxbcReader reader) {\n // The shader version and type are stored in a 32-bit unit,\n // where the first byte contains the major and minor version\n // numbers, and the high word contains the program type.\n reader.skip(2);\n auto pType = reader.readEnum();\n m_programInfo = DxbcProgramInfo(pType);\n \n // Read the actual shader code as an array of DWORDs.\n auto codeLength = reader.readu32() - 2;\n m_code.resize(codeLength);\n reader.read(m_code.data(), codeLength * sizeof(uint32_t));\n }\n ~DxbcShex() {\n \n }\n private:\n DxbcProgramInfo m_programInfo;\n std::vector m_code;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_header.h", "class DxbcHeader {\n public:\n DxbcHeader(DxbcReader& reader) {\n // FourCC at the start of the file, must be 'DXBC'\n DxbcTag fourcc = reader.readTag();\n \n if (fourcc != \"DXBC\")\n throw DxvkError(\"DxbcHeader::DxbcHeader: Invalid fourcc, expected 'DXBC'\");\n \n // Stuff we don't actually need to store\n reader.skip(4 * sizeof(uint32_t)); // Check sum\n reader.skip(1 * sizeof(uint32_t)); // Constant 1\n reader.skip(1 * sizeof(uint32_t)); // Bytecode length\n \n // Number of chunks in the file\n uint32_t chunkCount = reader.readu32();\n \n // Chunk offsets are stored immediately after\n for (uint32_t i = 0; i < chunkCount; i++)\n 
m_chunkOffsets.push_back(reader.readu32());\n }\n ~DxbcHeader() {\n \n }\n private:\n std::vector m_chunkOffsets;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_include.h", "#pragma once\n\n#include \n#include \n\n#include \"dxvk_limits.h\"\n#include \"dxvk_pipelayout.h\"\n\n#include \"log/log.h\"\n#include \"log/log_debug.h\"\n\n#include \"rc/util_rc.h\"\n#include \"rc/util_rc_ptr.h\"\n\n#include \"util_bit.h\"\n#include \"util_enum.h\"\n#include \"util_error.h\"\n#include \"util_string.h\"\n#include \"util_flags.h\"\n#include \"util_small_vector.h\"\n"], ["/lsfg-vk/thirdparty/dxbc/src/util/util_log.cpp", "#include \"log/log_debug.h\"\n\nnamespace dxvk::debug {\n \n std::string methodName(const std::string& prettyName) {\n size_t end = prettyName.find(\"(\");\n size_t begin = prettyName.substr(0, end).rfind(\" \") + 1;\n return prettyName.substr(begin,end - begin);\n }\n \n}\n"], ["/lsfg-vk/thirdparty/toml11/src/skip.cpp", "#include \n#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\n\nnamespace detail\n{\ntemplate bool skip_whitespace (location& loc, const context&);\ntemplate bool skip_empty_lines (location& loc, const context&);\ntemplate void skip_comment_block (location& loc, const context&);\ntemplate void skip_empty_or_comment_lines(location& loc, const context&);\ntemplate void skip_string_like (location& loc, const context&);\ntemplate void skip_array_like (location& loc, const context&);\ntemplate void skip_inline_table_like (location& loc, const context&);\ntemplate void skip_value (location& loc, const context&);\ntemplate void skip_key_value_pair (location& loc, const context&);\ntemplate void skip_until_next_table (location& loc, const context&);\n\ntemplate bool skip_whitespace (location& loc, const context&);\ntemplate bool skip_empty_lines (location& loc, const context&);\ntemplate void skip_comment_block (location& loc, const 
context&);\ntemplate void skip_empty_or_comment_lines(location& loc, const context&);\ntemplate void skip_string_like (location& loc, const context&);\ntemplate void skip_array_like (location& loc, const context&);\ntemplate void skip_inline_table_like (location& loc, const context&);\ntemplate void skip_value (location& loc, const context&);\ntemplate void skip_key_value_pair (location& loc, const context&);\ntemplate void skip_until_next_table (location& loc, const context&);\n\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/toml11/src/serializer.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\nstruct type_config;\nstruct ordered_type_config;\n\ntemplate typename basic_value::string_type\nformat(const basic_value&, const spec);\n\ntemplate typename basic_value::string_type\nformat(const typename basic_value::key_type& k,\n const basic_value& v, const spec);\n\ntemplate typename basic_value::string_type\nformat(const std::vector::key_type>& ks,\n const basic_value& v, const spec s);\n\ntemplate typename basic_value::string_type\nformat(const basic_value&, const spec);\n\ntemplate typename basic_value::string_type\nformat(const typename basic_value::key_type& k,\n const basic_value& v, const spec);\n\ntemplate typename basic_value::string_type\nformat(const std::vector::key_type>& ks,\n const basic_value& v, const spec s);\n\nnamespace detail\n{\ntemplate class serializer<::toml::type_config>;\ntemplate class serializer<::toml::ordered_type_config>;\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/rc/util_rc.h", "#pragma once\n\n#include \n\n#include \"../util_likely.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Reference-counted object\n */\n class RcObject {\n \n public:\n \n /**\n * \\brief Increments reference count\n * \\returns New reference count\n */\n force_inline uint32_t incRef() {\n return ++m_refCount;\n }\n 
\n /**\n * \\brief Decrements reference count\n * \\returns New reference count\n */\n force_inline uint32_t decRef() {\n return --m_refCount;\n }\n \n private:\n \n std::atomic m_refCount = { 0u };\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_hash.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n\n struct DxvkEq {\n template\n size_t operator () (const T& a, const T& b) const {\n return a.eq(b);\n }\n };\n\n struct DxvkHash {\n template\n size_t operator () (const T& object) const {\n return object.hash();\n }\n };\n\n class DxvkHashState {\n\n public:\n\n void add(size_t hash) {\n m_value ^= hash + 0x9e3779b9\n + (m_value << 6)\n + (m_value >> 2);\n }\n\n operator size_t () const {\n return m_value;\n }\n\n private:\n\n size_t m_value = 0;\n\n };\n\n}\n"], ["/lsfg-vk/thirdparty/toml11/src/parser.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\n\ntemplate result, std::vector> try_parse(std::vector, std::string, spec);\ntemplate result, std::vector> try_parse(std::istream&, std::string, spec);\ntemplate result, std::vector> try_parse(std::string, spec);\ntemplate result, std::vector> try_parse(FILE*, std::string, spec);\ntemplate result, std::vector> try_parse_str(std::string, spec, cxx::source_location);\n\ntemplate basic_value parse(std::vector, std::string, spec);\ntemplate basic_value parse(std::istream&, std::string, spec);\ntemplate basic_value parse(std::string, spec);\ntemplate basic_value parse(FILE*, std::string, spec);\ntemplate basic_value parse_str(std::string, spec, cxx::source_location);\n\ntemplate result, std::vector> try_parse(std::vector, std::string, spec);\ntemplate result, std::vector> try_parse(std::istream&, std::string, spec);\ntemplate result, std::vector> try_parse(std::string, spec);\ntemplate result, std::vector> try_parse(FILE*, std::string, spec);\ntemplate result, std::vector> 
try_parse_str(std::string, spec, cxx::source_location);\n\ntemplate basic_value parse(std::vector, std::string, spec);\ntemplate basic_value parse(std::istream&, std::string, spec);\ntemplate basic_value parse(std::string, spec);\ntemplate basic_value parse(FILE*, std::string, spec);\ntemplate basic_value parse_str(std::string, spec, cxx::source_location);\n\n#if defined(TOML11_HAS_FILESYSTEM)\ntemplate cxx::enable_if_t::value, result, std::vector>> try_parse(const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, result, std::vector>> try_parse(const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, basic_value > parse (const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, basic_value > parse (const std::filesystem::path&, spec);\n#endif // filesystem\n\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_include.h", "#pragma once\n\n#include \n#include \n\n#include \"log/log.h\"\n#include \"log/log_debug.h\"\n\n#include \"util_error.h\"\n#include \"util_flags.h\"\n#include \"util_likely.h\"\n#include \"util_string.h\"\n\n#include \"rc/util_rc.h\"\n#include \"rc/util_rc_ptr.h\"\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/to_string.h", "#pragma once\n\n#include \n#include \n\n#if defined(_MSC_VER)\ntypedef std::basic_string UCharString;\n#else\ntypedef std::u16string UCharString;\n#endif\n\nnamespace peparse {\ntemplate \nstatic std::string to_string(T t, std::ios_base &(*f)(std::ios_base &) ) {\n std::ostringstream oss;\n oss << f << t;\n return oss.str();\n}\n\nstd::string from_utf16(const UCharString &u);\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_modinfo.h", "#pragma once\n\n#include \"dxbc_options.h\"\n\nnamespace dxvk {\n\n /**\n * \\brief Tessellation info\n * \n * Stores the maximum tessellation factor\n * to export from tessellation shaders.\n */\n struct DxbcTessInfo {\n float maxTessFactor;\n };\n\n /**\n * \\brief Xfb capture entry\n * 
\n * Stores an output variable to capture,\n * as well as the buffer to write it to.\n */\n struct DxbcXfbEntry {\n const char* semanticName;\n uint32_t semanticIndex;\n uint32_t componentIndex;\n uint32_t componentCount;\n uint32_t streamId;\n uint32_t bufferId;\n uint32_t offset;\n };\n\n /**\n * \\brief Xfb info\n * \n * Stores capture entries and output buffer\n * strides. This structure must only be\n * defined if \\c entryCount is non-zero.\n */\n struct DxbcXfbInfo {\n uint32_t entryCount;\n DxbcXfbEntry entries[128];\n uint32_t strides[4];\n int32_t rasterizedStream;\n };\n\n /**\n * \\brief Shader module info\n * \n * Stores information which may affect shader compilation.\n * This data can be supplied by the client API implementation.\n */\n struct DxbcModuleInfo {\n DxbcOptions options;\n DxbcTessInfo* tess;\n DxbcXfbInfo* xfb;\n };\n\n}"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/thirdparty/GLSL.std.450.h", "/*\n** Copyright (c) 2014-2024 The Khronos Group Inc.\n**\n** Permission is hereby granted, free of charge, to any person obtaining a copy\n** of this software and/or associated documentation files (the \"Materials\"),\n** to deal in the Materials without restriction, including without limitation\n** the rights to use, copy, modify, merge, publish, distribute, sublicense,\n** and/or sell copies of the Materials, and to permit persons to whom the\n** Materials are furnished to do so, subject to the following conditions:\n**\n** The above copyright notice and this permission notice shall be included in\n** all copies or substantial portions of the Materials.\n**\n** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS\n** STANDARDS. 
THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND\n** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ \n**\n** THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS\n** IN THE MATERIALS.\n*/\n\n#ifndef GLSLstd450_H\n#define GLSLstd450_H\n\nstatic const int GLSLstd450Version = 100;\nstatic const int GLSLstd450Revision = 3;\n\nenum GLSLstd450 {\n GLSLstd450Bad = 0, // Don't use\n\n GLSLstd450Round = 1,\n GLSLstd450RoundEven = 2,\n GLSLstd450Trunc = 3,\n GLSLstd450FAbs = 4,\n GLSLstd450SAbs = 5,\n GLSLstd450FSign = 6,\n GLSLstd450SSign = 7,\n GLSLstd450Floor = 8,\n GLSLstd450Ceil = 9,\n GLSLstd450Fract = 10,\n\n GLSLstd450Radians = 11,\n GLSLstd450Degrees = 12,\n GLSLstd450Sin = 13,\n GLSLstd450Cos = 14,\n GLSLstd450Tan = 15,\n GLSLstd450Asin = 16,\n GLSLstd450Acos = 17,\n GLSLstd450Atan = 18,\n GLSLstd450Sinh = 19,\n GLSLstd450Cosh = 20,\n GLSLstd450Tanh = 21,\n GLSLstd450Asinh = 22,\n GLSLstd450Acosh = 23,\n GLSLstd450Atanh = 24,\n GLSLstd450Atan2 = 25,\n\n GLSLstd450Pow = 26,\n GLSLstd450Exp = 27,\n GLSLstd450Log = 28,\n GLSLstd450Exp2 = 29,\n GLSLstd450Log2 = 30,\n GLSLstd450Sqrt = 31,\n GLSLstd450InverseSqrt = 32,\n\n GLSLstd450Determinant = 33,\n GLSLstd450MatrixInverse = 34,\n\n GLSLstd450Modf = 35, // second operand needs an OpVariable to write to\n GLSLstd450ModfStruct = 36, // no OpVariable operand\n GLSLstd450FMin = 37,\n GLSLstd450UMin = 38,\n GLSLstd450SMin = 39,\n GLSLstd450FMax = 40,\n GLSLstd450UMax = 41,\n GLSLstd450SMax = 42,\n GLSLstd450FClamp = 43,\n GLSLstd450UClamp = 44,\n GLSLstd450SClamp = 45,\n 
GLSLstd450FMix = 46,\n GLSLstd450IMix = 47, // Reserved\n GLSLstd450Step = 48,\n GLSLstd450SmoothStep = 49,\n\n GLSLstd450Fma = 50,\n GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to\n GLSLstd450FrexpStruct = 52, // no OpVariable operand\n GLSLstd450Ldexp = 53,\n\n GLSLstd450PackSnorm4x8 = 54,\n GLSLstd450PackUnorm4x8 = 55,\n GLSLstd450PackSnorm2x16 = 56,\n GLSLstd450PackUnorm2x16 = 57,\n GLSLstd450PackHalf2x16 = 58,\n GLSLstd450PackDouble2x32 = 59,\n GLSLstd450UnpackSnorm2x16 = 60,\n GLSLstd450UnpackUnorm2x16 = 61,\n GLSLstd450UnpackHalf2x16 = 62,\n GLSLstd450UnpackSnorm4x8 = 63,\n GLSLstd450UnpackUnorm4x8 = 64,\n GLSLstd450UnpackDouble2x32 = 65,\n\n GLSLstd450Length = 66,\n GLSLstd450Distance = 67,\n GLSLstd450Cross = 68,\n GLSLstd450Normalize = 69,\n GLSLstd450FaceForward = 70,\n GLSLstd450Reflect = 71,\n GLSLstd450Refract = 72,\n\n GLSLstd450FindILsb = 73,\n GLSLstd450FindSMsb = 74,\n GLSLstd450FindUMsb = 75,\n\n GLSLstd450InterpolateAtCentroid = 76,\n GLSLstd450InterpolateAtSample = 77,\n GLSLstd450InterpolateAtOffset = 78,\n\n GLSLstd450NMin = 79,\n GLSLstd450NMax = 80,\n GLSLstd450NClamp = 81,\n\n GLSLstd450Count\n};\n\n#endif // #ifndef GLSLstd450_H\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_error.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n \n /**\n * \\brief DXVK error\n * \n * A generic exception class that stores a\n * message. Exceptions should be logged.\n */\n class DxvkError {\n \n public:\n \n DxvkError() { }\n DxvkError(std::string&& message)\n : m_message(std::move(message)) { }\n \n const std::string& message() const {\n return m_message;\n }\n \n private:\n \n std::string m_message;\n \n };\n \n}"], ["/lsfg-vk/thirdparty/toml11/src/context.cpp", "#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\nnamespace detail\n{\ntemplate class context<::toml::type_config>;\ntemplate class context<::toml::ordered_type_config>;\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/toml11/src/types.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\ntemplate class basic_value;\ntemplate class basic_value;\n} // toml\n"], ["/lsfg-vk/thirdparty/toml11/src/region.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/format.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/color.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/comments.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/location.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/literal.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/scanner.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/datetime.cpp", "#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/value_t.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/syntax.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/error_info.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/source_location.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_likely.h", "#pragma once\n\n#ifdef __GNUC__\n#define likely(x) __builtin_expect(bool(x),1)\n#define unlikely(x) __builtin_expect(bool(x),0)\n#define force_inline inline __attribute__((always_inline))\n#else\n#define likely(x) (x)\n#define unlikely(x) (x)\n#define force_inline inline\n#endif\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_enum.h", "#pragma once\n\n#define ENUM_NAME(name) \\\n case name: return os << #name\n\n#define ENUM_DEFAULT(name) \\\n default: return os << static_cast(e)\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/parse.h", "class parse {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_string.h", "class util_string {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_util.h", "class dxbc_util {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_names.h", "class dxbc_names {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_defs.h", "class DxbcOperandKind {\n};"]], "task_instance_info": {"created_time": "2025-08-20 20:01:24", "created_task_model": "DeepSeek-R1", "function_skeleton": 
"CommandPool::CommandPool(const Core::Device& device) {}", "code_description": "This code segment functions to: Initialize a resource management structure for command execution tasks, associate it with a specific processing queue, and enforce automatic cleanup of allocated resources upon destruction."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```cpp\n[TASK_BEGIN]\nCommandPool::CommandPool(const Core::Device& device) {\n VkCommandPoolCreateInfo poolInfo{};\n poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;\n poolInfo.queueFamilyIndex = device.graphics_family_index();\n poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;\n\n VK_CHECK_RESULT(vkCreateCommandPool(\n device.device(),\n &poolInfo,\n nullptr,\n &m_pool\n ));\n}\n[TASK_END]\n```", "inference_time": "2025-08-20 20-02-18"}, "editdistance_info": {"edit_distance": 22.9412, "calculate_time": "2025-08-20 20:02:18", "true_code_clean": "CommandPool::CommandPool(const Core::Device& device) {\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = device.getComputeFamilyIdx()\n };\n VkCommandPool commandPoolHandle{};\n auto res = vkCreateCommandPool(device.handle(), &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device.handle()](VkCommandPool* commandPoolHandle) {\n vkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}", "predict_code_clean": "CommandPool::CommandPool(const Core::Device& device) {\n VkCommandPoolCreateInfo poolInfo{};\n poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;\n poolInfo.queueFamilyIndex = device.graphics_family_index();\n poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;\n VK_CHECK_RESULT(vkCreateCommandPool(\n 
device.device(),\n &poolInfo,\n nullptr,\n &m_pool\n ));\n}"}} {"repo_name": "lsfg-vk", "file_name": "/lsfg-vk/src/utils/utils.cpp", "inference_info": {"prefix_code": "#include \"utils/utils.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n#include \n#include // NOLINT\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Utils;\n\nstd::pair Utils::findQueue(VkDevice device, VkPhysicalDevice physicalDevice,\n VkDeviceCreateInfo* desc, VkQueueFlags flags) {\n std::vector enabledQueues(desc->queueCreateInfoCount);\n std::copy_n(desc->pQueueCreateInfos, enabledQueues.size(), enabledQueues.data());\n\n uint32_t familyCount{};\n Layer::ovkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount, nullptr);\n std::vector families(familyCount);\n Layer::ovkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount,\n families.data());\n\n std::optional idx;\n for (const auto& queueInfo : enabledQueues) {\n if ((queueInfo.queueFamilyIndex < families.size()) &&\n (families[queueInfo.queueFamilyIndex].queueFlags & flags)) {\n idx = queueInfo.queueFamilyIndex;\n break;\n }\n }\n if (!idx.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"No suitable queue found\");\n\n VkQueue queue{};\n Layer::ovkGetDeviceQueue(device, *idx, 0, &queue);\n\n auto res = Layer::ovkSetDeviceLoaderData(device, queue);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to set device loader data for queue\");\n\n return { *idx, queue };\n}\n\nuint64_t Utils::getDeviceUUID(VkPhysicalDevice physicalDevice) {\n VkPhysicalDeviceProperties properties{};\n Layer::ovkGetPhysicalDeviceProperties(physicalDevice, &properties);\n\n return static_cast(properties.vendorID) << 32 | properties.deviceID;\n}\n\nuint32_t Utils::getMaxImageCount(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface) {\n 
VkSurfaceCapabilitiesKHR capabilities{};\n auto res = Layer::ovkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice,\n surface, &capabilities);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get surface capabilities\");\n if (capabilities.maxImageCount == 0)\n return 999; // :3\n return capabilities.maxImageCount;\n}\n\n", "suffix_code": "\n\nvoid Utils::copyImage(VkCommandBuffer buf,\n VkImage src, VkImage dst,\n uint32_t width, uint32_t height,\n VkPipelineStageFlags pre, VkPipelineStageFlags post,\n bool makeSrcPresentable, bool makeDstPresentable) {\n const VkImageMemoryBarrier srcBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,\n .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n .image = src,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const VkImageMemoryBarrier dstBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = dst,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const std::vector barriers = { srcBarrier, dstBarrier };\n Layer::ovkCmdPipelineBarrier(buf,\n pre, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,\n 0, nullptr, 0, nullptr,\n static_cast(barriers.size()), barriers.data());\n\n const VkImageBlit imageBlit{\n .srcSubresource = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .layerCount = 1\n },\n .srcOffsets = {\n { 0, 0, 0 },\n { static_cast(width), static_cast(height), 1 }\n },\n .dstSubresource = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .layerCount = 1\n },\n .dstOffsets = {\n { 0, 0, 0 },\n { static_cast(width), static_cast(height), 1 }\n }\n };\n Layer::ovkCmdBlitImage(\n buf,\n src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n dst, 
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n 1, &imageBlit,\n VK_FILTER_NEAREST\n );\n\n if (makeSrcPresentable) {\n const VkImageMemoryBarrier presentBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .image = src,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n Layer::ovkCmdPipelineBarrier(buf,\n VK_PIPELINE_STAGE_TRANSFER_BIT, post, 0,\n 0, nullptr, 0, nullptr,\n 1, &presentBarrier);\n }\n\n if (makeDstPresentable) {\n const VkImageMemoryBarrier presentBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT,\n .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .image = dst,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n Layer::ovkCmdPipelineBarrier(buf,\n VK_PIPELINE_STAGE_TRANSFER_BIT, post, 0,\n 0, nullptr, 0, nullptr,\n 1, &presentBarrier);\n }\n}\n\nnamespace {\n auto& logCounts() {\n static std::unordered_map map;\n return map;\n }\n}\n\nvoid Utils::logLimitN(const std::string& id, size_t n, const std::string& message) {\n auto& count = logCounts()[id];\n if (count <= n)\n std::cerr << \"lsfg-vk: \" << message << '\\n';\n if (count == n)\n std::cerr << \"(above message has been repeated \" << n << \" times, suppressing further)\\n\";\n count++;\n}\n\nvoid Utils::resetLimitN(const std::string& id) noexcept {\n logCounts().erase(id);\n}\n\nstd::pair Utils::getProcessName() {\n const char* process_name = std::getenv(\"LSFG_PROCESS\");\n if (process_name && *process_name != '\\0')\n return { process_name, process_name };\n\n const char* benchmark_flag = std::getenv(\"LSFG_BENCHMARK\");\n if (benchmark_flag)\n return { \"benchmark\", \"benchmark\" };\n std::array exe{};\n\n const 
ssize_t exe_len = readlink(\"/proc/self/exe\", exe.data(), exe.size() - 1);\n if (exe_len <= 0)\n return { \"Unknown Process\", \"unknown\" };\n exe.at(static_cast(exe_len)) = '\\0';\n\n std::ifstream comm_file(\"/proc/self/comm\");\n if (!comm_file.is_open())\n return { std::string(exe.data()), \"unknown\" };\n std::array comm{};\n comm_file.read(comm.data(), 256);\n comm.at(static_cast(comm_file.gcount())) = '\\0';\n std::string comm_str(comm.data());\n if (comm_str.back() == '\\n')\n comm_str.pop_back();\n\n return{ std::string(exe.data()), comm_str };\n}\n\nstd::string Utils::getConfigFile() {\n const char* configFile = std::getenv(\"LSFG_CONFIG\");\n if (configFile && *configFile != '\\0')\n return{configFile};\n const char* xdgPath = std::getenv(\"XDG_CONFIG_HOME\");\n if (xdgPath && *xdgPath != '\\0')\n return std::string(xdgPath) + \"/lsfg-vk/conf.toml\";\n const char* homePath = std::getenv(\"HOME\");\n if (homePath && *homePath != '\\0')\n return std::string(homePath) + \"/.config/lsfg-vk/conf.toml\";\n return \"/etc/lsfg-vk/conf.toml\";\n}\n", "middle_code": "std::vector Utils::addExtensions(const char* const* extensions, size_t count,\n const std::vector& requiredExtensions) {\n std::vector ext(count);\n std::copy_n(extensions, count, ext.data());\n for (const auto& e : requiredExtensions) {\n auto it = std::ranges::find_if(ext,\n [e](const char* extName) {\n return std::string(extName) == std::string(e);\n });\n if (it == ext.end())\n ext.push_back(e);\n }\n return ext;\n}", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "cpp", "sub_task_type": null}, "context_code": [["/lsfg-vk/framegen/src/common/utils.cpp", "#include \n#include \n\n#include \"common/utils.hpp\"\n#include \"core/buffer.hpp\"\n#include \"core/image.hpp\"\n#include \"core/device.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/fence.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include 
\n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Utils;\n\nBarrierBuilder& BarrierBuilder::addR2W(Core::Image& image) {\n this->barriers.emplace_back(VkImageMemoryBarrier2 {\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .srcStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .srcAccessMask = VK_ACCESS_2_SHADER_READ_BIT,\n .dstStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .dstAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_GENERAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n });\n image.setLayout(VK_IMAGE_LAYOUT_GENERAL);\n\n return *this;\n}\n\nBarrierBuilder& BarrierBuilder::addW2R(Core::Image& image) {\n this->barriers.emplace_back(VkImageMemoryBarrier2 {\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .srcStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .srcAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT,\n .dstStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .dstAccessMask = VK_ACCESS_2_SHADER_READ_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_GENERAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n });\n image.setLayout(VK_IMAGE_LAYOUT_GENERAL);\n\n return *this;\n}\n\nvoid BarrierBuilder::build() const {\n const VkDependencyInfo dependencyInfo = {\n .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n .imageMemoryBarrierCount = static_cast(this->barriers.size()),\n .pImageMemoryBarriers = this->barriers.data()\n };\n vkCmdPipelineBarrier2(this->commandBuffer->handle(), &dependencyInfo);\n}\n\nvoid Utils::uploadImage(const Core::Device& device, const Core::CommandPool& commandPool,\n Core::Image& image, const std::string& path) {\n // read image bytecode\n std::ifstream file(path.data(), std::ios::binary | std::ios::ate);\n if (!file.is_open())\n throw 
std::system_error(errno, std::generic_category(), \"Failed to open image: \" + path);\n\n std::streamsize size = file.tellg();\n size -= 124 + 4; // dds header and magic bytes\n std::vector code(static_cast(size));\n\n file.seekg(124 + 4, std::ios::beg);\n if (!file.read(code.data(), size))\n throw std::system_error(errno, std::generic_category(), \"Failed to read image: \" + path);\n\n file.close();\n\n // copy data to buffer\n const Core::Buffer stagingBuffer(\n device, code.data(), static_cast(code.size()),\n VK_BUFFER_USAGE_TRANSFER_SRC_BIT\n );\n\n // perform the upload\n Core::CommandBuffer commandBuffer(device, commandPool);\n commandBuffer.begin();\n\n const VkImageMemoryBarrier barrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .srcAccessMask = VK_ACCESS_NONE,\n .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n };\n image.setLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n vkCmdPipelineBarrier(\n commandBuffer.handle(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,\n 0, 0, nullptr, 0, nullptr, 1, &barrier\n );\n\n auto extent = image.getExtent();\n const VkBufferImageCopy region{\n .bufferImageHeight = 0,\n .imageSubresource = {\n .aspectMask = image.getAspectFlags(),\n .layerCount = 1\n },\n .imageExtent = { extent.width, extent.height, 1 }\n };\n vkCmdCopyBufferToImage(\n commandBuffer.handle(),\n stagingBuffer.handle(), image.handle(),\n VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion\n );\n\n commandBuffer.end();\n\n Core::Fence fence(device);\n commandBuffer.submit(device.getComputeQueue(), fence);\n\n // wait for the upload to complete\n if (!fence.wait(device))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Upload operation timed out\");\n}\n\nvoid Utils::clearImage(const Core::Device& device, Core::Image& 
image, bool white) {\n Core::Fence fence(device);\n const Core::CommandPool cmdPool(device);\n Core::CommandBuffer cmdBuf(device, cmdPool);\n cmdBuf.begin();\n\n const VkImageMemoryBarrier2 barrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,\n .dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const VkDependencyInfo dependencyInfo = {\n .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n .imageMemoryBarrierCount = 1,\n .pImageMemoryBarriers = &barrier\n };\n image.setLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n vkCmdPipelineBarrier2(cmdBuf.handle(), &dependencyInfo);\n\n const float clearValue = white ? 1.0F : 0.0F;\n const VkClearColorValue clearColor = {{ clearValue, clearValue, clearValue, clearValue }};\n const VkImageSubresourceRange subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n };\n vkCmdClearColorImage(cmdBuf.handle(),\n image.handle(), image.getLayout(),\n &clearColor,\n 1, &subresourceRange);\n\n cmdBuf.end();\n\n cmdBuf.submit(device.getComputeQueue(), fence);\n if (!fence.wait(device))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Failed to wait for clearing fence.\");\n}\n"], ["/lsfg-vk/src/context.cpp", "#include \"context.hpp\"\n#include \"config/config.hpp\"\n#include \"common/exception.hpp\"\n#include \"extract/extract.hpp\"\n#include \"extract/trans.hpp\"\n#include \"utils/utils.hpp\"\n#include \"hooks.hpp\"\n#include \"layer.hpp\"\n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nLsContext::LsContext(const Hooks::DeviceInfo& info, VkSwapchainKHR swapchain,\n VkExtent2D extent, const std::vector& 
swapchainImages)\n : swapchain(swapchain), swapchainImages(swapchainImages),\n extent(extent) {\n // get updated configuration\n auto& conf = Config::activeConf;\n if (!conf.config_file.empty()\n && (\n !std::filesystem::exists(conf.config_file)\n || conf.timestamp != std::filesystem::last_write_time(conf.config_file)\n )) {\n std::cerr << \"lsfg-vk: Rereading configuration, as it is no longer valid.\\n\";\n std::this_thread::sleep_for(std::chrono::milliseconds(100));\n\n // reread configuration\n const std::string file = Utils::getConfigFile();\n const auto name = Utils::getProcessName();\n try {\n Config::updateConfig(file);\n conf = Config::getConfig(name);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: Failed to update configuration, continuing using old:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n }\n\n LSFG_3_1P::finalize();\n LSFG_3_1::finalize();\n\n // print config\n std::cerr << \"lsfg-vk: Reloaded configuration for \" << name.second << \":\\n\";\n if (!conf.dll.empty()) std::cerr << \" Using DLL from: \" << conf.dll << '\\n';\n std::cerr << \" Multiplier: \" << conf.multiplier << '\\n';\n std::cerr << \" Flow Scale: \" << conf.flowScale << '\\n';\n std::cerr << \" Performance Mode: \" << (conf.performance ? \"Enabled\" : \"Disabled\") << '\\n';\n std::cerr << \" HDR Mode: \" << (conf.hdr ? \"Enabled\" : \"Disabled\") << '\\n';\n if (conf.e_present != 2) std::cerr << \" ! Present Mode: \" << conf.e_present << '\\n';\n\n if (conf.multiplier <= 1) return;\n }\n // we could take the format from the swapchain,\n // but honestly this is safer.\n const VkFormat format = conf.hdr\n ? 
VK_FORMAT_R8G8B8A8_UNORM\n : VK_FORMAT_R16G16B16A16_SFLOAT;\n\n // prepare textures for lsfg\n std::array fds{};\n this->frame_0 = Mini::Image(info.device, info.physicalDevice,\n extent, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &fds.at(0));\n this->frame_1 = Mini::Image(info.device, info.physicalDevice,\n extent, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &fds.at(1));\n\n std::vector outFds(conf.multiplier - 1);\n for (size_t i = 0; i < (conf.multiplier - 1); ++i)\n this->out_n.emplace_back(info.device, info.physicalDevice,\n extent, format,\n VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &outFds.at(i));\n\n // initialize lsfg\n auto* lsfgInitialize = LSFG_3_1::initialize;\n auto* lsfgCreateContext = LSFG_3_1::createContext;\n auto* lsfgDeleteContext = LSFG_3_1::deleteContext;\n if (conf.performance) {\n lsfgInitialize = LSFG_3_1P::initialize;\n lsfgCreateContext = LSFG_3_1P::createContext;\n lsfgDeleteContext = LSFG_3_1P::deleteContext;\n }\n\n setenv(\"DISABLE_LSFG\", \"1\", 1); // NOLINT\n\n lsfgInitialize(\n Utils::getDeviceUUID(info.physicalDevice),\n conf.hdr, 1.0F / conf.flowScale, conf.multiplier - 1,\n [](const std::string& name) {\n auto dxbc = Extract::getShader(name);\n auto spirv = Extract::translateShader(dxbc);\n return spirv;\n }\n );\n\n this->lsfgCtxId = std::shared_ptr(\n new int32_t(lsfgCreateContext(fds.at(0), fds.at(1), outFds, extent, format)),\n [lsfgDeleteContext = lsfgDeleteContext](const int32_t* id) {\n lsfgDeleteContext(*id);\n }\n );\n\n unsetenv(\"DISABLE_LSFG\"); // NOLINT\n\n // prepare render passes\n this->cmdPool = Mini::CommandPool(info.device, info.queue.first);\n for (size_t i = 0; i < 8; i++) {\n auto& pass = this->passInfos.at(i);\n pass.renderSemaphores.resize(conf.multiplier - 1);\n pass.acquireSemaphores.resize(conf.multiplier - 1);\n pass.postCopyBufs.resize(conf.multiplier - 1);\n pass.postCopySemaphores.resize(conf.multiplier - 1);\n 
pass.prevPostCopySemaphores.resize(conf.multiplier - 1);\n }\n}\n\nVkResult LsContext::present(const Hooks::DeviceInfo& info, const void* pNext, VkQueue queue,\n const std::vector& gameRenderSemaphores, uint32_t presentIdx) {\n const auto& conf = Config::activeConf;\n auto& pass = this->passInfos.at(this->frameIdx % 8);\n\n // 1. copy swapchain image to frame_0/frame_1\n int preCopySemaphoreFd{};\n pass.preCopySemaphores.at(0) = Mini::Semaphore(info.device, &preCopySemaphoreFd);\n pass.preCopySemaphores.at(1) = Mini::Semaphore(info.device);\n pass.preCopyBuf = Mini::CommandBuffer(info.device, this->cmdPool);\n pass.preCopyBuf.begin();\n\n Utils::copyImage(pass.preCopyBuf.handle(),\n this->swapchainImages.at(presentIdx),\n this->frameIdx % 2 == 0 ? this->frame_0.handle() : this->frame_1.handle(),\n this->extent.width, this->extent.height,\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n true, false);\n\n pass.preCopyBuf.end();\n\n std::vector gameRenderSemaphores2 = gameRenderSemaphores;\n if (this->frameIdx > 0)\n gameRenderSemaphores2.emplace_back(this->passInfos.at((this->frameIdx - 1) % 8)\n .preCopySemaphores.at(1).handle());\n pass.preCopyBuf.submit(info.queue.second,\n gameRenderSemaphores2,\n { pass.preCopySemaphores.at(0).handle(),\n pass.preCopySemaphores.at(1).handle() });\n\n // 2. render intermediary frames\n std::vector renderSemaphoreFds(conf.multiplier - 1);\n for (size_t i = 0; i < (conf.multiplier - 1); ++i)\n pass.renderSemaphores.at(i) = Mini::Semaphore(info.device, &renderSemaphoreFds.at(i));\n\n if (conf.performance)\n LSFG_3_1P::presentContext(*this->lsfgCtxId,\n preCopySemaphoreFd,\n renderSemaphoreFds);\n else\n LSFG_3_1::presentContext(*this->lsfgCtxId,\n preCopySemaphoreFd,\n renderSemaphoreFds);\n\n for (size_t i = 0; i < (conf.multiplier - 1); i++) {\n // 3. 
acquire next swapchain image\n pass.acquireSemaphores.at(i) = Mini::Semaphore(info.device);\n uint32_t imageIdx{};\n auto res = Layer::ovkAcquireNextImageKHR(info.device, this->swapchain, UINT64_MAX,\n pass.acquireSemaphores.at(i).handle(), VK_NULL_HANDLE, &imageIdx);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to acquire next swapchain image\");\n\n // 4. copy output image to swapchain image\n pass.postCopySemaphores.at(i) = Mini::Semaphore(info.device);\n pass.prevPostCopySemaphores.at(i) = Mini::Semaphore(info.device);\n pass.postCopyBufs.at(i) = Mini::CommandBuffer(info.device, this->cmdPool);\n pass.postCopyBufs.at(i).begin();\n\n Utils::copyImage(pass.postCopyBufs.at(i).handle(),\n this->out_n.at(i).handle(),\n this->swapchainImages.at(imageIdx),\n this->extent.width, this->extent.height,\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n false, true);\n\n pass.postCopyBufs.at(i).end();\n pass.postCopyBufs.at(i).submit(info.queue.second,\n { pass.acquireSemaphores.at(i).handle(),\n pass.renderSemaphores.at(i).handle() },\n { pass.postCopySemaphores.at(i).handle(),\n pass.prevPostCopySemaphores.at(i).handle() });\n\n // 5. present swapchain image\n std::vector waitSemaphores{ pass.postCopySemaphores.at(i).handle() };\n if (i != 0) waitSemaphores.emplace_back(pass.prevPostCopySemaphores.at(i - 1).handle());\n\n const VkPresentInfoKHR presentInfo{\n .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,\n .pNext = i == 0 ? pNext : nullptr, // only set on first present\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphores.data(),\n .swapchainCount = 1,\n .pSwapchains = &this->swapchain,\n .pImageIndices = &imageIdx,\n };\n res = Layer::ovkQueuePresentKHR(queue, &presentInfo);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to present swapchain image\");\n }\n\n // 6. 
present actual next frame\n VkSemaphore lastPrevPostCopySemaphore =\n pass.prevPostCopySemaphores.at(conf.multiplier - 1 - 1).handle();\n const VkPresentInfoKHR presentInfo{\n .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,\n .waitSemaphoreCount = 1,\n .pWaitSemaphores = &lastPrevPostCopySemaphore,\n .swapchainCount = 1,\n .pSwapchains = &this->swapchain,\n .pImageIndices = &presentIdx,\n };\n auto res = Layer::ovkQueuePresentKHR(queue, &presentInfo);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to present swapchain image\");\n\n this->frameIdx++;\n return res;\n}\n"], ["/lsfg-vk/src/hooks.cpp", "#include \"hooks.hpp\"\n#include \"common/exception.hpp\"\n#include \"config/config.hpp\"\n#include \"utils/utils.hpp\"\n#include \"context.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Hooks;\n\nnamespace {\n\n ///\n /// Add extensions to the instance create info.\n ///\n VkResult myvkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n auto extensions = Utils::addExtensions(\n pCreateInfo->ppEnabledExtensionNames,\n pCreateInfo->enabledExtensionCount,\n {\n \"VK_KHR_get_physical_device_properties2\",\n \"VK_KHR_external_memory_capabilities\",\n \"VK_KHR_external_semaphore_capabilities\"\n }\n );\n VkInstanceCreateInfo createInfo = *pCreateInfo;\n createInfo.enabledExtensionCount = static_cast(extensions.size());\n createInfo.ppEnabledExtensionNames = extensions.data();\n auto res = Layer::ovkCreateInstance(&createInfo, pAllocator, pInstance);\n if (res == VK_ERROR_EXTENSION_NOT_PRESENT)\n throw std::runtime_error(\n \"Required Vulkan instance extensions are not present.\"\n \"Your GPU driver is not supported.\");\n return res;\n }\n\n /// Map of devices to related information.\n std::unordered_map deviceToInfo;\n\n ///\n 
/// Add extensions to the device create info.\n /// (function pointers are not initialized yet)\n ///\n VkResult myvkCreateDevicePre(\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n // add extensions\n auto extensions = Utils::addExtensions(\n pCreateInfo->ppEnabledExtensionNames,\n pCreateInfo->enabledExtensionCount,\n {\n \"VK_KHR_external_memory\",\n \"VK_KHR_external_memory_fd\",\n \"VK_KHR_external_semaphore\",\n \"VK_KHR_external_semaphore_fd\"\n }\n );\n VkDeviceCreateInfo createInfo = *pCreateInfo;\n createInfo.enabledExtensionCount = static_cast(extensions.size());\n createInfo.ppEnabledExtensionNames = extensions.data();\n auto res = Layer::ovkCreateDevice(physicalDevice, &createInfo, pAllocator, pDevice);\n if (res == VK_ERROR_EXTENSION_NOT_PRESENT)\n throw std::runtime_error(\n \"Required Vulkan device extensions are not present.\"\n \"Your GPU driver is not supported.\");\n return res;\n }\n\n ///\n /// Add related device information after the device is created.\n ///\n VkResult myvkCreateDevicePost(\n VkPhysicalDevice physicalDevice,\n VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks*,\n VkDevice* pDevice) {\n deviceToInfo.emplace(*pDevice, DeviceInfo {\n .device = *pDevice,\n .physicalDevice = physicalDevice,\n .queue = Utils::findQueue(*pDevice, physicalDevice, pCreateInfo, VK_QUEUE_GRAPHICS_BIT)\n });\n return VK_SUCCESS;\n }\n\n /// Erase the device information when the device is destroyed.\n void myvkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator) noexcept {\n deviceToInfo.erase(device);\n Layer::ovkDestroyDevice(device, pAllocator);\n }\n\n std::unordered_map swapchains;\n std::unordered_map swapchainToDeviceTable;\n std::unordered_map swapchainToPresent;\n\n ///\n /// Adjust swapchain creation parameters and create a swapchain context.\n ///\n VkResult myvkCreateSwapchainKHR(\n VkDevice device,\n const 
VkSwapchainCreateInfoKHR* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSwapchainKHR* pSwapchain) noexcept {\n // find device\n auto it = deviceToInfo.find(device);\n if (it == deviceToInfo.end()) {\n Utils::logLimitN(\"swapMap\", 5, \"Device not found in map\");\n return Layer::ovkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);\n }\n Utils::resetLimitN(\"swapMap\");\n auto& deviceInfo = it->second;\n\n // increase amount of images in swapchain\n VkSwapchainCreateInfoKHR createInfo = *pCreateInfo;\n const auto maxImages = Utils::getMaxImageCount(\n deviceInfo.physicalDevice, pCreateInfo->surface);\n createInfo.minImageCount = createInfo.minImageCount + 1\n + static_cast(deviceInfo.queue.first);\n if (createInfo.minImageCount > maxImages) {\n createInfo.minImageCount = maxImages;\n Utils::logLimitN(\"swapCount\", 10,\n \"Requested image count (\" +\n std::to_string(pCreateInfo->minImageCount) + \") \"\n \"exceeds maximum allowed (\" +\n std::to_string(maxImages) + \"). \"\n \"Continuing with maximum allowed image count. 
\"\n \"This might lead to performance degradation.\");\n } else {\n Utils::resetLimitN(\"swapCount\");\n }\n\n // allow copy operations on swapchain images\n createInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;\n createInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;\n\n // enforce present mode\n createInfo.presentMode = Config::activeConf.e_present;\n\n // retire potential old swapchain\n if (pCreateInfo->oldSwapchain) {\n swapchains.erase(pCreateInfo->oldSwapchain);\n swapchainToDeviceTable.erase(pCreateInfo->oldSwapchain);\n }\n\n // create swapchain\n auto res = Layer::ovkCreateSwapchainKHR(device, &createInfo, pAllocator, pSwapchain);\n if (res != VK_SUCCESS)\n return res; // can't be caused by lsfg-vk (yet)\n\n try {\n swapchainToPresent.emplace(*pSwapchain, createInfo.presentMode);\n\n // get all swapchain images\n uint32_t imageCount{};\n res = Layer::ovkGetSwapchainImagesKHR(device, *pSwapchain, &imageCount, nullptr);\n if (res != VK_SUCCESS || imageCount == 0)\n throw LSFG::vulkan_error(res, \"Failed to get swapchain image count\");\n\n std::vector swapchainImages(imageCount);\n res = Layer::ovkGetSwapchainImagesKHR(device, *pSwapchain,\n &imageCount, swapchainImages.data());\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get swapchain images\");\n\n // create swapchain context\n swapchainToDeviceTable.emplace(*pSwapchain, device);\n swapchains.emplace(*pSwapchain, LsContext(\n deviceInfo, *pSwapchain, pCreateInfo->imageExtent,\n swapchainImages\n ));\n\n std::cerr << \"lsfg-vk: Swapchain context \" <<\n (createInfo.oldSwapchain ? 
\"recreated\" : \"created\")\n << \" (using \" << imageCount << \" images).\\n\";\n\n Utils::resetLimitN(\"swapCtxCreate\");\n } catch (const std::exception& e) {\n Utils::logLimitN(\"swapCtxCreate\", 5,\n \"An error occurred while creating the swapchain wrapper:\\n\"\n \"- \" + std::string(e.what()));\n return VK_SUCCESS; // swapchain is still valid\n }\n return VK_SUCCESS;\n }\n\n ///\n /// Update presentation parameters and present the next frame(s).\n ///\n VkResult myvkQueuePresentKHR(\n VkQueue queue,\n const VkPresentInfoKHR* pPresentInfo) noexcept {\n // find swapchain device\n auto it = swapchainToDeviceTable.find(*pPresentInfo->pSwapchains);\n if (it == swapchainToDeviceTable.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n\n // find device info\n auto it2 = deviceToInfo.find(it->second);\n if (it2 == deviceToInfo.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Device not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& deviceInfo = it2->second;\n\n // find swapchain context\n auto it3 = swapchains.find(*pPresentInfo->pSwapchains);\n if (it3 == swapchains.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain context not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& swapchain = it3->second;\n\n // find present mode\n auto it4 = swapchainToPresent.find(*pPresentInfo->pSwapchains);\n if (it4 == swapchainToPresent.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain present mode not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& present = it4->second;\n\n // enforce present mode | NOLINTBEGIN\n #pragma clang diagnostic push\n #pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n const VkSwapchainPresentModeInfoEXT* presentModeInfo =\n reinterpret_cast(pPresentInfo->pNext);\n while (presentModeInfo) {\n if (presentModeInfo->sType == 
VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODE_INFO_EXT) {\n for (size_t i = 0; i < presentModeInfo->swapchainCount; i++)\n const_cast(presentModeInfo->pPresentModes)[i] =\n present;\n }\n presentModeInfo =\n reinterpret_cast(presentModeInfo->pNext);\n }\n #pragma clang diagnostic pop\n\n // NOLINTEND | present the next frame\n VkResult res{}; // might return VK_SUBOPTIMAL_KHR\n try {\n // ensure config is valid\n auto& conf = Config::activeConf;\n if (!conf.config_file.empty()\n && (\n !std::filesystem::exists(conf.config_file)\n || conf.timestamp != std::filesystem::last_write_time(conf.config_file)\n )) {\n Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n return VK_ERROR_OUT_OF_DATE_KHR;\n }\n\n // ensure present mode is still valid\n if (present != conf.e_present) {\n Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n return VK_ERROR_OUT_OF_DATE_KHR;\n }\n\n // skip if disabled\n if (conf.multiplier <= 1)\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n\n // present the swapchain\n std::vector semaphores(pPresentInfo->waitSemaphoreCount);\n std::copy_n(pPresentInfo->pWaitSemaphores, semaphores.size(), semaphores.data());\n\n res = swapchain.present(deviceInfo, pPresentInfo->pNext,\n queue, semaphores, *pPresentInfo->pImageIndices);\n\n Utils::resetLimitN(\"swapPresent\");\n } catch (const std::exception& e) {\n Utils::logLimitN(\"swapPresent\", 5,\n \"An error occurred while presenting the swapchain:\\n\"\n \"- \" + std::string(e.what()));\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return res;\n }\n\n /// Erase the swapchain context and mapping when the swapchain is destroyed.\n void myvkDestroySwapchainKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n const VkAllocationCallbacks* pAllocator) noexcept {\n swapchains.erase(swapchain);\n swapchainToDeviceTable.erase(swapchain);\n swapchainToPresent.erase(swapchain);\n Layer::ovkDestroySwapchainKHR(device, swapchain, pAllocator);\n }\n}\n\nstd::unordered_map Hooks::hooks = {\n // instance hooks\n 
{\"vkCreateInstance\", reinterpret_cast(myvkCreateInstance)},\n\n // device hooks\n {\"vkCreateDevicePre\", reinterpret_cast(myvkCreateDevicePre)},\n {\"vkCreateDevicePost\", reinterpret_cast(myvkCreateDevicePost)},\n {\"vkDestroyDevice\", reinterpret_cast(myvkDestroyDevice)},\n\n // swapchain hooks\n {\"vkCreateSwapchainKHR\", reinterpret_cast(myvkCreateSwapchainKHR)},\n {\"vkQueuePresentKHR\", reinterpret_cast(myvkQueuePresentKHR)},\n {\"vkDestroySwapchainKHR\", reinterpret_cast(myvkDestroySwapchainKHR)}\n};\n"], ["/lsfg-vk/src/layer.cpp", "#include \"layer.hpp\"\n#include \"common/exception.hpp\"\n#include \"config/config.hpp\"\n#include \"hooks.hpp\"\n\n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace {\n PFN_vkCreateInstance next_vkCreateInstance{};\n PFN_vkDestroyInstance next_vkDestroyInstance{};\n\n PFN_vkCreateDevice next_vkCreateDevice{};\n PFN_vkDestroyDevice next_vkDestroyDevice{};\n\n PFN_vkSetDeviceLoaderData next_vSetDeviceLoaderData{};\n\n PFN_vkGetInstanceProcAddr next_vkGetInstanceProcAddr{};\n PFN_vkGetDeviceProcAddr next_vkGetDeviceProcAddr{};\n\n PFN_vkGetPhysicalDeviceQueueFamilyProperties next_vkGetPhysicalDeviceQueueFamilyProperties{};\n PFN_vkGetPhysicalDeviceMemoryProperties next_vkGetPhysicalDeviceMemoryProperties{};\n PFN_vkGetPhysicalDeviceProperties next_vkGetPhysicalDeviceProperties{};\n PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR{};\n\n PFN_vkCreateSwapchainKHR next_vkCreateSwapchainKHR{};\n PFN_vkQueuePresentKHR next_vkQueuePresentKHR{};\n PFN_vkDestroySwapchainKHR next_vkDestroySwapchainKHR{};\n PFN_vkGetSwapchainImagesKHR next_vkGetSwapchainImagesKHR{};\n PFN_vkAllocateCommandBuffers next_vkAllocateCommandBuffers{};\n PFN_vkFreeCommandBuffers next_vkFreeCommandBuffers{};\n PFN_vkBeginCommandBuffer next_vkBeginCommandBuffer{};\n PFN_vkEndCommandBuffer next_vkEndCommandBuffer{};\n PFN_vkCreateCommandPool next_vkCreateCommandPool{};\n 
PFN_vkDestroyCommandPool next_vkDestroyCommandPool{};\n PFN_vkCreateImage next_vkCreateImage{};\n PFN_vkDestroyImage next_vkDestroyImage{};\n PFN_vkGetImageMemoryRequirements next_vkGetImageMemoryRequirements{};\n PFN_vkBindImageMemory next_vkBindImageMemory{};\n PFN_vkAllocateMemory next_vkAllocateMemory{};\n PFN_vkFreeMemory next_vkFreeMemory{};\n PFN_vkCreateSemaphore next_vkCreateSemaphore{};\n PFN_vkDestroySemaphore next_vkDestroySemaphore{};\n PFN_vkGetMemoryFdKHR next_vkGetMemoryFdKHR{};\n PFN_vkGetSemaphoreFdKHR next_vkGetSemaphoreFdKHR{};\n PFN_vkGetDeviceQueue next_vkGetDeviceQueue{};\n PFN_vkQueueSubmit next_vkQueueSubmit{};\n PFN_vkCmdPipelineBarrier next_vkCmdPipelineBarrier{};\n PFN_vkCmdBlitImage next_vkCmdBlitImage{};\n PFN_vkAcquireNextImageKHR next_vkAcquireNextImageKHR{};\n\n template\n bool initInstanceFunc(VkInstance instance, const char* name, T* func) {\n *func = reinterpret_cast(next_vkGetInstanceProcAddr(instance, name));\n if (!*func) {\n std::cerr << \"(no function pointer for \" << name << \")\\n\";\n return false;\n }\n return true;\n }\n\n template\n bool initDeviceFunc(VkDevice device, const char* name, T* func) {\n *func = reinterpret_cast(next_vkGetDeviceProcAddr(device, name));\n if (!*func) {\n std::cerr << \"(no function pointer for \" << name << \")\\n\";\n return false;\n }\n return true;\n }\n}\n\nnamespace {\n VkResult layer_vkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n try {\n // prepare layer | NOLINTBEGIN\n auto* layerDesc = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc && (layerDesc->sType != VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO\n || layerDesc->function != VK_LAYER_LINK_INFO)) {\n layerDesc = const_cast(\n reinterpret_cast(layerDesc->pNext));\n }\n if (!layerDesc)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer creation info found in pNext chain\");\n\n 
next_vkGetInstanceProcAddr = layerDesc->u.pLayerInfo->pfnNextGetInstanceProcAddr;\n layerDesc->u.pLayerInfo = layerDesc->u.pLayerInfo->pNext;\n\n bool success = initInstanceFunc(nullptr, \"vkCreateInstance\", &next_vkCreateInstance);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get instance function pointer for vkCreateInstance\");\n\n // NOLINTEND | skip initialization if the layer is disabled\n if (!Config::activeConf.enable) {\n auto res = next_vkCreateInstance(pCreateInfo, pAllocator, pInstance);\n initInstanceFunc(*pInstance, \"vkCreateDevice\", &next_vkCreateDevice);\n return res;\n }\n\n // create instance\n try {\n auto* createInstanceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateInstance\"]);\n auto res = createInstanceHook(pCreateInfo, pAllocator, pInstance);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Failed to create Vulkan instance\", e);\n }\n\n // get relevant function pointers from the next layer\n success = true;\n success &= initInstanceFunc(*pInstance,\n \"vkDestroyInstance\", &next_vkDestroyInstance);\n success &= initInstanceFunc(*pInstance,\n \"vkCreateDevice\", &next_vkCreateDevice); // workaround mesa bug\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceQueueFamilyProperties\", &next_vkGetPhysicalDeviceQueueFamilyProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceMemoryProperties\", &next_vkGetPhysicalDeviceMemoryProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceProperties\", &next_vkGetPhysicalDeviceProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceSurfaceCapabilitiesKHR\", &next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get instance function pointers\");\n\n std::cerr << \"lsfg-vk: Vulkan instance layer 
initialized successfully.\\n\";\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while initializing the Vulkan instance layer:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return VK_SUCCESS;\n }\n\n VkResult layer_vkCreateDevice( // NOLINTBEGIN\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n try {\n // prepare layer | NOLINTBEGIN\n auto* layerDesc = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc && (layerDesc->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO\n || layerDesc->function != VK_LAYER_LINK_INFO)) {\n layerDesc = const_cast(\n reinterpret_cast(layerDesc->pNext));\n }\n if (!layerDesc)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer creation info found in pNext chain\");\n\n next_vkGetDeviceProcAddr = layerDesc->u.pLayerInfo->pfnNextGetDeviceProcAddr;\n layerDesc->u.pLayerInfo = layerDesc->u.pLayerInfo->pNext;\n\n auto* layerDesc2 = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc2 && (layerDesc2->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO\n || layerDesc2->function != VK_LOADER_DATA_CALLBACK)) {\n layerDesc2 = const_cast(\n reinterpret_cast(layerDesc2->pNext));\n }\n if (!layerDesc2)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer device loader data found in pNext chain\");\n\n next_vSetDeviceLoaderData = layerDesc2->u.pfnSetDeviceLoaderData;\n\n // NOLINTEND | skip initialization if the layer is disabled\n if (!Config::activeConf.enable)\n return next_vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);\n\n // create device\n try {\n auto* createDeviceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateDevicePre\"]);\n auto res = createDeviceHook(physicalDevice, pCreateInfo, pAllocator, pDevice);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, 
\"Unknown error\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Failed to create Vulkan device\", e);\n }\n\n // get relevant function pointers from the next layer\n bool success = true;\n success &= initDeviceFunc(*pDevice, \"vkDestroyDevice\", &next_vkDestroyDevice);\n success &= initDeviceFunc(*pDevice, \"vkCreateSwapchainKHR\", &next_vkCreateSwapchainKHR);\n success &= initDeviceFunc(*pDevice, \"vkQueuePresentKHR\", &next_vkQueuePresentKHR);\n success &= initDeviceFunc(*pDevice, \"vkDestroySwapchainKHR\", &next_vkDestroySwapchainKHR);\n success &= initDeviceFunc(*pDevice, \"vkGetSwapchainImagesKHR\", &next_vkGetSwapchainImagesKHR);\n success &= initDeviceFunc(*pDevice, \"vkAllocateCommandBuffers\", &next_vkAllocateCommandBuffers);\n success &= initDeviceFunc(*pDevice, \"vkFreeCommandBuffers\", &next_vkFreeCommandBuffers);\n success &= initDeviceFunc(*pDevice, \"vkBeginCommandBuffer\", &next_vkBeginCommandBuffer);\n success &= initDeviceFunc(*pDevice, \"vkEndCommandBuffer\", &next_vkEndCommandBuffer);\n success &= initDeviceFunc(*pDevice, \"vkCreateCommandPool\", &next_vkCreateCommandPool);\n success &= initDeviceFunc(*pDevice, \"vkDestroyCommandPool\", &next_vkDestroyCommandPool);\n success &= initDeviceFunc(*pDevice, \"vkCreateImage\", &next_vkCreateImage);\n success &= initDeviceFunc(*pDevice, \"vkDestroyImage\", &next_vkDestroyImage);\n success &= initDeviceFunc(*pDevice, \"vkGetImageMemoryRequirements\", &next_vkGetImageMemoryRequirements);\n success &= initDeviceFunc(*pDevice, \"vkBindImageMemory\", &next_vkBindImageMemory);\n success &= initDeviceFunc(*pDevice, \"vkGetMemoryFdKHR\", &next_vkGetMemoryFdKHR);\n success &= initDeviceFunc(*pDevice, \"vkAllocateMemory\", &next_vkAllocateMemory);\n success &= initDeviceFunc(*pDevice, \"vkFreeMemory\", &next_vkFreeMemory);\n success &= initDeviceFunc(*pDevice, \"vkCreateSemaphore\", &next_vkCreateSemaphore);\n success &= initDeviceFunc(*pDevice, \"vkDestroySemaphore\", 
&next_vkDestroySemaphore);\n success &= initDeviceFunc(*pDevice, \"vkGetSemaphoreFdKHR\", &next_vkGetSemaphoreFdKHR);\n success &= initDeviceFunc(*pDevice, \"vkGetDeviceQueue\", &next_vkGetDeviceQueue);\n success &= initDeviceFunc(*pDevice, \"vkQueueSubmit\", &next_vkQueueSubmit);\n success &= initDeviceFunc(*pDevice, \"vkCmdPipelineBarrier\", &next_vkCmdPipelineBarrier);\n success &= initDeviceFunc(*pDevice, \"vkCmdBlitImage\", &next_vkCmdBlitImage);\n success &= initDeviceFunc(*pDevice, \"vkAcquireNextImageKHR\", &next_vkAcquireNextImageKHR);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get device function pointers\");\n\n auto postCreateDeviceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateDevicePost\"]);\n auto res = postCreateDeviceHook(physicalDevice, pCreateInfo, pAllocator, pDevice);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n\n std::cerr << \"lsfg-vk: Vulkan device layer initialized successfully.\\n\";\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while initializing the Vulkan device layer:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return VK_SUCCESS;\n } // NOLINTEND\n}\n\nconst std::unordered_map layerFunctions = {\n { \"vkCreateInstance\",\n reinterpret_cast(&layer_vkCreateInstance) },\n { \"vkCreateDevice\",\n reinterpret_cast(&layer_vkCreateDevice) },\n { \"vkGetInstanceProcAddr\",\n reinterpret_cast(&layer_vkGetInstanceProcAddr) },\n { \"vkGetDeviceProcAddr\",\n reinterpret_cast(&layer_vkGetDeviceProcAddr) },\n};\n\nPFN_vkVoidFunction layer_vkGetInstanceProcAddr(VkInstance instance, const char* pName) {\n const std::string name(pName);\n auto it = layerFunctions.find(name);\n if (it != layerFunctions.end())\n return it->second;\n\n it = Hooks::hooks.find(name);\n if (it != Hooks::hooks.end() && Config::activeConf.enable)\n return it->second;\n\n return 
next_vkGetInstanceProcAddr(instance, pName);\n}\n\nPFN_vkVoidFunction layer_vkGetDeviceProcAddr(VkDevice device, const char* pName) {\n const std::string name(pName);\n auto it = layerFunctions.find(name);\n if (it != layerFunctions.end())\n return it->second;\n\n it = Hooks::hooks.find(name);\n if (it != Hooks::hooks.end() && Config::activeConf.enable)\n return it->second;\n\n return next_vkGetDeviceProcAddr(device, pName);\n}\n\n// original functions\nnamespace Layer {\n VkResult ovkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n return next_vkCreateInstance(pCreateInfo, pAllocator, pInstance);\n }\n void ovkDestroyInstance(\n VkInstance instance,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyInstance(instance, pAllocator);\n }\n\n VkResult ovkCreateDevice(\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n return next_vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);\n }\n void ovkDestroyDevice(\n VkDevice device,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyDevice(device, pAllocator);\n }\n\n VkResult ovkSetDeviceLoaderData(VkDevice device, void* object) {\n return next_vSetDeviceLoaderData(device, object);\n }\n\n PFN_vkVoidFunction ovkGetInstanceProcAddr(\n VkInstance instance,\n const char* pName) {\n return next_vkGetInstanceProcAddr(instance, pName);\n }\n PFN_vkVoidFunction ovkGetDeviceProcAddr(\n VkDevice device,\n const char* pName) {\n return next_vkGetDeviceProcAddr(device, pName);\n }\n\n void ovkGetPhysicalDeviceQueueFamilyProperties(\n VkPhysicalDevice physicalDevice,\n uint32_t* pQueueFamilyPropertyCount,\n VkQueueFamilyProperties* pQueueFamilyProperties) {\n next_vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);\n }\n void ovkGetPhysicalDeviceMemoryProperties(\n 
VkPhysicalDevice physicalDevice,\n VkPhysicalDeviceMemoryProperties* pMemoryProperties) {\n next_vkGetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);\n }\n void ovkGetPhysicalDeviceProperties(\n VkPhysicalDevice physicalDevice,\n VkPhysicalDeviceProperties* pProperties) {\n next_vkGetPhysicalDeviceProperties(physicalDevice, pProperties);\n }\n VkResult ovkGetPhysicalDeviceSurfaceCapabilitiesKHR(\n VkPhysicalDevice physicalDevice,\n VkSurfaceKHR surface,\n VkSurfaceCapabilitiesKHR* pSurfaceCapabilities) {\n return next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);\n }\n\n VkResult ovkCreateSwapchainKHR(\n VkDevice device,\n const VkSwapchainCreateInfoKHR* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSwapchainKHR* pSwapchain) {\n return next_vkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);\n }\n VkResult ovkQueuePresentKHR(\n VkQueue queue,\n const VkPresentInfoKHR* pPresentInfo) {\n return next_vkQueuePresentKHR(queue, pPresentInfo);\n }\n void ovkDestroySwapchainKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroySwapchainKHR(device, swapchain, pAllocator);\n }\n\n VkResult ovkGetSwapchainImagesKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n uint32_t* pSwapchainImageCount,\n VkImage* pSwapchainImages) {\n return next_vkGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);\n }\n\n VkResult ovkAllocateCommandBuffers(\n VkDevice device,\n const VkCommandBufferAllocateInfo* pAllocateInfo,\n VkCommandBuffer* pCommandBuffers) {\n return next_vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);\n }\n void ovkFreeCommandBuffers(\n VkDevice device,\n VkCommandPool commandPool,\n uint32_t commandBufferCount,\n const VkCommandBuffer* pCommandBuffers) {\n next_vkFreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);\n }\n\n VkResult 
ovkBeginCommandBuffer(\n VkCommandBuffer commandBuffer,\n const VkCommandBufferBeginInfo* pBeginInfo) {\n return next_vkBeginCommandBuffer(commandBuffer, pBeginInfo);\n }\n VkResult ovkEndCommandBuffer(\n VkCommandBuffer commandBuffer) {\n return next_vkEndCommandBuffer(commandBuffer);\n }\n\n VkResult ovkCreateCommandPool(\n VkDevice device,\n const VkCommandPoolCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkCommandPool* pCommandPool) {\n return next_vkCreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);\n }\n void ovkDestroyCommandPool(\n VkDevice device,\n VkCommandPool commandPool,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyCommandPool(device, commandPool, pAllocator);\n }\n\n VkResult ovkCreateImage(\n VkDevice device,\n const VkImageCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkImage* pImage) {\n return next_vkCreateImage(device, pCreateInfo, pAllocator, pImage);\n }\n void ovkDestroyImage(\n VkDevice device,\n VkImage image,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyImage(device, image, pAllocator);\n }\n\n void ovkGetImageMemoryRequirements(\n VkDevice device,\n VkImage image,\n VkMemoryRequirements* pMemoryRequirements) {\n next_vkGetImageMemoryRequirements(device, image, pMemoryRequirements);\n }\n VkResult ovkBindImageMemory(\n VkDevice device,\n VkImage image,\n VkDeviceMemory memory,\n VkDeviceSize memoryOffset) {\n return next_vkBindImageMemory(device, image, memory, memoryOffset);\n }\n\n VkResult ovkAllocateMemory(\n VkDevice device,\n const VkMemoryAllocateInfo* pAllocateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDeviceMemory* pMemory) {\n return next_vkAllocateMemory(device, pAllocateInfo, pAllocator, pMemory);\n }\n void ovkFreeMemory(\n VkDevice device,\n VkDeviceMemory memory,\n const VkAllocationCallbacks* pAllocator) {\n next_vkFreeMemory(device, memory, pAllocator);\n }\n\n VkResult ovkCreateSemaphore(\n VkDevice device,\n const 
VkSemaphoreCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSemaphore* pSemaphore) {\n return next_vkCreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);\n }\n void ovkDestroySemaphore(\n VkDevice device,\n VkSemaphore semaphore,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroySemaphore(device, semaphore, pAllocator);\n }\n\n VkResult ovkGetMemoryFdKHR(\n VkDevice device,\n const VkMemoryGetFdInfoKHR* pGetFdInfo,\n int* pFd) {\n return next_vkGetMemoryFdKHR(device, pGetFdInfo, pFd);\n }\n VkResult ovkGetSemaphoreFdKHR(\n VkDevice device,\n const VkSemaphoreGetFdInfoKHR* pGetFdInfo,\n int* pFd) {\n return next_vkGetSemaphoreFdKHR(device, pGetFdInfo, pFd);\n }\n\n void ovkGetDeviceQueue(\n VkDevice device,\n uint32_t queueFamilyIndex,\n uint32_t queueIndex,\n VkQueue* pQueue) {\n next_vkGetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);\n }\n VkResult ovkQueueSubmit(\n VkQueue queue,\n uint32_t submitCount,\n const VkSubmitInfo* pSubmits,\n VkFence fence) {\n return next_vkQueueSubmit(queue, submitCount, pSubmits, fence);\n }\n\n void ovkCmdPipelineBarrier(\n VkCommandBuffer commandBuffer,\n VkPipelineStageFlags srcStageMask,\n VkPipelineStageFlags dstStageMask,\n VkDependencyFlags dependencyFlags,\n uint32_t memoryBarrierCount,\n const VkMemoryBarrier* pMemoryBarriers,\n uint32_t bufferMemoryBarrierCount,\n const VkBufferMemoryBarrier* pBufferMemoryBarriers,\n uint32_t imageMemoryBarrierCount,\n const VkImageMemoryBarrier* pImageMemoryBarriers) {\n next_vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,\n memoryBarrierCount, pMemoryBarriers,\n bufferMemoryBarrierCount, pBufferMemoryBarriers,\n imageMemoryBarrierCount, pImageMemoryBarriers);\n }\n void ovkCmdBlitImage(\n VkCommandBuffer commandBuffer,\n VkImage srcImage,\n VkImageLayout srcImageLayout,\n VkImage dstImage,\n VkImageLayout dstImageLayout,\n uint32_t regionCount,\n const VkImageBlit* pRegions,\n VkFilter filter) {\n 
next_vkCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);\n }\n\n VkResult ovkAcquireNextImageKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n uint64_t timeout,\n VkSemaphore semaphore,\n VkFence fence,\n uint32_t* pImageIndex) {\n return next_vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);\n }\n}\n"], ["/lsfg-vk/framegen/src/core/image.cpp", "#include \n#include \n\n#include \"core/image.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nImage::Image(const Core::Device& device, VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = vkCreateImage(device.handle(), &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetImageMemoryRequirements(device.handle(), imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & 
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = vkBindImageMemory(device.handle(), imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // create image view\n const VkImageViewCreateInfo viewDesc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n .image = imageHandle,\n .viewType = VK_IMAGE_VIEW_TYPE_2D,\n .format = format,\n .components = {\n .r = VK_COMPONENT_SWIZZLE_IDENTITY,\n .g = VK_COMPONENT_SWIZZLE_IDENTITY,\n .b = VK_COMPONENT_SWIZZLE_IDENTITY,\n .a = VK_COMPONENT_SWIZZLE_IDENTITY\n },\n .subresourceRange = {\n .aspectMask = aspectFlags,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n\n VkImageView viewHandle{};\n res = vkCreateImageView(device.handle(), &viewDesc, nullptr, &viewHandle);\n if (res != VK_SUCCESS || viewHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create image view\");\n\n // store objects in shared ptr\n this->layout = std::make_shared(VK_IMAGE_LAYOUT_UNDEFINED);\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device.handle()](VkImage* img) {\n vkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n this->view = 
std::shared_ptr(\n new VkImageView(viewHandle),\n [dev = device.handle()](VkImageView* imgView) {\n vkDestroyImageView(dev, *imgView, nullptr);\n }\n );\n}\n\n// shared memory constructor\n\nImage::Image(const Core::Device& device, VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags, int fd)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkExternalMemoryImageCreateInfo externalInfo{\n .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .pNext = &externalInfo,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = vkCreateImage(device.handle(), &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetImageMemoryRequirements(device.handle(), imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // 
~~allocate~~ and bind memory\n const VkMemoryDedicatedAllocateInfoKHR dedicatedInfo2{\n .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,\n .image = imageHandle,\n };\n const VkImportMemoryFdInfoKHR importInfo{\n .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,\n .pNext = &dedicatedInfo2,\n .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,\n .fd = fd // closes the fd\n };\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .pNext = fd == -1 ? nullptr : &importInfo,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = vkBindImageMemory(device.handle(), imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // create image view\n const VkImageViewCreateInfo viewDesc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n .image = imageHandle,\n .viewType = VK_IMAGE_VIEW_TYPE_2D,\n .format = format,\n .components = {\n .r = VK_COMPONENT_SWIZZLE_IDENTITY,\n .g = VK_COMPONENT_SWIZZLE_IDENTITY,\n .b = VK_COMPONENT_SWIZZLE_IDENTITY,\n .a = VK_COMPONENT_SWIZZLE_IDENTITY\n },\n .subresourceRange = {\n .aspectMask = aspectFlags,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n\n VkImageView viewHandle{};\n res = vkCreateImageView(device.handle(), &viewDesc, nullptr, &viewHandle);\n if (res != VK_SUCCESS || viewHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create image view\");\n\n // store objects in shared ptr\n this->layout = std::make_shared(VK_IMAGE_LAYOUT_UNDEFINED);\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device.handle()](VkImage* img) {\n vkDestroyImage(dev, *img, nullptr);\n }\n );\n 
this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n this->view = std::shared_ptr(\n new VkImageView(viewHandle),\n [dev = device.handle()](VkImageView* imgView) {\n vkDestroyImageView(dev, *imgView, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/device.cpp", "#include \n#include \n\n#include \"core/device.hpp\"\n#include \"core/instance.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nconst std::vector requiredExtensions = {\n \"VK_KHR_external_memory_fd\",\n \"VK_KHR_external_semaphore_fd\",\n \"VK_EXT_robustness2\",\n};\n\nDevice::Device(const Instance& instance, uint64_t deviceUUID) {\n // get all physical devices\n uint32_t deviceCount{};\n auto res = vkEnumeratePhysicalDevices(instance.handle(), &deviceCount, nullptr);\n if (res != VK_SUCCESS || deviceCount == 0)\n throw LSFG::vulkan_error(res, \"Failed to enumerate physical devices\");\n\n std::vector devices(deviceCount);\n res = vkEnumeratePhysicalDevices(instance.handle(), &deviceCount, devices.data());\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get physical devices\");\n\n // get device by uuid\n std::optional physicalDevice;\n for (const auto& device : devices) {\n VkPhysicalDeviceProperties properties;\n vkGetPhysicalDeviceProperties(device, &properties);\n\n const uint64_t uuid =\n static_cast(properties.vendorID) << 32 | properties.deviceID;\n if (deviceUUID == uuid || deviceUUID == 0x1463ABAC) {\n physicalDevice = device;\n break;\n }\n }\n if (!physicalDevice)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Could not find physical device with UUID\");\n\n // find queue family indices\n uint32_t familyCount{};\n vkGetPhysicalDeviceQueueFamilyProperties(*physicalDevice, &familyCount, nullptr);\n\n std::vector queueFamilies(familyCount);\n 
vkGetPhysicalDeviceQueueFamilyProperties(*physicalDevice, &familyCount, queueFamilies.data());\n\n std::optional computeFamilyIdx;\n for (uint32_t i = 0; i < familyCount; ++i) {\n if (queueFamilies[i].queueFlags & VK_QUEUE_COMPUTE_BIT)\n computeFamilyIdx = i;\n }\n if (!computeFamilyIdx)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"No compute queue family found\");\n\n // create logical device\n const float queuePriority{1.0F}; // highest priority\n VkPhysicalDeviceRobustness2FeaturesEXT robustness2{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT,\n .nullDescriptor = VK_TRUE,\n };\n VkPhysicalDeviceVulkan13Features features13{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES,\n .pNext = &robustness2,\n .synchronization2 = VK_TRUE\n };\n const VkPhysicalDeviceVulkan12Features features12{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,\n .pNext = &features13,\n .timelineSemaphore = VK_TRUE,\n .vulkanMemoryModel = VK_TRUE\n };\n const VkDeviceQueueCreateInfo computeQueueDesc{\n .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,\n .queueFamilyIndex = *computeFamilyIdx,\n .queueCount = 1,\n .pQueuePriorities = &queuePriority\n };\n const VkDeviceCreateInfo deviceCreateInfo{\n .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,\n .pNext = &features12,\n .queueCreateInfoCount = 1,\n .pQueueCreateInfos = &computeQueueDesc,\n .enabledExtensionCount = static_cast(requiredExtensions.size()),\n .ppEnabledExtensionNames = requiredExtensions.data()\n };\n VkDevice deviceHandle{};\n res = vkCreateDevice(*physicalDevice, &deviceCreateInfo, nullptr, &deviceHandle);\n if (res != VK_SUCCESS | deviceHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create logical device\");\n\n volkLoadDevice(deviceHandle);\n\n // get compute queue\n VkQueue queueHandle{};\n vkGetDeviceQueue(deviceHandle, *computeFamilyIdx, 0, &queueHandle);\n\n // store in shared ptr\n this->computeQueue = queueHandle;\n 
this->computeFamilyIdx = *computeFamilyIdx;\n this->physicalDevice = *physicalDevice;\n this->device = std::shared_ptr(\n new VkDevice(deviceHandle),\n [](VkDevice* device) {\n vkDestroyDevice(*device, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/mini/image.cpp", "#include \"mini/image.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n\nusing namespace Mini;\n\nImage::Image(VkDevice device, VkPhysicalDevice physicalDevice,\n VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags, int* fd)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkExternalMemoryImageCreateInfo externalInfo{\n .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .pNext = &externalInfo,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = Layer::ovkCreateImage(device, &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n Layer::ovkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);\n\n VkMemoryRequirements memReqs;\n Layer::ovkGetImageMemoryRequirements(device, imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n 
(memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryDedicatedAllocateInfoKHR dedicatedInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,\n .image = imageHandle,\n };\n const VkExportMemoryAllocateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,\n .pNext = &dedicatedInfo,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .pNext = &exportInfo,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = Layer::ovkAllocateMemory(device, &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = Layer::ovkBindImageMemory(device, imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // obtain the sharing fd\n const VkMemoryGetFdInfoKHR fdInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,\n .memory = memoryHandle,\n .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,\n };\n res = Layer::ovkGetMemoryFdKHR(device, &fdInfo, fd);\n if (res != VK_SUCCESS || *fd < 0)\n throw LSFG::vulkan_error(res, \"Failed to obtain sharing fd for Vulkan image\");\n\n // store objects in shared ptr\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device](VkImage* img) {\n Layer::ovkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device](VkDeviceMemory* mem) {\n Layer::ovkFreeMemory(dev, *mem, 
nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/config/config.cpp", "#include \"config/config.hpp\"\n#include \"common/exception.hpp\"\n\n#include \"config/default_conf.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Config;\n\nnamespace {\n Configuration globalConf{};\n std::optional> gameConfs;\n}\n\nConfiguration Config::activeConf{};\n\nnamespace {\n /// Turn a string into a VkPresentModeKHR enum value.\n VkPresentModeKHR into_present(const std::string& mode) {\n if (mode == \"fifo\" || mode == \"vsync\")\n return VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR;\n if (mode == \"mailbox\")\n return VkPresentModeKHR::VK_PRESENT_MODE_MAILBOX_KHR;\n if (mode == \"immediate\")\n return VkPresentModeKHR::VK_PRESENT_MODE_IMMEDIATE_KHR;\n return VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR;\n }\n}\n\nvoid Config::updateConfig(const std::string& file) {\n if (!std::filesystem::exists(file)) {\n std::cerr << \"lsfg-vk: Placing default configuration file at \" << file << '\\n';\n const auto parent = std::filesystem::path(file).parent_path();\n if (!std::filesystem::exists(parent))\n if (!std::filesystem::create_directories(parent))\n throw std::runtime_error(\"Unable to create configuration directory at \" + parent.string());\n\n std::ofstream out(file);\n if (!out.is_open())\n throw std::runtime_error(\"Unable to create configuration file at \" + file);\n out << DEFAULT_CONFIG;\n out.close();\n }\n\n // parse config file\n std::optional parsed;\n try {\n parsed.emplace(toml::parse(file));\n if (!parsed->contains(\"version\"))\n throw std::runtime_error(\"Configuration file is missing 'version' field\");\n if (parsed->at(\"version\").as_integer() != 1)\n throw std::runtime_error(\"Configuration file version is not supported, expected 1\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Unable to parse configuration 
file\", e);\n }\n auto& toml = *parsed;\n\n // parse global configuration\n const toml::value globalTable = toml::find_or_default(toml, \"global\");\n const Configuration global{\n .dll = toml::find_or(globalTable, \"dll\", std::string()),\n .config_file = file,\n .timestamp = std::filesystem::last_write_time(file)\n };\n\n // validate global configuration\n if (global.multiplier < 2)\n throw std::runtime_error(\"Global Multiplier cannot be less than 2\");\n if (global.flowScale < 0.25F || global.flowScale > 1.0F)\n throw std::runtime_error(\"Flow scale must be between 0.25 and 1.0\");\n\n // parse game-specific configuration\n std::unordered_map games;\n const toml::value gamesList = toml::find_or_default(toml, \"game\");\n for (const auto& gameTable : gamesList.as_array()) {\n if (!gameTable.is_table())\n throw std::runtime_error(\"Invalid game configuration entry\");\n if (!gameTable.contains(\"exe\"))\n throw std::runtime_error(\"Game override missing 'exe' field\");\n\n const std::string exe = toml::find(gameTable, \"exe\");\n Configuration game{\n .enable = true,\n .dll = global.dll,\n .multiplier = toml::find_or(gameTable, \"multiplier\", 2U),\n .flowScale = toml::find_or(gameTable, \"flow_scale\", 1.0F),\n .performance = toml::find_or(gameTable, \"performance_mode\", false),\n .hdr = toml::find_or(gameTable, \"hdr_mode\", false),\n .e_present = into_present(toml::find_or(gameTable, \"experimental_present_mode\", \"\")),\n .config_file = file,\n .timestamp = global.timestamp\n };\n\n // validate the configuration\n if (game.multiplier < 1)\n throw std::runtime_error(\"Multiplier cannot be less than 1\");\n if (game.flowScale < 0.25F || game.flowScale > 1.0F)\n throw std::runtime_error(\"Flow scale must be between 0.25 and 1.0\");\n games[exe] = std::move(game);\n }\n\n // store configurations\n globalConf = global;\n gameConfs = std::move(games);\n}\n\nConfiguration Config::getConfig(const std::pair& name) {\n // process legacy environment variables\n if 
(std::getenv(\"LSFG_LEGACY\")) {\n Configuration conf{\n .enable = true,\n .multiplier = 2,\n .flowScale = 1.0F,\n .e_present = VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR\n };\n\n const char* dll = std::getenv(\"LSFG_DLL_PATH\");\n if (dll) conf.dll = std::string(dll);\n const char* multiplier = std::getenv(\"LSFG_MULTIPLIER\");\n if (multiplier) conf.multiplier = std::stoul(multiplier);\n const char* flow_scale = std::getenv(\"LSFG_FLOW_SCALE\");\n if (flow_scale) conf.flowScale = std::stof(flow_scale);\n const char* performance = std::getenv(\"LSFG_PERFORMANCE_MODE\");\n if (performance) conf.performance = std::string(performance) == \"1\";\n const char* hdr = std::getenv(\"LSFG_HDR_MODE\");\n if (hdr) conf.hdr = std::string(hdr) == \"1\";\n const char* e_present = std::getenv(\"LSFG_EXPERIMENTAL_PRESENT_MODE\");\n if (e_present) conf.e_present = into_present(std::string(e_present));\n\n return conf;\n }\n\n // process new configuration system\n if (!gameConfs.has_value())\n return globalConf;\n\n const auto& games = *gameConfs;\n auto it = std::ranges::find_if(games, [&name](const auto& pair) {\n return name.first.ends_with(pair.first) || (name.second == pair.first);\n });\n if (it != games.end())\n return it->second;\n\n return globalConf;\n}\n"], ["/lsfg-vk/src/mini/commandbuffer.cpp", "#include \"mini/commandbuffer.hpp\"\n#include \"mini/commandpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n\nusing namespace Mini;\n\nCommandBuffer::CommandBuffer(VkDevice device, const CommandPool& pool) {\n // create command buffer\n const VkCommandBufferAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n .commandPool = pool.handle(),\n .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n .commandBufferCount = 1\n };\n VkCommandBuffer commandBufferHandle{};\n auto res = Layer::ovkAllocateCommandBuffers(device, &desc, &commandBufferHandle);\n if (res != VK_SUCCESS || 
commandBufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate command buffer\");\n res = Layer::ovkSetDeviceLoaderData(device, commandBufferHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to set device loader data for command buffer\");\n\n // store command buffer in shared ptr\n this->state = std::make_shared(CommandBufferState::Empty);\n this->commandBuffer = std::shared_ptr(\n new VkCommandBuffer(commandBufferHandle),\n [dev = device, pool = pool.handle()](VkCommandBuffer* cmdBuffer) {\n Layer::ovkFreeCommandBuffers(dev, pool, 1, cmdBuffer);\n }\n );\n}\n\nvoid CommandBuffer::begin() {\n if (*this->state != CommandBufferState::Empty)\n throw std::logic_error(\"Command buffer is not in Empty state\");\n\n const VkCommandBufferBeginInfo beginInfo = {\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT\n };\n auto res = Layer::ovkBeginCommandBuffer(*this->commandBuffer, &beginInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to begin command buffer\");\n\n *this->state = CommandBufferState::Recording;\n}\n\nvoid CommandBuffer::end() {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n auto res = Layer::ovkEndCommandBuffer(*this->commandBuffer);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to end command buffer\");\n\n *this->state = CommandBufferState::Full;\n}\n\nvoid CommandBuffer::submit(VkQueue queue,\n const std::vector& waitSemaphores,\n const std::vector& signalSemaphores) {\n if (*this->state != CommandBufferState::Full)\n throw std::logic_error(\"Command buffer is not in Full state\");\n\n const std::vector waitStages(waitSemaphores.size(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);\n\n const VkSubmitInfo submitInfo{\n .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n 
.pWaitSemaphores = waitSemaphores.data(),\n .pWaitDstStageMask = waitStages.data(),\n .commandBufferCount = 1,\n .pCommandBuffers = &(*this->commandBuffer),\n .signalSemaphoreCount = static_cast(signalSemaphores.size()),\n .pSignalSemaphores = signalSemaphores.data()\n };\n auto res = Layer::ovkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to submit command buffer\");\n\n *this->state = CommandBufferState::Submitted;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_compiler.h", "class DxbcCompilerHsPhase {\n public:\n DxbcCompiler(\n const std::string& fileName,\n const DxbcModuleInfo& moduleInfo,\n const DxbcProgramInfo& programInfo,\n const Rc& isgn,\n const Rc& osgn,\n const Rc& psgn,\n const DxbcAnalysisInfo& analysis) {\n // Declare an entry point ID. We'll need it during the\n // initialization phase where the execution mode is set.\n m_entryPointId = m_module.allocateId();\n \n // Set the shader name so that we recognize it in renderdoc\n m_module.setDebugSource(\n spv::SourceLanguageUnknown, 0,\n m_module.addDebugString(fileName.c_str()),\n nullptr);\n\n // Set the memory model. 
This is the same for all shaders.\n m_module.enableCapability(\n spv::CapabilityVulkanMemoryModel);\n\n m_module.setMemoryModel(\n spv::AddressingModelLogical,\n spv::MemoryModelVulkan);\n \n // Make sure our interface registers are clear\n for (uint32_t i = 0; i < DxbcMaxInterfaceRegs; i++) {\n m_vRegs.at(i) = DxbcRegisterPointer { };\n m_oRegs.at(i) = DxbcRegisterPointer { };\n }\n \n this->emitInit();\n }\n ~DxbcCompiler() {\n \n }\n void processInstruction(\n const DxbcShaderInstruction& ins) {\n m_lastOp = m_currOp;\n m_currOp = ins.op;\n\n switch (ins.opClass) {\n case DxbcInstClass::Declaration:\n return this->emitDcl(ins);\n \n case DxbcInstClass::CustomData:\n return this->emitCustomData(ins);\n \n case DxbcInstClass::Atomic:\n return this->emitAtomic(ins);\n \n case DxbcInstClass::AtomicCounter:\n return this->emitAtomicCounter(ins);\n \n case DxbcInstClass::Barrier:\n return this->emitBarrier(ins);\n \n case DxbcInstClass::BitExtract:\n return this->emitBitExtract(ins);\n \n case DxbcInstClass::BitInsert:\n return this->emitBitInsert(ins);\n \n case DxbcInstClass::BitScan:\n return this->emitBitScan(ins);\n \n case DxbcInstClass::BufferQuery:\n return this->emitBufferQuery(ins);\n \n case DxbcInstClass::BufferLoad:\n return this->emitBufferLoad(ins);\n \n case DxbcInstClass::BufferStore:\n return this->emitBufferStore(ins);\n \n case DxbcInstClass::ConvertFloat16:\n return this->emitConvertFloat16(ins);\n \n case DxbcInstClass::ConvertFloat64:\n return this->emitConvertFloat64(ins);\n \n case DxbcInstClass::ControlFlow:\n return this->emitControlFlow(ins);\n \n case DxbcInstClass::GeometryEmit:\n return this->emitGeometryEmit(ins);\n \n case DxbcInstClass::HullShaderPhase:\n return this->emitHullShaderPhase(ins);\n \n case DxbcInstClass::HullShaderInstCnt:\n return this->emitHullShaderInstCnt(ins);\n \n case DxbcInstClass::Interpolate:\n return this->emitInterpolate(ins);\n \n case DxbcInstClass::NoOperation:\n return;\n\n case 
DxbcInstClass::SparseCheckAccess:\n return this->emitSparseCheckAccess(ins);\n\n case DxbcInstClass::TextureQuery:\n return this->emitTextureQuery(ins);\n \n case DxbcInstClass::TextureQueryLod:\n return this->emitTextureQueryLod(ins);\n \n case DxbcInstClass::TextureQueryMs:\n return this->emitTextureQueryMs(ins);\n \n case DxbcInstClass::TextureQueryMsPos:\n return this->emitTextureQueryMsPos(ins);\n \n case DxbcInstClass::TextureFetch:\n return this->emitTextureFetch(ins);\n \n case DxbcInstClass::TextureGather:\n return this->emitTextureGather(ins);\n \n case DxbcInstClass::TextureSample:\n return this->emitTextureSample(ins);\n \n case DxbcInstClass::TypedUavLoad:\n return this->emitTypedUavLoad(ins);\n \n case DxbcInstClass::TypedUavStore:\n return this->emitTypedUavStore(ins);\n \n case DxbcInstClass::VectorAlu:\n return this->emitVectorAlu(ins);\n \n case DxbcInstClass::VectorCmov:\n return this->emitVectorCmov(ins);\n \n case DxbcInstClass::VectorCmp:\n return this->emitVectorCmp(ins);\n \n case DxbcInstClass::VectorDeriv:\n return this->emitVectorDeriv(ins);\n \n case DxbcInstClass::VectorDot:\n return this->emitVectorDot(ins);\n \n case DxbcInstClass::VectorIdiv:\n return this->emitVectorIdiv(ins);\n \n case DxbcInstClass::VectorImul:\n return this->emitVectorImul(ins);\n \n case DxbcInstClass::VectorMsad:\n return this->emitVectorMsad(ins);\n \n case DxbcInstClass::VectorShift:\n return this->emitVectorShift(ins);\n \n case DxbcInstClass::VectorSinCos:\n return this->emitVectorSinCos(ins);\n \n default:\n Logger::warn(\n str::format(\"DxbcCompiler: Unhandled opcode class: \",\n ins.op));\n }\n }\n void processXfbPassthrough() {\n m_module.setExecutionMode (m_entryPointId, spv::ExecutionModeInputPoints);\n m_module.setExecutionMode (m_entryPointId, spv::ExecutionModeOutputPoints);\n m_module.setOutputVertices(m_entryPointId, 1);\n\n for (auto e = m_isgn->begin(); e != m_isgn->end(); e++) {\n emitDclInput(e->registerId, 1,\n e->componentMask, 
DxbcSystemValue::None,\n DxbcInterpolationMode::Undefined);\n }\n\n // Figure out which streams to enable\n uint32_t streamMask = 0;\n\n for (size_t i = 0; i < m_xfbVars.size(); i++)\n streamMask |= 1u << m_xfbVars[i].streamId;\n \n for (uint32_t streamId : bit::BitMask(streamMask)) {\n emitXfbOutputSetup(streamId, true);\n m_module.opEmitVertex(m_module.constu32(streamId));\n }\n\n // End the main function\n emitFunctionEnd();\n\n // For pass-through we always assume points\n m_inputTopology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;\n }\n SpirvCodeBuffer finalize() {\n // Depending on the shader type, this will prepare\n // input registers, call various shader functions\n // and write back the output registers.\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: this->emitVsFinalize(); break;\n case DxbcProgramType::HullShader: this->emitHsFinalize(); break;\n case DxbcProgramType::DomainShader: this->emitDsFinalize(); break;\n case DxbcProgramType::GeometryShader: this->emitGsFinalize(); break;\n case DxbcProgramType::PixelShader: this->emitPsFinalize(); break;\n case DxbcProgramType::ComputeShader: this->emitCsFinalize(); break;\n default: throw DxvkError(\"Invalid shader stage\");\n }\n\n // Emit float control mode if the extension is supported\n this->emitFloatControl();\n \n // Declare the entry point, we now have all the\n // information we need, including the interfaces\n m_module.addEntryPoint(m_entryPointId,\n m_programInfo.executionModel(), \"main\");\n m_module.setDebugName(m_entryPointId, \"main\");\n\n return m_module.compile();\n }\n private:\n DxbcModuleInfo m_moduleInfo;\n DxbcProgramInfo m_programInfo;\n SpirvModule m_module;\n Rc m_isgn;\n Rc m_osgn;\n Rc m_psgn;\n const DxbcAnalysisInfo* m_analysis;\n std::vector m_bindings;\n std::vector m_rRegs;\n std::vector m_xRegs;\n std::vector m_gRegs;\n std::array<\n DxbcRegisterPointer,\n DxbcMaxInterfaceRegs> m_vRegs;\n std::vector m_vMappings;\n std::array<\n DxbcRegisterPointer,\n 
DxbcMaxInterfaceRegs> m_oRegs;\n std::vector m_oMappings;\n std::vector m_xfbVars;\n std::vector m_indexRanges = { };\n std::array m_constantBuffers;\n std::array m_samplers;\n std::array m_textures;\n std::array m_uavs;\n bool m_hasGloballyCoherentUav = false;\n bool m_hasRasterizerOrderedUav = false;\n std::vector m_controlFlowBlocks;\n bool m_topLevelIsUniform = true;\n uint64_t m_uavRdMask = 0u;\n uint64_t m_uavWrMask = 0u;\n bool m_insideFunction = false;\n uint32_t m_vArrayLength = 0;\n uint32_t m_vArrayLengthId = 0;\n uint32_t m_vArray = 0;\n uint32_t m_positionIn = 0;\n uint32_t m_positionOut = 0;\n uint32_t m_clipDistances = 0;\n uint32_t m_cullDistances = 0;\n uint32_t m_primitiveIdIn = 0;\n uint32_t m_primitiveIdOut = 0;\n uint32_t m_icbArray = 0;\n std::vector m_icbData;\n uint32_t m_icbComponents = 0u;\n uint32_t m_icbSize = 0u;\n uint32_t m_samplePositions = 0;\n uint32_t m_uavCtrStructType = 0;\n uint32_t m_uavCtrPointerType = 0;\n std::unordered_map m_subroutines;\n uint32_t m_entryPointId = 0;\n bool m_hasRawAccessChains = false;\n uint32_t m_inputMask = 0u;\n uint32_t m_outputMask = 0u;\n DxbcCompilerVsPart m_vs;\n DxbcCompilerHsPart m_hs;\n DxbcCompilerDsPart m_ds;\n DxbcCompilerGsPart m_gs;\n DxbcCompilerPsPart m_ps;\n DxbcCompilerCsPart m_cs;\n bool m_precise = true;\n DxbcOpcode m_lastOp = DxbcOpcode::Nop;\n DxbcOpcode m_currOp = DxbcOpcode::Nop;\n VkPrimitiveTopology m_inputTopology = VK_PRIMITIVE_TOPOLOGY_MAX_ENUM;\n VkPrimitiveTopology m_outputTopology = VK_PRIMITIVE_TOPOLOGY_MAX_ENUM;\n void emitDcl(\n const DxbcShaderInstruction& ins) {\n switch (ins.op) {\n case DxbcOpcode::DclGlobalFlags:\n return this->emitDclGlobalFlags(ins);\n \n case DxbcOpcode::DclIndexRange:\n return this->emitDclIndexRange(ins);\n \n case DxbcOpcode::DclTemps:\n return this->emitDclTemps(ins);\n \n case DxbcOpcode::DclIndexableTemp:\n return this->emitDclIndexableTemp(ins);\n \n case DxbcOpcode::DclInput:\n case DxbcOpcode::DclInputSgv:\n case 
DxbcOpcode::DclInputSiv:\n case DxbcOpcode::DclInputPs:\n case DxbcOpcode::DclInputPsSgv:\n case DxbcOpcode::DclInputPsSiv:\n case DxbcOpcode::DclOutput:\n case DxbcOpcode::DclOutputSgv:\n case DxbcOpcode::DclOutputSiv:\n return this->emitDclInterfaceReg(ins);\n \n case DxbcOpcode::DclConstantBuffer:\n return this->emitDclConstantBuffer(ins);\n \n case DxbcOpcode::DclSampler:\n return this->emitDclSampler(ins);\n \n case DxbcOpcode::DclStream:\n return this->emitDclStream(ins);\n \n case DxbcOpcode::DclUavTyped:\n case DxbcOpcode::DclResource:\n return this->emitDclResourceTyped(ins);\n \n case DxbcOpcode::DclUavRaw:\n case DxbcOpcode::DclResourceRaw:\n case DxbcOpcode::DclUavStructured:\n case DxbcOpcode::DclResourceStructured:\n return this->emitDclResourceRawStructured(ins);\n \n case DxbcOpcode::DclThreadGroupSharedMemoryRaw:\n case DxbcOpcode::DclThreadGroupSharedMemoryStructured:\n return this->emitDclThreadGroupSharedMemory(ins);\n \n case DxbcOpcode::DclGsInputPrimitive:\n return this->emitDclGsInputPrimitive(ins);\n \n case DxbcOpcode::DclGsOutputPrimitiveTopology:\n return this->emitDclGsOutputTopology(ins);\n \n case DxbcOpcode::DclMaxOutputVertexCount:\n return this->emitDclMaxOutputVertexCount(ins);\n \n case DxbcOpcode::DclInputControlPointCount:\n return this->emitDclInputControlPointCount(ins);\n \n case DxbcOpcode::DclOutputControlPointCount:\n return this->emitDclOutputControlPointCount(ins);\n \n case DxbcOpcode::DclHsMaxTessFactor:\n return this->emitDclHsMaxTessFactor(ins);\n \n case DxbcOpcode::DclTessDomain:\n return this->emitDclTessDomain(ins);\n \n case DxbcOpcode::DclTessPartitioning:\n return this->emitDclTessPartitioning(ins);\n \n case DxbcOpcode::DclTessOutputPrimitive:\n return this->emitDclTessOutputPrimitive(ins);\n \n case DxbcOpcode::DclThreadGroup:\n return this->emitDclThreadGroup(ins);\n \n case DxbcOpcode::DclGsInstanceCount:\n return this->emitDclGsInstanceCount(ins);\n \n default:\n Logger::warn(\n 
str::format(\"DxbcCompiler: Unhandled opcode: \",\n ins.op));\n }\n }\n void emitDclGlobalFlags(\n const DxbcShaderInstruction& ins) {\n const DxbcGlobalFlags flags = ins.controls.globalFlags();\n \n if (flags.test(DxbcGlobalFlag::RefactoringAllowed))\n m_precise = false;\n\n if (flags.test(DxbcGlobalFlag::EarlyFragmentTests))\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeEarlyFragmentTests);\n }\n void emitDclIndexRange(\n const DxbcShaderInstruction& ins) {\n // dcl_index_range has one operand:\n // (0) Range start, either an input or output register\n // (1) Range end\n uint32_t index = ins.dst[0].idxDim - 1u;\n\n DxbcIndexRange range = { };\n range.type = ins.dst[0].type;\n range.start = ins.dst[0].idx[index].offset;\n range.length = ins.imm[0].u32;\n\n m_indexRanges.push_back(range);\n }\n void emitDclTemps(\n const DxbcShaderInstruction& ins) {\n // dcl_temps has one operand:\n // (imm0) Number of temp registers\n\n // Ignore this and declare temps on demand.\n }\n void emitDclIndexableTemp(\n const DxbcShaderInstruction& ins) {\n // dcl_indexable_temps has three operands:\n // (imm0) Array register index (x#)\n // (imm1) Number of vectors stored in the array\n // (imm2) Component count of each individual vector. 
This is\n // always 4 in fxc-generated binaries and therefore useless.\n const uint32_t regId = ins.imm[0].u32;\n\n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = m_analysis->xRegMasks.at(regId).minComponents();\n info.type.alength = ins.imm[1].u32;\n info.sclass = spv::StorageClassPrivate;\n\n if (regId >= m_xRegs.size())\n m_xRegs.resize(regId + 1);\n \n m_xRegs.at(regId).ccount = info.type.ccount;\n m_xRegs.at(regId).alength = info.type.alength;\n m_xRegs.at(regId).varId = emitNewVariable(info);\n \n m_module.setDebugName(m_xRegs.at(regId).varId,\n str::format(\"x\", regId).c_str());\n }\n void emitDclInterfaceReg(\n const DxbcShaderInstruction& ins) {\n switch (ins.dst[0].type) {\n case DxbcOperandType::InputControlPoint:\n if (m_programInfo.type() != DxbcProgramType::HullShader)\n break;\n [[fallthrough]];\n\n case DxbcOperandType::Input:\n case DxbcOperandType::Output: {\n // dcl_input and dcl_output instructions\n // have the following operands:\n // (dst0) The register to declare\n // (imm0) The system value (optional)\n uint32_t regDim = 0;\n uint32_t regIdx = 0;\n \n // In the vertex and fragment shader stage, the\n // operand indices will have the following format:\n // (0) Register index\n // \n // In other stages, the input and output registers\n // may be declared as arrays of a fixed size:\n // (0) Array length\n // (1) Register index\n if (ins.dst[0].idxDim == 2) {\n regDim = ins.dst[0].idx[0].offset;\n regIdx = ins.dst[0].idx[1].offset;\n } else if (ins.dst[0].idxDim == 1) {\n regIdx = ins.dst[0].idx[0].offset;\n } else {\n Logger::err(str::format(\n \"DxbcCompiler: \", ins.op,\n \": Invalid index dimension\"));\n return;\n }\n \n // This declaration may map an output register to a system\n // value. 
If that is the case, the system value type will\n // be stored in the second operand.\n const bool hasSv =\n ins.op == DxbcOpcode::DclInputSgv\n || ins.op == DxbcOpcode::DclInputSiv\n || ins.op == DxbcOpcode::DclInputPsSgv\n || ins.op == DxbcOpcode::DclInputPsSiv\n || ins.op == DxbcOpcode::DclOutputSgv\n || ins.op == DxbcOpcode::DclOutputSiv;\n \n DxbcSystemValue sv = DxbcSystemValue::None;\n \n if (hasSv)\n sv = static_cast(ins.imm[0].u32);\n \n // In the pixel shader, inputs are declared with an\n // interpolation mode that is part of the op token.\n const bool hasInterpolationMode =\n ins.op == DxbcOpcode::DclInputPs\n || ins.op == DxbcOpcode::DclInputPsSiv;\n \n DxbcInterpolationMode im = DxbcInterpolationMode::Undefined;\n \n if (hasInterpolationMode)\n im = ins.controls.interpolation();\n \n // Declare the actual input/output variable\n switch (ins.op) {\n case DxbcOpcode::DclInput:\n case DxbcOpcode::DclInputSgv:\n case DxbcOpcode::DclInputSiv:\n case DxbcOpcode::DclInputPs:\n case DxbcOpcode::DclInputPsSgv:\n case DxbcOpcode::DclInputPsSiv:\n this->emitDclInput(regIdx, regDim, ins.dst[0].mask, sv, im);\n break;\n \n case DxbcOpcode::DclOutput:\n case DxbcOpcode::DclOutputSgv:\n case DxbcOpcode::DclOutputSiv:\n this->emitDclOutput(regIdx, regDim, ins.dst[0].mask, sv, im);\n break;\n \n default:\n Logger::err(str::format(\n \"DxbcCompiler: Unexpected opcode: \",\n ins.op));\n }\n } break;\n \n case DxbcOperandType::InputThreadId: {\n m_cs.builtinGlobalInvocationId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInGlobalInvocationId,\n \"vThreadId\");\n } break;\n \n case DxbcOperandType::InputThreadGroupId: {\n m_cs.builtinWorkgroupId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInWorkgroupId,\n \"vThreadGroupId\");\n } break;\n \n case DxbcOperandType::InputThreadIdInGroup: {\n m_cs.builtinLocalInvocationId = emitNewBuiltinVariable({\n { 
DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationId,\n \"vThreadIdInGroup\");\n } break;\n \n case DxbcOperandType::InputThreadIndexInGroup: {\n m_cs.builtinLocalInvocationIndex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationIndex,\n \"vThreadIndexInGroup\");\n } break;\n \n case DxbcOperandType::InputCoverageMask: {\n m_ps.builtinSampleMaskIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 1 },\n spv::StorageClassInput },\n spv::BuiltInSampleMask,\n \"vCoverage\");\n } break;\n \n case DxbcOperandType::OutputCoverageMask: {\n m_ps.builtinSampleMaskOut = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 1 },\n spv::StorageClassOutput },\n spv::BuiltInSampleMask,\n \"oMask\");\n } break;\n \n case DxbcOperandType::OutputDepth: {\n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeDepthReplacing);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepth\");\n } break;\n \n case DxbcOperandType::OutputStencilRef: {\n m_module.enableExtension(\"SPV_EXT_shader_stencil_export\");\n m_module.enableCapability(spv::CapabilityStencilExportEXT);\n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeStencilRefReplacingEXT);\n m_ps.builtinStencilRef = emitNewBuiltinVariable({\n { DxbcScalarType::Sint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragStencilRefEXT,\n \"oStencilRef\");\n } break;\n\n case DxbcOperandType::OutputDepthGe: {\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthReplacing);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthGreater);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepthGe\");\n } break;\n \n case DxbcOperandType::OutputDepthLe: {\n 
m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthReplacing);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthLess);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepthLe\");\n } break;\n \n case DxbcOperandType::InputPrimitiveId: {\n m_primitiveIdIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInPrimitiveId,\n \"vPrim\");\n } break;\n \n case DxbcOperandType::InputDomainPoint: {\n m_ds.builtinTessCoord = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInTessCoord,\n \"vDomain\");\n } break;\n \n case DxbcOperandType::InputForkInstanceId:\n case DxbcOperandType::InputJoinInstanceId: {\n auto phase = this->getCurrentHsForkJoinPhase();\n \n phase->instanceIdPtr = m_module.newVar(\n m_module.defPointerType(\n m_module.defIntType(32, 0),\n spv::StorageClassFunction),\n spv::StorageClassFunction);\n \n m_module.opStore(phase->instanceIdPtr, phase->instanceId);\n m_module.setDebugName(phase->instanceIdPtr,\n ins.dst[0].type == DxbcOperandType::InputForkInstanceId\n ? 
\"vForkInstanceId\" : \"vJoinInstanceId\");\n } break;\n \n case DxbcOperandType::OutputControlPointId: {\n // This system value map to the invocation\n // ID, which has been declared already.\n } break;\n \n case DxbcOperandType::InputPatchConstant:\n case DxbcOperandType::OutputControlPoint: {\n // These have been declared as global input and\n // output arrays, so there's nothing left to do.\n } break;\n \n case DxbcOperandType::InputGsInstanceId: {\n m_gs.builtinInvocationId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInvocationId,\n \"vInstanceID\");\n } break;\n \n case DxbcOperandType::InputInnerCoverage: {\n m_module.enableExtension(\"SPV_EXT_fragment_fully_covered\");\n m_module.enableCapability(spv::CapabilityFragmentFullyCoveredEXT);\n\n // This is bool in SPIR-V but uint32 in DXBC. A bool value of\n // false must be 0, and bit 1 must be set to represent true.\n uint32_t builtinId = emitNewBuiltinVariable({\n { DxbcScalarType::Bool, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInFullyCoveredEXT,\n nullptr);\n\n m_ps.builtinInnerCoverageId = emitNewVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassPrivate });\n\n m_module.setDebugName(m_ps.builtinInnerCoverageId, \"vInnerCoverage\");\n\n uint32_t boolTypeId = m_module.defBoolType();\n uint32_t uintTypeId = m_module.defIntType(32, 0);\n\n m_module.opStore(m_ps.builtinInnerCoverageId,\n m_module.opSelect(uintTypeId,\n m_module.opLoad(boolTypeId, builtinId),\n m_module.constu32(1),\n m_module.constu32(0)));\n } break;\n\n default:\n Logger::err(str::format(\n \"DxbcCompiler: Unsupported operand type declaration: \",\n ins.dst[0].type));\n \n }\n }\n void emitDclInput(\n uint32_t regIdx,\n uint32_t regDim,\n DxbcRegMask regMask,\n DxbcSystemValue sv,\n DxbcInterpolationMode im) {\n // Avoid declaring the same variable multiple times.\n // This may happen when multiple system values are\n // mapped to different parts of the same 
register.\n if (m_vRegs.at(regIdx).id == 0 && sv == DxbcSystemValue::None) {\n const DxbcVectorType regType = getInputRegType(regIdx);\n \n DxbcRegisterInfo info;\n info.type.ctype = regType.ctype;\n info.type.ccount = regType.ccount;\n info.type.alength = regDim;\n info.sclass = spv::StorageClassInput;\n \n const uint32_t varId = emitNewVariable(info);\n \n m_module.decorateLocation(varId, regIdx);\n m_module.setDebugName(varId, str::format(\"v\", regIdx).c_str());\n \n m_vRegs.at(regIdx) = { regType, varId };\n \n // Interpolation mode, used in pixel shaders\n if (im == DxbcInterpolationMode::Constant)\n m_module.decorate(varId, spv::DecorationFlat);\n \n if (im == DxbcInterpolationMode::LinearCentroid\n || im == DxbcInterpolationMode::LinearNoPerspectiveCentroid)\n m_module.decorate(varId, spv::DecorationCentroid);\n \n if (im == DxbcInterpolationMode::LinearNoPerspective\n || im == DxbcInterpolationMode::LinearNoPerspectiveCentroid\n || im == DxbcInterpolationMode::LinearNoPerspectiveSample)\n m_module.decorate(varId, spv::DecorationNoPerspective);\n \n if (im == DxbcInterpolationMode::LinearSample\n || im == DxbcInterpolationMode::LinearNoPerspectiveSample) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n m_module.decorate(varId, spv::DecorationSample);\n }\n\n if (m_moduleInfo.options.forceSampleRateShading) {\n if (im == DxbcInterpolationMode::Linear\n || im == DxbcInterpolationMode::LinearNoPerspective) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n m_module.decorate(varId, spv::DecorationSample);\n }\n }\n\n // Declare the input slot as defined\n m_inputMask |= 1u << regIdx;\n m_vArrayLength = std::max(m_vArrayLength, regIdx + 1);\n } else if (sv != DxbcSystemValue::None) {\n // Add a new system value mapping if needed\n bool skipSv = sv == DxbcSystemValue::ClipDistance\n || sv == DxbcSystemValue::CullDistance;\n \n if (!skipSv)\n m_vMappings.push_back({ regIdx, regMask, sv });\n }\n }\n void emitDclOutput(\n uint32_t 
regIdx,\n uint32_t regDim,\n DxbcRegMask regMask,\n DxbcSystemValue sv,\n DxbcInterpolationMode im) {\n // Add a new system value mapping if needed. Clip\n // and cull distances are handled separately.\n if (sv != DxbcSystemValue::None\n && sv != DxbcSystemValue::ClipDistance\n && sv != DxbcSystemValue::CullDistance)\n m_oMappings.push_back({ regIdx, regMask, sv });\n \n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n // Hull shaders don't use standard outputs\n if (getCurrentHsForkJoinPhase() != nullptr)\n m_hs.outputPerPatchMask |= 1 << regIdx;\n } else if (m_oRegs.at(regIdx).id == 0) {\n // Avoid declaring the same variable multiple times.\n // This may happen when multiple system values are\n // mapped to different parts of the same register.\n const DxbcVectorType regType = getOutputRegType(regIdx);\n \n DxbcRegisterInfo info;\n info.type.ctype = regType.ctype;\n info.type.ccount = regType.ccount;\n info.type.alength = regDim;\n info.sclass = spv::StorageClassOutput;\n\n // In xfb mode, we set up the actual\n // output vars when emitting a vertex\n if (m_moduleInfo.xfb != nullptr)\n info.sclass = spv::StorageClassPrivate;\n \n // In geometry shaders, don't duplicate system value outputs\n // to stay within device limits. 
The pixel shader will read\n // all GS system value outputs as system value inputs.\n if (m_programInfo.type() == DxbcProgramType::GeometryShader && sv != DxbcSystemValue::None)\n info.sclass = spv::StorageClassPrivate;\n\n const uint32_t varId = this->emitNewVariable(info);\n m_module.setDebugName(varId, str::format(\"o\", regIdx).c_str());\n \n if (info.sclass == spv::StorageClassOutput) {\n m_module.decorateLocation(varId, regIdx);\n\n // Add index decoration for potential dual-source blending\n if (m_programInfo.type() == DxbcProgramType::PixelShader)\n m_module.decorateIndex(varId, 0);\n\n // Declare vertex positions in all stages as invariant, even if\n // this is not the last stage, to help with potential Z fighting.\n if (sv == DxbcSystemValue::Position && m_moduleInfo.options.invariantPosition)\n m_module.decorate(varId, spv::DecorationInvariant);\n }\n \n m_oRegs.at(regIdx) = { regType, varId };\n \n // Declare the output slot as defined\n m_outputMask |= 1u << regIdx;\n }\n }\n void emitDclConstantBuffer(\n const DxbcShaderInstruction& ins) {\n // dcl_constant_buffer has one operand with two indices:\n // (0) Constant buffer register ID (cb#)\n // (1) Number of constants in the buffer\n uint32_t bufferId = ins.dst[0].idx[0].offset;\n uint32_t elementCount = ins.dst[0].idx[1].offset;\n\n // With dynamic indexing, games will often index constant buffers\n // out of bounds. Declare an upper bound to stay within spec.\n if (ins.controls.accessType() == DxbcConstantBufferAccessType::DynamicallyIndexed)\n elementCount = 4096;\n\n this->emitDclConstantBufferVar(bufferId, elementCount, 4u,\n str::format(\"cb\", bufferId).c_str());\n }\n void emitDclConstantBufferVar(\n uint32_t regIdx,\n uint32_t numConstants,\n uint32_t numComponents,\n const char* name) {\n // Uniform buffer data is stored as a fixed-size array\n // of 4x32-bit vectors. 
SPIR-V requires explicit strides.\n const uint32_t arrayType = m_module.defArrayTypeUnique(\n getVectorTypeId({ DxbcScalarType::Float32, numComponents }),\n m_module.constu32(numConstants));\n m_module.decorateArrayStride(arrayType, sizeof(uint32_t) * numComponents);\n \n // SPIR-V requires us to put that array into a\n // struct and decorate that struct as a block.\n const uint32_t structType = m_module.defStructTypeUnique(1, &arrayType);\n \n m_module.decorate(structType, spv::DecorationBlock);\n m_module.memberDecorateOffset(structType, 0, 0);\n \n m_module.setDebugName (structType, str::format(name, \"_t\").c_str());\n m_module.setDebugMemberName (structType, 0, \"m\");\n \n // Variable that we'll use to access the buffer\n const uint32_t varId = m_module.newVar(\n m_module.defPointerType(structType, spv::StorageClassUniform),\n spv::StorageClassUniform);\n \n m_module.setDebugName(varId, name);\n \n // Compute the DXVK binding slot index for the buffer.\n // D3D11 needs to bind the actual buffers to this slot.\n uint32_t bindingId = computeConstantBufferBinding(\n m_programInfo.type(), regIdx);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n\n DxbcConstantBuffer buf;\n buf.varId = varId;\n buf.size = numConstants;\n m_constantBuffers.at(regIdx) = buf;\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER };\n binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.access = VK_ACCESS_UNIFORM_READ_BIT;\n binding.resourceBinding = bindingId;\n binding.uboSet = true;\n m_bindings.push_back(binding);\n }\n void emitDclSampler(\n const DxbcShaderInstruction& ins) {\n // dclSampler takes one operand:\n // (dst0) The sampler register to declare\n const uint32_t samplerId = ins.dst[0].idx[0].offset;\n \n // The sampler type is opaque, but we still have to\n // define a pointer and a variable in oder to use it\n const uint32_t samplerType = 
m_module.defSamplerType();\n const uint32_t samplerPtrType = m_module.defPointerType(\n samplerType, spv::StorageClassUniformConstant);\n \n // Define the sampler variable\n const uint32_t varId = m_module.newVar(samplerPtrType,\n spv::StorageClassUniformConstant);\n m_module.setDebugName(varId,\n str::format(\"s\", samplerId).c_str());\n \n m_samplers.at(samplerId).varId = varId;\n m_samplers.at(samplerId).typeId = samplerType;\n \n // Compute binding slot index for the sampler\n uint32_t bindingId = computeSamplerBinding(\n m_programInfo.type(), samplerId);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_SAMPLER };\n binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.resourceBinding = bindingId;\n m_bindings.push_back(binding);\n }\n void emitDclStream(\n const DxbcShaderInstruction& ins) {\n if (ins.dst[0].idx[0].offset != 0 && m_moduleInfo.xfb == nullptr)\n Logger::err(\"Dxbc: Multiple streams not supported\");\n }\n void emitDclResourceTyped(\n const DxbcShaderInstruction& ins) {\n // dclResource takes two operands:\n // (dst0) The resource register ID\n // (imm0) The resource return type\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n \n // We also handle unordered access views here\n const bool isUav = ins.op == DxbcOpcode::DclUavTyped;\n \n if (isUav) {\n if (m_moduleInfo.options.supportsTypedUavLoadR32)\n m_module.enableCapability(spv::CapabilityStorageImageReadWithoutFormat);\n m_module.enableCapability(spv::CapabilityStorageImageWriteWithoutFormat);\n }\n \n // Defines the type of the resource (texture2D, ...)\n const DxbcResourceDim resourceType = ins.controls.resourceDim();\n \n // Defines the type of a read operation. 
DXBC has the ability\n // to define four different types whereas SPIR-V only allows\n // one, but in practice this should not be much of a problem.\n auto xType = static_cast(\n bit::extract(ins.imm[0].u32, 0, 3));\n auto yType = static_cast(\n bit::extract(ins.imm[0].u32, 4, 7));\n auto zType = static_cast(\n bit::extract(ins.imm[0].u32, 8, 11));\n auto wType = static_cast(\n bit::extract(ins.imm[0].u32, 12, 15));\n \n if ((xType != yType) || (xType != zType) || (xType != wType))\n Logger::warn(\"DxbcCompiler: dcl_resource: Ignoring resource return types\");\n \n // Declare the actual sampled type\n const DxbcScalarType sampledType = [xType] {\n switch (xType) {\n // FIXME is this correct? There's no documentation about it\n case DxbcResourceReturnType::Mixed: return DxbcScalarType::Uint32;\n // FIXME do we have to manually clamp writes to SNORM/UNORM resources?\n case DxbcResourceReturnType::Snorm: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Unorm: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Float: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Sint: return DxbcScalarType::Sint32;\n case DxbcResourceReturnType::Uint: return DxbcScalarType::Uint32;\n default: throw DxvkError(str::format(\"DxbcCompiler: Invalid sampled type: \", xType));\n }\n }();\n \n // Declare the resource type\n const uint32_t sampledTypeId = getScalarTypeId(sampledType);\n const DxbcImageInfo typeInfo = getResourceType(resourceType, isUav); \n \n // Declare additional capabilities if necessary\n switch (resourceType) {\n case DxbcResourceDim::Buffer:\n m_module.enableCapability(isUav\n ? spv::CapabilityImageBuffer\n : spv::CapabilitySampledBuffer);\n break;\n \n case DxbcResourceDim::Texture1D:\n case DxbcResourceDim::Texture1DArr:\n m_module.enableCapability(isUav\n ? 
spv::CapabilityImage1D\n : spv::CapabilitySampled1D);\n break;\n \n case DxbcResourceDim::TextureCubeArr:\n m_module.enableCapability(\n spv::CapabilitySampledCubeArray);\n break;\n \n default:\n // No additional capabilities required\n break;\n }\n \n // If the read-without-format capability is not set and this\n // image is access via a typed load, or if atomic operations\n // are used,, we must define the image format explicitly.\n spv::ImageFormat imageFormat = spv::ImageFormatUnknown;\n \n if (isUav) {\n if ((m_analysis->uavInfos[registerId].accessAtomicOp)\n || (m_analysis->uavInfos[registerId].accessTypedLoad\n && !m_moduleInfo.options.supportsTypedUavLoadR32))\n imageFormat = getScalarImageFormat(sampledType);\n }\n \n // We do not know whether the image is going to be used as\n // a color image or a depth image yet, but we can pick the\n // correct type when creating a sampled image object.\n const uint32_t imageTypeId = m_module.defImageType(sampledTypeId,\n typeInfo.dim, 0, typeInfo.array, typeInfo.ms, typeInfo.sampled,\n imageFormat);\n \n // We'll declare the texture variable with the color type\n // and decide which one to use when the texture is sampled.\n const uint32_t resourcePtrType = m_module.defPointerType(\n imageTypeId, spv::StorageClassUniformConstant);\n \n const uint32_t varId = m_module.newVar(resourcePtrType,\n spv::StorageClassUniformConstant);\n \n m_module.setDebugName(varId,\n str::format(isUav ? \"u\" : \"t\", registerId).c_str());\n \n // Compute the DXVK binding slot index for the resource.\n // D3D11 needs to bind the actual resource to this slot.\n uint32_t bindingId = isUav\n ? 
computeUavBinding(m_programInfo.type(), registerId)\n : computeSrvBinding(m_programInfo.type(), registerId);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n // Declare a specialization constant which will\n // store whether or not the resource is bound.\n if (isUav) {\n DxbcUav uav;\n uav.type = DxbcResourceType::Typed;\n uav.imageInfo = typeInfo;\n uav.varId = varId;\n uav.ctrId = 0;\n uav.sampledType = sampledType;\n uav.sampledTypeId = sampledTypeId;\n uav.imageTypeId = imageTypeId;\n uav.structStride = 0;\n uav.coherence = getUavCoherence(registerId, ins.controls.uavFlags());\n uav.isRawSsbo = false;\n m_uavs.at(registerId) = uav;\n } else {\n DxbcShaderResource res;\n res.type = DxbcResourceType::Typed;\n res.imageInfo = typeInfo;\n res.varId = varId;\n res.sampledType = sampledType;\n res.sampledTypeId = sampledTypeId;\n res.imageTypeId = imageTypeId;\n res.colorTypeId = imageTypeId;\n res.depthTypeId = 0;\n res.structStride = 0;\n res.isRawSsbo = false;\n \n if ((sampledType == DxbcScalarType::Float32)\n && (resourceType == DxbcResourceDim::Texture1D\n || resourceType == DxbcResourceDim::Texture1DArr\n || resourceType == DxbcResourceDim::Texture2D\n || resourceType == DxbcResourceDim::Texture2DArr\n || resourceType == DxbcResourceDim::TextureCube\n || resourceType == DxbcResourceDim::TextureCubeArr)) {\n res.depthTypeId = m_module.defImageType(sampledTypeId,\n typeInfo.dim, 1, typeInfo.array, typeInfo.ms, typeInfo.sampled,\n spv::ImageFormatUnknown);\n }\n \n m_textures.at(registerId) = res;\n }\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { };\n binding.viewType = typeInfo.vtype;\n binding.resourceBinding = bindingId;\n binding.isMultisampled = typeInfo.ms;\n\n if (isUav) {\n binding.descriptorType = resourceType == DxbcResourceDim::Buffer\n ? 
        VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
        : VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
      binding.access = m_analysis->uavInfos[registerId].accessFlags;

      if (!m_analysis->uavInfos[registerId].nonInvariantAccess)
        binding.accessOp = m_analysis->uavInfos[registerId].accessOp;

      // Tell the driver about read-only / write-only usage so it can optimize
      if (!(binding.access & VK_ACCESS_SHADER_WRITE_BIT))
        m_module.decorate(varId, spv::DecorationNonWritable);
      if (!(binding.access & VK_ACCESS_SHADER_READ_BIT))
        m_module.decorate(varId, spv::DecorationNonReadable);
    } else {
      binding.descriptorType = resourceType == DxbcResourceDim::Buffer
        ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
        : VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
      binding.access = VK_ACCESS_SHADER_READ_BIT;
    }

    m_bindings.push_back(binding);
  }


  // Declares a raw or structured buffer resource (SRV or UAV) and
  // registers its descriptor binding with the shader interface.
  void emitDclResourceRawStructured(
    const DxbcShaderInstruction& ins) {
    // dcl_resource_raw and dcl_uav_raw take one argument:
    //    (dst0) The resource register ID
    // dcl_resource_structured and dcl_uav_structured take two arguments:
    //    (dst0) The resource register ID
    //    (imm0) Structure stride, in bytes
    const uint32_t registerId = ins.dst[0].idx[0].offset;

    const bool isUav = ins.op == DxbcOpcode::DclUavRaw
                    || ins.op == DxbcOpcode::DclUavStructured;

    const bool isStructured = ins.op == DxbcOpcode::DclUavStructured
                           || ins.op == DxbcOpcode::DclResourceStructured;

    const DxbcScalarType sampledType = DxbcScalarType::Uint32;
    const uint32_t sampledTypeId = getScalarTypeId(sampledType);

    const DxbcImageInfo typeInfo = { spv::DimBuffer, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_MAX_ENUM };

    // Declare the resource type
    uint32_t resTypeId = 0;
    uint32_t varId = 0;

    // Write back resource info
    DxbcResourceType resType = isStructured
      ? DxbcResourceType::Structured
      : DxbcResourceType::Raw;

    uint32_t resStride = isStructured
      ? ins.imm[0].u32
      : 0;

    // Lowest set bit of the stride gives the guaranteed alignment of any
    // element offset; raw buffers are assumed to be 16-byte aligned.
    uint32_t resAlign = isStructured
      ? (resStride & -resStride)
      : 16;

    // Compute the DXVK binding slot index for the resource.
    uint32_t bindingId = isUav
      ? computeUavBinding(m_programInfo.type(), registerId)
      : computeSrvBinding(m_programInfo.type(), registerId);

    // Test whether we should use a raw SSBO for this resource
    bool hasSparseFeedback = isUav
      ? m_analysis->uavInfos[registerId].sparseFeedback
      : m_analysis->srvInfos[registerId].sparseFeedback;

    bool useRawSsbo = m_moduleInfo.options.minSsboAlignment <= resAlign && !hasSparseFeedback;

    if (useRawSsbo) {
      // SSBO path: struct { uint m[]; } with a 4-byte array stride
      uint32_t elemType   = getScalarTypeId(DxbcScalarType::Uint32);
      uint32_t arrayType  = m_module.defRuntimeArrayTypeUnique(elemType);
      uint32_t structType = m_module.defStructTypeUnique(1, &arrayType);
      uint32_t ptrType    = m_module.defPointerType(structType, spv::StorageClassStorageBuffer);

      resTypeId = m_module.defPointerType(elemType, spv::StorageClassStorageBuffer);
      varId     = m_module.newVar(ptrType, spv::StorageClassStorageBuffer);

      m_module.decorateArrayStride(arrayType, sizeof(uint32_t));
      m_module.decorate(structType, spv::DecorationBlock);
      m_module.memberDecorateOffset(structType, 0, 0);

      m_module.setDebugName(structType,
        str::format(isUav ? "u" : "t", registerId, "_t").c_str());
      m_module.setDebugMemberName(structType, 0, "m");
    } else {
      // Structured and raw buffers are represented as
      // texel buffers consisting of 32-bit integers.
      m_module.enableCapability(isUav
        ? spv::CapabilityImageBuffer
        : spv::CapabilitySampledBuffer);

      resTypeId = m_module.defImageType(sampledTypeId,
        typeInfo.dim, 0, typeInfo.array, typeInfo.ms, typeInfo.sampled,
        spv::ImageFormatR32ui);

      varId = m_module.newVar(
        m_module.defPointerType(resTypeId, spv::StorageClassUniformConstant),
        spv::StorageClassUniformConstant);
    }

    m_module.setDebugName(varId,
      str::format(isUav ? "u" : "t", registerId).c_str());

    m_module.decorateDescriptorSet(varId, 0);
    m_module.decorateBinding(varId, bindingId);

    // Record the resource properties so that load/store
    // instructions can look them up by register ID later.
    if (isUav) {
      DxbcUav uav;
      uav.type          = resType;
      uav.imageInfo     = typeInfo;
      uav.varId         = varId;
      uav.ctrId         = 0;
      uav.sampledType   = sampledType;
      uav.sampledTypeId = sampledTypeId;
      uav.imageTypeId   = resTypeId;
      uav.structStride  = resStride;
      uav.coherence     = getUavCoherence(registerId, ins.controls.uavFlags());
      uav.isRawSsbo     = useRawSsbo;
      m_uavs.at(registerId) = uav;
    } else {
      DxbcShaderResource res;
      res.type          = resType;
      res.imageInfo     = typeInfo;
      res.varId         = varId;
      res.sampledType   = sampledType;
      res.sampledTypeId = sampledTypeId;
      res.imageTypeId   = resTypeId;
      res.colorTypeId   = resTypeId;
      res.depthTypeId   = 0;
      res.structStride  = resStride;
      res.isRawSsbo     = useRawSsbo;
      m_textures.at(registerId) = res;
    }

    // Store descriptor info for the shader interface
    DxvkBindingInfo binding = { };
    binding.descriptorType = useRawSsbo
      ? VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
      : (isUav ? VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
    binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;
    binding.resourceBinding = bindingId;
    binding.access = VK_ACCESS_SHADER_READ_BIT;

    if (isUav) {
      binding.access = m_analysis->uavInfos[registerId].accessFlags;

      if (!m_analysis->uavInfos[registerId].nonInvariantAccess)
        binding.accessOp = m_analysis->uavInfos[registerId].accessOp;
    }

    if (useRawSsbo || isUav) {
      if (!(binding.access & VK_ACCESS_SHADER_WRITE_BIT))
        m_module.decorate(varId, spv::DecorationNonWritable);
      if (!(binding.access & VK_ACCESS_SHADER_READ_BIT))
        m_module.decorate(varId, spv::DecorationNonReadable);
    }

    m_bindings.push_back(binding);

    // If supported, we'll be using raw access chains to access this
    if (!m_hasRawAccessChains && m_moduleInfo.options.supportsRawAccessChains) {
      m_module.enableExtension("SPV_NV_raw_access_chains");
      m_module.enableCapability(spv::CapabilityRawAccessChainsNV);

      m_hasRawAccessChains = true;
    }
  }


  // Declares a thread-group shared memory region (g# register)
  // as a Workgroup-storage array of 32-bit integers.
  void emitDclThreadGroupSharedMemory(
    const DxbcShaderInstruction& ins) {
    // dcl_tgsm_raw takes two arguments:
    //    (dst0) The resource register ID
    //    (imm0) Block size, in bytes
    // dcl_tgsm_structured takes three arguments:
    //    (dst0) The resource register ID
    //    (imm0) Structure stride, in bytes
    //    (imm1) Structure count
    const bool isStructured = ins.op == DxbcOpcode::DclThreadGroupSharedMemoryStructured;

    const uint32_t regId = ins.dst[0].idx[0].offset;

    if (regId >= m_gRegs.size())
      m_gRegs.resize(regId + 1);

    const uint32_t elementStride = isStructured ? ins.imm[0].u32 : 0;
    const uint32_t elementCount  = isStructured ? ins.imm[1].u32 : ins.imm[0].u32;

    DxbcRegisterInfo varInfo;
    varInfo.type.ctype   = DxbcScalarType::Uint32;
    varInfo.type.ccount  = 1;
    // Byte sizes divided by 4: the backing store is an array of DWORDs
    varInfo.type.alength = isStructured
      ? elementCount * elementStride / 4
      : elementCount / 4;
    varInfo.sclass = spv::StorageClassWorkgroup;

    m_gRegs[regId].type = isStructured
      ? DxbcResourceType::Structured
      : DxbcResourceType::Raw;
    m_gRegs[regId].elementStride = elementStride;
    m_gRegs[regId].elementCount  = elementCount;
    m_gRegs[regId].varId = emitNewVariable(varInfo);

    m_module.setDebugName(m_gRegs[regId].varId,
      str::format("g", regId).c_str());
  }


  // Declares the geometry shader input primitive type and
  // sizes the input arrays accordingly.
  void emitDclGsInputPrimitive(
    const DxbcShaderInstruction& ins) {
    // The input primitive type is stored within the
    // control bits of the opcode token. In SPIR-V, we
    // have to define an execution mode.
    const auto mode = [&] {
      switch (ins.controls.primitive()) {
        case DxbcPrimitive::Point:       return std::make_pair(VK_PRIMITIVE_TOPOLOGY_POINT_LIST, spv::ExecutionModeInputPoints);
        case DxbcPrimitive::Line:        return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeInputLines);
        case DxbcPrimitive::Triangle:    return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeTriangles);
        case DxbcPrimitive::LineAdj:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY, spv::ExecutionModeInputLinesAdjacency);
        case DxbcPrimitive::TriangleAdj: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY, spv::ExecutionModeInputTrianglesAdjacency);
        default: throw DxvkError("DxbcCompiler: Unsupported primitive type");
      }
    }();

    m_gs.inputPrimitive = ins.controls.primitive();
    m_module.setExecutionMode(m_entryPointId, mode.second);
    m_inputTopology = mode.first;

    emitDclInputArray(primitiveVertexCount(m_gs.inputPrimitive));
  }


  // Declares the geometry shader output primitive topology.
  void emitDclGsOutputTopology(
    const DxbcShaderInstruction& ins) {
    // The output primitive topology is stored within the
    // control bits of the opcode token. In SPIR-V, we have
    // to define an execution mode.
    auto mode = [&] {
      switch (ins.controls.primitiveTopology()) {
        case DxbcPrimitiveTopology::PointList:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_POINT_LIST, spv::ExecutionModeOutputPoints);
        case DxbcPrimitiveTopology::LineStrip:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeOutputLineStrip);
        case DxbcPrimitiveTopology::TriangleStrip: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeOutputTriangleStrip);
        default: throw DxvkError("DxbcCompiler: Unsupported primitive topology");
      }
    }();

    m_outputTopology = mode.first;
    m_module.setExecutionMode(m_entryPointId, mode.second);
  }


  // Declares the maximum number of vertices a GS invocation may emit.
  void emitDclMaxOutputVertexCount(
    const DxbcShaderInstruction& ins) {
    // dcl_max_output_vertex_count has one operand:
    //    (imm0) The maximum number of vertices
    m_gs.outputVertexCount = ins.imm[0].u32;

    m_module.setOutputVertices(m_entryPointId, m_gs.outputVertexCount);
  }


  // Declares the tessellation input control point count for
  // hull shaders (input array) or domain shaders (patch interface).
  void emitDclInputControlPointCount(
    const DxbcShaderInstruction& ins) {
    // dcl_input_control_points has the control point
    // count embedded within the opcode token.
    if (m_programInfo.type() == DxbcProgramType::HullShader) {
      m_hs.vertexCountIn = ins.controls.controlPointCount();

      emitDclInputArray(m_hs.vertexCountIn); 
    } else {
      m_ds.vertexCountIn = ins.controls.controlPointCount();

      m_ds.inputPerPatch  = emitTessInterfacePerPatch (spv::StorageClassInput);
      m_ds.inputPerVertex = emitTessInterfacePerVertex(spv::StorageClassInput, m_ds.vertexCountIn);
    }
  }


  // Declares the hull shader output control point count and
  // the per-patch / per-vertex output interfaces.
  void emitDclOutputControlPointCount(
    const DxbcShaderInstruction& ins) {
    // dcl_output_control_points has the control point
    // count embedded within the opcode token.
    m_hs.vertexCountOut = ins.controls.controlPointCount();

    m_hs.outputPerPatch  = emitTessInterfacePerPatch(spv::StorageClassPrivate);
    m_hs.outputPerVertex = emitTessInterfacePerVertex(spv::StorageClassOutput, m_hs.vertexCountOut);

    m_module.setOutputVertices(m_entryPointId, m_hs.vertexCountOut);
  }


  // Records the maximum tessellation factor declared by the hull shader.
  void emitDclHsMaxTessFactor(
    const DxbcShaderInstruction& ins) {
    m_hs.maxTessFactor = ins.imm[0].f32;
  }


  // Declares the tessellation domain (isolines / triangles / quads).
  void emitDclTessDomain(
    const DxbcShaderInstruction& ins) {
    auto mode = [&] {
      switch (ins.controls.tessDomain()) {
        case DxbcTessDomain::Isolines:  return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeIsolines);
        case DxbcTessDomain::Triangles: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeTriangles);
        case DxbcTessDomain::Quads:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeQuads);
        default: throw DxvkError("Dxbc: Invalid tess domain");
      }
    }();

    m_outputTopology = mode.first;
    m_module.setExecutionMode(m_entryPointId, mode.second);
  }


  // Declares the tessellator partitioning (spacing) mode.
  void emitDclTessPartitioning(
    const DxbcShaderInstruction& ins) {
    const spv::ExecutionMode executionMode = [&] {
      switch (ins.controls.tessPartitioning()) {
        // Pow2 has no SPIR-V equivalent; equal spacing is the closest match
        case DxbcTessPartitioning::Pow2:
        case DxbcTessPartitioning::Integer:   return spv::ExecutionModeSpacingEqual;
        case DxbcTessPartitioning::FractOdd:  return spv::ExecutionModeSpacingFractionalOdd;
        case DxbcTessPartitioning::FractEven: return spv::ExecutionModeSpacingFractionalEven;
        default: throw DxvkError("Dxbc: Invalid tess partitioning");
      }
    }();

    m_module.setExecutionMode(m_entryPointId, executionMode);
  }


  // Declares the tessellator output primitive (point mode / winding order).
  void emitDclTessOutputPrimitive(
    const DxbcShaderInstruction& ins) {
    switch (ins.controls.tessOutputPrimitive()) {
      case DxbcTessOutputPrimitive::Point:
        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModePointMode);
        break;

      // Lines need no extra execution mode
      case DxbcTessOutputPrimitive::Line:
        break;

      case DxbcTessOutputPrimitive::TriangleCw:
        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeVertexOrderCw);
        break;

      case DxbcTessOutputPrimitive::TriangleCcw:
        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeVertexOrderCcw);
        break;

      default:
        throw DxvkError("Dxbc: Invalid tess output primitive");
    }
  }


  // Declares the compute shader workgroup size.
  void emitDclThreadGroup(
    const DxbcShaderInstruction& ins) {
    // dcl_thread_group has three operands:
    //    (imm0) Number of threads in X dimension
    //    (imm1) Number of threads in Y dimension
    //    (imm2) Number of threads in Z dimension
    m_cs.workgroupSizeX = ins.imm[0].u32;
    m_cs.workgroupSizeY = ins.imm[1].u32;
    m_cs.workgroupSizeZ = ins.imm[2].u32;

    m_module.setLocalSize(m_entryPointId,
      ins.imm[0].u32, ins.imm[1].u32, ins.imm[2].u32);
  }


  // Declares the geometry shader instance count.
  void emitDclGsInstanceCount(
    const DxbcShaderInstruction& ins) {
    // dcl_gs_instance_count has one operand:
    //    (imm0) Number of geometry shader invocations
    m_module.setInvocations(m_entryPointId, ins.imm[0].u32);
    m_gs.invocationCount = ins.imm[0].u32;
  }


  // Declares the storage buffer that backs a UAV's append/consume
  // counter. Returns the SPIR-V variable ID of that buffer.
  uint32_t emitDclUavCounter(
          uint32_t                regId) {
    // Declare a structure type which holds the UAV counter.
    // The struct type and pointer type are created lazily
    // once and shared by all UAV counters in the module.
    if (m_uavCtrStructType == 0) {
      const uint32_t t_u32    = m_module.defIntType(32, 0);
      const uint32_t t_struct = m_module.defStructTypeUnique(1, &t_u32);

      m_module.decorate(t_struct, spv::DecorationBlock);
      m_module.memberDecorateOffset(t_struct, 0, 0);

      m_module.setDebugName      (t_struct, "uav_meta");
      m_module.setDebugMemberName(t_struct, 0, "ctr");

      m_uavCtrStructType  = t_struct;
      m_uavCtrPointerType = m_module.defPointerType(
        t_struct, spv::StorageClassStorageBuffer);
    }

    // Declare the buffer variable
    const uint32_t varId = m_module.newVar(
      m_uavCtrPointerType, spv::StorageClassStorageBuffer);

    m_module.setDebugName(varId,
      str::format("u", regId, "_meta").c_str());

    uint32_t bindingId = computeUavCounterBinding(
      m_programInfo.type(), regId);

    m_module.decorateDescriptorSet(varId, 0);
    m_module.decorateBinding(varId, bindingId);

    // Declare the storage buffer binding
    DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER };
    binding.resourceBinding = bindingId;
    binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;
    binding.access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
    m_bindings.push_back(binding);

    return varId;
  }


  // Declares the immediate constant buffer from a custom-data block,
  // choosing between a baked constant array and a UBO depending on size.
  void emitDclImmediateConstantBuffer(
    const DxbcShaderInstruction& ins) {
    if (m_icbArray)
      throw DxvkError("DxbcCompiler: Immediate constant buffer already declared");

    if ((ins.customDataSize & 0x3) != 0)
      throw DxvkError("DxbcCompiler: Immediate constant buffer size not a multiple of four DWORDs");

    // A lot of the time we'll be dealing with a scalar or vec2
    // array here, there's no reason to emit all those zeroes.
    // Scan the data for the widest non-zero component index.
    uint32_t componentCount = 1u;

    for (uint32_t i = 0; i < ins.customDataSize; i += 4u) {
      for (uint32_t c = componentCount; c < 4u; c++) {
        if (ins.customData[i + c])
          componentCount = c + 1u;
      }

      if (componentCount == 4u)
        break;
    }

    uint32_t vectorCount = (ins.customDataSize / 4u);
    uint32_t dwordCount = vectorCount * componentCount;

    if (dwordCount <= Icb_MaxBakedDwords) {
      this->emitDclImmediateConstantBufferBaked(
        ins.customDataSize, ins.customData, componentCount);
    } else {
      this->emitDclImmediateConstantBufferUbo(
        ins.customDataSize, ins.customData, componentCount);
    }
  }


  // Bakes a small immediate constant buffer directly into the module
  // as a constant-initialized Private array.
  void emitDclImmediateConstantBufferBaked(
          uint32_t                dwordCount,
    const uint32_t*               dwordArray,
          uint32_t                componentCount) {
    // Declare individual vector constants as 4x32-bit vectors
    // NOTE(review): template arguments appear to have been stripped from
    // this declaration in transit (small_vector<...>) — restore from upstream.
    small_vector vectorIds;

    DxbcVectorType vecType;
    vecType.ctype  = DxbcScalarType::Uint32;
    vecType.ccount = componentCount;

    uint32_t vectorTypeId = getVectorTypeId(vecType);

    for (uint32_t i = 0; i < dwordCount; i += 4u) {
      // NOTE(review): std::array element type/size missing here as well.
      std::array scalarIds = { };

      for (uint32_t c = 0; c < componentCount; c++)
        scalarIds[c] = m_module.constu32(dwordArray[i + c]);

      uint32_t id = scalarIds[0];

      if (componentCount > 1u)
        id = m_module.constComposite(vectorTypeId, componentCount, scalarIds.data());

      vectorIds.push_back(id);
    }

    // Pad array with one entry of zeroes so that we can
    // handle out-of-bounds accesses more conveniently.
    vectorIds.push_back(emitBuildZeroVector(vecType).id);

    // Declare the array that contains all the vectors
    DxbcArrayType arrInfo;
    arrInfo.ctype   = DxbcScalarType::Uint32;
    arrInfo.ccount  = componentCount;
    arrInfo.alength = vectorIds.size();

    uint32_t arrayTypeId = getArrayTypeId(arrInfo);
    uint32_t arrayId = m_module.constComposite(
      arrayTypeId, vectorIds.size(), vectorIds.data());

    // Declare the variable that will hold the constant
    // data and initialize it with the constant array.
    uint32_t pointerTypeId = m_module.defPointerType(
      arrayTypeId, spv::StorageClassPrivate);

    m_icbArray = m_module.newVarInit(
      pointerTypeId, spv::StorageClassPrivate,
      arrayId);

    m_module.setDebugName(m_icbArray, "icb");
    m_module.decorate(m_icbArray, spv::DecorationNonWritable);

    m_icbComponents = componentCount;
    m_icbSize = dwordCount / 4u;
  }


  // Declares a large immediate constant buffer as a uniform buffer;
  // the packed data is stored in m_icbData for later upload.
  void emitDclImmediateConstantBufferUbo(
          uint32_t                dwordCount,
    const uint32_t*               dwordArray,
          uint32_t                componentCount) {
    uint32_t vectorCount = dwordCount / 4u;

    // Tightly pack vec2 or scalar arrays if possible. Don't bother with
    // vec3 since we'd rather have properly vectorized loads in that case.
    if (m_moduleInfo.options.supportsTightIcbPacking && componentCount <= 2u)
      m_icbComponents = componentCount;
    else
      m_icbComponents = 4u;

    // Immediate constant buffer can be read out of bounds, declare
    // it with the maximum possible size and rely on robustness.
    this->emitDclConstantBufferVar(Icb_BindingSlotId, 4096u, m_icbComponents, "icb");

    m_icbData.reserve(vectorCount * componentCount);

    for (uint32_t i = 0; i < dwordCount; i += 4u) {
      for (uint32_t c = 0; c < m_icbComponents; c++)
        m_icbData.push_back(dwordArray[i + c]);
    }

    m_icbSize = vectorCount;
  }


  // Dispatches custom-data blocks; only immediate constant
  // buffers are supported, other classes are warned and skipped.
  void emitCustomData(
    const DxbcShaderInstruction& ins) {
    switch (ins.customDataType) {
      case DxbcCustomDataClass::ImmConstBuf:
        return emitDclImmediateConstantBuffer(ins);

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unsupported custom data block: ",
          ins.customDataType));
    }
  }


  // Translates generic vector ALU instructions (mov, arithmetic,
  // bit operations, conversions) into the corresponding SPIR-V ops.
  void emitVectorAlu(
    const DxbcShaderInstruction& ins) {
    // NOTE(review): std::array template arguments missing (stripped in
    // transit); upstream declares this with an explicit element count.
    std::array src;

    for (uint32_t i = 0; i < ins.srcCount; i++)
      src.at(i) = emitRegisterLoad(ins.src[i], ins.dst[0].mask);

    DxbcRegisterValue dst;
    dst.type.ctype  = ins.dst[0].dataType;
    dst.type.ccount = ins.dst[0].mask.popCount();

    // Double operands occupy two mask components per value
    if (isDoubleType(ins.dst[0].dataType))
      dst.type.ccount /= 2;

    const uint32_t typeId = getVectorTypeId(dst.type);

    switch (ins.op) {
      /////////////////////
      // Move instructions
      case DxbcOpcode::Mov:
      case DxbcOpcode::DMov:
        dst.id = src.at(0).id;
        break;

      /////////////////////////////////////
      // ALU operations on float32 numbers
      case DxbcOpcode::Add:
      case DxbcOpcode::DAdd:
        dst.id = m_module.opFAdd(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Div:
      case DxbcOpcode::DDiv:
        dst.id = m_module.opFDiv(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Exp:
        dst.id = m_module.opExp2(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Frc:
        dst.id = m_module.opFract(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Log:
        dst.id = m_module.opLog2(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Mad:
      case DxbcOpcode::DFma:
        if (ins.controls.precise()) {
          // FXC only emits precise mad if the shader explicitly uses
          // the HLSL mad()/fma() intrinsics, let's preserve that.
          dst.id = m_module.opFFma(typeId,
            src.at(0).id, src.at(1).id, src.at(2).id);
        } else {
          dst.id = m_module.opFMul(typeId, src.at(0).id, src.at(1).id);
          dst.id = m_module.opFAdd(typeId, dst.id, src.at(2).id);
        }
        break;

      case DxbcOpcode::Max:
      case DxbcOpcode::DMax:
        dst.id = m_module.opNMax(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Min:
      case DxbcOpcode::DMin:
        dst.id = m_module.opNMin(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Mul:
      case DxbcOpcode::DMul:
        dst.id = m_module.opFMul(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Rcp:
        dst.id = m_module.opFDiv(typeId,
          emitBuildConstVecf32(
            1.0f, 1.0f, 1.0f, 1.0f,
            ins.dst[0].mask).id,
          src.at(0).id);
        break;

      case DxbcOpcode::DRcp:
        dst.id = m_module.opFDiv(typeId,
          emitBuildConstVecf64(1.0, 1.0,
            ins.dst[0].mask).id,
          src.at(0).id);
        break;

      case DxbcOpcode::RoundNe:
        dst.id = m_module.opRoundEven(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::RoundNi:
        dst.id = m_module.opFloor(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::RoundPi:
        dst.id = m_module.opCeil(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::RoundZ:
        dst.id = m_module.opTrunc(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Rsq:
        dst.id = m_module.opInverseSqrt(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Sqrt:
        dst.id = m_module.opSqrt(
          typeId, src.at(0).id);
        break;

      /////////////////////////////////////
      // ALU operations on signed integers
      case DxbcOpcode::IAdd:
        dst.id = m_module.opIAdd(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::IMad:
      case DxbcOpcode::UMad:
        dst.id = m_module.opIAdd(typeId,
          m_module.opIMul(typeId,
            src.at(0).id, src.at(1).id),
          src.at(2).id);
        break;

      case DxbcOpcode::IMax:
        dst.id = m_module.opSMax(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::IMin:
        dst.id = m_module.opSMin(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::INeg:
        dst.id = m_module.opSNegate(
          typeId, src.at(0).id);
        break;

      ///////////////////////////////////////
      // ALU operations on unsigned integers
      case DxbcOpcode::UMax:
        dst.id = m_module.opUMax(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::UMin:
        dst.id = m_module.opUMin(typeId,
          src.at(0).id, src.at(1).id);
        break;

      ///////////////////////////////////////
      // Bit operations on unsigned integers
      case DxbcOpcode::And:
        dst.id = m_module.opBitwiseAnd(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Not:
        dst.id = m_module.opNot(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::Or:
        dst.id = m_module.opBitwiseOr(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Xor:
        dst.id = m_module.opBitwiseXor(typeId,
          src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::CountBits:
        dst.id = m_module.opBitCount(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::BfRev:
        dst.id = m_module.opBitReverse(
          typeId, src.at(0).id);
        break;

      ///////////////////////////
      // Conversion instructions
      case DxbcOpcode::ItoF:
        dst.id = m_module.opConvertStoF(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::UtoF:
        dst.id = m_module.opConvertUtoF(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::FtoI:
        dst.id = m_module.opConvertFtoS(
          typeId, src.at(0).id);
        break;

      case DxbcOpcode::FtoU:
        dst.id = m_module.opConvertFtoU(
          typeId, src.at(0).id);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    if (ins.controls.precise() || m_precise)
      m_module.decorate(dst.id, spv::DecorationNoContraction);

    // Store computed value
    dst = emitDstOperandModifiers(dst, ins.modifiers);
    emitRegisterStore(ins.dst[0], dst);
  }


  // Translates movc and swapc into OpSelect-based conditional moves.
  void emitVectorCmov(
    const DxbcShaderInstruction& ins) {
    // movc and swapc have the following operands:
    //    (dst0) The first destination register
    //    (dst1) The second destination register (swapc only)
    //    (src0) The condition vector
    //    (src1) Vector to select from if the condition is not 0
    //    (src2) Vector to select from if the condition is 0
    DxbcRegMask condMask = ins.dst[0].mask;

    // Doubles use two mask bits per component, so fold them pairwise
    if (ins.dst[0].dataType == DxbcScalarType::Float64) {
      condMask = DxbcRegMask(
        condMask[0] && condMask[1],
        condMask[2] && condMask[3],
        false, false);
    }

    const DxbcRegisterValue condition   = emitRegisterLoad(ins.src[0], condMask);
    const DxbcRegisterValue selectTrue  = emitRegisterLoad(ins.src[1], ins.dst[0].mask);
    const DxbcRegisterValue selectFalse = emitRegisterLoad(ins.src[2], ins.dst[0].mask);

    uint32_t componentCount = condMask.popCount();

    // We'll compare against a vector of zeroes to generate a
    // boolean vector, which in turn will be used by OpSelect
    uint32_t zeroType = m_module.defIntType(32, 0);
    uint32_t boolType = m_module.defBoolType();

    uint32_t zero = m_module.constu32(0);

    if (componentCount > 1) {
      zeroType = m_module.defVectorType(zeroType, componentCount);
      boolType = m_module.defVectorType(boolType, componentCount);

      const std::array zeroVec = { zero, zero, zero, zero };
      zero = m_module.constComposite(zeroType, componentCount, zeroVec.data());
    }

    // In case of swapc, the second destination operand receives
    // the output that a cmov instruction would normally get
    const uint32_t trueIndex = ins.op == DxbcOpcode::Swapc ? 1 : 0;

    for (uint32_t i = 0; i < ins.dstCount; i++) {
      DxbcRegisterValue result;
      result.type.ctype  = ins.dst[i].dataType;
      result.type.ccount = componentCount;
      result.id = m_module.opSelect(
        getVectorTypeId(result.type),
        m_module.opINotEqual(boolType, condition.id, zero),
        i == trueIndex ? selectTrue.id : selectFalse.id,
        i != trueIndex ? selectTrue.id : selectFalse.id);

      result = emitDstOperandModifiers(result, ins.modifiers);
      emitRegisterStore(ins.dst[i], result);
    }
  }


  // Translates comparison instructions; the result is a
  // component-wise mask of ~0u (true) or 0u (false).
  void emitVectorCmp(
    const DxbcShaderInstruction& ins) {
    // Compare instructions have three operands:
    //    (dst0) The destination register
    //    (src0) The first vector to compare
    //    (src1) The second vector to compare
    uint32_t componentCount = ins.dst[0].mask.popCount();

    // For 64-bit operations, we'll return a 32-bit
    // vector, so we have to adjust the read mask
    DxbcRegMask srcMask = ins.dst[0].mask;

    if (isDoubleType(ins.src[0].dataType)) {
      srcMask = DxbcRegMask(
        componentCount > 0, componentCount > 0,
        componentCount > 1, componentCount > 1);
    }

    const std::array src = {
      emitRegisterLoad(ins.src[0], srcMask),
      emitRegisterLoad(ins.src[1], srcMask),
    };

    // Condition, which is a boolean vector used
    // to select between the ~0u and 0u vectors.
    uint32_t condition     = 0;
    uint32_t conditionType = m_module.defBoolType();

    if (componentCount > 1)
      conditionType = m_module.defVectorType(conditionType, componentCount);

    bool invert = false;

    switch (ins.op) {
      // ne/dne are implemented as inverted eq so that NaN
      // handling follows the ordered-equal semantics below
      case DxbcOpcode::Ne:
      case DxbcOpcode::DNe:
        invert = true;
        [[fallthrough]];

      case DxbcOpcode::Eq:
      case DxbcOpcode::DEq:
        condition = m_module.opFOrdEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Ge:
      case DxbcOpcode::DGe:
        condition = m_module.opFOrdGreaterThanEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::Lt:
      case DxbcOpcode::DLt:
        condition = m_module.opFOrdLessThan(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::IEq:
        condition = m_module.opIEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::IGe:
        condition = m_module.opSGreaterThanEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::ILt:
        condition = m_module.opSLessThan(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::INe:
        condition = m_module.opINotEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::UGe:
        condition = m_module.opUGreaterThanEqual(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      case DxbcOpcode::ULt:
        condition = m_module.opULessThan(
          conditionType, src.at(0).id, src.at(1).id);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    // Generate constant vectors for selection
    uint32_t sFalse = m_module.constu32( 0u);
    uint32_t sTrue  = m_module.constu32(~0u);

    DxbcRegisterValue result;
    result.type.ctype  = DxbcScalarType::Uint32;
    result.type.ccount = componentCount;

    const uint32_t typeId = getVectorTypeId(result.type);

    if (componentCount > 1) {
      const std::array vFalse = { sFalse, sFalse, sFalse, sFalse };
      const std::array vTrue  = { sTrue,  sTrue,  sTrue,  sTrue  };

      sFalse = m_module.constComposite(typeId, componentCount, vFalse.data());
      sTrue  = m_module.constComposite(typeId, componentCount, vTrue .data());
    }

    if (invert)
      std::swap(sFalse, sTrue);

    // Perform component-wise mask selection
    // based on the condition evaluated above.
    result.id = m_module.opSelect(
      typeId, condition, sTrue, sFalse);

    emitRegisterStore(ins.dst[0], result);
  }


  // Translates screen-space derivative instructions (deriv_rtx/rty
  // and their coarse/fine variants) into OpDpdx/OpDpdy-family ops.
  void emitVectorDeriv(
    const DxbcShaderInstruction& ins) {
    // Derivative instructions have two operands:
    //    (dst0) Destination register for the derivative
    //    (src0) The operand to compute the derivative of
    DxbcRegisterValue value = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
    const uint32_t typeId = getVectorTypeId(value.type);

    switch (ins.op) {
      case DxbcOpcode::DerivRtx:
        value.id = m_module.opDpdx(typeId, value.id);
        break;

      case DxbcOpcode::DerivRty:
        value.id = m_module.opDpdy(typeId, value.id);
        break;

      case DxbcOpcode::DerivRtxCoarse:
        value.id = m_module.opDpdxCoarse(typeId, value.id);
        break;

      case DxbcOpcode::DerivRtyCoarse:
        value.id = m_module.opDpdyCoarse(typeId, value.id);
        break;

      case DxbcOpcode::DerivRtxFine:
        value.id = m_module.opDpdxFine(typeId, value.id);
        break;

      case DxbcOpcode::DerivRtyFine:
        value.id = m_module.opDpdyFine(typeId, value.id);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    value = emitDstOperandModifiers(value, ins.modifiers);
    emitRegisterStore(ins.dst[0], value);
  }


  // Translates dp2/dp3/dp4 as an explicit chain of mul/fma ops
  // rather than OpDot, to keep a fixed evaluation order.
  void emitVectorDot(
    const DxbcShaderInstruction& ins) {
    // Mask selects the number of components the dot product reads
    const DxbcRegMask srcMask(true,
      ins.op >= DxbcOpcode::Dp2,
      ins.op >= DxbcOpcode::Dp3,
      ins.op >= DxbcOpcode::Dp4);

    const std::array src = {
      emitRegisterLoad(ins.src[0], srcMask),
      emitRegisterLoad(ins.src[1], srcMask),
    };

    DxbcRegisterValue dst;
    dst.type.ctype  = ins.dst[0].dataType;
    dst.type.ccount = 1;
    dst.id = 0;

    uint32_t componentType  = getVectorTypeId(dst.type);
    uint32_t componentCount = srcMask.popCount();

    for (uint32_t i = 0; i < componentCount; i++) {
      if (dst.id) {
        dst.id = m_module.opFFma(componentType,
          m_module.opCompositeExtract(componentType, src.at(0).id, 1, &i),
          m_module.opCompositeExtract(componentType, src.at(1).id, 1, &i),
          dst.id);
      } else {
        dst.id = m_module.opFMul(componentType,
          m_module.opCompositeExtract(componentType, src.at(0).id, 1, &i),
          m_module.opCompositeExtract(componentType, src.at(1).id, 1, &i));
      }

      // Unconditionally mark as precise since the exact order of operation
      // matters for some games, even if the instruction itself is not marked
      // as precise.
      m_module.decorate(dst.id, spv::DecorationNoContraction);
    }

    dst = emitDstOperandModifiers(dst, ins.modifiers);
    emitRegisterStore(ins.dst[0], dst);
  }


  // Translates udiv: unsigned quotient and remainder, with
  // division-by-zero yielding 0xffffffff per D3D11 semantics.
  void emitVectorIdiv(
    const DxbcShaderInstruction& ins) {
    // udiv has four operands:
    //    (dst0) Quotient destination register
    //    (dst1) Remainder destination register
    //    (src0) The first vector to compare
    //    (src1) The second vector to compare
    if (ins.dst[0].type == DxbcOperandType::Null
     && ins.dst[1].type == DxbcOperandType::Null)
      return;

    // FIXME support this if applications require it
    if (ins.dst[0].type != DxbcOperandType::Null
     && ins.dst[1].type != DxbcOperandType::Null
     && ins.dst[0].mask != ins.dst[1].mask) {
      Logger::warn("DxbcCompiler: Idiv with different destination masks not supported");
      return;
    }

    // Load source operands as integers with the
    // mask of one non-NULL destination operand
    const DxbcRegMask srcMask =
      ins.dst[0].type != DxbcOperandType::Null
        ? ins.dst[0].mask
        : ins.dst[1].mask;

    const std::array src = {
      emitRegisterLoad(ins.src[0], srcMask),
      emitRegisterLoad(ins.src[1], srcMask),
    };

    // Division by zero will return 0xffffffff for both results
    auto bvecId = getVectorTypeId({ DxbcScalarType::Bool, srcMask.popCount() });

    DxbcRegisterValue const0  = emitBuildConstVecu32( 0u,  0u,  0u,  0u, srcMask);
    DxbcRegisterValue constff = emitBuildConstVecu32(~0u, ~0u, ~0u, ~0u, srcMask);

    uint32_t cmpValue = m_module.opINotEqual(bvecId, src.at(1).id, const0.id);

    // Compute results only if the destination
    // operands are not NULL.
    if (ins.dst[0].type != DxbcOperandType::Null) {
      DxbcRegisterValue quotient;
      quotient.type.ctype  = ins.dst[0].dataType;
      quotient.type.ccount = ins.dst[0].mask.popCount();

      quotient.id = m_module.opUDiv(
        getVectorTypeId(quotient.type),
        src.at(0).id, src.at(1).id);

      quotient.id = m_module.opSelect(
        getVectorTypeId(quotient.type),
        cmpValue, quotient.id, constff.id);

      quotient = emitDstOperandModifiers(quotient, ins.modifiers);
      emitRegisterStore(ins.dst[0], quotient);
    }

    if (ins.dst[1].type != DxbcOperandType::Null) {
      DxbcRegisterValue remainder;
      remainder.type.ctype  = ins.dst[1].dataType;
      remainder.type.ccount = ins.dst[1].mask.popCount();

      remainder.id = m_module.opUMod(
        getVectorTypeId(remainder.type),
        src.at(0).id, src.at(1).id);

      remainder.id = m_module.opSelect(
        getVectorTypeId(remainder.type),
        cmpValue, remainder.id, constff.id);

      remainder = emitDstOperandModifiers(remainder, ins.modifiers);
      emitRegisterStore(ins.dst[1], remainder);
    }
  }


  // Translates imul/umul. Only the low-result form (dst0 == NULL)
  // is implemented; the extended high/low form is not supported yet.
  void emitVectorImul(
    const DxbcShaderInstruction& ins) {
    // imul and umul have four operands:
    //    (dst0) High destination register
    //    (dst1) Low destination register
    //    (src0) The first vector to compare
    //    (src1) The second vector to compare
    if (ins.dst[0].type == DxbcOperandType::Null) {
      if (ins.dst[1].type == DxbcOperandType::Null)
        return;

      // If dst0 is NULL, this instruction behaves just
      // like any other three-operand ALU instruction
      const std::array src = {
        emitRegisterLoad(ins.src[0], ins.dst[1].mask),
        emitRegisterLoad(ins.src[1], ins.dst[1].mask),
      };

      DxbcRegisterValue result;
      result.type.ctype  = ins.dst[1].dataType;
      result.type.ccount = ins.dst[1].mask.popCount();
      result.id = m_module.opIMul(
        getVectorTypeId(result.type),
        src.at(0).id, src.at(1).id);

      result = emitDstOperandModifiers(result, ins.modifiers);
      emitRegisterStore(ins.dst[1], result);
    } else {
      // TODO implement this
      Logger::warn("DxbcCompiler: Extended Imul not yet supported");
    }
  }


  // Translates msad (masked sum of absolute differences) by
  // extracting the four packed bytes and accumulating per-byte.
  void emitVectorMsad(
    const DxbcShaderInstruction& ins) {
    // msad has four operands:
    //    (dst0) Destination
    //    (src0) Reference (packed uint8)
    //    (src1) Source (packed uint8)
    //    (src2) Accumulator
    DxbcRegisterValue refReg = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
    DxbcRegisterValue srcReg = emitRegisterLoad(ins.src[1], ins.dst[0].mask);
    DxbcRegisterValue result = emitRegisterLoad(ins.src[2], ins.dst[0].mask);

    auto typeId = getVectorTypeId(result.type);
    auto bvecId = getVectorTypeId({ DxbcScalarType::Bool, result.type.ccount });

    for (uint32_t i = 0; i < 4; i++) {
      auto shift = m_module.constu32(8 * i);
      auto count = m_module.constu32(8);

      auto ref = m_module.opBitFieldUExtract(typeId, refReg.id, shift, count);
      auto src = m_module.opBitFieldUExtract(typeId, srcReg.id, shift, count);

      // A zero reference byte masks out the accumulation for that byte
      auto zero = emitBuildConstVecu32(0, 0, 0, 0, ins.dst[0].mask);
      auto mask = m_module.opINotEqual(bvecId, ref, zero.id);

      auto diff = m_module.opSAbs(typeId, m_module.opISub(typeId, ref, src));
      result.id = m_module.opSelect(typeId, mask, m_module.opIAdd(typeId, result.id, diff), result.id);
    }

    result = emitDstOperandModifiers(result, ins.modifiers);
    emitRegisterStore(ins.dst[0], result);
  }


  // Translates shift instructions (ishl, ishr, ushr).
  void emitVectorShift(
    const DxbcShaderInstruction& ins) {
    // Shift operations have three operands:
    //    (dst0) The destination register
    //    (src0) The register to shift
    //    (src1) The shift amount (scalar)
    DxbcRegisterValue shiftReg = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
    DxbcRegisterValue countReg = emitRegisterLoad(ins.src[1], ins.dst[0].mask);

    // D3D11 only honors the low five bits of a dynamic shift amount
    if (ins.src[1].type != DxbcOperandType::Imm32)
      countReg = emitRegisterMaskBits(countReg, 0x1F);

    if (countReg.type.ccount == 1)
      countReg = emitRegisterExtend(countReg, shiftReg.type.ccount);

    DxbcRegisterValue result;
    result.type.ctype  = ins.dst[0].dataType;
    result.type.ccount = ins.dst[0].mask.popCount();

    switch (ins.op) {
      case DxbcOpcode::IShl:
        result.id = m_module.opShiftLeftLogical(
          getVectorTypeId(result.type),
          shiftReg.id, countReg.id);
        break;

      case DxbcOpcode::IShr:
        result.id = m_module.opShiftRightArithmetic(
          getVectorTypeId(result.type),
          shiftReg.id, countReg.id);
        break;

      case DxbcOpcode::UShr:
        result.id = m_module.opShiftRightLogical(
          getVectorTypeId(result.type),
          shiftReg.id, countReg.id);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    result = emitDstOperandModifiers(result, ins.modifiers);
    emitRegisterStore(ins.dst[0], result);
  }


  // Translates sincos, computing sine and cosine per enabled
  // component and writing to the respective destinations.
  void emitVectorSinCos(
    const DxbcShaderInstruction& ins) {
    // sincos has three operands:
    //    (dst0) Destination register for sin(x)
    //    (dst1) Destination register for cos(x)
    //    (src0) Source operand x

    // Load source operand as 32-bit float vector.
    const DxbcRegisterValue srcValue = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, true, true, true));

    uint32_t typeId = getScalarTypeId(srcValue.type.ctype);

    DxbcRegisterValue sinVector = { };
    sinVector.type.ctype = DxbcScalarType::Float32;

    DxbcRegisterValue cosVector = { };
    cosVector.type.ctype = DxbcScalarType::Float32;

    // Only compute sincos for enabled components
    // NOTE(review): std::array element type/size missing here
    // (stripped in transit) — restore from upstream.
    std::array sinIds = { };
    std::array cosIds = { };

    for (uint32_t i = 0; i < 4; i++) {
      const uint32_t sinIndex = 0u;
      const uint32_t cosIndex = 1u;

      if (ins.dst[0].mask[i] || ins.dst[1].mask[i]) {
        uint32_t sincosId = m_module.opSinCos(m_module.opCompositeExtract(typeId, srcValue.id, 1u, &i), !m_moduleInfo.options.sincosEmulation);

        if (ins.dst[0].type != DxbcOperandType::Null && ins.dst[0].mask[i])
          sinIds[sinVector.type.ccount++] = m_module.opCompositeExtract(typeId, sincosId, 1u, &sinIndex);

        if (ins.dst[1].type != DxbcOperandType::Null && ins.dst[1].mask[i])
          cosIds[cosVector.type.ccount++] = m_module.opCompositeExtract(typeId, sincosId, 1u, &cosIndex);
      }
    }

    if (sinVector.type.ccount) {
      sinVector.id = sinVector.type.ccount > 1u
        ? m_module.opCompositeConstruct(getVectorTypeId(sinVector.type), sinVector.type.ccount, sinIds.data())
        : sinIds[0];

      emitRegisterStore(ins.dst[0], sinVector);
    }

    if (cosVector.type.ccount) {
      cosVector.id = cosVector.type.ccount > 1u
        ? m_module.opCompositeConstruct(getVectorTypeId(cosVector.type), cosVector.type.ccount, cosIds.data())
        : cosIds[0];

      emitRegisterStore(ins.dst[1], cosVector);
    }
  }


  // Handles geometry shader emit/cut instructions.
  // (Definition continues past this chunk.)
  void emitGeometryEmit(
    const DxbcShaderInstruction& ins) {
    // In xfb mode we might have multiple streams, so
    // we have to figure out which stream to write to
    uint32_t streamId  = 0;
    uint32_t streamVar = 0;

    if (m_moduleInfo.xfb != nullptr) {
      streamId = ins.dstCount > 0 ?
ins.dst[0].idx[0].offset : 0;\n streamVar = m_module.constu32(streamId);\n }\n\n // Checking the negation is easier for EmitThenCut/EmitThenCutStream\n bool doEmit = ins.op != DxbcOpcode::Cut && ins.op != DxbcOpcode::CutStream;\n bool doCut = ins.op != DxbcOpcode::Emit && ins.op != DxbcOpcode::EmitStream;\n\n if (doEmit) {\n if (m_gs.needsOutputSetup)\n emitOutputSetup();\n emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);\n emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);\n emitXfbOutputSetup(streamId, false);\n m_module.opEmitVertex(streamVar);\n }\n\n if (doCut)\n m_module.opEndPrimitive(streamVar);\n }\n void emitAtomic(\n const DxbcShaderInstruction& ins) {\n // atomic_* operations have the following operands:\n // (dst0) Destination u# or g# register\n // (src0) Index into the texture or buffer\n // (src1) The source value for the operation\n // (src2) Second source operand (optional)\n // imm_atomic_* operations have the following operands:\n // (dst0) Register that receives the result\n // (dst1) Destination u# or g# register\n // (srcX) As above\n const DxbcBufferInfo bufferInfo = getBufferInfo(ins.dst[ins.dstCount - 1]);\n \n bool isImm = ins.dstCount == 2;\n bool isUav = ins.dst[ins.dstCount - 1].type == DxbcOperandType::UnorderedAccessView;\n bool isSsbo = bufferInfo.isSsbo;\n\n // Retrieve destination pointer for the atomic operation>\n const DxbcRegisterPointer pointer = emitGetAtomicPointer(\n ins.dst[ins.dstCount - 1], ins.src[0]);\n \n // Load source values\n std::array src;\n \n for (uint32_t i = 1; i < ins.srcCount; i++) {\n src[i - 1] = emitRegisterBitcast(\n emitRegisterLoad(ins.src[i], DxbcRegMask(true, false, false, false)),\n pointer.type.ctype);\n }\n \n // Define memory scope and semantics based on the operands\n uint32_t scope = 0;\n uint32_t semantics = 0;\n \n if (isUav) {\n scope = spv::ScopeQueueFamily;\n semantics = spv::MemorySemanticsAcquireReleaseMask;\n\n semantics |= isSsbo\n ? 
spv::MemorySemanticsUniformMemoryMask\n : spv::MemorySemanticsImageMemoryMask;\n } else {\n scope = spv::ScopeWorkgroup;\n semantics = spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask;\n }\n\n const uint32_t scopeId = m_module.constu32(scope);\n const uint32_t semanticsId = m_module.constu32(semantics);\n \n // Perform the atomic operation on the given pointer\n DxbcRegisterValue value;\n value.type = pointer.type;\n value.id = 0;\n \n // The result type, which is a scalar integer\n const uint32_t typeId = getVectorTypeId(value.type);\n \n switch (ins.op) {\n case DxbcOpcode::AtomicCmpStore:\n case DxbcOpcode::ImmAtomicCmpExch:\n value.id = m_module.opAtomicCompareExchange(\n typeId, pointer.id, scopeId, semanticsId,\n m_module.constu32(spv::MemorySemanticsMaskNone),\n src[1].id, src[0].id);\n break;\n \n case DxbcOpcode::ImmAtomicExch:\n value.id = m_module.opAtomicExchange(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicIAdd:\n case DxbcOpcode::ImmAtomicIAdd:\n value.id = m_module.opAtomicIAdd(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicAnd:\n case DxbcOpcode::ImmAtomicAnd:\n value.id = m_module.opAtomicAnd(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicOr:\n case DxbcOpcode::ImmAtomicOr:\n value.id = m_module.opAtomicOr(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicXor:\n case DxbcOpcode::ImmAtomicXor:\n value.id = m_module.opAtomicXor(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicIMin:\n case DxbcOpcode::ImmAtomicIMin:\n value.id = m_module.opAtomicSMin(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicIMax:\n case DxbcOpcode::ImmAtomicIMax:\n value.id = m_module.opAtomicSMax(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n 
\n case DxbcOpcode::AtomicUMin:\n case DxbcOpcode::ImmAtomicUMin:\n value.id = m_module.opAtomicUMin(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicUMax:\n case DxbcOpcode::ImmAtomicUMax:\n value.id = m_module.opAtomicUMax(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n \n // Write back the result to the destination\n // register if this is an imm_atomic_* opcode.\n if (isImm)\n emitRegisterStore(ins.dst[0], value);\n }\n void emitAtomicCounter(\n const DxbcShaderInstruction& ins) {\n // imm_atomic_alloc and imm_atomic_consume have the following operands:\n // (dst0) The register that will hold the old counter value\n // (dst1) The UAV whose counter is going to be modified\n const uint32_t registerId = ins.dst[1].idx[0].offset;\n \n if (m_uavs.at(registerId).ctrId == 0)\n m_uavs.at(registerId).ctrId = emitDclUavCounter(registerId);\n \n // Get a pointer to the atomic counter in question\n DxbcRegisterInfo ptrType;\n ptrType.type.ctype = DxbcScalarType::Uint32;\n ptrType.type.ccount = 1;\n ptrType.type.alength = 0;\n ptrType.sclass = spv::StorageClassStorageBuffer;\n \n uint32_t zeroId = m_module.consti32(0);\n uint32_t ptrId = m_module.opAccessChain(\n getPointerTypeId(ptrType),\n m_uavs.at(registerId).ctrId,\n 1, &zeroId);\n \n // Define memory scope and semantics based on the operands\n uint32_t scope = spv::ScopeQueueFamily;\n uint32_t semantics = spv::MemorySemanticsUniformMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask;\n \n uint32_t scopeId = m_module.constu32(scope);\n uint32_t semanticsId = m_module.constu32(semantics);\n \n // Compute the result value\n DxbcRegisterValue value;\n value.type.ctype = DxbcScalarType::Uint32;\n value.type.ccount = 1;\n \n uint32_t typeId = getVectorTypeId(value.type);\n \n switch (ins.op) {\n case DxbcOpcode::ImmAtomicAlloc:\n value.id = 
m_module.opAtomicIAdd(typeId, ptrId,\n scopeId, semanticsId, m_module.constu32(1));\n break;\n \n case DxbcOpcode::ImmAtomicConsume:\n value.id = m_module.opAtomicISub(typeId, ptrId,\n scopeId, semanticsId, m_module.constu32(1));\n value.id = m_module.opISub(typeId, value.id,\n m_module.constu32(1));\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n\n // Store the result\n emitRegisterStore(ins.dst[0], value);\n }\n void emitBarrier(\n const DxbcShaderInstruction& ins) {\n // sync takes no operands. Instead, the synchronization\n // scope is defined by the operand control bits.\n const DxbcSyncFlags flags = ins.controls.syncFlags();\n \n uint32_t executionScope = spv::ScopeInvocation;\n uint32_t memoryScope = spv::ScopeInvocation;\n uint32_t memorySemantics = 0;\n \n if (flags.test(DxbcSyncFlag::ThreadsInGroup))\n executionScope = spv::ScopeWorkgroup;\n \n if (flags.test(DxbcSyncFlag::ThreadGroupSharedMemory)) {\n memoryScope = spv::ScopeWorkgroup;\n memorySemantics |= spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask;\n }\n \n if (flags.test(DxbcSyncFlag::UavMemoryGroup)) {\n memoryScope = spv::ScopeWorkgroup;\n memorySemantics |= spv::MemorySemanticsImageMemoryMask\n | spv::MemorySemanticsUniformMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask;\n }\n \n if (flags.test(DxbcSyncFlag::UavMemoryGlobal)) {\n memoryScope = spv::ScopeQueueFamily;\n\n if (m_programInfo.type() == DxbcProgramType::ComputeShader && !m_hasGloballyCoherentUav)\n memoryScope = spv::ScopeWorkgroup;\n\n memorySemantics |= spv::MemorySemanticsImageMemoryMask\n | spv::MemorySemanticsUniformMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask;\n 
}\n \n if (executionScope != spv::ScopeInvocation) {\n m_module.opControlBarrier(\n m_module.constu32(executionScope),\n m_module.constu32(memoryScope),\n m_module.constu32(memorySemantics));\n } else if (memoryScope != spv::ScopeInvocation) {\n m_module.opMemoryBarrier(\n m_module.constu32(memoryScope),\n m_module.constu32(memorySemantics));\n } else {\n Logger::warn(\"DxbcCompiler: sync instruction has no effect\");\n }\n }\n void emitBitExtract(\n const DxbcShaderInstruction& ins) {\n // ibfe and ubfe take the following arguments:\n // (dst0) The destination register\n // (src0) Number of bits to extact\n // (src1) Offset of the bits to extract\n // (src2) Register to extract bits from\n const bool isSigned = ins.op == DxbcOpcode::IBfe;\n \n DxbcRegisterValue bitCnt = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n DxbcRegisterValue bitOfs = emitRegisterLoad(ins.src[1], ins.dst[0].mask);\n\n if (ins.src[0].type != DxbcOperandType::Imm32)\n bitCnt = emitRegisterMaskBits(bitCnt, 0x1F);\n \n if (ins.src[1].type != DxbcOperandType::Imm32)\n bitOfs = emitRegisterMaskBits(bitOfs, 0x1F);\n \n const DxbcRegisterValue src = emitRegisterLoad(ins.src[2], ins.dst[0].mask);\n \n const uint32_t componentCount = src.type.ccount;\n std::array componentIds = {{ 0, 0, 0, 0 }};\n \n for (uint32_t i = 0; i < componentCount; i++) {\n const DxbcRegisterValue currBitCnt = emitRegisterExtract(bitCnt, DxbcRegMask::select(i));\n const DxbcRegisterValue currBitOfs = emitRegisterExtract(bitOfs, DxbcRegMask::select(i));\n const DxbcRegisterValue currSrc = emitRegisterExtract(src, DxbcRegMask::select(i));\n \n const uint32_t typeId = getVectorTypeId(currSrc.type);\n \n componentIds[i] = isSigned\n ? m_module.opBitFieldSExtract(typeId, currSrc.id, currBitOfs.id, currBitCnt.id)\n : m_module.opBitFieldUExtract(typeId, currSrc.id, currBitOfs.id, currBitCnt.id);\n }\n \n DxbcRegisterValue result;\n result.type = src.type;\n result.id = componentCount > 1\n ? 
m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n componentCount, componentIds.data())\n : componentIds[0];\n emitRegisterStore(ins.dst[0], result);\n }\n void emitBitInsert(\n const DxbcShaderInstruction& ins) {\n // ibfe and ubfe take the following arguments:\n // (dst0) The destination register\n // (src0) Number of bits to extact\n // (src1) Offset of the bits to extract\n // (src2) Register to take bits from\n // (src3) Register to replace bits in\n DxbcRegisterValue bitCnt = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n DxbcRegisterValue bitOfs = emitRegisterLoad(ins.src[1], ins.dst[0].mask);\n \n if (ins.src[0].type != DxbcOperandType::Imm32)\n bitCnt = emitRegisterMaskBits(bitCnt, 0x1F);\n \n if (ins.src[1].type != DxbcOperandType::Imm32)\n bitOfs = emitRegisterMaskBits(bitOfs, 0x1F);\n \n const DxbcRegisterValue insert = emitRegisterLoad(ins.src[2], ins.dst[0].mask);\n const DxbcRegisterValue base = emitRegisterLoad(ins.src[3], ins.dst[0].mask);\n \n const uint32_t componentCount = base.type.ccount;\n std::array componentIds = {{ 0, 0, 0, 0 }};\n \n for (uint32_t i = 0; i < componentCount; i++) {\n const DxbcRegisterValue currBitCnt = emitRegisterExtract(bitCnt, DxbcRegMask::select(i));\n const DxbcRegisterValue currBitOfs = emitRegisterExtract(bitOfs, DxbcRegMask::select(i));\n const DxbcRegisterValue currInsert = emitRegisterExtract(insert, DxbcRegMask::select(i));\n const DxbcRegisterValue currBase = emitRegisterExtract(base, DxbcRegMask::select(i));\n \n componentIds[i] = m_module.opBitFieldInsert(\n getVectorTypeId(currBase.type),\n currBase.id, currInsert.id,\n currBitOfs.id, currBitCnt.id);\n }\n \n DxbcRegisterValue result;\n result.type = base.type;\n result.id = componentCount > 1\n ? 
m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n componentCount, componentIds.data())\n : componentIds[0];\n emitRegisterStore(ins.dst[0], result);\n }\n void emitBitScan(\n const DxbcShaderInstruction& ins) {\n // firstbit(lo|hi|shi) have two operands:\n // (dst0) The destination operant\n // (src0) Source operand to scan\n DxbcRegisterValue src = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n \n DxbcRegisterValue dst;\n dst.type.ctype = ins.dst[0].dataType;\n dst.type.ccount = ins.dst[0].mask.popCount();\n \n // Result type, should be an unsigned integer\n const uint32_t typeId = getVectorTypeId(dst.type);\n \n switch (ins.op) {\n case DxbcOpcode::FirstBitLo: dst.id = m_module.opFindILsb(typeId, src.id); break;\n case DxbcOpcode::FirstBitHi: dst.id = m_module.opFindUMsb(typeId, src.id); break;\n case DxbcOpcode::FirstBitShi: dst.id = m_module.opFindSMsb(typeId, src.id); break;\n default: Logger::warn(str::format(\"DxbcCompiler: Unhandled instruction: \", ins.op)); return;\n }\n \n // The 'Hi' variants are counted from the MSB in DXBC\n // rather than the LSB, so we have to invert the number\n if (ins.op == DxbcOpcode::FirstBitHi || ins.op == DxbcOpcode::FirstBitShi) {\n uint32_t boolTypeId = m_module.defBoolType();\n\n if (dst.type.ccount > 1)\n boolTypeId = m_module.defVectorType(boolTypeId, dst.type.ccount);\n\n DxbcRegisterValue const31 = emitBuildConstVecu32(31u, 31u, 31u, 31u, ins.dst[0].mask);\n DxbcRegisterValue constff = emitBuildConstVecu32(~0u, ~0u, ~0u, ~0u, ins.dst[0].mask);\n\n dst.id = m_module.opSelect(typeId,\n m_module.opINotEqual(boolTypeId, dst.id, constff.id),\n m_module.opISub(typeId, const31.id, dst.id),\n constff.id);\n }\n \n // No modifiers are supported\n emitRegisterStore(ins.dst[0], dst);\n }\n void emitBufferQuery(\n const DxbcShaderInstruction& ins) {\n // bufinfo takes two arguments\n // (dst0) The destination register\n // (src0) The buffer register to query\n const DxbcBufferInfo bufferInfo = 
getBufferInfo(ins.src[0]);\n bool isSsbo = bufferInfo.isSsbo;\n \n // We'll store this as a scalar unsigned integer\n DxbcRegisterValue result = isSsbo\n ? emitQueryBufferSize(ins.src[0])\n : emitQueryTexelBufferSize(ins.src[0]);\n \n uint32_t typeId = getVectorTypeId(result.type);\n \n // Adjust returned size if this is a raw or structured\n // buffer, as emitQueryTexelBufferSize only returns the\n // number of typed elements in the buffer.\n if (bufferInfo.type == DxbcResourceType::Raw) {\n result.id = m_module.opIMul(typeId,\n result.id, m_module.constu32(4));\n } else if (bufferInfo.type == DxbcResourceType::Structured) {\n result.id = m_module.opUDiv(typeId, result.id,\n m_module.constu32(bufferInfo.stride / 4));\n }\n\n // Store the result. The scalar will be extended to a\n // vector if the write mask consists of more than one\n // component, which is the desired behaviour.\n emitRegisterStore(ins.dst[0], result);\n }\n void emitBufferLoad(\n const DxbcShaderInstruction& ins) {\n // ld_raw takes three arguments:\n // (dst0) Destination register\n // (src0) Byte offset\n // (src1) Source register\n // ld_structured takes four arguments:\n // (dst0) Destination register\n // (src0) Structure index\n // (src1) Byte offset\n // (src2) Source register\n const bool isStructured = ins.op == DxbcOpcode::LdStructured\n || ins.op == DxbcOpcode::LdStructuredS;\n \n // Source register. The exact way we access\n // the data depends on the register type.\n const DxbcRegister& dstReg = ins.dst[0];\n const DxbcRegister& srcReg = isStructured ? 
ins.src[2] : ins.src[1];\n\n if (dstReg.type == DxbcOperandType::UnorderedAccessView)\n emitUavBarrier(uint64_t(1u) << srcReg.idx[0].offset, 0u);\n\n // Retrieve common info about the buffer\n const DxbcBufferInfo bufferInfo = getBufferInfo(srcReg);\n\n // Shared memory is the only type of buffer that\n // is not accessed through a texel buffer view\n bool isTgsm = srcReg.type == DxbcOperandType::ThreadGroupSharedMemory;\n bool isSsbo = bufferInfo.isSsbo;\n\n // Common types and IDs used while loading the data\n uint32_t bufferId = isTgsm || isSsbo ? 0 : m_module.opLoad(bufferInfo.typeId, bufferInfo.varId);\n\n uint32_t vectorTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 4 });\n uint32_t scalarTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 1 });\n\n // Since all data is represented as a sequence of 32-bit\n // integers, we have to load each component individually.\n std::array ccomps = { 0, 0, 0, 0 };\n std::array scomps = { 0, 0, 0, 0 };\n uint32_t scount = 0;\n\n // The sparse feedback ID will be non-zero for sparse\n // instructions on input. 
We need to reset it to 0.\n SpirvMemoryOperands memoryOperands;\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n\n uint32_t coherence = bufferInfo.coherence;\n\n if (isTgsm && m_moduleInfo.options.forceVolatileTgsmAccess) {\n memoryOperands.flags |= spv::MemoryAccessVolatileMask;\n coherence = spv::ScopeWorkgroup;\n }\n\n if (coherence) {\n memoryOperands.flags |= spv::MemoryAccessNonPrivatePointerMask;\n\n if (coherence != spv::ScopeInvocation) {\n memoryOperands.flags |= spv::MemoryAccessMakePointerVisibleMask;\n memoryOperands.makeVisible = m_module.constu32(coherence);\n\n imageOperands.flags = spv::ImageOperandsNonPrivateTexelMask\n | spv::ImageOperandsMakeTexelVisibleMask;\n imageOperands.makeVisible = m_module.constu32(coherence);\n }\n }\n\n uint32_t sparseFeedbackId = 0;\n\n bool useRawAccessChains = m_hasRawAccessChains && isSsbo && !imageOperands.sparse;\n\n DxbcRegisterValue index = emitRegisterLoad(ins.src[0], DxbcRegMask(true, false, false, false));\n DxbcRegisterValue offset = index;\n\n if (isStructured)\n offset = emitRegisterLoad(ins.src[1], DxbcRegMask(true, false, false, false));\n\n DxbcRegisterValue elementIndex = { };\n\n uint32_t baseAlignment = sizeof(uint32_t);\n\n if (useRawAccessChains) {\n memoryOperands.flags |= spv::MemoryAccessAlignedMask;\n\n if (isStructured && ins.src[1].type == DxbcOperandType::Imm32) {\n baseAlignment = bufferInfo.stride | ins.src[1].imm.u32_1;\n baseAlignment = baseAlignment & -baseAlignment;\n baseAlignment = std::min(baseAlignment, uint32_t(m_moduleInfo.options.minSsboAlignment));\n }\n } else {\n elementIndex = isStructured\n ? 
emitCalcBufferIndexStructured(index, offset, bufferInfo.stride)\n : emitCalcBufferIndexRaw(offset);\n }\n\n uint32_t readMask = 0u;\n\n for (uint32_t i = 0; i < 4; i++) {\n if (dstReg.mask[i])\n readMask |= 1u << srcReg.swizzle[i];\n }\n\n while (readMask) {\n uint32_t sindex = bit::tzcnt(readMask);\n uint32_t scount = bit::tzcnt(~(readMask >> sindex));\n uint32_t zero = 0;\n\n if (useRawAccessChains) {\n uint32_t alignment = baseAlignment;\n uint32_t offsetId = offset.id;\n\n if (sindex) {\n offsetId = m_module.opIAdd(scalarTypeId,\n offsetId, m_module.constu32(sizeof(uint32_t) * sindex));\n alignment |= sizeof(uint32_t) * sindex;\n }\n\n DxbcRegisterInfo storeInfo;\n storeInfo.type.ctype = DxbcScalarType::Uint32;\n storeInfo.type.ccount = scount;\n storeInfo.type.alength = 0;\n storeInfo.sclass = spv::StorageClassStorageBuffer;\n\n uint32_t loadTypeId = getArrayTypeId(storeInfo.type);\n uint32_t ptrTypeId = getPointerTypeId(storeInfo);\n\n uint32_t accessChain = isStructured\n ? m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,\n m_module.constu32(bufferInfo.stride), index.id, offsetId,\n spv::RawAccessChainOperandsRobustnessPerElementNVMask)\n : m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,\n m_module.constu32(0), m_module.constu32(0), offsetId,\n spv::RawAccessChainOperandsRobustnessPerComponentNVMask);\n\n memoryOperands.alignment = alignment & -alignment;\n\n uint32_t vectorId = m_module.opLoad(loadTypeId, accessChain, memoryOperands);\n\n for (uint32_t i = 0; i < scount; i++) {\n ccomps[sindex + i] = vectorId;\n\n if (scount > 1) {\n ccomps[sindex + i] = m_module.opCompositeExtract(\n scalarTypeId, vectorId, 1, &i);\n }\n }\n\n readMask &= ~(((1u << scount) - 1u) << sindex);\n } else {\n uint32_t elementIndexAdjusted = m_module.opIAdd(\n getVectorTypeId(elementIndex.type), elementIndex.id,\n m_module.consti32(sindex));\n\n if (isTgsm) {\n ccomps[sindex] = m_module.opLoad(scalarTypeId,\n m_module.opAccessChain(bufferInfo.typeId,\n 
bufferInfo.varId, 1, &elementIndexAdjusted),\n memoryOperands);\n } else if (isSsbo) {\n uint32_t indices[2] = { m_module.constu32(0), elementIndexAdjusted };\n ccomps[sindex] = m_module.opLoad(scalarTypeId,\n m_module.opAccessChain(bufferInfo.typeId,\n bufferInfo.varId, 2, indices),\n memoryOperands);\n } else {\n uint32_t resultTypeId = vectorTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(vectorTypeId);\n\n if (srcReg.type == DxbcOperandType::Resource) {\n resultId = m_module.opImageFetch(resultTypeId,\n bufferId, elementIndexAdjusted, imageOperands);\n } else if (srcReg.type == DxbcOperandType::UnorderedAccessView) {\n resultId = m_module.opImageRead(resultTypeId,\n bufferId, elementIndexAdjusted, imageOperands);\n } else {\n throw DxvkError(\"DxbcCompiler: Invalid operand type for strucured/raw load\");\n }\n\n // Only read sparse feedback once. This may be somewhat inaccurate\n // for reads that straddle pages, but we can't easily emulate this.\n if (imageOperands.sparse) {\n imageOperands.sparse = false;\n sparseFeedbackId = resultId;\n\n resultId = emitExtractSparseTexel(vectorTypeId, resultId);\n }\n\n ccomps[sindex] = m_module.opCompositeExtract(scalarTypeId, resultId, 1, &zero);\n }\n\n readMask &= readMask - 1;\n }\n }\n\n for (uint32_t i = 0; i < 4; i++) {\n uint32_t sindex = srcReg.swizzle[i];\n\n if (dstReg.mask[i])\n scomps[scount++] = ccomps[sindex];\n }\n\n DxbcRegisterValue result = { };\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = scount;\n result.id = scomps[0];\n\n if (scount > 1) {\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n scount, scomps.data());\n }\n\n emitRegisterStore(dstReg, result);\n\n if (sparseFeedbackId)\n emitStoreSparseFeedback(ins.dst[1], sparseFeedbackId);\n }\n void emitBufferStore(\n const DxbcShaderInstruction& ins) {\n // store_raw takes three arguments:\n // (dst0) Destination register\n // (src0) Byte offset\n 
// (src1) Source register\n // store_structured takes four arguments:\n // (dst0) Destination register\n // (src0) Structure index\n // (src1) Byte offset\n // (src2) Source register\n const bool isStructured = ins.op == DxbcOpcode::StoreStructured;\n \n // Source register. The exact way we access\n // the data depends on the register type.\n const DxbcRegister& dstReg = ins.dst[0];\n const DxbcRegister& srcReg = isStructured ? ins.src[2] : ins.src[1];\n\n if (dstReg.type == DxbcOperandType::UnorderedAccessView)\n emitUavBarrier(0u, uint64_t(1u) << dstReg.idx[0].offset);\n\n DxbcRegisterValue value = emitRegisterLoad(srcReg, dstReg.mask);\n value = emitRegisterBitcast(value, DxbcScalarType::Uint32);\n\n // Retrieve common info about the buffer\n const DxbcBufferInfo bufferInfo = getBufferInfo(dstReg);\n\n // Thread Group Shared Memory is not accessed through a texel buffer view\n bool isTgsm = dstReg.type == DxbcOperandType::ThreadGroupSharedMemory;\n bool isSsbo = bufferInfo.isSsbo;\n\n uint32_t bufferId = isTgsm || isSsbo ? 
0 : m_module.opLoad(bufferInfo.typeId, bufferInfo.varId);\n\n uint32_t scalarTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 1 });\n uint32_t vectorTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 4 });\n\n // Set memory operands according to resource properties\n SpirvMemoryOperands memoryOperands;\n SpirvImageOperands imageOperands;\n\n uint32_t coherence = bufferInfo.coherence;\n\n if (isTgsm && m_moduleInfo.options.forceVolatileTgsmAccess) {\n memoryOperands.flags |= spv::MemoryAccessVolatileMask;\n coherence = spv::ScopeWorkgroup;\n }\n\n if (coherence) {\n memoryOperands.flags |= spv::MemoryAccessNonPrivatePointerMask;\n\n if (coherence != spv::ScopeInvocation) {\n memoryOperands.flags |= spv::MemoryAccessMakePointerAvailableMask;\n memoryOperands.makeAvailable = m_module.constu32(coherence);\n\n imageOperands.flags = spv::ImageOperandsNonPrivateTexelMask\n | spv::ImageOperandsMakeTexelAvailableMask;\n imageOperands.makeAvailable = m_module.constu32(coherence);\n }\n }\n\n // Compute flat element index as necessary\n bool useRawAccessChains = isSsbo && m_hasRawAccessChains;\n\n DxbcRegisterValue index = emitRegisterLoad(ins.src[0], DxbcRegMask(true, false, false, false));\n DxbcRegisterValue offset = index;\n\n if (isStructured)\n offset = emitRegisterLoad(ins.src[1], DxbcRegMask(true, false, false, false));\n\n DxbcRegisterValue elementIndex = { };\n\n uint32_t baseAlignment = sizeof(uint32_t);\n\n if (useRawAccessChains) {\n memoryOperands.flags |= spv::MemoryAccessAlignedMask;\n\n if (isStructured && ins.src[1].type == DxbcOperandType::Imm32) {\n baseAlignment = bufferInfo.stride | ins.src[1].imm.u32_1;\n baseAlignment = baseAlignment & -baseAlignment;\n baseAlignment = std::min(baseAlignment, uint32_t(m_moduleInfo.options.minSsboAlignment));\n }\n } else {\n elementIndex = isStructured\n ? 
emitCalcBufferIndexStructured(index, offset, bufferInfo.stride)\n : emitCalcBufferIndexRaw(offset);\n }\n\n uint32_t writeMask = dstReg.mask.raw();\n\n while (writeMask) {\n uint32_t sindex = bit::tzcnt(writeMask);\n uint32_t scount = bit::tzcnt(~(writeMask >> sindex));\n\n if (useRawAccessChains) {\n uint32_t alignment = baseAlignment;\n uint32_t offsetId = offset.id;\n\n if (sindex) {\n offsetId = m_module.opIAdd(scalarTypeId,\n offsetId, m_module.constu32(sizeof(uint32_t) * sindex));\n alignment = alignment | (sizeof(uint32_t) * sindex);\n }\n\n DxbcRegisterInfo storeInfo;\n storeInfo.type.ctype = DxbcScalarType::Uint32;\n storeInfo.type.ccount = scount;\n storeInfo.type.alength = 0;\n storeInfo.sclass = spv::StorageClassStorageBuffer;\n\n uint32_t storeTypeId = getArrayTypeId(storeInfo.type);\n uint32_t ptrTypeId = getPointerTypeId(storeInfo);\n\n uint32_t accessChain = isStructured\n ? m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,\n m_module.constu32(bufferInfo.stride), index.id, offsetId,\n spv::RawAccessChainOperandsRobustnessPerElementNVMask)\n : m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,\n m_module.constu32(0), m_module.constu32(0), offsetId,\n spv::RawAccessChainOperandsRobustnessPerComponentNVMask);\n\n uint32_t valueId = value.id;\n\n if (scount < value.type.ccount) {\n if (scount == 1) {\n valueId = m_module.opCompositeExtract(storeTypeId, value.id, 1, &sindex);\n } else {\n std::array indices = { sindex, sindex + 1u, sindex + 2u, sindex + 3u };\n valueId = m_module.opVectorShuffle(storeTypeId, value.id, value.id, scount, indices.data());\n }\n }\n\n memoryOperands.alignment = alignment & -alignment;\n m_module.opStore(accessChain, valueId, memoryOperands);\n\n writeMask &= ~(((1u << scount) - 1u) << sindex);\n } else {\n uint32_t srcComponentId = value.type.ccount > 1\n ? m_module.opCompositeExtract(scalarTypeId,\n value.id, 1, &sindex)\n : value.id;\n\n uint32_t elementIndexAdjusted = sindex != 0\n ? 
m_module.opIAdd(getVectorTypeId(elementIndex.type),\n elementIndex.id, m_module.consti32(sindex))\n : elementIndex.id;\n\n if (isTgsm) {\n m_module.opStore(\n m_module.opAccessChain(bufferInfo.typeId,\n bufferInfo.varId, 1, &elementIndexAdjusted),\n srcComponentId, memoryOperands);\n } else if (isSsbo) {\n uint32_t indices[2] = { m_module.constu32(0), elementIndexAdjusted };\n m_module.opStore(\n m_module.opAccessChain(bufferInfo.typeId,\n bufferInfo.varId, 2, indices),\n srcComponentId, memoryOperands);\n } else if (dstReg.type == DxbcOperandType::UnorderedAccessView) {\n const std::array srcVectorIds = {\n srcComponentId, srcComponentId,\n srcComponentId, srcComponentId,\n };\n\n m_module.opImageWrite(\n bufferId, elementIndexAdjusted,\n m_module.opCompositeConstruct(vectorTypeId,\n 4, srcVectorIds.data()),\n imageOperands);\n } else {\n throw DxvkError(\"DxbcCompiler: Invalid operand type for strucured/raw store\");\n }\n\n writeMask &= writeMask - 1u;\n }\n }\n }\n void emitConvertFloat16(\n const DxbcShaderInstruction& ins) {\n // f32tof16 takes two operands:\n // (dst0) Destination register as a uint32 vector\n // (src0) Source register as a float32 vector\n // f16tof32 takes two operands:\n // (dst0) Destination register as a float32 vector\n // (src0) Source register as a uint32 vector\n const DxbcRegisterValue src = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n \n // We handle both packing and unpacking here\n const bool isPack = ins.op == DxbcOpcode::F32toF16;\n \n // The conversion instructions do not map very well to the\n // SPIR-V pack instructions, which operate on 2D vectors.\n std::array scalarIds = {{ 0, 0, 0, 0 }};\n \n const uint32_t componentCount = src.type.ccount;\n \n // These types are used in both pack and unpack operations\n const uint32_t t_u32 = getVectorTypeId({ DxbcScalarType::Uint32, 1 });\n const uint32_t t_f32 = getVectorTypeId({ DxbcScalarType::Float32, 1 });\n const uint32_t t_f32v2 = getVectorTypeId({ DxbcScalarType::Float32, 
2 });\n \n // Constant zero-bit pattern, used for packing\n const uint32_t zerof32 = isPack ? m_module.constf32(0.0f) : 0;\n \n for (uint32_t i = 0; i < componentCount; i++) {\n const DxbcRegisterValue componentValue\n = emitRegisterExtract(src, DxbcRegMask::select(i));\n \n if (isPack) { // f32tof16\n const std::array packIds =\n {{ componentValue.id, zerof32 }};\n \n scalarIds[i] = m_module.opPackHalf2x16(t_u32,\n m_module.opCompositeConstruct(t_f32v2, packIds.size(), packIds.data()));\n } else { // f16tof32\n const uint32_t zeroIndex = 0;\n \n scalarIds[i] = m_module.opCompositeExtract(t_f32,\n m_module.opUnpackHalf2x16(t_f32v2, componentValue.id),\n 1, &zeroIndex);\n }\n }\n\n DxbcRegisterValue result;\n result.type.ctype = ins.dst[0].dataType;\n result.type.ccount = componentCount;\n\n uint32_t typeId = getVectorTypeId(result.type);\n result.id = componentCount > 1\n ? m_module.opCompositeConstruct(typeId,\n componentCount, scalarIds.data())\n : scalarIds[0];\n\n if (isPack) {\n // Some drivers return infinity if the input value is above a certain\n // threshold, but D3D wants us to return infinity only if the input is\n // actually infinite. 
Fix this up to return the maximum representable\n // 16-bit floating point number instead, but preserve input infinity.\n uint32_t t_bvec = getVectorTypeId({ DxbcScalarType::Bool, componentCount });\n uint32_t f16Infinity = m_module.constuReplicant(0x7C00, componentCount);\n uint32_t f16Unsigned = m_module.constuReplicant(0x7FFF, componentCount);\n\n uint32_t isInputInf = m_module.opIsInf(t_bvec, src.id);\n uint32_t isValueInf = m_module.opIEqual(t_bvec, f16Infinity,\n m_module.opBitwiseAnd(typeId, result.id, f16Unsigned));\n\n result.id = m_module.opSelect(getVectorTypeId(result.type),\n m_module.opLogicalAnd(t_bvec, isValueInf, m_module.opLogicalNot(t_bvec, isInputInf)),\n m_module.opISub(typeId, result.id, m_module.constuReplicant(1, componentCount)),\n result.id);\n }\n\n // Store result in the destination register\n emitRegisterStore(ins.dst[0], result);\n }\n void emitConvertFloat64(\n const DxbcShaderInstruction& ins) {\n // ftod and dtof take the following operands:\n // (dst0) Destination operand\n // (src0) Number to convert\n uint32_t dstBits = ins.dst[0].mask.popCount();\n\n DxbcRegMask srcMask = isDoubleType(ins.dst[0].dataType)\n ? 
DxbcRegMask(dstBits >= 2, dstBits >= 4, false, false)\n : DxbcRegMask(dstBits >= 1, dstBits >= 1, dstBits >= 2, dstBits >= 2);\n\n // Perform actual conversion, destination modifiers are not applied\n DxbcRegisterValue val = emitRegisterLoad(ins.src[0], srcMask);\n\n DxbcRegisterValue result;\n result.type.ctype = ins.dst[0].dataType;\n result.type.ccount = val.type.ccount;\n\n switch (ins.op) {\n case DxbcOpcode::DtoF:\n case DxbcOpcode::FtoD:\n result.id = m_module.opFConvert(\n getVectorTypeId(result.type), val.id);\n break;\n\n case DxbcOpcode::DtoI:\n result.id = m_module.opConvertFtoS(\n getVectorTypeId(result.type), val.id);\n break;\n\n case DxbcOpcode::DtoU:\n result.id = m_module.opConvertFtoU(\n getVectorTypeId(result.type), val.id);\n break;\n\n case DxbcOpcode::ItoD:\n result.id = m_module.opConvertStoF(\n getVectorTypeId(result.type), val.id);\n break;\n \n case DxbcOpcode::UtoD:\n result.id = m_module.opConvertUtoF(\n getVectorTypeId(result.type), val.id);\n break;\n \n default:\n Logger::warn(str::format(\"DxbcCompiler: Unhandled instruction: \", ins.op));\n return;\n }\n \n emitRegisterStore(ins.dst[0], result);\n }\n void emitHullShaderPhase(\n const DxbcShaderInstruction& ins) {\n switch (ins.op) {\n case DxbcOpcode::HsDecls: {\n if (m_hs.currPhaseType != DxbcCompilerHsPhase::None)\n Logger::err(\"DXBC: HsDecls not the first phase in hull shader\");\n \n m_hs.currPhaseType = DxbcCompilerHsPhase::Decl;\n } break;\n \n case DxbcOpcode::HsControlPointPhase: {\n m_hs.cpPhase = this->emitNewHullShaderControlPointPhase();\n \n m_hs.currPhaseType = DxbcCompilerHsPhase::ControlPoint;\n m_hs.currPhaseId = 0;\n \n m_module.setDebugName(m_hs.cpPhase.functionId, \"hs_control_point\");\n } break;\n \n case DxbcOpcode::HsForkPhase: {\n auto phase = this->emitNewHullShaderForkJoinPhase();\n m_hs.forkPhases.push_back(phase);\n \n m_hs.currPhaseType = DxbcCompilerHsPhase::Fork;\n m_hs.currPhaseId = m_hs.forkPhases.size() - 1;\n \n 
m_module.setDebugName(phase.functionId,\n str::format(\"hs_fork_\", m_hs.currPhaseId).c_str());\n } break;\n \n case DxbcOpcode::HsJoinPhase: {\n auto phase = this->emitNewHullShaderForkJoinPhase();\n m_hs.joinPhases.push_back(phase);\n \n m_hs.currPhaseType = DxbcCompilerHsPhase::Join;\n m_hs.currPhaseId = m_hs.joinPhases.size() - 1;\n \n m_module.setDebugName(phase.functionId,\n str::format(\"hs_join_\", m_hs.currPhaseId).c_str());\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n }\n }\n void emitHullShaderInstCnt(\n const DxbcShaderInstruction& ins) {\n this->getCurrentHsForkJoinPhase()->instanceCount = ins.imm[0].u32;\n }\n void emitInterpolate(\n const DxbcShaderInstruction& ins) {\n m_module.enableCapability(spv::CapabilityInterpolationFunction);\n\n // The SPIR-V instructions operate on input variable pointers,\n // which are all declared as four-component float vectors.\n uint32_t registerId = ins.src[0].idx[0].offset;\n \n DxbcRegisterValue result;\n result.type = getInputRegType(registerId);\n \n switch (ins.op) {\n case DxbcOpcode::EvalCentroid: {\n result.id = m_module.opInterpolateAtCentroid(\n getVectorTypeId(result.type),\n m_vRegs.at(registerId).id);\n } break;\n \n case DxbcOpcode::EvalSampleIndex: {\n const DxbcRegisterValue sampleIndex = emitRegisterLoad(\n ins.src[1], DxbcRegMask(true, false, false, false));\n \n result.id = m_module.opInterpolateAtSample(\n getVectorTypeId(result.type),\n m_vRegs.at(registerId).id,\n sampleIndex.id);\n } break;\n \n case DxbcOpcode::EvalSnapped: {\n // The offset is encoded as a 4-bit fixed point value\n DxbcRegisterValue offset = emitRegisterLoad(\n ins.src[1], DxbcRegMask(true, true, false, false));\n offset.id = m_module.opBitFieldSExtract(\n getVectorTypeId(offset.type), offset.id,\n m_module.consti32(0), m_module.consti32(4));\n\n offset.type.ctype = DxbcScalarType::Float32;\n offset.id = m_module.opConvertStoF(\n getVectorTypeId(offset.type), 
offset.id);\n\n offset.id = m_module.opFMul(\n getVectorTypeId(offset.type), offset.id,\n m_module.constvec2f32(1.0f / 16.0f, 1.0f / 16.0f));\n\n result.id = m_module.opInterpolateAtOffset(\n getVectorTypeId(result.type),\n m_vRegs.at(registerId).id,\n offset.id);\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n \n result = emitRegisterSwizzle(result,\n ins.src[0].swizzle, ins.dst[0].mask);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitSparseCheckAccess(\n const DxbcShaderInstruction& ins) {\n // check_access_mapped has two operands:\n // (dst0) The destination register\n // (src0) The residency code\n m_module.enableCapability(spv::CapabilitySparseResidency);\n\n DxbcRegisterValue srcValue = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n\n uint32_t boolId = m_module.opImageSparseTexelsResident(\n m_module.defBoolType(), srcValue.id);\n\n DxbcRegisterValue dstValue;\n dstValue.type = { DxbcScalarType::Uint32, 1 };\n dstValue.id = m_module.opSelect(getScalarTypeId(DxbcScalarType::Uint32),\n boolId, m_module.constu32(~0u), m_module.constu32(0));\n\n emitRegisterStore(ins.dst[0], dstValue);\n }\n void emitTextureQuery(\n const DxbcShaderInstruction& ins) {\n // resinfo has three operands:\n // (dst0) The destination register\n // (src0) Resource LOD to query\n // (src1) Resource to query\n const DxbcBufferInfo resourceInfo = getBufferInfo(ins.src[1]);\n const DxbcResinfoType resinfoType = ins.controls.resinfoType();\n \n // Read the exact LOD for the image query\n const DxbcRegisterValue mipLod = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n const DxbcScalarType returnType = resinfoType == DxbcResinfoType::Uint\n ? DxbcScalarType::Uint32 : DxbcScalarType::Float32;\n \n // Query the size of the selected mip level, as well as the\n // total number of mip levels. 
We will have to combine the\n // result into a four-component vector later.\n DxbcRegisterValue imageSize = emitQueryTextureSize(ins.src[1], mipLod);\n DxbcRegisterValue imageLevels = emitQueryTextureLods(ins.src[1]);\n\n // If the mip level is out of bounds, D3D requires us to return\n // zero before applying modifiers, whereas SPIR-V is undefined,\n // so we need to fix it up manually here.\n imageSize.id = m_module.opSelect(getVectorTypeId(imageSize.type),\n m_module.opULessThan(m_module.defBoolType(), mipLod.id, imageLevels.id),\n imageSize.id, emitBuildZeroVector(imageSize.type).id);\n\n // Convert intermediates to the requested type\n if (returnType == DxbcScalarType::Float32) {\n imageSize.type.ctype = DxbcScalarType::Float32;\n imageSize.id = m_module.opConvertUtoF(\n getVectorTypeId(imageSize.type),\n imageSize.id);\n \n imageLevels.type.ctype = DxbcScalarType::Float32;\n imageLevels.id = m_module.opConvertUtoF(\n getVectorTypeId(imageLevels.type),\n imageLevels.id);\n }\n \n // If the selected return type is rcpFloat, we need\n // to compute the reciprocal of the image dimensions,\n // but not the array size, so we need to separate it.\n const uint32_t imageCoordDim = imageSize.type.ccount;\n \n DxbcRegisterValue imageLayers;\n imageLayers.type = imageSize.type;\n imageLayers.id = 0;\n \n if (resinfoType == DxbcResinfoType::RcpFloat && resourceInfo.image.array) {\n imageLayers = emitRegisterExtract(imageSize, DxbcRegMask::select(imageCoordDim - 1));\n imageSize = emitRegisterExtract(imageSize, DxbcRegMask::firstN(imageCoordDim - 1));\n }\n \n if (resinfoType == DxbcResinfoType::RcpFloat) {\n imageSize.id = m_module.opFDiv(\n getVectorTypeId(imageSize.type),\n emitBuildConstVecf32(1.0f, 1.0f, 1.0f, 1.0f,\n DxbcRegMask::firstN(imageSize.type.ccount)).id,\n imageSize.id);\n }\n \n // Concatenate result vectors and scalars to form a\n // 4D vector. 
Unused components will be set to zero.\n std::array vectorIds = { imageSize.id, 0, 0, 0 };\n uint32_t numVectorIds = 1;\n \n if (imageLayers.id != 0)\n vectorIds[numVectorIds++] = imageLayers.id;\n \n if (imageCoordDim < 3) {\n const uint32_t zero = returnType == DxbcScalarType::Uint32\n ? m_module.constu32(0)\n : m_module.constf32(0.0f);\n \n for (uint32_t i = imageCoordDim; i < 3; i++)\n vectorIds[numVectorIds++] = zero;\n }\n \n vectorIds[numVectorIds++] = imageLevels.id;\n \n // Create the actual result vector\n DxbcRegisterValue result;\n result.type.ctype = returnType;\n result.type.ccount = 4;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n numVectorIds, vectorIds.data());\n \n // Swizzle components using the resource swizzle\n // and the destination operand's write mask\n result = emitRegisterSwizzle(result,\n ins.src[1].swizzle, ins.dst[0].mask);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitTextureQueryLod(\n const DxbcShaderInstruction& ins) {\n // All sample instructions have at least these operands:\n // (dst0) The destination register\n // (src0) Texture coordinates\n // (src1) The texture itself\n // (src2) The sampler object\n const DxbcRegister& texCoordReg = ins.src[0];\n const DxbcRegister& textureReg = ins.src[1];\n const DxbcRegister& samplerReg = ins.src[2];\n \n // Texture and sampler register IDs\n const auto& texture = m_textures.at(textureReg.idx[0].offset);\n const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);\n \n // Load texture coordinates\n const DxbcRegisterValue coord = emitRegisterLoad(texCoordReg,\n DxbcRegMask::firstN(getTexLayerDim(texture.imageInfo)));\n \n // Query the LOD. 
The result is a two-dimensional float32\n // vector containing the mip level and virtual LOD numbers.\n const uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, false);\n const uint32_t queriedLodId = m_module.opImageQueryLod(\n getVectorTypeId({ DxbcScalarType::Float32, 2 }),\n sampledImageId, coord.id);\n \n // Build the result array vector by filling up\n // the remaining two components with zeroes.\n const uint32_t zero = m_module.constf32(0.0f);\n const std::array resultIds\n = {{ queriedLodId, zero, zero }};\n \n DxbcRegisterValue result;\n result.type = DxbcVectorType { DxbcScalarType::Float32, 4 };\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n resultIds.size(), resultIds.data());\n \n result = emitRegisterSwizzle(result, ins.src[1].swizzle, ins.dst[0].mask);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitTextureQueryMs(\n const DxbcShaderInstruction& ins) {\n // sampleinfo has two operands:\n // (dst0) The destination register\n // (src0) Resource to query\n DxbcRegisterValue sampleCount = emitQueryTextureSamples(ins.src[0]);\n \n if (ins.controls.returnType() != DxbcInstructionReturnType::Uint) {\n sampleCount.type = { DxbcScalarType::Float32, 1 };\n sampleCount.id = m_module.opConvertUtoF(\n getVectorTypeId(sampleCount.type),\n sampleCount.id);\n }\n \n emitRegisterStore(ins.dst[0], sampleCount);\n }\n void emitTextureQueryMsPos(\n const DxbcShaderInstruction& ins) {\n // samplepos has three operands:\n // (dst0) The destination register\n // (src0) Resource to query \n // (src1) Sample index\n if (m_samplePositions == 0)\n m_samplePositions = emitSamplePosArray();\n \n // The lookup index is qual to the sample count plus the\n // sample index, or 0 if the resource cannot be queried.\n DxbcRegisterValue sampleCount = emitQueryTextureSamples(ins.src[0]);\n DxbcRegisterValue sampleIndex = emitRegisterLoad(\n ins.src[1], DxbcRegMask(true, false, false, false));\n \n uint32_t lookupIndex = 
m_module.opIAdd(\n getVectorTypeId(sampleCount.type),\n sampleCount.id, sampleIndex.id);\n \n // Validate the parameters\n uint32_t sampleCountValid = m_module.opULessThanEqual(\n m_module.defBoolType(),\n sampleCount.id,\n m_module.constu32(16));\n \n uint32_t sampleIndexValid = m_module.opULessThan(\n m_module.defBoolType(),\n sampleIndex.id,\n sampleCount.id);\n \n // If the lookup cannot be performed, set the lookup\n // index to zero, which will return a zero vector.\n lookupIndex = m_module.opSelect(\n getVectorTypeId(sampleCount.type),\n m_module.opLogicalAnd(\n m_module.defBoolType(),\n sampleCountValid,\n sampleIndexValid),\n lookupIndex,\n m_module.constu32(0));\n \n // Load sample pos vector and write the masked\n // components to the destination register.\n DxbcRegisterPointer samplePos;\n samplePos.type.ctype = DxbcScalarType::Float32;\n samplePos.type.ccount = 2;\n samplePos.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(samplePos.type),\n spv::StorageClassPrivate),\n m_samplePositions, 1, &lookupIndex);\n \n // Expand to vec4 by appending zeroes\n DxbcRegisterValue result = emitValueLoad(samplePos);\n\n DxbcRegisterValue zero;\n zero.type.ctype = DxbcScalarType::Float32;\n zero.type.ccount = 2;\n zero.id = m_module.constvec2f32(0.0f, 0.0f);\n\n result = emitRegisterConcat(result, zero);\n \n emitRegisterStore(ins.dst[0],\n emitRegisterSwizzle(result,\n ins.src[0].swizzle,\n ins.dst[0].mask));\n }\n void emitTextureFetch(\n const DxbcShaderInstruction& ins) {\n // ld has three operands:\n // (dst0) The destination register\n // (src0) Source address\n // (src1) Source texture\n // ld2dms has four operands:\n // (dst0) The destination register\n // (src0) Source address\n // (src1) Source texture\n // (src2) Sample number\n const auto& texture = m_textures.at(ins.src[1].idx[0].offset);\n const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);\n \n bool isMultisampled = ins.op == DxbcOpcode::LdMs\n || ins.op == 
DxbcOpcode::LdMsS;\n\n // Load the texture coordinates. The last component\n // contains the LOD if the resource is an image.\n const DxbcRegisterValue address = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, true, true, true));\n \n // Additional image operands. This will store\n // the LOD and the address offset if present.\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n \n if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {\n const std::array offsetIds = {\n imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,\n imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,\n imageLayerDim >= 3 ? m_module.consti32(ins.sampleControls.w) : 0,\n };\n \n imageOperands.flags |= spv::ImageOperandsConstOffsetMask;\n imageOperands.sConstOffset = offsetIds[0];\n\n if (imageLayerDim > 1) {\n imageOperands.sConstOffset = m_module.constComposite(\n getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),\n imageLayerDim, offsetIds.data());\n }\n }\n \n // The LOD is not present when reading from\n // a buffer or from a multisample texture.\n if (texture.imageInfo.dim != spv::DimBuffer && texture.imageInfo.ms == 0) {\n DxbcRegisterValue imageLod;\n \n if (!isMultisampled) {\n imageLod = emitRegisterExtract(\n address, DxbcRegMask(false, false, false, true));\n } else {\n // If we force-disabled MSAA, fetch from LOD 0\n imageLod.type = { DxbcScalarType::Uint32, 1 };\n imageLod.id = m_module.constu32(0);\n }\n \n imageOperands.flags |= spv::ImageOperandsLodMask;\n imageOperands.sLod = imageLod.id;\n }\n \n // The ld2dms instruction has a sample index, but we\n // are only allowed to set it for multisample views\n if (isMultisampled && texture.imageInfo.ms == 1) {\n DxbcRegisterValue sampleId = emitRegisterLoad(\n ins.src[2], DxbcRegMask(true, false, false, false));\n \n imageOperands.flags |= spv::ImageOperandsSampleMask;\n imageOperands.sSampleId = sampleId.id;\n }\n \n // Extract 
coordinates from address\n const DxbcRegisterValue coord = emitCalcTexCoord(address, texture.imageInfo);\n \n // Reading a typed image or buffer view\n // always returns a four-component vector.\n const uint32_t imageId = m_module.opLoad(texture.imageTypeId, texture.varId);\n\n DxbcVectorType texelType;\n texelType.ctype = texture.sampledType;\n texelType.ccount = 4;\n\n uint32_t texelTypeId = getVectorTypeId(texelType);\n uint32_t resultTypeId = texelTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(texelTypeId);\n\n resultId = m_module.opImageFetch(resultTypeId,\n imageId, coord.id, imageOperands);\n\n DxbcRegisterValue result;\n result.type = texelType;\n result.id = imageOperands.sparse\n ? emitExtractSparseTexel(texelTypeId, resultId)\n : resultId;\n \n // Swizzle components using the texture swizzle\n // and the destination operand's write mask\n result = emitRegisterSwizzle(result,\n ins.src[1].swizzle, ins.dst[0].mask);\n \n emitRegisterStore(ins.dst[0], result);\n\n if (imageOperands.sparse)\n emitStoreSparseFeedback(ins.dst[1], resultId);\n }\n void emitTextureGather(\n const DxbcShaderInstruction& ins) {\n // Gather4 takes the following operands:\n // (dst0) The destination register\n // (dst1) The residency code for sparse ops\n // (src0) Texture coordinates\n // (src1) The texture itself\n // (src2) The sampler, with a component selector\n // Gather4C takes the following additional operand:\n // (src3) The depth reference value\n // The Gather4Po variants take an additional operand\n // which defines an extended constant offset.\n // TODO reduce code duplication by moving some common code\n // in both sample() and gather() into separate methods\n const bool isExtendedGather = ins.op == DxbcOpcode::Gather4Po\n || ins.op == DxbcOpcode::Gather4PoC\n || ins.op == DxbcOpcode::Gather4PoS\n || ins.op == DxbcOpcode::Gather4PoCS;\n \n const DxbcRegister& texCoordReg = ins.src[0];\n const DxbcRegister& 
textureReg = ins.src[1 + isExtendedGather];\n const DxbcRegister& samplerReg = ins.src[2 + isExtendedGather];\n \n // Texture and sampler register IDs\n const auto& texture = m_textures.at(textureReg.idx[0].offset);\n const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);\n \n // Image type, which stores the image dimensions etc.\n const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);\n \n // Load the texture coordinates. SPIR-V allows these\n // to be float4 even if not all components are used.\n DxbcRegisterValue coord = emitLoadTexCoord(texCoordReg, texture.imageInfo);\n \n // Load reference value for depth-compare operations\n const bool isDepthCompare = ins.op == DxbcOpcode::Gather4C\n || ins.op == DxbcOpcode::Gather4PoC\n || ins.op == DxbcOpcode::Gather4CS\n || ins.op == DxbcOpcode::Gather4PoCS;\n\n const DxbcRegisterValue referenceValue = isDepthCompare\n ? emitRegisterLoad(ins.src[3 + isExtendedGather],\n DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n \n // Accumulate additional image operands.\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n\n if (isExtendedGather) {\n m_module.enableCapability(spv::CapabilityImageGatherExtended);\n \n DxbcRegisterValue gatherOffset = emitRegisterLoad(\n ins.src[1], DxbcRegMask::firstN(imageLayerDim));\n \n imageOperands.flags |= spv::ImageOperandsOffsetMask;\n imageOperands.gOffset = gatherOffset.id;\n } else if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {\n const std::array offsetIds = {\n imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,\n imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,\n imageLayerDim >= 3 ? 
m_module.consti32(ins.sampleControls.w) : 0,\n };\n \n imageOperands.flags |= spv::ImageOperandsConstOffsetMask;\n imageOperands.sConstOffset = offsetIds[0];\n\n if (imageLayerDim > 1) {\n imageOperands.sConstOffset = m_module.constComposite(\n getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),\n imageLayerDim, offsetIds.data());\n }\n }\n\n // Gathering texels always returns a four-component\n // vector, even for the depth-compare variants.\n uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, isDepthCompare);\n\n DxbcVectorType texelType;\n texelType.ctype = texture.sampledType;\n texelType.ccount = 4;\n\n uint32_t texelTypeId = getVectorTypeId(texelType);\n uint32_t resultTypeId = texelTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(texelTypeId);\n\n if (sampledImageId) {\n switch (ins.op) {\n // Simple image gather operation\n case DxbcOpcode::Gather4:\n case DxbcOpcode::Gather4S:\n case DxbcOpcode::Gather4Po:\n case DxbcOpcode::Gather4PoS: {\n resultId = m_module.opImageGather(\n resultTypeId, sampledImageId, coord.id,\n m_module.consti32(samplerReg.swizzle[0]),\n imageOperands);\n } break;\n\n // Depth-compare operation\n case DxbcOpcode::Gather4C:\n case DxbcOpcode::Gather4CS:\n case DxbcOpcode::Gather4PoC:\n case DxbcOpcode::Gather4PoCS: {\n resultId = m_module.opImageDrefGather(\n resultTypeId, sampledImageId, coord.id,\n referenceValue.id, imageOperands);\n } break;\n\n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n } else {\n Logger::warn(str::format(\"DxbcCompiler: \", ins.op, \": Unsupported image type\"));\n resultId = m_module.constNull(resultTypeId);\n }\n\n // If necessary, deal with the sparse result\n DxbcRegisterValue result;\n result.type = texelType;\n result.id = imageOperands.sparse\n ? 
emitExtractSparseTexel(texelTypeId, resultId)\n : resultId;\n\n // Swizzle components using the texture swizzle\n // and the destination operand's write mask\n result = emitRegisterSwizzle(result,\n textureReg.swizzle, ins.dst[0].mask);\n \n emitRegisterStore(ins.dst[0], result);\n\n if (imageOperands.sparse)\n emitStoreSparseFeedback(ins.dst[1], resultId);\n }\n void emitTextureSample(\n const DxbcShaderInstruction& ins) {\n // All sample instructions have at least these operands:\n // (dst0) The destination register\n // (src0) Texture coordinates\n // (src1) The texture itself\n // (src2) The sampler object\n const DxbcRegister& texCoordReg = ins.src[0];\n const DxbcRegister& textureReg = ins.src[1];\n const DxbcRegister& samplerReg = ins.src[2];\n \n // Texture and sampler register IDs\n const auto& texture = m_textures.at(textureReg.idx[0].offset);\n const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);\n const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);\n \n // Load the texture coordinates. SPIR-V allows these\n // to be float4 even if not all components are used.\n DxbcRegisterValue coord = emitLoadTexCoord(texCoordReg, texture.imageInfo);\n \n // Load reference value for depth-compare operations\n const bool isDepthCompare = ins.op == DxbcOpcode::SampleC\n || ins.op == DxbcOpcode::SampleClz\n || ins.op == DxbcOpcode::SampleCClampS\n || ins.op == DxbcOpcode::SampleClzS;\n \n const DxbcRegisterValue referenceValue = isDepthCompare\n ? emitRegisterLoad(ins.src[3], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n \n // Load explicit gradients for sample operations that require them\n const bool hasExplicitGradients = ins.op == DxbcOpcode::SampleD\n || ins.op == DxbcOpcode::SampleDClampS;\n \n const DxbcRegisterValue explicitGradientX = hasExplicitGradients\n ? 
emitRegisterLoad(ins.src[3], DxbcRegMask::firstN(imageLayerDim))\n : DxbcRegisterValue();\n \n const DxbcRegisterValue explicitGradientY = hasExplicitGradients\n ? emitRegisterLoad(ins.src[4], DxbcRegMask::firstN(imageLayerDim))\n : DxbcRegisterValue();\n \n // LOD for certain sample operations\n const bool hasLod = ins.op == DxbcOpcode::SampleL\n || ins.op == DxbcOpcode::SampleLS\n || ins.op == DxbcOpcode::SampleB\n || ins.op == DxbcOpcode::SampleBClampS;\n \n const DxbcRegisterValue lod = hasLod\n ? emitRegisterLoad(ins.src[3], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n\n // Min LOD for certain sparse operations\n const bool hasMinLod = ins.op == DxbcOpcode::SampleClampS\n || ins.op == DxbcOpcode::SampleBClampS\n || ins.op == DxbcOpcode::SampleDClampS\n || ins.op == DxbcOpcode::SampleCClampS;\n\n const DxbcRegisterValue minLod = hasMinLod && ins.src[ins.srcCount - 1].type != DxbcOperandType::Null\n ? emitRegisterLoad(ins.src[ins.srcCount - 1], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n\n // Accumulate additional image operands. These are\n // not part of the actual operand token in SPIR-V.\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n\n if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {\n const std::array offsetIds = {\n imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,\n imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,\n imageLayerDim >= 3 ? 
m_module.consti32(ins.sampleControls.w) : 0,\n };\n \n imageOperands.flags |= spv::ImageOperandsConstOffsetMask;\n imageOperands.sConstOffset = offsetIds[0];\n\n if (imageLayerDim > 1) {\n imageOperands.sConstOffset = m_module.constComposite(\n getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),\n imageLayerDim, offsetIds.data());\n }\n }\n\n if (hasMinLod) {\n m_module.enableCapability(spv::CapabilityMinLod);\n\n imageOperands.flags |= spv::ImageOperandsMinLodMask;\n imageOperands.sMinLod = minLod.id;\n }\n\n // Combine the texture and the sampler into a sampled image\n uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, isDepthCompare);\n \n // Sampling an image always returns a four-component\n // vector, whereas depth-compare ops return a scalar.\n DxbcVectorType texelType;\n texelType.ctype = texture.sampledType;\n texelType.ccount = isDepthCompare ? 1 : 4;\n\n uint32_t texelTypeId = getVectorTypeId(texelType);\n uint32_t resultTypeId = texelTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(texelTypeId);\n\n if (sampledImageId) {\n switch (ins.op) {\n // Simple image sample operation\n case DxbcOpcode::Sample:\n case DxbcOpcode::SampleClampS: {\n resultId = m_module.opImageSampleImplicitLod(\n resultTypeId, sampledImageId, coord.id,\n imageOperands);\n } break;\n\n // Depth-compare operation\n case DxbcOpcode::SampleC:\n case DxbcOpcode::SampleCClampS: {\n resultId = m_module.opImageSampleDrefImplicitLod(\n resultTypeId, sampledImageId, coord.id,\n referenceValue.id, imageOperands);\n } break;\n\n // Depth-compare operation on mip level zero\n case DxbcOpcode::SampleClz:\n case DxbcOpcode::SampleClzS: {\n imageOperands.flags |= spv::ImageOperandsLodMask;\n imageOperands.sLod = m_module.constf32(0.0f);\n\n resultId = m_module.opImageSampleDrefExplicitLod(\n resultTypeId, sampledImageId, coord.id,\n referenceValue.id, imageOperands);\n } break;\n\n // Sample operation with explicit 
gradients\n case DxbcOpcode::SampleD:\n case DxbcOpcode::SampleDClampS: {\n imageOperands.flags |= spv::ImageOperandsGradMask;\n imageOperands.sGradX = explicitGradientX.id;\n imageOperands.sGradY = explicitGradientY.id;\n\n resultId = m_module.opImageSampleExplicitLod(\n resultTypeId, sampledImageId, coord.id,\n imageOperands);\n } break;\n\n // Sample operation with explicit LOD\n case DxbcOpcode::SampleL:\n case DxbcOpcode::SampleLS: {\n imageOperands.flags |= spv::ImageOperandsLodMask;\n imageOperands.sLod = lod.id;\n\n resultId = m_module.opImageSampleExplicitLod(\n resultTypeId, sampledImageId, coord.id,\n imageOperands);\n } break;\n\n // Sample operation with LOD bias\n case DxbcOpcode::SampleB:\n case DxbcOpcode::SampleBClampS: {\n imageOperands.flags |= spv::ImageOperandsBiasMask;\n imageOperands.sLodBias = lod.id;\n\n resultId = m_module.opImageSampleImplicitLod(\n resultTypeId, sampledImageId, coord.id,\n imageOperands);\n } break;\n\n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n } else {\n Logger::warn(str::format(\"DxbcCompiler: \", ins.op, \": Unsupported image type\"));\n resultId = m_module.constNull(resultTypeId);\n }\n \n DxbcRegisterValue result;\n result.type = texelType;\n result.id = imageOperands.sparse\n ? 
emitExtractSparseTexel(texelTypeId, resultId)\n : resultId;\n\n // Swizzle components using the texture swizzle\n // and the destination operand's write mask\n if (result.type.ccount != 1) {\n result = emitRegisterSwizzle(result,\n textureReg.swizzle, ins.dst[0].mask);\n }\n \n emitRegisterStore(ins.dst[0], result);\n\n if (imageOperands.sparse)\n emitStoreSparseFeedback(ins.dst[1], resultId);\n }\n void emitTypedUavLoad(\n const DxbcShaderInstruction& ins) {\n // load_uav_typed has three operands:\n // (dst0) The destination register\n // (src0) The texture or buffer coordinates\n // (src1) The UAV to load from\n const uint32_t registerId = ins.src[1].idx[0].offset;\n const DxbcUav uavInfo = m_uavs.at(registerId);\n\n emitUavBarrier(uint64_t(1u) << registerId, 0u);\n\n // Load texture coordinates\n DxbcRegisterValue texCoord = emitLoadTexCoord(\n ins.src[0], uavInfo.imageInfo);\n\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n\n if (uavInfo.coherence) {\n imageOperands.flags |= spv::ImageOperandsNonPrivateTexelMask\n | spv::ImageOperandsMakeTexelVisibleMask;\n imageOperands.makeVisible = m_module.constu32(uavInfo.coherence);\n }\n\n DxbcVectorType texelType;\n texelType.ctype = uavInfo.sampledType;\n texelType.ccount = 4;\n\n uint32_t texelTypeId = getVectorTypeId(texelType);\n uint32_t resultTypeId = texelTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(texelTypeId);\n\n // Load source value from the UAV\n resultId = m_module.opImageRead(resultTypeId,\n m_module.opLoad(uavInfo.imageTypeId, uavInfo.varId),\n texCoord.id, imageOperands);\n \n // Apply component swizzle and mask\n DxbcRegisterValue uavValue;\n uavValue.type = texelType;\n uavValue.id = imageOperands.sparse\n ? 
emitExtractSparseTexel(texelTypeId, resultId)\n : resultId;\n\n uavValue = emitRegisterSwizzle(uavValue,\n ins.src[1].swizzle, ins.dst[0].mask);\n \n emitRegisterStore(ins.dst[0], uavValue);\n\n if (imageOperands.sparse)\n emitStoreSparseFeedback(ins.dst[1], resultId);\n }\n void emitTypedUavStore(\n const DxbcShaderInstruction& ins) {\n // store_uav_typed has three operands:\n // (dst0) The destination UAV\n // (src0) The texture or buffer coordinates\n // (src1) The value to store\n const DxbcBufferInfo uavInfo = getBufferInfo(ins.dst[0]);\n emitUavBarrier(0u, uint64_t(1u) << ins.dst[0].idx[0].offset);\n\n // Set image operands for coherent access if necessary \n SpirvImageOperands imageOperands;\n\n if (uavInfo.coherence) {\n imageOperands.flags |= spv::ImageOperandsNonPrivateTexelMask\n | spv::ImageOperandsMakeTexelAvailableMask;\n imageOperands.makeAvailable = m_module.constu32(uavInfo.coherence);\n }\n\n // Load texture coordinates\n DxbcRegisterValue texCoord = emitLoadTexCoord(ins.src[0], uavInfo.image);\n\n // Load the value that will be written to the image. We'll\n // have to cast it to the component type of the image.\n const DxbcRegisterValue texValue = emitRegisterBitcast(\n emitRegisterLoad(ins.src[1], DxbcRegMask(true, true, true, true)),\n uavInfo.stype);\n \n // Write the given value to the image\n m_module.opImageWrite(\n m_module.opLoad(uavInfo.typeId, uavInfo.varId),\n texCoord.id, texValue.id, imageOperands);\n }\n void emitControlFlowIf(\n const DxbcShaderInstruction& ins) {\n // Load the first component of the condition\n // operand and perform a zero test on it.\n const DxbcRegisterValue condition = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n // Declare the 'if' block. 
We do not know if there\n // will be an 'else' block or not, so we'll assume\n // that there is one and leave it empty otherwise.\n DxbcCfgBlock block;\n block.type = DxbcCfgBlockType::If;\n block.b_if.ztestId = emitRegisterZeroTest(condition, ins.controls.zeroTest()).id;\n block.b_if.labelIf = m_module.allocateId();\n block.b_if.labelElse = 0;\n block.b_if.labelEnd = m_module.allocateId();\n block.b_if.headerPtr = m_module.getInsertionPtr();\n m_controlFlowBlocks.push_back(block);\n \n // We'll insert the branch instruction when closing\n // the block, since we don't know whether or not an\n // else block is needed right now.\n m_module.opLabel(block.b_if.labelIf);\n }\n void emitControlFlowElse(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::If\n || m_controlFlowBlocks.back().b_if.labelElse != 0)\n throw DxvkError(\"DxbcCompiler: 'Else' without 'If' found\");\n \n // Set the 'Else' flag so that we do\n // not insert a dummy block on 'EndIf'\n DxbcCfgBlock& block = m_controlFlowBlocks.back();\n block.b_if.labelElse = m_module.allocateId();\n \n // Close the 'If' block by branching to\n // the merge block we declared earlier\n m_module.opBranch(block.b_if.labelEnd);\n m_module.opLabel (block.b_if.labelElse);\n }\n void emitControlFlowEndIf(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::If)\n throw DxvkError(\"DxbcCompiler: 'EndIf' without 'If' found\");\n \n // Remove the block from the stack, it's closed\n DxbcCfgBlock block = m_controlFlowBlocks.back();\n m_controlFlowBlocks.pop_back();\n \n // Write out the 'if' header\n m_module.beginInsertion(block.b_if.headerPtr);\n \n m_module.opSelectionMerge(\n block.b_if.labelEnd,\n spv::SelectionControlMaskNone);\n \n m_module.opBranchConditional(\n block.b_if.ztestId,\n block.b_if.labelIf,\n block.b_if.labelElse != 0\n ? 
block.b_if.labelElse
        : block.b_if.labelEnd);

    m_module.endInsertion();

    // End the active 'if' or 'else' block
    m_module.opBranch(block.b_if.labelEnd);
    m_module.opLabel (block.b_if.labelEnd);
  }


  // Opens a 'switch' construct. The OpSwitch instruction itself cannot
  // be emitted yet (case labels are unknown here); it is inserted at
  // insertPtr when the matching 'EndSwitch' is processed.
  void emitControlFlowSwitch(
    const DxbcShaderInstruction& ins) {
    // Load the selector as a scalar unsigned integer
    const DxbcRegisterValue selector = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, false, false, false));

    // Declare switch block. We cannot insert the switch
    // instruction itself yet because the number of case
    // statements and blocks is unknown at this point.
    DxbcCfgBlock block;
    block.type = DxbcCfgBlockType::Switch;
    block.b_switch.insertPtr = m_module.getInsertionPtr();
    block.b_switch.selectorId = selector.id;
    block.b_switch.labelBreak = m_module.allocateId();
    block.b_switch.labelCase = m_module.allocateId();
    block.b_switch.labelDefault = 0;
    block.b_switch.labelCases = nullptr;
    m_controlFlowBlocks.push_back(block);

    // Define the first 'case' label
    m_module.opLabel(block.b_switch.labelCase);
  }


  // Registers one 'case' literal for the innermost switch block.
  // The literal/label pair is prepended to a linked list that is
  // put back into source order when OpSwitch is finally emitted.
  void emitControlFlowCase(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() == 0
     || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)
      throw DxvkError("DxbcCompiler: 'Case' without 'Switch' found");

    // The source operand must be a 32-bit immediate.
    if (ins.src[0].type != DxbcOperandType::Imm32)
      throw DxvkError("DxbcCompiler: Invalid operand type for 'Case'");

    // Use the last label allocated for 'case'.
    DxbcCfgBlockSwitch* block = &m_controlFlowBlocks.back().b_switch;

    // If the previous case block is still open (fallthrough),
    // start a fresh block so this literal gets its own label.
    if (caseBlockIsFallthrough()) {
      block->labelCase = m_module.allocateId();

      m_module.opBranch(block->labelCase);
      m_module.opLabel (block->labelCase);
    }

    DxbcSwitchLabel label;
    label.desc.literal = ins.src[0].imm.u32_1;
    label.desc.labelId = block->labelCase;
    label.next = block->labelCases;
    block->labelCases = new DxbcSwitchLabel(label);
  }


  void
emitControlFlowDefault(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)\n throw DxvkError(\"DxbcCompiler: 'Default' without 'Switch' found\");\n \n DxbcCfgBlockSwitch* block = &m_controlFlowBlocks.back().b_switch;\n\n if (caseBlockIsFallthrough()) {\n block->labelCase = m_module.allocateId();\n\n m_module.opBranch(block->labelCase);\n m_module.opLabel (block->labelCase);\n }\n\n // Set the last label allocated for 'case' as the default label.\n block->labelDefault = block->labelCase;\n }\n void emitControlFlowEndSwitch(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)\n throw DxvkError(\"DxbcCompiler: 'EndSwitch' without 'Switch' found\");\n \n // Remove the block from the stack, it's closed\n DxbcCfgBlock block = m_controlFlowBlocks.back();\n m_controlFlowBlocks.pop_back();\n\n if (!block.b_switch.labelDefault) {\n block.b_switch.labelDefault = caseBlockIsFallthrough()\n ? block.b_switch.labelBreak\n : block.b_switch.labelCase;\n }\n \n // Close the current 'case' block\n m_module.opBranch(block.b_switch.labelBreak);\n \n // Insert the 'switch' statement. 
For that, we need to\n // gather all the literal-label pairs for the construct.\n m_module.beginInsertion(block.b_switch.insertPtr);\n m_module.opSelectionMerge(\n block.b_switch.labelBreak,\n spv::SelectionControlMaskNone);\n \n // We'll restore the original order of the case labels here\n std::vector jumpTargets;\n for (auto i = block.b_switch.labelCases; i != nullptr; i = i->next)\n jumpTargets.insert(jumpTargets.begin(), i->desc);\n \n m_module.opSwitch(\n block.b_switch.selectorId,\n block.b_switch.labelDefault,\n jumpTargets.size(),\n jumpTargets.data());\n m_module.endInsertion();\n \n // Destroy the list of case labels\n // FIXME we're leaking memory if compilation fails.\n DxbcSwitchLabel* caseLabel = block.b_switch.labelCases;\n \n while (caseLabel != nullptr)\n delete std::exchange(caseLabel, caseLabel->next);\n\n // Begin new block after switch blocks\n m_module.opLabel(block.b_switch.labelBreak);\n }\n void emitControlFlowLoop(\n const DxbcShaderInstruction& ins) {\n // Declare the 'loop' block\n DxbcCfgBlock block;\n block.type = DxbcCfgBlockType::Loop;\n block.b_loop.labelHeader = m_module.allocateId();\n block.b_loop.labelBegin = m_module.allocateId();\n block.b_loop.labelContinue = m_module.allocateId();\n block.b_loop.labelBreak = m_module.allocateId();\n m_controlFlowBlocks.push_back(block);\n \n m_module.opBranch(block.b_loop.labelHeader);\n m_module.opLabel (block.b_loop.labelHeader);\n \n m_module.opLoopMerge(\n block.b_loop.labelBreak,\n block.b_loop.labelContinue,\n spv::LoopControlMaskNone);\n \n m_module.opBranch(block.b_loop.labelBegin);\n m_module.opLabel (block.b_loop.labelBegin);\n }\n void emitControlFlowEndLoop(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Loop)\n throw DxvkError(\"DxbcCompiler: 'EndLoop' without 'Loop' found\");\n \n // Remove the block from the stack, it's closed\n const DxbcCfgBlock block = m_controlFlowBlocks.back();\n 
m_controlFlowBlocks.pop_back();

    // Declare the continue block
    m_module.opBranch(block.b_loop.labelContinue);
    m_module.opLabel (block.b_loop.labelContinue);

    // Declare the merge block
    m_module.opBranch(block.b_loop.labelHeader);
    m_module.opLabel (block.b_loop.labelBreak);
  }


  // Unconditional 'break'/'continue'. 'break' may target the innermost
  // loop or switch; 'continue' may only target a loop.
  void emitControlFlowBreak(
    const DxbcShaderInstruction& ins) {
    const bool isBreak = ins.op == DxbcOpcode::Break;

    DxbcCfgBlock* cfgBlock = isBreak
      ? cfgFindBlock({ DxbcCfgBlockType::Loop, DxbcCfgBlockType::Switch })
      : cfgFindBlock({ DxbcCfgBlockType::Loop });

    if (cfgBlock == nullptr)
      throw DxvkError("DxbcCompiler: 'Break' or 'Continue' outside 'Loop' or 'Switch' found");

    if (cfgBlock->type == DxbcCfgBlockType::Loop) {
      m_module.opBranch(isBreak
        ? cfgBlock->b_loop.labelBreak
        : cfgBlock->b_loop.labelContinue);
    } else /* if (cfgBlock->type == DxbcCfgBlockType::Switch) */ {
      m_module.opBranch(cfgBlock->b_switch.labelBreak);
    }

    // Subsequent instructions assume that there is an open block
    const uint32_t labelId = m_module.allocateId();
    m_module.opLabel(labelId);

    // If this is on the same level as a switch-case construct,
    // rather than being nested inside an 'if' statement, close
    // the current 'case' block.
    if (m_controlFlowBlocks.back().type == DxbcCfgBlockType::Switch)
      cfgBlock->b_switch.labelCase = labelId;
  }


  // Conditional 'breakc'/'continuec': the branch is wrapped into a
  // small 'if' construct driven by a zero test on the condition.
  void emitControlFlowBreakc(
    const DxbcShaderInstruction& ins) {
    const bool isBreak = ins.op == DxbcOpcode::Breakc;

    DxbcCfgBlock* cfgBlock = isBreak
      ? cfgFindBlock({ DxbcCfgBlockType::Loop, DxbcCfgBlockType::Switch })
      : cfgFindBlock({ DxbcCfgBlockType::Loop });

    if (cfgBlock == nullptr)
      throw DxvkError("DxbcCompiler: 'Breakc' or 'Continuec' outside 'Loop' or 'Switch' found");

    // Perform zero test on the first component of the condition
    const DxbcRegisterValue condition = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, false, false, false));

    const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
      condition, ins.controls.zeroTest());

    // We basically have to wrap this into an 'if' block
    const uint32_t breakBlock = m_module.allocateId();
    const uint32_t mergeBlock = m_module.allocateId();

    m_module.opSelectionMerge(mergeBlock,
      spv::SelectionControlMaskNone);

    m_module.opBranchConditional(
      zeroTest.id, breakBlock, mergeBlock);

    m_module.opLabel(breakBlock);

    if (cfgBlock->type == DxbcCfgBlockType::Loop) {
      m_module.opBranch(isBreak
        ? cfgBlock->b_loop.labelBreak
        : cfgBlock->b_loop.labelContinue);
    } else /* if (cfgBlock->type == DxbcCfgBlockType::Switch) */ {
      m_module.opBranch(cfgBlock->b_switch.labelBreak);
    }

    m_module.opLabel(mergeBlock);
  }


  // 'ret' inside open control flow emits OpReturn and opens a fresh
  // block for any trailing instructions; at top level it instead ends
  // the current function.
  void emitControlFlowRet(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() != 0) {
      uint32_t labelId = m_module.allocateId();

      m_module.opReturn();
      m_module.opLabel(labelId);

      // return can be used in place of break to terminate a case block
      if (m_controlFlowBlocks.back().type == DxbcCfgBlockType::Switch)
        m_controlFlowBlocks.back().b_switch.labelCase = labelId;

      m_topLevelIsUniform = false;
    } else {
      // Last instruction in the current function
      this->emitFunctionEnd();
    }
  }


  // Conditional return: wrapped into an 'if' construct like 'breakc'.
  void emitControlFlowRetc(
    const DxbcShaderInstruction& ins) {
    // Perform zero test on the first component of the condition
    const DxbcRegisterValue condition = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, false, false, false));

    const DxbcRegisterValue zeroTest =
emitRegisterZeroTest(\n condition, ins.controls.zeroTest());\n \n // We basically have to wrap this into an 'if' block\n const uint32_t returnLabel = m_module.allocateId();\n const uint32_t continueLabel = m_module.allocateId();\n \n m_module.opSelectionMerge(continueLabel,\n spv::SelectionControlMaskNone);\n \n m_module.opBranchConditional(\n zeroTest.id, returnLabel, continueLabel);\n \n m_module.opLabel(returnLabel);\n m_module.opReturn();\n\n m_module.opLabel(continueLabel);\n\n // The return condition may be non-uniform\n m_topLevelIsUniform = false;\n }\n void emitControlFlowDiscard(\n const DxbcShaderInstruction& ins) {\n // Discard actually has an operand that determines\n // whether or not the fragment should be discarded\n const DxbcRegisterValue condition = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n const DxbcRegisterValue zeroTest = emitRegisterZeroTest(\n condition, ins.controls.zeroTest());\n \n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n \n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(zeroTest.id, cond.labelIf, cond.labelEnd);\n \n m_module.opLabel(cond.labelIf);\n m_module.opDemoteToHelperInvocation();\n m_module.opBranch(cond.labelEnd);\n \n m_module.opLabel(cond.labelEnd);\n\n m_module.enableCapability(spv::CapabilityDemoteToHelperInvocation);\n\n // Discard is just retc in a trenchcoat\n m_topLevelIsUniform = false;\n }\n void emitControlFlowLabel(\n const DxbcShaderInstruction& ins) {\n uint32_t functionNr = ins.dst[0].idx[0].offset;\n uint32_t functionId = getFunctionId(functionNr);\n \n this->emitFunctionBegin(\n functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n \n m_module.opLabel(m_module.allocateId());\n m_module.setDebugName(functionId, str::format(\"label\", functionNr).c_str());\n \n m_insideFunction = true;\n\n // We have to 
assume that this function gets\n // called from non-uniform control flow\n m_topLevelIsUniform = false;\n }\n void emitControlFlowCall(\n const DxbcShaderInstruction& ins) {\n uint32_t functionNr = ins.src[0].idx[0].offset;\n uint32_t functionId = getFunctionId(functionNr);\n\n m_module.opFunctionCall(\n m_module.defVoidType(),\n functionId, 0, nullptr);\n }\n void emitControlFlowCallc(\n const DxbcShaderInstruction& ins) {\n uint32_t functionNr = ins.src[1].idx[0].offset;\n uint32_t functionId = getFunctionId(functionNr);\n\n // Perform zero test on the first component of the condition\n const DxbcRegisterValue condition = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n const DxbcRegisterValue zeroTest = emitRegisterZeroTest(\n condition, ins.controls.zeroTest());\n \n // We basically have to wrap this into an 'if' block\n const uint32_t callLabel = m_module.allocateId();\n const uint32_t skipLabel = m_module.allocateId();\n \n m_module.opSelectionMerge(skipLabel,\n spv::SelectionControlMaskNone);\n \n m_module.opBranchConditional(\n zeroTest.id, callLabel, skipLabel);\n \n m_module.opLabel(callLabel);\n m_module.opFunctionCall(\n m_module.defVoidType(),\n functionId, 0, nullptr);\n\n m_module.opBranch(skipLabel);\n m_module.opLabel(skipLabel);\n }\n void emitControlFlow(\n const DxbcShaderInstruction& ins) {\n switch (ins.op) {\n case DxbcOpcode::If:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowIf(ins);\n break;\n \n case DxbcOpcode::Else:\n this->emitControlFlowElse(ins);\n break;\n \n case DxbcOpcode::EndIf:\n this->emitControlFlowEndIf(ins);\n this->emitUavBarrier(0, 0);\n break;\n \n case DxbcOpcode::Switch:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowSwitch(ins);\n break;\n \n case DxbcOpcode::Case:\n this->emitControlFlowCase(ins);\n break;\n \n case DxbcOpcode::Default:\n this->emitControlFlowDefault(ins);\n break;\n \n case DxbcOpcode::EndSwitch:\n this->emitControlFlowEndSwitch(ins);\n 
this->emitUavBarrier(0, 0);
        break;

      case DxbcOpcode::Loop:
        this->emitUavBarrier(0, 0);
        this->emitControlFlowLoop(ins);
        break;

      case DxbcOpcode::EndLoop:
        this->emitControlFlowEndLoop(ins);
        this->emitUavBarrier(0, 0);
        break;

      case DxbcOpcode::Break:
      case DxbcOpcode::Continue:
        this->emitControlFlowBreak(ins);
        break;

      case DxbcOpcode::Breakc:
      case DxbcOpcode::Continuec:
        this->emitControlFlowBreakc(ins);
        break;

      case DxbcOpcode::Ret:
        this->emitControlFlowRet(ins);
        break;

      case DxbcOpcode::Retc:
        this->emitUavBarrier(0, 0);
        this->emitControlFlowRetc(ins);
        break;

      case DxbcOpcode::Discard:
        this->emitControlFlowDiscard(ins);
        break;

      case DxbcOpcode::Label:
        this->emitControlFlowLabel(ins);
        break;

      case DxbcOpcode::Call:
        this->emitUavBarrier(0, 0);
        this->emitControlFlowCall(ins);
        this->emitUavBarrier(-1, -1);
        break;

      case DxbcOpcode::Callc:
        this->emitUavBarrier(0, 0);
        this->emitControlFlowCallc(ins);
        this->emitUavBarrier(-1, -1);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
    }
  }


  // Builds a float32 constant vector containing only the components
  // selected by writeMask; a single component yields a scalar constant.
  DxbcRegisterValue emitBuildConstVecf32(
    float x,
    float y,
    float z,
    float w,
    const DxbcRegMask& writeMask) {
    // TODO refactor these functions into one single template
    std::array ids = { 0, 0, 0, 0 };
    uint32_t componentIndex = 0;

    if (writeMask[0]) ids[componentIndex++] = m_module.constf32(x);
    if (writeMask[1]) ids[componentIndex++] = m_module.constf32(y);
    if (writeMask[2]) ids[componentIndex++] = m_module.constf32(z);
    if (writeMask[3]) ids[componentIndex++] = m_module.constf32(w);

    DxbcRegisterValue result;
    result.type.ctype = DxbcScalarType::Float32;
    result.type.ccount = componentIndex;
    result.id = componentIndex > 1
      ? m_module.constComposite(
          getVectorTypeId(result.type),
          componentIndex, ids.data())
      : ids[0];
    return result;
  }


  // Uint32 variant of emitBuildConstVecf32.
  DxbcRegisterValue emitBuildConstVecu32(
    uint32_t x,
    uint32_t y,
    uint32_t z,
    uint32_t w,
    const DxbcRegMask& writeMask) {
    std::array ids = { 0, 0, 0, 0 };
    uint32_t componentIndex = 0;

    if (writeMask[0]) ids[componentIndex++] = m_module.constu32(x);
    if (writeMask[1]) ids[componentIndex++] = m_module.constu32(y);
    if (writeMask[2]) ids[componentIndex++] = m_module.constu32(z);
    if (writeMask[3]) ids[componentIndex++] = m_module.constu32(w);

    DxbcRegisterValue result;
    result.type.ctype = DxbcScalarType::Uint32;
    result.type.ccount = componentIndex;
    result.id = componentIndex > 1
      ? m_module.constComposite(
          getVectorTypeId(result.type),
          componentIndex, ids.data())
      : ids[0];
    return result;
  }


  // Sint32 variant of emitBuildConstVecf32.
  DxbcRegisterValue emitBuildConstVeci32(
    int32_t x,
    int32_t y,
    int32_t z,
    int32_t w,
    const DxbcRegMask& writeMask) {
    std::array ids = { 0, 0, 0, 0 };
    uint32_t componentIndex = 0;

    if (writeMask[0]) ids[componentIndex++] = m_module.consti32(x);
    if (writeMask[1]) ids[componentIndex++] = m_module.consti32(y);
    if (writeMask[2]) ids[componentIndex++] = m_module.consti32(z);
    if (writeMask[3]) ids[componentIndex++] = m_module.consti32(w);

    DxbcRegisterValue result;
    result.type.ctype = DxbcScalarType::Sint32;
    result.type.ccount = componentIndex;
    result.id = componentIndex > 1
      ? m_module.constComposite(
          getVectorTypeId(result.type),
          componentIndex, ids.data())
      : ids[0];
    return result;
  }


  // Float64 variant: each double occupies two 32-bit write-mask
  // components, so xy/zw map to mask pairs [0,1] and [2,3].
  DxbcRegisterValue emitBuildConstVecf64(
    double xy,
    double zw,
    const DxbcRegMask& writeMask) {
    std::array ids = { 0, 0 };
    uint32_t componentIndex = 0;

    if (writeMask[0] && writeMask[1]) ids[componentIndex++] = m_module.constf64(xy);
    if (writeMask[2] && writeMask[3]) ids[componentIndex++] = m_module.constf64(zw);

    DxbcRegisterValue result;
    result.type.ctype = DxbcScalarType::Float64;
    result.type.ccount = componentIndex;
    result.id = componentIndex > 1
      ? m_module.constComposite(
          getVectorTypeId(result.type),
          componentIndex, ids.data())
      : ids[0];
    return result;
  }


  // Broadcasts a scalar into a vector of 'count' identical components.
  DxbcRegisterValue emitBuildVector(
    DxbcRegisterValue scalar,
    uint32_t count) {
    if (count == 1)
      return scalar;

    std::array scalarIds =
      { scalar.id, scalar.id, scalar.id, scalar.id };

    DxbcRegisterValue result;
    result.type.ctype = scalar.type.ctype;
    result.type.ccount = count;
    result.id = m_module.constComposite(
      getVectorTypeId(result.type),
      count, scalarIds.data());
    return result;
  }


  // Builds an all-zero vector of the given type and component count.
  DxbcRegisterValue emitBuildZeroVector(
    DxbcVectorType type) {
    DxbcRegisterValue result;
    result.type.ctype = type.ctype;
    result.type.ccount = 1;

    switch (type.ctype) {
      case DxbcScalarType::Float32: result.id = m_module.constf32(0.0f); break;
      case DxbcScalarType::Uint32:  result.id = m_module.constu32(0u); break;
      case DxbcScalarType::Sint32:  result.id = m_module.consti32(0); break;
      default: throw DxvkError("DxbcCompiler: Invalid scalar type");
    }

    return emitBuildVector(result, type.ccount);
  }


  // Bitcasts a register value to a different scalar type; 64-bit
  // types change the component count (see the ccount adjustments).
  DxbcRegisterValue emitRegisterBitcast(
    DxbcRegisterValue srcValue,
    DxbcScalarType dstType) {
    DxbcScalarType srcType = srcValue.type.ctype;

    if (srcType == dstType)
      return srcValue;

    DxbcRegisterValue result;
    result.type.ctype = dstType;
    result.type.ccount = srcValue.type.ccount;

    if (isDoubleType(srcType))
result.type.ccount *= 2;\n if (isDoubleType(dstType)) result.type.ccount /= 2;\n\n result.id = m_module.opBitcast(\n getVectorTypeId(result.type),\n srcValue.id);\n return result;\n }\n DxbcRegisterValue emitRegisterSwizzle(\n DxbcRegisterValue value,\n DxbcRegSwizzle swizzle,\n DxbcRegMask writeMask) {\n if (value.type.ccount == 1)\n return emitRegisterExtend(value, writeMask.popCount());\n \n std::array indices;\n \n uint32_t dstIndex = 0;\n \n for (uint32_t i = 0; i < 4; i++) {\n if (writeMask[i])\n indices[dstIndex++] = swizzle[i];\n }\n \n // If the swizzle combined with the mask can be reduced\n // to a no-op, we don't need to insert any instructions.\n bool isIdentitySwizzle = dstIndex == value.type.ccount;\n \n for (uint32_t i = 0; i < dstIndex && isIdentitySwizzle; i++)\n isIdentitySwizzle &= indices[i] == i;\n \n if (isIdentitySwizzle)\n return value;\n \n // Use OpCompositeExtract if the resulting vector contains\n // only one component, and OpVectorShuffle if it is a vector.\n DxbcRegisterValue result;\n result.type.ctype = value.type.ctype;\n result.type.ccount = dstIndex;\n \n const uint32_t typeId = getVectorTypeId(result.type);\n \n if (dstIndex == 1) {\n result.id = m_module.opCompositeExtract(\n typeId, value.id, 1, indices.data());\n } else {\n result.id = m_module.opVectorShuffle(\n typeId, value.id, value.id,\n dstIndex, indices.data());\n }\n \n return result;\n }\n DxbcRegisterValue emitRegisterExtract(\n DxbcRegisterValue value,\n DxbcRegMask mask) {\n return emitRegisterSwizzle(value,\n DxbcRegSwizzle(0, 1, 2, 3), mask);\n }\n DxbcRegisterValue emitRegisterInsert(\n DxbcRegisterValue dstValue,\n DxbcRegisterValue srcValue,\n DxbcRegMask srcMask) {\n DxbcRegisterValue result;\n result.type = dstValue.type;\n \n const uint32_t typeId = getVectorTypeId(result.type);\n \n if (srcMask.popCount() == 0) {\n // Nothing to do if the insertion mask is empty\n result.id = dstValue.id;\n } else if (dstValue.type.ccount == 1) {\n // Both values are 
scalar, so the first component\n // of the write mask decides which one to take.\n result.id = srcMask[0] ? srcValue.id : dstValue.id;\n } else if (srcValue.type.ccount == 1) {\n // The source value is scalar. Since OpVectorShuffle\n // requires both arguments to be vectors, we have to\n // use OpCompositeInsert to modify the vector instead.\n const uint32_t componentId = srcMask.firstSet();\n \n result.id = m_module.opCompositeInsert(typeId,\n srcValue.id, dstValue.id, 1, &componentId);\n } else {\n // Both arguments are vectors. We can determine which\n // components to take from which vector and use the\n // OpVectorShuffle instruction.\n std::array components;\n uint32_t srcComponentId = dstValue.type.ccount;\n \n for (uint32_t i = 0; i < dstValue.type.ccount; i++)\n components.at(i) = srcMask[i] ? srcComponentId++ : i;\n \n result.id = m_module.opVectorShuffle(\n typeId, dstValue.id, srcValue.id,\n dstValue.type.ccount, components.data());\n }\n \n return result;\n }\n DxbcRegisterValue emitRegisterConcat(\n DxbcRegisterValue value1,\n DxbcRegisterValue value2) {\n std::array ids =\n {{ value1.id, value2.id }};\n \n DxbcRegisterValue result;\n result.type.ctype = value1.type.ctype;\n result.type.ccount = value1.type.ccount + value2.type.ccount;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n ids.size(), ids.data());\n return result;\n }\n DxbcRegisterValue emitRegisterExtend(\n DxbcRegisterValue value,\n uint32_t size) {\n if (size == 1)\n return value;\n \n std::array ids = {{\n value.id, value.id,\n value.id, value.id, \n }};\n \n DxbcRegisterValue result;\n result.type.ctype = value.type.ctype;\n result.type.ccount = size;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n size, ids.data());\n return result;\n }\n DxbcRegisterValue emitRegisterAbsolute(\n DxbcRegisterValue value) {\n const uint32_t typeId = getVectorTypeId(value.type);\n \n switch (value.type.ctype) {\n case DxbcScalarType::Float32: 
value.id = m_module.opFAbs(typeId, value.id); break;
      case DxbcScalarType::Float64: value.id = m_module.opFAbs(typeId, value.id); break;
      case DxbcScalarType::Sint32:  value.id = m_module.opSAbs(typeId, value.id); break;
      case DxbcScalarType::Sint64:  value.id = m_module.opSAbs(typeId, value.id); break;
      default: Logger::warn("DxbcCompiler: Cannot get absolute value for given type");
    }

    return value;
  }


  // Negates a register value using the type-appropriate SPIR-V opcode;
  // unsupported types are passed through with a warning.
  DxbcRegisterValue emitRegisterNegate(
    DxbcRegisterValue value) {
    const uint32_t typeId = getVectorTypeId(value.type);

    switch (value.type.ctype) {
      case DxbcScalarType::Float32: value.id = m_module.opFNegate(typeId, value.id); break;
      case DxbcScalarType::Float64: value.id = m_module.opFNegate(typeId, value.id); break;
      case DxbcScalarType::Sint32:  value.id = m_module.opSNegate(typeId, value.id); break;
      case DxbcScalarType::Sint64:  value.id = m_module.opSNegate(typeId, value.id); break;
      default: Logger::warn("DxbcCompiler: Cannot negate given type");
    }

    return value;
  }


  // Compares an integer value against zero and returns a scalar bool:
  // TestZ yields (value == 0), otherwise (value != 0).
  DxbcRegisterValue emitRegisterZeroTest(
    DxbcRegisterValue value,
    DxbcZeroTest test) {
    DxbcRegisterValue result;
    result.type.ctype = DxbcScalarType::Bool;
    result.type.ccount = 1;

    const uint32_t zeroId = m_module.constu32(0u);
    const uint32_t typeId = getVectorTypeId(result.type);

    result.id = test == DxbcZeroTest::TestZ
      ? m_module.opIEqual (typeId, value.id, zeroId)
      : m_module.opINotEqual(typeId, value.id, zeroId);
    return result;
  }


  // ANDs every component of a register value with the given bit mask.
  DxbcRegisterValue emitRegisterMaskBits(
    DxbcRegisterValue value,
    uint32_t mask) {
    DxbcRegisterValue maskVector = emitBuildConstVecu32(
      mask, mask, mask, mask, DxbcRegMask::firstN(value.type.ccount));

    DxbcRegisterValue result;
    result.type = value.type;
    result.id = m_module.opBitwiseAnd(
      getVectorTypeId(result.type),
      value.id, maskVector.id);
    return result;
  }


  // Applies source operand modifiers: absolute value first, then
  // negation, matching DXBC operand modifier semantics.
  DxbcRegisterValue emitSrcOperandModifiers(
    DxbcRegisterValue value,
    DxbcRegModifiers modifiers) {
    if (modifiers.test(DxbcRegModifier::Abs))
      value = emitRegisterAbsolute(value);

    if (modifiers.test(DxbcRegModifier::Neg))
      value = emitRegisterNegate(value);
    return value;
  }


  // Applies destination operand modifiers; currently only saturation,
  // which clamps float results to [0, 1] via NClamp.
  DxbcRegisterValue emitDstOperandModifiers(
    DxbcRegisterValue value,
    DxbcOpModifiers modifiers) {
    const uint32_t typeId = getVectorTypeId(value.type);

    if (modifiers.saturate) {
      DxbcRegMask mask;
      DxbcRegisterValue vec0, vec1;

      if (value.type.ctype == DxbcScalarType::Float32) {
        mask = DxbcRegMask::firstN(value.type.ccount);
        vec0 = emitBuildConstVecf32(0.0f, 0.0f, 0.0f, 0.0f, mask);
        vec1 = emitBuildConstVecf32(1.0f, 1.0f, 1.0f, 1.0f, mask);
      } else if (value.type.ctype == DxbcScalarType::Float64) {
        mask = DxbcRegMask::firstN(value.type.ccount * 2);
        vec0 = emitBuildConstVecf64(0.0, 0.0, mask);
        vec1 = emitBuildConstVecf64(1.0, 1.0, mask);
      }

      if (mask)
        value.id = m_module.opNClamp(typeId, value.id, vec0.id, vec1.id);
    }

    return value;
  }


  // Extracts the texel (member 1) from a sparse image operation's
  // result struct; member 0 is the residency code.
  uint32_t emitExtractSparseTexel(
    uint32_t texelTypeId,
    uint32_t resultId) {
    uint32_t index = 1;

    return m_module.opCompositeExtract(
      texelTypeId, resultId, 1, &index);
  }


  // Stores the sparse residency code (member 0 of the sparse result)
  // into the feedback register, if one is bound.
  void emitStoreSparseFeedback(
    const DxbcRegister& feedbackRegister,
    uint32_t resultId) {
    if (feedbackRegister.type != DxbcOperandType::Null) {
      uint32_t index = 0;

      DxbcRegisterValue result;
      result.type = {
DxbcScalarType::Uint32, 1 };
      result.id = m_module.opCompositeExtract(
        getScalarTypeId(DxbcScalarType::Uint32),
        resultId, 1, &index);

      emitRegisterStore(feedbackRegister, result);
    }
  }


  // Returns a pointer to element 'index' of an array-typed pointer,
  // using the given storage class for the result pointer type.
  DxbcRegisterPointer emitArrayAccess(
    DxbcRegisterPointer pointer,
    spv::StorageClass sclass,
    uint32_t index) {
    uint32_t ptrTypeId = m_module.defPointerType(
      getVectorTypeId(pointer.type), sclass);

    DxbcRegisterPointer result;
    result.type = pointer.type;
    result.id = m_module.opAccessChain(
      ptrTypeId, pointer.id, 1, &index);
    return result;
  }


  // Combines a texture and a sampler into an OpSampledImage value;
  // returns 0 if the required image type was not declared.
  uint32_t emitLoadSampledImage(
    const DxbcShaderResource& textureResource,
    const DxbcSampler& samplerResource,
    bool isDepthCompare) {
    uint32_t baseId = isDepthCompare
      ? textureResource.depthTypeId
      : textureResource.colorTypeId;

    if (!baseId)
      return 0;

    uint32_t sampledImageType = m_module.defSampledImageType(baseId);

    return m_module.opSampledImage(sampledImageType,
      m_module.opLoad(textureResource.imageTypeId, textureResource.varId),
      m_module.opLoad(samplerResource.typeId, samplerResource.varId));
  }


  // Returns a pointer to a temporary (r#) register, creating the
  // private float4 variable on first use.
  DxbcRegisterPointer emitGetTempPtr(
    const DxbcRegister& operand) {
    // r# regs are indexed as follows:
    // (0) register index (immediate)
    uint32_t regIdx = operand.idx[0].offset;

    if (regIdx >= m_rRegs.size())
      m_rRegs.resize(regIdx + 1, 0u);

    if (!m_rRegs.at(regIdx)) {
      // Lazily declare the register variable
      DxbcRegisterInfo info;
      info.type.ctype = DxbcScalarType::Float32;
      info.type.ccount = 4;
      info.type.alength = 0;
      info.sclass = spv::StorageClassPrivate;

      uint32_t varId = emitNewVariable(info);
      m_rRegs.at(regIdx) = varId;

      m_module.setDebugName(varId,
        str::format("r", regIdx).c_str());
    }

    DxbcRegisterPointer result;
    result.type.ctype = DxbcScalarType::Float32;
    result.type.ccount = 4;
    result.id = m_rRegs.at(regIdx);
    return result;
  }


  // Returns a pointer into an indexable temp (x#) array; idx[1] holds
  // the element index within the array.
  DxbcRegisterPointer emitGetIndexableTempPtr(
    const DxbcRegister& operand) {
    return getIndexableTempPtr(operand, emitIndexLoad(operand.idx[1]));
  }


  // Returns a pointer to an input (v#) register, handling the extra
  // vertex-index dimension used by tessellation and geometry stages.
  DxbcRegisterPointer emitGetInputPtr(
    const DxbcRegister& operand) {
    // In the vertex and pixel stages,
    // v# regs are indexed as follows:
    // (0) register index (relative)
    //
    // In the tessellation and geometry
    // stages, the index has two dimensions:
    // (0) vertex index (relative)
    // (1) register index (relative)
    DxbcRegisterPointer result;
    result.type.ctype = DxbcScalarType::Float32;
    result.type.ccount = 4;

    std::array indices = {{ 0, 0 }};

    for (uint32_t i = 0; i < operand.idxDim; i++)
      indices.at(i) = emitIndexLoad(operand.idx[i]).id;

    // Pick the input array depending on
    // the program type and operand type
    struct InputArray {
      uint32_t id;
      spv::StorageClass sclass;
    };

    const InputArray array = [&] () -> InputArray {
      switch (operand.type) {
        case DxbcOperandType::InputControlPoint:
          return m_programInfo.type() == DxbcProgramType::HullShader
            ? InputArray { m_vArray, spv::StorageClassPrivate }
            : InputArray { m_ds.inputPerVertex, spv::StorageClassInput };
        case DxbcOperandType::InputPatchConstant:
          return m_programInfo.type() == DxbcProgramType::HullShader
            ?
InputArray { m_hs.outputPerPatch, spv::StorageClassPrivate }\n : InputArray { m_ds.inputPerPatch, spv::StorageClassInput };\n case DxbcOperandType::OutputControlPoint:\n return InputArray { m_hs.outputPerVertex, spv::StorageClassOutput };\n default:\n return { m_vArray, spv::StorageClassPrivate };\n }\n }();\n \n DxbcRegisterInfo info;\n info.type.ctype = result.type.ctype;\n info.type.ccount = result.type.ccount;\n info.type.alength = 0;\n info.sclass = array.sclass;\n \n result.id = m_module.opAccessChain(\n getPointerTypeId(info), array.id,\n operand.idxDim, indices.data());\n \n return result;\n }\n DxbcRegisterPointer emitGetOutputPtr(\n const DxbcRegister& operand) {\n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n // Hull shaders are special in that they have two sets of\n // output registers, one for per-patch values and one for\n // per-vertex values.\n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = 4;\n \n uint32_t registerId = emitIndexLoad(operand.idx[0]).id;\n\n if (m_hs.currPhaseType == DxbcCompilerHsPhase::ControlPoint) {\n std::array indices = {{\n m_module.opLoad(m_module.defIntType(32, 0), m_hs.builtinInvocationId),\n registerId,\n }};\n \n uint32_t ptrTypeId = m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassOutput);\n \n result.id = m_module.opAccessChain(\n ptrTypeId, m_hs.outputPerVertex,\n indices.size(), indices.data());\n } else {\n uint32_t ptrTypeId = m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassPrivate);\n \n result.id = m_module.opAccessChain(\n ptrTypeId, m_hs.outputPerPatch,\n 1, ®isterId);\n }\n\n return result;\n } else {\n // Regular shaders have their output\n // registers set up at declaration time\n return m_oRegs.at(operand.idx[0].offset);\n }\n }\n DxbcRegisterPointer emitGetConstBufPtr(\n const DxbcRegister& operand);\n DxbcRegisterPointer emitGetImmConstBufPtr(\n const DxbcRegister& operand) {\n 
DxbcRegisterValue constId = emitIndexLoad(operand.idx[0]);\n\n if (m_icbArray) {\n // We pad the icb array with an extra zero vector, so we can\n // clamp the index and get correct robustness behaviour.\n constId.id = m_module.opUMin(getVectorTypeId(constId.type),\n constId.id, m_module.constu32(m_icbSize));\n\n DxbcRegisterInfo ptrInfo;\n ptrInfo.type.ctype = DxbcScalarType::Uint32;\n ptrInfo.type.ccount = m_icbComponents;\n ptrInfo.type.alength = 0;\n ptrInfo.sclass = spv::StorageClassPrivate;\n\n DxbcRegisterPointer result;\n result.type.ctype = ptrInfo.type.ctype;\n result.type.ccount = ptrInfo.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(ptrInfo),\n m_icbArray, 1, &constId.id);\n return result;\n } else if (m_constantBuffers.at(Icb_BindingSlotId).varId != 0) {\n const std::array indices =\n {{ m_module.consti32(0), constId.id }};\n \n DxbcRegisterInfo ptrInfo;\n ptrInfo.type.ctype = DxbcScalarType::Float32;\n ptrInfo.type.ccount = m_icbComponents;\n ptrInfo.type.alength = 0;\n ptrInfo.sclass = spv::StorageClassUniform;\n\n DxbcRegisterPointer result;\n result.type.ctype = ptrInfo.type.ctype;\n result.type.ccount = ptrInfo.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(ptrInfo),\n m_constantBuffers.at(Icb_BindingSlotId).varId,\n indices.size(), indices.data());\n return result;\n } else {\n throw DxvkError(\"DxbcCompiler: Immediate constant buffer not defined\");\n }\n }\n DxbcRegisterPointer emitGetOperandPtr(\n const DxbcRegister& operand) {\n switch (operand.type) {\n case DxbcOperandType::Temp:\n return emitGetTempPtr(operand);\n \n case DxbcOperandType::IndexableTemp:\n return emitGetIndexableTempPtr(operand);\n \n case DxbcOperandType::Input:\n case DxbcOperandType::InputControlPoint:\n case DxbcOperandType::InputPatchConstant:\n case DxbcOperandType::OutputControlPoint:\n return emitGetInputPtr(operand);\n \n case DxbcOperandType::Output:\n return emitGetOutputPtr(operand);\n \n case 
DxbcOperandType::ImmediateConstantBuffer:\n return emitGetImmConstBufPtr(operand);\n\n case DxbcOperandType::InputThreadId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinGlobalInvocationId };\n \n case DxbcOperandType::InputThreadGroupId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinWorkgroupId };\n \n case DxbcOperandType::InputThreadIdInGroup:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinLocalInvocationId };\n \n case DxbcOperandType::InputThreadIndexInGroup:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_cs.builtinLocalInvocationIndex };\n \n case DxbcOperandType::InputCoverageMask: {\n const std::array indices\n = {{ m_module.constu32(0) }};\n \n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassInput),\n m_ps.builtinSampleMaskIn,\n indices.size(), indices.data());\n return result;\n }\n \n case DxbcOperandType::OutputCoverageMask: {\n const std::array indices\n = {{ m_module.constu32(0) }};\n \n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassOutput),\n m_ps.builtinSampleMaskOut,\n indices.size(), indices.data());\n return result;\n }\n \n case DxbcOperandType::OutputDepth:\n case DxbcOperandType::OutputDepthGe:\n case DxbcOperandType::OutputDepthLe:\n return DxbcRegisterPointer {\n { DxbcScalarType::Float32, 1 },\n m_ps.builtinDepth };\n \n case DxbcOperandType::OutputStencilRef:\n return DxbcRegisterPointer {\n { DxbcScalarType::Sint32, 1 },\n m_ps.builtinStencilRef };\n\n case DxbcOperandType::InputPrimitiveId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_primitiveIdIn };\n \n case 
DxbcOperandType::InputDomainPoint:\n return DxbcRegisterPointer {\n { DxbcScalarType::Float32, 3 },\n m_ds.builtinTessCoord };\n \n case DxbcOperandType::OutputControlPointId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_hs.builtinInvocationId };\n \n case DxbcOperandType::InputForkInstanceId:\n case DxbcOperandType::InputJoinInstanceId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n getCurrentHsForkJoinPhase()->instanceIdPtr };\n \n case DxbcOperandType::InputGsInstanceId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_gs.builtinInvocationId };\n \n case DxbcOperandType::InputInnerCoverage:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_ps.builtinInnerCoverageId };\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled operand type: \",\n operand.type));\n }\n }\n DxbcRegisterPointer emitGetAtomicPointer(\n const DxbcRegister& operand,\n const DxbcRegister& address) {\n // Query information about the resource itself\n const uint32_t registerId = operand.idx[0].offset;\n const DxbcBufferInfo resourceInfo = getBufferInfo(operand);\n \n // For UAVs and shared memory, different methods\n // of obtaining the final pointer are used.\n bool isTgsm = operand.type == DxbcOperandType::ThreadGroupSharedMemory;\n bool isSsbo = resourceInfo.isSsbo;\n \n // Compute the actual address into the resource\n const DxbcRegisterValue addressValue = [&] {\n switch (resourceInfo.type) {\n case DxbcResourceType::Raw:\n return emitCalcBufferIndexRaw(emitRegisterLoad(\n address, DxbcRegMask(true, false, false, false)));\n \n case DxbcResourceType::Structured: {\n const DxbcRegisterValue addressComponents = emitRegisterLoad(\n address, DxbcRegMask(true, true, false, false));\n \n return emitCalcBufferIndexStructured(\n emitRegisterExtract(addressComponents, DxbcRegMask(true, false, false, false)),\n emitRegisterExtract(addressComponents, DxbcRegMask(false, true, false, false)),\n 
resourceInfo.stride);
          };
          
          case DxbcResourceType::Typed: {
            // Typed resources are addressed with texture
            // coordinates; TGSM is always raw or structured.
            if (isTgsm)
              throw DxvkError("DxbcCompiler: TGSM cannot be typed");
            
            return emitLoadTexCoord(address,
              m_uavs.at(registerId).imageInfo);
          }
          
          default:
            throw DxvkError("DxbcCompiler: Unhandled resource type");
        }
      }();
      
      // Compute the actual pointer
      DxbcRegisterPointer result;
      result.type.ctype = resourceInfo.stype;
      result.type.ccount = 1;

      if (isTgsm) {
        // Shared memory: plain access chain into the workgroup array
        result.id = m_module.opAccessChain(resourceInfo.typeId,
          resourceInfo.varId, 1, &addressValue.id);
      } else if (isSsbo) {
        // SSBO-backed UAV: member 0 of the buffer block is the
        // data array, so two indices are needed
        uint32_t indices[2] = { m_module.constu32(0), addressValue.id };
        result.id = m_module.opAccessChain(resourceInfo.typeId,
          resourceInfo.varId, 2, indices);
      } else {
        // Image-backed UAV: atomics go through an image texel
        // pointer (sample index 0)
        result.id = m_module.opImageTexelPointer(
          m_module.defPointerType(getVectorTypeId(result.type), spv::StorageClassImage),
          resourceInfo.varId, addressValue.id, m_module.constu32(0));
      }

      return result;
    }
    
    /// Queries the size of a raw or structured buffer resource
    /// via OpArrayLength on the buffer's runtime array (member 0).
    /// Returns a scalar Uint32 value.
    DxbcRegisterValue emitQueryBufferSize(
      const DxbcRegister& resource) {
      const DxbcBufferInfo bufferInfo = getBufferInfo(resource);

      DxbcRegisterValue result;
      result.type.ctype = DxbcScalarType::Uint32;
      result.type.ccount = 1;
      result.id = m_module.opArrayLength(
        getVectorTypeId(result.type),
        bufferInfo.varId, 0);

      return result;
    }
    
    /// Queries the element count of a typed (texel) buffer
    /// via OpImageQuerySize. Returns a scalar Uint32 value.
    DxbcRegisterValue emitQueryTexelBufferSize(
      const DxbcRegister& resource) {
      // Load the texel buffer object. This cannot be used with
      // constant buffers or any other type of resource.
      const DxbcBufferInfo bufferInfo = getBufferInfo(resource);
      
      const uint32_t bufferId = m_module.opLoad(
        bufferInfo.typeId, bufferInfo.varId);
      
      // We'll store this as a scalar unsigned integer
      DxbcRegisterValue result;
      result.type.ctype = DxbcScalarType::Uint32;
      result.type.ccount = 1;
      result.id = m_module.opImageQuerySize(
        getVectorTypeId(result.type), bufferId);

      return result;
    }
    
    /// Queries the number of mip levels of a sampled image.
    /// Only valid for non-multisampled sampled images; all other
    /// resources (UAVs, MS images) report a single level.
    DxbcRegisterValue emitQueryTextureLods(
      const DxbcRegister& resource) {
      const DxbcBufferInfo info = getBufferInfo(resource);
      
      DxbcRegisterValue result;
      result.type.ctype = DxbcScalarType::Uint32;
      result.type.ccount = 1;
      
      if (info.image.ms == 0 && info.image.sampled == 1) {
        result.id = m_module.opImageQueryLevels(
          getVectorTypeId(result.type),
          m_module.opLoad(info.typeId, info.varId));
      } else {
        // Report one LOD in case of UAVs or multisampled images
        result.id = m_module.constu32(1);
      }

      return result;
    }
    
    /// Queries the sample count of a multisampled resource, or,
    /// for the Rasterizer operand, of the bound render targets.
    DxbcRegisterValue emitQueryTextureSamples(
      const DxbcRegister& resource) {
      if (resource.type == DxbcOperandType::Rasterizer) {
        // SPIR-V has no gl_NumSamples equivalent, so we
        // have to work around it using a push constant
        if (!m_ps.pushConstantId)
          m_ps.pushConstantId = emitPushConstants();

        uint32_t uintTypeId = m_module.defIntType(32, 0);
        uint32_t ptrTypeId = m_module.defPointerType(uintTypeId, spv::StorageClassPushConstant);
        uint32_t index = m_module.constu32(0);

        DxbcRegisterValue result;
        result.type.ctype = DxbcScalarType::Uint32;
        result.type.ccount = 1;
        result.id = m_module.opLoad(uintTypeId,
          m_module.opAccessChain(ptrTypeId, m_ps.pushConstantId, 1, &index));
        return result;
      } else {
        DxbcBufferInfo info = getBufferInfo(resource);
        
        DxbcRegisterValue result;
        result.type.ctype = DxbcScalarType::Uint32;
        result.type.ccount = 1;

        if (info.image.ms) {
          result.id = m_module.opImageQuerySamples(
getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId));\n } else {\n // OpImageQuerySamples requires MSAA images\n result.id = m_module.constu32(1);\n }\n \n return result;\n }\n }\n DxbcRegisterValue emitQueryTextureSize(\n const DxbcRegister& resource,\n DxbcRegisterValue lod) {\n const DxbcBufferInfo info = getBufferInfo(resource);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = getTexSizeDim(info.image);\n \n if (info.image.ms == 0 && info.image.sampled == 1) {\n result.id = m_module.opImageQuerySizeLod(\n getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId),\n lod.id);\n } else {\n result.id = m_module.opImageQuerySize(\n getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId));\n }\n\n return result;\n }\n DxbcRegisterValue emitCalcBufferIndexStructured(\n DxbcRegisterValue structId,\n DxbcRegisterValue structOffset,\n uint32_t structStride) {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = 1;\n \n uint32_t typeId = getVectorTypeId(result.type);\n uint32_t offset = m_module.opShiftRightLogical(typeId, structOffset.id, m_module.consti32(2));\n \n result.id = m_module.opIAdd(typeId,\n m_module.opIMul(typeId, structId.id, m_module.consti32(structStride / 4)),\n offset);\n return result;\n }\n DxbcRegisterValue emitCalcBufferIndexRaw(\n DxbcRegisterValue byteOffset) {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = 1;\n \n uint32_t typeId = getVectorTypeId(result.type);\n result.id = m_module.opShiftRightLogical(typeId, byteOffset.id, m_module.consti32(2));\n return result;\n }\n DxbcRegisterValue emitCalcTexCoord(\n DxbcRegisterValue coordVector,\n const DxbcImageInfo& imageInfo) {\n const uint32_t dim = getTexCoordDim(imageInfo);\n \n if (dim != coordVector.type.ccount) {\n coordVector = emitRegisterExtract(\n coordVector, DxbcRegMask::firstN(dim)); \n }\n \n 
return coordVector;\n }\n DxbcRegisterValue emitLoadTexCoord(\n const DxbcRegister& coordReg,\n const DxbcImageInfo& imageInfo) {\n return emitCalcTexCoord(emitRegisterLoad(coordReg,\n DxbcRegMask(true, true, true, true)), imageInfo);\n }\n DxbcRegisterValue emitIndexLoad(\n DxbcRegIndex index) {\n if (index.relReg != nullptr) {\n DxbcRegisterValue result = emitRegisterLoad(\n *index.relReg, DxbcRegMask(true, false, false, false));\n \n if (index.offset != 0) {\n result.id = m_module.opIAdd(\n getVectorTypeId(result.type), result.id,\n m_module.consti32(index.offset));\n }\n \n return result;\n } else {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = 1;\n result.id = m_module.consti32(index.offset);\n return result;\n }\n }\n DxbcRegisterValue emitValueLoad(\n DxbcRegisterPointer ptr) {\n DxbcRegisterValue result;\n result.type = ptr.type;\n result.id = m_module.opLoad(\n getVectorTypeId(result.type),\n ptr.id);\n return result;\n }\n void emitValueStore(\n DxbcRegisterPointer ptr,\n DxbcRegisterValue value,\n DxbcRegMask writeMask) {\n // If the component types are not compatible,\n // we need to bit-cast the source variable.\n if (value.type.ctype != ptr.type.ctype)\n value = emitRegisterBitcast(value, ptr.type.ctype);\n \n // If the source value consists of only one component,\n // it is stored in all components of the destination.\n if (value.type.ccount == 1)\n value = emitRegisterExtend(value, writeMask.popCount());\n \n if (ptr.type.ccount == writeMask.popCount()) {\n // Simple case: We write to the entire register\n m_module.opStore(ptr.id, value.id);\n } else {\n // We only write to part of the destination\n // register, so we need to load and modify it\n DxbcRegisterValue tmp = emitValueLoad(ptr);\n tmp = emitRegisterInsert(tmp, value, writeMask);\n \n m_module.opStore(ptr.id, tmp.id);\n }\n }\n DxbcRegisterValue emitRegisterLoadRaw(\n const DxbcRegister& reg) {\n // Try to find index range for the given 
register\n const DxbcIndexRange* indexRange = nullptr;\n\n if (reg.idxDim && reg.idx[reg.idxDim - 1u].relReg) {\n uint32_t offset = reg.idx[reg.idxDim - 1u].offset;\n\n for (const auto& range : m_indexRanges) {\n if (reg.type == range.type && offset >= range.start && offset < range.start + range.length)\n indexRange = ⦥\n }\n }\n\n if (reg.type == DxbcOperandType::IndexableTemp || indexRange) {\n bool doBoundsCheck = reg.idx[reg.idxDim - 1u].relReg != nullptr;\n\n if (doBoundsCheck) {\n DxbcRegisterValue indexId = emitIndexLoad(reg.idx[reg.idxDim - 1u]);\n uint32_t boundsCheck = 0u;\n\n if (reg.type == DxbcOperandType::IndexableTemp) {\n boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), indexId.id,\n m_module.constu32(m_xRegs.at(reg.idx[0].offset).alength));\n } else {\n uint32_t adjustedId = m_module.opISub(getVectorTypeId(indexId.type),\n indexId.id, m_module.consti32(indexRange->start));\n\n boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), adjustedId,\n m_module.constu32(indexRange->length));\n }\n\n // Kind of ugly to have an empty else block here but there's no\n // way for us to know the current block ID for the phi below\n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelElse = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n\n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(boundsCheck, cond.labelIf, cond.labelElse);\n\n m_module.opLabel(cond.labelIf);\n\n DxbcRegisterValue returnValue = emitValueLoad(emitGetOperandPtr(reg));\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelElse);\n\n DxbcRegisterValue zeroValue = emitBuildZeroVector(returnValue.type);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n\n std::array phiLabels = {{\n { returnValue.id, cond.labelIf },\n { zeroValue.id, cond.labelElse },\n }};\n\n returnValue.id = m_module.opPhi(\n getVectorTypeId(returnValue.type),\n 
phiLabels.size(), phiLabels.data());
          return returnValue;
        }
      }

      // Fast path: no bounds check needed, load directly
      DxbcRegisterValue value = emitValueLoad(emitGetOperandPtr(reg));

      // Pad icb values to a vec4 since the app may access components that are always 0
      if (reg.type == DxbcOperandType::ImmediateConstantBuffer && value.type.ccount < 4u) {
        DxbcVectorType zeroType;
        zeroType.ctype = value.type.ctype;
        zeroType.ccount = 4u - value.type.ccount;

        uint32_t zeroVector = emitBuildZeroVector(zeroType).id;

        std::array constituents = { value.id, zeroVector };

        value.type.ccount = 4u;
        value.id = m_module.opCompositeConstruct(getVectorTypeId(value.type),
          constituents.size(), constituents.data());
      }

      return value;
    }
    
    /// Loads up to four components of a c# (constant buffer)
    /// register, honouring the operand swizzle and write mask.
    DxbcRegisterValue emitConstantBufferLoad(
      const DxbcRegister& reg,
      DxbcRegMask writeMask) {
      // Constant buffers take a two-dimensional index:
      // (0) register index (immediate)
      // (1) constant offset (relative)
      DxbcRegisterInfo info;
      info.type.ctype = DxbcScalarType::Float32;
      info.type.ccount = 4;
      info.type.alength = 0;
      info.sclass = spv::StorageClassUniform;
      
      uint32_t regId = reg.idx[0].offset;
      DxbcRegisterValue constId = emitIndexLoad(reg.idx[1]);
      
      uint32_t ptrTypeId = getPointerTypeId(info);
      
      const std::array indices =
        {{ m_module.consti32(0), constId.id }};
      
      DxbcRegisterPointer ptr;
      ptr.type.ctype = info.type.ctype;
      ptr.type.ccount = info.type.ccount;
      ptr.id = m_module.opAccessChain(ptrTypeId,
        m_constantBuffers.at(regId).varId,
        indices.size(), indices.data());

      // Load individual components from buffer.
      // ccomps caches each source component (by swizzle index) so
      // it is loaded at most once; scomps gathers the selected
      // components in write-mask order.
      std::array ccomps = { 0, 0, 0, 0 };
      std::array scomps = { 0, 0, 0, 0 };
      uint32_t scount = 0;

      for (uint32_t i = 0; i < 4; i++) {
        uint32_t sindex = reg.swizzle[i];

        // Skip masked-out components and components already loaded
        if (!writeMask[i] || ccomps[sindex])
          continue;
        
        uint32_t componentId = m_module.constu32(sindex);
        uint32_t componentPtr = m_module.opAccessChain(
          m_module.defPointerType(
            getScalarTypeId(DxbcScalarType::Float32),
spv::StorageClassUniform),\n ptr.id, 1, &componentId);\n \n ccomps[sindex] = m_module.opLoad(\n getScalarTypeId(DxbcScalarType::Float32),\n componentPtr);\n }\n\n for (uint32_t i = 0; i < 4; i++) {\n uint32_t sindex = reg.swizzle[i];\n \n if (writeMask[i])\n scomps[scount++] = ccomps[sindex];\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = scount;\n result.id = scomps[0];\n \n if (scount > 1) {\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n scount, scomps.data());\n }\n\n // Apply any post-processing that might be necessary\n result = emitRegisterBitcast(result, reg.dataType);\n result = emitSrcOperandModifiers(result, reg.modifiers);\n return result;\n }\n DxbcRegisterValue emitRegisterLoad(\n const DxbcRegister& reg,\n DxbcRegMask writeMask) {\n if (reg.type == DxbcOperandType::Imm32\n || reg.type == DxbcOperandType::Imm64) {\n DxbcRegisterValue result;\n \n if (reg.componentCount == DxbcComponentCount::Component1) {\n // Create one single u32 constant\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.constu32(reg.imm.u32_1);\n\n result = emitRegisterExtend(result, writeMask.popCount());\n } else if (reg.componentCount == DxbcComponentCount::Component4) {\n // Create a u32 vector with as many components as needed\n std::array indices = { };\n uint32_t indexId = 0;\n \n for (uint32_t i = 0; i < indices.size(); i++) {\n if (writeMask[i]) {\n indices.at(indexId++) =\n m_module.constu32(reg.imm.u32_4[i]);\n }\n }\n \n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = writeMask.popCount();\n result.id = indices.at(0);\n \n if (indexId > 1) {\n result.id = m_module.constComposite(\n getVectorTypeId(result.type),\n result.type.ccount, indices.data());\n }\n \n } else {\n // Something went horribly wrong in the decoder or the shader is broken\n throw DxvkError(\"DxbcCompiler: Invalid component count for immediate 
operand\");\n }\n \n // Cast constants to the requested type\n return emitRegisterBitcast(result, reg.dataType);\n } else if (reg.type == DxbcOperandType::ConstantBuffer) {\n return emitConstantBufferLoad(reg, writeMask);\n } else {\n // Load operand from the operand pointer\n DxbcRegisterValue result = emitRegisterLoadRaw(reg);\n \n // Apply operand swizzle to the operand value\n result = emitRegisterSwizzle(result, reg.swizzle, writeMask);\n \n // Cast it to the requested type. We need to do\n // this after the swizzling for 64-bit types.\n result = emitRegisterBitcast(result, reg.dataType);\n \n // Apply operand modifiers\n result = emitSrcOperandModifiers(result, reg.modifiers);\n return result;\n }\n }\n void emitRegisterStore(\n const DxbcRegister& reg,\n DxbcRegisterValue value) {\n if (reg.type == DxbcOperandType::IndexableTemp) {\n bool doBoundsCheck = reg.idx[1].relReg != nullptr;\n DxbcRegisterValue vectorId = emitIndexLoad(reg.idx[1]);\n\n if (doBoundsCheck) {\n uint32_t boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), vectorId.id,\n m_module.constu32(m_xRegs.at(reg.idx[0].offset).alength));\n \n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n \n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(boundsCheck, cond.labelIf, cond.labelEnd);\n \n m_module.opLabel(cond.labelIf);\n\n emitValueStore(getIndexableTempPtr(reg, vectorId), value, reg.mask);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n } else {\n emitValueStore(getIndexableTempPtr(reg, vectorId), value, reg.mask);\n }\n } else {\n emitValueStore(emitGetOperandPtr(reg), value, reg.mask);\n }\n }\n void emitInputSetup() {\n m_module.setLateConst(m_vArrayLengthId, &m_vArrayLength);\n\n // Copy all defined v# registers into the input array. 
Note\n // that the outer index of the array is the vertex index.\n const uint32_t vecTypeId = m_module.defVectorType(m_module.defFloatType(32), 4);\n const uint32_t dstPtrTypeId = m_module.defPointerType(vecTypeId, spv::StorageClassPrivate);\n \n for (uint32_t i = 0; i < m_vRegs.size(); i++) {\n if (m_vRegs.at(i).id != 0) {\n const uint32_t registerId = m_module.consti32(i);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n std::array indices\n = {{ m_module.consti32(v), registerId }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i).id, 1, indices.data());\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n dstPtrTypeId, m_vArray, 2, indices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n }\n }\n \n // Copy all system value registers into the array,\n // preserving any previously written contents.\n for (const DxbcSvMapping& map : m_vMappings) {\n const uint32_t registerId = m_module.consti32(map.regId);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n const DxbcRegisterValue value = [&] {\n switch (m_programInfo.type()) {\n case DxbcProgramType::GeometryShader: return emitGsSystemValueLoad(map.sv, map.regMask, v);\n default: throw DxvkError(str::format(\"DxbcCompiler: Unexpected stage: \", m_programInfo.type()));\n }\n }();\n \n std::array indices = {\n m_module.consti32(v), registerId,\n };\n \n DxbcRegisterPointer inputReg;\n inputReg.type.ctype = DxbcScalarType::Float32;\n inputReg.type.ccount = 4;\n inputReg.id = m_module.opAccessChain(dstPtrTypeId,\n m_vArray, indices.size(), indices.data());\n emitValueStore(inputReg, value, map.regMask);\n }\n }\n }\n void emitInputSetup(uint32_t 
vertexCount) {\n m_module.setLateConst(m_vArrayLengthId, &m_vArrayLength);\n\n // Copy all defined v# registers into the input array. Note\n // that the outer index of the array is the vertex index.\n const uint32_t vecTypeId = m_module.defVectorType(m_module.defFloatType(32), 4);\n const uint32_t dstPtrTypeId = m_module.defPointerType(vecTypeId, spv::StorageClassPrivate);\n \n for (uint32_t i = 0; i < m_vRegs.size(); i++) {\n if (m_vRegs.at(i).id != 0) {\n const uint32_t registerId = m_module.consti32(i);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n std::array indices\n = {{ m_module.consti32(v), registerId }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i).id, 1, indices.data());\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n dstPtrTypeId, m_vArray, 2, indices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n }\n }\n \n // Copy all system value registers into the array,\n // preserving any previously written contents.\n for (const DxbcSvMapping& map : m_vMappings) {\n const uint32_t registerId = m_module.consti32(map.regId);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n const DxbcRegisterValue value = [&] {\n switch (m_programInfo.type()) {\n case DxbcProgramType::GeometryShader: return emitGsSystemValueLoad(map.sv, map.regMask, v);\n default: throw DxvkError(str::format(\"DxbcCompiler: Unexpected stage: \", m_programInfo.type()));\n }\n }();\n \n std::array indices = {\n m_module.consti32(v), registerId,\n };\n \n DxbcRegisterPointer inputReg;\n inputReg.type.ctype = DxbcScalarType::Float32;\n inputReg.type.ccount = 4;\n inputReg.id = m_module.opAccessChain(dstPtrTypeId,\n 
m_vArray, indices.size(), indices.data());\n emitValueStore(inputReg, value, map.regMask);\n }\n }\n }\n void emitOutputSetup() {\n for (const DxbcSvMapping& svMapping : m_oMappings) {\n DxbcRegisterPointer outputReg = m_oRegs.at(svMapping.regId);\n \n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n uint32_t registerIndex = m_module.constu32(svMapping.regId);\n \n outputReg.type = { DxbcScalarType::Float32, 4 };\n outputReg.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(outputReg.type),\n spv::StorageClassPrivate),\n m_hs.outputPerPatch,\n 1, ®isterIndex);\n }\n \n auto sv = svMapping.sv;\n auto mask = svMapping.regMask;\n auto value = emitValueLoad(outputReg);\n \n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: emitVsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::GeometryShader: emitGsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::HullShader: emitHsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::DomainShader: emitDsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::PixelShader: emitPsSystemValueStore(sv, mask, value); break;\n default: break;\n }\n }\n }\n void emitOutputDepthClamp() {\n // HACK: Some drivers do not clamp FragDepth to [minDepth..maxDepth]\n // before writing to the depth attachment, but we do not have acccess\n // to those. 
Clamp to [0..1] instead.\n if (m_ps.builtinDepth) {\n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Float32, 1 };\n ptr.id = m_ps.builtinDepth;\n\n DxbcRegisterValue value = emitValueLoad(ptr);\n\n value.id = m_module.opNClamp(\n getVectorTypeId(ptr.type),\n value.id,\n m_module.constf32(0.0f),\n m_module.constf32(1.0f));\n \n emitValueStore(ptr, value,\n DxbcRegMask::firstN(1));\n }\n }\n void emitInitWorkgroupMemory() {\n bool hasTgsm = false;\n\n SpirvMemoryOperands memoryOperands;\n memoryOperands.flags = spv::MemoryAccessNonPrivatePointerMask;\n\n for (uint32_t i = 0; i < m_gRegs.size(); i++) {\n if (!m_gRegs[i].varId)\n continue;\n \n if (!m_cs.builtinLocalInvocationIndex) {\n m_cs.builtinLocalInvocationIndex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationIndex,\n \"vThreadIndexInGroup\");\n }\n\n uint32_t intTypeId = getScalarTypeId(DxbcScalarType::Uint32);\n uint32_t ptrTypeId = m_module.defPointerType(\n intTypeId, spv::StorageClassWorkgroup);\n\n uint32_t numElements = m_gRegs[i].type == DxbcResourceType::Structured\n ? 
m_gRegs[i].elementCount * m_gRegs[i].elementStride / 4\n : m_gRegs[i].elementCount / 4;\n \n uint32_t numThreads = m_cs.workgroupSizeX *\n m_cs.workgroupSizeY * m_cs.workgroupSizeZ;\n \n uint32_t numElementsPerThread = numElements / numThreads;\n uint32_t numElementsRemaining = numElements % numThreads;\n\n uint32_t threadId = m_module.opLoad(\n intTypeId, m_cs.builtinLocalInvocationIndex);\n uint32_t zeroId = m_module.constu32(0);\n\n for (uint32_t e = 0; e < numElementsPerThread; e++) {\n uint32_t ofsId = m_module.opIAdd(intTypeId, threadId,\n m_module.constu32(numThreads * e));\n \n uint32_t ptrId = m_module.opAccessChain(\n ptrTypeId, m_gRegs[i].varId, 1, &ofsId);\n\n m_module.opStore(ptrId, zeroId, memoryOperands);\n }\n\n if (numElementsRemaining) {\n uint32_t condition = m_module.opULessThan(\n m_module.defBoolType(), threadId,\n m_module.constu32(numElementsRemaining));\n \n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n\n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(condition, cond.labelIf, cond.labelEnd);\n\n m_module.opLabel(cond.labelIf);\n\n uint32_t ofsId = m_module.opIAdd(intTypeId, threadId,\n m_module.constu32(numThreads * numElementsPerThread));\n \n uint32_t ptrId = m_module.opAccessChain(\n ptrTypeId, m_gRegs[i].varId, 1, &ofsId);\n \n m_module.opStore(ptrId, zeroId, memoryOperands);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n }\n\n hasTgsm = true;\n }\n\n if (hasTgsm) {\n m_module.opControlBarrier(\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask));\n }\n }\n DxbcRegisterValue emitVsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask) {\n switch (sv) {\n case 
DxbcSystemValue::VertexId: {\n const uint32_t typeId = getScalarTypeId(DxbcScalarType::Uint32);\n \n if (m_vs.builtinVertexId == 0) {\n m_vs.builtinVertexId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInVertexIndex,\n \"vs_vertex_index\");\n }\n \n if (m_vs.builtinBaseVertex == 0) {\n m_vs.builtinBaseVertex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInBaseVertex,\n \"vs_base_vertex\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opISub(typeId,\n m_module.opLoad(typeId, m_vs.builtinVertexId),\n m_module.opLoad(typeId, m_vs.builtinBaseVertex));\n return result;\n } break;\n \n case DxbcSystemValue::InstanceId: {\n const uint32_t typeId = getScalarTypeId(DxbcScalarType::Uint32);\n \n if (m_vs.builtinInstanceId == 0) {\n m_vs.builtinInstanceId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInstanceIndex,\n \"vs_instance_index\");\n }\n \n if (m_vs.builtinBaseInstance == 0) {\n m_vs.builtinBaseInstance = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInBaseInstance,\n \"vs_base_instance\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opISub(typeId,\n m_module.opLoad(typeId, m_vs.builtinInstanceId),\n m_module.opLoad(typeId, m_vs.builtinBaseInstance));\n return result;\n } break;\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled VS SV input: \", sv));\n }\n }\n DxbcRegisterValue emitGsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n uint32_t vertexId) {\n switch (sv) {\n case DxbcSystemValue::Position: {\n uint32_t arrayIndex = m_module.consti32(vertexId);\n\n if (!m_positionIn) {\n m_positionIn = emitNewBuiltinVariable({\n { 
            DxbcScalarType::Float32, 4, primitiveVertexCount(m_gs.inputPrimitive) },
            spv::StorageClassInput },
          spv::BuiltInPosition,
          "in_position");
      }

      DxbcRegisterPointer ptrIn;
      ptrIn.type.ctype  = DxbcScalarType::Float32;
      ptrIn.type.ccount = 4;
      // Index the per-vertex Position input array with the caller-provided
      // vertex index (arrayIndex is defined in the part of this case above).
      ptrIn.id = m_module.opAccessChain(
        m_module.defPointerType(getVectorTypeId(ptrIn.type), spv::StorageClassInput),
        m_positionIn, 1, &arrayIndex);

      return emitRegisterExtract(emitValueLoad(ptrIn), mask);
    } break;

    default:
      throw DxvkError(str::format(
        "DxbcCompiler: Unhandled GS SV input: ", sv));
  }
}


// Loads a pixel shader system value input. On first use of each system
// value, the matching SPIR-V input builtin is declared lazily; the value
// is then read from that builtin and swizzled down to the requested mask.
DxbcRegisterValue emitPsSystemValueLoad(
        DxbcSystemValue         sv,
        DxbcRegMask             mask) {
  switch (sv) {
    case DxbcSystemValue::Position: {
      if (m_ps.builtinFragCoord == 0) {
        m_ps.builtinFragCoord = emitNewBuiltinVariable({
          { DxbcScalarType::Float32, 4, 0 },
          spv::StorageClassInput },
          spv::BuiltInFragCoord,
          "ps_frag_coord");
      }

      DxbcRegisterPointer ptrIn;
      ptrIn.type = { DxbcScalarType::Float32, 4 };
      ptrIn.id = m_ps.builtinFragCoord;

      // The X, Y and Z components of the SV_POSITION semantic
      // are identical to Vulkan's FragCoord builtin, but we
      // need to compute the reciprocal of the W component.
      DxbcRegisterValue fragCoord = emitValueLoad(ptrIn);

      uint32_t componentIndex = 3;
      uint32_t t_f32   = m_module.defFloatType(32);
      uint32_t v_wComp = m_module.opCompositeExtract(t_f32, fragCoord.id, 1, &componentIndex);
               v_wComp = m_module.opFDiv(t_f32, m_module.constf32(1.0f), v_wComp);

      fragCoord.id = m_module.opCompositeInsert(
        getVectorTypeId(fragCoord.type),
        v_wComp, fragCoord.id,
        1, &componentIndex);

      return emitRegisterExtract(fragCoord, mask);
    } break;

    case DxbcSystemValue::IsFrontFace: {
      if (m_ps.builtinIsFrontFace == 0) {
        m_ps.builtinIsFrontFace = emitNewBuiltinVariable({
          { DxbcScalarType::Bool, 1, 0 },
          spv::StorageClassInput },
          spv::BuiltInFrontFacing,
          "ps_is_front_face");
      }

      // Convert the boolean FrontFacing builtin to the D3D convention:
      // all-ones (0xFFFFFFFF) for true, zero for false.
      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = 1;
      result.id = m_module.opSelect(
        getVectorTypeId(result.type),
        m_module.opLoad(
          m_module.defBoolType(),
          m_ps.builtinIsFrontFace),
        m_module.constu32(0xFFFFFFFF),
        m_module.constu32(0x00000000));
      return result;
    } break;

    case DxbcSystemValue::PrimitiveId: {
      if (m_primitiveIdIn == 0) {
        // PrimitiveId as a fragment input requires the Geometry capability
        m_module.enableCapability(spv::CapabilityGeometry);

        m_primitiveIdIn = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 0 },
          spv::StorageClassInput },
          spv::BuiltInPrimitiveId,
          "ps_primitive_id");
      }

      DxbcRegisterPointer ptrIn;
      ptrIn.type = { DxbcScalarType::Uint32, 1 };
      ptrIn.id   = m_primitiveIdIn;

      return emitValueLoad(ptrIn);
    } break;

    case DxbcSystemValue::SampleIndex: {
      if (m_ps.builtinSampleId == 0) {
        m_module.enableCapability(spv::CapabilitySampleRateShading);

        m_ps.builtinSampleId = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 0 },
          spv::StorageClassInput },
          spv::BuiltInSampleId,
          "ps_sample_id");
      }

      DxbcRegisterPointer ptrIn;
      ptrIn.type.ctype  = DxbcScalarType::Uint32;
      ptrIn.type.ccount = 1;
      ptrIn.id = m_ps.builtinSampleId;

      return emitValueLoad(ptrIn);
    } break;

    case DxbcSystemValue::RenderTargetId: {
      if (m_ps.builtinLayer == 0) {
        m_module.enableCapability(spv::CapabilityGeometry);

        m_ps.builtinLayer = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 0 },
          spv::StorageClassInput },
          spv::BuiltInLayer,
          "v_layer");
      }

      DxbcRegisterPointer ptr;
      ptr.type.ctype  = DxbcScalarType::Uint32;
      ptr.type.ccount = 1;
      ptr.id = m_ps.builtinLayer;

      return emitValueLoad(ptr);
    } break;

    case DxbcSystemValue::ViewportId: {
      if (m_ps.builtinViewportId == 0) {
        m_module.enableCapability(spv::CapabilityMultiViewport);

        m_ps.builtinViewportId = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 0 },
          spv::StorageClassInput },
          spv::BuiltInViewportIndex,
          "v_viewport");
      }

      DxbcRegisterPointer ptr;
      ptr.type.ctype  = DxbcScalarType::Uint32;
      ptr.type.ccount = 1;
      ptr.id = m_ps.builtinViewportId;

      return emitValueLoad(ptr);
    } break;

    default:
      throw DxvkError(str::format(
        "DxbcCompiler: Unhandled PS SV input: ", sv));
  }
}


// Stores a vertex shader system value output, declaring the matching
// SPIR-V output builtin lazily on first use. Also reused by the DS and
// GS store paths for the system values they share with the VS.
void emitVsSystemValueStore(
        DxbcSystemValue         sv,
        DxbcRegMask             mask,
  const DxbcRegisterValue&      value) {
  switch (sv) {
    case DxbcSystemValue::Position: {
      if (!m_positionOut) {
        m_positionOut = emitNewBuiltinVariable({
          { DxbcScalarType::Float32, 4, 0 },
          spv::StorageClassOutput },
          spv::BuiltInPosition,
          "out_position");
      }

      DxbcRegisterPointer ptr;
      ptr.type.ctype  = DxbcScalarType::Float32;
      ptr.type.ccount = 4;
      ptr.id = m_positionOut;

      emitValueStore(ptr, value, mask);
    } break;

    case DxbcSystemValue::RenderTargetId: {
      // Writing Layer from a non-GS stage needs the ShaderLayer capability
      if (m_programInfo.type() != DxbcProgramType::GeometryShader)
        m_module.enableCapability(spv::CapabilityShaderLayer);

      if (m_gs.builtinLayer == 0) {
        m_module.enableCapability(spv::CapabilityGeometry);

        m_gs.builtinLayer = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 0 },
          spv::StorageClassOutput },
          spv::BuiltInLayer,
          "o_layer");
      }

      DxbcRegisterPointer ptr;
      ptr.type = { DxbcScalarType::Uint32, 1 };
      ptr.id   = m_gs.builtinLayer;

      emitValueStore(
        ptr, emitRegisterExtract(value, mask),
        DxbcRegMask(true, false, false, false));
    } break;

    case DxbcSystemValue::ViewportId: {
      // Writing ViewportIndex from a non-GS stage needs ShaderViewportIndex
      if (m_programInfo.type() != DxbcProgramType::GeometryShader)
        m_module.enableCapability(spv::CapabilityShaderViewportIndex);

      if (m_gs.builtinViewportId == 0) {
        m_module.enableCapability(spv::CapabilityMultiViewport);

        m_gs.builtinViewportId = emitNewBuiltinVariable({
          { DxbcScalarType::Uint32, 1, 0 },
          spv::StorageClassOutput },
          spv::BuiltInViewportIndex,
          "o_viewport");
      }

      DxbcRegisterPointer ptr;
      ptr.type = { DxbcScalarType::Uint32, 1};
      ptr.id   = m_gs.builtinViewportId;

      emitValueStore(
        ptr,
emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled VS SV output: \", sv));\n }\n }\n void emitHsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n if (sv >= DxbcSystemValue::FinalQuadUeq0EdgeTessFactor\n && sv <= DxbcSystemValue::FinalLineDensityTessFactor) {\n struct TessFactor {\n uint32_t array = 0;\n uint32_t index = 0;\n };\n \n static const std::array s_tessFactors = {{\n { m_hs.builtinTessLevelOuter, 0 }, // FinalQuadUeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalQuadVeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 2 }, // FinalQuadUeq1EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 3 }, // FinalQuadVeq1EdgeTessFactor\n { m_hs.builtinTessLevelInner, 0 }, // FinalQuadUInsideTessFactor\n { m_hs.builtinTessLevelInner, 1 }, // FinalQuadVInsideTessFactor\n { m_hs.builtinTessLevelOuter, 0 }, // FinalTriUeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalTriVeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 2 }, // FinalTriWeq0EdgeTessFactor\n { m_hs.builtinTessLevelInner, 0 }, // FinalTriInsideTessFactor\n { m_hs.builtinTessLevelOuter, 0 }, // FinalLineDensityTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalLineDetailTessFactor\n }};\n \n const TessFactor tessFactor = s_tessFactors.at(uint32_t(sv)\n - uint32_t(DxbcSystemValue::FinalQuadUeq0EdgeTessFactor));\n \n const uint32_t tessFactorArrayIndex\n = m_module.constu32(tessFactor.index);\n \n // Apply global tess factor limit\n float maxTessFactor = m_hs.maxTessFactor;\n\n if (m_moduleInfo.tess != nullptr) {\n if (m_moduleInfo.tess->maxTessFactor < maxTessFactor)\n maxTessFactor = m_moduleInfo.tess->maxTessFactor;\n }\n\n DxbcRegisterValue tessValue = emitRegisterExtract(value, mask);\n tessValue.id = m_module.opNClamp(getVectorTypeId(tessValue.type),\n tessValue.id, m_module.constf32(0.0f),\n 
m_module.constf32(maxTessFactor));\n \n DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Float32;\n ptr.type.ccount = 1;\n ptr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(ptr.type),\n spv::StorageClassOutput),\n tessFactor.array, 1,\n &tessFactorArrayIndex);\n \n emitValueStore(ptr, tessValue,\n DxbcRegMask(true, false, false, false));\n } else {\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled HS SV output: \", sv));\n }\n }\n void emitDsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::CullDistance:\n case DxbcSystemValue::ClipDistance:\n case DxbcSystemValue::RenderTargetId:\n case DxbcSystemValue::ViewportId:\n emitVsSystemValueStore(sv, mask, value);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled DS SV output: \", sv));\n }\n }\n void emitGsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::CullDistance:\n case DxbcSystemValue::ClipDistance:\n case DxbcSystemValue::RenderTargetId:\n case DxbcSystemValue::ViewportId:\n emitVsSystemValueStore(sv, mask, value);\n break;\n \n case DxbcSystemValue::PrimitiveId: {\n if (m_primitiveIdOut == 0) {\n m_primitiveIdOut = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPrimitiveId,\n \"gs_primitive_id\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Uint32, 1};\n ptr.id = m_primitiveIdOut;\n \n emitValueStore(\n ptr, emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled GS SV output: \", sv));\n }\n }\n void emitPsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n 
  Logger::warn(str::format(
    "DxbcCompiler: Unhandled PS SV output: ", sv));
}


// Copies clip/cull distance components from the regular output registers
// into the packed ClipDistance/CullDistance builtin array. Components are
// packed densely in signature order. No-op if the array was not declared.
void emitClipCullStore(
        DxbcSystemValue         sv,
        uint32_t                dstArray) {
  uint32_t offset = 0;

  if (dstArray == 0)
    return;

  for (auto e = m_osgn->begin(); e != m_osgn->end(); e++) {
    if (e->systemValue == sv) {
      DxbcRegisterPointer srcPtr = m_oRegs.at(e->registerId);
      DxbcRegisterValue srcValue = emitValueLoad(srcPtr);

      for (uint32_t i = 0; i < 4; i++) {
        if (e->componentMask[i]) {
          uint32_t offsetId = m_module.consti32(offset++);

          DxbcRegisterValue component = emitRegisterExtract(
            srcValue, DxbcRegMask::select(i));

          DxbcRegisterPointer dstPtr;
          dstPtr.type = { DxbcScalarType::Float32, 1 };
          dstPtr.id = m_module.opAccessChain(
            m_module.defPointerType(
              getVectorTypeId(dstPtr.type),
              spv::StorageClassOutput),
            dstArray, 1, &offsetId);

          emitValueStore(dstPtr, component,
            DxbcRegMask(true, false, false, false));
        }
      }
    }
  }
}


// Inverse of emitClipCullStore: gathers the packed clip/cull distance
// components from the builtin input array and writes them back into the
// private v# input array for the matching signature entries.
void emitClipCullLoad(
        DxbcSystemValue         sv,
        uint32_t                srcArray) {
  uint32_t offset = 0;

  if (srcArray == 0)
    return;

  for (auto e = m_isgn->begin(); e != m_isgn->end(); e++) {
    if (e->systemValue == sv) {
      // Load individual components from the source array
      uint32_t componentIndex = 0;
      std::array<uint32_t, 4> componentIds = {{ 0, 0, 0, 0 }};

      for (uint32_t i = 0; i < 4; i++) {
        if (e->componentMask[i]) {
          uint32_t offsetId = m_module.consti32(offset++);

          DxbcRegisterPointer srcPtr;
          srcPtr.type = { DxbcScalarType::Float32, 1 };
          srcPtr.id = m_module.opAccessChain(
            m_module.defPointerType(
              getVectorTypeId(srcPtr.type),
              spv::StorageClassInput),
            srcArray, 1, &offsetId);

          componentIds[componentIndex++]
            = emitValueLoad(srcPtr).id;
        }
      }

      // Put everything into one vector
      DxbcRegisterValue dstValue;
      dstValue.type = { DxbcScalarType::Float32, componentIndex };
      dstValue.id = componentIds[0];

      if (componentIndex > 1) {
        dstValue.id = m_module.opCompositeConstruct(
          getVectorTypeId(dstValue.type),
          componentIndex, componentIds.data());
      }

      // Store vector to the input array
      uint32_t registerId = m_module.consti32(e->registerId);

      DxbcRegisterPointer dstInput;
      dstInput.type = { DxbcScalarType::Float32, 4 };
      dstInput.id = m_module.opAccessChain(
        m_module.defPointerType(
          getVectorTypeId(dstInput.type),
          spv::StorageClassPrivate),
        m_vArray, 1, &registerId);

      emitValueStore(dstInput, dstValue, e->componentMask);
    }
  }
}


// Writes a constant 1.0 PointSize output if the option requires it
// (some drivers need an explicit PointSize export for point topology).
void emitPointSizeStore() {
  if (m_moduleInfo.options.needsPointSizeExport) {
    uint32_t pointSizeId = emitNewBuiltinVariable(DxbcRegisterInfo {
      { DxbcScalarType::Float32, 1, 0 },
      spv::StorageClassOutput },
      spv::BuiltInPointSize,
      "point_size");

    m_module.opStore(pointSizeId, m_module.constf32(1.0f));
  }
}


// Per-stage module setup, dispatched on the program type.
void emitInit() {
  // Set up common capabilities for all shaders
  m_module.enableCapability(spv::CapabilityShader);
  m_module.enableCapability(spv::CapabilityImageQuery);

  // Initialize the shader module with capabilities
  // etc. 
  Each shader type has its own peculiarities.
  switch (m_programInfo.type()) {
    case DxbcProgramType::VertexShader:   emitVsInit(); break;
    case DxbcProgramType::HullShader:     emitHsInit(); break;
    case DxbcProgramType::DomainShader:   emitDsInit(); break;
    case DxbcProgramType::GeometryShader: emitGsInit(); break;
    case DxbcProgramType::PixelShader:    emitPsInit(); break;
    case DxbcProgramType::ComputeShader:  emitCsInit(); break;
    default: throw DxvkError("Invalid shader stage");
  }
}


// Opens a new SPIR-V function, closing any function that is still open.
void emitFunctionBegin(
        uint32_t                entryPoint,
        uint32_t                returnType,
        uint32_t                funcType) {
  this->emitFunctionEnd();

  m_module.functionBegin(
    returnType, entryPoint, funcType,
    spv::FunctionControlMaskNone);

  m_insideFunction = true;
}


// Closes the currently open function, if any, with an OpReturn.
void emitFunctionEnd() {
  if (m_insideFunction) {
    m_module.opReturn();
    m_module.functionEnd();
  }

  m_insideFunction = false;
}


// Emits a fresh label to start the function's first basic block.
void emitFunctionLabel() {
  m_module.opLabel(m_module.allocateId());
}


// Begins the module's actual entry point function.
void emitMainFunctionBegin() {
  this->emitFunctionBegin(
    m_entryPointId,
    m_module.defVoidType(),
    m_module.defFunctionType(
      m_module.defVoidType(), 0, nullptr));
  this->emitFunctionLabel();
}


// Vertex shader setup: capabilities, the private input array, clip/cull
// output arrays, and the vs_main function that the translated code fills.
void emitVsInit() {
  m_module.enableCapability(spv::CapabilityClipDistance);
  m_module.enableCapability(spv::CapabilityCullDistance);
  m_module.enableCapability(spv::CapabilityDrawParameters);

  // Standard input array
  emitDclInputArray(0);

  // Cull/clip distances as outputs
  m_clipDistances = emitDclClipCullDistanceArray(
    m_analysis->clipCullOut.numClipPlanes,
    spv::BuiltInClipDistance,
    spv::StorageClassOutput);

  m_cullDistances = emitDclClipCullDistanceArray(
    m_analysis->clipCullOut.numCullPlanes,
    spv::BuiltInCullDistance,
    spv::StorageClassOutput);

  // Main function of the vertex shader
  m_vs.functionId = m_module.allocateId();
  m_module.setDebugName(m_vs.functionId, "vs_main");

  this->emitFunctionBegin(
    m_vs.functionId,
    m_module.defVoidType(),
    m_module.defFunctionType(
      m_module.defVoidType(), 0, nullptr));
  this->emitFunctionLabel();
}


// Hull shader setup: tessellation capabilities, the InvocationId builtin
// and the tess level output builtins. The HS main function is assembled
// later from its control point and fork/join phases.
void emitHsInit() {
  m_module.enableCapability(spv::CapabilityTessellation);
  m_module.enableCapability(spv::CapabilityClipDistance);
  m_module.enableCapability(spv::CapabilityCullDistance);

  m_hs.builtinInvocationId = emitNewBuiltinVariable(
    DxbcRegisterInfo {
      { DxbcScalarType::Uint32, 1, 0 },
      spv::StorageClassInput },
    spv::BuiltInInvocationId,
    "vOutputControlPointId");

  m_hs.builtinTessLevelOuter = emitBuiltinTessLevelOuter(spv::StorageClassOutput);
  m_hs.builtinTessLevelInner = emitBuiltinTessLevelInner(spv::StorageClassOutput);
}


// Domain shader setup: tess level inputs, clip/cull outputs, ds_main.
void emitDsInit() {
  m_module.enableCapability(spv::CapabilityTessellation);
  m_module.enableCapability(spv::CapabilityClipDistance);
  m_module.enableCapability(spv::CapabilityCullDistance);

  m_ds.builtinTessLevelOuter = emitBuiltinTessLevelOuter(spv::StorageClassInput);
  m_ds.builtinTessLevelInner = emitBuiltinTessLevelInner(spv::StorageClassInput);

  // Cull/clip distances as outputs
  m_clipDistances = emitDclClipCullDistanceArray(
    m_analysis->clipCullOut.numClipPlanes,
    spv::BuiltInClipDistance,
    spv::StorageClassOutput);

  m_cullDistances = emitDclClipCullDistanceArray(
    m_analysis->clipCullOut.numCullPlanes,
    spv::BuiltInCullDistance,
    spv::StorageClassOutput);

  // Main function of the domain shader
  m_ds.functionId = m_module.allocateId();
  m_module.setDebugName(m_ds.functionId, "ds_main");

  this->emitFunctionBegin(
    m_ds.functionId,
    m_module.defVoidType(),
    m_module.defFunctionType(
      m_module.defVoidType(), 0, nullptr));
  this->emitFunctionLabel();
}


// Geometry shader setup: geometry capabilities, optional transform
// feedback state, clip/cull outputs and gs_main.
void emitGsInit() {
  m_module.enableCapability(spv::CapabilityGeometry);
  m_module.enableCapability(spv::CapabilityClipDistance);
  m_module.enableCapability(spv::CapabilityCullDistance);

  // Enable capabilities for xfb mode if necessary
  if (m_moduleInfo.xfb) {
    m_module.enableCapability(spv::CapabilityGeometryStreams);
    m_module.enableCapability(spv::CapabilityTransformFeedback);

    m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeXfb);
  }

  // We only need outputs if rasterization is enabled
  m_gs.needsOutputSetup = !m_moduleInfo.xfb
    || m_moduleInfo.xfb->rasterizedStream >= 0;

  // Cull/clip distances as outputs
  m_clipDistances = emitDclClipCullDistanceArray(
    m_analysis->clipCullOut.numClipPlanes,
    spv::BuiltInClipDistance,
    spv::StorageClassOutput);

  m_cullDistances = emitDclClipCullDistanceArray(
    m_analysis->clipCullOut.numCullPlanes,
    spv::BuiltInCullDistance,
    spv::StorageClassOutput);

  // Emit Xfb variables if necessary
  if (m_moduleInfo.xfb)
    emitXfbOutputDeclarations();

  // Main function of the geometry shader
  m_gs.functionId = m_module.allocateId();
  m_module.setDebugName(m_gs.functionId, "gs_main");

  this->emitFunctionBegin(
    m_gs.functionId,
    m_module.defVoidType(),
    m_module.defFunctionType(
      m_module.defVoidType(), 0, nullptr));
  this->emitFunctionLabel();
}


// Pixel shader setup: derivative control, upper-left origin, the private
// input array, clip/cull distance inputs and ps_main.
void emitPsInit() {
  m_module.enableCapability(spv::CapabilityDerivativeControl);

  m_module.setExecutionMode(m_entryPointId,
    spv::ExecutionModeOriginUpperLeft);

  // Standard input array
  emitDclInputArray(0);

  // Cull/clip distances as inputs
  m_clipDistances = emitDclClipCullDistanceArray(
    m_analysis->clipCullIn.numClipPlanes,
    spv::BuiltInClipDistance,
    spv::StorageClassInput);

  m_cullDistances = emitDclClipCullDistanceArray(
    m_analysis->clipCullIn.numCullPlanes,
    spv::BuiltInCullDistance,
    spv::StorageClassInput);

  // Main function of the pixel shader
  m_ps.functionId = m_module.allocateId();
  m_module.setDebugName(m_ps.functionId, "ps_main");

  this->emitFunctionBegin(
    m_ps.functionId,
    m_module.defVoidType(),
    m_module.defFunctionType(
      m_module.defVoidType(), 0, nullptr));
  this->emitFunctionLabel();
}


// Compute shader setup: only needs the cs_main function.
void emitCsInit() {
  // Main function of the compute shader
  m_cs.functionId = m_module.allocateId();
  m_module.setDebugName(m_cs.functionId, "cs_main");

  this->emitFunctionBegin(
    m_cs.functionId,
    m_module.defVoidType(),
    m_module.defFunctionType(
      m_module.defVoidType(), 0, nullptr));
  this->emitFunctionLabel();
}


// Builds the VS entry point: input setup, call into vs_main, output
// setup, clip/cull export and optional point size export.
void emitVsFinalize() {
  this->emitMainFunctionBegin();
  this->emitInputSetup();
  m_module.opFunctionCall(
    m_module.defVoidType(),
    m_vs.functionId, 0, nullptr);
  this->emitOutputSetup();
  this->emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);
  this->emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);
  this->emitPointSizeStore();
  this->emitFunctionEnd();
}


// Builds the HS entry point: control point phase for every invocation,
// a barrier, then fork/join phases and patch-constant output setup
// restricted to invocation 0.
void emitHsFinalize() {
  if (m_hs.cpPhase.functionId == 0)
    m_hs.cpPhase = this->emitNewHullShaderPassthroughPhase();

  // Control point phase
  this->emitMainFunctionBegin();
  this->emitInputSetup(m_hs.vertexCountIn);
  this->emitHsControlPointPhase(m_hs.cpPhase);
  this->emitHsPhaseBarrier();

  // Fork-join phases and output setup
  this->emitHsInvocationBlockBegin(1);

  for (const auto& phase : m_hs.forkPhases)
    this->emitHsForkJoinPhase(phase);

  for (const auto& phase : m_hs.joinPhases)
    this->emitHsForkJoinPhase(phase);

  this->emitOutputSetup();
  this->emitHsOutputSetup();
  this->emitHsInvocationBlockEnd();
  this->emitFunctionEnd();
}


// Builds the DS entry point: call into ds_main, then export outputs and
// clip/cull distances.
void emitDsFinalize() {
  this->emitMainFunctionBegin();
  m_module.opFunctionCall(
    m_module.defVoidType(),
    m_ds.functionId, 0, nullptr);
  this->emitOutputSetup();
  this->emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);
  this->emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);
  this->emitFunctionEnd();
}


// Builds the GS entry point. Defaults the invocation count to 1 if the
// shader never declared it.
void emitGsFinalize() {
  if (!m_gs.invocationCount)
    m_module.setInvocations(m_entryPointId, 1);

  this->emitMainFunctionBegin();
  this->emitInputSetup(
    primitiveVertexCount(m_gs.inputPrimitive));
  m_module.opFunctionCall(
    m_module.defVoidType(),
    m_gs.functionId, 0, nullptr);
  // No output setup at this point as that was
  // already done during the EmitVertex step
  this->emitFunctionEnd();
}


// Builds the PS entry point: input setup, clip/cull loads, an optional
// fragment shader interlock around ps_main when rasterizer ordered views
// are used, output setup and the optional depth clamp workaround.
void emitPsFinalize() {
  this->emitMainFunctionBegin();
  this->emitInputSetup();
  this->emitClipCullLoad(DxbcSystemValue::ClipDistance, m_clipDistances);
  this->emitClipCullLoad(DxbcSystemValue::CullDistance, m_cullDistances);

  if (m_hasRasterizerOrderedUav) {
    // For simplicity, just lock the entire fragment shader
    // if there are any rasterizer ordered views.
    m_module.enableExtension("SPV_EXT_fragment_shader_interlock");

    if (m_module.hasCapability(spv::CapabilitySampleRateShading)
     && m_moduleInfo.options.enableSampleShadingInterlock) {
      m_module.enableCapability(spv::CapabilityFragmentShaderSampleInterlockEXT);
      m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSampleInterlockOrderedEXT);
    } else {
      m_module.enableCapability(spv::CapabilityFragmentShaderPixelInterlockEXT);
      m_module.setExecutionMode(m_entryPointId, spv::ExecutionModePixelInterlockOrderedEXT);
    }

    m_module.opBeginInvocationInterlock();
  }

  m_module.opFunctionCall(
    m_module.defVoidType(),
    m_ps.functionId, 0, nullptr);

  if (m_hasRasterizerOrderedUav)
    m_module.opEndInvocationInterlock();

  this->emitOutputSetup();

  if (m_moduleInfo.options.useDepthClipWorkaround)
    this->emitOutputDepthClamp();

  this->emitFunctionEnd();
}


// Builds the CS entry point, optionally zero-initializing workgroup
// memory before calling cs_main.
void emitCsFinalize() {
  this->emitMainFunctionBegin();

  if (m_moduleInfo.options.zeroInitWorkgroupMemory)
    this->emitInitWorkgroupMemory();

  m_module.opFunctionCall(
    m_module.defVoidType(),
    m_cs.functionId, 0, nullptr);

  this->emitFunctionEnd();
}


// Declares one dedicated output variable per transform feedback entry,
// decorated with the xfb buffer/stride/offset, and records the mapping
// from o# registers to those variables.
void emitXfbOutputDeclarations() {
  for (uint32_t i = 0; i < m_moduleInfo.xfb->entryCount; i++) {
    const DxbcXfbEntry* xfbEntry = m_moduleInfo.xfb->entries + i;
    const DxbcSgnEntry* sigEntry = m_osgn->find(
      xfbEntry->semanticName,
      xfbEntry->semanticIndex,
      xfbEntry->streamId);

    if (sigEntry == nullptr)
      continue;

    DxbcRegisterInfo varInfo;
    varInfo.type.ctype = DxbcScalarType::Float32;
    varInfo.type.ccount = xfbEntry->componentCount;
    varInfo.type.alength = 0;
    varInfo.sclass = spv::StorageClassOutput;

    // dst mask covers the xfb variable's components; src mask selects the
    // matching components within the source output register
    uint32_t dstComponentMask = (1 << xfbEntry->componentCount) - 1;
    uint32_t srcComponentMask = dstComponentMask
      << sigEntry->componentMask.firstSet()
      << xfbEntry->componentIndex;

    DxbcXfbVar xfbVar;
    xfbVar.varId     = emitNewVariable(varInfo);
    xfbVar.streamId  = xfbEntry->streamId;
    xfbVar.outputId  = sigEntry->registerId;
    xfbVar.srcMask   = DxbcRegMask(srcComponentMask);
    xfbVar.dstMask   = DxbcRegMask(dstComponentMask);
    m_xfbVars.push_back(xfbVar);

    m_module.setDebugName(xfbVar.varId,
      str::format("xfb", i).c_str());

    m_module.decorateXfb(xfbVar.varId,
      xfbEntry->streamId, xfbEntry->bufferId, xfbEntry->offset,
      m_moduleInfo.xfb->strides[xfbEntry->bufferId]);
  }

  // TODO Compact location/component assignment
  for (uint32_t i = 0; i < m_xfbVars.size(); i++) {
    m_xfbVars[i].location  = i;
    m_xfbVars[i].component = 0;
  }

  for (uint32_t i = 0; i < m_xfbVars.size(); i++) {
    const DxbcXfbVar* var = &m_xfbVars[i];

    m_module.decorateLocation (var->varId, var->location);
    m_module.decorateComponent(var->varId, var->component);
  }
}


// Copies the current output (or, in passthrough mode, input) register
// values into the xfb variables that belong to the given stream.
void emitXfbOutputSetup(
        uint32_t                streamId,
        bool                    passthrough) {
  for (size_t i = 0; i < m_xfbVars.size(); i++) {
    if (m_xfbVars[i].streamId == streamId) {
      DxbcRegisterPointer srcPtr = passthrough
        ? m_vRegs[m_xfbVars[i].outputId]
        : m_oRegs[m_xfbVars[i].outputId];

      if (passthrough) {
        // Input registers are arrayed per vertex; read vertex 0
        srcPtr = emitArrayAccess(srcPtr,
          spv::StorageClassInput,
          m_module.constu32(0));
      }

      DxbcRegisterPointer dstPtr;
      dstPtr.type.ctype  = DxbcScalarType::Float32;
      dstPtr.type.ccount = m_xfbVars[i].dstMask.popCount();
      dstPtr.id = m_xfbVars[i].varId;

      DxbcRegisterValue value = emitRegisterExtract(
        emitValueLoad(srcPtr), m_xfbVars[i].srcMask);
      emitValueStore(dstPtr, value, m_xfbVars[i].dstMask);
    }
  }
}


// Invokes the hull shader's control point phase function.
void emitHsControlPointPhase(
  const DxbcCompilerHsControlPointPhase&  phase) {
  m_module.opFunctionCall(
    m_module.defVoidType(),
    phase.functionId, 0, nullptr);
}


// Invokes a fork/join phase once per declared instance, passing the
// instance index as the function argument.
void emitHsForkJoinPhase(
  const DxbcCompilerHsForkJoinPhase&      phase) {
  for (uint32_t i = 0; i < phase.instanceCount; i++) {
    uint32_t invocationId = m_module.constu32(i);

    m_module.opFunctionCall(
      m_module.defVoidType(),
      phase.functionId, 1,
      &invocationId);
  }
}


// Workgroup-scope control barrier between HS phases so that all control
// point outputs are visible before patch constant phases read them.
void emitHsPhaseBarrier() {
  uint32_t exeScopeId = m_module.constu32(spv::ScopeWorkgroup);
  uint32_t memScopeId = m_module.constu32(spv::ScopeWorkgroup);
  uint32_t semanticId = m_module.constu32(
    spv::MemorySemanticsOutputMemoryMask |
    spv::MemorySemanticsAcquireReleaseMask |
    spv::MemorySemanticsMakeAvailableMask |
    spv::MemorySemanticsMakeVisibleMask);

  m_module.opControlBarrier(exeScopeId, memScopeId, semanticId);
}


// Opens a conditional block that only executes for invocation ids below
// the given count (used to run patch constant work on invocation 0 only).
void emitHsInvocationBlockBegin(
        uint32_t                count) {
  uint32_t invocationId = m_module.opLoad(
    getScalarTypeId(DxbcScalarType::Uint32),
    m_hs.builtinInvocationId);

  uint32_t condition = m_module.opULessThan(
    m_module.defBoolType(), invocationId,
    m_module.constu32(count));

  m_hs.invocationBlockBegin = m_module.allocateId();
  m_hs.invocationBlockEnd   = m_module.allocateId();

  m_module.opSelectionMerge(
    m_hs.invocationBlockEnd,
    spv::SelectionControlMaskNone);

  m_module.opBranchConditional(
    condition,
    m_hs.invocationBlockBegin,
  m_psgn->maxRegisterCount() : 0;

  if (!arrLen)
    return 0;

  uint32_t vecType = m_module.defVectorType (m_module.defFloatType(32), 4);
  uint32_t arrType = m_module.defArrayType  (vecType, m_module.constu32(arrLen));
  uint32_t ptrType = m_module.defPointerType(arrType, storageClass);
  uint32_t varId   = m_module.newVar        (ptrType, storageClass);

  m_module.setDebugName     (varId, name);

  // Patch-constant interfaces carry the Patch decoration; the private
  // staging copy does not
  if (storageClass != spv::StorageClassPrivate) {
    m_module.decorate         (varId, spv::DecorationPatch);
    m_module.decorateLocation (varId, 0);
  }

  return varId;
}


// Declares the two-dimensional per-vertex tessellation interface array
// (vertexCount x register vec4s), placed after the per-patch locations.
// Returns 0 if the relevant signature declares no registers.
uint32_t emitTessInterfacePerVertex(
        spv::StorageClass       storageClass,
        uint32_t                vertexCount) {
  const bool isInput = storageClass == spv::StorageClassInput;

  uint32_t arrLen = isInput
    ? (m_isgn != nullptr ? m_isgn->maxRegisterCount() : 0)
    : (m_osgn != nullptr ? m_osgn->maxRegisterCount() : 0);

  if (!arrLen)
    return 0;

  // Per-vertex locations start after the per-patch ones
  uint32_t locIdx = m_psgn != nullptr
    ? m_psgn->maxRegisterCount()
    : 0;

  uint32_t vecType      = m_module.defVectorType (m_module.defFloatType(32), 4);
  uint32_t arrTypeInner = m_module.defArrayType  (vecType,      m_module.constu32(arrLen));
  uint32_t arrTypeOuter = m_module.defArrayType  (arrTypeInner, m_module.constu32(vertexCount));
  uint32_t ptrType      = m_module.defPointerType(arrTypeOuter, storageClass);
  uint32_t varId        = m_module.newVar        (ptrType, storageClass);

  m_module.setDebugName     (varId, isInput ? "vVertex" : "oVertex");
  m_module.decorateLocation (varId, locIdx);
  return varId;
}


// Declares the private v# staging array that input registers and some
// system values are copied into during input setup.
void emitDclInputArray(
        uint32_t                vertexCount) {
  DxbcVectorType info;
  info.ctype  = DxbcScalarType::Float32;
  info.ccount = 4;

  // Define the array type. This will be two-dimensional
  // in some shaders, with the outer index representing
  // the vertex ID within an invocation.
  m_vArrayLength   = m_isgn != nullptr ? 
          spv::StorageClassOutput),
        m_hs.outputPerVertex, dstIndices.size(), dstIndices.data());

    emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));
  }

  // End function
  this->emitFunctionEnd();

  DxbcCompilerHsControlPointPhase result;
  result.functionId = funId;
  return result;
}


// Opens a new fork/join phase function taking the instance index as its
// single uint argument; the caller fills in the body afterwards.
DxbcCompilerHsForkJoinPhase emitNewHullShaderForkJoinPhase() {
  uint32_t argTypeId = m_module.defIntType(32, 0);
  uint32_t funTypeId = m_module.defFunctionType(
    m_module.defVoidType(), 1, &argTypeId);

  uint32_t funId = m_module.allocateId();

  this->emitFunctionBegin(funId,
    m_module.defVoidType(),
    funTypeId);

  uint32_t argId = m_module.functionParameter(argTypeId);
  this->emitFunctionLabel();

  DxbcCompilerHsForkJoinPhase result;
  result.functionId = funId;
  result.instanceId = argId;
  return result;
}


// Emits a private constant array with the standard sample positions for
// 1/2/4/8/16x MSAA (plus a zero entry for invalid/unbound), used to
// implement the samplepos instruction. Returns the variable id.
uint32_t emitSamplePosArray() {
  const std::array<uint32_t, 32> samplePosVectors = {{
    // Invalid sample count / unbound resource
    m_module.constvec2f32( 0.0f, 0.0f),
    // VK_SAMPLE_COUNT_1_BIT
    m_module.constvec2f32( 0.0f, 0.0f),
    // VK_SAMPLE_COUNT_2_BIT
    m_module.constvec2f32( 0.25f, 0.25f),
    m_module.constvec2f32(-0.25f,-0.25f),
    // VK_SAMPLE_COUNT_4_BIT
    m_module.constvec2f32(-0.125f,-0.375f),
    m_module.constvec2f32( 0.375f,-0.125f),
    m_module.constvec2f32(-0.375f, 0.125f),
    m_module.constvec2f32( 0.125f, 0.375f),
    // VK_SAMPLE_COUNT_8_BIT
    m_module.constvec2f32( 0.0625f,-0.1875f),
    m_module.constvec2f32(-0.0625f, 0.1875f),
    m_module.constvec2f32( 0.3125f, 0.0625f),
    m_module.constvec2f32(-0.1875f,-0.3125f),
    m_module.constvec2f32(-0.3125f, 0.3125f),
    m_module.constvec2f32(-0.4375f,-0.0625f),
    m_module.constvec2f32( 0.1875f, 0.4375f),
    m_module.constvec2f32( 0.4375f,-0.4375f),
    // VK_SAMPLE_COUNT_16_BIT
    m_module.constvec2f32( 0.0625f, 0.0625f),
    m_module.constvec2f32(-0.0625f,-0.1875f),
    m_module.constvec2f32(-0.1875f, 0.1250f),
    m_module.constvec2f32( 0.2500f,-0.0625f),
    m_module.constvec2f32(-0.3125f,-0.1250f),
    m_module.constvec2f32( 0.1250f, 0.3125f),
    m_module.constvec2f32( 0.3125f, 0.1875f),
    m_module.constvec2f32( 0.1875f,-0.3125f),
    m_module.constvec2f32(-0.1250f, 0.3750f),
    m_module.constvec2f32( 0.0000f,-0.4375f),
    m_module.constvec2f32(-0.2500f,-0.3750f),
    m_module.constvec2f32(-0.3750f, 0.2500f),
    m_module.constvec2f32(-0.5000f, 0.0000f),
    m_module.constvec2f32( 0.4375f,-0.2500f),
    m_module.constvec2f32( 0.3750f, 0.4375f),
    m_module.constvec2f32(-0.4375f,-0.5000f),
  }};

  uint32_t arrayTypeId = getArrayTypeId({
    DxbcScalarType::Float32, 2,
    static_cast<uint32_t>(samplePosVectors.size()) });

  uint32_t samplePosArray = m_module.constComposite(
    arrayTypeId,
    samplePosVectors.size(),
    samplePosVectors.data());

  uint32_t varId = m_module.newVarInit(
    m_module.defPointerType(arrayTypeId, spv::StorageClassPrivate),
    spv::StorageClassPrivate, samplePosArray);

  m_module.setDebugName(varId, "g_sample_pos");
  m_module.decorate(varId, spv::DecorationNonWritable);
  return varId;
}


// Applies the configured float control modes (denorm flush/preserve,
// NaN preservation) as execution modes on the entry point. 64-bit modes
// are only applied if the module actually uses Float64.
void emitFloatControl() {
  DxbcFloatControlFlags flags = m_moduleInfo.options.floatControl;

  if (flags.isClear())
    return;

  const uint32_t width32 = 32;
  const uint32_t width64 = 64;

  if (flags.test(DxbcFloatControlFlag::DenormFlushToZero32)) {
    m_module.enableCapability(spv::CapabilityDenormFlushToZero);
    m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDenormFlushToZero, 1, &width32);
  }

  if (flags.test(DxbcFloatControlFlag::PreserveNan32)) {
    m_module.enableCapability(spv::CapabilitySignedZeroInfNanPreserve);
    m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSignedZeroInfNanPreserve, 1, &width32);
  }

  if (m_module.hasCapability(spv::CapabilityFloat64)) {
    if (flags.test(DxbcFloatControlFlag::DenormPreserve64)) {
      m_module.enableCapability(spv::CapabilityDenormPreserve);
      m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDenormPreserve, 1, &width64);
    }

    if (flags.test(DxbcFloatControlFlag::PreserveNan64)) {
      m_module.enableCapability(spv::CapabilitySignedZeroInfNanPreserve);
      m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSignedZeroInfNanPreserve, 1, &width64);
    }
  }
}


// Declares a new variable of the given type and storage class.
uint32_t emitNewVariable(
  const DxbcRegisterInfo& info) {
  const uint32_t ptrTypeId = this->getPointerTypeId(info);
  return m_module.newVar(ptrTypeId, info.sclass);
}


// Declares a new variable decorated as the given SPIR-V builtin.
// Integer pixel shader inputs additionally get the Flat decoration,
// as required for non-float fragment inputs.
uint32_t emitNewBuiltinVariable(
  const DxbcRegisterInfo& info,
        spv::BuiltIn      builtIn,
  const char*             name) {
  const uint32_t varId = emitNewVariable(info);

  if (name)
    m_module.setDebugName(varId, name);

  m_module.decorateBuiltIn(varId, builtIn);

  if (m_programInfo.type() == DxbcProgramType::PixelShader
   && info.type.ctype != DxbcScalarType::Float32
   && info.type.ctype != DxbcScalarType::Bool
   && info.sclass == spv::StorageClassInput)
    m_module.decorate(varId, spv::DecorationFlat);

  return varId;
}


// Declares the TessLevelOuter builtin (float[4], Patch-decorated).
uint32_t emitBuiltinTessLevelOuter(
        spv::StorageClass storageClass) {
  uint32_t id = emitNewBuiltinVariable(
    DxbcRegisterInfo {
      { DxbcScalarType::Float32, 0, 4 },
      storageClass },
    spv::BuiltInTessLevelOuter,
    "bTessLevelOuter");

  m_module.decorate(id, spv::DecorationPatch);
  return id;
}


// Declares the TessLevelInner builtin (float[2], Patch-decorated).
uint32_t emitBuiltinTessLevelInner(
        spv::StorageClass storageClass) {
  uint32_t id = emitNewBuiltinVariable(
    DxbcRegisterInfo {
      { DxbcScalarType::Float32, 0, 2 },
      storageClass },
    spv::BuiltInTessLevelInner,
    "bTessLevelInner");

  m_module.decorate(id, spv::DecorationPatch);
  return id;
}


// Declares the push constant block (currently a single uint holding the
// rasterizer sample count). Continues past the end of this chunk.
uint32_t emitPushConstants() {
  uint32_t uintTypeId = m_module.defIntType(32, 0);
  uint32_t structTypeId = m_module.defStructTypeUnique(1, &uintTypeId);

  m_module.setDebugName(structTypeId, "pc_t");
  m_module.setDebugMemberName(structTypeId, 0, "RasterizerSampleCount");
  m_module.memberDecorateOffset(structTypeId, 0, 0);

  uint32_t ptrTypeId = m_module.defPointerType(structTypeId, spv::StorageClassPushConstant);
uint32_t varId = m_module.newVar(ptrTypeId, spv::StorageClassPushConstant);\n\n m_module.setDebugName(varId, \"pc\");\n return varId;\n }\n DxbcCfgBlock* cfgFindBlock(\n const std::initializer_list& types);\n DxbcBufferInfo getBufferInfo(\n const DxbcRegister& reg) {\n const uint32_t registerId = reg.idx[0].offset;\n \n switch (reg.type) {\n case DxbcOperandType::Resource: {\n const auto& texture = m_textures.at(registerId);\n\n DxbcBufferInfo result;\n result.image = texture.imageInfo;\n result.stype = texture.sampledType;\n result.type = texture.type;\n result.typeId = texture.imageTypeId;\n result.varId = texture.varId;\n result.stride = texture.structStride;\n result.coherence = 0;\n result.isSsbo = texture.isRawSsbo;\n return result;\n } break;\n \n case DxbcOperandType::UnorderedAccessView: {\n const auto& uav = m_uavs.at(registerId);\n\n DxbcBufferInfo result;\n result.image = uav.imageInfo;\n result.stype = uav.sampledType;\n result.type = uav.type;\n result.typeId = uav.imageTypeId;\n result.varId = uav.varId;\n result.stride = uav.structStride;\n result.coherence = uav.coherence;\n result.isSsbo = uav.isRawSsbo;\n return result;\n } break;\n \n case DxbcOperandType::ThreadGroupSharedMemory: {\n DxbcBufferInfo result;\n result.image = { spv::DimBuffer, 0, 0, 0 };\n result.stype = DxbcScalarType::Uint32;\n result.type = m_gRegs.at(registerId).type;\n result.typeId = m_module.defPointerType(\n getScalarTypeId(DxbcScalarType::Uint32),\n spv::StorageClassWorkgroup);\n result.varId = m_gRegs.at(registerId).varId;\n result.stride = m_gRegs.at(registerId).elementStride;\n result.coherence = spv::ScopeInvocation;\n result.isSsbo = false;\n return result;\n } break;\n \n default:\n throw DxvkError(str::format(\"DxbcCompiler: Invalid operand type for buffer: \", reg.type));\n }\n }\n uint32_t getTexSizeDim(\n const DxbcImageInfo& imageType) const {\n switch (imageType.dim) {\n case spv::DimBuffer: return 1 + imageType.array;\n case spv::Dim1D: return 1 + 
imageType.array;\n case spv::Dim2D: return 2 + imageType.array;\n case spv::Dim3D: return 3 + imageType.array;\n case spv::DimCube: return 2 + imageType.array;\n default: throw DxvkError(\"DxbcCompiler: getTexLayerDim: Unsupported image dimension\");\n }\n }\n uint32_t getTexLayerDim(\n const DxbcImageInfo& imageType) const {\n switch (imageType.dim) {\n case spv::DimBuffer: return 1;\n case spv::Dim1D: return 1;\n case spv::Dim2D: return 2;\n case spv::Dim3D: return 3;\n case spv::DimCube: return 3;\n default: throw DxvkError(\"DxbcCompiler: getTexLayerDim: Unsupported image dimension\");\n }\n }\n uint32_t getTexCoordDim(\n const DxbcImageInfo& imageType) const {\n return getTexLayerDim(imageType) + imageType.array;\n }\n DxbcRegMask getTexCoordMask(\n const DxbcImageInfo& imageType) const {\n return DxbcRegMask::firstN(getTexCoordDim(imageType));\n }\n DxbcVectorType getInputRegType(\n uint32_t regIdx) const {\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: {\n const DxbcSgnEntry* entry = m_isgn->findByRegister(regIdx);\n \n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n \n if (entry != nullptr) {\n result.ctype = entry->componentType;\n result.ccount = entry->componentMask.popCount();\n }\n \n return result;\n }\n\n case DxbcProgramType::DomainShader: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n return result;\n }\n\n default: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n\n if (m_isgn == nullptr || !m_isgn->findByRegister(regIdx))\n return result;\n\n DxbcRegMask mask(0u);\n DxbcRegMask used(0u);\n\n for (const auto& e : *m_isgn) {\n if (e.registerId == regIdx && !ignoreInputSystemValue(e.systemValue)) {\n mask |= e.componentMask;\n used |= e.componentUsed;\n }\n }\n\n if (m_programInfo.type() == DxbcProgramType::PixelShader) {\n if ((used.raw() & mask.raw()) == used.raw())\n mask = used;\n }\n\n 
result.ccount = mask.minComponents();\n return result;\n }\n }\n }\n DxbcVectorType getOutputRegType(\n uint32_t regIdx) const {\n switch (m_programInfo.type()) {\n case DxbcProgramType::PixelShader: {\n const DxbcSgnEntry* entry = m_osgn->findByRegister(regIdx);\n\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n \n if (entry != nullptr) {\n result.ctype = entry->componentType;\n result.ccount = entry->componentMask.popCount();\n }\n\n return result;\n }\n\n case DxbcProgramType::HullShader: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n return result;\n }\n\n default: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n\n if (m_osgn->findByRegister(regIdx))\n result.ccount = m_osgn->regMask(regIdx).minComponents();\n return result;\n }\n }\n }\n DxbcImageInfo getResourceType(\n DxbcResourceDim resourceType,\n bool isUav) const {\n uint32_t ms = m_moduleInfo.options.disableMsaa ? 0 : 1;\n\n switch (resourceType) {\n case DxbcResourceDim::Buffer: return { spv::DimBuffer, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_MAX_ENUM };\n case DxbcResourceDim::Texture1D: return { spv::Dim1D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_1D };\n case DxbcResourceDim::Texture1DArr: return { spv::Dim1D, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_1D_ARRAY };\n case DxbcResourceDim::Texture2D: return { spv::Dim2D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D };\n case DxbcResourceDim::Texture2DArr: return { spv::Dim2D, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D_ARRAY };\n case DxbcResourceDim::Texture2DMs: return { spv::Dim2D, 0, ms,isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D };\n case DxbcResourceDim::Texture2DMsArr: return { spv::Dim2D, 1, ms,isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D_ARRAY };\n case DxbcResourceDim::Texture3D: return { spv::Dim3D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_3D };\n case DxbcResourceDim::TextureCube: return { spv::DimCube, 0, 0, isUav ? 
2u : 1u, VK_IMAGE_VIEW_TYPE_CUBE };\n case DxbcResourceDim::TextureCubeArr: return { spv::DimCube, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_CUBE_ARRAY };\n default: throw DxvkError(str::format(\"DxbcCompiler: Unsupported resource type: \", resourceType));\n }\n }\n spv::ImageFormat getScalarImageFormat(\n DxbcScalarType type) const {\n switch (type) {\n case DxbcScalarType::Float32: return spv::ImageFormatR32f;\n case DxbcScalarType::Sint32: return spv::ImageFormatR32i;\n case DxbcScalarType::Uint32: return spv::ImageFormatR32ui;\n default: throw DxvkError(\"DxbcCompiler: Unhandled scalar resource type\");\n }\n }\n bool isDoubleType(\n DxbcScalarType type) const {\n return type == DxbcScalarType::Sint64\n || type == DxbcScalarType::Uint64\n || type == DxbcScalarType::Float64;\n }\n DxbcRegisterPointer getIndexableTempPtr(\n const DxbcRegister& operand,\n DxbcRegisterValue vectorId) {\n // x# regs are indexed as follows:\n // (0) register index (immediate)\n // (1) element index (relative)\n const uint32_t regId = operand.idx[0].offset;\n \n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = m_xRegs[regId].ccount;\n info.type.alength = 0;\n info.sclass = spv::StorageClassPrivate;\n \n DxbcRegisterPointer result;\n result.type.ctype = info.type.ctype;\n result.type.ccount = info.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(info),\n m_xRegs.at(regId).varId,\n 1, &vectorId.id);\n\n return result;\n }\n bool caseBlockIsFallthrough() const {\n return m_lastOp != DxbcOpcode::Case\n && m_lastOp != DxbcOpcode::Default\n && m_lastOp != DxbcOpcode::Break\n && m_lastOp != DxbcOpcode::Ret;\n }\n uint32_t getUavCoherence(\n uint32_t registerId,\n DxbcUavFlags flags) {\n // For any ROV with write access, we must ensure that\n // availability operations happen within the locked scope.\n if (flags.test(DxbcUavFlag::RasterizerOrdered)\n && (m_analysis->uavInfos[registerId].accessFlags & VK_ACCESS_SHADER_WRITE_BIT)) {\n 
m_hasGloballyCoherentUav = true;\n m_hasRasterizerOrderedUav = true;\n return spv::ScopeQueueFamily;\n }\n\n // Ignore any resources that can't both be read and written in\n // the current shader, explicit availability/visibility operands\n // are not useful in that case.\n if (m_analysis->uavInfos[registerId].accessFlags != (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT))\n return 0;\n\n // If the globally coherent flag is set, the resource must be\n // coherent across multiple workgroups of the same dispatch\n if (flags.test(DxbcUavFlag::GloballyCoherent)) {\n m_hasGloballyCoherentUav = true;\n return spv::ScopeQueueFamily;\n }\n\n // In compute shaders, UAVs are implicitly workgroup coherent,\n // but we can rely on memory barrier instructions to make any\n // access available and visible to the entire workgroup.\n if (m_programInfo.type() == DxbcProgramType::ComputeShader)\n return spv::ScopeInvocation;\n\n return 0;\n }\n bool ignoreInputSystemValue(\n DxbcSystemValue sv) const {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::IsFrontFace:\n case DxbcSystemValue::SampleIndex:\n case DxbcSystemValue::PrimitiveId:\n case DxbcSystemValue::Coverage:\n return m_programInfo.type() == DxbcProgramType::PixelShader;\n\n default:\n return false;\n }\n }\n void emitUavBarrier(\n uint64_t readMask,\n uint64_t writeMask) {\n if (!m_moduleInfo.options.forceComputeUavBarriers\n || m_programInfo.type() != DxbcProgramType::ComputeShader)\n return;\n\n // If both masks are 0, emit a barrier in case at least one read-write UAV\n // has a pending unsynchronized access. Only consider read-after-write and\n // write-after-read hazards, assume that back-to-back stores are safe and\n // do not overlap in memory. 
Atomics are also completely ignored here.\n uint64_t rdMask = m_uavRdMask;\n uint64_t wrMask = m_uavWrMask;\n\n bool insertBarrier = bool(rdMask & wrMask);\n\n if (readMask || writeMask) {\n rdMask &= m_uavWrMask;\n wrMask &= m_uavRdMask;\n }\n\n for (auto uav : bit::BitMask(rdMask | wrMask)) {\n constexpr VkAccessFlags rwAccess = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n insertBarrier |= (m_analysis->uavInfos[uav].accessFlags & rwAccess) == rwAccess;\n }\n\n // Need to be in uniform top-level control flow, or otherwise\n // it is not safe to insert control barriers.\n if (insertBarrier && m_controlFlowBlocks.empty() && m_topLevelIsUniform) {\n m_module.opControlBarrier(\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(m_hasGloballyCoherentUav ? spv::ScopeQueueFamily : spv::ScopeWorkgroup),\n m_module.constu32(spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsImageMemoryMask\n | spv::MemorySemanticsUniformMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask));\n\n m_uavWrMask = 0u;\n m_uavRdMask = 0u;\n }\n\n // Mark pending accesses\n m_uavWrMask |= writeMask;\n m_uavRdMask |= readMask;\n }\n uint32_t getScalarTypeId(\n DxbcScalarType type) {\n if (type == DxbcScalarType::Float64)\n m_module.enableCapability(spv::CapabilityFloat64);\n \n if (type == DxbcScalarType::Sint64 || type == DxbcScalarType::Uint64)\n m_module.enableCapability(spv::CapabilityInt64);\n \n switch (type) {\n case DxbcScalarType::Uint32: return m_module.defIntType(32, 0);\n case DxbcScalarType::Uint64: return m_module.defIntType(64, 0);\n case DxbcScalarType::Sint32: return m_module.defIntType(32, 1);\n case DxbcScalarType::Sint64: return m_module.defIntType(64, 1);\n case DxbcScalarType::Float32: return m_module.defFloatType(32);\n case DxbcScalarType::Float64: return m_module.defFloatType(64);\n case DxbcScalarType::Bool: return m_module.defBoolType();\n }\n\n throw 
DxvkError(\"DxbcCompiler: Invalid scalar type\");\n }\n uint32_t getVectorTypeId(\n const DxbcVectorType& type) {\n uint32_t typeId = this->getScalarTypeId(type.ctype);\n \n if (type.ccount > 1)\n typeId = m_module.defVectorType(typeId, type.ccount);\n \n return typeId;\n }\n uint32_t getArrayTypeId(\n const DxbcArrayType& type) {\n DxbcVectorType vtype;\n vtype.ctype = type.ctype;\n vtype.ccount = type.ccount;\n \n uint32_t typeId = this->getVectorTypeId(vtype);\n \n if (type.alength != 0) {\n typeId = m_module.defArrayType(typeId,\n m_module.constu32(type.alength));\n }\n \n return typeId;\n }\n uint32_t getPointerTypeId(\n const DxbcRegisterInfo& type) {\n return m_module.defPointerType(\n this->getArrayTypeId(type.type),\n type.sclass);\n }\n uint32_t getSparseResultTypeId(\n uint32_t baseType) {\n m_module.enableCapability(spv::CapabilitySparseResidency);\n\n uint32_t uintType = getScalarTypeId(DxbcScalarType::Uint32);\n std::array typeIds = { uintType, baseType };\n return m_module.defStructType(typeIds.size(), typeIds.data());\n }\n uint32_t getFunctionId(\n uint32_t functionNr) {\n auto entry = m_subroutines.find(functionNr);\n if (entry != m_subroutines.end())\n return entry->second;\n \n uint32_t functionId = m_module.allocateId();\n m_subroutines.insert({ functionNr, functionId });\n return functionId;\n }\n DxbcCompilerHsForkJoinPhase* getCurrentHsForkJoinPhase();\n};"], ["/lsfg-vk/framegen/src/core/buffer.cpp", "#include \n#include \n\n#include \"core/buffer.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nvoid Buffer::construct(const Core::Device& device, const void* data, VkBufferUsageFlags usage) {\n // create buffer\n const VkBufferCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n .size = this->size,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkBuffer bufferHandle{};\n auto res = 
vkCreateBuffer(device.handle(), &desc, nullptr, &bufferHandle);\n if (res != VK_SUCCESS || bufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan buffer\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetBufferMemoryRequirements(device.handle(), bufferHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags &\n (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for buffer\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan buffer\");\n\n res = vkBindBufferMemory(device.handle(), bufferHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan buffer\");\n\n // upload data to buffer\n uint8_t* buf{};\n res = vkMapMemory(device.handle(), memoryHandle, 0, this->size, 0, reinterpret_cast(&buf));\n if (res != VK_SUCCESS || buf == nullptr)\n throw LSFG::vulkan_error(res, \"Failed to map memory for Vulkan buffer\");\n std::copy_n(reinterpret_cast(data), this->size, buf);\n vkUnmapMemory(device.handle(), 
memoryHandle);\n\n // store buffer and memory in shared ptr\n this->buffer = std::shared_ptr(\n new VkBuffer(bufferHandle),\n [dev = device.handle()](VkBuffer* img) {\n vkDestroyBuffer(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/commandbuffer.cpp", "#include \n#include \n\n#include \"core/commandbuffer.hpp\"\n#include \"core/device.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/fence.hpp\"\n#include \"core/semaphore.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nCommandBuffer::CommandBuffer(const Core::Device& device, const CommandPool& pool) {\n // create command buffer\n const VkCommandBufferAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n .commandPool = pool.handle(),\n .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n .commandBufferCount = 1\n };\n VkCommandBuffer commandBufferHandle{};\n auto res = vkAllocateCommandBuffers(device.handle(), &desc, &commandBufferHandle);\n if (res != VK_SUCCESS || commandBufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate command buffer\");\n\n // store command buffer in shared ptr\n this->state = std::make_shared(CommandBufferState::Empty);\n this->commandBuffer = std::shared_ptr(\n new VkCommandBuffer(commandBufferHandle),\n [dev = device.handle(), pool = pool.handle()](VkCommandBuffer* cmdBuffer) {\n vkFreeCommandBuffers(dev, pool, 1, cmdBuffer);\n }\n );\n}\n\nvoid CommandBuffer::begin() {\n if (*this->state != CommandBufferState::Empty)\n throw std::logic_error(\"Command buffer is not in Empty state\");\n\n const VkCommandBufferBeginInfo beginInfo = {\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT\n };\n auto res = 
vkBeginCommandBuffer(*this->commandBuffer, &beginInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to begin command buffer\");\n\n *this->state = CommandBufferState::Recording;\n}\n\nvoid CommandBuffer::dispatch(uint32_t x, uint32_t y, uint32_t z) const {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n vkCmdDispatch(*this->commandBuffer, x, y, z);\n}\n\nvoid CommandBuffer::end() {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n auto res = vkEndCommandBuffer(*this->commandBuffer);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to end command buffer\");\n\n *this->state = CommandBufferState::Full;\n}\n\nvoid CommandBuffer::submit(VkQueue queue, std::optional fence,\n const std::vector& waitSemaphores,\n std::optional> waitSemaphoreValues,\n const std::vector& signalSemaphores,\n std::optional> signalSemaphoreValues) {\n if (*this->state != CommandBufferState::Full)\n throw std::logic_error(\"Command buffer is not in Full state\");\n\n const std::vector waitStages(waitSemaphores.size(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);\n VkTimelineSemaphoreSubmitInfo timelineInfo{\n .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,\n };\n if (waitSemaphoreValues.has_value()) {\n timelineInfo.waitSemaphoreValueCount =\n static_cast(waitSemaphoreValues->size());\n timelineInfo.pWaitSemaphoreValues = waitSemaphoreValues->data();\n }\n if (signalSemaphoreValues.has_value()) {\n timelineInfo.signalSemaphoreValueCount =\n static_cast(signalSemaphoreValues->size());\n timelineInfo.pSignalSemaphoreValues = signalSemaphoreValues->data();\n }\n\n std::vector waitSemaphoresHandles;\n waitSemaphoresHandles.reserve(waitSemaphores.size());\n for (const auto& semaphore : waitSemaphores)\n waitSemaphoresHandles.push_back(semaphore.handle());\n std::vector signalSemaphoresHandles;\n 
signalSemaphoresHandles.reserve(signalSemaphores.size());\n for (const auto& semaphore : signalSemaphores)\n signalSemaphoresHandles.push_back(semaphore.handle());\n\n const VkSubmitInfo submitInfo{\n .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,\n .pNext = (waitSemaphoreValues.has_value() || signalSemaphoreValues.has_value())\n ? &timelineInfo : nullptr,\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphoresHandles.data(),\n .pWaitDstStageMask = waitStages.data(),\n .commandBufferCount = 1,\n .pCommandBuffers = &(*this->commandBuffer),\n .signalSemaphoreCount = static_cast(signalSemaphores.size()),\n .pSignalSemaphores = signalSemaphoresHandles.data()\n };\n auto res = vkQueueSubmit(queue, 1, &submitInfo, fence ? fence->handle() : VK_NULL_HANDLE);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to submit command buffer\");\n\n *this->state = CommandBufferState::Submitted;\n}\n"], ["/lsfg-vk/framegen/src/core/descriptorset.cpp", "#include \n#include \n\n#include \"core/descriptorset.hpp\"\n#include \"core/device.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/pipeline.hpp\"\n#include \"core/image.hpp\"\n#include \"core/sampler.hpp\"\n#include \"core/buffer.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n\nusing namespace LSFG::Core;\n\nDescriptorSet::DescriptorSet(const Core::Device& device,\n const DescriptorPool& pool, const ShaderModule& shaderModule) {\n // create descriptor set\n VkDescriptorSetLayout layout = shaderModule.getLayout();\n const VkDescriptorSetAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,\n .descriptorPool = pool.handle(),\n .descriptorSetCount = 1,\n .pSetLayouts = &layout\n };\n VkDescriptorSet descriptorSetHandle{};\n auto res = vkAllocateDescriptorSets(device.handle(), &desc, &descriptorSetHandle);\n if (res != VK_SUCCESS || descriptorSetHandle == 
VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate descriptor set\");\n\n /// store set in shared ptr\n this->descriptorSet = std::shared_ptr(\n new VkDescriptorSet(descriptorSetHandle),\n [dev = device.handle(), pool = pool](VkDescriptorSet* setHandle) {\n vkFreeDescriptorSets(dev, pool.handle(), 1, setHandle);\n }\n );\n}\n\nDescriptorSetUpdateBuilder DescriptorSet::update(const Core::Device& device) const {\n return { *this, device };\n}\n\nvoid DescriptorSet::bind(const CommandBuffer& commandBuffer, const Pipeline& pipeline) const {\n VkDescriptorSet descriptorSetHandle = this->handle();\n vkCmdBindDescriptorSets(commandBuffer.handle(),\n VK_PIPELINE_BIND_POINT_COMPUTE, pipeline.getLayout(),\n 0, 1, &descriptorSetHandle, 0, nullptr);\n}\n\n// updater class\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Image& image) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n .imageView = image.getView(),\n .imageLayout = VK_IMAGE_LAYOUT_GENERAL\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Sampler& sampler) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n .sampler = sampler.handle(),\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Buffer& buffer) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n 
.dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = nullptr,\n .pBufferInfo = new VkDescriptorBufferInfo {\n .buffer = buffer.handle(),\n .range = buffer.getSize()\n }\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nvoid DescriptorSetUpdateBuilder::build() {\n vkUpdateDescriptorSets(this->device->handle(),\n static_cast(this->entries.size()),\n this->entries.data(), 0, nullptr);\n\n // NOLINTBEGIN\n for (const auto& entry : this->entries) {\n delete entry.pImageInfo;\n delete entry.pBufferInfo;\n }\n // NOLINTEND\n}\n"], ["/lsfg-vk/framegen/src/core/shadermodule.cpp", "#include \n#include \n\n#include \"core/shadermodule.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nShaderModule::ShaderModule(const Core::Device& device, const std::vector& code,\n const std::vector>& descriptorTypes) {\n // create shader module\n const uint8_t* data_ptr = code.data();\n const VkShaderModuleCreateInfo createInfo{\n .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,\n .codeSize = code.size(),\n .pCode = reinterpret_cast(data_ptr)\n };\n VkShaderModule shaderModuleHandle{};\n auto res = vkCreateShaderModule(device.handle(), &createInfo, nullptr, &shaderModuleHandle);\n if (res != VK_SUCCESS || !shaderModuleHandle)\n throw LSFG::vulkan_error(res, \"Failed to create shader module\");\n\n // create descriptor set layout\n std::vector layoutBindings;\n size_t bindIdx = 0;\n for (const auto &[count, type] : descriptorTypes)\n for 
(size_t i = 0; i < count; i++, bindIdx++)\n layoutBindings.emplace_back(VkDescriptorSetLayoutBinding {\n .binding = static_cast(bindIdx),\n .descriptorType = type,\n .descriptorCount = 1,\n .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT\n });\n\n const VkDescriptorSetLayoutCreateInfo layoutDesc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,\n .bindingCount = static_cast(layoutBindings.size()),\n .pBindings = layoutBindings.data()\n };\n VkDescriptorSetLayout descriptorSetLayout{};\n res = vkCreateDescriptorSetLayout(device.handle(), &layoutDesc, nullptr, &descriptorSetLayout);\n if (res != VK_SUCCESS || !descriptorSetLayout)\n throw LSFG::vulkan_error(res, \"Failed to create descriptor set layout\");\n\n // store module and layout in shared ptr\n this->shaderModule = std::shared_ptr(\n new VkShaderModule(shaderModuleHandle),\n [dev = device.handle()](VkShaderModule* shaderModuleHandle) {\n vkDestroyShaderModule(dev, *shaderModuleHandle, nullptr);\n }\n );\n this->descriptorSetLayout = std::shared_ptr(\n new VkDescriptorSetLayout(descriptorSetLayout),\n [dev = device.handle()](VkDescriptorSetLayout* layout) {\n vkDestroyDescriptorSetLayout(dev, *layout, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/utils/benchmark.cpp", "#include \"utils/benchmark.hpp\"\n#include \"config/config.hpp\"\n#include \"extract/extract.hpp\"\n#include \"extract/trans.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Benchmark;\n\nvoid Benchmark::run(uint32_t width, uint32_t height) {\n const auto& conf = Config::activeConf;\n\n auto* lsfgInitialize = LSFG_3_1::initialize;\n auto* lsfgCreateContext = LSFG_3_1::createContext;\n auto* lsfgPresentContext = LSFG_3_1::presentContext;\n if (conf.performance) {\n lsfgInitialize = LSFG_3_1P::initialize;\n lsfgCreateContext = LSFG_3_1P::createContext;\n lsfgPresentContext = LSFG_3_1P::presentContext;\n }\n\n // create the benchmark 
context\n const char* lsfgDeviceUUID = std::getenv(\"LSFG_DEVICE_UUID\");\n const uint64_t deviceUUID = lsfgDeviceUUID\n ? std::stoull(std::string(lsfgDeviceUUID), nullptr, 16) : 0x1463ABAC;\n\n setenv(\"DISABLE_LSFG\", \"1\", 1); // NOLINT\n\n Extract::extractShaders();\n lsfgInitialize(\n deviceUUID, // some magic number if not given\n conf.hdr, 1.0F / conf.flowScale, conf.multiplier - 1,\n [](const std::string& name) -> std::vector {\n auto dxbc = Extract::getShader(name);\n auto spirv = Extract::translateShader(dxbc);\n return spirv;\n }\n );\n const int32_t ctx = lsfgCreateContext(-1, -1, {},\n { .width = width, .height = height },\n conf.hdr ? VK_FORMAT_R16G16B16A16_SFLOAT : VK_FORMAT_R8G8B8A8_UNORM\n );\n\n unsetenv(\"DISABLE_LSFG\"); // NOLINT\n\n // run the benchmark (run 8*n + 1 so the fences are waited on)\n const auto now = std::chrono::high_resolution_clock::now();\n const uint64_t iterations = 8 * 500UL;\n\n std::cerr << \"lsfg-vk: Benchmark started, running \" << iterations << \" iterations...\\n\";\n for (uint64_t count = 0; count < iterations + 1; count++) {\n lsfgPresentContext(ctx, -1, {});\n\n if (count % 50 == 0 && count > 0)\n std::cerr << \"lsfg-vk: \"\n << std::setprecision(2) << std::fixed\n << static_cast(count) / static_cast(iterations) * 100.0F\n << \"% done (\" << count + 1 << \"/\" << iterations << \")\\r\";\n }\n const auto then = std::chrono::high_resolution_clock::now();\n\n // print results\n const auto ms = std::chrono::duration_cast(then - now).count();\n\n const auto perIteration = static_cast(ms) / static_cast(iterations);\n\n const uint64_t totalGen = (conf.multiplier - 1) * iterations;\n const auto genFps = static_cast(totalGen) / (static_cast(ms) / 1000.0F);\n\n const uint64_t totalFrames = iterations * conf.multiplier;\n const auto totalFps = static_cast(totalFrames) / (static_cast(ms) / 1000.0F);\n\n std::cerr << \"lsfg-vk: Benchmark completed in \" << ms << \" ms\\n\";\n std::cerr << \" Time taken per real frame: \"\n << 
std::setprecision(2) << std::fixed << perIteration << \" ms\\n\";\n std::cerr << \" Generated \" << totalGen << \" frames in total at \"\n << std::setprecision(2) << std::fixed << genFps << \" FPS\\n\";\n std::cerr << \" Total of \" << totalFrames << \" frames presented at \"\n << std::setprecision(2) << std::fixed << totalFps << \" FPS\\n\";\n\n // sleep for a second, then exit\n std::this_thread::sleep_for(std::chrono::seconds(1));\n _exit(0);\n}\n"], ["/lsfg-vk/src/main.cpp", "#include \"config/config.hpp\"\n#include \"extract/extract.hpp\"\n#include \"utils/benchmark.hpp\"\n#include \"utils/utils.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace {\n __attribute__((constructor)) void lsfgvk_init() {\n std::cerr << std::unitbuf;\n\n // read configuration\n const std::string file = Utils::getConfigFile();\n try {\n Config::updateConfig(file);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occured while trying to parse the configuration, IGNORING:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return; // default configuration will unload\n }\n\n const auto name = Utils::getProcessName();\n try {\n Config::activeConf = Config::getConfig(name);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: The configuration for \" << name.second << \" is invalid, IGNORING:\\n\";\n std::cerr << e.what() << '\\n';\n return; // default configuration will unload\n }\n\n // exit silently if not enabled\n auto& conf = Config::activeConf;\n if (!conf.enable && name.second != \"benchmark\")\n return; // default configuration will unload\n\n // print config\n std::cerr << \"lsfg-vk: Loaded configuration for \" << name.second << \":\\n\";\n if (!conf.dll.empty()) std::cerr << \" Using DLL from: \" << conf.dll << '\\n';\n std::cerr << \" Multiplier: \" << conf.multiplier << '\\n';\n std::cerr << \" Flow Scale: \" << conf.flowScale << '\\n';\n std::cerr << \" Performance Mode: 
\" << (conf.performance ? \"Enabled\" : \"Disabled\") << '\\n';\n std::cerr << \" HDR Mode: \" << (conf.hdr ? \"Enabled\" : \"Disabled\") << '\\n';\n if (conf.e_present != 2) std::cerr << \" ! Present Mode: \" << conf.e_present << '\\n';\n\n // remove mesa var in favor of config\n unsetenv(\"MESA_VK_WSI_PRESENT_MODE\"); // NOLINT\n\n // write latest file\n try {\n std::ofstream latest(\"/tmp/lsfg-vk_last\", std::ios::trunc);\n if (!latest.is_open())\n throw std::runtime_error(\"Failed to open /tmp/lsfg-vk_last for writing\");\n latest << \"exe: \" << name.first << '\\n';\n latest << \"comm: \" << name.second << '\\n';\n latest << \"pid: \" << getpid() << '\\n';\n if (!latest.good())\n throw std::runtime_error(\"Failed to write to /tmp/lsfg-vk_last\");\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to write the latest file, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n\n // load shaders\n try {\n Extract::extractShaders();\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to extract the shaders, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n std::cerr << \"lsfg-vk: Shaders extracted successfully.\\n\";\n\n // run benchmark if requested\n const char* benchmark_flag = std::getenv(\"LSFG_BENCHMARK\");\n if (!benchmark_flag)\n return;\n\n const std::string resolution(benchmark_flag);\n uint32_t width{};\n uint32_t height{};\n try {\n const size_t x = resolution.find('x');\n if (x == std::string::npos)\n throw std::runtime_error(\"Unable to find 'x' in benchmark string\");\n\n const std::string width_str = resolution.substr(0, x);\n const std::string height_str = resolution.substr(x + 1);\n if (width_str.empty() || height_str.empty())\n throw std::runtime_error(\"Invalid resolution\");\n\n const int32_t w = std::stoi(width_str);\n const int32_t h = std::stoi(height_str);\n if (w < 0 || h < 0)\n throw 
std::runtime_error(\"Resolution cannot be negative\");\n\n width = static_cast(w);\n height = static_cast(h);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to parse the resolution, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n\n std::thread benchmark([width, height]() {\n try {\n Benchmark::run(width, height);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred during the benchmark:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n });\n benchmark.detach();\n conf.enable = false;\n }\n}\n"], ["/lsfg-vk/framegen/src/core/semaphore.cpp", "#include \n#include \n\n#include \"core/semaphore.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nSemaphore::Semaphore(const Core::Device& device, std::optional initial) {\n // create semaphore\n const VkSemaphoreTypeCreateInfo typeInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,\n .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,\n .initialValue = initial.value_or(0)\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = initial.has_value() ? 
&typeInfo : nullptr,\n };\n VkSemaphore semaphoreHandle{};\n auto res = vkCreateSemaphore(device.handle(), &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // store semaphore in shared ptr\n this->isTimeline = initial.has_value();\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device.handle()](VkSemaphore* semaphoreHandle) {\n vkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nSemaphore::Semaphore(const Core::Device& device, int fd) {\n // create semaphore\n const VkExportSemaphoreCreateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = &exportInfo\n };\n VkSemaphore semaphoreHandle{};\n auto res = vkCreateSemaphore(device.handle(), &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // import semaphore from fd\n auto vkImportSemaphoreFdKHR = reinterpret_cast(\n vkGetDeviceProcAddr(device.handle(), \"vkImportSemaphoreFdKHR\"));\n\n const VkImportSemaphoreFdInfoKHR importInfo{\n .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,\n .semaphore = semaphoreHandle,\n .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,\n .fd = fd // closes the fd\n };\n res = vkImportSemaphoreFdKHR(device.handle(), &importInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to import semaphore from fd\");\n\n // store semaphore in shared ptr\n this->isTimeline = false;\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device.handle()](VkSemaphore* semaphoreHandle) {\n vkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nvoid 
Semaphore::signal(const Core::Device& device, uint64_t value) const {\n if (!this->isTimeline)\n throw std::logic_error(\"Invalid timeline semaphore\");\n\n const VkSemaphoreSignalInfo signalInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,\n .semaphore = this->handle(),\n .value = value\n };\n auto res = vkSignalSemaphore(device.handle(), &signalInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to signal semaphore\");\n}\n\nbool Semaphore::wait(const Core::Device& device, uint64_t value, uint64_t timeout) const {\n if (!this->isTimeline)\n throw std::logic_error(\"Invalid timeline semaphore\");\n\n VkSemaphore semaphore = this->handle();\n const VkSemaphoreWaitInfo waitInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,\n .semaphoreCount = 1,\n .pSemaphores = &semaphore,\n .pValues = &value\n };\n auto res = vkWaitSemaphores(device.handle(), &waitInfo, timeout);\n if (res != VK_SUCCESS && res != VK_TIMEOUT)\n throw LSFG::vulkan_error(res, \"Unable to wait for semaphore\");\n\n return res == VK_SUCCESS;\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/generate.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/generate.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nGenerate::Generate(Vulkan& vk,\n Core::Image inImg1, Core::Image inImg2,\n Core::Image inImg3, Core::Image inImg4, Core::Image inImg5,\n const std::vector& fds, VkFormat format)\n : inImg1(std::move(inImg1)), inImg2(std::move(inImg2)),\n inImg3(std::move(inImg3)), inImg4(std::move(inImg4)),\n inImg5(std::move(inImg5)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"p_generate\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = 
vk.shaders.getPipeline(vk.device, \"p_generate\");\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg1.getExtent();\n for (size_t i = 0; i < vk.generationCount; i++)\n this->outImgs.emplace_back(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, fds.empty() ? -1 : fds.at(i));\n\n // hook up shaders\n for (size_t i = 0; i < vk.generationCount; i++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(i + 1) / static_cast(vk.generationCount + 1));\n for (size_t j = 0; j < 2; j++) {\n pass.descriptorSet.at(j) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModule);\n pass.descriptorSet.at(j).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg2 : this->inImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? 
this->inImg1 : this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg3)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg4)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg5)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n }\n }\n}\n\nvoid Generate::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first pass\n const auto extent = this->inImg1.getExtent();\n const uint32_t threadsX = (extent.width + 15) >> 4;\n const uint32_t threadsY = (extent.height + 15) >> 4;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg1)\n .addW2R(this->inImg2)\n .addW2R(this->inImg3)\n .addW2R(this->inImg4)\n .addW2R(this->inImg5)\n .addR2W(this->outImgs.at(pass_idx))\n .build();\n\n this->pipeline.bind(buf);\n pass.descriptorSet.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/generate.cpp", "#include \n#include \n\n#include \"v3_1/shaders/generate.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nGenerate::Generate(Vulkan& vk,\n Core::Image inImg1, Core::Image inImg2,\n Core::Image inImg3, Core::Image inImg4, Core::Image inImg5,\n const std::vector& fds, VkFormat format)\n : inImg1(std::move(inImg1)), inImg2(std::move(inImg2)),\n inImg3(std::move(inImg3)), inImg4(std::move(inImg4)),\n inImg5(std::move(inImg5)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"generate\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"generate\");\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = 
vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg1.getExtent();\n for (size_t i = 0; i < vk.generationCount; i++)\n this->outImgs.emplace_back(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, fds.empty() ? -1 : fds.at(i));\n\n // hook up shaders\n for (size_t i = 0; i < vk.generationCount; i++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(i + 1) / static_cast(vk.generationCount + 1));\n for (size_t j = 0; j < 2; j++) {\n pass.descriptorSet.at(j) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModule);\n pass.descriptorSet.at(j).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg2 : this->inImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? 
this->inImg1 : this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg3)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg4)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg5)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n }\n }\n}\n\nvoid Generate::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first pass\n const auto extent = this->inImg1.getExtent();\n const uint32_t threadsX = (extent.width + 15) >> 4;\n const uint32_t threadsY = (extent.height + 15) >> 4;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg1)\n .addW2R(this->inImg2)\n .addW2R(this->inImg3)\n .addW2R(this->inImg4)\n .addW2R(this->inImg5)\n .addR2W(this->outImgs.at(pass_idx))\n .build();\n\n this->pipeline.bind(buf);\n pass.descriptorSet.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/context.cpp", "#include \n#include \n\n#include \"v3_1p/context.hpp\"\n#include \"common/utils.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1P;\n\nContext::Context(Vulkan& vk,\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n // import input images\n this->inImg_0 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in0);\n this->inImg_1 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in1);\n\n // prepare render data\n for (size_t i = 0; i < 8; i++) {\n auto& data = this->data.at(i);\n data.internalSemaphores.resize(vk.generationCount);\n data.outSemaphores.resize(vk.generationCount);\n data.completionFences.resize(vk.generationCount);\n data.cmdBuffers2.resize(vk.generationCount);\n }\n\n // create shader chains\n 
this->mipmaps = Shaders::Mipmaps(vk, this->inImg_0, this->inImg_1);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(i) = Shaders::Alpha(vk, this->mipmaps.getOutImages().at(i));\n this->beta = Shaders::Beta(vk, this->alpha.at(0).getOutImages());\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i) = Shaders::Gamma(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(std::min(6 - i, 5)),\n (i == 0) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()));\n if (i < 4) continue;\n\n this->delta.at(i - 4) = Shaders::Delta(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(6 - i),\n (i == 4) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage1()));\n }\n this->generate = Shaders::Generate(vk,\n this->inImg_0, this->inImg_1,\n this->gamma.at(6).getOutImage(),\n this->delta.at(2).getOutImage1(),\n this->delta.at(2).getOutImage2(),\n outN, format);\n}\n\nvoid Context::present(Vulkan& vk,\n int inSem, const std::vector& outSem) {\n auto& data = this->data.at(this->frameIdx % 8);\n\n // 3. wait for completion of previous frame in this slot\n if (data.shouldWait)\n for (auto& fence : data.completionFences)\n if (!fence.wait(vk.device, UINT64_MAX))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Fence wait timed out\");\n data.shouldWait = true;\n\n // 1. 
create mipmaps and process input image\n if (inSem >= 0) data.inSemaphore = Core::Semaphore(vk.device, inSem);\n for (size_t i = 0; i < vk.generationCount; i++)\n data.internalSemaphores.at(i) = Core::Semaphore(vk.device);\n\n data.cmdBuffer1 = Core::CommandBuffer(vk.device, vk.commandPool);\n data.cmdBuffer1.begin();\n\n this->mipmaps.Dispatch(data.cmdBuffer1, this->frameIdx);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(6 - i).Dispatch(data.cmdBuffer1, this->frameIdx);\n this->beta.Dispatch(data.cmdBuffer1, this->frameIdx);\n\n data.cmdBuffer1.end();\n std::vector waits = { data.inSemaphore };\n if (inSem < 0) waits.clear();\n data.cmdBuffer1.submit(vk.device.getComputeQueue(), std::nullopt,\n waits, std::nullopt,\n data.internalSemaphores, std::nullopt);\n\n // 2. generate intermediary frames\n for (size_t pass = 0; pass < vk.generationCount; pass++) {\n auto& internalSemaphore = data.internalSemaphores.at(pass);\n auto& outSemaphore = data.outSemaphores.at(pass);\n if (inSem >= 0) outSemaphore = Core::Semaphore(vk.device, outSem.empty() ? 
-1 : outSem.at(pass));\n auto& completionFence = data.completionFences.at(pass);\n completionFence = Core::Fence(vk.device);\n\n auto& buf2 = data.cmdBuffers2.at(pass);\n buf2 = Core::CommandBuffer(vk.device, vk.commandPool);\n buf2.begin();\n\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i).Dispatch(buf2, this->frameIdx, pass);\n if (i >= 4)\n this->delta.at(i - 4).Dispatch(buf2, this->frameIdx, pass, i == 6);\n }\n this->generate.Dispatch(buf2, this->frameIdx, pass);\n\n buf2.end();\n std::vector signals = { outSemaphore };\n if (inSem < 0) signals.clear();\n buf2.submit(vk.device.getComputeQueue(), completionFence,\n { internalSemaphore }, std::nullopt,\n signals, std::nullopt);\n }\n\n this->frameIdx++;\n}\n"], ["/lsfg-vk/framegen/v3.1_src/context.cpp", "#include \n#include \n\n#include \"v3_1/context.hpp\"\n#include \"common/utils.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1;\n\nContext::Context(Vulkan& vk,\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n // import input images\n this->inImg_0 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in0);\n this->inImg_1 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in1);\n\n // prepare render data\n for (size_t i = 0; i < 8; i++) {\n auto& data = this->data.at(i);\n data.internalSemaphores.resize(vk.generationCount);\n data.outSemaphores.resize(vk.generationCount);\n data.completionFences.resize(vk.generationCount);\n data.cmdBuffers2.resize(vk.generationCount);\n }\n\n // create shader chains\n this->mipmaps = Shaders::Mipmaps(vk, this->inImg_0, this->inImg_1);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(i) = Shaders::Alpha(vk, this->mipmaps.getOutImages().at(i));\n this->beta = Shaders::Beta(vk, 
this->alpha.at(0).getOutImages());\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i) = Shaders::Gamma(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(std::min(6 - i, 5)),\n (i == 0) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()));\n if (i < 4) continue;\n\n this->delta.at(i - 4) = Shaders::Delta(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(6 - i),\n (i == 4) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage1()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage2()));\n }\n this->generate = Shaders::Generate(vk,\n this->inImg_0, this->inImg_1,\n this->gamma.at(6).getOutImage(),\n this->delta.at(2).getOutImage1(),\n this->delta.at(2).getOutImage2(),\n outN, format);\n}\n\nvoid Context::present(Vulkan& vk,\n int inSem, const std::vector& outSem) {\n auto& data = this->data.at(this->frameIdx % 8);\n\n // 3. wait for completion of previous frame in this slot\n if (data.shouldWait)\n for (auto& fence : data.completionFences)\n if (!fence.wait(vk.device, UINT64_MAX))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Fence wait timed out\");\n data.shouldWait = true;\n\n // 1. 
create mipmaps and process input image\n if (inSem >= 0) data.inSemaphore = Core::Semaphore(vk.device, inSem);\n for (size_t i = 0; i < vk.generationCount; i++)\n data.internalSemaphores.at(i) = Core::Semaphore(vk.device);\n\n data.cmdBuffer1 = Core::CommandBuffer(vk.device, vk.commandPool);\n data.cmdBuffer1.begin();\n\n this->mipmaps.Dispatch(data.cmdBuffer1, this->frameIdx);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(6 - i).Dispatch(data.cmdBuffer1, this->frameIdx);\n this->beta.Dispatch(data.cmdBuffer1, this->frameIdx);\n\n data.cmdBuffer1.end();\n std::vector waits = { data.inSemaphore };\n if (inSem < 0) waits.clear();\n data.cmdBuffer1.submit(vk.device.getComputeQueue(), std::nullopt,\n waits, std::nullopt,\n data.internalSemaphores, std::nullopt);\n\n // 2. generate intermediary frames\n for (size_t pass = 0; pass < vk.generationCount; pass++) {\n auto& internalSemaphore = data.internalSemaphores.at(pass);\n auto& outSemaphore = data.outSemaphores.at(pass);\n if (inSem >= 0) outSemaphore = Core::Semaphore(vk.device, outSem.empty() ? 
-1 : outSem.at(pass));\n auto& completionFence = data.completionFences.at(pass);\n completionFence = Core::Fence(vk.device);\n\n auto& buf2 = data.cmdBuffers2.at(pass);\n buf2 = Core::CommandBuffer(vk.device, vk.commandPool);\n buf2.begin();\n\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i).Dispatch(buf2, this->frameIdx, pass);\n if (i >= 4)\n this->delta.at(i - 4).Dispatch(buf2, this->frameIdx, pass);\n }\n this->generate.Dispatch(buf2, this->frameIdx, pass);\n\n buf2.end();\n std::vector signals = { outSemaphore };\n if (inSem < 0) signals.clear();\n buf2.submit(vk.device.getComputeQueue(), completionFence,\n { internalSemaphore }, std::nullopt,\n signals, std::nullopt);\n }\n\n this->frameIdx++;\n}\n"], ["/lsfg-vk/src/extract/extract.cpp", "#include \"extract/extract.hpp\"\n#include \"config/config.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Extract;\n\nconst std::unordered_map nameIdxTable = {{\n { \"mipmaps\", 255 },\n { \"alpha[0]\", 267 },\n { \"alpha[1]\", 268 },\n { \"alpha[2]\", 269 },\n { \"alpha[3]\", 270 },\n { \"beta[0]\", 275 },\n { \"beta[1]\", 276 },\n { \"beta[2]\", 277 },\n { \"beta[3]\", 278 },\n { \"beta[4]\", 279 },\n { \"gamma[0]\", 257 },\n { \"gamma[1]\", 259 },\n { \"gamma[2]\", 260 },\n { \"gamma[3]\", 261 },\n { \"gamma[4]\", 262 },\n { \"delta[0]\", 257 },\n { \"delta[1]\", 263 },\n { \"delta[2]\", 264 },\n { \"delta[3]\", 265 },\n { \"delta[4]\", 266 },\n { \"delta[5]\", 258 },\n { \"delta[6]\", 271 },\n { \"delta[7]\", 272 },\n { \"delta[8]\", 273 },\n { \"delta[9]\", 274 },\n { \"generate\", 256 },\n { \"p_mipmaps\", 255 },\n { \"p_alpha[0]\", 290 },\n { \"p_alpha[1]\", 291 },\n { \"p_alpha[2]\", 292 },\n { \"p_alpha[3]\", 293 },\n { \"p_beta[0]\", 298 },\n { \"p_beta[1]\", 299 },\n { \"p_beta[2]\", 300 },\n { \"p_beta[3]\", 301 },\n { \"p_beta[4]\", 302 },\n { \"p_gamma[0]\", 280 },\n { \"p_gamma[1]\", 282 },\n { \"p_gamma[2]\", 283 },\n { 
\"p_gamma[3]\", 284 },\n { \"p_gamma[4]\", 285 },\n { \"p_delta[0]\", 280 },\n { \"p_delta[1]\", 286 },\n { \"p_delta[2]\", 287 },\n { \"p_delta[3]\", 288 },\n { \"p_delta[4]\", 289 },\n { \"p_delta[5]\", 281 },\n { \"p_delta[6]\", 294 },\n { \"p_delta[7]\", 295 },\n { \"p_delta[8]\", 296 },\n { \"p_delta[9]\", 297 },\n { \"p_generate\", 256 },\n}};\n\nnamespace {\n auto& shaders() {\n static std::unordered_map> shaderData;\n return shaderData;\n }\n\n int on_resource(void*, const peparse::resource& res) {\n if (res.type != peparse::RT_RCDATA || res.buf == nullptr || res.buf->bufLen <= 0)\n return 0;\n std::vector resource_data(res.buf->bufLen);\n std::copy_n(res.buf->buf, res.buf->bufLen, resource_data.data());\n shaders()[res.name] = resource_data;\n return 0;\n }\n\n const std::vector PATHS{{\n \".local/share/Steam/steamapps/common\",\n \".steam/steam/steamapps/common\",\n \".steam/debian-installation/steamapps/common\",\n \".var/app/com.valvesoftware.Steam/.local/share/Steam/steamapps/common\",\n \"snap/steam/common/.local/share/Steam/steamapps/common\"\n }};\n\n std::string getDllPath() {\n // overriden path\n std::string dllPath = Config::activeConf.dll;\n if (!dllPath.empty())\n return dllPath;\n // home based paths\n const char* home = getenv(\"HOME\");\n const std::string homeStr = home ? 
home : \"\";\n for (const auto& base : PATHS) {\n const std::filesystem::path path =\n std::filesystem::path(homeStr) / base / \"Lossless Scaling\" / \"Lossless.dll\";\n if (std::filesystem::exists(path))\n return path.string();\n }\n // xdg home\n const char* dataDir = getenv(\"XDG_DATA_HOME\");\n if (dataDir && *dataDir != '\\0')\n return std::string(dataDir) + \"/Steam/steamapps/common/Lossless Scaling/Lossless.dll\";\n // final fallback\n return \"Lossless.dll\";\n }\n}\n\nvoid Extract::extractShaders() {\n if (!shaders().empty())\n return;\n\n // parse the dll\n peparse::parsed_pe* dll = peparse::ParsePEFromFile(getDllPath().c_str());\n if (!dll)\n throw std::runtime_error(\"Unable to read Lossless.dll, is it installed?\");\n peparse::IterRsrc(dll, on_resource, nullptr);\n peparse::DestructParsedPE(dll);\n\n // ensure all shaders are present\n for (const auto& [name, idx] : nameIdxTable)\n if (shaders().find(idx) == shaders().end())\n throw std::runtime_error(\"Shader not found: \" + name + \".\\n- Is Lossless Scaling up to date?\");\n}\n\nstd::vector Extract::getShader(const std::string& name) {\n if (shaders().empty())\n throw std::runtime_error(\"Shaders are not loaded.\");\n\n auto hit = nameIdxTable.find(name);\n if (hit == nameIdxTable.end())\n throw std::runtime_error(\"Shader hash not found: \" + name);\n\n auto sit = shaders().find(hit->second);\n if (sit == shaders().end())\n throw std::runtime_error(\"Shader not found: \" + name);\n\n return sit->second;\n}\n"], ["/lsfg-vk/framegen/src/core/descriptorpool.cpp", "#include \n#include \n\n#include \"core/descriptorpool.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nDescriptorPool::DescriptorPool(const Core::Device& device) {\n // create descriptor pool\n const std::array pools{{ // arbitrary limits\n { .type = VK_DESCRIPTOR_TYPE_SAMPLER, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
.descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 4096 }\n }};\n const VkDescriptorPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,\n .flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,\n .maxSets = 16384,\n .poolSizeCount = static_cast(pools.size()),\n .pPoolSizes = pools.data()\n };\n VkDescriptorPool poolHandle{};\n auto res = vkCreateDescriptorPool(device.handle(), &desc, nullptr, &poolHandle);\n if (res != VK_SUCCESS || poolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create descriptor pool\");\n\n // store pool in shared ptr\n this->descriptorPool = std::shared_ptr(\n new VkDescriptorPool(poolHandle),\n [dev = device.handle()](VkDescriptorPool* poolHandle) {\n vkDestroyDescriptorPool(dev, *poolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/delta.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/delta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nDelta::Delta(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg1,\n std::optional optImg2)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg1(std::move(optImg1)), optImg2(std::move(optImg2)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_delta[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[2]\",\n { { 1, 
VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[5]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[6]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[7]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[8]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[9]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_delta[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[4]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[5]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[6]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[7]\"),\n vk.shaders.getPipeline(vk.device, 
\"p_delta[8]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[9]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 3; i++)\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n for (size_t i = 0; i < 2; i++)\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n\n this->outImg1 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n this->outImg2 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n false, !this->optImg1.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg1)\n .build();\n for (size_t i = 0; i < 3; i++) {\n pass.sixthDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(5));\n pass.sixthDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 
2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .build();\n }\n pass.descriptorSets.at(4) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(6));\n pass.descriptorSets.at(4).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .build();\n pass.descriptorSets.at(5) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(7));\n pass.descriptorSets.at(5).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .build();\n pass.descriptorSets.at(6) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(8));\n pass.descriptorSets.at(6).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .build();\n pass.descriptorSets.at(7) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(9));\n pass.descriptorSets.at(7).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg2)\n .build();\n }\n}\n\nvoid Delta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx,\n bool last) {\n auto& pass = this->passes.at(pass_idx);\n\n // first 
shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg1)\n .addW2R(this->inImg2)\n .addR2W(this->outImg1)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n\n // sixth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addW2R(this->optImg2)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(5).bind(buf);\n pass.sixthDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(5));\n buf.dispatch(threadsX, threadsY, 1);\n\n if (!last)\n return;\n\n // 
seventh shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(6).bind(buf);\n pass.descriptorSets.at(4).bind(buf, this->pipelines.at(6));\n buf.dispatch(threadsX, threadsY, 1);\n\n // eighth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2)\n .build();\n this->pipelines.at(7).bind(buf);\n pass.descriptorSets.at(5).bind(buf, this->pipelines.at(7));\n buf.dispatch(threadsX, threadsY, 1);\n\n // ninth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(8).bind(buf);\n pass.descriptorSets.at(6).bind(buf, this->pipelines.at(8));\n buf.dispatch(threadsX, threadsY, 1);\n\n // tenth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->outImg2)\n .build();\n\n this->pipelines.at(9).bind(buf);\n pass.descriptorSets.at(7).bind(buf, this->pipelines.at(9));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/src/core/pipeline.cpp", "#include \n#include \n\n#include \"core/pipeline.hpp\"\n#include \"core/device.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nPipeline::Pipeline(const Core::Device& device, const ShaderModule& shader) {\n // create pipeline layout\n VkDescriptorSetLayout shaderLayout = shader.getLayout();\n const VkPipelineLayoutCreateInfo layoutDesc{\n .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,\n .setLayoutCount = 1,\n .pSetLayouts = &shaderLayout,\n };\n VkPipelineLayout layoutHandle{};\n auto res = vkCreatePipelineLayout(device.handle(), &layoutDesc, nullptr, &layoutHandle);\n if (res != VK_SUCCESS || !layoutHandle)\n throw LSFG::vulkan_error(res, \"Failed to 
create pipeline layout\");\n\n // create pipeline\n const VkPipelineShaderStageCreateInfo shaderStageInfo{\n .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n .stage = VK_SHADER_STAGE_COMPUTE_BIT,\n .module = shader.handle(),\n .pName = \"main\",\n };\n const VkComputePipelineCreateInfo pipelineDesc{\n .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,\n .stage = shaderStageInfo,\n .layout = layoutHandle,\n };\n VkPipeline pipelineHandle{};\n res = vkCreateComputePipelines(device.handle(),\n VK_NULL_HANDLE, 1, &pipelineDesc, nullptr, &pipelineHandle);\n if (res != VK_SUCCESS || !pipelineHandle)\n throw LSFG::vulkan_error(res, \"Failed to create compute pipeline\");\n\n // store layout and pipeline in shared ptr\n this->layout = std::shared_ptr(\n new VkPipelineLayout(layoutHandle),\n [dev = device.handle()](VkPipelineLayout* layout) {\n vkDestroyPipelineLayout(dev, *layout, nullptr);\n }\n );\n this->pipeline = std::shared_ptr(\n new VkPipeline(pipelineHandle),\n [dev = device.handle()](VkPipeline* pipeline) {\n vkDestroyPipeline(dev, *pipeline, nullptr);\n }\n );\n}\n\nvoid Pipeline::bind(const CommandBuffer& commandBuffer) const {\n vkCmdBindPipeline(commandBuffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, *this->pipeline);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/gamma.cpp", "#include \n#include \n\n#include \"v3_1/shaders/gamma.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nGamma::Gamma(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg(std::move(optImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"gamma[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 9, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, 
VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"gamma[0]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[1]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[2]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[3]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n this->outImg = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / 
static_cast(vk.generationCount + 1),\n !this->optImg.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(2))\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(2))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n 
pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg)\n .build();\n }\n}\n\nvoid Gamma::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .addR2W(this->tempImgs1.at(2))\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->tempImgs1.at(2))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n 
.build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg)\n .addW2R(this->inImg2)\n .addR2W(this->outImg)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/gamma.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/gamma.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nGamma::Gamma(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg(std::move(optImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_gamma[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = 
{{\n vk.shaders.getPipeline(vk.device, \"p_gamma[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 3; i++)\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n for (size_t i = 0; i < 2; i++)\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n\n this->outImg = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n !this->optImg.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n 
this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg)\n .build();\n }\n}\n\nvoid Gamma::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n 
Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg)\n .addW2R(this->inImg2)\n .addR2W(this->outImg)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/src/mini/semaphore.cpp", "#include \"mini/semaphore.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n\nusing namespace Mini;\n\nSemaphore::Semaphore(VkDevice device) {\n // create semaphore\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO\n };\n VkSemaphore semaphoreHandle{};\n auto res = Layer::ovkCreateSemaphore(device, &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n 
throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // store semaphore in shared ptr\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device](VkSemaphore* semaphoreHandle) {\n Layer::ovkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nSemaphore::Semaphore(VkDevice device, int* fd) {\n // create semaphore\n const VkExportSemaphoreCreateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = &exportInfo\n };\n VkSemaphore semaphoreHandle{};\n auto res = Layer::ovkCreateSemaphore(device, &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // export semaphore to fd\n const VkSemaphoreGetFdInfoKHR fdInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,\n .semaphore = semaphoreHandle,\n .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n res = Layer::ovkGetSemaphoreFdKHR(device, &fdInfo, fd);\n if (res != VK_SUCCESS || *fd < 0)\n throw LSFG::vulkan_error(res, \"Unable to export semaphore to fd\");\n\n // store semaphore in shared ptr\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device](VkSemaphore* semaphoreHandle) {\n Layer::ovkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/delta.cpp", "#include \n#include \n\n#include \"v3_1/shaders/delta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nDelta::Delta(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg1,\n std::optional optImg2,\n std::optional optImg3)\n : 
inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg1(std::move(optImg1)), optImg2(std::move(optImg2)),\n optImg3(std::move(optImg3)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"delta[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 9, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[5]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 10, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[6]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[7]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[8]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[9]\",\n { { 1, 
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"delta[0]\"),\n vk.shaders.getPipeline(vk.device, \"delta[1]\"),\n vk.shaders.getPipeline(vk.device, \"delta[2]\"),\n vk.shaders.getPipeline(vk.device, \"delta[3]\"),\n vk.shaders.getPipeline(vk.device, \"delta[4]\"),\n vk.shaders.getPipeline(vk.device, \"delta[5]\"),\n vk.shaders.getPipeline(vk.device, \"delta[6]\"),\n vk.shaders.getPipeline(vk.device, \"delta[7]\"),\n vk.shaders.getPipeline(vk.device, \"delta[8]\"),\n vk.shaders.getPipeline(vk.device, \"delta[9]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n this->outImg1 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n this->outImg2 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n false, !this->optImg1.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n 
pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(2))\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(2))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg1)\n .build();\n for (size_t i = 0; i < 3; i++) {\n pass.sixthDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(5));\n pass.sixthDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(1))\n .build();\n }\n pass.descriptorSets.at(4) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(6));\n pass.descriptorSets.at(4).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(5) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(7));\n pass.descriptorSets.at(5).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n 
.add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(1))\n .build();\n pass.descriptorSets.at(6) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(8));\n pass.descriptorSets.at(6).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(7) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(9));\n pass.descriptorSets.at(7).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg2)\n .build();\n }\n}\n\nvoid Delta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .addR2W(this->tempImgs1.at(2))\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n 
.addW2R(this->tempImgs1.at(2))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg1)\n .addW2R(this->inImg2)\n .addR2W(this->outImg1)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n\n // sixth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addW2R(this->optImg2)\n .addR2W(this->tempImgs2.at(0))\n .addR2W(this->tempImgs2.at(1))\n .build();\n\n this->pipelines.at(5).bind(buf);\n pass.sixthDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(5));\n buf.dispatch(threadsX, threadsY, 1);\n\n // seventh shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2.at(0))\n .addW2R(this->tempImgs2.at(1))\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(6).bind(buf);\n pass.descriptorSets.at(4).bind(buf, this->pipelines.at(6));\n buf.dispatch(threadsX, threadsY, 1);\n\n // eighth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2.at(0))\n .addR2W(this->tempImgs2.at(1))\n .build();\n 
this->pipelines.at(7).bind(buf);\n pass.descriptorSets.at(5).bind(buf, this->pipelines.at(7));\n buf.dispatch(threadsX, threadsY, 1);\n\n // ninth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2.at(0))\n .addW2R(this->tempImgs2.at(1))\n .addW2R(this->optImg3)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(8).bind(buf);\n pass.descriptorSets.at(6).bind(buf, this->pipelines.at(8));\n buf.dispatch(threadsX, threadsY, 1);\n\n // tenth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->optImg3)\n .addR2W(this->outImg2)\n .build();\n\n this->pipelines.at(9).bind(buf);\n pass.descriptorSets.at(7).bind(buf, this->pipelines.at(9));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/src/core/instance.cpp", "#include \n#include \n\n#include \"core/instance.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nconst std::vector requiredExtensions = {\n\n};\n\nInstance::Instance() {\n volkInitialize();\n\n // create Vulkan instance\n const VkApplicationInfo appInfo{\n .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,\n .pApplicationName = \"lsfg-vk-base\",\n .applicationVersion = VK_MAKE_VERSION(0, 0, 1),\n .pEngineName = \"lsfg-vk-base\",\n .engineVersion = VK_MAKE_VERSION(0, 0, 1),\n .apiVersion = VK_API_VERSION_1_3\n };\n const VkInstanceCreateInfo createInfo{\n .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,\n .pApplicationInfo = &appInfo,\n .enabledExtensionCount = static_cast(requiredExtensions.size()),\n .ppEnabledExtensionNames = requiredExtensions.data()\n };\n VkInstance instanceHandle{};\n auto res = vkCreateInstance(&createInfo, nullptr, &instanceHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan instance\");\n\n volkLoadInstance(instanceHandle);\n\n // store in shared ptr\n this->instance = std::shared_ptr(\n new 
VkInstance(instanceHandle),\n [](VkInstance* instance) {\n vkDestroyInstance(*instance, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/mipmaps.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/mipmaps.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/image.hpp\"\n#include \"core/commandbuffer.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nMipmaps::Mipmaps(Vulkan& vk,\n Core::Image inImg_0, Core::Image inImg_1)\n : inImg_0(std::move(inImg_0)), inImg_1(std::move(inImg_1)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"p_mipmaps\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 7, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"p_mipmaps\");\n this->buffer = vk.resources.getBuffer(vk.device);\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 2; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModule);\n\n // create outputs\n const VkExtent2D flowExtent{\n .width = static_cast(\n static_cast(this->inImg_0.getExtent().width) / vk.flowScale),\n .height = static_cast(\n static_cast(this->inImg_0.getExtent().height) / vk.flowScale)\n };\n for (size_t i = 0; i < 7; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { flowExtent.width >> i, flowExtent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t fc = 0; fc < 2; fc++)\n this->descriptorSets.at(fc).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, (fc % 2 == 0) ? 
this->inImg_0 : this->inImg_1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Mipmaps::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto flowExtent = this->outImgs.at(0).getExtent();\n const uint32_t threadsX = (flowExtent.width + 63) >> 6;\n const uint32_t threadsY = (flowExtent.height + 63) >> 6;\n\n Utils::BarrierBuilder(buf)\n .addW2R((frameCount % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .addR2W(this->outImgs)\n .build();\n\n this->pipeline.bind(buf);\n this->descriptorSets.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/mipmaps.cpp", "#include \n#include \n\n#include \"v3_1/shaders/mipmaps.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/image.hpp\"\n#include \"core/commandbuffer.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nMipmaps::Mipmaps(Vulkan& vk,\n Core::Image inImg_0, Core::Image inImg_1)\n : inImg_0(std::move(inImg_0)), inImg_1(std::move(inImg_1)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"mipmaps\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 7, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"mipmaps\");\n this->buffer = vk.resources.getBuffer(vk.device);\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 2; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModule);\n\n // create outputs\n const VkExtent2D flowExtent{\n .width = static_cast(\n static_cast(this->inImg_0.getExtent().width) / vk.flowScale),\n .height = static_cast(\n static_cast(this->inImg_0.getExtent().height) / vk.flowScale)\n };\n for (size_t i = 0; i < 7; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { flowExtent.width >> i, 
flowExtent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t fc = 0; fc < 2; fc++)\n this->descriptorSets.at(fc).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, (fc % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Mipmaps::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto flowExtent = this->outImgs.at(0).getExtent();\n const uint32_t threadsX = (flowExtent.width + 63) >> 6;\n const uint32_t threadsY = (flowExtent.height + 63) >> 6;\n\n Utils::BarrierBuilder(buf)\n .addW2R((frameCount % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .addR2W(this->outImgs)\n .build();\n\n this->pipeline.bind(buf);\n this->descriptorSets.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/thirdparty/toml11/tools/expand/main.cpp", "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nstd::optional\nis_include(const std::string& line, const std::filesystem::path& filepath)\n{\n // [ws] # [ws] include [ws] \\\".+\\\"\n auto iter = line.begin();\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != '#') {return std::nullopt;}\n\n assert(*iter == '#');\n ++iter;\n\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != 'i') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'n') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'c') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'l') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'u') {return std::nullopt;} else {++iter;}\n 
if(iter == line.end() || *iter != 'd') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'e') {return std::nullopt;} else {++iter;}\n\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != '\"') {return std::nullopt;} else {++iter;}\n\n std::string filename;\n while(iter < line.end())\n {\n if(*iter == '\"') {break;}\n filename += *iter;\n ++iter;\n }\n if(iter == line.end() || *iter != '\"') {return std::nullopt;} else {++iter;}\n\n return std::filesystem::canonical(filepath.parent_path() / std::filesystem::path(filename));\n}\n\nstruct File\n{\n File() = default;\n\n explicit File(std::filesystem::path f)\n : filename(std::move(f))\n {\n std::ifstream ifs(filename);\n if( ! ifs.good())\n {\n throw std::runtime_error(\"file open error: \" + filename.string());\n }\n\n std::string line;\n while(std::getline(ifs, line))\n {\n if(const auto incl = is_include(line, filename))\n {\n includes.push_back(incl.value());\n }\n else\n {\n content.push_back(line);\n }\n }\n }\n\n File(std::filesystem::path f, std::vector c,\n std::vector i)\n : filename(std::move(f)), content(std::move(c)), includes(std::move(i))\n {}\n\n std::filesystem::path filename;\n std::vector content; // w/o include\n std::vector includes;\n};\n\nstruct Graph\n{\n struct Node\n {\n std::vector included;\n std::vector includes;\n };\n\n std::map nodes;\n};\n\nint main(int argc, char** argv)\n{\n using namespace std::literals::string_literals;\n if(argc != 2)\n {\n std::cerr << \"Usage: ./a.out path/to/toml.hpp > single_include/toml.hpp\" << std::endl;\n return 1;\n }\n\n const auto input_file = std::filesystem::path(std::string(argv[1]));\n assert(input_file.filename() == \"toml.hpp\");\n\n const auto include_path = input_file.parent_path();\n\n // -------------------------------------------------------------------------\n // load files and detect `include \"xxx.hpp\"`.\n // If the file has `_fwd` 
and `_impl`, expand those files first.\n\n std::set fwd_impl_files;\n for(const auto& entities : std::filesystem::directory_iterator(include_path/\"toml11\"/\"fwd\"))\n {\n if( ! entities.is_regular_file()) {continue;}\n std::string fname = entities.path().filename().string();\n if(fname.ends_with(\"_fwd.hpp\"))\n {\n for(const auto c : \"_fwd.hpp\"s) {fname.pop_back(); (void)c;}\n fwd_impl_files.insert(std::move(fname));\n }\n }\n for(const auto& entities : std::filesystem::directory_iterator(include_path/\"toml11\"/\"impl\"))\n {\n if( ! entities.is_regular_file()) {continue;}\n std::string fname = entities.path().filename().string();\n if(fname.ends_with(\"_impl.hpp\"))\n {\n for(const auto c : \"_impl.hpp\"s) {fname.pop_back(); (void)c;}\n // all impl files has fwd file\n assert(fwd_impl_files.contains(fname));\n }\n }\n\n const auto input = File(input_file);\n\n std::map files;\n files[input_file] = input;\n\n for(const auto& fname : input.includes)\n {\n if(fwd_impl_files.contains(fname.stem().string()))\n {\n std::cerr << \"expanding fwd/impl file of \" << fname.string() << std::endl;\n\n // expand the first include\n std::ifstream ifs(fname);\n\n std::vector content;\n std::vector includes;\n\n std::string line;\n while(std::getline(ifs, line))\n {\n // expand _fwd and _impl files first.\n const auto incl = is_include(line, fname);\n if(incl.has_value())\n {\n // if a file has _fwd/_impl files, it only includes fwd/impl files.\n assert(incl.value().string().ends_with(\"_impl.hpp\") ||\n incl.value().string().ends_with(\"_fwd.hpp\") );\n\n const File included(incl.value());\n for(const auto& l : included.content)\n {\n content.push_back(l);\n }\n for(const auto& i : included.includes)\n {\n includes.push_back(i);\n }\n }\n else\n {\n content.push_back(line);\n }\n }\n files[fname] = File(fname, std::move(content), std::move(includes));\n }\n else\n {\n files[fname] = File(fname);\n }\n std::cerr << \"file \" << fname << \" has \" << 
files.at(fname).content.size() << \" lines.\" << std::endl;\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"files have been read. next: constructing dependency graph...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // construct dependency graph\n\n Graph g;\n for(const auto& [k, v] : files)\n {\n g.nodes[k] = Graph::Node{};\n }\n\n for(const auto& [fname, file] : files)\n {\n for(auto incl : file.includes)\n {\n auto incl_stem = incl.stem().string();\n if(incl_stem.ends_with(\"_fwd\"))\n {\n for(const char c : \"_fwd\"s) {incl_stem.pop_back(); (void)c;}\n\n // include original file instaed\n incl = incl.parent_path() / \"..\" / std::filesystem::path(incl_stem + \".hpp\");\n }\n else if(incl_stem.ends_with(\"_impl\"))\n {\n for(const char c : \"_impl\"s) {incl_stem.pop_back(); (void)c;}\n\n // include original file instaed\n incl = incl.parent_path() / \"..\" / std::filesystem::path(incl_stem + \".hpp\");\n }\n incl = std::filesystem::canonical(incl);\n\n // avoid self include loop\n if(fname != incl)\n {\n std::cerr << fname << \" includes \" << incl << std::endl;\n\n g.nodes.at(fname).includes.push_back(incl);\n g.nodes.at(incl) .included.push_back(fname);\n }\n }\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"graph has been constructed. flattening...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // flatten graph by topological sort\n\n // collect files that does not include\n std::vector sources;\n for(const auto& [fname, node] : g.nodes)\n {\n if(node.includes.empty())\n {\n sources.push_back(fname);\n }\n }\n assert( ! sources.empty());\n\n std::vector sorted;\n while( ! 
sources.empty())\n {\n const auto file = sources.back();\n sorted.push_back(sources.back());\n sources.pop_back();\n\n for(const auto& included : g.nodes.at(file).included)\n {\n auto found = std::find(g.nodes.at(included).includes.begin(),\n g.nodes.at(included).includes.end(), file);\n g.nodes.at(included).includes.erase(found);\n\n if(g.nodes.at(included).includes.empty())\n {\n sources.push_back(included);\n }\n }\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"flattened. outputting...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // output all the file in the sorted order\n\n for(const auto& fname : sorted)\n {\n std::cerr << \"expanding: \" << fname << std::endl;\n for(const auto& line : files.at(fname).content)\n {\n std::cout << line << '\\n';\n }\n }\n\n return 0;\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/alpha.cpp", "#include \n#include \n\n#include \"v3_1/shaders/alpha.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nAlpha::Alpha(Vulkan& vk, Core::Image inImg) : inImg(std::move(inImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"alpha[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER 
},\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"alpha[0]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[1]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[2]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[3]\")\n }};\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 3; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i));\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(3));\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg.getExtent();\n const VkExtent2D halfExtent = {\n .width = (extent.width + 1) >> 1,\n .height = (extent.height + 1) >> 1\n };\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, halfExtent);\n this->tempImgs2.at(i) = Core::Image(vk.device, halfExtent);\n }\n\n const VkExtent2D quarterExtent = {\n .width = (halfExtent.width + 1) >> 1,\n .height = (halfExtent.height + 1) >> 1\n };\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs3.at(i) = Core::Image(vk.device, quarterExtent);\n for (size_t j = 0; j < 3; j++)\n this->outImgs.at(j).at(i) = Core::Image(vk.device, quarterExtent);\n }\n\n // hook up shaders\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n 
.add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs3)\n .build();\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n}\n\nvoid Alpha::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto halfExtent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (halfExtent.width + 7) >> 3;\n uint32_t threadsY = (halfExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n const auto quarterExtent = this->tempImgs3.at(0).getExtent();\n threadsX = (quarterExtent.width + 7) >> 3;\n threadsY = (quarterExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs3)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs3)\n .addR2W(this->outImgs.at(frameCount % 3))\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->lastDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/lsfg.cpp", "#include \n#include \n\n#include \"lsfg_3_1p.hpp\"\n#include \"v3_1p/context.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include 
\"core/instance.hpp\"\n#include \"pool/shaderpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"common/utils.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1P;\n\nnamespace {\n std::optional instance;\n std::optional device;\n std::unordered_map contexts;\n}\n\nvoid LSFG_3_1P::initialize(uint64_t deviceUUID,\n bool isHdr, float flowScale, uint64_t generationCount,\n const std::function(const std::string&)>& loader) {\n if (instance.has_value() || device.has_value())\n return;\n\n instance.emplace();\n device.emplace(Vulkan {\n .device{*instance, deviceUUID},\n .generationCount = generationCount,\n .flowScale = flowScale,\n .isHdr = isHdr\n });\n contexts = std::unordered_map();\n\n device->commandPool = Core::CommandPool(device->device);\n device->descriptorPool = Core::DescriptorPool(device->device);\n\n device->resources = Pool::ResourcePool(device->isHdr, device->flowScale);\n device->shaders = Pool::ShaderPool(loader);\n\n std::srand(static_cast(std::time(nullptr)));\n}\n\nint32_t LSFG_3_1P::createContext(\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n const int32_t id = std::rand();\n contexts.emplace(id, Context(*device, in0, in1, outN, extent, format));\n return id;\n}\n\nvoid LSFG_3_1P::presentContext(int32_t id, int inSem, const std::vector& outSem) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Context not found\");\n\n it->second.present(*device, inSem, outSem);\n}\n\nvoid LSFG_3_1P::deleteContext(int32_t id) {\n if (!instance.has_value() || !device.has_value())\n throw 
LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_DEVICE_LOST, \"No such context\");\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.erase(it);\n}\n\nvoid LSFG_3_1P::finalize() {\n if (!instance.has_value() || !device.has_value())\n return;\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.clear();\n device.reset();\n instance.reset();\n}\n"], ["/lsfg-vk/framegen/v3.1_src/lsfg.cpp", "#include \n#include \n\n#include \"lsfg_3_1.hpp\"\n#include \"v3_1/context.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/instance.hpp\"\n#include \"pool/shaderpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"common/utils.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1;\n\nnamespace {\n std::optional instance;\n std::optional device;\n std::unordered_map contexts;\n}\n\nvoid LSFG_3_1::initialize(uint64_t deviceUUID,\n bool isHdr, float flowScale, uint64_t generationCount,\n const std::function(const std::string&)>& loader) {\n if (instance.has_value() || device.has_value())\n return;\n\n instance.emplace();\n device.emplace(Vulkan {\n .device{*instance, deviceUUID},\n .generationCount = generationCount,\n .flowScale = flowScale,\n .isHdr = isHdr\n });\n contexts = std::unordered_map();\n\n device->commandPool = Core::CommandPool(device->device);\n device->descriptorPool = Core::DescriptorPool(device->device);\n\n device->resources = Pool::ResourcePool(device->isHdr, device->flowScale);\n device->shaders = Pool::ShaderPool(loader);\n\n std::srand(static_cast(std::time(nullptr)));\n}\n\nint32_t LSFG_3_1::createContext(\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n if (!instance.has_value() || !device.has_value())\n throw 
LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n const int32_t id = std::rand();\n contexts.emplace(id, Context(*device, in0, in1, outN, extent, format));\n return id;\n}\n\nvoid LSFG_3_1::presentContext(int32_t id, int inSem, const std::vector& outSem) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Context not found\");\n\n it->second.present(*device, inSem, outSem);\n}\n\nvoid LSFG_3_1::deleteContext(int32_t id) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_DEVICE_LOST, \"No such context\");\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.erase(it);\n}\n\nvoid LSFG_3_1::finalize() {\n if (!instance.has_value() || !device.has_value())\n return;\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.clear();\n device.reset();\n instance.reset();\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/alpha.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/alpha.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nAlpha::Alpha(Vulkan& vk, Core::Image inImg) : inImg(std::move(inImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_alpha[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, 
\"p_alpha[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_alpha[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[3]\")\n }};\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 3; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i));\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(3));\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg.getExtent();\n const VkExtent2D halfExtent = {\n .width = (extent.width + 1) >> 1,\n .height = (extent.height + 1) >> 1\n };\n this->tempImg1 = Core::Image(vk.device, halfExtent);\n this->tempImg2 = Core::Image(vk.device, halfExtent);\n\n const VkExtent2D quarterExtent = {\n .width = (halfExtent.width + 1) >> 1,\n .height = (halfExtent.height + 1) >> 1\n };\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs3.at(i) = Core::Image(vk.device, quarterExtent);\n for (size_t j = 0; j < 3; j++)\n this->outImgs.at(j).at(i) = Core::Image(vk.device, quarterExtent);\n }\n\n // hook up shaders\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImg1)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImg1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 
this->tempImg2)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs3)\n .build();\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n}\n\nvoid Alpha::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto halfExtent = this->tempImg1.getExtent();\n uint32_t threadsX = (halfExtent.width + 7) >> 3;\n uint32_t threadsY = (halfExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg)\n .addR2W(this->tempImg1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImg1)\n .addR2W(this->tempImg2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n const auto quarterExtent = this->tempImgs3.at(0).getExtent();\n threadsX = (quarterExtent.width + 7) >> 3;\n threadsY = (quarterExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImg2)\n .addR2W(this->tempImgs3)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs3)\n .addR2W(this->outImgs.at(frameCount % 3))\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->lastDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], 
["/lsfg-vk/framegen/v3.1p_src/shaders/beta.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/beta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nBeta::Beta(Vulkan& vk, std::array, 3> inImgs)\n : inImgs(std::move(inImgs)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_beta[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 6, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_beta[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n for (size_t i = 0; i < 3; i++)\n this->firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(0));\n for (size_t i = 
0; i < 4; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i + 1));\n this->buffer = vk.resources.getBuffer(vk.device, 0.5F);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs.at(0).at(0).getExtent();\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n for (size_t i = 0; i < 6; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { extent.width >> i, extent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t i = 0; i < 3; i++) {\n this->firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 1) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 
this->outImgs)\n .build();\n}\n\nvoid Beta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto extent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (extent.width + 7) >> 3;\n uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs.at(0))\n .addW2R(this->inImgs.at(1))\n .addW2R(this->inImgs.at(2))\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth pass\n threadsX = (extent.width + 31) >> 5;\n threadsY = (extent.height + 31) >> 5;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->outImgs)\n .build();\n\n this->pipelines.at(4).bind(buf);\n this->descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/beta.cpp", "#include \n#include \n\n#include \"v3_1/shaders/beta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nBeta::Beta(Vulkan& 
vk, std::array, 3> inImgs)\n : inImgs(std::move(inImgs)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"beta[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 12, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 6, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"beta[0]\"),\n vk.shaders.getPipeline(vk.device, \"beta[1]\"),\n vk.shaders.getPipeline(vk.device, \"beta[2]\"),\n vk.shaders.getPipeline(vk.device, \"beta[3]\"),\n vk.shaders.getPipeline(vk.device, \"beta[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n for (size_t i = 0; i < 3; i++)\n this->firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(0));\n for (size_t i = 0; i < 4; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i + 1));\n this->buffer = vk.resources.getBuffer(vk.device, 0.5F);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs.at(0).at(0).getExtent();\n for (size_t i = 0; i < 2; i++) {\n 
this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n for (size_t i = 0; i < 6; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { extent.width >> i, extent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t i = 0; i < 3; i++) {\n this->firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 1) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Beta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto extent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (extent.width + 7) >> 3;\n uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n 
.addW2R(this->inImgs.at(0))\n .addW2R(this->inImgs.at(1))\n .addW2R(this->inImgs.at(2))\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth pass\n threadsX = (extent.width + 31) >> 5;\n threadsY = (extent.height + 31) >> 5;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->outImgs)\n .build();\n\n this->pipelines.at(4).bind(buf);\n this->descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/src/pool/resourcepool.cpp", "#include \"pool/resourcepool.hpp\"\n#include \"core/buffer.hpp\"\n#include \"core/device.hpp\"\n#include \"core/sampler.hpp\"\n\n#include \n\n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Pool;\n\nstruct ConstantBuffer {\n std::array inputOffset;\n uint32_t firstIter;\n uint32_t firstIterS;\n uint32_t advancedColorKind;\n uint32_t hdrSupport;\n float resolutionInvScale;\n float timestamp;\n float uiThreshold;\n std::array pad;\n};\n\nCore::Buffer ResourcePool::getBuffer(\n const Core::Device& device,\n float timestamp, bool firstIter, bool 
firstIterS) {\n uint64_t hash = 0;\n const union { float f; uint32_t i; } u{\n .f = timestamp };\n hash |= u.i;\n hash |= static_cast(firstIter) << 32;\n hash |= static_cast(firstIterS) << 33;\n\n auto it = buffers.find(hash);\n if (it != buffers.end())\n return it->second;\n\n // create the buffer\n const ConstantBuffer data{\n .inputOffset = { 0, 0 },\n .advancedColorKind = this->isHdr ? 2U : 0U,\n .hdrSupport = this->isHdr,\n .resolutionInvScale = this->flowScale,\n .timestamp = timestamp,\n .uiThreshold = 0.5F,\n };\n Core::Buffer buffer(device, data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);\n buffers[hash] = buffer;\n return buffer;\n}\n\nCore::Sampler ResourcePool::getSampler(\n const Core::Device& device,\n VkSamplerAddressMode type,\n VkCompareOp compare,\n bool isWhite) {\n uint64_t hash = 0;\n hash |= static_cast(type) << 0;\n hash |= static_cast(compare) << 8;\n hash |= static_cast(isWhite) << 16;\n\n auto it = samplers.find(hash);\n if (it != samplers.end())\n return it->second;\n\n // create the sampler\n Core::Sampler sampler(device, type, compare, isWhite);\n samplers[hash] = sampler;\n return sampler;\n}\n"], ["/lsfg-vk/src/mini/commandpool.cpp", "#include \"mini/commandpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n\nusing namespace Mini;\n\nCommandPool::CommandPool(VkDevice device, uint32_t graphicsFamilyIdx) {\n // create command pool\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = graphicsFamilyIdx\n };\n VkCommandPool commandPoolHandle{};\n auto res = Layer::ovkCreateCommandPool(device, &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n\n // store command pool in shared ptr\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device](VkCommandPool* commandPoolHandle) {\n 
Layer::ovkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_module.h", "class generates {\n public:\n explicit SpirvModule(uint32_t version) {\n this->instImportGlsl450();\n }\n ~SpirvModule() {\n \n }\n SpirvCodeBuffer compile() {\n SpirvCodeBuffer result;\n result.putHeader(m_version, m_id);\n result.append(m_capabilities);\n result.append(m_extensions);\n result.append(m_instExt);\n result.append(m_memoryModel);\n result.append(m_entryPoints);\n result.append(m_execModeInfo);\n result.append(m_debugNames);\n result.append(m_annotations);\n result.append(m_typeConstDefs);\n result.append(m_variables);\n\n // Perform some crude dead code elimination. In some cases, our compilers\n // may emit invalid code, such as an unreachable block branching to a loop's\n // continue block, but those cases cannot be reasonably detected up-front.\n std::unordered_set reachableBlocks;\n std::unordered_set mergeBlocks;\n\n classifyBlocks(reachableBlocks, mergeBlocks);\n\n bool reachable = true;\n\n for (auto ins : m_code) {\n if (ins.opCode() == spv::OpFunctionEnd) {\n reachable = true;\n result.append(ins);\n } else if (ins.opCode() == spv::OpLabel) {\n uint32_t labelId = ins.arg(1);\n\n if ((reachable = reachableBlocks.find(labelId) != reachableBlocks.end())) {\n result.append(ins);\n } else if (mergeBlocks.find(labelId) != mergeBlocks.end()) {\n result.append(ins);\n result.putIns(spv::OpUnreachable, 1);\n }\n } else if (reachable) {\n result.append(ins);\n }\n }\n\n return result;\n }\n uint32_t allocateId() {\n return m_id++;\n }\n bool hasCapability(\n spv::Capability capability) {\n for (auto ins : m_capabilities) {\n if (ins.opCode() == spv::OpCapability && ins.arg(1) == capability)\n return true;\n }\n\n return false;\n }\n void enableCapability(\n spv::Capability capability) {\n // Scan the generated instructions to check\n // whether we already enabled the capability.\n if (!hasCapability(capability)) 
{\n m_capabilities.putIns (spv::OpCapability, 2);\n m_capabilities.putWord(capability);\n }\n }\n void enableExtension(\n const char* extensionName) {\n m_extensions.putIns (spv::OpExtension, 1 + m_extensions.strLen(extensionName));\n m_extensions.putStr (extensionName);\n }\n void addEntryPoint(\n uint32_t entryPointId,\n spv::ExecutionModel executionModel,\n const char* name) {\n m_entryPoints.putIns (spv::OpEntryPoint, 3 + m_entryPoints.strLen(name) + m_interfaceVars.size());\n m_entryPoints.putWord (executionModel);\n m_entryPoints.putWord (entryPointId);\n m_entryPoints.putStr (name);\n \n for (uint32_t varId : m_interfaceVars)\n m_entryPoints.putWord(varId);\n }\n void setMemoryModel(\n spv::AddressingModel addressModel,\n spv::MemoryModel memoryModel) {\n m_memoryModel.putIns (spv::OpMemoryModel, 3);\n m_memoryModel.putWord (addressModel);\n m_memoryModel.putWord (memoryModel);\n }\n void setExecutionMode(\n uint32_t entryPointId,\n spv::ExecutionMode executionMode) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 3 + argCount);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(executionMode);\n\n for (uint32_t i = 0; i < argCount; i++)\n m_execModeInfo.putWord(args[i]);\n }\n void setExecutionMode(\n uint32_t entryPointId,\n spv::ExecutionMode executionMode,\n uint32_t argCount,\n const uint32_t* args) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 3 + argCount);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(executionMode);\n\n for (uint32_t i = 0; i < argCount; i++)\n m_execModeInfo.putWord(args[i]);\n }\n void setInvocations(\n uint32_t entryPointId,\n uint32_t invocations) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 4);\n m_execModeInfo.putWord (entryPointId);\n m_execModeInfo.putWord (spv::ExecutionModeInvocations);\n m_execModeInfo.putInt32(invocations);\n }\n void setLocalSize(\n uint32_t entryPointId,\n uint32_t x,\n uint32_t y,\n uint32_t z) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 6);\n 
m_execModeInfo.putWord (entryPointId);\n m_execModeInfo.putWord (spv::ExecutionModeLocalSize);\n m_execModeInfo.putInt32(x);\n m_execModeInfo.putInt32(y);\n m_execModeInfo.putInt32(z);\n }\n void setOutputVertices(\n uint32_t entryPointId,\n uint32_t vertexCount) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 4);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(spv::ExecutionModeOutputVertices);\n m_execModeInfo.putWord(vertexCount);\n }\n uint32_t addDebugString(\n const char* string) {\n uint32_t resultId = this->allocateId();\n \n m_debugNames.putIns (spv::OpString,\n 2 + m_debugNames.strLen(string));\n m_debugNames.putWord(resultId);\n m_debugNames.putStr (string);\n return resultId;\n }\n void setDebugSource(\n spv::SourceLanguage language,\n uint32_t version,\n uint32_t file,\n const char* source) {\n uint32_t strLen = source != nullptr\n ? m_debugNames.strLen(source) : 0;\n \n m_debugNames.putIns (spv::OpSource, 4 + strLen);\n m_debugNames.putWord(language);\n m_debugNames.putWord(version);\n m_debugNames.putWord(file);\n \n if (source != nullptr)\n m_debugNames.putStr(source);\n }\n void setDebugName(\n uint32_t expressionId,\n const char* debugName) {\n m_debugNames.putIns (spv::OpName, 2 + m_debugNames.strLen(debugName));\n m_debugNames.putWord(expressionId);\n m_debugNames.putStr (debugName);\n }\n void setDebugMemberName(\n uint32_t structId,\n uint32_t memberId,\n const char* debugName) {\n m_debugNames.putIns (spv::OpMemberName, 3 + m_debugNames.strLen(debugName));\n m_debugNames.putWord(structId);\n m_debugNames.putWord(memberId);\n m_debugNames.putStr (debugName);\n }\n uint32_t constBool(\n bool v) {\n return this->defConst(v\n ? 
spv::OpConstantTrue\n : spv::OpConstantFalse,\n this->defBoolType(),\n 0, nullptr);\n }\n uint32_t consti32(\n int32_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(32, 1),\n data.size(),\n data.data());\n }\n uint32_t consti64(\n int64_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(64, 1),\n data.size(),\n data.data());\n }\n uint32_t constu32(\n uint32_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(32, 0),\n data.size(),\n data.data());\n }\n uint32_t constu64(\n uint64_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(64, 0),\n data.size(),\n data.data());\n }\n uint32_t constf32(\n float v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defFloatType(32),\n data.size(),\n data.data());\n }\n uint32_t constf64(\n double v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defFloatType(64),\n data.size(),\n data.data());\n }\n uint32_t constvec4i32(\n int32_t x,\n int32_t y,\n int32_t z,\n int32_t w) {\n std::array args = {{\n this->consti32(x), this->consti32(y),\n this->consti32(z), this->consti32(w),\n }};\n \n uint32_t scalarTypeId = this->defIntType(32, 1);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4b32(\n bool x,\n bool y,\n bool z,\n bool w) {\n std::array args = {{\n this->constBool(x), this->constBool(y),\n this->constBool(z), this->constBool(w),\n }};\n \n uint32_t scalarTypeId = this->defBoolType();\n uint32_t vectorTypeId = 
this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4u32(\n uint32_t x,\n uint32_t y,\n uint32_t z,\n uint32_t w) {\n std::array args = {{\n this->constu32(x), this->constu32(y),\n this->constu32(z), this->constu32(w),\n }};\n \n uint32_t scalarTypeId = this->defIntType(32, 0);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec2f32(\n float x,\n float y) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 2);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec3f32(\n float x,\n float y,\n float z) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n this->constf32(z),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 3);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4f32(\n float x,\n float y,\n float z,\n float w) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n this->constf32(z), this->constf32(w),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constfReplicant(\n float replicant,\n uint32_t count) {\n uint32_t value = this->constf32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t 
constbReplicant(\n bool replicant,\n uint32_t count) {\n uint32_t value = this->constBool(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defBoolType();\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constiReplicant(\n int32_t replicant,\n uint32_t count) {\n uint32_t value = this->consti32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defIntType(32, 1);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constuReplicant(\n int32_t replicant,\n uint32_t count) {\n uint32_t value = this->constu32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defIntType(32, 0);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constComposite(\n uint32_t typeId,\n uint32_t constCount,\n const uint32_t* constIds) {\n return this->defConst(\n spv::OpConstantComposite,\n typeId, constCount, constIds);\n }\n uint32_t constUndef(\n uint32_t typeId) {\n return this->defConst(spv::OpUndef,\n typeId, 0, nullptr);\n }\n uint32_t constNull(\n uint32_t typeId) {\n return this->defConst(spv::OpConstantNull,\n typeId, 0, nullptr);\n }\n uint32_t lateConst32(\n uint32_t typeId) {\n uint32_t resultId = this->allocateId();\n m_lateConsts.insert(resultId);\n\n m_typeConstDefs.putIns (spv::OpConstant, 4);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(0);\n return resultId;\n 
}\n void setLateConst(\n uint32_t constId,\n const uint32_t* argIds) {\n for (auto ins : m_typeConstDefs) {\n if (ins.opCode() != spv::OpConstant\n && ins.opCode() != spv::OpConstantComposite)\n continue;\n \n if (ins.arg(2) != constId)\n continue;\n\n for (uint32_t i = 3; i < ins.length(); i++)\n ins.setArg(i, argIds[i - 3]);\n\n return;\n }\n }\n uint32_t specConstBool(\n bool v) {\n uint32_t typeId = this->defBoolType();\n uint32_t resultId = this->allocateId();\n \n const spv::Op op = v\n ? spv::OpSpecConstantTrue\n : spv::OpSpecConstantFalse;\n \n m_typeConstDefs.putIns (op, 3);\n m_typeConstDefs.putWord (typeId);\n m_typeConstDefs.putWord (resultId);\n return resultId;\n }\n uint32_t specConst32(\n uint32_t typeId,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpSpecConstant, 4);\n m_typeConstDefs.putWord (typeId);\n m_typeConstDefs.putWord (resultId);\n m_typeConstDefs.putWord (value);\n return resultId;\n }\n void decorate(\n uint32_t object,\n spv::Decoration decoration) {\n m_annotations.putIns (spv::OpDecorate, 3);\n m_annotations.putWord (object);\n m_annotations.putWord (decoration);\n }\n void decorateArrayStride(\n uint32_t object,\n uint32_t stride) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationArrayStride);\n m_annotations.putInt32(stride);\n }\n void decorateBinding(\n uint32_t object,\n uint32_t binding) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBinding);\n m_annotations.putInt32(binding);\n }\n void decorateBlock(\n uint32_t object) {\n m_annotations.putIns (spv::OpDecorate, 3);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBlock);\n }\n void decorateBuiltIn(\n uint32_t object,\n spv::BuiltIn builtIn) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord 
(spv::DecorationBuiltIn);\n m_annotations.putWord (builtIn);\n }\n void decorateComponent(\n uint32_t object,\n uint32_t location) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationComponent);\n m_annotations.putInt32(location);\n }\n void decorateDescriptorSet(\n uint32_t object,\n uint32_t set) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationDescriptorSet);\n m_annotations.putInt32(set);\n }\n void decorateIndex(\n uint32_t object,\n uint32_t index) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationIndex);\n m_annotations.putInt32(index);\n }\n void decorateLocation(\n uint32_t object,\n uint32_t location) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationLocation);\n m_annotations.putInt32(location);\n }\n void decorateSpecId(\n uint32_t object,\n uint32_t specId) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationSpecId);\n m_annotations.putInt32(specId);\n }\n void decorateXfb(\n uint32_t object,\n uint32_t streamId,\n uint32_t bufferId,\n uint32_t offset,\n uint32_t stride) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationStream);\n m_annotations.putInt32(streamId);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationXfbBuffer);\n m_annotations.putInt32(bufferId);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationXfbStride);\n m_annotations.putInt32(stride);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationOffset);\n 
m_annotations.putInt32(offset);\n }\n void memberDecorateBuiltIn(\n uint32_t structId,\n uint32_t memberId,\n spv::BuiltIn builtIn) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationBuiltIn);\n m_annotations.putWord (builtIn);\n }\n void memberDecorate(\n uint32_t structId,\n uint32_t memberId,\n spv::Decoration decoration) {\n m_annotations.putIns (spv::OpMemberDecorate, 4);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (decoration);\n }\n void memberDecorateMatrixStride(\n uint32_t structId,\n uint32_t memberId,\n uint32_t stride) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationMatrixStride);\n m_annotations.putWord (stride);\n }\n void memberDecorateOffset(\n uint32_t structId,\n uint32_t memberId,\n uint32_t offset) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationOffset);\n m_annotations.putWord (offset);\n }\n uint32_t defVoidType() {\n return this->defType(spv::OpTypeVoid, 0, nullptr);\n }\n uint32_t defBoolType() {\n return this->defType(spv::OpTypeBool, 0, nullptr);\n }\n uint32_t defIntType(\n uint32_t width,\n uint32_t isSigned) {\n std::array args = {{ width, isSigned }};\n return this->defType(spv::OpTypeInt,\n args.size(), args.data());\n }\n uint32_t defFloatType(\n uint32_t width) {\n std::array args = {{ width }};\n return this->defType(spv::OpTypeFloat,\n args.size(), args.data());\n }\n uint32_t defVectorType(\n uint32_t elementType,\n uint32_t elementCount) {\n std::array args =\n {{ elementType, elementCount }};\n \n return this->defType(spv::OpTypeVector,\n args.size(), args.data());\n }\n uint32_t defMatrixType(\n uint32_t columnType,\n uint32_t 
columnCount) {\n std::array args =\n {{ columnType, columnCount }};\n \n return this->defType(spv::OpTypeMatrix,\n args.size(), args.data());\n }\n uint32_t defArrayType(\n uint32_t typeId,\n uint32_t length) {\n std::array args = {{ typeId, length }};\n \n return this->defType(spv::OpTypeArray,\n args.size(), args.data());\n }\n uint32_t defArrayTypeUnique(\n uint32_t typeId,\n uint32_t length) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeArray, 4);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(length);\n return resultId;\n }\n uint32_t defRuntimeArrayType(\n uint32_t typeId) {\n std::array args = { typeId };\n \n return this->defType(spv::OpTypeRuntimeArray,\n args.size(), args.data());\n }\n uint32_t defRuntimeArrayTypeUnique(\n uint32_t typeId) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeRuntimeArray, 3);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(typeId);\n return resultId;\n }\n uint32_t defFunctionType(\n uint32_t returnType,\n uint32_t argCount,\n const uint32_t* argTypes) {\n std::vector args;\n args.push_back(returnType);\n \n for (uint32_t i = 0; i < argCount; i++)\n args.push_back(argTypes[i]);\n \n return this->defType(spv::OpTypeFunction,\n args.size(), args.data());\n }\n uint32_t defStructType(\n uint32_t memberCount,\n const uint32_t* memberTypes) {\n return this->defType(spv::OpTypeStruct,\n memberCount, memberTypes);\n }\n uint32_t defStructTypeUnique(\n uint32_t memberCount,\n const uint32_t* memberTypes) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeStruct, 2 + memberCount);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < memberCount; i++)\n m_typeConstDefs.putWord(memberTypes[i]);\n return resultId;\n }\n uint32_t defPointerType(\n uint32_t variableType,\n spv::StorageClass storageClass) {\n std::array args = {{\n 
static_cast(storageClass),\n variableType,\n }};\n \n return this->defType(spv::OpTypePointer,\n args.size(), args.data());\n }\n uint32_t defSamplerType() {\n return this->defType(spv::OpTypeSampler, 0, nullptr);\n }\n uint32_t defImageType(\n uint32_t sampledType,\n spv::Dim dimensionality,\n uint32_t depth,\n uint32_t arrayed,\n uint32_t multisample,\n uint32_t sampled,\n spv::ImageFormat format) {\n std::array args = {{\n sampledType,\n static_cast(dimensionality),\n depth, arrayed,\n multisample,\n sampled,\n static_cast(format)\n }};\n \n return this->defType(spv::OpTypeImage,\n args.size(), args.data());\n }\n uint32_t defSampledImageType(\n uint32_t imageType) {\n return this->defType(spv::OpTypeSampledImage, 1, &imageType);\n }\n uint32_t newVar(\n uint32_t pointerType,\n spv::StorageClass storageClass) {\n uint32_t resultId = this->allocateId();\n \n if (isInterfaceVar(storageClass))\n m_interfaceVars.push_back(resultId);\n\n auto& code = storageClass != spv::StorageClassFunction\n ? m_variables : m_code;\n\n code.putIns (spv::OpVariable, 4);\n code.putWord (pointerType);\n code.putWord (resultId);\n code.putWord (storageClass);\n return resultId;\n }\n uint32_t newVarInit(\n uint32_t pointerType,\n spv::StorageClass storageClass,\n uint32_t initialValue) {\n uint32_t resultId = this->allocateId();\n \n if (isInterfaceVar(storageClass))\n m_interfaceVars.push_back(resultId);\n\n auto& code = storageClass != spv::StorageClassFunction\n ? 
m_variables : m_code;\n \n code.putIns (spv::OpVariable, 5);\n code.putWord (pointerType);\n code.putWord (resultId);\n code.putWord (storageClass);\n code.putWord (initialValue);\n return resultId;\n }\n void functionBegin(\n uint32_t returnType,\n uint32_t functionId,\n uint32_t functionType,\n spv::FunctionControlMask functionControl) {\n m_code.putIns (spv::OpFunction, 5);\n m_code.putWord(returnType);\n m_code.putWord(functionId);\n m_code.putWord(functionControl);\n m_code.putWord(functionType);\n }\n uint32_t functionParameter(\n uint32_t parameterType) {\n uint32_t parameterId = this->allocateId();\n \n m_code.putIns (spv::OpFunctionParameter, 3);\n m_code.putWord(parameterType);\n m_code.putWord(parameterId);\n return parameterId;\n }\n void functionEnd() {\n m_code.putIns (spv::OpFunctionEnd, 1);\n }\n uint32_t opAccessChain(\n uint32_t resultType,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAccessChain, 4 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(composite);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opArrayLength(\n uint32_t resultType,\n uint32_t structure,\n uint32_t memberId) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpArrayLength, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(structure);\n m_code.putWord(memberId);\n return resultId;\n }\n uint32_t opAny(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAny, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opAll(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAll, 4);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opAtomicLoad(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicLoad, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n void opAtomicStore(\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n m_code.putIns (spv::OpAtomicStore, 5);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n }\n uint32_t opAtomicExchange(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicExchange, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicCompareExchange(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t equal,\n uint32_t unequal,\n uint32_t value,\n uint32_t comparator) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicCompareExchange, 9);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(equal);\n m_code.putWord(unequal);\n m_code.putWord(value);\n m_code.putWord(comparator);\n return resultId;\n }\n uint32_t opAtomicIIncrement(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIIncrement, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n uint32_t 
opAtomicIDecrement(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIDecrement, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n uint32_t opAtomicIAdd(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIAdd, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicISub(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicISub, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicSMin(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicSMin, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicSMax(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicSMax, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicUMin(\n uint32_t 
resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicUMin, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicUMax(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicUMax, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicAnd(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicAnd, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicOr(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicOr, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicXor(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicXor, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opBitcast(\n uint32_t 
resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitcast, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opBitCount(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitCount, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opBitReverse(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitReverse, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFindILsb(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FindILsb);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFindUMsb(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FindUMsb);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFindSMsb(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FindSMsb);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opBitFieldInsert(\n uint32_t resultType,\n uint32_t base,\n uint32_t insert,\n uint32_t offset,\n uint32_t count) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitFieldInsert, 7);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(insert);\n m_code.putWord(offset);\n m_code.putWord(count);\n return resultId;\n }\n uint32_t opBitFieldSExtract(\n uint32_t resultType,\n uint32_t base,\n uint32_t offset,\n uint32_t count) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitFieldSExtract, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(offset);\n m_code.putWord(count);\n return resultId;\n }\n uint32_t opBitFieldUExtract(\n uint32_t resultType,\n uint32_t base,\n uint32_t offset,\n uint32_t count) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitFieldUExtract, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(offset);\n m_code.putWord(count);\n return resultId;\n }\n uint32_t opBitwiseAnd(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitwiseAnd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opBitwiseOr(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitwiseOr, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opBitwiseXor(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitwiseXor, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opNot(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpNot, 4);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opShiftLeftLogical(\n uint32_t resultType,\n uint32_t base,\n uint32_t shift) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpShiftLeftLogical, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(shift);\n return resultId;\n }\n uint32_t opShiftRightArithmetic(\n uint32_t resultType,\n uint32_t base,\n uint32_t shift) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpShiftRightArithmetic, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(shift);\n return resultId;\n }\n uint32_t opShiftRightLogical(\n uint32_t resultType,\n uint32_t base,\n uint32_t shift) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpShiftRightLogical, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(shift);\n return resultId;\n }\n uint32_t opConvertFtoS(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertFToS, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opConvertFtoU(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertFToU, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opConvertStoF(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertSToF, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opConvertUtoF(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertUToF, 4);\n 
m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opCompositeConstruct(\n uint32_t resultType,\n uint32_t valueCount,\n const uint32_t* valueArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpCompositeConstruct, 3 + valueCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n \n for (uint32_t i = 0; i < valueCount; i++)\n m_code.putWord(valueArray[i]);\n return resultId;\n }\n uint32_t opCompositeExtract(\n uint32_t resultType,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpCompositeExtract, 4 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(composite);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opCompositeInsert(\n uint32_t resultType,\n uint32_t object,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpCompositeInsert, 5 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(object);\n m_code.putWord(composite);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opDpdx(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdx, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdy(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdy, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdxCoarse(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n 
m_code.putIns (spv::OpDPdxCoarse, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdyCoarse(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdyCoarse, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdxFine(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdxFine, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdyFine(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdyFine, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opVectorExtractDynamic(\n uint32_t resultType,\n uint32_t vector,\n uint32_t index) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpVectorExtractDynamic, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n m_code.putWord(index);\n return resultId;\n }\n uint32_t opVectorShuffle(\n uint32_t resultType,\n uint32_t vectorLeft,\n uint32_t vectorRight,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpVectorShuffle, 5 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vectorLeft);\n m_code.putWord(vectorRight);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opSNegate(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSNegate, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t 
opFNegate(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFNegate, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opSAbs(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450SAbs);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFAbs(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FAbs);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFSign(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FSign);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFMix(\n uint32_t resultType,\n uint32_t x,\n uint32_t y,\n uint32_t a) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FMix);\n m_code.putWord(x);\n m_code.putWord(y);\n m_code.putWord(a);\n return resultId;\n }\n uint32_t opCross(\n uint32_t resultType,\n uint32_t x,\n uint32_t y) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Cross);\n m_code.putWord(x);\n m_code.putWord(y);\n return resultId;\n }\n uint32_t opIAdd(\n uint32_t resultType,\n 
uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIAdd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opISub(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpISub, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFAdd(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFAdd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFSub(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFSub, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSDiv(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSDiv, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUDiv(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUDiv, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSRem(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSRem, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUMod(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n 
m_code.putIns (spv::OpUMod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFDiv(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFDiv, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opIMul(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIMul, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFMul(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFMul, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opVectorTimesScalar(\n uint32_t resultType,\n uint32_t vector,\n uint32_t scalar) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpVectorTimesScalar, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n m_code.putWord(scalar);\n return resultId;\n }\n uint32_t opMatrixTimesMatrix(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpMatrixTimesMatrix, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opMatrixTimesVector(\n uint32_t resultType,\n uint32_t matrix,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpMatrixTimesVector, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(matrix);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opVectorTimesMatrix(\n uint32_t resultType,\n uint32_t vector,\n uint32_t 
matrix) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpVectorTimesMatrix, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n m_code.putWord(matrix);\n return resultId;\n }\n uint32_t opTranspose(\n uint32_t resultType,\n uint32_t matrix) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpTranspose, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(matrix);\n return resultId;\n }\n uint32_t opInverse(\n uint32_t resultType,\n uint32_t matrix) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450MatrixInverse);\n m_code.putWord(matrix);\n return resultId;\n }\n uint32_t opFFma(\n uint32_t resultType,\n uint32_t a,\n uint32_t b,\n uint32_t c) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Fma);\n m_code.putWord(a);\n m_code.putWord(b);\n m_code.putWord(c);\n return resultId;\n }\n uint32_t opFMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opNMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = 
this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450NMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opNMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450NMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450SMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450SMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t 
opFClamp(\n uint32_t resultType,\n uint32_t x,\n uint32_t minVal,\n uint32_t maxVal) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FClamp);\n m_code.putWord(x);\n m_code.putWord(minVal);\n m_code.putWord(maxVal);\n return resultId;\n }\n uint32_t opNClamp(\n uint32_t resultType,\n uint32_t x,\n uint32_t minVal,\n uint32_t maxVal) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450NClamp);\n m_code.putWord(x);\n m_code.putWord(minVal);\n m_code.putWord(maxVal);\n return resultId;\n }\n uint32_t opIEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opINotEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpINotEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSLessThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSLessThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSLessThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSLessThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n 
m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSGreaterThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSGreaterThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSGreaterThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSGreaterThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opULessThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpULessThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opULessThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpULessThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opUGreaterThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUGreaterThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opUGreaterThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUGreaterThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdEqual(\n uint32_t resultType,\n 
uint32_t vector1,
      uint32_t vector2) {
      uint32_t resultId = this->allocateId();
      
      m_code.putIns (spv::OpFOrdEqual, 5);
      m_code.putWord(resultType);
      m_code.putWord(resultId);
      m_code.putWord(vector1);
      m_code.putWord(vector2);
      return resultId;
    }
    // Emits OpFUnordNotEqual on the two operands and
    // returns the id of the comparison result.
    uint32_t opFUnordNotEqual(
      uint32_t resultType,
      uint32_t vector1,
      uint32_t vector2) {
      uint32_t resultId = this->allocateId();
      
      m_code.putIns (spv::OpFUnordNotEqual, 5);
      m_code.putWord(resultType);
      m_code.putWord(resultId);
      m_code.putWord(vector1);
      m_code.putWord(vector2);
      return resultId;
    }
    // Emits OpFOrdLessThan on the two operands and
    // returns the id of the comparison result.
    uint32_t opFOrdLessThan(
      uint32_t resultType,
      uint32_t vector1,
      uint32_t vector2) {
      uint32_t resultId = this->allocateId();
      
      m_code.putIns (spv::OpFOrdLessThan, 5);
      m_code.putWord(resultType);
      m_code.putWord(resultId);
      m_code.putWord(vector1);
      m_code.putWord(vector2);
      return resultId;
    }
    // Emits OpFOrdLessThanEqual on the two operands and
    // returns the id of the comparison result.
    uint32_t opFOrdLessThanEqual(
      uint32_t resultType,
      uint32_t vector1,
      uint32_t vector2) {
      uint32_t resultId = this->allocateId();
      
      m_code.putIns (spv::OpFOrdLessThanEqual, 5);
      m_code.putWord(resultType);
      m_code.putWord(resultId);
      m_code.putWord(vector1);
      m_code.putWord(vector2);
      return resultId;
    }
    // Emits OpFOrdGreaterThan on the two operands and
    // returns the id of the comparison result.
    uint32_t opFOrdGreaterThan(
      uint32_t resultType,
      uint32_t vector1,
      uint32_t vector2) {
      uint32_t resultId = this->allocateId();
      
      m_code.putIns (spv::OpFOrdGreaterThan, 5);
      m_code.putWord(resultType);
      m_code.putWord(resultId);
      m_code.putWord(vector1);
      m_code.putWord(vector2);
      return resultId;
    }
    // Emits OpFOrdGreaterThanEqual on the two operands and
    // returns the id of the comparison result.
    uint32_t opFOrdGreaterThanEqual(
      uint32_t resultType,
      uint32_t vector1,
      uint32_t vector2) {
      uint32_t resultId = this->allocateId();
      
      m_code.putIns (spv::OpFOrdGreaterThanEqual, 5);
      m_code.putWord(resultType);
      m_code.putWord(resultId);
      m_code.putWord(vector1);
      m_code.putWord(vector2);
      return resultId;
    }
    // Emits OpLogicalEqual on the two boolean operands and
    // returns the id of the result.
    uint32_t opLogicalEqual(
      uint32_t resultType,
      uint32_t operand1,
      uint32_t operand2) {
      uint32_t resultId =
this->allocateId();\n \n m_code.putIns (spv::OpLogicalEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalNotEqual(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalNotEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalAnd(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalAnd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalOr(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalOr, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalNot(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalNot, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDot(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDot, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSin(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Sin);\n 
m_code.putWord(vector);\n return resultId;\n }\n uint32_t opCos(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Cos);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opSqrt(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Sqrt);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opInverseSqrt(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InverseSqrt);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opNormalize(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Normalize);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRawAccessChain(\n uint32_t resultType,\n uint32_t base,\n uint32_t stride,\n uint32_t index,\n uint32_t offset,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpRawAccessChainNV, operand ? 
8 : 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(stride);\n m_code.putWord(index);\n m_code.putWord(offset);\n\n if (operand)\n m_code.putWord(operand);\n\n return resultId;\n }\n uint32_t opReflect(\n uint32_t resultType,\n uint32_t incident,\n uint32_t normal) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Reflect);\n m_code.putWord(incident);\n m_code.putWord(normal);\n return resultId;\n }\n uint32_t opLength(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Length);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opExp2(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Exp2);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opExp(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Exp);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opLog2(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Log2);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opPow(\n uint32_t resultType,\n uint32_t base,\n uint32_t exponent) {\n 
uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Pow);\n m_code.putWord(base);\n m_code.putWord(exponent);\n return resultId;\n }\n uint32_t opFract(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Fract);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opCeil(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Ceil);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFloor(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Floor);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRound(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Round);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRoundEven(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450RoundEven);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opTrunc(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t 
resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Trunc);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFConvert(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpFConvert, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opPackHalf2x16(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450PackHalf2x16);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opUnpackHalf2x16(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UnpackHalf2x16);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opSelect(\n uint32_t resultType,\n uint32_t condition,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSelect, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(condition);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opIsNan(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIsNan, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opIsInf(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIsInf, 4);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFunctionCall(\n uint32_t resultType,\n uint32_t functionId,\n uint32_t argCount,\n const uint32_t* argIds) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFunctionCall, 4 + argCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(functionId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_code.putWord(argIds[i]);\n return resultId;\n }\n void opLabel(\n uint32_t labelId) {\n m_code.putIns (spv::OpLabel, 2);\n m_code.putWord(labelId);\n\n m_blockId = labelId;\n }\n uint32_t opLoad(\n uint32_t typeId,\n uint32_t pointerId) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLoad, 4 + getMemoryOperandWordCount(operands));\n m_code.putWord(typeId);\n m_code.putWord(resultId);\n m_code.putWord(pointerId);\n\n putMemoryOperands(operands);\n return resultId;\n }\n uint32_t opLoad(\n uint32_t typeId,\n uint32_t pointerId,\n const SpirvMemoryOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLoad, 4 + getMemoryOperandWordCount(operands));\n m_code.putWord(typeId);\n m_code.putWord(resultId);\n m_code.putWord(pointerId);\n\n putMemoryOperands(operands);\n return resultId;\n }\n void opStore(\n uint32_t pointerId,\n uint32_t valueId) {\n m_code.putIns (spv::OpStore, 3 + getMemoryOperandWordCount(operands));\n m_code.putWord(pointerId);\n m_code.putWord(valueId);\n\n putMemoryOperands(operands);\n }\n void opStore(\n uint32_t pointerId,\n uint32_t valueId,\n const SpirvMemoryOperands& operands) {\n m_code.putIns (spv::OpStore, 3 + getMemoryOperandWordCount(operands));\n m_code.putWord(pointerId);\n m_code.putWord(valueId);\n\n putMemoryOperands(operands);\n }\n uint32_t opInterpolateAtCentroid(\n uint32_t resultType,\n uint32_t interpolant) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtCentroid);\n m_code.putWord(interpolant);\n return resultId;\n }\n uint32_t opInterpolateAtSample(\n uint32_t resultType,\n uint32_t interpolant,\n uint32_t sample) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtSample);\n m_code.putWord(interpolant);\n m_code.putWord(sample);\n return resultId;\n }\n uint32_t opInterpolateAtOffset(\n uint32_t resultType,\n uint32_t interpolant,\n uint32_t offset) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtOffset);\n m_code.putWord(interpolant);\n m_code.putWord(offset);\n return resultId;\n }\n uint32_t opImage(\n uint32_t resultType,\n uint32_t sampledImage) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpImage, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n return resultId;\n }\n uint32_t opImageRead(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseRead\n : spv::OpImageRead;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n void opImageWrite(\n uint32_t image,\n uint32_t coordinates,\n uint32_t texel,\n const SpirvImageOperands& operands) {\n m_code.putIns (spv::OpImageWrite,\n 4 + getImageOperandWordCount(operands));\n m_code.putWord(image);\n m_code.putWord(coordinates);\n m_code.putWord(texel);\n \n putImageOperands(operands);\n }\n uint32_t opImageSparseTexelsResident(\n uint32_t resultType,\n uint32_t residentCode) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpImageSparseTexelsResident, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(residentCode);\n\n return resultId;\n }\n uint32_t opImageTexelPointer(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n uint32_t sample) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageTexelPointer, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n m_code.putWord(sample);\n return resultId;\n }\n uint32_t opSampledImage(\n uint32_t resultType,\n uint32_t image,\n uint32_t sampler) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSampledImage, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(sampler);\n return resultId;\n }\n uint32_t opImageQuerySizeLod(\n uint32_t resultType,\n uint32_t image,\n uint32_t lod) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySizeLod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(lod);\n return resultId;\n }\n uint32_t opImageQuerySize(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = 
this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySize, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageQueryLevels(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQueryLevels, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageQueryLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQueryLod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n return resultId;\n }\n uint32_t opImageQuerySamples(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySamples, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageFetch(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n\n spv::Op op = operands.sparse\n ? spv::OpImageSparseFetch\n : spv::OpImageFetch;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageGather(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t component,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseGather\n : spv::OpImageGather;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(component);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageDrefGather(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseDrefGather\n : spv::OpImageDrefGather;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleImplicitLod\n : spv::OpImageSampleImplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleExplicitLod\n : spv::OpImageSampleExplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjImplicitLod\n : spv::OpImageSampleProjImplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjExplicitLod\n : spv::OpImageSampleProjExplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleDrefImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleDrefImplicitLod\n : spv::OpImageSampleDrefImplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleDrefExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleDrefExplicitLod\n : spv::OpImageSampleDrefExplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjDrefImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjDrefImplicitLod\n : spv::OpImageSampleProjDrefImplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjDrefExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleProjDrefExplicitLod\n : spv::OpImageSampleProjDrefExplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opGroupNonUniformBallot(\n uint32_t resultType,\n uint32_t execution,\n uint32_t predicate) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBallot, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(predicate);\n return resultId;\n }\n uint32_t opGroupNonUniformBallotBitCount(\n uint32_t resultType,\n uint32_t execution,\n uint32_t operation,\n uint32_t ballot) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBallotBitCount, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(operation);\n m_code.putWord(ballot);\n return resultId;\n }\n uint32_t opGroupNonUniformElect(\n uint32_t resultType,\n uint32_t execution) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformElect, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n return resultId;\n }\n uint32_t opGroupNonUniformBroadcastFirst(\n uint32_t resultType,\n uint32_t execution,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBroadcastFirst, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(value);\n return resultId;\n }\n void opControlBarrier(\n uint32_t execution,\n uint32_t memory,\n uint32_t semantics) {\n m_code.putIns (spv::OpControlBarrier, 4);\n m_code.putWord(execution);\n m_code.putWord(memory);\n m_code.putWord(semantics);\n }\n void opMemoryBarrier(\n uint32_t memory,\n uint32_t 
semantics) {\n m_code.putIns (spv::OpMemoryBarrier, 3);\n m_code.putWord(memory);\n m_code.putWord(semantics);\n }\n void opLoopMerge(\n uint32_t mergeBlock,\n uint32_t continueTarget,\n uint32_t loopControl) {\n m_code.putIns (spv::OpLoopMerge, 4);\n m_code.putWord(mergeBlock);\n m_code.putWord(continueTarget);\n m_code.putWord(loopControl);\n }\n void opSelectionMerge(\n uint32_t mergeBlock,\n uint32_t selectionControl) {\n m_code.putIns (spv::OpSelectionMerge, 3);\n m_code.putWord(mergeBlock);\n m_code.putWord(selectionControl);\n }\n void opBranch(\n uint32_t label) {\n m_code.putIns (spv::OpBranch, 2);\n m_code.putWord(label);\n\n m_blockId = 0;\n }\n void opBranchConditional(\n uint32_t condition,\n uint32_t trueLabel,\n uint32_t falseLabel) {\n m_code.putIns (spv::OpBranchConditional, 4);\n m_code.putWord(condition);\n m_code.putWord(trueLabel);\n m_code.putWord(falseLabel);\n\n m_blockId = 0;\n }\n void opSwitch(\n uint32_t selector,\n uint32_t jumpDefault,\n uint32_t caseCount,\n const SpirvSwitchCaseLabel* caseLabels) {\n m_code.putIns (spv::OpSwitch, 3 + 2 * caseCount);\n m_code.putWord(selector);\n m_code.putWord(jumpDefault);\n \n for (uint32_t i = 0; i < caseCount; i++) {\n m_code.putWord(caseLabels[i].literal);\n m_code.putWord(caseLabels[i].labelId);\n }\n\n m_blockId = 0;\n }\n uint32_t opPhi(\n uint32_t resultType,\n uint32_t sourceCount,\n const SpirvPhiLabel* sourceLabels) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpPhi, 3 + 2 * sourceCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n \n for (uint32_t i = 0; i < sourceCount; i++) {\n m_code.putWord(sourceLabels[i].varId);\n m_code.putWord(sourceLabels[i].labelId);\n }\n \n return resultId;\n }\n void opReturn() {\n m_code.putIns (spv::OpReturn, 1);\n m_blockId = 0;\n }\n void opDemoteToHelperInvocation() {\n m_code.putIns (spv::OpDemoteToHelperInvocation, 1);\n }\n void opEmitVertex(\n uint32_t streamId) {\n if (streamId == 0) {\n m_code.putIns 
(spv::OpEmitVertex, 1);\n } else {\n m_code.putIns (spv::OpEmitStreamVertex, 2);\n m_code.putWord(streamId);\n }\n }\n void opEndPrimitive(\n uint32_t streamId) {\n if (streamId == 0) {\n m_code.putIns (spv::OpEndPrimitive, 1);\n } else {\n m_code.putIns (spv::OpEndStreamPrimitive, 2);\n m_code.putWord(streamId);\n }\n }\n void opBeginInvocationInterlock() {\n m_code.putIns(spv::OpBeginInvocationInterlockEXT, 1);\n }\n void opEndInvocationInterlock() {\n m_code.putIns(spv::OpEndInvocationInterlockEXT, 1);\n }\n uint32_t opSinCos(\n uint32_t x,\n bool useBuiltIn) {\n // We only operate on 32-bit floats here\n uint32_t floatType = defFloatType(32);\n uint32_t resultType = defVectorType(floatType, 2u);\n\n if (useBuiltIn) {\n std::array members = { opSin(floatType, x), opCos(floatType, x) };\n return opCompositeConstruct(resultType, members.size(), members.data());\n } else {\n uint32_t uintType = defIntType(32, false);\n uint32_t sintType = defIntType(32, true);\n uint32_t boolType = defBoolType();\n\n // Normalize input to multiple of pi/4\n uint32_t xNorm = opFMul(floatType, opFAbs(floatType, x), constf32(4.0 / pi));\n\n uint32_t xTrunc = opTrunc(floatType, xNorm);\n uint32_t xFract = opFSub(floatType, xNorm, xTrunc);\n\n uint32_t xInt = opConvertFtoU(uintType, xTrunc);\n\n // Mirror input along x axis as necessary\n uint32_t mirror = opINotEqual(boolType, opBitwiseAnd(uintType, xInt, constu32(1u)), constu32(0u));\n xFract = opSelect(floatType, mirror, opFSub(floatType, constf32(1.0f), xFract), xFract);\n\n // Compute taylor series for fractional part\n uint32_t xFract_2 = opFMul(floatType, xFract, xFract);\n uint32_t xFract_4 = opFMul(floatType, xFract_2, xFract_2);\n uint32_t xFract_6 = opFMul(floatType, xFract_4, xFract_2);\n\n uint32_t taylor = opFMul(floatType, xFract_6, constf32(-sincosTaylorFactor(7)));\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFFma(floatType, xFract_4, constf32(sincosTaylorFactor(5)), taylor);\n decorate(taylor, 
spv::DecorationNoContraction);\n\n taylor = opFFma(floatType, xFract_2, constf32(-sincosTaylorFactor(3)), taylor);\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFAdd(floatType, constf32(sincosTaylorFactor(1)), taylor);\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFMul(floatType, taylor, xFract);\n decorate(taylor, spv::DecorationNoContraction);\n\n // Compute co-function based on sin^2 + cos^2 = 1\n uint32_t coFunc = opSqrt(floatType, opFSub(floatType, constf32(1.0f), opFMul(floatType, taylor, taylor)));\n\n // Determine whether the taylor series was used for sine or cosine and assign the correct result\n uint32_t funcIsSin = opIEqual(boolType, opBitwiseAnd(uintType, opIAdd(uintType, xInt, constu32(1u)), constu32(2u)), constu32(0u));\n\n uint32_t sin = opSelect(floatType, funcIsSin, taylor, coFunc);\n uint32_t cos = opSelect(floatType, funcIsSin, coFunc, taylor);\n\n // Determine whether sine is negative. Interpret the input as a\n // signed integer in order to propagate signed zeroes properly.\n uint32_t inputNeg = opSLessThan(boolType, opBitcast(sintType, x), consti32(0));\n\n uint32_t sinNeg = opINotEqual(boolType, opBitwiseAnd(uintType, xInt, constu32(4u)), constu32(0u));\n sinNeg = opLogicalNotEqual(boolType, sinNeg, inputNeg);\n\n // Determine whether cosine is negative\n uint32_t cosNeg = opINotEqual(boolType, opBitwiseAnd(uintType, opIAdd(uintType, xInt, constu32(2u)), constu32(4u)), constu32(0u));\n\n sin = opSelect(floatType, sinNeg, opFNegate(floatType, sin), sin);\n cos = opSelect(floatType, cosNeg, opFNegate(floatType, cos), cos);\n\n std::array members = { sin, cos };\n return opCompositeConstruct(resultType, members.size(), members.data());\n }\n }\n private:\n uint32_t m_version;\n uint32_t m_id = 1;\n uint32_t m_instExtGlsl450 = 0;\n uint32_t m_blockId = 0;\n SpirvCodeBuffer m_capabilities;\n SpirvCodeBuffer m_extensions;\n SpirvCodeBuffer m_instExt;\n SpirvCodeBuffer m_memoryModel;\n SpirvCodeBuffer 
m_entryPoints;\n SpirvCodeBuffer m_execModeInfo;\n SpirvCodeBuffer m_debugNames;\n SpirvCodeBuffer m_annotations;\n SpirvCodeBuffer m_typeConstDefs;\n SpirvCodeBuffer m_variables;\n SpirvCodeBuffer m_code;\n std::unordered_set m_lateConsts;\n std::vector m_interfaceVars;\n uint32_t defType(\n spv::Op op, \n uint32_t argCount,\n const uint32_t* argIds) {\n // Since the type info is stored in the code buffer,\n // we can use the code buffer to look up type IDs as\n // well. Result IDs are always stored as argument 1.\n for (auto ins : m_typeConstDefs) {\n bool match = ins.opCode() == op\n && ins.length() == 2 + argCount;\n \n for (uint32_t i = 0; i < argCount && match; i++)\n match &= ins.arg(2 + i) == argIds[i];\n \n if (match)\n return ins.arg(1);\n }\n \n // Type not yet declared, create a new one.\n uint32_t resultId = this->allocateId();\n m_typeConstDefs.putIns (op, 2 + argCount);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_typeConstDefs.putWord(argIds[i]);\n return resultId;\n }\n uint32_t defConst(\n spv::Op op,\n uint32_t typeId,\n uint32_t argCount,\n const uint32_t* argIds) {\n // Avoid declaring constants multiple times\n for (auto ins : m_typeConstDefs) {\n bool match = ins.opCode() == op\n && ins.length() == 3 + argCount\n && ins.arg(1) == typeId;\n \n for (uint32_t i = 0; i < argCount && match; i++)\n match &= ins.arg(3 + i) == argIds[i];\n \n if (!match)\n continue;\n \n uint32_t id = ins.arg(2);\n\n if (m_lateConsts.find(id) == m_lateConsts.end())\n return id;\n }\n \n // Constant not yet declared, make a new one\n uint32_t resultId = this->allocateId();\n m_typeConstDefs.putIns (op, 3 + argCount);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_typeConstDefs.putWord(argIds[i]);\n return resultId;\n }\n void instImportGlsl450() {\n m_instExtGlsl450 = this->allocateId();\n const char* name = \"GLSL.std.450\";\n \n m_instExt.putIns 
(spv::OpExtInstImport, 2 + m_instExt.strLen(name));\n m_instExt.putWord(m_instExtGlsl450);\n m_instExt.putStr (name);\n }\n uint32_t getMemoryOperandWordCount(\n const SpirvMemoryOperands& op) const {\n const uint32_t result\n = ((op.flags & spv::MemoryAccessAlignedMask) ? 1 : 0)\n + ((op.flags & spv::MemoryAccessMakePointerAvailableMask) ? 1 : 0)\n + ((op.flags & spv::MemoryAccessMakePointerVisibleMask) ? 1 : 0);\n\n return op.flags ? result + 1 : 0;\n }\n void putMemoryOperands(\n const SpirvMemoryOperands& op) {\n if (op.flags) {\n m_code.putWord(op.flags);\n\n if (op.flags & spv::MemoryAccessAlignedMask)\n m_code.putWord(op.alignment);\n\n if (op.flags & spv::MemoryAccessMakePointerAvailableMask)\n m_code.putWord(op.makeAvailable);\n\n if (op.flags & spv::MemoryAccessMakePointerVisibleMask)\n m_code.putWord(op.makeVisible);\n }\n }\n uint32_t getImageOperandWordCount(\n const SpirvImageOperands& op) const {\n // Each flag may add one or more operands\n const uint32_t result\n = ((op.flags & spv::ImageOperandsBiasMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsLodMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsConstOffsetMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsGradMask) ? 2 : 0)\n + ((op.flags & spv::ImageOperandsOffsetMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsConstOffsetsMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsSampleMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMinLodMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMakeTexelAvailableMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMakeTexelVisibleMask) ? 1 : 0);\n \n // Add a DWORD for the operand mask if it is non-zero\n return op.flags ? 
result + 1 : 0;\n }\n void putImageOperands(\n const SpirvImageOperands& op) {\n if (op.flags) {\n m_code.putWord(op.flags);\n \n if (op.flags & spv::ImageOperandsBiasMask)\n m_code.putWord(op.sLodBias);\n \n if (op.flags & spv::ImageOperandsLodMask)\n m_code.putWord(op.sLod);\n\n if (op.flags & spv::ImageOperandsGradMask) {\n m_code.putWord(op.sGradX);\n m_code.putWord(op.sGradY);\n }\n\n if (op.flags & spv::ImageOperandsConstOffsetMask)\n m_code.putWord(op.sConstOffset);\n\n if (op.flags & spv::ImageOperandsOffsetMask)\n m_code.putWord(op.gOffset);\n \n if (op.flags & spv::ImageOperandsConstOffsetsMask)\n m_code.putWord(op.gConstOffsets);\n \n if (op.flags & spv::ImageOperandsSampleMask)\n m_code.putWord(op.sSampleId);\n \n if (op.flags & spv::ImageOperandsMinLodMask)\n m_code.putWord(op.sMinLod);\n\n if (op.flags & spv::ImageOperandsMakeTexelAvailableMask)\n m_code.putWord(op.makeAvailable);\n\n if (op.flags & spv::ImageOperandsMakeTexelVisibleMask)\n m_code.putWord(op.makeVisible);\n }\n }\n bool isInterfaceVar(\n spv::StorageClass sclass) const {\n if (m_version < spvVersion(1, 4)) {\n return sclass == spv::StorageClassInput\n || sclass == spv::StorageClassOutput;\n } else {\n // All global variables need to be declared\n return sclass != spv::StorageClassFunction;\n }\n }\n void classifyBlocks(\n std::unordered_set& reachableBlocks,\n std::unordered_set& mergeBlocks) {\n std::unordered_multimap branches;\n std::queue blockQueue;\n\n uint32_t blockId = 0;\n\n for (auto ins : m_code) {\n switch (ins.opCode()) {\n case spv::OpLabel: {\n uint32_t id = ins.arg(1);\n\n if (!blockId)\n branches.insert({ 0u, id });\n\n blockId = id;\n } break;\n\n case spv::OpFunction: {\n blockId = 0u;\n } break;\n\n case spv::OpBranch: {\n branches.insert({ blockId, ins.arg(1) });\n } break;\n\n case spv::OpBranchConditional: {\n branches.insert({ blockId, ins.arg(2) });\n branches.insert({ blockId, ins.arg(3) });\n } break;\n\n case spv::OpSwitch: {\n branches.insert({ blockId, 
ins.arg(2) });\n\n for (uint32_t i = 4; i < ins.length(); i += 2)\n branches.insert({ blockId, ins.arg(i) });\n } break;\n\n case spv::OpSelectionMerge: {\n mergeBlocks.insert(ins.arg(1));\n } break;\n\n case spv::OpLoopMerge: {\n mergeBlocks.insert(ins.arg(1));\n\n // It is possible for the continue block to be unreachable in\n // practice, but we still need to emit it if we are not going\n // to eliminate this loop. Since the current block dominates\n // the loop, use it to keep the continue block intact.\n branches.insert({ blockId, ins.arg(2) });\n } break;\n\n default:;\n }\n }\n\n blockQueue.push(0);\n\n while (!blockQueue.empty()) {\n uint32_t id = blockQueue.front();\n\n auto range = branches.equal_range(id);\n\n for (auto i = range.first; i != range.second; i++) {\n if (reachableBlocks.insert(i->second).second)\n blockQueue.push(i->second);\n }\n\n blockQueue.pop();\n }\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_bit.h", "#pragma once\n\n#if (defined(__x86_64__) && !defined(__arm64ec__)) || (defined(_M_X64) && !defined(_M_ARM64EC)) \\\n || defined(__i386__) || defined(_M_IX86) || defined(__e2k__)\n #define DXVK_ARCH_X86\n #if defined(__x86_64__) || defined(_M_X64) || defined(__e2k__)\n #define DXVK_ARCH_X86_64\n #endif\n#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)\n #define DXVK_ARCH_ARM64\n#endif\n\n#ifdef DXVK_ARCH_X86\n #ifndef _MSC_VER\n #if defined(_WIN32) && (defined(__AVX__) || defined(__AVX2__))\n #error \"AVX-enabled builds not supported due to stack alignment issues.\"\n #endif\n #if defined(__WINE__) && defined(__clang__)\n #pragma push_macro(\"_WIN32\")\n #undef _WIN32\n #endif\n #include \n #if defined(__WINE__) && defined(__clang__)\n #pragma pop_macro(\"_WIN32\")\n #endif\n #else\n #include \n #endif\n#endif\n\n#include \"util_likely.h\"\n#include \"util_math.h\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace dxvk::bit {\n\n template\n T cast(const J& src) {\n 
static_assert(sizeof(T) == sizeof(J));\n static_assert(std::is_trivially_copyable::value && std::is_trivial::value);\n\n T dst;\n std::memcpy(&dst, &src, sizeof(T));\n return dst;\n }\n \n template\n T extract(T value, uint32_t fst, uint32_t lst) {\n return (value >> fst) & ~(~T(0) << (lst - fst + 1));\n }\n\n template\n T popcnt(T n) {\n n -= ((n >> 1u) & T(0x5555555555555555ull));\n n = (n & T(0x3333333333333333ull)) + ((n >> 2u) & T(0x3333333333333333ull));\n n = (n + (n >> 4u)) & T(0x0f0f0f0f0f0f0f0full);\n n *= T(0x0101010101010101ull);\n return n >> (8u * (sizeof(T) - 1u));\n }\n\n inline uint32_t tzcnt(uint32_t n) {\n #if defined(_MSC_VER) && !defined(__clang__)\n if(n == 0)\n return 32;\n return _tzcnt_u32(n);\n #elif defined(__BMI__)\n return __tzcnt_u32(n);\n #elif defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__))\n // tzcnt is encoded as rep bsf, so we can use it on all\n // processors, but the behaviour of zero inputs differs:\n // - bsf: zf = 1, cf = ?, result = ?\n // - tzcnt: zf = 0, cf = 1, result = 32\n // We'll have to handle this case manually.\n uint32_t res;\n uint32_t tmp;\n asm (\n \"tzcnt %2, %0;\"\n \"mov $32, %1;\"\n \"test %2, %2;\"\n \"cmovz %1, %0;\"\n : \"=&r\" (res), \"=&r\" (tmp)\n : \"r\" (n)\n : \"cc\");\n return res;\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? __builtin_ctz(n) : 32;\n #else\n uint32_t r = 31;\n n &= -n;\n r -= (n & 0x0000FFFF) ? 16 : 0;\n r -= (n & 0x00FF00FF) ? 8 : 0;\n r -= (n & 0x0F0F0F0F) ? 4 : 0;\n r -= (n & 0x33333333) ? 2 : 0;\n r -= (n & 0x55555555) ? 1 : 0;\n return n != 0 ? 
r : 32;\n #endif\n }\n\n inline uint32_t tzcnt(uint64_t n) {\n #if defined(DXVK_ARCH_X86_64) && defined(_MSC_VER) && !defined(__clang__)\n if(n == 0)\n return 64;\n return (uint32_t)_tzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && defined(__BMI__)\n return __tzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && (defined(__GNUC__) || defined(__clang__))\n uint64_t res;\n uint64_t tmp;\n asm (\n \"tzcnt %2, %0;\"\n \"mov $64, %1;\"\n \"test %2, %2;\"\n \"cmovz %1, %0;\"\n : \"=&r\" (res), \"=&r\" (tmp)\n : \"r\" (n)\n : \"cc\");\n return res;\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? __builtin_ctzll(n) : 64;\n #else\n uint32_t lo = uint32_t(n);\n if (lo) {\n return tzcnt(lo);\n } else {\n uint32_t hi = uint32_t(n >> 32);\n return tzcnt(hi) + 32;\n }\n #endif\n }\n\n inline uint32_t bsf(uint32_t n) {\n #if (defined(__GNUC__) || defined(__clang__)) && !defined(__BMI__) && defined(DXVK_ARCH_X86)\n uint32_t res;\n asm (\"tzcnt %1,%0\"\n : \"=r\" (res)\n : \"r\" (n)\n : \"cc\");\n return res;\n #else\n return tzcnt(n);\n #endif\n }\n\n inline uint32_t bsf(uint64_t n) {\n #if (defined(__GNUC__) || defined(__clang__)) && !defined(__BMI__) && defined(DXVK_ARCH_X86_64)\n uint64_t res;\n asm (\"tzcnt %1,%0\"\n : \"=r\" (res)\n : \"r\" (n)\n : \"cc\");\n return res;\n #else\n return tzcnt(n);\n #endif\n }\n\n inline uint32_t lzcnt(uint32_t n) {\n #if defined(_MSC_VER) && !defined(__clang__) && !defined(__LZCNT__)\n unsigned long bsr;\n if(n == 0)\n return 32;\n _BitScanReverse(&bsr, n);\n return 31-bsr;\n #elif (defined(_MSC_VER) && !defined(__clang__)) || defined(__LZCNT__)\n return _lzcnt_u32(n);\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? 
__builtin_clz(n) : 32;\n #else\n uint32_t r = 0;\n\n if (n == 0)\treturn 32;\n\n if (n <= 0x0000FFFF) { r += 16; n <<= 16; }\n if (n <= 0x00FFFFFF) { r += 8; n <<= 8; }\n if (n <= 0x0FFFFFFF) { r += 4; n <<= 4; }\n if (n <= 0x3FFFFFFF) { r += 2; n <<= 2; }\n if (n <= 0x7FFFFFFF) { r += 1; n <<= 1; }\n\n return r;\n #endif\n }\n\n inline uint32_t lzcnt(uint64_t n) {\n #if defined(_MSC_VER) && !defined(__clang__) && !defined(__LZCNT__) && defined(DXVK_ARCH_X86_64)\n unsigned long bsr;\n if(n == 0)\n return 64;\n _BitScanReverse64(&bsr, n);\n return 63-bsr;\n #elif defined(DXVK_ARCH_X86_64) && ((defined(_MSC_VER) && !defined(__clang__)) && defined(__LZCNT__))\n return _lzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && (defined(__GNUC__) || defined(__clang__))\n return n != 0 ? __builtin_clzll(n) : 64;\n #else\n uint32_t lo = uint32_t(n);\n uint32_t hi = uint32_t(n >> 32u);\n return hi ? lzcnt(hi) : lzcnt(lo) + 32u;\n #endif\n }\n\n template\n uint32_t pack(T& dst, uint32_t& shift, T src, uint32_t count) {\n constexpr uint32_t Bits = 8 * sizeof(T);\n if (likely(shift < Bits))\n dst |= src << shift;\n shift += count;\n return shift > Bits ? shift - Bits : 0;\n }\n\n template\n uint32_t unpack(T& dst, T src, uint32_t& shift, uint32_t count) {\n constexpr uint32_t Bits = 8 * sizeof(T);\n if (likely(shift < Bits))\n dst = (src >> shift) & ((T(1) << count) - 1);\n shift += count;\n return shift > Bits ? shift - Bits : 0;\n }\n\n\n /**\n * \\brief Clears cache lines of memory\n *\n * Uses non-temporal stores. 
The memory region offset\n * and size are assumed to be aligned to 64 bytes.\n * \\param [in] mem Memory region to clear\n * \\param [in] size Number of bytes to clear\n */\n inline void bclear(void* mem, size_t size) {\n #if defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))\n auto zero = _mm_setzero_si128();\n\n #if defined(__clang__)\n #pragma nounroll\n #elif defined(__GNUC__)\n #pragma GCC unroll 0\n #endif\n for (size_t i = 0; i < size; i += 64u) {\n auto* ptr = reinterpret_cast<__m128i*>(mem) + i / sizeof(zero);\n _mm_stream_si128(ptr + 0u, zero);\n _mm_stream_si128(ptr + 1u, zero);\n _mm_stream_si128(ptr + 2u, zero);\n _mm_stream_si128(ptr + 3u, zero);\n }\n #else\n std::memset(mem, 0, size);\n #endif\n }\n\n\n /**\n * \\brief Compares two aligned structs bit by bit\n *\n * \\param [in] a First struct\n * \\param [in] b Second struct\n * \\returns \\c true if the structs are equal\n */\n template\n bool bcmpeq(const T* a, const T* b) {\n static_assert(alignof(T) >= 16);\n #if defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))\n auto ai = reinterpret_cast(a);\n auto bi = reinterpret_cast(b);\n\n size_t i = 0;\n\n #if defined(__clang__)\n #pragma nounroll\n #elif defined(__GNUC__)\n #pragma GCC unroll 0\n #endif\n\n for ( ; i < 2 * (sizeof(T) / 32); i += 2) {\n __m128i eq0 = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i),\n _mm_load_si128(bi + i));\n __m128i eq1 = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i + 1),\n _mm_load_si128(bi + i + 1));\n __m128i eq = _mm_and_si128(eq0, eq1);\n\n int mask = _mm_movemask_epi8(eq);\n if (mask != 0xFFFF)\n return false;\n }\n\n for ( ; i < sizeof(T) / 16; i++) {\n __m128i eq = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i),\n _mm_load_si128(bi + i));\n\n int mask = _mm_movemask_epi8(eq);\n if (mask != 0xFFFF)\n return false;\n }\n\n return true;\n #else\n return !std::memcmp(a, b, sizeof(T));\n #endif\n }\n\n template \n class bitset {\n static constexpr size_t 
Dwords = align(Bits, 32) / 32;\n public:\n\n constexpr bitset()\n : m_dwords() {\n\n }\n\n constexpr bool get(uint32_t idx) const {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n return m_dwords[dword] & (1u << bit);\n }\n\n constexpr void set(uint32_t idx, bool value) {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n if (value)\n m_dwords[dword] |= 1u << bit;\n else\n m_dwords[dword] &= ~(1u << bit);\n }\n\n constexpr bool exchange(uint32_t idx, bool value) {\n bool oldValue = get(idx);\n set(idx, value);\n return oldValue;\n }\n\n constexpr void flip(uint32_t idx) {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n m_dwords[dword] ^= 1u << bit;\n }\n\n constexpr void setAll() {\n if constexpr (Bits % 32 == 0) {\n for (size_t i = 0; i < Dwords; i++)\n m_dwords[i] = std::numeric_limits::max();\n }\n else {\n for (size_t i = 0; i < Dwords - 1; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n m_dwords[Dwords - 1] = (1u << (Bits % 32)) - 1;\n }\n }\n\n constexpr void clearAll() {\n for (size_t i = 0; i < Dwords; i++)\n m_dwords[i] = 0;\n }\n\n constexpr bool any() const {\n for (size_t i = 0; i < Dwords; i++) {\n if (m_dwords[i] != 0)\n return true;\n }\n\n return false;\n }\n\n constexpr uint32_t& dword(uint32_t idx) {\n return m_dwords[idx];\n }\n\n constexpr size_t bitCount() {\n return Bits;\n }\n\n constexpr size_t dwordCount() {\n return Dwords;\n }\n\n constexpr bool operator [] (uint32_t idx) const {\n return get(idx);\n }\n\n constexpr void setN(uint32_t bits) {\n uint32_t fullDwords = bits / 32;\n uint32_t offset = bits % 32;\n\n for (size_t i = 0; i < fullDwords; i++)\n m_dwords[i] = 
std::numeric_limits::max();\n \n if (offset > 0)\n m_dwords[fullDwords] = (1u << offset) - 1;\n }\n\n private:\n\n uint32_t m_dwords[Dwords];\n\n };\n\n class bitvector {\n public:\n\n bool get(uint32_t idx) const {\n uint32_t dword = idx / 32;\n uint32_t bit = idx % 32;\n\n return m_dwords[dword] & (1u << bit);\n }\n\n void ensureSize(uint32_t bitCount) {\n uint32_t dword = bitCount / 32;\n if (unlikely(dword >= m_dwords.size())) {\n m_dwords.resize(dword + 1);\n }\n m_bitCount = std::max(m_bitCount, bitCount);\n }\n\n void set(uint32_t idx, bool value) {\n ensureSize(idx + 1);\n\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n if (value)\n m_dwords[dword] |= 1u << bit;\n else\n m_dwords[dword] &= ~(1u << bit);\n }\n\n bool exchange(uint32_t idx, bool value) {\n ensureSize(idx + 1);\n\n bool oldValue = get(idx);\n set(idx, value);\n return oldValue;\n }\n\n void flip(uint32_t idx) {\n ensureSize(idx + 1);\n\n uint32_t dword = idx / 32;\n uint32_t bit = idx % 32;\n\n m_dwords[dword] ^= 1u << bit;\n }\n\n void setAll() {\n if (m_bitCount % 32 == 0) {\n for (size_t i = 0; i < m_dwords.size(); i++)\n m_dwords[i] = std::numeric_limits::max();\n }\n else {\n for (size_t i = 0; i < m_dwords.size() - 1; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n m_dwords[m_dwords.size() - 1] = (1u << (m_bitCount % 32)) - 1;\n }\n }\n\n void clearAll() {\n for (size_t i = 0; i < m_dwords.size(); i++)\n m_dwords[i] = 0;\n }\n\n bool any() const {\n for (size_t i = 0; i < m_dwords.size(); i++) {\n if (m_dwords[i] != 0)\n return true;\n }\n\n return false;\n }\n\n uint32_t& dword(uint32_t idx) {\n return m_dwords[idx];\n }\n\n size_t bitCount() const {\n return m_bitCount;\n }\n\n size_t dwordCount() const {\n return m_dwords.size();\n }\n\n bool operator [] (uint32_t idx) const {\n return get(idx);\n }\n\n void setN(uint32_t bits) {\n ensureSize(bits);\n\n uint32_t fullDwords = bits / 32;\n uint32_t offset = bits % 32;\n\n for (size_t i = 0; i < fullDwords; i++)\n m_dwords[i] = 
std::numeric_limits::max();\n\n if (offset > 0)\n m_dwords[fullDwords] = (1u << offset) - 1;\n }\n\n private:\n\n std::vector m_dwords;\n uint32_t m_bitCount = 0;\n\n };\n\n template\n class BitMask {\n\n public:\n\n class iterator {\n public:\n using iterator_category = std::input_iterator_tag;\n using value_type = T;\n using difference_type = T;\n using pointer = const T*;\n using reference = T;\n\n explicit iterator(T flags)\n : m_mask(flags) { }\n\n iterator& operator ++ () {\n m_mask &= m_mask - 1;\n return *this;\n }\n\n iterator operator ++ (int) {\n iterator retval = *this;\n m_mask &= m_mask - 1;\n return retval;\n }\n\n T operator * () const {\n return bsf(m_mask);\n }\n\n bool operator == (iterator other) const { return m_mask == other.m_mask; }\n bool operator != (iterator other) const { return m_mask != other.m_mask; }\n\n private:\n\n T m_mask;\n\n };\n\n BitMask()\n : m_mask(0) { }\n\n explicit BitMask(T n)\n : m_mask(n) { }\n\n iterator begin() {\n return iterator(m_mask);\n }\n\n iterator end() {\n return iterator(0);\n }\n\n private:\n\n T m_mask;\n\n };\n\n\n /**\n * \\brief Encodes float as fixed point\n *\n * Rounds away from zero. If this is not suitable for\n * certain use cases, implement round to nearest even.\n * \\tparam T Integer type, may be signed\n * \\tparam I Integer bits\n * \\tparam F Fractional bits\n * \\param n Float to encode\n * \\returns Encoded fixed-point value\n */\n template\n T encodeFixed(float n) {\n if (n != n)\n return 0u;\n\n n *= float(1u << F);\n\n if constexpr (std::is_signed_v) {\n n = std::max(n, -float(1u << (I + F - 1u)));\n n = std::min(n, float(1u << (I + F - 1u)) - 1.0f);\n n += n < 0.0f ? 
-0.5f : 0.5f;\n } else {\n n = std::max(n, 0.0f);\n n = std::min(n, float(1u << (I + F)) - 1.0f);\n n += 0.5f;\n }\n\n T result = T(n);\n\n if constexpr (std::is_signed_v)\n result &= ((T(1u) << (I + F)) - 1u);\n\n return result;\n }\n\n\n /**\n * \\brief Decodes fixed-point integer to float\n *\n * \\tparam T Integer type, may be signed\n * \\tparam I Integer bits\n * \\tparam F Fractional bits\n * \\param n Number to decode\n * \\returns Decoded number\n */\n template\n float decodeFixed(T n) {\n // Sign-extend as necessary\n if constexpr (std::is_signed_v)\n n -= (n & (T(1u) << (I + F - 1u))) << 1u;\n\n return float(n) / float(1u << F);\n }\n\n\n /**\n * \\brief Inserts one null bit after each bit\n */\n inline uint32_t split2(uint32_t c) {\n c = (c ^ (c << 8u)) & 0x00ff00ffu;\n c = (c ^ (c << 4u)) & 0x0f0f0f0fu;\n c = (c ^ (c << 2u)) & 0x33333333u;\n c = (c ^ (c << 1u)) & 0x55555555u;\n return c;\n }\n\n\n /**\n * \\brief Inserts two null bits after each bit\n */\n inline uint64_t split3(uint64_t c) {\n c = (c | c << 32u) & 0x001f00000000ffffull;\n c = (c | c << 16u) & 0x001f0000ff0000ffull;\n c = (c | c << 8u) & 0x100f00f00f00f00full;\n c = (c | c << 4u) & 0x10c30c30c30c30c3ull;\n c = (c | c << 2u) & 0x1249249249249249ull;\n return c;\n }\n\n\n /**\n * \\brief Interleaves bits from two integers\n *\n * Both numbers must fit into 16 bits.\n * \\param [in] x X coordinate\n * \\param [in] y Y coordinate\n * \\returns Morton code of x and y\n */\n inline uint32_t interleave(uint16_t x, uint16_t y) {\n return split2(x) | (split2(y) << 1u);\n }\n\n\n /**\n * \\brief Interleaves bits from three integers\n *\n * All three numbers must fit into 16 bits.\n */\n inline uint64_t interleave(uint16_t x, uint16_t y, uint16_t z) {\n return split3(x) | (split3(y) << 1u) | (split3(z) << 2u);\n }\n\n\n /**\n * \\brief 48-bit integer storage type\n */\n struct uint48_t {\n explicit uint48_t(uint64_t n)\n : a(uint16_t(n)), b(uint16_t(n >> 16)), c(uint16_t(n >> 32)) { }\n\n 
uint16_t a;\n uint16_t b;\n uint16_t c;\n\n explicit operator uint64_t () const {\n // GCC generates worse code if we promote to uint64 directly\n uint32_t lo = uint32_t(a) | (uint32_t(b) << 16);\n return uint64_t(lo) | (uint64_t(c) << 32);\n }\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_analysis.h", "class DxbcAnalyzer {\n public:\n DxbcAnalyzer(\n const DxbcModuleInfo& moduleInfo,\n const DxbcProgramInfo& programInfo,\n const Rc& isgn,\n const Rc& osgn,\n const Rc& psgn,\n DxbcAnalysisInfo& analysis) {\n // Get number of clipping and culling planes from the\n // input and output signatures. We will need this to\n // declare the shader input and output interfaces.\n m_analysis->clipCullIn = getClipCullInfo(m_isgn);\n m_analysis->clipCullOut = getClipCullInfo(m_osgn);\n }\n ~DxbcAnalyzer() {\n \n }\n void processInstruction(\n const DxbcShaderInstruction& ins) {\n switch (ins.opClass) {\n case DxbcInstClass::Atomic: {\n const uint32_t operandId = ins.dstCount - 1;\n\n if (ins.dst[operandId].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.dst[operandId].idx[0].offset;\n m_analysis->uavInfos[registerId].accessAtomicOp = true;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n\n // Check whether the atomic operation is order-invariant\n DxvkAccessOp op = DxvkAccessOp::None;\n\n switch (ins.op) {\n case DxbcOpcode::AtomicAnd: op = DxvkAccessOp::And; break;\n case DxbcOpcode::AtomicOr: op = DxvkAccessOp::Or; break;\n case DxbcOpcode::AtomicXor: op = DxvkAccessOp::Xor; break;\n case DxbcOpcode::AtomicIAdd: op = DxvkAccessOp::Add; break;\n case DxbcOpcode::AtomicIMax: op = DxvkAccessOp::IMax; break;\n case DxbcOpcode::AtomicIMin: op = DxvkAccessOp::IMin; break;\n case DxbcOpcode::AtomicUMax: op = DxvkAccessOp::UMax; break;\n case DxbcOpcode::AtomicUMin: op = DxvkAccessOp::UMin; break;\n default: break;\n }\n\n setUavAccessOp(registerId, op);\n }\n } break;\n\n case 
DxbcInstClass::TextureSample:\n case DxbcInstClass::TextureGather:\n case DxbcInstClass::TextureQueryLod:\n case DxbcInstClass::VectorDeriv: {\n m_analysis->usesDerivatives = true;\n } break;\n\n case DxbcInstClass::ControlFlow: {\n if (ins.op == DxbcOpcode::Discard)\n m_analysis->usesKill = true;\n } break;\n\n case DxbcInstClass::BufferLoad: {\n uint32_t operandId = ins.op == DxbcOpcode::LdStructured ? 2 : 1;\n bool sparseFeedback = ins.dstCount == 2;\n\n if (ins.src[operandId].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.src[operandId].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT;\n m_analysis->uavInfos[registerId].sparseFeedback |= sparseFeedback;\n\n setUavAccessOp(registerId, DxvkAccessOp::None);\n } else if (ins.src[operandId].type == DxbcOperandType::Resource) {\n const uint32_t registerId = ins.src[operandId].idx[0].offset;\n m_analysis->srvInfos[registerId].sparseFeedback |= sparseFeedback;\n }\n } break;\n\n case DxbcInstClass::BufferStore: {\n if (ins.dst[0].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_WRITE_BIT;\n\n setUavAccessOp(registerId, getStoreAccessOp(ins.dst[0].mask, ins.src[ins.srcCount - 1u]));\n }\n } break;\n\n case DxbcInstClass::TypedUavLoad: {\n const uint32_t registerId = ins.src[1].idx[0].offset;\n m_analysis->uavInfos[registerId].accessTypedLoad = true;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT;\n\n setUavAccessOp(registerId, DxvkAccessOp::None);\n } break;\n\n case DxbcInstClass::TypedUavStore: {\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_WRITE_BIT;\n\n // The UAV format may change between dispatches, so be conservative here\n // and only allow this optimization when the app is writing zeroes.\n DxvkAccessOp storeOp 
= getStoreAccessOp(DxbcRegMask(0xf), ins.src[1u]);\n\n if (storeOp != DxvkAccessOp(DxvkAccessOp::OpType::StoreUi, 0u))\n storeOp = DxvkAccessOp::None;\n\n setUavAccessOp(registerId, storeOp);\n } break;\n\n case DxbcInstClass::Declaration: {\n switch (ins.op) {\n case DxbcOpcode::DclConstantBuffer: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcConstBufBindingCount)\n m_analysis->bindings.cbvMask |= 1u << registerId;\n } break;\n\n case DxbcOpcode::DclSampler: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcSamplerBindingCount)\n m_analysis->bindings.samplerMask |= 1u << registerId;\n } break;\n\n case DxbcOpcode::DclResource:\n case DxbcOpcode::DclResourceRaw:\n case DxbcOpcode::DclResourceStructured: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n uint32_t idx = registerId / 64u;\n uint32_t bit = registerId % 64u;\n\n if (registerId < DxbcResourceBindingCount)\n m_analysis->bindings.srvMask[idx] |= uint64_t(1u) << bit;\n } break;\n\n case DxbcOpcode::DclUavTyped:\n case DxbcOpcode::DclUavRaw:\n case DxbcOpcode::DclUavStructured: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcUavBindingCount)\n m_analysis->bindings.uavMask |= uint64_t(1u) << registerId;\n } break;\n\n default: ;\n }\n } break;\n\n default:\n break;\n }\n\n for (uint32_t i = 0; i < ins.dstCount; i++) {\n if (ins.dst[i].type == DxbcOperandType::IndexableTemp) {\n uint32_t index = ins.dst[i].idx[0].offset;\n m_analysis->xRegMasks[index] |= ins.dst[i].mask;\n }\n }\n }\n private:\n Rc m_isgn;\n Rc m_osgn;\n Rc m_psgn;\n DxbcAnalysisInfo* m_analysis = nullptr;\n DxbcClipCullInfo getClipCullInfo(\n const Rc& sgn) const {\n DxbcClipCullInfo result;\n \n if (sgn != nullptr) {\n for (auto e = sgn->begin(); e != sgn->end(); e++) {\n const uint32_t componentCount = e->componentMask.popCount();\n \n if (e->systemValue == DxbcSystemValue::ClipDistance)\n result.numClipPlanes += componentCount;\n if (e->systemValue 
== DxbcSystemValue::CullDistance)\n result.numCullPlanes += componentCount;\n }\n }\n \n return result;\n }\n void setUavAccessOp(uint32_t uav, DxvkAccessOp op) {\n if (m_analysis->uavInfos[uav].accessOp == DxvkAccessOp::None)\n m_analysis->uavInfos[uav].accessOp = op;\n\n // Maintain ordering if the UAV is accessed via other operations as well\n if (op == DxvkAccessOp::None || m_analysis->uavInfos[uav].accessOp != op)\n m_analysis->uavInfos[uav].nonInvariantAccess = true;\n }\n static DxvkAccessOp getStoreAccessOp(DxbcRegMask writeMask, const DxbcRegister& src) {\n if (src.type != DxbcOperandType::Imm32)\n return DxvkAccessOp::None;\n\n // Trivial case, same value is written to all components\n if (src.componentCount == DxbcComponentCount::Component1)\n return getConstantStoreOp(src.imm.u32_1);\n\n if (src.componentCount != DxbcComponentCount::Component4)\n return DxvkAccessOp::None;\n\n // Otherwise, make sure that all written components are equal\n DxvkAccessOp op = DxvkAccessOp::None;\n\n for (uint32_t i = 0u; i < 4u; i++) {\n if (!writeMask[i])\n continue;\n\n // If the written value can't be represented, skip\n DxvkAccessOp scalarOp = getConstantStoreOp(src.imm.u32_4[i]);\n\n if (scalarOp == DxvkAccessOp::None)\n return DxvkAccessOp::None;\n\n // First component written\n if (op == DxvkAccessOp::None)\n op = scalarOp;\n\n // Conflicting store ops\n if (op != scalarOp)\n return DxvkAccessOp::None;\n }\n\n return op;\n }\n static DxvkAccessOp getConstantStoreOp(uint32_t value) {\n constexpr uint32_t mask = 0xfffu;\n\n uint32_t ubits = value & mask;\n uint32_t fbits = (value >> 20u);\n\n if (value == ubits)\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreUi, ubits);\n\n if (value == (ubits | ~mask))\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreSi, ubits);\n\n if (value == (fbits << 20u))\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreF, fbits);\n\n return DxvkAccessOp::None;\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_code_buffer.h", "class 
for {\n public:\n SpirvCodeBuffer() {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n explicit SpirvCodeBuffer(uint32_t size) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n SpirvCodeBuffer(uint32_t size, const uint32_t* data) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n SpirvCodeBuffer(std::istream& stream) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n template\n SpirvCodeBuffer(const uint32_t (&data)[N])\n : 
SpirvCodeBuffer(N, data) { }\n ~SpirvCodeBuffer() { }\n uint32_t allocId() {\n constexpr size_t BoundIdsOffset = 3;\n\n if (m_code.size() <= BoundIdsOffset)\n return 0;\n\n return m_code[BoundIdsOffset]++;\n }\n void append(const SpirvInstruction& ins) {\n if (other.size() != 0) {\n const size_t size = m_code.size();\n m_code.resize(size + other.m_code.size());\n \n uint32_t* dst = this->m_code.data();\n const uint32_t* src = other.m_code.data();\n \n std::memcpy(dst + size, src, other.size());\n m_ptr += other.m_code.size();\n }\n }\n void append(const SpirvCodeBuffer& other) {\n if (other.size() != 0) {\n const size_t size = m_code.size();\n m_code.resize(size + other.m_code.size());\n \n uint32_t* dst = this->m_code.data();\n const uint32_t* src = other.m_code.data();\n \n std::memcpy(dst + size, src, other.size());\n m_ptr += other.m_code.size();\n }\n }\n void putWord(uint32_t word) {\n m_code.insert(m_code.begin() + m_ptr, word);\n m_ptr += 1;\n }\n void putIns(spv::Op opCode, uint16_t wordCount) {\n this->putWord(\n (static_cast(opCode) << 0)\n | (static_cast(wordCount) << 16));\n }\n void putInt32(uint32_t word) {\n this->putWord(word);\n }\n void putInt64(uint64_t value) {\n this->putWord(value >> 0);\n this->putWord(value >> 32);\n }\n void putFloat32(float value) {\n uint32_t tmp;\n static_assert(sizeof(tmp) == sizeof(value));\n std::memcpy(&tmp, &value, sizeof(value));\n this->putInt32(tmp);\n }\n void putFloat64(double value) {\n uint64_t tmp;\n static_assert(sizeof(tmp) == sizeof(value));\n std::memcpy(&tmp, &value, sizeof(value));\n this->putInt64(tmp);\n }\n void putStr(const char* str) {\n uint32_t word = 0;\n uint32_t nbit = 0;\n \n for (uint32_t i = 0; str[i] != '\\0'; str++) {\n word |= (static_cast(str[i]) & 0xFF) << nbit;\n \n if ((nbit += 8) == 32) {\n this->putWord(word);\n word = 0;\n nbit = 0;\n }\n }\n \n // Commit current word\n this->putWord(word);\n }\n void putHeader(uint32_t version, uint32_t boundIds) {\n 
this->putWord(spv::MagicNumber);\n this->putWord(version);\n this->putWord(0); // Generator\n this->putWord(boundIds);\n this->putWord(0); // Schema\n }\n void erase(size_t size) {\n m_code.erase(\n m_code.begin() + m_ptr,\n m_code.begin() + m_ptr + size);\n }\n uint32_t strLen(const char* str) {\n // Null-termination plus padding\n return (std::strlen(str) + 4) / 4;\n }\n void store(std::ostream& stream) const {\n stream.write(\n reinterpret_cast(m_code.data()),\n sizeof(uint32_t) * m_code.size());\n }\n private:\n std::vector m_code;\n size_t m_ptr = 0;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_decoder.h", "class DxbcRegModifier {\n public:\n private:\n uint8_t m_mask = 0;\n public:\n private:\n uint8_t m_mask = 0;\n public:\n private:\n uint32_t m_bits;\n public:\n const uint32_t* ptrAt(uint32_t id) const;\n uint32_t at(uint32_t id) const {\n if (m_ptr + id >= m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return m_ptr[id];\n }\n uint32_t read() {\n if (m_ptr >= m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return *(m_ptr++);\n }\n DxbcCodeSlice take(uint32_t n) const {\n if (m_ptr + n > m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return DxbcCodeSlice(m_ptr, m_ptr + n);\n }\n DxbcCodeSlice skip(uint32_t n) const {\n if (m_ptr + n > m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return DxbcCodeSlice(m_ptr + n, m_end);\n }\n private:\n const uint32_t* m_ptr = nullptr;\n const uint32_t* m_end = nullptr;\n public:\n void decodeInstruction(DxbcCodeSlice& code) {\n const uint32_t token0 = code.at(0);\n \n // Initialize the instruction structure. 
Some of these values\n // may not get written otherwise while decoding the instruction.\n m_instruction.op = static_cast(bit::extract(token0, 0, 10));\n m_instruction.opClass = DxbcInstClass::Undefined;\n m_instruction.sampleControls = { 0, 0, 0 };\n m_instruction.dstCount = 0;\n m_instruction.srcCount = 0;\n m_instruction.immCount = 0;\n m_instruction.dst = m_dstOperands.data();\n m_instruction.src = m_srcOperands.data();\n m_instruction.imm = m_immOperands.data();\n m_instruction.customDataType = DxbcCustomDataClass::Comment;\n m_instruction.customDataSize = 0;\n m_instruction.customData = nullptr;\n \n // Reset the index pointer, which may still contain\n // a non-zero value from the previous iteration\n m_indexId = 0;\n \n // Instruction length, in DWORDs. This includes the token\n // itself and any other prefix that an instruction may have.\n uint32_t length = 0;\n \n if (m_instruction.op == DxbcOpcode::CustomData) {\n length = code.at(1);\n this->decodeCustomData(code.take(length));\n } else {\n length = bit::extract(token0, 24, 30);\n this->decodeOperation(code.take(length));\n }\n \n // Advance the caller's slice to the next token so that\n // they can make consecutive calls to decodeInstruction()\n code = code.skip(length);\n }\n private:\n DxbcShaderInstruction m_instruction;\n std::array m_dstOperands;\n std::array m_srcOperands;\n std::array m_immOperands;\n std::array m_indices;\n uint32_t m_indexId = 0;\n void decodeCustomData(DxbcCodeSlice code) {\n const uint32_t blockLength = code.at(1);\n \n if (blockLength < 2) {\n Logger::err(\"DxbcDecodeContext: Invalid custom data block\");\n return;\n }\n \n // Custom data blocks have their own instruction class\n m_instruction.op = DxbcOpcode::CustomData;\n m_instruction.opClass = DxbcInstClass::CustomData;\n \n // We'll point into the code buffer rather than making a copy\n m_instruction.customDataType = static_cast(\n bit::extract(code.at(0), 11, 31));\n m_instruction.customDataSize = blockLength - 2;\n 
m_instruction.customData = code.ptrAt(2);\n }\n void decodeOperation(DxbcCodeSlice code) {\n uint32_t token = code.read();\n \n // Result modifiers, which are applied to common ALU ops\n m_instruction.modifiers.saturate = !!bit::extract(token, 13, 13);\n m_instruction.modifiers.precise = !!bit::extract(token, 19, 22);\n \n // Opcode controls. It will depend on the\n // opcode itself which ones are valid.\n m_instruction.controls = DxbcShaderOpcodeControls(token);\n \n // Process extended opcode tokens\n while (bit::extract(token, 31, 31)) {\n token = code.read();\n \n const DxbcExtOpcode extOpcode\n = static_cast(bit::extract(token, 0, 5));\n \n switch (extOpcode) {\n case DxbcExtOpcode::SampleControls: {\n struct {\n int u : 4;\n int v : 4;\n int w : 4;\n } aoffimmi;\n \n aoffimmi.u = bit::extract(token, 9, 12);\n aoffimmi.v = bit::extract(token, 13, 16);\n aoffimmi.w = bit::extract(token, 17, 20);\n \n // Four-bit signed numbers, sign-extend them\n m_instruction.sampleControls.u = aoffimmi.u;\n m_instruction.sampleControls.v = aoffimmi.v;\n m_instruction.sampleControls.w = aoffimmi.w;\n } break;\n \n case DxbcExtOpcode::ResourceDim:\n case DxbcExtOpcode::ResourceReturnType:\n break; // part of resource description\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled extended opcode: \",\n extOpcode));\n }\n }\n \n // Retrieve the instruction format in order to parse the\n // operands. Doing this mostly automatically means that\n // the compiler can rely on the operands being valid.\n const DxbcInstFormat format = dxbcInstructionFormat(m_instruction.op);\n m_instruction.opClass = format.instructionClass;\n \n for (uint32_t i = 0; i < format.operandCount; i++)\n this->decodeOperand(code, format.operands[i]);\n }\n void decodeComponentSelection(DxbcRegister& reg, uint32_t token) {\n // Pick the correct component selection mode based on the\n // component count. 
We'll simplify this here so that the\n // compiler can assume that everything is a 4D vector.\n reg.componentCount = static_cast(bit::extract(token, 0, 1));\n \n switch (reg.componentCount) {\n // No components - used for samplers etc.\n case DxbcComponentCount::Component0:\n reg.mask = DxbcRegMask(false, false, false, false);\n reg.swizzle = DxbcRegSwizzle(0, 0, 0, 0);\n break;\n \n // One component - used for immediates\n // and a few built-in registers.\n case DxbcComponentCount::Component1:\n reg.mask = DxbcRegMask(true, false, false, false);\n reg.swizzle = DxbcRegSwizzle(0, 0, 0, 0);\n break;\n \n // Four components - everything else. This requires us\n // to actually parse the component selection mode.\n case DxbcComponentCount::Component4: {\n const DxbcRegMode componentMode =\n static_cast(bit::extract(token, 2, 3));\n \n switch (componentMode) {\n // Write mask for destination operands\n case DxbcRegMode::Mask:\n reg.mask = bit::extract(token, 4, 7);\n reg.swizzle = DxbcRegSwizzle(0, 1, 2, 3);\n break;\n \n // Swizzle for source operands (including resources)\n case DxbcRegMode::Swizzle:\n reg.mask = DxbcRegMask(true, true, true, true);\n reg.swizzle = DxbcRegSwizzle(\n bit::extract(token, 4, 5),\n bit::extract(token, 6, 7),\n bit::extract(token, 8, 9),\n bit::extract(token, 10, 11));\n break;\n \n // Selection of one component. 
We can generate both a\n // mask and a swizzle for this so that the compiler\n // won't have to deal with this case specifically.\n case DxbcRegMode::Select1: {\n const uint32_t n = bit::extract(token, 4, 5);\n reg.mask = DxbcRegMask(n == 0, n == 1, n == 2, n == 3);\n reg.swizzle = DxbcRegSwizzle(n, n, n, n);\n } break;\n \n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component selection mode\");\n }\n } break;\n \n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component count\");\n }\n }\n void decodeOperandExtensions(DxbcCodeSlice& code, DxbcRegister& reg, uint32_t token) {\n while (bit::extract(token, 31, 31)) {\n token = code.read();\n \n // Type of the extended operand token\n const DxbcOperandExt extTokenType =\n static_cast(bit::extract(token, 0, 5));\n \n switch (extTokenType) {\n // Operand modifiers, which are used to manipulate the\n // value of a source operand during the load operation\n case DxbcOperandExt::OperandModifier:\n reg.modifiers = bit::extract(token, 6, 13);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled extended operand token: \",\n extTokenType));\n }\n }\n }\n void decodeOperandImmediates(DxbcCodeSlice& code, DxbcRegister& reg) {\n if (reg.type == DxbcOperandType::Imm32\n || reg.type == DxbcOperandType::Imm64) {\n switch (reg.componentCount) {\n // This is commonly used if only one vector\n // component is involved in an operation\n case DxbcComponentCount::Component1: {\n reg.imm.u32_1 = code.read();\n } break;\n \n // Typical four-component vector\n case DxbcComponentCount::Component4: {\n reg.imm.u32_4[0] = code.read();\n reg.imm.u32_4[1] = code.read();\n reg.imm.u32_4[2] = code.read();\n reg.imm.u32_4[3] = code.read();\n } break;\n\n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component count for immediate operand\");\n }\n }\n }\n void decodeOperandIndex(DxbcCodeSlice& code, DxbcRegister& reg, uint32_t token) {\n reg.idxDim = bit::extract(token, 20, 21);\n \n for 
(uint32_t i = 0; i < reg.idxDim; i++) {\n // An index can be encoded in various different ways\n const DxbcOperandIndexRepresentation repr =\n static_cast(\n bit::extract(token, 22 + 3 * i, 24 + 3 * i));\n \n switch (repr) {\n case DxbcOperandIndexRepresentation::Imm32:\n reg.idx[i].offset = static_cast(code.read());\n reg.idx[i].relReg = nullptr;\n break;\n \n case DxbcOperandIndexRepresentation::Relative:\n reg.idx[i].offset = 0;\n reg.idx[i].relReg = &m_indices.at(m_indexId);\n \n this->decodeRegister(code,\n m_indices.at(m_indexId++),\n DxbcScalarType::Sint32);\n break;\n \n case DxbcOperandIndexRepresentation::Imm32Relative:\n reg.idx[i].offset = static_cast(code.read());\n reg.idx[i].relReg = &m_indices.at(m_indexId);\n \n this->decodeRegister(code,\n m_indices.at(m_indexId++),\n DxbcScalarType::Sint32);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled index representation: \",\n repr));\n }\n }\n }\n void decodeRegister(DxbcCodeSlice& code, DxbcRegister& reg, DxbcScalarType type) {\n const uint32_t token = code.read();\n \n reg.type = static_cast(bit::extract(token, 12, 19));\n reg.dataType = type;\n reg.modifiers = 0;\n reg.idxDim = 0;\n \n for (uint32_t i = 0; i < DxbcMaxRegIndexDim; i++) {\n reg.idx[i].relReg = nullptr;\n reg.idx[i].offset = 0;\n }\n \n this->decodeComponentSelection(reg, token);\n this->decodeOperandExtensions(code, reg, token);\n this->decodeOperandImmediates(code, reg);\n this->decodeOperandIndex(code, reg, token);\n }\n void decodeImm32(DxbcCodeSlice& code, DxbcImmediate& imm, DxbcScalarType type) {\n imm.u32 = code.read();\n }\n void decodeOperand(DxbcCodeSlice& code, const DxbcInstOperandFormat& format) {\n switch (format.kind) {\n case DxbcOperandKind::DstReg: {\n const uint32_t operandId = m_instruction.dstCount++;\n this->decodeRegister(code, m_dstOperands.at(operandId), format.type);\n } break;\n \n case DxbcOperandKind::SrcReg: {\n const uint32_t operandId = m_instruction.srcCount++;\n 
this->decodeRegister(code, m_srcOperands.at(operandId), format.type);\n } break;\n \n case DxbcOperandKind::Imm32: {\n const uint32_t operandId = m_instruction.immCount++;\n this->decodeImm32(code, m_immOperands.at(operandId), format.type);\n } break;\n \n default:\n throw DxvkError(\"DxbcDecodeContext: Invalid operand format\");\n }\n }\n};"], ["/lsfg-vk/framegen/src/core/fence.cpp", "#include \n#include \n\n#include \"core/fence.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n\nusing namespace LSFG::Core;\n\nFence::Fence(const Core::Device& device) {\n // create fence\n const VkFenceCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO\n };\n VkFence fenceHandle{};\n auto res = vkCreateFence(device.handle(), &desc, nullptr, &fenceHandle);\n if (res != VK_SUCCESS || fenceHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create fence\");\n\n // store fence in shared ptr\n this->fence = std::shared_ptr(\n new VkFence(fenceHandle),\n [dev = device.handle()](VkFence* fenceHandle) {\n vkDestroyFence(dev, *fenceHandle, nullptr);\n }\n );\n}\n\nvoid Fence::reset(const Core::Device& device) const {\n VkFence fenceHandle = this->handle();\n auto res = vkResetFences(device.handle(), 1, &fenceHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to reset fence\");\n}\n\nbool Fence::wait(const Core::Device& device, uint64_t timeout) const {\n VkFence fenceHandle = this->handle();\n auto res = vkWaitForFences(device.handle(), 1, &fenceHandle, VK_TRUE, timeout);\n if (res != VK_SUCCESS && res != VK_TIMEOUT)\n throw LSFG::vulkan_error(res, \"Unable to wait for fence\");\n\n return res == VK_SUCCESS;\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/dump-pe/main.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the 
Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n\n#include \"vendor/argh.h\"\n\nusing namespace peparse;\n\nint printExps(void *N,\n const VA &funcAddr,\n std::uint16_t ordinal,\n const std::string &mod,\n const std::string &func,\n const std::string &fwd) {\n static_cast(N);\n\n auto address = static_cast(funcAddr);\n\n // save default formatting\n std::ios initial(nullptr);\n initial.copyfmt(std::cout);\n\n std::cout << \"EXP #\";\n std::cout << ordinal;\n std::cout << \": \";\n std::cout << mod;\n std::cout << \"!\";\n std::cout << func;\n std::cout << \": \";\n if (!fwd.empty()) {\n std::cout << fwd;\n } else {\n std::cout << std::showbase << std::hex << address;\n }\n std::cout << \"\\n\";\n\n // restore default formatting\n std::cout.copyfmt(initial);\n return 0;\n}\n\nint printImports(void *N,\n const VA &impAddr,\n const std::string &modName,\n const std::string &symName) {\n static_cast(N);\n\n auto address = static_cast(impAddr);\n\n std::cout << \"0x\" << std::hex << address << \" \" << modName << \"!\" << symName;\n std::cout << \"\\n\";\n return 0;\n}\n\nint 
printRelocs(void *N, const VA &relocAddr, const reloc_type &type) {\n static_cast(N);\n\n std::cout << \"TYPE: \";\n switch (type) {\n case RELOC_ABSOLUTE:\n std::cout << \"ABSOLUTE\";\n break;\n case RELOC_HIGH:\n std::cout << \"HIGH\";\n break;\n case RELOC_LOW:\n std::cout << \"LOW\";\n break;\n case RELOC_HIGHLOW:\n std::cout << \"HIGHLOW\";\n break;\n case RELOC_HIGHADJ:\n std::cout << \"HIGHADJ\";\n break;\n case RELOC_MIPS_JMPADDR:\n std::cout << \"MIPS_JMPADDR\";\n break;\n case RELOC_MIPS_JMPADDR16:\n std::cout << \"MIPS_JMPADD16\";\n break;\n case RELOC_DIR64:\n std::cout << \"DIR64\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n\n std::cout << \" VA: 0x\" << std::hex << relocAddr << \"\\n\";\n\n return 0;\n}\n\nint printDebugs(void *N,\n const std::uint32_t &type,\n const bounded_buffer *data) {\n static_cast(N);\n\n std::cout << \"Debug Directory Type: \";\n switch (type) {\n case 0:\n std::cout << \"IMAGE_DEBUG_TYPE_UNKNOWN\";\n break;\n case 1:\n std::cout << \"IMAGE_DEBUG_TYPE_COFF\";\n break;\n case 2:\n std::cout << \"IMAGE_DEBUG_TYPE_CODEVIEW\";\n break;\n case 3:\n std::cout << \"IMAGE_DEBUG_TYPE_FPO\";\n break;\n case 4:\n std::cout << \"IMAGE_DEBUG_TYPE_MISC\";\n break;\n case 5:\n std::cout << \"IMAGE_DEBUG_TYPE_EXCEPTION\";\n break;\n case 6:\n std::cout << \"IMAGE_DEBUG_TYPE_FIXUP\";\n break;\n case 7:\n std::cout << \"IMAGE_DEBUG_TYPE_OMAP_TO_SRC\";\n break;\n case 8:\n std::cout << \"IMAGE_DEBUG_TYPE_OMAP_FROM_SRC\";\n break;\n case 9:\n std::cout << \"IMAGE_DEBUG_TYPE_BORLAND\";\n break;\n case 10:\n std::cout << \"IMAGE_DEBUG_TYPE_RESERVED10\";\n break;\n case 11:\n std::cout << \"IMAGE_DEBUG_TYPE_CLSID\";\n break;\n case 12:\n std::cout << \"IMAGE_DEBUG_TYPE_VC_FEATURE\";\n break;\n case 13:\n std::cout << \"IMAGE_DEBUG_TYPE_POGO\";\n break;\n case 14:\n std::cout << \"IMAGE_DEBUG_TYPE_ILTCG\";\n break;\n case 15:\n std::cout << \"IMAGE_DEBUG_TYPE_MPX\";\n break;\n case 16:\n std::cout << \"IMAGE_DEBUG_TYPE_REPRO\";\n 
break;\n case 20:\n std::cout << \"IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS\";\n break;\n default:\n std::cout << \"INVALID\";\n break;\n }\n std::cout << \"\\n\";\n std::cout << \"Debug Directory Data: \";\n for (uint32_t i = 0; i < data->bufLen; i++) {\n std::cout << \" 0x\" << std::hex << static_cast(data->buf[i]);\n }\n std::cout << \"\\n\";\n\n return 0;\n}\n\nint printSymbols(void *N,\n const std::string &strName,\n const uint32_t &value,\n const int16_t §ionNumber,\n const uint16_t &type,\n const uint8_t &storageClass,\n const uint8_t &numberOfAuxSymbols) {\n static_cast(N);\n\n std::cout << \"Symbol Name: \" << strName << \"\\n\";\n std::cout << \"Symbol Value: 0x\" << std::hex << value << \"\\n\";\n\n std::cout << \"Symbol Section Number: \";\n switch (sectionNumber) {\n case IMAGE_SYM_UNDEFINED:\n std::cout << \"UNDEFINED\";\n break;\n case IMAGE_SYM_ABSOLUTE:\n std::cout << \"ABSOLUTE\";\n break;\n case IMAGE_SYM_DEBUG:\n std::cout << \"DEBUG\";\n break;\n default:\n std::cout << sectionNumber;\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Type: \";\n switch (type) {\n case IMAGE_SYM_TYPE_NULL:\n std::cout << \"NULL\";\n break;\n case IMAGE_SYM_TYPE_VOID:\n std::cout << \"VOID\";\n break;\n case IMAGE_SYM_TYPE_CHAR:\n std::cout << \"CHAR\";\n break;\n case IMAGE_SYM_TYPE_SHORT:\n std::cout << \"SHORT\";\n break;\n case IMAGE_SYM_TYPE_INT:\n std::cout << \"INT\";\n break;\n case IMAGE_SYM_TYPE_LONG:\n std::cout << \"LONG\";\n break;\n case IMAGE_SYM_TYPE_FLOAT:\n std::cout << \"FLOAT\";\n break;\n case IMAGE_SYM_TYPE_DOUBLE:\n std::cout << \"DOUBLE\";\n break;\n case IMAGE_SYM_TYPE_STRUCT:\n std::cout << \"STRUCT\";\n break;\n case IMAGE_SYM_TYPE_UNION:\n std::cout << \"UNION\";\n break;\n case IMAGE_SYM_TYPE_ENUM:\n std::cout << \"ENUM\";\n break;\n case IMAGE_SYM_TYPE_MOE:\n std::cout << \"IMAGE_SYM_TYPE_MOE\";\n break;\n case IMAGE_SYM_TYPE_BYTE:\n std::cout << \"BYTE\";\n break;\n case IMAGE_SYM_TYPE_WORD:\n std::cout << \"WORD\";\n 
break;\n case IMAGE_SYM_TYPE_UINT:\n std::cout << \"UINT\";\n break;\n case IMAGE_SYM_TYPE_DWORD:\n std::cout << \"DWORD\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Storage Class: \";\n switch (storageClass) {\n case IMAGE_SYM_CLASS_END_OF_FUNCTION:\n std::cout << \"FUNCTION\";\n break;\n case IMAGE_SYM_CLASS_NULL:\n std::cout << \"NULL\";\n break;\n case IMAGE_SYM_CLASS_AUTOMATIC:\n std::cout << \"AUTOMATIC\";\n break;\n case IMAGE_SYM_CLASS_EXTERNAL:\n std::cout << \"EXTERNAL\";\n break;\n case IMAGE_SYM_CLASS_STATIC:\n std::cout << \"STATIC\";\n break;\n case IMAGE_SYM_CLASS_REGISTER:\n std::cout << \"REGISTER\";\n break;\n case IMAGE_SYM_CLASS_EXTERNAL_DEF:\n std::cout << \"EXTERNAL DEF\";\n break;\n case IMAGE_SYM_CLASS_LABEL:\n std::cout << \"LABEL\";\n break;\n case IMAGE_SYM_CLASS_UNDEFINED_LABEL:\n std::cout << \"UNDEFINED LABEL\";\n break;\n case IMAGE_SYM_CLASS_MEMBER_OF_STRUCT:\n std::cout << \"MEMBER OF STRUCT\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Number of Aux Symbols: \"\n << static_cast(numberOfAuxSymbols) << \"\\n\";\n\n return 0;\n}\n\nint printRich(void *N, const rich_entry &r) {\n static_cast(N);\n std::cout << std::dec;\n std::cout << std::setw(10) << \"ProdId:\" << std::setw(7) << r.ProductId;\n std::cout << std::setw(10) << \"Build:\" << std::setw(7) << r.BuildNumber;\n std::cout << std::setw(10) << \"Name:\" << std::setw(40)\n << GetRichProductName(r.BuildNumber) << \" \"\n << GetRichObjectType(r.ProductId);\n std::cout << std::setw(10) << \"Count:\" << std::setw(7) << r.Count << \"\\n\";\n return 0;\n}\n\nint printRsrc(void *N, const resource &r) {\n static_cast(N);\n\n if (r.type_str.length())\n std::cout << \"Type (string): \" << r.type_str << \"\\n\";\n else\n std::cout << \"Type: 0x\" << std::hex << r.type << \"\\n\";\n\n if (r.name_str.length())\n std::cout << \"Name (string): \" << 
r.name_str << \"\\n\";\n else\n std::cout << \"Name: 0x\" << std::hex << r.name << \"\\n\";\n\n if (r.lang_str.length())\n std::cout << \"Lang (string): \" << r.lang_str << \"\\n\";\n else\n std::cout << \"Lang: 0x\" << std::hex << r.lang << \"\\n\";\n\n std::cout << \"Codepage: 0x\" << std::hex << r.codepage << \"\\n\";\n std::cout << \"RVA: \" << std::dec << r.RVA << \"\\n\";\n std::cout << \"Size: \" << std::dec << r.size << \"\\n\";\n return 0;\n}\n\nint printSecs(void *N,\n const VA &secBase,\n const std::string &secName,\n const image_section_header &s,\n const bounded_buffer *data) {\n static_cast(N);\n static_cast(s);\n\n std::cout << \"Sec Name: \" << secName << \"\\n\";\n std::cout << \"Sec Base: 0x\" << std::hex << secBase << \"\\n\";\n if (data)\n std::cout << \"Sec Size: \" << std::dec << data->bufLen << \"\\n\";\n else\n std::cout << \"Sec Size: 0\"\n << \"\\n\";\n return 0;\n}\n\n#define DUMP_FIELD(x) \\\n std::cout << \"\" #x << \": 0x\"; \\\n std::cout << std::hex << static_cast(p->peHeader.x) << \"\\n\";\n#define DUMP_DEC_FIELD(x) \\\n std::cout << \"\" #x << \": \"; \\\n std::cout << std::dec << static_cast(p->peHeader.x) << \"\\n\";\n#define DUMP_BOOL_FIELD(x) \\\n std::cout << \"\" #x << \": \"; \\\n std::cout << std::boolalpha << static_cast(p->peHeader.x) << \"\\n\";\n\nint main(int argc, char *argv[]) {\n\n argh::parser cmdl(argv);\n\n if (cmdl[{\"-h\", \"--help\"}] || argc <= 1) {\n std::cout << \"dump-pe utility from Trail of Bits\\n\";\n std::cout << \"Repository: https://github.com/trailofbits/pe-parse\\n\\n\";\n std::cout << \"Usage:\\n\\tdump-pe /path/to/executable.exe\\n\";\n return 0;\n } else if (cmdl[{\"-v\", \"--version\"}]) {\n std::cout << \"dump-pe (pe-parse) version \" << PEPARSE_VERSION << \"\\n\";\n return 0;\n }\n\n parsed_pe *p = ParsePEFromFile(cmdl[1].c_str());\n\n if (p == nullptr) {\n std::cout << \"Error: \" << GetPEErr() << \" (\" << GetPEErrString() << \")\"\n << \"\\n\";\n std::cout << \"Location: \" << 
GetPEErrLoc() << \"\\n\";\n return 1;\n }\n\n if (p != NULL) {\n // Print DOS header\n DUMP_FIELD(dos.e_magic);\n DUMP_FIELD(dos.e_cp);\n DUMP_FIELD(dos.e_crlc);\n DUMP_FIELD(dos.e_cparhdr);\n DUMP_FIELD(dos.e_minalloc);\n DUMP_FIELD(dos.e_maxalloc);\n DUMP_FIELD(dos.e_ss);\n DUMP_FIELD(dos.e_sp);\n DUMP_FIELD(dos.e_csum);\n DUMP_FIELD(dos.e_ip);\n DUMP_FIELD(dos.e_cs);\n DUMP_FIELD(dos.e_lfarlc);\n DUMP_FIELD(dos.e_ovno);\n DUMP_FIELD(dos.e_res[0]);\n DUMP_FIELD(dos.e_res[1]);\n DUMP_FIELD(dos.e_res[2]);\n DUMP_FIELD(dos.e_res[3]);\n DUMP_FIELD(dos.e_oemid);\n DUMP_FIELD(dos.e_oeminfo);\n DUMP_FIELD(dos.e_res2[0]);\n DUMP_FIELD(dos.e_res2[1]);\n DUMP_FIELD(dos.e_res2[2]);\n DUMP_FIELD(dos.e_res2[3]);\n DUMP_FIELD(dos.e_res2[4]);\n DUMP_FIELD(dos.e_res2[5]);\n DUMP_FIELD(dos.e_res2[6]);\n DUMP_FIELD(dos.e_res2[7]);\n DUMP_FIELD(dos.e_res2[8]);\n DUMP_FIELD(dos.e_res2[9]);\n DUMP_FIELD(dos.e_lfanew);\n // Print Rich header info\n DUMP_BOOL_FIELD(rich.isPresent);\n if (p->peHeader.rich.isPresent) {\n DUMP_FIELD(rich.DecryptionKey);\n DUMP_FIELD(rich.Checksum);\n DUMP_BOOL_FIELD(rich.isValid);\n IterRich(p, printRich, NULL);\n }\n // print out some things\n DUMP_FIELD(nt.Signature);\n DUMP_FIELD(nt.FileHeader.Machine);\n DUMP_FIELD(nt.FileHeader.NumberOfSections);\n DUMP_DEC_FIELD(nt.FileHeader.TimeDateStamp);\n DUMP_FIELD(nt.FileHeader.PointerToSymbolTable);\n DUMP_DEC_FIELD(nt.FileHeader.NumberOfSymbols);\n DUMP_FIELD(nt.FileHeader.SizeOfOptionalHeader);\n DUMP_FIELD(nt.FileHeader.Characteristics);\n if (p->peHeader.nt.OptionalMagic == NT_OPTIONAL_32_MAGIC) {\n DUMP_FIELD(nt.OptionalHeader.Magic);\n DUMP_DEC_FIELD(nt.OptionalHeader.MajorLinkerVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.MinorLinkerVersion);\n DUMP_FIELD(nt.OptionalHeader.SizeOfCode);\n DUMP_FIELD(nt.OptionalHeader.SizeOfInitializedData);\n DUMP_FIELD(nt.OptionalHeader.SizeOfUninitializedData);\n DUMP_FIELD(nt.OptionalHeader.AddressOfEntryPoint);\n DUMP_FIELD(nt.OptionalHeader.BaseOfCode);\n 
DUMP_FIELD(nt.OptionalHeader.BaseOfData);\n DUMP_FIELD(nt.OptionalHeader.ImageBase);\n DUMP_FIELD(nt.OptionalHeader.SectionAlignment);\n DUMP_FIELD(nt.OptionalHeader.FileAlignment);\n DUMP_DEC_FIELD(nt.OptionalHeader.MajorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.MinorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.Win32VersionValue);\n DUMP_FIELD(nt.OptionalHeader.SizeOfImage);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeaders);\n DUMP_FIELD(nt.OptionalHeader.CheckSum);\n DUMP_FIELD(nt.OptionalHeader.Subsystem);\n DUMP_FIELD(nt.OptionalHeader.DllCharacteristics);\n DUMP_FIELD(nt.OptionalHeader.SizeOfStackReserve);\n DUMP_FIELD(nt.OptionalHeader.SizeOfStackCommit);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeapReserve);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeapCommit);\n DUMP_FIELD(nt.OptionalHeader.LoaderFlags);\n DUMP_DEC_FIELD(nt.OptionalHeader.NumberOfRvaAndSizes);\n } else {\n DUMP_FIELD(nt.OptionalHeader64.Magic);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MajorLinkerVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MinorLinkerVersion);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfCode);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfInitializedData);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfUninitializedData);\n DUMP_FIELD(nt.OptionalHeader64.AddressOfEntryPoint);\n DUMP_FIELD(nt.OptionalHeader64.BaseOfCode);\n DUMP_FIELD(nt.OptionalHeader64.ImageBase);\n DUMP_FIELD(nt.OptionalHeader64.SectionAlignment);\n DUMP_FIELD(nt.OptionalHeader64.FileAlignment);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MajorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MinorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.Win32VersionValue);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfImage);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeaders);\n DUMP_FIELD(nt.OptionalHeader64.CheckSum);\n DUMP_FIELD(nt.OptionalHeader64.Subsystem);\n DUMP_FIELD(nt.OptionalHeader64.DllCharacteristics);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfStackReserve);\n 
DUMP_FIELD(nt.OptionalHeader64.SizeOfStackCommit);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeapReserve);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeapCommit);\n DUMP_FIELD(nt.OptionalHeader64.LoaderFlags);\n DUMP_DEC_FIELD(nt.OptionalHeader64.NumberOfRvaAndSizes);\n }\n\n#undef DUMP_FIELD\n#undef DUMP_DEC_FIELD\n\n std::cout << \"Imports: \"\n << \"\\n\";\n IterImpVAString(p, printImports, NULL);\n std::cout << \"Relocations: \"\n << \"\\n\";\n IterRelocs(p, printRelocs, NULL);\n std::cout << \"Debug Directories: \"\n << \"\\n\";\n IterDebugs(p, printDebugs, NULL);\n std::cout << \"Symbols (symbol table): \"\n << \"\\n\";\n IterSymbols(p, printSymbols, NULL);\n std::cout << \"Sections: \"\n << \"\\n\";\n IterSec(p, printSecs, NULL);\n std::cout << \"Exports: \"\n << \"\\n\";\n IterExpFull(p, printExps, NULL);\n\n // read the first 8 bytes from the entry point and print them\n VA entryPoint;\n if (GetEntryPoint(p, entryPoint)) {\n std::cout << \"First 8 bytes from entry point (0x\";\n std::cout << std::hex << entryPoint << \"):\"\n << \"\\n\";\n for (std::size_t i = 0; i < 8; i++) {\n std::uint8_t b;\n if (!ReadByteAtVA(p, i + entryPoint, b)) {\n std::cout << \" ERR\";\n } else {\n std::cout << \" 0x\" << std::hex << static_cast(b);\n }\n }\n\n std::cout << \"\\n\";\n }\n\n std::cout << \"Resources: \"\n << \"\\n\";\n IterRsrc(p, printRsrc, NULL);\n\n DestructParsedPE(p);\n\n return 0;\n }\n}\n"], ["/lsfg-vk/framegen/src/core/sampler.cpp", "#include \n#include \n\n#include \"core/sampler.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nSampler::Sampler(const Core::Device& device,\n VkSamplerAddressMode mode,\n VkCompareOp compare,\n bool isWhite) {\n // create sampler\n const VkSamplerCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,\n .magFilter = VK_FILTER_LINEAR,\n .minFilter = VK_FILTER_LINEAR,\n .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,\n .addressModeU = mode,\n .addressModeV 
= mode,\n .addressModeW = mode,\n .compareOp = compare,\n .maxLod = VK_LOD_CLAMP_NONE,\n .borderColor =\n isWhite ? VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE\n : VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK\n };\n VkSampler samplerHandle{};\n auto res = vkCreateSampler(device.handle(), &desc, nullptr, &samplerHandle);\n if (res != VK_SUCCESS || samplerHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create sampler\");\n\n // store sampler in shared ptr\n this->sampler = std::shared_ptr(\n new VkSampler(samplerHandle),\n [dev = device.handle()](VkSampler* samplerHandle) {\n vkDestroySampler(dev, *samplerHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_chunk_isgn.h", "class DxbcIsgn {\n public:\n DxbcIsgn(DxbcReader reader, DxbcTag tag) {\n uint32_t elementCount = reader.readu32();\n reader.skip(sizeof(uint32_t));\n \n std::array componentTypes = {\n DxbcScalarType::Uint32, DxbcScalarType::Uint32,\n DxbcScalarType::Sint32, DxbcScalarType::Float32,\n };\n\n // https://github.com/DarkStarSword/3d-fixes/blob/master/dx11shaderanalyse.py#L101\n bool hasStream = (tag == \"ISG1\") || (tag == \"OSG1\") || (tag == \"PSG1\") || (tag == \"OSG5\");\n bool hasPrecision = (tag == \"ISG1\") || (tag == \"OSG1\") || (tag == \"PSG1\");\n \n for (uint32_t i = 0; i < elementCount; i++) {\n DxbcSgnEntry entry;\n entry.streamId = hasStream ? 
reader.readu32() : 0;\n entry.semanticName = reader.clone(reader.readu32()).readString();\n entry.semanticIndex = reader.readu32();\n entry.systemValue = static_cast(reader.readu32());\n entry.componentType = componentTypes.at(reader.readu32());\n entry.registerId = reader.readu32();\n\n uint32_t mask = reader.readu32();\n\n entry.componentMask = bit::extract(mask, 0, 3);\n entry.componentUsed = bit::extract(mask, 8, 11);\n\n if (hasPrecision)\n reader.readu32();\n\n m_entries.push_back(entry);\n }\n }\n ~DxbcIsgn() {\n \n }\n const DxbcSgnEntry* findByRegister(\n uint32_t registerId) const;\n const DxbcSgnEntry* find(\n const std::string& semanticName,\n uint32_t semanticIndex,\n uint32_t streamIndex) const;\n DxbcRegMask regMask(\n uint32_t registerId) const {\n DxbcRegMask mask;\n\n for (auto e = this->begin(); e != this->end(); e++) {\n if (e->registerId == registerId)\n mask |= e->componentMask;\n }\n\n return mask;\n }\n uint32_t maxRegisterCount() const {\n uint32_t result = 0;\n for (auto e = this->begin(); e != this->end(); e++)\n result = std::max(result, e->registerId + 1);\n return result;\n }\n static bool compareSemanticNames(\n const std::string& a,\n const std::string& b) {\n if (a.size() != b.size())\n return false;\n \n for (size_t i = 0; i < a.size(); i++) {\n char ac = a[i];\n char bc = b[i];\n\n if (ac != bc) {\n if (ac >= 'A' && ac <= 'Z') ac += 'a' - 'A';\n if (bc >= 'A' && bc <= 'Z') bc += 'a' - 'A';\n\n if (ac != bc)\n return false;\n }\n }\n \n return true;\n }\n private:\n std::vector m_entries;\n};"], ["/lsfg-vk/framegen/src/core/commandpool.cpp", "#include \n#include \n\n#include \"core/commandpool.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nCommandPool::CommandPool(const Core::Device& device) {\n // create command pool\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = device.getComputeFamilyIdx()\n 
};\n VkCommandPool commandPoolHandle{};\n auto res = vkCreateCommandPool(device.handle(), &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n\n // store command pool in shared ptr\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device.handle()](VkCommandPool* commandPoolHandle) {\n vkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pepy/pepy.cpp", "/*\n * Copyright (c) 2013, Wesley Shields . All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n#include \n#include \n#include \n\nusing namespace peparse;\n\n/* NOTE(ww): These don't necessarily have to be the same, but currently are.\n */\n#define PEPY_VERSION PEPARSE_VERSION\n\n/* These are used to across multiple objects. */\n#define PEPY_OBJECT_GET(OBJ, ATTR) \\\n static PyObject *pepy_##OBJ##_get_##ATTR(PyObject *self, void *closure) { \\\n Py_INCREF(((pepy_##OBJ *) self)->ATTR); \\\n return ((pepy_##OBJ *) self)->ATTR; \\\n }\n\n#define OBJECTGETTER(OBJ, ATTR, DOC) \\\n { \\\n (char *) #ATTR, (getter) pepy_##OBJ##_get_##ATTR, \\\n (setter) pepy_attr_not_writable, (char *) #DOC, NULL \\\n }\n\n/* 'OPTIONAL' references the fact that these are from the Optional Header */\n#define OBJECTGETTER_OPTIONAL(ATTR, DOC) \\\n { \\\n (char *) #ATTR, (getter) pepy_parsed_get_optional_##ATTR, \\\n (setter) pepy_attr_not_writable, (char *) #DOC, NULL \\\n }\n\nstatic PyObject *pepy_error;\n\nstruct pepy {\n PyObject_HEAD\n};\n\nstruct pepy_parsed {\n PyObject_HEAD parsed_pe *pe;\n};\n\nstruct pepy_section {\n PyObject_HEAD PyObject *name;\n PyObject *base;\n PyObject *length;\n PyObject *virtaddr;\n PyObject *virtsize;\n PyObject *numrelocs;\n PyObject *numlinenums;\n PyObject *characteristics;\n PyObject *data;\n};\n\nstruct pepy_resource {\n PyObject_HEAD PyObject *type_str;\n PyObject *name_str;\n PyObject *lang_str;\n PyObject *type;\n PyObject *name;\n PyObject *lang;\n PyObject *codepage;\n PyObject *RVA;\n PyObject *size;\n 
PyObject *data;\n};\n\nstruct pepy_import {\n PyObject_HEAD PyObject *name;\n PyObject *sym;\n PyObject *addr;\n};\n\nstruct pepy_export {\n PyObject_HEAD PyObject *mod;\n PyObject *func;\n PyObject *addr;\n};\n\nstruct pepy_relocation {\n PyObject_HEAD PyObject *type;\n PyObject *addr;\n};\n\n/* None of the attributes in these objects are writable. */\nstatic int\npepy_attr_not_writable(PyObject *self, PyObject *value, void *closure) {\n PyErr_SetString(PyExc_TypeError, \"Attribute not writable.\");\n return -1;\n}\n\nstatic PyObject *\npepy_import_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_import *self;\n\n self = (pepy_import *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_import_init(pepy_import *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OOO:pepy_import_init\", &self->name, &self->sym, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_import_dealloc(pepy_import *self) {\n Py_XDECREF(self->name);\n Py_XDECREF(self->sym);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(import, name);\nPEPY_OBJECT_GET(import, sym);\nPEPY_OBJECT_GET(import, addr);\n\nstatic PyGetSetDef pepy_import_getseters[] = {\n OBJECTGETTER(import, name, \"Name\"),\n OBJECTGETTER(import, sym, \"Symbol\"),\n OBJECTGETTER(import, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_import_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.import\", /* tp_name */\n sizeof(pepy_import), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_import_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy import 
object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_import_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_import_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_import_new /* tp_new */\n};\n\nstatic PyObject *\npepy_export_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_export *self;\n\n self = (pepy_export *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_export_init(pepy_export *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OOO:pepy_export_init\", &self->mod, &self->func, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_export_dealloc(pepy_export *self) {\n Py_XDECREF(self->mod);\n Py_XDECREF(self->func);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(export, mod);\nPEPY_OBJECT_GET(export, func);\nPEPY_OBJECT_GET(export, addr);\n\nstatic PyGetSetDef pepy_export_getseters[] = {\n OBJECTGETTER(export, mod, \"Module\"),\n OBJECTGETTER(export, func, \"Function\"),\n OBJECTGETTER(export, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_export_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.export\", /* tp_name */\n sizeof(pepy_export), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_export_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy export object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear 
*/\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_export_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_export_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_export_new /* tp_new */\n};\n\nstatic PyObject *\npepy_relocation_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_relocation *self;\n\n self = (pepy_relocation *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_relocation_init(pepy_relocation *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OO:pepy_relocation_init\", &self->type, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_relocation_dealloc(pepy_relocation *self) {\n Py_XDECREF(self->type);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(relocation, type);\nPEPY_OBJECT_GET(relocation, addr);\n\nstatic PyGetSetDef pepy_relocation_getseters[] = {\n OBJECTGETTER(relocation, type, \"Type\"),\n OBJECTGETTER(relocation, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_relocation_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.relocation\", /* tp_name */\n sizeof(pepy_relocation), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_relocation_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy relocation object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, 
/* tp_methods */\n 0, /* tp_members */\n pepy_relocation_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_relocation_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_relocation_new /* tp_new */\n};\n\nstatic PyObject *\npepy_section_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_section *self;\n\n self = (pepy_section *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_section_init(pepy_section *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(args,\n \"OOOOOOOOO:pepy_section_init\",\n &self->name,\n &self->base,\n &self->length,\n &self->virtaddr,\n &self->virtsize,\n &self->numrelocs,\n &self->numlinenums,\n &self->characteristics,\n &self->data))\n return -1;\n return 0;\n}\n\nstatic void pepy_section_dealloc(pepy_section *self) {\n Py_XDECREF(self->name);\n Py_XDECREF(self->base);\n Py_XDECREF(self->length);\n Py_XDECREF(self->virtaddr);\n Py_XDECREF(self->virtsize);\n Py_XDECREF(self->numrelocs);\n Py_XDECREF(self->numlinenums);\n Py_XDECREF(self->characteristics);\n Py_XDECREF(self->data);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(section, name);\nPEPY_OBJECT_GET(section, base);\nPEPY_OBJECT_GET(section, length);\nPEPY_OBJECT_GET(section, virtaddr);\nPEPY_OBJECT_GET(section, virtsize);\nPEPY_OBJECT_GET(section, numrelocs);\nPEPY_OBJECT_GET(section, numlinenums);\nPEPY_OBJECT_GET(section, characteristics);\nPEPY_OBJECT_GET(section, data);\n\nstatic PyGetSetDef pepy_section_getseters[] = {\n OBJECTGETTER(section, name, \"Name\"),\n OBJECTGETTER(section, base, \"Base address\"),\n OBJECTGETTER(section, length, \"Length\"),\n OBJECTGETTER(section, virtaddr, \"Virtual address\"),\n OBJECTGETTER(section, virtsize, \"Virtual size\"),\n OBJECTGETTER(section, numrelocs, \"Number of relocations\"),\n OBJECTGETTER(section, numlinenums, \"Number of line numbers\"),\n 
OBJECTGETTER(section, characteristics, \"Characteristics\"),\n OBJECTGETTER(section, data, \"Section data\"),\n {NULL}};\n\nstatic PyTypeObject pepy_section_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.section\", /* tp_name */\n sizeof(pepy_section), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_section_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy section object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_section_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_section_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_section_new /* tp_new */\n};\n\nstatic PyObject *\npepy_resource_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_resource *self;\n\n self = (pepy_resource *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_resource_init(pepy_resource *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(args,\n \"OOOOOOOOOO:pepy_resource_init\",\n &self->type_str,\n &self->name_str,\n &self->lang_str,\n &self->type,\n &self->name,\n &self->lang,\n &self->codepage,\n &self->RVA,\n &self->size,\n &self->data))\n return -1;\n\n return 0;\n}\n\nstatic void pepy_resource_dealloc(pepy_resource *self) {\n Py_XDECREF(self->type_str);\n Py_XDECREF(self->name_str);\n Py_XDECREF(self->lang_str);\n Py_XDECREF(self->type);\n Py_XDECREF(self->name);\n Py_XDECREF(self->lang);\n 
Py_XDECREF(self->codepage);\n Py_XDECREF(self->RVA);\n Py_XDECREF(self->size);\n Py_XDECREF(self->data);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(resource, type_str);\nPEPY_OBJECT_GET(resource, name_str);\nPEPY_OBJECT_GET(resource, lang_str);\nPEPY_OBJECT_GET(resource, type);\nPEPY_OBJECT_GET(resource, name);\nPEPY_OBJECT_GET(resource, lang);\nPEPY_OBJECT_GET(resource, codepage);\nPEPY_OBJECT_GET(resource, RVA);\nPEPY_OBJECT_GET(resource, size);\nPEPY_OBJECT_GET(resource, data);\n\nstatic PyObject *pepy_resource_type_as_str(PyObject *self, PyObject *args) {\n PyObject *ret;\n char *str;\n long type;\n\n type = PyLong_AsUnsignedLong(((pepy_resource *) self)->type);\n if (type == -1) {\n if (PyErr_Occurred()) {\n PyErr_PrintEx(0);\n return NULL;\n }\n }\n switch ((resource_type) type) {\n case (RT_CURSOR):\n str = (char *) \"CURSOR\";\n break;\n case (RT_BITMAP):\n str = (char *) \"BITMAP\";\n break;\n case (RT_ICON):\n str = (char *) \"ICON\";\n break;\n case (RT_MENU):\n str = (char *) \"MENU\";\n break;\n case (RT_DIALOG):\n str = (char *) \"DIALOG\";\n break;\n case (RT_STRING):\n str = (char *) \"STRING\";\n break;\n case (RT_FONTDIR):\n str = (char *) \"FONTDIR\";\n break;\n case (RT_FONT):\n str = (char *) \"FONT\";\n break;\n case (RT_ACCELERATOR):\n str = (char *) \"ACCELERATOR\";\n break;\n case (RT_RCDATA):\n str = (char *) \"RCDATA\";\n break;\n case (RT_MESSAGETABLE):\n str = (char *) \"MESSAGETABLE\";\n break;\n case (RT_GROUP_CURSOR):\n str = (char *) \"GROUP_CURSOR\";\n break;\n case (RT_GROUP_ICON):\n str = (char *) \"GROUP_ICON\";\n break;\n case (RT_VERSION):\n str = (char *) \"VERSION\";\n break;\n case (RT_DLGINCLUDE):\n str = (char *) \"DLGINCLUDE\";\n break;\n case (RT_PLUGPLAY):\n str = (char *) \"PLUGPLAY\";\n break;\n case (RT_VXD):\n str = (char *) \"VXD\";\n break;\n case (RT_ANICURSOR):\n str = (char *) \"ANICURSOR\";\n break;\n case (RT_ANIICON):\n str = (char *) \"ANIICON\";\n break;\n case (RT_HTML):\n str = 
(char *) \"HTML\";\n break;\n case (RT_MANIFEST):\n str = (char *) \"MANIFEST\";\n break;\n default:\n str = (char *) \"UNKNOWN\";\n break;\n }\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyMethodDef pepy_resource_methods[] = {\n {\"type_as_str\",\n pepy_resource_type_as_str,\n METH_NOARGS,\n \"Return the resource type as a string.\"},\n {NULL}};\n\nstatic PyGetSetDef pepy_resource_getseters[] = {\n OBJECTGETTER(resource, type_str, \"Type string\"),\n OBJECTGETTER(resource, name_str, \"Name string\"),\n OBJECTGETTER(resource, lang_str, \"Lang string\"),\n OBJECTGETTER(resource, type, \"Type\"),\n OBJECTGETTER(resource, name, \"Name\"),\n OBJECTGETTER(resource, lang, \"Language\"),\n OBJECTGETTER(resource, codepage, \"Codepage\"),\n OBJECTGETTER(resource, RVA, \"RVA\"),\n OBJECTGETTER(resource, size, \"Size (specified in RDAT)\"),\n OBJECTGETTER(resource, data, \"Resource data\"),\n {NULL}};\n\nstatic PyTypeObject pepy_resource_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.resource\", /* tp_name */\n sizeof(pepy_resource), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_resource_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy resource object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n pepy_resource_methods, /* tp_methods */\n 0, /* tp_members */\n pepy_resource_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, 
/* tp_dictoffset */\n (initproc) pepy_resource_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_resource_new /* tp_new */\n};\n\nstatic PyObject *\npepy_parsed_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_parsed *self;\n\n self = (pepy_parsed *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_parsed_init(pepy_parsed *self, PyObject *args, PyObject *kwds) {\n char *pe_path;\n\n if (!PyArg_ParseTuple(args, \"s:pepy_parse\", &pe_path))\n return -1;\n\n if (!pe_path)\n return -1;\n\n self->pe = ParsePEFromFile(pe_path);\n if (!self->pe) {\n return -2;\n }\n\n return 0;\n}\n\nstatic void pepy_parsed_dealloc(pepy_parsed *self) {\n DestructParsedPE(self->pe);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nstatic PyObject *pepy_parsed_get_entry_point(PyObject *self, PyObject *args) {\n VA entrypoint;\n PyObject *ret;\n\n if (!GetEntryPoint(((pepy_parsed *) self)->pe, entrypoint))\n Py_RETURN_NONE;\n\n ret = PyLong_FromUnsignedLongLong(entrypoint);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return object.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_machine_as_str(PyObject *self,\n PyObject *args) {\n PyObject *ret;\n const char *str;\n\n str = GetMachineAsString(((pepy_parsed *) self)->pe);\n if (!str)\n Py_RETURN_NONE;\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_subsystem_as_str(PyObject *self,\n PyObject *args) {\n PyObject *ret;\n const char *str;\n\n str = GetSubsystemAsString(((pepy_parsed *) self)->pe);\n if (!str)\n Py_RETURN_NONE;\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_bytes(PyObject *self, PyObject *args) {\n uint64_t start;\n Py_ssize_t len, idx;\n PyObject 
*ret;\n\n if (!PyArg_ParseTuple(args, \"KK:pepy_parsed_get_bytes\", &start, &len))\n return NULL;\n\n /*\n * XXX: a new implementation read all bytes in char* and use\n * PybyteArray_FromStringAndSize\n */\n\n uint8_t *buf = new (std::nothrow) uint8_t[len];\n if (!buf) {\n /* in case allocation failed */\n PyErr_SetString(pepy_error,\n \"Unable to create initial buffer (allocation failure).\");\n return NULL;\n }\n\n for (idx = 0; idx < len; idx++) {\n if (!ReadByteAtVA(((pepy_parsed *) self)->pe, start + idx, buf[idx]))\n break;\n }\n\n /* use idx as content length, if we get less than asked for */\n ret = PyByteArray_FromStringAndSize(reinterpret_cast(buf), idx);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new byte array.\");\n return NULL;\n }\n\n delete[] buf;\n return ret;\n}\n\n/*\n * This is used to convert bounded buffers into python byte array objects.\n * In case the buffer is NULL, return an empty bytearray.\n */\nstatic PyObject *pepy_data_converter(bounded_buffer *data) {\n PyObject *ret;\n const char *str;\n Py_ssize_t len;\n\n if (!data || !data->buf) {\n str = \"\";\n len = 0;\n } else {\n str = (const char *) data->buf;\n len = data->bufLen;\n }\n\n ret = PyByteArray_FromStringAndSize(str, len);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to convert data to byte array.\");\n return NULL;\n }\n\n return ret;\n}\n\nint section_callback(void *cbd,\n const VA &base,\n const std::string &name,\n const image_section_header &s,\n const bounded_buffer *data) {\n uint32_t buflen;\n PyObject *sect;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * I've seen some interesting binaries with a section where the\n * PointerToRawData and SizeOfRawData are invalid. 
The parser library\n * handles this by setting sectionData to NULL as returned by splitBuffer().\n * The sectionData (passed in to us as *data) is converted using\n * pepy_data_converter() which will return an empty string object.\n * However, we need to address the fact that we pass an invalid length\n * via data->bufLen.\n */\n if (!data) {\n buflen = 0;\n } else {\n buflen = data->bufLen;\n }\n\n /*\n * The tuple item order is important here. It is passed into the\n * section type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"sKKIIHHIO&\",\n name.c_str(),\n base,\n buflen,\n s.VirtualAddress,\n s.Misc.VirtualSize,\n s.NumberOfRelocations,\n s.NumberOfLinenumbers,\n s.Characteristics,\n pepy_data_converter,\n data);\n if (!tuple)\n return 1;\n\n sect = pepy_section_new(&pepy_section_type, NULL, NULL);\n if (!sect) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_section_init((pepy_section *) sect, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, sect) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(sect);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_sections(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterSec(((pepy_parsed *) self)->pe, section_callback, ret);\n\n return ret;\n}\n\nint resource_callback(void *cbd, const resource &r) {\n PyObject *rsrc;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * section type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"s#s#s#IIIIIIO&\",\n r.type_str.c_str(),\n r.type_str.length(),\n r.name_str.c_str(),\n r.name_str.length(),\n r.lang_str.c_str(),\n r.lang_str.length(),\n r.type,\n r.name,\n r.lang,\n r.codepage,\n r.RVA,\n r.size,\n pepy_data_converter,\n r.buf);\n if (!tuple)\n return 1;\n\n rsrc = pepy_resource_new(&pepy_resource_type, NULL, NULL);\n if (!rsrc) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_resource_init((pepy_resource *) rsrc, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new resource.\");\n return 1;\n }\n\n if (PyList_Append(list, rsrc) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(rsrc);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_resources(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterRsrc(((pepy_parsed *) self)->pe, resource_callback, ret);\n\n return ret;\n}\n\nint import_callback(void *cbd,\n const VA &addr,\n const std::string &name,\n const std::string &sym) {\n PyObject *imp;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * import type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"ssI\", name.c_str(), sym.c_str(), addr);\n if (!tuple)\n return 1;\n\n imp = pepy_import_new(&pepy_import_type, NULL, NULL);\n if (!imp) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_import_init((pepy_import *) imp, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, imp) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(imp);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_imports(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterImpVAString(((pepy_parsed *) self)->pe, import_callback, ret);\n\n return ret;\n}\n\nint export_callback(void *cbd,\n const VA &addr,\n const std::string &mod,\n const std::string &func) {\n PyObject *exp;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * export type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"ssI\", mod.c_str(), func.c_str(), addr);\n if (!tuple)\n return 1;\n\n exp = pepy_export_new(&pepy_export_type, NULL, NULL);\n if (!exp) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_export_init((pepy_export *) exp, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, exp) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(exp);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_exports(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n /*\n * This could use the same callback and object as imports but the names\n * of the attributes would be slightly off.\n */\n IterExpVA(((pepy_parsed *) self)->pe, export_callback, ret);\n\n return ret;\n}\n\nint reloc_callback(void *cbd, const VA &addr, const reloc_type &type) {\n PyObject *reloc;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * relocation type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"II\", type, addr);\n if (!tuple)\n return 1;\n\n reloc = pepy_relocation_new(&pepy_relocation_type, NULL, NULL);\n if (!reloc) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_relocation_init((pepy_relocation *) reloc, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, reloc) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(reloc);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_relocations(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterRelocs(((pepy_parsed *) self)->pe, reloc_callback, ret);\n\n return ret;\n}\n\n#define PEPY_PARSED_GET(ATTR, VAL) \\\n static PyObject *pepy_parsed_get_##ATTR(PyObject *self, void *closure) { \\\n PyObject *ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n return ret; \\\n }\n\nPEPY_PARSED_GET(signature, Signature);\nPEPY_PARSED_GET(machine, FileHeader.Machine);\nPEPY_PARSED_GET(numberofsections, FileHeader.NumberOfSections);\nPEPY_PARSED_GET(timedatestamp, FileHeader.TimeDateStamp);\nPEPY_PARSED_GET(numberofsymbols, FileHeader.NumberOfSymbols);\nPEPY_PARSED_GET(characteristics, FileHeader.Characteristics);\nPEPY_PARSED_GET(magic, OptionalMagic);\n\n/*\n * This is used to get things from the optional header, which can be either\n * the PE32 or PE32+ version, depending upon the magic value. 
Technically\n * the magic is stored in the OptionalHeader, but to make life easier pe-parse\n * stores the value in nt_header_32 along with the appropriate optional header.\n * This is why \"magic\" is handled above, and not here.\n */\n#define PEPY_PARSED_GET_OPTIONAL(ATTR, VAL) \\\n static PyObject *pepy_parsed_get_optional_##ATTR(PyObject *self, \\\n void *closure) { \\\n PyObject *ret = NULL; \\\n if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic == \\\n NT_OPTIONAL_32_MAGIC) { \\\n ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n } else if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic == \\\n NT_OPTIONAL_64_MAGIC) { \\\n ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader64.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n } else { \\\n PyErr_SetString(pepy_error, \"Bad magic value.\"); \\\n } \\\n return ret; \\\n }\n\nPEPY_PARSED_GET_OPTIONAL(majorlinkerver, MajorLinkerVersion);\nPEPY_PARSED_GET_OPTIONAL(minorlinkerver, MinorLinkerVersion);\nPEPY_PARSED_GET_OPTIONAL(codesize, SizeOfCode);\nPEPY_PARSED_GET_OPTIONAL(initdatasize, SizeOfInitializedData);\nPEPY_PARSED_GET_OPTIONAL(uninitdatasize, SizeOfUninitializedData);\nPEPY_PARSED_GET_OPTIONAL(entrypointaddr, AddressOfEntryPoint);\nPEPY_PARSED_GET_OPTIONAL(baseofcode, BaseOfCode);\nPEPY_PARSED_GET_OPTIONAL(imagebase, ImageBase);\nPEPY_PARSED_GET_OPTIONAL(sectionalignement, SectionAlignment);\nPEPY_PARSED_GET_OPTIONAL(filealignment, FileAlignment);\nPEPY_PARSED_GET_OPTIONAL(majorosver, MajorOperatingSystemVersion);\nPEPY_PARSED_GET_OPTIONAL(minorosver, MinorOperatingSystemVersion);\nPEPY_PARSED_GET_OPTIONAL(win32ver, Win32VersionValue);\nPEPY_PARSED_GET_OPTIONAL(imagesize, SizeOfImage);\nPEPY_PARSED_GET_OPTIONAL(headersize, 
SizeOfHeaders);\nPEPY_PARSED_GET_OPTIONAL(checksum, CheckSum);\nPEPY_PARSED_GET_OPTIONAL(subsystem, Subsystem);\nPEPY_PARSED_GET_OPTIONAL(dllcharacteristics, DllCharacteristics);\nPEPY_PARSED_GET_OPTIONAL(stackreservesize, SizeOfStackReserve);\nPEPY_PARSED_GET_OPTIONAL(stackcommitsize, SizeOfStackCommit);\nPEPY_PARSED_GET_OPTIONAL(heapreservesize, SizeOfHeapReserve);\nPEPY_PARSED_GET_OPTIONAL(heapcommitsize, SizeOfHeapCommit);\nPEPY_PARSED_GET_OPTIONAL(loaderflags, LoaderFlags);\nPEPY_PARSED_GET_OPTIONAL(rvasandsize, NumberOfRvaAndSizes);\n\n/*\n * BaseOfData is only in PE32, not PE32+. Thus, it uses a non-standard\n * getter function compared to the other shared fields.\n */\nstatic PyObject *pepy_parsed_get_optional_baseofdata(PyObject *self,\n void *closure) {\n PyObject *ret = NULL;\n if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic ==\n NT_OPTIONAL_32_MAGIC) {\n ret = PyLong_FromUnsignedLong(\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader.BaseOfData);\n if (!ret)\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\");\n } else if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic ==\n NT_OPTIONAL_64_MAGIC) {\n PyErr_SetString(PyExc_AttributeError, \"Not available on PE32+.\");\n } else {\n PyErr_SetString(pepy_error, \"Bad magic value.\");\n }\n return ret;\n}\n\nstatic PyGetSetDef pepy_parsed_getseters[] = {\n OBJECTGETTER(parsed, signature, \"PE Signature\"),\n OBJECTGETTER(parsed, machine, \"Machine\"),\n OBJECTGETTER(parsed, numberofsections, \"Number of sections\"),\n OBJECTGETTER(parsed, timedatestamp, \"Timedate stamp\"),\n OBJECTGETTER(parsed, numberofsymbols, \"Number of symbols\"),\n OBJECTGETTER(parsed, characteristics, \"Characteristics\"),\n OBJECTGETTER(parsed, magic, \"Magic\"),\n OBJECTGETTER_OPTIONAL(majorlinkerver, \"Major linker version\"),\n OBJECTGETTER_OPTIONAL(minorlinkerver, \"Minor linker version\"),\n OBJECTGETTER_OPTIONAL(codesize, \"Size of code\"),\n OBJECTGETTER_OPTIONAL(initdatasize, \"Size 
of initialized data\"),\n OBJECTGETTER_OPTIONAL(uninitdatasize, \"Size of uninitialized data\"),\n OBJECTGETTER_OPTIONAL(entrypointaddr, \"Address of entry point\"),\n OBJECTGETTER_OPTIONAL(baseofcode, \"Base address of code\"),\n OBJECTGETTER_OPTIONAL(imagebase, \"Image base address\"),\n OBJECTGETTER_OPTIONAL(sectionalignement, \"Section alignment\"),\n OBJECTGETTER_OPTIONAL(filealignment, \"File alignment\"),\n OBJECTGETTER_OPTIONAL(majorosver, \"Major OS version\"),\n OBJECTGETTER_OPTIONAL(minorosver, \"Minor OS version\"),\n OBJECTGETTER_OPTIONAL(win32ver, \"Win32 version\"),\n OBJECTGETTER_OPTIONAL(imagesize, \"Size of image\"),\n OBJECTGETTER_OPTIONAL(headersize, \"Size of headers\"),\n OBJECTGETTER_OPTIONAL(checksum, \"Checksum\"),\n OBJECTGETTER_OPTIONAL(subsystem, \"Subsystem\"),\n OBJECTGETTER_OPTIONAL(dllcharacteristics, \"DLL characteristics\"),\n OBJECTGETTER_OPTIONAL(stackreservesize, \"Size of stack reserve\"),\n OBJECTGETTER_OPTIONAL(stackcommitsize, \"Size of stack commit\"),\n OBJECTGETTER_OPTIONAL(heapreservesize, \"Size of heap reserve\"),\n OBJECTGETTER_OPTIONAL(heapcommitsize, \"Size of heap commit\"),\n OBJECTGETTER_OPTIONAL(loaderflags, \"Loader flags\"),\n OBJECTGETTER_OPTIONAL(rvasandsize, \"Number of RVA and sizes\"),\n /* Base of data is only available in PE32, not PE32+. 
*/\n {(char *) \"baseofdata\",\n (getter) pepy_parsed_get_optional_baseofdata,\n (setter) pepy_attr_not_writable,\n (char *) \"Base address of data\",\n NULL},\n {NULL}};\n\nstatic PyMethodDef pepy_parsed_methods[] = {\n {\"get_entry_point\",\n pepy_parsed_get_entry_point,\n METH_NOARGS,\n \"Return the entry point address.\"},\n {\"get_machine_as_str\",\n pepy_parsed_get_machine_as_str,\n METH_NOARGS,\n \"Return the machine as a human readable string.\"},\n {\"get_subsystem_as_str\",\n pepy_parsed_get_subsystem_as_str,\n METH_NOARGS,\n \"Return the subsystem as a human readable string.\"},\n {\"get_bytes\",\n pepy_parsed_get_bytes,\n METH_VARARGS,\n \"Return the first N bytes at a given address.\"},\n {\"get_sections\",\n pepy_parsed_get_sections,\n METH_NOARGS,\n \"Return a list of section objects.\"},\n {\"get_imports\",\n pepy_parsed_get_imports,\n METH_NOARGS,\n \"Return a list of import objects.\"},\n {\"get_exports\",\n pepy_parsed_get_exports,\n METH_NOARGS,\n \"Return a list of export objects.\"},\n {\"get_relocations\",\n pepy_parsed_get_relocations,\n METH_NOARGS,\n \"Return a list of relocation objects.\"},\n {\"get_resources\",\n pepy_parsed_get_resources,\n METH_NOARGS,\n \"Return a list of resource objects.\"},\n {NULL}};\n\nstatic PyTypeObject pepy_parsed_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.parsed\", /* tp_name */\n sizeof(pepy_parsed), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_parsed_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */\n \"pepy parsed object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset 
*/\n 0, /* tp_iter */\n 0, /* tp_iternext */\n pepy_parsed_methods, /* tp_methods */\n 0, /* tp_members */\n pepy_parsed_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_parsed_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_parsed_new /* tp_new */\n};\n\nstatic PyObject *pepy_parse(PyObject *self, PyObject *args) {\n PyObject *parsed;\n int ret;\n char *err_str = NULL;\n\n parsed = pepy_parsed_new(&pepy_parsed_type, NULL, NULL);\n if (!parsed) {\n PyErr_SetString(pepy_error, \"Unable to make new parsed object.\");\n return NULL;\n }\n\n ret = pepy_parsed_init((pepy_parsed *) parsed, args, NULL);\n if (ret < 0) {\n if (ret == -2) {\n // error (loc)\n size_t len = GetPEErrString().length() + GetPEErrLoc().length() + 4;\n err_str = (char *) malloc(len);\n if (!err_str)\n return PyErr_NoMemory();\n snprintf(err_str,\n len,\n \"%s (%s)\",\n GetPEErrString().c_str(),\n GetPEErrLoc().c_str());\n PyErr_SetString(pepy_error, err_str);\n } else\n PyErr_SetString(pepy_error, \"Unable to init new parsed object.\");\n return NULL;\n }\n\n return parsed;\n}\n\nstatic PyMethodDef pepy_methods[] = {\n {\"parse\", pepy_parse, METH_VARARGS, \"Parse PE from file.\"}, {NULL}};\n\nPyMODINIT_FUNC PyInit_pepy(void) {\n PyObject *m;\n\n if (PyType_Ready(&pepy_parsed_type) < 0 ||\n PyType_Ready(&pepy_section_type) < 0 ||\n PyType_Ready(&pepy_import_type) < 0 ||\n PyType_Ready(&pepy_export_type) < 0 ||\n PyType_Ready(&pepy_relocation_type) < 0 ||\n PyType_Ready(&pepy_resource_type) < 0)\n return NULL;\n\n static struct PyModuleDef moduledef = {\n PyModuleDef_HEAD_INIT,\n \"pepy\",\n \"Python interface to pe-parse.\",\n -1,\n pepy_methods,\n NULL,\n NULL,\n NULL,\n NULL,\n };\n\n m = PyModule_Create(&moduledef);\n if (!m)\n return NULL;\n\n pepy_error = PyErr_NewException((char *) \"pepy.error\", NULL, NULL);\n Py_INCREF(pepy_error);\n PyModule_AddObject(m, \"error\", pepy_error);\n\n 
Py_INCREF(&pepy_parsed_type);\n PyModule_AddObject(m, \"pepy_parsed\", (PyObject *) &pepy_parsed_type);\n\n Py_INCREF(&pepy_section_type);\n PyModule_AddObject(m, \"pepy_section\", (PyObject *) &pepy_section_type);\n\n Py_INCREF(&pepy_import_type);\n PyModule_AddObject(m, \"pepy_import\", (PyObject *) &pepy_import_type);\n\n Py_INCREF(&pepy_export_type);\n PyModule_AddObject(m, \"pepy_export\", (PyObject *) &pepy_export_type);\n\n Py_INCREF(&pepy_relocation_type);\n PyModule_AddObject(m, \"pepy_relocation\", (PyObject *) &pepy_relocation_type);\n\n Py_INCREF(&pepy_resource_type);\n PyModule_AddObject(m, \"pepy_resource\", (PyObject *) &pepy_resource_type);\n\n PyModule_AddStringMacro(m, PEPY_VERSION);\n PyModule_AddStringMacro(m, PEPARSE_VERSION);\n PyModule_AddStringConstant(m, \"__version__\", PEPY_VERSION);\n\n PyModule_AddIntMacro(m, MZ_MAGIC);\n PyModule_AddIntMacro(m, NT_MAGIC);\n PyModule_AddIntMacro(m, NUM_DIR_ENTRIES);\n PyModule_AddIntMacro(m, NT_OPTIONAL_32_MAGIC);\n PyModule_AddIntMacro(m, NT_SHORT_NAME_LEN);\n PyModule_AddIntMacro(m, DIR_EXPORT);\n PyModule_AddIntMacro(m, DIR_IMPORT);\n PyModule_AddIntMacro(m, DIR_RESOURCE);\n PyModule_AddIntMacro(m, DIR_EXCEPTION);\n PyModule_AddIntMacro(m, DIR_SECURITY);\n PyModule_AddIntMacro(m, DIR_BASERELOC);\n PyModule_AddIntMacro(m, DIR_DEBUG);\n PyModule_AddIntMacro(m, DIR_ARCHITECTURE);\n PyModule_AddIntMacro(m, DIR_GLOBALPTR);\n PyModule_AddIntMacro(m, DIR_TLS);\n PyModule_AddIntMacro(m, DIR_LOAD_CONFIG);\n PyModule_AddIntMacro(m, DIR_BOUND_IMPORT);\n PyModule_AddIntMacro(m, DIR_IAT);\n PyModule_AddIntMacro(m, DIR_DELAY_IMPORT);\n PyModule_AddIntMacro(m, DIR_COM_DESCRIPTOR);\n\n PyModule_AddIntMacro(m, IMAGE_SCN_TYPE_NO_PAD);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_CODE);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_INITIALIZED_DATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_UNINITIALIZED_DATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_OTHER);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_INFO);\n PyModule_AddIntMacro(m, 
IMAGE_SCN_LNK_REMOVE);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_COMDAT);\n PyModule_AddIntMacro(m, IMAGE_SCN_NO_DEFER_SPEC_EXC);\n PyModule_AddIntMacro(m, IMAGE_SCN_GPREL);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_FARDATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_PURGEABLE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_16BIT);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_LOCKED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_PRELOAD);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_1BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_2BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_4BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_8BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_16BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_32BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_64BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_128BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_256BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_512BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_1024BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_2048BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_4096BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_8192BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_MASK);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_NRELOC_OVFL);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_DISCARDABLE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_NOT_CACHED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_NOT_PAGED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_SHARED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_EXECUTE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_READ);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_WRITE);\n\n return m;\n}\n"], ["/lsfg-vk/thirdparty/volk/volk.h", "/**\n * volk\n *\n * Copyright (C) 2018-2025, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)\n * Report bugs and download new versions at https://github.com/zeux/volk\n *\n * This library is distributed under the MIT License. 
See notice at the end of this file.\n */\n/* clang-format off */\n#ifndef VOLK_H_\n#define VOLK_H_\n\n#if defined(VULKAN_H_) && !defined(VK_NO_PROTOTYPES)\n#\terror To use volk, you need to define VK_NO_PROTOTYPES before including vulkan.h\n#endif\n\n/* VOLK_GENERATE_VERSION_DEFINE */\n#define VOLK_HEADER_VERSION 323\n/* VOLK_GENERATE_VERSION_DEFINE */\n\n#ifndef VK_NO_PROTOTYPES\n#\tdefine VK_NO_PROTOTYPES\n#endif\n\n#ifndef VULKAN_H_\n#\tifdef VOLK_VULKAN_H_PATH\n#\t\tinclude VOLK_VULKAN_H_PATH\n#\telse /* Platform headers included below */\n#\t\tinclude \n#\t\tinclude \n#\tendif\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nstruct VolkDeviceTable;\n\n/**\n * Initialize library by loading Vulkan loader; call this function before creating the Vulkan instance.\n *\n * Returns VK_SUCCESS on success and VK_ERROR_INITIALIZATION_FAILED otherwise.\n */\nVkResult volkInitialize(void);\n\n/**\n * Initialize library by providing a custom handler to load global symbols.\n *\n * This function can be used instead of volkInitialize.\n * The handler function pointer will be asked to load global Vulkan symbols which require no instance\n * (such as vkCreateInstance, vkEnumerateInstance* and vkEnumerateInstanceVersion if available).\n */\nvoid volkInitializeCustom(PFN_vkGetInstanceProcAddr handler);\n\n/**\n * Finalize library by unloading Vulkan loader and resetting global symbols to NULL.\n *\n * This function does not need to be called on process exit (as loader will be unloaded automatically) or if volkInitialize failed.\n * In general this function is optional to call but may be useful in rare cases eg if volk needs to be reinitialized multiple times.\n */\nvoid volkFinalize(void);\n\n/**\n * Get Vulkan instance version supported by the Vulkan loader, or 0 if Vulkan isn't supported\n *\n * Returns 0 if volkInitialize wasn't called or failed.\n */\nuint32_t volkGetInstanceVersion(void);\n\n/**\n * Load global function pointers using application-created VkInstance; 
call this function after creating the Vulkan instance.\n */\nvoid volkLoadInstance(VkInstance instance);\n\n/**\n * Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.\n * Skips loading device-based function pointers, requires usage of volkLoadDevice afterwards.\n */\nvoid volkLoadInstanceOnly(VkInstance instance);\n\n/**\n * Load global function pointers using application-created VkDevice; call this function after creating the Vulkan device.\n *\n * Note: this is not suitable for applications that want to use multiple VkDevice objects concurrently.\n */\nvoid volkLoadDevice(VkDevice device);\n\n/**\n * Return last VkInstance for which global function pointers have been loaded via volkLoadInstance(),\n * or VK_NULL_HANDLE if volkLoadInstance() has not been called.\n */\nVkInstance volkGetLoadedInstance(void);\n\n/**\n * Return last VkDevice for which global function pointers have been loaded via volkLoadDevice(),\n * or VK_NULL_HANDLE if volkLoadDevice() has not been called.\n */\nVkDevice volkGetLoadedDevice(void);\n\n/**\n * Load function pointers using application-created VkDevice into a table.\n * Application should use function pointers from that table instead of using global function pointers.\n */\nvoid volkLoadDeviceTable(struct VolkDeviceTable* table, VkDevice device);\n\n#ifdef __cplusplus\n}\n#endif\n\n/* Instead of directly including vulkan.h, we include platform-specific parts of the SDK manually\n * This is necessary to avoid including platform headers in some cases (which vulkan.h does unconditionally)\n * and replace them with forward declarations, which makes build times faster and avoids macro conflicts.\n *\n * Note that we only replace platform-specific headers when the headers are known to be problematic: very large\n * or slow to compile (Windows), or introducing unprefixed macros which can cause conflicts (Windows, Xlib).\n */\n#if !defined(VULKAN_H_) && 
!defined(VOLK_VULKAN_H_PATH)\n\n#ifdef VK_USE_PLATFORM_ANDROID_KHR\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_FUCHSIA\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_IOS_MVK\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_MACOS_MVK\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_METAL_EXT\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_VI_NN\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_WAYLAND_KHR\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_WIN32_KHR\ntypedef unsigned long DWORD;\ntypedef const wchar_t* LPCWSTR;\ntypedef void* HANDLE;\ntypedef struct HINSTANCE__* HINSTANCE;\ntypedef struct HWND__* HWND;\ntypedef struct HMONITOR__* HMONITOR;\ntypedef struct _SECURITY_ATTRIBUTES SECURITY_ATTRIBUTES;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XCB_KHR\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XLIB_KHR\ntypedef struct _XDisplay Display;\ntypedef unsigned long Window;\ntypedef unsigned long VisualID;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_DIRECTFB_EXT\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT\ntypedef struct _XDisplay Display;\ntypedef unsigned long RROutput;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_GGP\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_SCREEN_QNX\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_SCI\n#include \n#include \n#include \n#endif\n\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n#include \n#endif\n\n#endif\n\n/**\n * Device-specific function pointer table\n */\nstruct VolkDeviceTable\n{\n\t/* VOLK_GENERATE_DEVICE_TABLE */\n#if defined(VK_VERSION_1_0)\n\tPFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\n\tPFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\n\tPFN_vkAllocateMemory vkAllocateMemory;\n\tPFN_vkBeginCommandBuffer vkBeginCommandBuffer;\n\tPFN_vkBindBufferMemory vkBindBufferMemory;\n\tPFN_vkBindImageMemory vkBindImageMemory;\n\tPFN_vkCmdBeginQuery vkCmdBeginQuery;\n\tPFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\n\tPFN_vkCmdBindDescriptorSets 
vkCmdBindDescriptorSets;\n\tPFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\n\tPFN_vkCmdBindPipeline vkCmdBindPipeline;\n\tPFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\n\tPFN_vkCmdBlitImage vkCmdBlitImage;\n\tPFN_vkCmdClearAttachments vkCmdClearAttachments;\n\tPFN_vkCmdClearColorImage vkCmdClearColorImage;\n\tPFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\n\tPFN_vkCmdCopyBuffer vkCmdCopyBuffer;\n\tPFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\n\tPFN_vkCmdCopyImage vkCmdCopyImage;\n\tPFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\n\tPFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\n\tPFN_vkCmdDispatch vkCmdDispatch;\n\tPFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\n\tPFN_vkCmdDraw vkCmdDraw;\n\tPFN_vkCmdDrawIndexed vkCmdDrawIndexed;\n\tPFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;\n\tPFN_vkCmdDrawIndirect vkCmdDrawIndirect;\n\tPFN_vkCmdEndQuery vkCmdEndQuery;\n\tPFN_vkCmdEndRenderPass vkCmdEndRenderPass;\n\tPFN_vkCmdExecuteCommands vkCmdExecuteCommands;\n\tPFN_vkCmdFillBuffer vkCmdFillBuffer;\n\tPFN_vkCmdNextSubpass vkCmdNextSubpass;\n\tPFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\n\tPFN_vkCmdPushConstants vkCmdPushConstants;\n\tPFN_vkCmdResetEvent vkCmdResetEvent;\n\tPFN_vkCmdResetQueryPool vkCmdResetQueryPool;\n\tPFN_vkCmdResolveImage vkCmdResolveImage;\n\tPFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\n\tPFN_vkCmdSetDepthBias vkCmdSetDepthBias;\n\tPFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\n\tPFN_vkCmdSetEvent vkCmdSetEvent;\n\tPFN_vkCmdSetLineWidth vkCmdSetLineWidth;\n\tPFN_vkCmdSetScissor vkCmdSetScissor;\n\tPFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\n\tPFN_vkCmdSetStencilReference vkCmdSetStencilReference;\n\tPFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\n\tPFN_vkCmdSetViewport vkCmdSetViewport;\n\tPFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\n\tPFN_vkCmdWaitEvents vkCmdWaitEvents;\n\tPFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\n\tPFN_vkCreateBuffer 
vkCreateBuffer;\n\tPFN_vkCreateBufferView vkCreateBufferView;\n\tPFN_vkCreateCommandPool vkCreateCommandPool;\n\tPFN_vkCreateComputePipelines vkCreateComputePipelines;\n\tPFN_vkCreateDescriptorPool vkCreateDescriptorPool;\n\tPFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\n\tPFN_vkCreateEvent vkCreateEvent;\n\tPFN_vkCreateFence vkCreateFence;\n\tPFN_vkCreateFramebuffer vkCreateFramebuffer;\n\tPFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\n\tPFN_vkCreateImage vkCreateImage;\n\tPFN_vkCreateImageView vkCreateImageView;\n\tPFN_vkCreatePipelineCache vkCreatePipelineCache;\n\tPFN_vkCreatePipelineLayout vkCreatePipelineLayout;\n\tPFN_vkCreateQueryPool vkCreateQueryPool;\n\tPFN_vkCreateRenderPass vkCreateRenderPass;\n\tPFN_vkCreateSampler vkCreateSampler;\n\tPFN_vkCreateSemaphore vkCreateSemaphore;\n\tPFN_vkCreateShaderModule vkCreateShaderModule;\n\tPFN_vkDestroyBuffer vkDestroyBuffer;\n\tPFN_vkDestroyBufferView vkDestroyBufferView;\n\tPFN_vkDestroyCommandPool vkDestroyCommandPool;\n\tPFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\n\tPFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\n\tPFN_vkDestroyDevice vkDestroyDevice;\n\tPFN_vkDestroyEvent vkDestroyEvent;\n\tPFN_vkDestroyFence vkDestroyFence;\n\tPFN_vkDestroyFramebuffer vkDestroyFramebuffer;\n\tPFN_vkDestroyImage vkDestroyImage;\n\tPFN_vkDestroyImageView vkDestroyImageView;\n\tPFN_vkDestroyPipeline vkDestroyPipeline;\n\tPFN_vkDestroyPipelineCache vkDestroyPipelineCache;\n\tPFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\n\tPFN_vkDestroyQueryPool vkDestroyQueryPool;\n\tPFN_vkDestroyRenderPass vkDestroyRenderPass;\n\tPFN_vkDestroySampler vkDestroySampler;\n\tPFN_vkDestroySemaphore vkDestroySemaphore;\n\tPFN_vkDestroyShaderModule vkDestroyShaderModule;\n\tPFN_vkDeviceWaitIdle vkDeviceWaitIdle;\n\tPFN_vkEndCommandBuffer vkEndCommandBuffer;\n\tPFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\n\tPFN_vkFreeCommandBuffers vkFreeCommandBuffers;\n\tPFN_vkFreeDescriptorSets 
vkFreeDescriptorSets;\n\tPFN_vkFreeMemory vkFreeMemory;\n\tPFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\n\tPFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\n\tPFN_vkGetDeviceQueue vkGetDeviceQueue;\n\tPFN_vkGetEventStatus vkGetEventStatus;\n\tPFN_vkGetFenceStatus vkGetFenceStatus;\n\tPFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\n\tPFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\n\tPFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\n\tPFN_vkGetPipelineCacheData vkGetPipelineCacheData;\n\tPFN_vkGetQueryPoolResults vkGetQueryPoolResults;\n\tPFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\n\tPFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\n\tPFN_vkMapMemory vkMapMemory;\n\tPFN_vkMergePipelineCaches vkMergePipelineCaches;\n\tPFN_vkQueueBindSparse vkQueueBindSparse;\n\tPFN_vkQueueSubmit vkQueueSubmit;\n\tPFN_vkQueueWaitIdle vkQueueWaitIdle;\n\tPFN_vkResetCommandBuffer vkResetCommandBuffer;\n\tPFN_vkResetCommandPool vkResetCommandPool;\n\tPFN_vkResetDescriptorPool vkResetDescriptorPool;\n\tPFN_vkResetEvent vkResetEvent;\n\tPFN_vkResetFences vkResetFences;\n\tPFN_vkSetEvent vkSetEvent;\n\tPFN_vkUnmapMemory vkUnmapMemory;\n\tPFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\n\tPFN_vkWaitForFences vkWaitForFences;\n#else\n\tPFN_vkVoidFunction padding_6ce80d51[120];\n#endif /* defined(VK_VERSION_1_0) */\n#if defined(VK_VERSION_1_1)\n\tPFN_vkBindBufferMemory2 vkBindBufferMemory2;\n\tPFN_vkBindImageMemory2 vkBindImageMemory2;\n\tPFN_vkCmdDispatchBase vkCmdDispatchBase;\n\tPFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;\n\tPFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;\n\tPFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;\n\tPFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;\n\tPFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;\n\tPFN_vkGetBufferMemoryRequirements2 
vkGetBufferMemoryRequirements2;\n\tPFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;\n\tPFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;\n\tPFN_vkGetDeviceQueue2 vkGetDeviceQueue2;\n\tPFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\n\tPFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;\n\tPFN_vkTrimCommandPool vkTrimCommandPool;\n\tPFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;\n#else\n\tPFN_vkVoidFunction padding_1ec56847[16];\n#endif /* defined(VK_VERSION_1_1) */\n#if defined(VK_VERSION_1_2)\n\tPFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;\n\tPFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;\n\tPFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;\n\tPFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;\n\tPFN_vkCmdNextSubpass2 vkCmdNextSubpass2;\n\tPFN_vkCreateRenderPass2 vkCreateRenderPass2;\n\tPFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;\n\tPFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;\n\tPFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;\n\tPFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;\n\tPFN_vkResetQueryPool vkResetQueryPool;\n\tPFN_vkSignalSemaphore vkSignalSemaphore;\n\tPFN_vkWaitSemaphores vkWaitSemaphores;\n#else\n\tPFN_vkVoidFunction padding_a3e00662[13];\n#endif /* defined(VK_VERSION_1_2) */\n#if defined(VK_VERSION_1_3)\n\tPFN_vkCmdBeginRendering vkCmdBeginRendering;\n\tPFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;\n\tPFN_vkCmdBlitImage2 vkCmdBlitImage2;\n\tPFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;\n\tPFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;\n\tPFN_vkCmdCopyImage2 vkCmdCopyImage2;\n\tPFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;\n\tPFN_vkCmdEndRendering vkCmdEndRendering;\n\tPFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\n\tPFN_vkCmdResetEvent2 vkCmdResetEvent2;\n\tPFN_vkCmdResolveImage2 vkCmdResolveImage2;\n\tPFN_vkCmdSetCullMode 
vkCmdSetCullMode;\n\tPFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;\n\tPFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;\n\tPFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;\n\tPFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;\n\tPFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;\n\tPFN_vkCmdSetEvent2 vkCmdSetEvent2;\n\tPFN_vkCmdSetFrontFace vkCmdSetFrontFace;\n\tPFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;\n\tPFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;\n\tPFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;\n\tPFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;\n\tPFN_vkCmdSetStencilOp vkCmdSetStencilOp;\n\tPFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;\n\tPFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;\n\tPFN_vkCmdWaitEvents2 vkCmdWaitEvents2;\n\tPFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;\n\tPFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;\n\tPFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;\n\tPFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;\n\tPFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;\n\tPFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;\n\tPFN_vkGetPrivateData vkGetPrivateData;\n\tPFN_vkQueueSubmit2 vkQueueSubmit2;\n\tPFN_vkSetPrivateData vkSetPrivateData;\n#else\n\tPFN_vkVoidFunction padding_ee798a88[36];\n#endif /* defined(VK_VERSION_1_3) */\n#if defined(VK_VERSION_1_4)\n\tPFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2;\n\tPFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2;\n\tPFN_vkCmdPushConstants2 vkCmdPushConstants2;\n\tPFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet;\n\tPFN_vkCmdPushDescriptorSet2 vkCmdPushDescriptorSet2;\n\tPFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate;\n\tPFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2;\n\tPFN_vkCmdSetLineStipple 
vkCmdSetLineStipple;\n\tPFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations;\n\tPFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices;\n\tPFN_vkCopyImageToImage vkCopyImageToImage;\n\tPFN_vkCopyImageToMemory vkCopyImageToMemory;\n\tPFN_vkCopyMemoryToImage vkCopyMemoryToImage;\n\tPFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout;\n\tPFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2;\n\tPFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity;\n\tPFN_vkMapMemory2 vkMapMemory2;\n\tPFN_vkTransitionImageLayout vkTransitionImageLayout;\n\tPFN_vkUnmapMemory2 vkUnmapMemory2;\n#else\n\tPFN_vkVoidFunction padding_82585fa3[19];\n#endif /* defined(VK_VERSION_1_4) */\n#if defined(VK_AMDX_shader_enqueue)\n\tPFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;\n\tPFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;\n\tPFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;\n\tPFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;\n\tPFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;\n\tPFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;\n\tPFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;\n#else\n\tPFN_vkVoidFunction padding_9d3e2bba[7];\n#endif /* defined(VK_AMDX_shader_enqueue) */\n#if defined(VK_AMD_anti_lag)\n\tPFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD;\n#else\n\tPFN_vkVoidFunction padding_cf792fb4[1];\n#endif /* defined(VK_AMD_anti_lag) */\n#if defined(VK_AMD_buffer_marker)\n\tPFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;\n#else\n\tPFN_vkVoidFunction padding_7836e92f[1];\n#endif /* defined(VK_AMD_buffer_marker) */\n#if defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\n\tPFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;\n#else\n\tPFN_vkVoidFunction 
padding_bbf9b7bb[1];\n#endif /* defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_AMD_display_native_hdr)\n\tPFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;\n#else\n\tPFN_vkVoidFunction padding_6b81b2fb[1];\n#endif /* defined(VK_AMD_display_native_hdr) */\n#if defined(VK_AMD_draw_indirect_count)\n\tPFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;\n\tPFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;\n#else\n\tPFN_vkVoidFunction padding_fbfa9964[2];\n#endif /* defined(VK_AMD_draw_indirect_count) */\n#if defined(VK_AMD_shader_info)\n\tPFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;\n#else\n\tPFN_vkVoidFunction padding_bfb754b[1];\n#endif /* defined(VK_AMD_shader_info) */\n#if defined(VK_ANDROID_external_memory_android_hardware_buffer)\n\tPFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;\n\tPFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;\n#else\n\tPFN_vkVoidFunction padding_c67b1beb[2];\n#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */\n#if defined(VK_ARM_data_graph)\n\tPFN_vkBindDataGraphPipelineSessionMemoryARM vkBindDataGraphPipelineSessionMemoryARM;\n\tPFN_vkCmdDispatchDataGraphARM vkCmdDispatchDataGraphARM;\n\tPFN_vkCreateDataGraphPipelineSessionARM vkCreateDataGraphPipelineSessionARM;\n\tPFN_vkCreateDataGraphPipelinesARM vkCreateDataGraphPipelinesARM;\n\tPFN_vkDestroyDataGraphPipelineSessionARM vkDestroyDataGraphPipelineSessionARM;\n\tPFN_vkGetDataGraphPipelineAvailablePropertiesARM vkGetDataGraphPipelineAvailablePropertiesARM;\n\tPFN_vkGetDataGraphPipelinePropertiesARM vkGetDataGraphPipelinePropertiesARM;\n\tPFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM vkGetDataGraphPipelineSessionBindPointRequirementsARM;\n\tPFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM vkGetDataGraphPipelineSessionMemoryRequirementsARM;\n#else\n\tPFN_vkVoidFunction 
padding_894d85d8[9];\n#endif /* defined(VK_ARM_data_graph) */\n#if defined(VK_ARM_tensors)\n\tPFN_vkBindTensorMemoryARM vkBindTensorMemoryARM;\n\tPFN_vkCmdCopyTensorARM vkCmdCopyTensorARM;\n\tPFN_vkCreateTensorARM vkCreateTensorARM;\n\tPFN_vkCreateTensorViewARM vkCreateTensorViewARM;\n\tPFN_vkDestroyTensorARM vkDestroyTensorARM;\n\tPFN_vkDestroyTensorViewARM vkDestroyTensorViewARM;\n\tPFN_vkGetDeviceTensorMemoryRequirementsARM vkGetDeviceTensorMemoryRequirementsARM;\n\tPFN_vkGetTensorMemoryRequirementsARM vkGetTensorMemoryRequirementsARM;\n#else\n\tPFN_vkVoidFunction padding_df67a729[8];\n#endif /* defined(VK_ARM_tensors) */\n#if defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer)\n\tPFN_vkGetTensorOpaqueCaptureDescriptorDataARM vkGetTensorOpaqueCaptureDescriptorDataARM;\n\tPFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM vkGetTensorViewOpaqueCaptureDescriptorDataARM;\n#else\n\tPFN_vkVoidFunction padding_9483bf7e[2];\n#endif /* defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)\n\tPFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_760a41f5[1];\n#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */\n#if defined(VK_EXT_buffer_device_address)\n\tPFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;\n#else\n\tPFN_vkVoidFunction padding_3b69d885[1];\n#endif /* defined(VK_EXT_buffer_device_address) */\n#if defined(VK_EXT_calibrated_timestamps)\n\tPFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;\n#else\n\tPFN_vkVoidFunction padding_d0981c89[1];\n#endif /* defined(VK_EXT_calibrated_timestamps) */\n#if defined(VK_EXT_color_write_enable)\n\tPFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_d301ecc3[1];\n#endif /* defined(VK_EXT_color_write_enable) */\n#if defined(VK_EXT_conditional_rendering)\n\tPFN_vkCmdBeginConditionalRenderingEXT 
vkCmdBeginConditionalRenderingEXT;\n\tPFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;\n#else\n\tPFN_vkVoidFunction padding_ab532c18[2];\n#endif /* defined(VK_EXT_conditional_rendering) */\n#if defined(VK_EXT_debug_marker)\n\tPFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;\n\tPFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;\n\tPFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;\n\tPFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;\n\tPFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;\n#else\n\tPFN_vkVoidFunction padding_89986968[5];\n#endif /* defined(VK_EXT_debug_marker) */\n#if defined(VK_EXT_depth_bias_control)\n\tPFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;\n#else\n\tPFN_vkVoidFunction padding_bcddab4d[1];\n#endif /* defined(VK_EXT_depth_bias_control) */\n#if defined(VK_EXT_descriptor_buffer)\n\tPFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;\n\tPFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;\n\tPFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;\n\tPFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetDescriptorEXT vkGetDescriptorEXT;\n\tPFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;\n\tPFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;\n\tPFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;\n#else\n\tPFN_vkVoidFunction padding_80aa973c[10];\n#endif /* defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))\n\tPFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT 
vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;\n#else\n\tPFN_vkVoidFunction padding_98d0fb33[1];\n#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */\n#if defined(VK_EXT_device_fault)\n\tPFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;\n#else\n\tPFN_vkVoidFunction padding_55095419[1];\n#endif /* defined(VK_EXT_device_fault) */\n#if defined(VK_EXT_device_generated_commands)\n\tPFN_vkCmdExecuteGeneratedCommandsEXT vkCmdExecuteGeneratedCommandsEXT;\n\tPFN_vkCmdPreprocessGeneratedCommandsEXT vkCmdPreprocessGeneratedCommandsEXT;\n\tPFN_vkCreateIndirectCommandsLayoutEXT vkCreateIndirectCommandsLayoutEXT;\n\tPFN_vkCreateIndirectExecutionSetEXT vkCreateIndirectExecutionSetEXT;\n\tPFN_vkDestroyIndirectCommandsLayoutEXT vkDestroyIndirectCommandsLayoutEXT;\n\tPFN_vkDestroyIndirectExecutionSetEXT vkDestroyIndirectExecutionSetEXT;\n\tPFN_vkGetGeneratedCommandsMemoryRequirementsEXT vkGetGeneratedCommandsMemoryRequirementsEXT;\n\tPFN_vkUpdateIndirectExecutionSetPipelineEXT vkUpdateIndirectExecutionSetPipelineEXT;\n\tPFN_vkUpdateIndirectExecutionSetShaderEXT vkUpdateIndirectExecutionSetShaderEXT;\n#else\n\tPFN_vkVoidFunction padding_7ba7ebaa[9];\n#endif /* defined(VK_EXT_device_generated_commands) */\n#if defined(VK_EXT_discard_rectangles)\n\tPFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;\n#else\n\tPFN_vkVoidFunction padding_d6355c2[1];\n#endif /* defined(VK_EXT_discard_rectangles) */\n#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2\n\tPFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;\n\tPFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;\n#else\n\tPFN_vkVoidFunction padding_7bb44f77[2];\n#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */\n#if defined(VK_EXT_display_control)\n\tPFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;\n\tPFN_vkGetSwapchainCounterEXT 
vkGetSwapchainCounterEXT;\n\tPFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT;\n\tPFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;\n#else\n\tPFN_vkVoidFunction padding_d30dfaaf[4];\n#endif /* defined(VK_EXT_display_control) */\n#if defined(VK_EXT_external_memory_host)\n\tPFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_357656e9[1];\n#endif /* defined(VK_EXT_external_memory_host) */\n#if defined(VK_EXT_external_memory_metal)\n\tPFN_vkGetMemoryMetalHandleEXT vkGetMemoryMetalHandleEXT;\n\tPFN_vkGetMemoryMetalHandlePropertiesEXT vkGetMemoryMetalHandlePropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_37d43fb[2];\n#endif /* defined(VK_EXT_external_memory_metal) */\n#if defined(VK_EXT_fragment_density_map_offset)\n\tPFN_vkCmdEndRendering2EXT vkCmdEndRendering2EXT;\n#else\n\tPFN_vkVoidFunction padding_9c90cf11[1];\n#endif /* defined(VK_EXT_fragment_density_map_offset) */\n#if defined(VK_EXT_full_screen_exclusive)\n\tPFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;\n\tPFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;\n#else\n\tPFN_vkVoidFunction padding_3859df46[2];\n#endif /* defined(VK_EXT_full_screen_exclusive) */\n#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1))\n\tPFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;\n#else\n\tPFN_vkVoidFunction padding_e5b48b5b[1];\n#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */\n#if defined(VK_EXT_hdr_metadata)\n\tPFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;\n#else\n\tPFN_vkVoidFunction padding_ca6d733c[1];\n#endif /* defined(VK_EXT_hdr_metadata) */\n#if defined(VK_EXT_host_image_copy)\n\tPFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;\n\tPFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;\n\tPFN_vkCopyMemoryToImageEXT 
vkCopyMemoryToImageEXT;\n\tPFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;\n#else\n\tPFN_vkVoidFunction padding_dd6d9b61[4];\n#endif /* defined(VK_EXT_host_image_copy) */\n#if defined(VK_EXT_host_query_reset)\n\tPFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;\n#else\n\tPFN_vkVoidFunction padding_34e58bd3[1];\n#endif /* defined(VK_EXT_host_query_reset) */\n#if defined(VK_EXT_image_drm_format_modifier)\n\tPFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_eb50dc14[1];\n#endif /* defined(VK_EXT_image_drm_format_modifier) */\n#if defined(VK_EXT_line_rasterization)\n\tPFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;\n#else\n\tPFN_vkVoidFunction padding_8a212c37[1];\n#endif /* defined(VK_EXT_line_rasterization) */\n#if defined(VK_EXT_mesh_shader)\n\tPFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;\n\tPFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;\n#else\n\tPFN_vkVoidFunction padding_f65e838[2];\n#endif /* defined(VK_EXT_mesh_shader) */\n#if defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\n\tPFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;\n#else\n\tPFN_vkVoidFunction padding_dcbaac2f[1];\n#endif /* defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_EXT_metal_objects)\n\tPFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;\n#else\n\tPFN_vkVoidFunction padding_df21f735[1];\n#endif /* defined(VK_EXT_metal_objects) */\n#if defined(VK_EXT_multi_draw)\n\tPFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;\n\tPFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;\n#else\n\tPFN_vkVoidFunction padding_ce8b93b6[2];\n#endif /* defined(VK_EXT_multi_draw) */\n#if defined(VK_EXT_opacity_micromap)\n\tPFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;\n\tPFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;\n\tPFN_vkCmdCopyMemoryToMicromapEXT 
vkCmdCopyMemoryToMicromapEXT;\n\tPFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;\n\tPFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT;\n\tPFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;\n\tPFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;\n\tPFN_vkCopyMicromapEXT vkCopyMicromapEXT;\n\tPFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;\n\tPFN_vkCreateMicromapEXT vkCreateMicromapEXT;\n\tPFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;\n\tPFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;\n\tPFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;\n\tPFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_fa41e53c[14];\n#endif /* defined(VK_EXT_opacity_micromap) */\n#if defined(VK_EXT_pageable_device_local_memory)\n\tPFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;\n#else\n\tPFN_vkVoidFunction padding_b2d2c2d7[1];\n#endif /* defined(VK_EXT_pageable_device_local_memory) */\n#if defined(VK_EXT_pipeline_properties)\n\tPFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_11313020[1];\n#endif /* defined(VK_EXT_pipeline_properties) */\n#if defined(VK_EXT_private_data)\n\tPFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;\n\tPFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;\n\tPFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;\n\tPFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;\n#else\n\tPFN_vkVoidFunction padding_108010f[4];\n#endif /* defined(VK_EXT_private_data) */\n#if defined(VK_EXT_sample_locations)\n\tPFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;\n#else\n\tPFN_vkVoidFunction padding_26f9079f[1];\n#endif /* defined(VK_EXT_sample_locations) */\n#if defined(VK_EXT_shader_module_identifier)\n\tPFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;\n\tPFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;\n#else\n\tPFN_vkVoidFunction 
padding_e10c8f86[2];\n#endif /* defined(VK_EXT_shader_module_identifier) */\n#if defined(VK_EXT_shader_object)\n\tPFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;\n\tPFN_vkCreateShadersEXT vkCreateShadersEXT;\n\tPFN_vkDestroyShaderEXT vkDestroyShaderEXT;\n\tPFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;\n#else\n\tPFN_vkVoidFunction padding_374f3e18[4];\n#endif /* defined(VK_EXT_shader_object) */\n#if defined(VK_EXT_swapchain_maintenance1)\n\tPFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;\n#else\n\tPFN_vkVoidFunction padding_ea55bf74[1];\n#endif /* defined(VK_EXT_swapchain_maintenance1) */\n#if defined(VK_EXT_transform_feedback)\n\tPFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;\n\tPFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;\n\tPFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;\n\tPFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;\n\tPFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;\n\tPFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;\n#else\n\tPFN_vkVoidFunction padding_36980658[6];\n#endif /* defined(VK_EXT_transform_feedback) */\n#if defined(VK_EXT_validation_cache)\n\tPFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;\n\tPFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;\n\tPFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;\n\tPFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;\n#else\n\tPFN_vkVoidFunction padding_b4f2df29[4];\n#endif /* defined(VK_EXT_validation_cache) */\n#if defined(VK_FUCHSIA_buffer_collection)\n\tPFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;\n\tPFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;\n\tPFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;\n\tPFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;\n\tPFN_vkSetBufferCollectionImageConstraintsFUCHSIA 
vkSetBufferCollectionImageConstraintsFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_8eaa27bc[5];\n#endif /* defined(VK_FUCHSIA_buffer_collection) */\n#if defined(VK_FUCHSIA_external_memory)\n\tPFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;\n\tPFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_e3cb8a67[2];\n#endif /* defined(VK_FUCHSIA_external_memory) */\n#if defined(VK_FUCHSIA_external_semaphore)\n\tPFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;\n\tPFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_3df6f656[2];\n#endif /* defined(VK_FUCHSIA_external_semaphore) */\n#if defined(VK_GOOGLE_display_timing)\n\tPFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;\n\tPFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;\n#else\n\tPFN_vkVoidFunction padding_2a6f50cd[2];\n#endif /* defined(VK_GOOGLE_display_timing) */\n#if defined(VK_HUAWEI_cluster_culling_shader)\n\tPFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;\n\tPFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_75b97be6[2];\n#endif /* defined(VK_HUAWEI_cluster_culling_shader) */\n#if defined(VK_HUAWEI_invocation_mask)\n\tPFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_c3a4569f[1];\n#endif /* defined(VK_HUAWEI_invocation_mask) */\n#if defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2\n\tPFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_2e923f32[1];\n#endif /* defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2 */\n#if defined(VK_HUAWEI_subpass_shading)\n\tPFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;\n#else\n\tPFN_vkVoidFunction 
padding_f766fdf5[1];\n#endif /* defined(VK_HUAWEI_subpass_shading) */\n#if defined(VK_INTEL_performance_query)\n\tPFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;\n\tPFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;\n\tPFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;\n\tPFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;\n\tPFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;\n\tPFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;\n\tPFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;\n\tPFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;\n\tPFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;\n#else\n\tPFN_vkVoidFunction padding_495a0a0b[9];\n#endif /* defined(VK_INTEL_performance_query) */\n#if defined(VK_KHR_acceleration_structure)\n\tPFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;\n\tPFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;\n\tPFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;\n\tPFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;\n\tPFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;\n\tPFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;\n\tPFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;\n\tPFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;\n\tPFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;\n\tPFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;\n\tPFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;\n\tPFN_vkDestroyAccelerationStructureKHR 
vkDestroyAccelerationStructureKHR;\n\tPFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;\n\tPFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;\n\tPFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;\n\tPFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_5a999b78[16];\n#endif /* defined(VK_KHR_acceleration_structure) */\n#if defined(VK_KHR_bind_memory2)\n\tPFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;\n\tPFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;\n#else\n\tPFN_vkVoidFunction padding_ed8481f5[2];\n#endif /* defined(VK_KHR_bind_memory2) */\n#if defined(VK_KHR_buffer_device_address)\n\tPFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;\n\tPFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;\n\tPFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;\n#else\n\tPFN_vkVoidFunction padding_178fdf81[3];\n#endif /* defined(VK_KHR_buffer_device_address) */\n#if defined(VK_KHR_calibrated_timestamps)\n\tPFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR;\n#else\n\tPFN_vkVoidFunction padding_8fd6f40d[1];\n#endif /* defined(VK_KHR_calibrated_timestamps) */\n#if defined(VK_KHR_copy_commands2)\n\tPFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;\n\tPFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;\n\tPFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;\n\tPFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;\n\tPFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;\n\tPFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;\n#else\n\tPFN_vkVoidFunction padding_4c841ff2[6];\n#endif /* defined(VK_KHR_copy_commands2) */\n#if defined(VK_KHR_create_renderpass2)\n\tPFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;\n\tPFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;\n\tPFN_vkCmdNextSubpass2KHR 
vkCmdNextSubpass2KHR;\n\tPFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;\n#else\n\tPFN_vkVoidFunction padding_2a0a8727[4];\n#endif /* defined(VK_KHR_create_renderpass2) */\n#if defined(VK_KHR_deferred_host_operations)\n\tPFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;\n\tPFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;\n\tPFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;\n\tPFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;\n\tPFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;\n#else\n\tPFN_vkVoidFunction padding_346287bb[5];\n#endif /* defined(VK_KHR_deferred_host_operations) */\n#if defined(VK_KHR_descriptor_update_template)\n\tPFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;\n\tPFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;\n\tPFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;\n#else\n\tPFN_vkVoidFunction padding_3d63aec0[3];\n#endif /* defined(VK_KHR_descriptor_update_template) */\n#if defined(VK_KHR_device_group)\n\tPFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;\n\tPFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;\n\tPFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;\n#else\n\tPFN_vkVoidFunction padding_5ebe16bd[3];\n#endif /* defined(VK_KHR_device_group) */\n#if defined(VK_KHR_display_swapchain)\n\tPFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;\n#else\n\tPFN_vkVoidFunction padding_12099367[1];\n#endif /* defined(VK_KHR_display_swapchain) */\n#if defined(VK_KHR_draw_indirect_count)\n\tPFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;\n\tPFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;\n#else\n\tPFN_vkVoidFunction padding_7b5bc4c1[2];\n#endif /* defined(VK_KHR_draw_indirect_count) */\n#if defined(VK_KHR_dynamic_rendering)\n\tPFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;\n\tPFN_vkCmdEndRenderingKHR 
vkCmdEndRenderingKHR;\n#else\n\tPFN_vkVoidFunction padding_b80f75a5[2];\n#endif /* defined(VK_KHR_dynamic_rendering) */\n#if defined(VK_KHR_dynamic_rendering_local_read)\n\tPFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR;\n\tPFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR;\n#else\n\tPFN_vkVoidFunction padding_b1510532[2];\n#endif /* defined(VK_KHR_dynamic_rendering_local_read) */\n#if defined(VK_KHR_external_fence_fd)\n\tPFN_vkGetFenceFdKHR vkGetFenceFdKHR;\n\tPFN_vkImportFenceFdKHR vkImportFenceFdKHR;\n#else\n\tPFN_vkVoidFunction padding_a2c787d5[2];\n#endif /* defined(VK_KHR_external_fence_fd) */\n#if defined(VK_KHR_external_fence_win32)\n\tPFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;\n\tPFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;\n#else\n\tPFN_vkVoidFunction padding_55d8e6a9[2];\n#endif /* defined(VK_KHR_external_fence_win32) */\n#if defined(VK_KHR_external_memory_fd)\n\tPFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;\n\tPFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_982d9e19[2];\n#endif /* defined(VK_KHR_external_memory_fd) */\n#if defined(VK_KHR_external_memory_win32)\n\tPFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;\n\tPFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_4af9e25a[2];\n#endif /* defined(VK_KHR_external_memory_win32) */\n#if defined(VK_KHR_external_semaphore_fd)\n\tPFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;\n\tPFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;\n#else\n\tPFN_vkVoidFunction padding_2237b7cf[2];\n#endif /* defined(VK_KHR_external_semaphore_fd) */\n#if defined(VK_KHR_external_semaphore_win32)\n\tPFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;\n\tPFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;\n#else\n\tPFN_vkVoidFunction padding_c18dea52[2];\n#endif /* 
defined(VK_KHR_external_semaphore_win32) */\n#if defined(VK_KHR_fragment_shading_rate)\n\tPFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;\n#else\n\tPFN_vkVoidFunction padding_f91b0a90[1];\n#endif /* defined(VK_KHR_fragment_shading_rate) */\n#if defined(VK_KHR_get_memory_requirements2)\n\tPFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;\n\tPFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;\n\tPFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;\n#else\n\tPFN_vkVoidFunction padding_79d9c5c4[3];\n#endif /* defined(VK_KHR_get_memory_requirements2) */\n#if defined(VK_KHR_line_rasterization)\n\tPFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR;\n#else\n\tPFN_vkVoidFunction padding_83c2939[1];\n#endif /* defined(VK_KHR_line_rasterization) */\n#if defined(VK_KHR_maintenance1)\n\tPFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;\n#else\n\tPFN_vkVoidFunction padding_4b372c56[1];\n#endif /* defined(VK_KHR_maintenance1) */\n#if defined(VK_KHR_maintenance3)\n\tPFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;\n#else\n\tPFN_vkVoidFunction padding_5ea7858d[1];\n#endif /* defined(VK_KHR_maintenance3) */\n#if defined(VK_KHR_maintenance4)\n\tPFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;\n\tPFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;\n\tPFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR;\n#else\n\tPFN_vkVoidFunction padding_8e2d4198[3];\n#endif /* defined(VK_KHR_maintenance4) */\n#if defined(VK_KHR_maintenance5)\n\tPFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;\n\tPFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;\n\tPFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;\n\tPFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;\n#else\n\tPFN_vkVoidFunction padding_37040339[4];\n#endif /* 
defined(VK_KHR_maintenance5) */\n#if defined(VK_KHR_maintenance6)\n\tPFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR;\n\tPFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR;\n#else\n\tPFN_vkVoidFunction padding_442955d8[2];\n#endif /* defined(VK_KHR_maintenance6) */\n#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor)\n\tPFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR;\n\tPFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR;\n#else\n\tPFN_vkVoidFunction padding_80e8513f[2];\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer)\n\tPFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT;\n\tPFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT;\n#else\n\tPFN_vkVoidFunction padding_2816b9cd[2];\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_KHR_map_memory2)\n\tPFN_vkMapMemory2KHR vkMapMemory2KHR;\n\tPFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;\n#else\n\tPFN_vkVoidFunction padding_5a6d8986[2];\n#endif /* defined(VK_KHR_map_memory2) */\n#if defined(VK_KHR_performance_query)\n\tPFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;\n\tPFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;\n#else\n\tPFN_vkVoidFunction padding_76f2673b[2];\n#endif /* defined(VK_KHR_performance_query) */\n#if defined(VK_KHR_pipeline_binary)\n\tPFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR;\n\tPFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR;\n\tPFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR;\n\tPFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR;\n\tPFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR;\n#else\n\tPFN_vkVoidFunction padding_65232810[5];\n#endif /* defined(VK_KHR_pipeline_binary) */\n#if 
defined(VK_KHR_pipeline_executable_properties)\n\tPFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;\n\tPFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;\n\tPFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;\n#else\n\tPFN_vkVoidFunction padding_f7629b1e[3];\n#endif /* defined(VK_KHR_pipeline_executable_properties) */\n#if defined(VK_KHR_present_wait)\n\tPFN_vkWaitForPresentKHR vkWaitForPresentKHR;\n#else\n\tPFN_vkVoidFunction padding_b16cbe03[1];\n#endif /* defined(VK_KHR_present_wait) */\n#if defined(VK_KHR_present_wait2)\n\tPFN_vkWaitForPresent2KHR vkWaitForPresent2KHR;\n#else\n\tPFN_vkVoidFunction padding_7401483a[1];\n#endif /* defined(VK_KHR_present_wait2) */\n#if defined(VK_KHR_push_descriptor)\n\tPFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;\n#else\n\tPFN_vkVoidFunction padding_8f7712ad[1];\n#endif /* defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)\n\tPFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;\n#else\n\tPFN_vkVoidFunction padding_dd5f9b4a[1];\n#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_ray_tracing_pipeline)\n\tPFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;\n\tPFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;\n\tPFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;\n\tPFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;\n\tPFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;\n\tPFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;\n\tPFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;\n#else\n\tPFN_vkVoidFunction padding_af99aedc[7];\n#endif /* defined(VK_KHR_ray_tracing_pipeline) */\n#if 
defined(VK_KHR_sampler_ycbcr_conversion)\n\tPFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;\n\tPFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;\n#else\n\tPFN_vkVoidFunction padding_88e61b30[2];\n#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */\n#if defined(VK_KHR_shared_presentable_image)\n\tPFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;\n#else\n\tPFN_vkVoidFunction padding_1ff3379[1];\n#endif /* defined(VK_KHR_shared_presentable_image) */\n#if defined(VK_KHR_swapchain)\n\tPFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\n\tPFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\n\tPFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\n\tPFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\n\tPFN_vkQueuePresentKHR vkQueuePresentKHR;\n#else\n\tPFN_vkVoidFunction padding_a1de893b[5];\n#endif /* defined(VK_KHR_swapchain) */\n#if defined(VK_KHR_swapchain_maintenance1)\n\tPFN_vkReleaseSwapchainImagesKHR vkReleaseSwapchainImagesKHR;\n#else\n\tPFN_vkVoidFunction padding_e032d5c4[1];\n#endif /* defined(VK_KHR_swapchain_maintenance1) */\n#if defined(VK_KHR_synchronization2)\n\tPFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;\n\tPFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;\n\tPFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;\n\tPFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;\n\tPFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;\n\tPFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;\n#else\n\tPFN_vkVoidFunction padding_e85bf128[6];\n#endif /* defined(VK_KHR_synchronization2) */\n#if defined(VK_KHR_timeline_semaphore)\n\tPFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;\n\tPFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;\n\tPFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;\n#else\n\tPFN_vkVoidFunction padding_c799d931[3];\n#endif /* defined(VK_KHR_timeline_semaphore) */\n#if defined(VK_KHR_video_decode_queue)\n\tPFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;\n#else\n\tPFN_vkVoidFunction padding_7a7cc7ad[1];\n#endif /* 
defined(VK_KHR_video_decode_queue) */\n#if defined(VK_KHR_video_encode_queue)\n\tPFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;\n\tPFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;\n#else\n\tPFN_vkVoidFunction padding_f2997fb4[2];\n#endif /* defined(VK_KHR_video_encode_queue) */\n#if defined(VK_KHR_video_queue)\n\tPFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;\n\tPFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;\n\tPFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;\n\tPFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;\n\tPFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;\n\tPFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;\n\tPFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;\n\tPFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;\n\tPFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;\n\tPFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;\n#else\n\tPFN_vkVoidFunction padding_98fb7016[10];\n#endif /* defined(VK_KHR_video_queue) */\n#if defined(VK_NVX_binary_import)\n\tPFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;\n\tPFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;\n\tPFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;\n\tPFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;\n\tPFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;\n#else\n\tPFN_vkVoidFunction padding_eb54309b[5];\n#endif /* defined(VK_NVX_binary_import) */\n#if defined(VK_NVX_image_view_handle)\n\tPFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;\n#else\n\tPFN_vkVoidFunction padding_887f6736[1];\n#endif /* defined(VK_NVX_image_view_handle) */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3\n\tPFN_vkGetImageViewHandle64NVX vkGetImageViewHandle64NVX;\n#else\n\tPFN_vkVoidFunction padding_64ad40e2[1];\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3 */\n#if 
defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2\n\tPFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;\n#else\n\tPFN_vkVoidFunction padding_d290479a[1];\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_clip_space_w_scaling)\n\tPFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;\n#else\n\tPFN_vkVoidFunction padding_88d7eb2e[1];\n#endif /* defined(VK_NV_clip_space_w_scaling) */\n#if defined(VK_NV_cluster_acceleration_structure)\n\tPFN_vkCmdBuildClusterAccelerationStructureIndirectNV vkCmdBuildClusterAccelerationStructureIndirectNV;\n\tPFN_vkGetClusterAccelerationStructureBuildSizesNV vkGetClusterAccelerationStructureBuildSizesNV;\n#else\n\tPFN_vkVoidFunction padding_60e35395[2];\n#endif /* defined(VK_NV_cluster_acceleration_structure) */\n#if defined(VK_NV_cooperative_vector)\n\tPFN_vkCmdConvertCooperativeVectorMatrixNV vkCmdConvertCooperativeVectorMatrixNV;\n\tPFN_vkConvertCooperativeVectorMatrixNV vkConvertCooperativeVectorMatrixNV;\n#else\n\tPFN_vkVoidFunction padding_f4a887d0[2];\n#endif /* defined(VK_NV_cooperative_vector) */\n#if defined(VK_NV_copy_memory_indirect)\n\tPFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;\n\tPFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;\n#else\n\tPFN_vkVoidFunction padding_9536230e[2];\n#endif /* defined(VK_NV_copy_memory_indirect) */\n#if defined(VK_NV_cuda_kernel_launch)\n\tPFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;\n\tPFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;\n\tPFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;\n\tPFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;\n\tPFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;\n\tPFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;\n#else\n\tPFN_vkVoidFunction padding_2eabdf3b[6];\n#endif /* defined(VK_NV_cuda_kernel_launch) */\n#if defined(VK_NV_device_diagnostic_checkpoints)\n\tPFN_vkCmdSetCheckpointNV 
vkCmdSetCheckpointNV;\n\tPFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;\n#else\n\tPFN_vkVoidFunction padding_adaa5a21[2];\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) */\n#if defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\n\tPFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;\n#else\n\tPFN_vkVoidFunction padding_c776633d[1];\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_NV_device_generated_commands)\n\tPFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;\n\tPFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;\n\tPFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;\n\tPFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;\n\tPFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;\n\tPFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;\n#else\n\tPFN_vkVoidFunction padding_4c7e4395[6];\n#endif /* defined(VK_NV_device_generated_commands) */\n#if defined(VK_NV_device_generated_commands_compute)\n\tPFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;\n\tPFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV;\n\tPFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;\n#else\n\tPFN_vkVoidFunction padding_5195094c[3];\n#endif /* defined(VK_NV_device_generated_commands_compute) */\n#if defined(VK_NV_external_compute_queue)\n\tPFN_vkCreateExternalComputeQueueNV vkCreateExternalComputeQueueNV;\n\tPFN_vkDestroyExternalComputeQueueNV vkDestroyExternalComputeQueueNV;\n\tPFN_vkGetExternalComputeQueueDataNV vkGetExternalComputeQueueDataNV;\n#else\n\tPFN_vkVoidFunction padding_4f947e0b[3];\n#endif /* defined(VK_NV_external_compute_queue) */\n#if 
defined(VK_NV_external_memory_rdma)\n\tPFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;\n#else\n\tPFN_vkVoidFunction padding_920e405[1];\n#endif /* defined(VK_NV_external_memory_rdma) */\n#if defined(VK_NV_external_memory_win32)\n\tPFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;\n#else\n\tPFN_vkVoidFunction padding_c13d6f3a[1];\n#endif /* defined(VK_NV_external_memory_win32) */\n#if defined(VK_NV_fragment_shading_rate_enums)\n\tPFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;\n#else\n\tPFN_vkVoidFunction padding_4979ca14[1];\n#endif /* defined(VK_NV_fragment_shading_rate_enums) */\n#if defined(VK_NV_low_latency2)\n\tPFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;\n\tPFN_vkLatencySleepNV vkLatencySleepNV;\n\tPFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;\n\tPFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;\n\tPFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;\n#else\n\tPFN_vkVoidFunction padding_fabf8b19[5];\n#endif /* defined(VK_NV_low_latency2) */\n#if defined(VK_NV_memory_decompression)\n\tPFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;\n\tPFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;\n#else\n\tPFN_vkVoidFunction padding_706009[2];\n#endif /* defined(VK_NV_memory_decompression) */\n#if defined(VK_NV_mesh_shader)\n\tPFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;\n\tPFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV;\n#else\n\tPFN_vkVoidFunction padding_ac232758[2];\n#endif /* defined(VK_NV_mesh_shader) */\n#if defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\n\tPFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;\n#else\n\tPFN_vkVoidFunction padding_53495be7[1];\n#endif /* defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_NV_optical_flow)\n\tPFN_vkBindOpticalFlowSessionImageNV 
vkBindOpticalFlowSessionImageNV;\n\tPFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;\n\tPFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;\n\tPFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;\n#else\n\tPFN_vkVoidFunction padding_f67571eb[4];\n#endif /* defined(VK_NV_optical_flow) */\n#if defined(VK_NV_partitioned_acceleration_structure)\n\tPFN_vkCmdBuildPartitionedAccelerationStructuresNV vkCmdBuildPartitionedAccelerationStructuresNV;\n\tPFN_vkGetPartitionedAccelerationStructuresBuildSizesNV vkGetPartitionedAccelerationStructuresBuildSizesNV;\n#else\n\tPFN_vkVoidFunction padding_d27c8c6d[2];\n#endif /* defined(VK_NV_partitioned_acceleration_structure) */\n#if defined(VK_NV_ray_tracing)\n\tPFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;\n\tPFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;\n\tPFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;\n\tPFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;\n\tPFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;\n\tPFN_vkCompileDeferredNV vkCompileDeferredNV;\n\tPFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;\n\tPFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;\n\tPFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;\n\tPFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;\n\tPFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;\n\tPFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;\n#else\n\tPFN_vkVoidFunction padding_feefbeac[12];\n#endif /* defined(VK_NV_ray_tracing) */\n#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2\n\tPFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;\n#else\n\tPFN_vkVoidFunction padding_e3c24f80[1];\n#endif /* defined(VK_NV_scissor_exclusive) && 
VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_scissor_exclusive)\n\tPFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;\n#else\n\tPFN_vkVoidFunction padding_8e88d86c[1];\n#endif /* defined(VK_NV_scissor_exclusive) */\n#if defined(VK_NV_shading_rate_image)\n\tPFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;\n\tPFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;\n\tPFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;\n#else\n\tPFN_vkVoidFunction padding_92a0767f[3];\n#endif /* defined(VK_NV_shading_rate_image) */\n#if defined(VK_QCOM_tile_memory_heap)\n\tPFN_vkCmdBindTileMemoryQCOM vkCmdBindTileMemoryQCOM;\n#else\n\tPFN_vkVoidFunction padding_e2d55d04[1];\n#endif /* defined(VK_QCOM_tile_memory_heap) */\n#if defined(VK_QCOM_tile_properties)\n\tPFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;\n\tPFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;\n#else\n\tPFN_vkVoidFunction padding_be12e32[2];\n#endif /* defined(VK_QCOM_tile_properties) */\n#if defined(VK_QCOM_tile_shading)\n\tPFN_vkCmdBeginPerTileExecutionQCOM vkCmdBeginPerTileExecutionQCOM;\n\tPFN_vkCmdDispatchTileQCOM vkCmdDispatchTileQCOM;\n\tPFN_vkCmdEndPerTileExecutionQCOM vkCmdEndPerTileExecutionQCOM;\n#else\n\tPFN_vkVoidFunction padding_fcd9e1df[3];\n#endif /* defined(VK_QCOM_tile_shading) */\n#if defined(VK_QNX_external_memory_screen_buffer)\n\tPFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;\n#else\n\tPFN_vkVoidFunction padding_1c27735d[1];\n#endif /* defined(VK_QNX_external_memory_screen_buffer) */\n#if defined(VK_VALVE_descriptor_set_host_mapping)\n\tPFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;\n\tPFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;\n#else\n\tPFN_vkVoidFunction padding_fd71e4c6[2];\n#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */\n#if 
(defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control))\n\tPFN_vkCmdSetDepthClampRangeEXT vkCmdSetDepthClampRangeEXT;\n#else\n\tPFN_vkVoidFunction padding_faa18a61[1];\n#endif /* (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control)) */\n#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;\n\tPFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;\n\tPFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;\n\tPFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;\n\tPFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;\n\tPFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;\n\tPFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;\n\tPFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;\n\tPFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;\n\tPFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;\n\tPFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;\n\tPFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;\n#else\n\tPFN_vkVoidFunction padding_3e8c720f[12];\n#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;\n\tPFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;\n\tPFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;\n\tPFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;\n\tPFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_b93e02a6[5];\n#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetAlphaToCoverageEnableEXT 
vkCmdSetAlphaToCoverageEnableEXT;\n\tPFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;\n\tPFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;\n\tPFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;\n\tPFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;\n\tPFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;\n\tPFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;\n\tPFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;\n\tPFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;\n\tPFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;\n#else\n\tPFN_vkVoidFunction padding_ab566e7e[10];\n#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;\n#else\n\tPFN_vkVoidFunction padding_6730ed0c[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback))\n\tPFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;\n#else\n\tPFN_vkVoidFunction padding_d3ebb335[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization))\n\tPFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;\n\tPFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;\n#else\n\tPFN_vkVoidFunction 
padding_a21758f4[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable))\n\tPFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_a498a838[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations))\n\tPFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_67db38de[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced))\n\tPFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;\n#else\n\tPFN_vkVoidFunction padding_fbea7481[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex))\n\tPFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;\n#else\n\tPFN_vkVoidFunction padding_3a8ec90e[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */\n#if 
(defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization))\n\tPFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;\n\tPFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_29cdb756[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control))\n\tPFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;\n#else\n\tPFN_vkVoidFunction padding_815a7240[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))\n\tPFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;\n#else\n\tPFN_vkVoidFunction padding_d1f00511[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))\n\tPFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;\n#else\n\tPFN_vkVoidFunction padding_7a73d553[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_fragment_coverage_to_color))\n\tPFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV;\n\tPFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;\n#else\n\tPFN_vkVoidFunction padding_6045fb8c[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))\n\tPFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;\n\tPFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;\n\tPFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;\n#else\n\tPFN_vkVoidFunction padding_bdc35c80[3];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))\n\tPFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;\n#else\n\tPFN_vkVoidFunction padding_9a5cd6e8[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))\n\tPFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;\n#else\n\tPFN_vkVoidFunction padding_3ee17e96[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_representative_fragment_test)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))\n\tPFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV;\n#else\n\tPFN_vkVoidFunction padding_263d525a[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */\n#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))\n\tPFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;\n#else\n\tPFN_vkVoidFunction padding_ecddace1[1];\n#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */\n#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))\n\tPFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;\n#else\n\tPFN_vkVoidFunction padding_d83e1de1[1];\n#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */\n#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))\n\tPFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;\n#else\n\tPFN_vkVoidFunction padding_60f8358a[1];\n#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\n\tPFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;\n\tPFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;\n#else\n\tPFN_vkVoidFunction padding_460290c6[2];\n#endif /* 
(defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\n\tPFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;\n#else\n\tPFN_vkVoidFunction padding_cffc198[1];\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n\t/* VOLK_GENERATE_DEVICE_TABLE */\n};\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/* VOLK_GENERATE_PROTOTYPES_H */\n#if defined(VK_VERSION_1_0)\nextern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\nextern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\nextern PFN_vkAllocateMemory vkAllocateMemory;\nextern PFN_vkBeginCommandBuffer vkBeginCommandBuffer;\nextern PFN_vkBindBufferMemory vkBindBufferMemory;\nextern PFN_vkBindImageMemory vkBindImageMemory;\nextern PFN_vkCmdBeginQuery vkCmdBeginQuery;\nextern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\nextern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;\nextern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\nextern PFN_vkCmdBindPipeline vkCmdBindPipeline;\nextern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\nextern PFN_vkCmdBlitImage vkCmdBlitImage;\nextern PFN_vkCmdClearAttachments vkCmdClearAttachments;\nextern PFN_vkCmdClearColorImage vkCmdClearColorImage;\nextern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\nextern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;\nextern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\nextern PFN_vkCmdCopyImage vkCmdCopyImage;\nextern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\nextern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\nextern PFN_vkCmdDispatch vkCmdDispatch;\nextern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\nextern PFN_vkCmdDraw vkCmdDraw;\nextern PFN_vkCmdDrawIndexed vkCmdDrawIndexed;\nextern PFN_vkCmdDrawIndexedIndirect 
vkCmdDrawIndexedIndirect;\nextern PFN_vkCmdDrawIndirect vkCmdDrawIndirect;\nextern PFN_vkCmdEndQuery vkCmdEndQuery;\nextern PFN_vkCmdEndRenderPass vkCmdEndRenderPass;\nextern PFN_vkCmdExecuteCommands vkCmdExecuteCommands;\nextern PFN_vkCmdFillBuffer vkCmdFillBuffer;\nextern PFN_vkCmdNextSubpass vkCmdNextSubpass;\nextern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\nextern PFN_vkCmdPushConstants vkCmdPushConstants;\nextern PFN_vkCmdResetEvent vkCmdResetEvent;\nextern PFN_vkCmdResetQueryPool vkCmdResetQueryPool;\nextern PFN_vkCmdResolveImage vkCmdResolveImage;\nextern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\nextern PFN_vkCmdSetDepthBias vkCmdSetDepthBias;\nextern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\nextern PFN_vkCmdSetEvent vkCmdSetEvent;\nextern PFN_vkCmdSetLineWidth vkCmdSetLineWidth;\nextern PFN_vkCmdSetScissor vkCmdSetScissor;\nextern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\nextern PFN_vkCmdSetStencilReference vkCmdSetStencilReference;\nextern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\nextern PFN_vkCmdSetViewport vkCmdSetViewport;\nextern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\nextern PFN_vkCmdWaitEvents vkCmdWaitEvents;\nextern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\nextern PFN_vkCreateBuffer vkCreateBuffer;\nextern PFN_vkCreateBufferView vkCreateBufferView;\nextern PFN_vkCreateCommandPool vkCreateCommandPool;\nextern PFN_vkCreateComputePipelines vkCreateComputePipelines;\nextern PFN_vkCreateDescriptorPool vkCreateDescriptorPool;\nextern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\nextern PFN_vkCreateDevice vkCreateDevice;\nextern PFN_vkCreateEvent vkCreateEvent;\nextern PFN_vkCreateFence vkCreateFence;\nextern PFN_vkCreateFramebuffer vkCreateFramebuffer;\nextern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\nextern PFN_vkCreateImage vkCreateImage;\nextern PFN_vkCreateImageView vkCreateImageView;\nextern PFN_vkCreateInstance vkCreateInstance;\nextern 
PFN_vkCreatePipelineCache vkCreatePipelineCache;\nextern PFN_vkCreatePipelineLayout vkCreatePipelineLayout;\nextern PFN_vkCreateQueryPool vkCreateQueryPool;\nextern PFN_vkCreateRenderPass vkCreateRenderPass;\nextern PFN_vkCreateSampler vkCreateSampler;\nextern PFN_vkCreateSemaphore vkCreateSemaphore;\nextern PFN_vkCreateShaderModule vkCreateShaderModule;\nextern PFN_vkDestroyBuffer vkDestroyBuffer;\nextern PFN_vkDestroyBufferView vkDestroyBufferView;\nextern PFN_vkDestroyCommandPool vkDestroyCommandPool;\nextern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\nextern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\nextern PFN_vkDestroyDevice vkDestroyDevice;\nextern PFN_vkDestroyEvent vkDestroyEvent;\nextern PFN_vkDestroyFence vkDestroyFence;\nextern PFN_vkDestroyFramebuffer vkDestroyFramebuffer;\nextern PFN_vkDestroyImage vkDestroyImage;\nextern PFN_vkDestroyImageView vkDestroyImageView;\nextern PFN_vkDestroyInstance vkDestroyInstance;\nextern PFN_vkDestroyPipeline vkDestroyPipeline;\nextern PFN_vkDestroyPipelineCache vkDestroyPipelineCache;\nextern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\nextern PFN_vkDestroyQueryPool vkDestroyQueryPool;\nextern PFN_vkDestroyRenderPass vkDestroyRenderPass;\nextern PFN_vkDestroySampler vkDestroySampler;\nextern PFN_vkDestroySemaphore vkDestroySemaphore;\nextern PFN_vkDestroyShaderModule vkDestroyShaderModule;\nextern PFN_vkDeviceWaitIdle vkDeviceWaitIdle;\nextern PFN_vkEndCommandBuffer vkEndCommandBuffer;\nextern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;\nextern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties;\nextern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;\nextern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties;\nextern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;\nextern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\nextern 
PFN_vkFreeCommandBuffers vkFreeCommandBuffers;\nextern PFN_vkFreeDescriptorSets vkFreeDescriptorSets;\nextern PFN_vkFreeMemory vkFreeMemory;\nextern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\nextern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\nextern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;\nextern PFN_vkGetDeviceQueue vkGetDeviceQueue;\nextern PFN_vkGetEventStatus vkGetEventStatus;\nextern PFN_vkGetFenceStatus vkGetFenceStatus;\nextern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\nextern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\nextern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\nextern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;\nextern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;\nextern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties;\nextern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;\nextern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties;\nextern PFN_vkGetPipelineCacheData vkGetPipelineCacheData;\nextern PFN_vkGetQueryPoolResults vkGetQueryPoolResults;\nextern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\nextern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\nextern PFN_vkMapMemory vkMapMemory;\nextern PFN_vkMergePipelineCaches vkMergePipelineCaches;\nextern PFN_vkQueueBindSparse vkQueueBindSparse;\nextern PFN_vkQueueSubmit vkQueueSubmit;\nextern PFN_vkQueueWaitIdle vkQueueWaitIdle;\nextern PFN_vkResetCommandBuffer vkResetCommandBuffer;\nextern PFN_vkResetCommandPool vkResetCommandPool;\nextern PFN_vkResetDescriptorPool 
vkResetDescriptorPool;\nextern PFN_vkResetEvent vkResetEvent;\nextern PFN_vkResetFences vkResetFences;\nextern PFN_vkSetEvent vkSetEvent;\nextern PFN_vkUnmapMemory vkUnmapMemory;\nextern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\nextern PFN_vkWaitForFences vkWaitForFences;\n#endif /* defined(VK_VERSION_1_0) */\n#if defined(VK_VERSION_1_1)\nextern PFN_vkBindBufferMemory2 vkBindBufferMemory2;\nextern PFN_vkBindImageMemory2 vkBindImageMemory2;\nextern PFN_vkCmdDispatchBase vkCmdDispatchBase;\nextern PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;\nextern PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;\nextern PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;\nextern PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;\nextern PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;\nextern PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;\nextern PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups;\nextern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;\nextern PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;\nextern PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;\nextern PFN_vkGetDeviceQueue2 vkGetDeviceQueue2;\nextern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\nextern PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;\nextern PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties;\nextern PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties;\nextern PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties;\nextern PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2;\nextern PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties2 
vkGetPhysicalDeviceImageFormatProperties2;\nextern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;\nextern PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2;\nextern PFN_vkTrimCommandPool vkTrimCommandPool;\nextern PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;\n#endif /* defined(VK_VERSION_1_1) */\n#if defined(VK_VERSION_1_2)\nextern PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;\nextern PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;\nextern PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;\nextern PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;\nextern PFN_vkCmdNextSubpass2 vkCmdNextSubpass2;\nextern PFN_vkCreateRenderPass2 vkCreateRenderPass2;\nextern PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;\nextern PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;\nextern PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;\nextern PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;\nextern PFN_vkResetQueryPool vkResetQueryPool;\nextern PFN_vkSignalSemaphore vkSignalSemaphore;\nextern PFN_vkWaitSemaphores vkWaitSemaphores;\n#endif /* defined(VK_VERSION_1_2) */\n#if defined(VK_VERSION_1_3)\nextern PFN_vkCmdBeginRendering vkCmdBeginRendering;\nextern PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;\nextern PFN_vkCmdBlitImage2 vkCmdBlitImage2;\nextern PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;\nextern PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;\nextern PFN_vkCmdCopyImage2 vkCmdCopyImage2;\nextern PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;\nextern PFN_vkCmdEndRendering vkCmdEndRendering;\nextern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\nextern PFN_vkCmdResetEvent2 vkCmdResetEvent2;\nextern 
PFN_vkCmdResolveImage2 vkCmdResolveImage2;\nextern PFN_vkCmdSetCullMode vkCmdSetCullMode;\nextern PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;\nextern PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;\nextern PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;\nextern PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;\nextern PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;\nextern PFN_vkCmdSetEvent2 vkCmdSetEvent2;\nextern PFN_vkCmdSetFrontFace vkCmdSetFrontFace;\nextern PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;\nextern PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;\nextern PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;\nextern PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;\nextern PFN_vkCmdSetStencilOp vkCmdSetStencilOp;\nextern PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;\nextern PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;\nextern PFN_vkCmdWaitEvents2 vkCmdWaitEvents2;\nextern PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;\nextern PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;\nextern PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;\nextern PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;\nextern PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;\nextern PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;\nextern PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties;\nextern PFN_vkGetPrivateData vkGetPrivateData;\nextern PFN_vkQueueSubmit2 vkQueueSubmit2;\nextern PFN_vkSetPrivateData vkSetPrivateData;\n#endif /* defined(VK_VERSION_1_3) */\n#if defined(VK_VERSION_1_4)\nextern PFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2;\nextern PFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2;\nextern PFN_vkCmdPushConstants2 vkCmdPushConstants2;\nextern PFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet;\nextern PFN_vkCmdPushDescriptorSet2 
vkCmdPushDescriptorSet2;\nextern PFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate;\nextern PFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2;\nextern PFN_vkCmdSetLineStipple vkCmdSetLineStipple;\nextern PFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations;\nextern PFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices;\nextern PFN_vkCopyImageToImage vkCopyImageToImage;\nextern PFN_vkCopyImageToMemory vkCopyImageToMemory;\nextern PFN_vkCopyMemoryToImage vkCopyMemoryToImage;\nextern PFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout;\nextern PFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2;\nextern PFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity;\nextern PFN_vkMapMemory2 vkMapMemory2;\nextern PFN_vkTransitionImageLayout vkTransitionImageLayout;\nextern PFN_vkUnmapMemory2 vkUnmapMemory2;\n#endif /* defined(VK_VERSION_1_4) */\n#if defined(VK_AMDX_shader_enqueue)\nextern PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;\nextern PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;\nextern PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;\nextern PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;\nextern PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;\nextern PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;\nextern PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;\n#endif /* defined(VK_AMDX_shader_enqueue) */\n#if defined(VK_AMD_anti_lag)\nextern PFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD;\n#endif /* defined(VK_AMD_anti_lag) */\n#if defined(VK_AMD_buffer_marker)\nextern PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;\n#endif /* defined(VK_AMD_buffer_marker) */\n#if defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || 
defined(VK_KHR_synchronization2))\nextern PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;\n#endif /* defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_AMD_display_native_hdr)\nextern PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;\n#endif /* defined(VK_AMD_display_native_hdr) */\n#if defined(VK_AMD_draw_indirect_count)\nextern PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;\nextern PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;\n#endif /* defined(VK_AMD_draw_indirect_count) */\n#if defined(VK_AMD_shader_info)\nextern PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;\n#endif /* defined(VK_AMD_shader_info) */\n#if defined(VK_ANDROID_external_memory_android_hardware_buffer)\nextern PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;\nextern PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;\n#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */\n#if defined(VK_ARM_data_graph)\nextern PFN_vkBindDataGraphPipelineSessionMemoryARM vkBindDataGraphPipelineSessionMemoryARM;\nextern PFN_vkCmdDispatchDataGraphARM vkCmdDispatchDataGraphARM;\nextern PFN_vkCreateDataGraphPipelineSessionARM vkCreateDataGraphPipelineSessionARM;\nextern PFN_vkCreateDataGraphPipelinesARM vkCreateDataGraphPipelinesARM;\nextern PFN_vkDestroyDataGraphPipelineSessionARM vkDestroyDataGraphPipelineSessionARM;\nextern PFN_vkGetDataGraphPipelineAvailablePropertiesARM vkGetDataGraphPipelineAvailablePropertiesARM;\nextern PFN_vkGetDataGraphPipelinePropertiesARM vkGetDataGraphPipelinePropertiesARM;\nextern PFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM vkGetDataGraphPipelineSessionBindPointRequirementsARM;\nextern PFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM vkGetDataGraphPipelineSessionMemoryRequirementsARM;\nextern PFN_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM 
vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM;\nextern PFN_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM;\n#endif /* defined(VK_ARM_data_graph) */\n#if defined(VK_ARM_tensors)\nextern PFN_vkBindTensorMemoryARM vkBindTensorMemoryARM;\nextern PFN_vkCmdCopyTensorARM vkCmdCopyTensorARM;\nextern PFN_vkCreateTensorARM vkCreateTensorARM;\nextern PFN_vkCreateTensorViewARM vkCreateTensorViewARM;\nextern PFN_vkDestroyTensorARM vkDestroyTensorARM;\nextern PFN_vkDestroyTensorViewARM vkDestroyTensorViewARM;\nextern PFN_vkGetDeviceTensorMemoryRequirementsARM vkGetDeviceTensorMemoryRequirementsARM;\nextern PFN_vkGetPhysicalDeviceExternalTensorPropertiesARM vkGetPhysicalDeviceExternalTensorPropertiesARM;\nextern PFN_vkGetTensorMemoryRequirementsARM vkGetTensorMemoryRequirementsARM;\n#endif /* defined(VK_ARM_tensors) */\n#if defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer)\nextern PFN_vkGetTensorOpaqueCaptureDescriptorDataARM vkGetTensorOpaqueCaptureDescriptorDataARM;\nextern PFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM vkGetTensorViewOpaqueCaptureDescriptorDataARM;\n#endif /* defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_acquire_drm_display)\nextern PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT;\nextern PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT;\n#endif /* defined(VK_EXT_acquire_drm_display) */\n#if defined(VK_EXT_acquire_xlib_display)\nextern PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT;\nextern PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT;\n#endif /* defined(VK_EXT_acquire_xlib_display) */\n#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)\nextern PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;\n#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */\n#if defined(VK_EXT_buffer_device_address)\nextern PFN_vkGetBufferDeviceAddressEXT 
vkGetBufferDeviceAddressEXT;\n#endif /* defined(VK_EXT_buffer_device_address) */\n#if defined(VK_EXT_calibrated_timestamps)\nextern PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;\nextern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT;\n#endif /* defined(VK_EXT_calibrated_timestamps) */\n#if defined(VK_EXT_color_write_enable)\nextern PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;\n#endif /* defined(VK_EXT_color_write_enable) */\n#if defined(VK_EXT_conditional_rendering)\nextern PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;\nextern PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;\n#endif /* defined(VK_EXT_conditional_rendering) */\n#if defined(VK_EXT_debug_marker)\nextern PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;\nextern PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;\nextern PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;\nextern PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;\nextern PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;\n#endif /* defined(VK_EXT_debug_marker) */\n#if defined(VK_EXT_debug_report)\nextern PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT;\nextern PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT;\nextern PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT;\n#endif /* defined(VK_EXT_debug_report) */\n#if defined(VK_EXT_debug_utils)\nextern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;\nextern PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;\nextern PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT;\nextern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;\nextern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT;\nextern PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT;\nextern PFN_vkQueueEndDebugUtilsLabelEXT 
vkQueueEndDebugUtilsLabelEXT;\nextern PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT;\nextern PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT;\nextern PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT;\nextern PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT;\n#endif /* defined(VK_EXT_debug_utils) */\n#if defined(VK_EXT_depth_bias_control)\nextern PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;\n#endif /* defined(VK_EXT_depth_bias_control) */\n#if defined(VK_EXT_descriptor_buffer)\nextern PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;\nextern PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;\nextern PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;\nextern PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetDescriptorEXT vkGetDescriptorEXT;\nextern PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;\nextern PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;\nextern PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;\n#endif /* defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))\nextern PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;\n#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */\n#if defined(VK_EXT_device_fault)\nextern PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;\n#endif /* defined(VK_EXT_device_fault) */\n#if 
defined(VK_EXT_device_generated_commands)\nextern PFN_vkCmdExecuteGeneratedCommandsEXT vkCmdExecuteGeneratedCommandsEXT;\nextern PFN_vkCmdPreprocessGeneratedCommandsEXT vkCmdPreprocessGeneratedCommandsEXT;\nextern PFN_vkCreateIndirectCommandsLayoutEXT vkCreateIndirectCommandsLayoutEXT;\nextern PFN_vkCreateIndirectExecutionSetEXT vkCreateIndirectExecutionSetEXT;\nextern PFN_vkDestroyIndirectCommandsLayoutEXT vkDestroyIndirectCommandsLayoutEXT;\nextern PFN_vkDestroyIndirectExecutionSetEXT vkDestroyIndirectExecutionSetEXT;\nextern PFN_vkGetGeneratedCommandsMemoryRequirementsEXT vkGetGeneratedCommandsMemoryRequirementsEXT;\nextern PFN_vkUpdateIndirectExecutionSetPipelineEXT vkUpdateIndirectExecutionSetPipelineEXT;\nextern PFN_vkUpdateIndirectExecutionSetShaderEXT vkUpdateIndirectExecutionSetShaderEXT;\n#endif /* defined(VK_EXT_device_generated_commands) */\n#if defined(VK_EXT_direct_mode_display)\nextern PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT;\n#endif /* defined(VK_EXT_direct_mode_display) */\n#if defined(VK_EXT_directfb_surface)\nextern PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT;\nextern PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT;\n#endif /* defined(VK_EXT_directfb_surface) */\n#if defined(VK_EXT_discard_rectangles)\nextern PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;\n#endif /* defined(VK_EXT_discard_rectangles) */\n#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2\nextern PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;\nextern PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;\n#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */\n#if defined(VK_EXT_display_control)\nextern PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;\nextern PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT;\nextern PFN_vkRegisterDeviceEventEXT 
vkRegisterDeviceEventEXT;\nextern PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;\n#endif /* defined(VK_EXT_display_control) */\n#if defined(VK_EXT_display_surface_counter)\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT;\n#endif /* defined(VK_EXT_display_surface_counter) */\n#if defined(VK_EXT_external_memory_host)\nextern PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;\n#endif /* defined(VK_EXT_external_memory_host) */\n#if defined(VK_EXT_external_memory_metal)\nextern PFN_vkGetMemoryMetalHandleEXT vkGetMemoryMetalHandleEXT;\nextern PFN_vkGetMemoryMetalHandlePropertiesEXT vkGetMemoryMetalHandlePropertiesEXT;\n#endif /* defined(VK_EXT_external_memory_metal) */\n#if defined(VK_EXT_fragment_density_map_offset)\nextern PFN_vkCmdEndRendering2EXT vkCmdEndRendering2EXT;\n#endif /* defined(VK_EXT_fragment_density_map_offset) */\n#if defined(VK_EXT_full_screen_exclusive)\nextern PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT;\nextern PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;\n#endif /* defined(VK_EXT_full_screen_exclusive) */\n#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1))\nextern PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;\n#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */\n#if defined(VK_EXT_hdr_metadata)\nextern PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;\n#endif /* defined(VK_EXT_hdr_metadata) */\n#if defined(VK_EXT_headless_surface)\nextern PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT;\n#endif /* defined(VK_EXT_headless_surface) */\n#if defined(VK_EXT_host_image_copy)\nextern PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;\nextern 
PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;\nextern PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT;\nextern PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;\n#endif /* defined(VK_EXT_host_image_copy) */\n#if defined(VK_EXT_host_query_reset)\nextern PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;\n#endif /* defined(VK_EXT_host_query_reset) */\n#if defined(VK_EXT_image_drm_format_modifier)\nextern PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;\n#endif /* defined(VK_EXT_image_drm_format_modifier) */\n#if defined(VK_EXT_line_rasterization)\nextern PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;\n#endif /* defined(VK_EXT_line_rasterization) */\n#if defined(VK_EXT_mesh_shader)\nextern PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;\nextern PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;\n#endif /* defined(VK_EXT_mesh_shader) */\n#if defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\nextern PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;\n#endif /* defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_EXT_metal_objects)\nextern PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;\n#endif /* defined(VK_EXT_metal_objects) */\n#if defined(VK_EXT_metal_surface)\nextern PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT;\n#endif /* defined(VK_EXT_metal_surface) */\n#if defined(VK_EXT_multi_draw)\nextern PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;\nextern PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;\n#endif /* defined(VK_EXT_multi_draw) */\n#if defined(VK_EXT_opacity_micromap)\nextern PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;\nextern PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;\nextern PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT;\nextern PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;\nextern PFN_vkCmdCopyMicromapToMemoryEXT 
vkCmdCopyMicromapToMemoryEXT;\nextern PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;\nextern PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;\nextern PFN_vkCopyMicromapEXT vkCopyMicromapEXT;\nextern PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;\nextern PFN_vkCreateMicromapEXT vkCreateMicromapEXT;\nextern PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;\nextern PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;\nextern PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;\nextern PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;\n#endif /* defined(VK_EXT_opacity_micromap) */\n#if defined(VK_EXT_pageable_device_local_memory)\nextern PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;\n#endif /* defined(VK_EXT_pageable_device_local_memory) */\n#if defined(VK_EXT_pipeline_properties)\nextern PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;\n#endif /* defined(VK_EXT_pipeline_properties) */\n#if defined(VK_EXT_private_data)\nextern PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;\nextern PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;\nextern PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;\nextern PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;\n#endif /* defined(VK_EXT_private_data) */\n#if defined(VK_EXT_sample_locations)\nextern PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;\nextern PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT;\n#endif /* defined(VK_EXT_sample_locations) */\n#if defined(VK_EXT_shader_module_identifier)\nextern PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;\nextern PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;\n#endif /* defined(VK_EXT_shader_module_identifier) */\n#if defined(VK_EXT_shader_object)\nextern PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;\nextern PFN_vkCreateShadersEXT vkCreateShadersEXT;\nextern 
PFN_vkDestroyShaderEXT vkDestroyShaderEXT;\nextern PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;\n#endif /* defined(VK_EXT_shader_object) */\n#if defined(VK_EXT_swapchain_maintenance1)\nextern PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;\n#endif /* defined(VK_EXT_swapchain_maintenance1) */\n#if defined(VK_EXT_tooling_info)\nextern PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT;\n#endif /* defined(VK_EXT_tooling_info) */\n#if defined(VK_EXT_transform_feedback)\nextern PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;\nextern PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;\nextern PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;\nextern PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;\nextern PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;\nextern PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;\n#endif /* defined(VK_EXT_transform_feedback) */\n#if defined(VK_EXT_validation_cache)\nextern PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;\nextern PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;\nextern PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;\nextern PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;\n#endif /* defined(VK_EXT_validation_cache) */\n#if defined(VK_FUCHSIA_buffer_collection)\nextern PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;\nextern PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;\nextern PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;\nextern PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;\nextern PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA;\n#endif /* defined(VK_FUCHSIA_buffer_collection) */\n#if defined(VK_FUCHSIA_external_memory)\nextern PFN_vkGetMemoryZirconHandleFUCHSIA 
vkGetMemoryZirconHandleFUCHSIA;\nextern PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;\n#endif /* defined(VK_FUCHSIA_external_memory) */\n#if defined(VK_FUCHSIA_external_semaphore)\nextern PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;\nextern PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;\n#endif /* defined(VK_FUCHSIA_external_semaphore) */\n#if defined(VK_FUCHSIA_imagepipe_surface)\nextern PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA;\n#endif /* defined(VK_FUCHSIA_imagepipe_surface) */\n#if defined(VK_GGP_stream_descriptor_surface)\nextern PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP;\n#endif /* defined(VK_GGP_stream_descriptor_surface) */\n#if defined(VK_GOOGLE_display_timing)\nextern PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;\nextern PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;\n#endif /* defined(VK_GOOGLE_display_timing) */\n#if defined(VK_HUAWEI_cluster_culling_shader)\nextern PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;\nextern PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;\n#endif /* defined(VK_HUAWEI_cluster_culling_shader) */\n#if defined(VK_HUAWEI_invocation_mask)\nextern PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;\n#endif /* defined(VK_HUAWEI_invocation_mask) */\n#if defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2\nextern PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;\n#endif /* defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2 */\n#if defined(VK_HUAWEI_subpass_shading)\nextern PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;\n#endif /* defined(VK_HUAWEI_subpass_shading) */\n#if defined(VK_INTEL_performance_query)\nextern PFN_vkAcquirePerformanceConfigurationINTEL 
vkAcquirePerformanceConfigurationINTEL;\nextern PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;\nextern PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;\nextern PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;\nextern PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;\nextern PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;\nextern PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;\nextern PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;\nextern PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;\n#endif /* defined(VK_INTEL_performance_query) */\n#if defined(VK_KHR_acceleration_structure)\nextern PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;\nextern PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;\nextern PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;\nextern PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;\nextern PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;\nextern PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;\nextern PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;\nextern PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;\nextern PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;\nextern PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;\nextern PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;\nextern PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;\nextern PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;\nextern 
PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;\nextern PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;\nextern PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;\n#endif /* defined(VK_KHR_acceleration_structure) */\n#if defined(VK_KHR_android_surface)\nextern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;\n#endif /* defined(VK_KHR_android_surface) */\n#if defined(VK_KHR_bind_memory2)\nextern PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;\nextern PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;\n#endif /* defined(VK_KHR_bind_memory2) */\n#if defined(VK_KHR_buffer_device_address)\nextern PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;\nextern PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;\nextern PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;\n#endif /* defined(VK_KHR_buffer_device_address) */\n#if defined(VK_KHR_calibrated_timestamps)\nextern PFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR;\nextern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR vkGetPhysicalDeviceCalibrateableTimeDomainsKHR;\n#endif /* defined(VK_KHR_calibrated_timestamps) */\n#if defined(VK_KHR_cooperative_matrix)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR;\n#endif /* defined(VK_KHR_cooperative_matrix) */\n#if defined(VK_KHR_copy_commands2)\nextern PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;\nextern PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;\nextern PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;\nextern PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;\nextern PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;\nextern PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;\n#endif /* defined(VK_KHR_copy_commands2) */\n#if defined(VK_KHR_create_renderpass2)\nextern 
PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;\nextern PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;\nextern PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;\nextern PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;\n#endif /* defined(VK_KHR_create_renderpass2) */\n#if defined(VK_KHR_deferred_host_operations)\nextern PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;\nextern PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;\nextern PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;\nextern PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;\nextern PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;\n#endif /* defined(VK_KHR_deferred_host_operations) */\n#if defined(VK_KHR_descriptor_update_template)\nextern PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;\nextern PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;\nextern PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;\n#endif /* defined(VK_KHR_descriptor_update_template) */\n#if defined(VK_KHR_device_group)\nextern PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;\nextern PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;\nextern PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;\n#endif /* defined(VK_KHR_device_group) */\n#if defined(VK_KHR_device_group_creation)\nextern PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR;\n#endif /* defined(VK_KHR_device_group_creation) */\n#if defined(VK_KHR_display)\nextern PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR;\nextern PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR;\nextern PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR;\nextern PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR;\nextern PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR;\nextern 
PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR;\nextern PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR;\n#endif /* defined(VK_KHR_display) */\n#if defined(VK_KHR_display_swapchain)\nextern PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;\n#endif /* defined(VK_KHR_display_swapchain) */\n#if defined(VK_KHR_draw_indirect_count)\nextern PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;\nextern PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;\n#endif /* defined(VK_KHR_draw_indirect_count) */\n#if defined(VK_KHR_dynamic_rendering)\nextern PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;\nextern PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR;\n#endif /* defined(VK_KHR_dynamic_rendering) */\n#if defined(VK_KHR_dynamic_rendering_local_read)\nextern PFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR;\nextern PFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR;\n#endif /* defined(VK_KHR_dynamic_rendering_local_read) */\n#if defined(VK_KHR_external_fence_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR;\n#endif /* defined(VK_KHR_external_fence_capabilities) */\n#if defined(VK_KHR_external_fence_fd)\nextern PFN_vkGetFenceFdKHR vkGetFenceFdKHR;\nextern PFN_vkImportFenceFdKHR vkImportFenceFdKHR;\n#endif /* defined(VK_KHR_external_fence_fd) */\n#if defined(VK_KHR_external_fence_win32)\nextern PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;\nextern PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;\n#endif /* defined(VK_KHR_external_fence_win32) */\n#if defined(VK_KHR_external_memory_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_capabilities) */\n#if 
defined(VK_KHR_external_memory_fd)\nextern PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;\nextern PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_fd) */\n#if defined(VK_KHR_external_memory_win32)\nextern PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;\nextern PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_win32) */\n#if defined(VK_KHR_external_semaphore_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR;\n#endif /* defined(VK_KHR_external_semaphore_capabilities) */\n#if defined(VK_KHR_external_semaphore_fd)\nextern PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;\nextern PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;\n#endif /* defined(VK_KHR_external_semaphore_fd) */\n#if defined(VK_KHR_external_semaphore_win32)\nextern PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;\nextern PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;\n#endif /* defined(VK_KHR_external_semaphore_win32) */\n#if defined(VK_KHR_fragment_shading_rate)\nextern PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;\nextern PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR;\n#endif /* defined(VK_KHR_fragment_shading_rate) */\n#if defined(VK_KHR_get_display_properties2)\nextern PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR;\nextern PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR;\nextern PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR vkGetPhysicalDeviceDisplayPlaneProperties2KHR;\nextern PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR;\n#endif /* defined(VK_KHR_get_display_properties2) */\n#if defined(VK_KHR_get_memory_requirements2)\nextern PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;\nextern 
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;\nextern PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;\n#endif /* defined(VK_KHR_get_memory_requirements2) */\n#if defined(VK_KHR_get_physical_device_properties2)\nextern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;\nextern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR;\nextern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;\nextern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR;\n#endif /* defined(VK_KHR_get_physical_device_properties2) */\n#if defined(VK_KHR_get_surface_capabilities2)\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR;\n#endif /* defined(VK_KHR_get_surface_capabilities2) */\n#if defined(VK_KHR_line_rasterization)\nextern PFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR;\n#endif /* defined(VK_KHR_line_rasterization) */\n#if defined(VK_KHR_maintenance1)\nextern PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;\n#endif /* defined(VK_KHR_maintenance1) */\n#if defined(VK_KHR_maintenance3)\nextern PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;\n#endif /* defined(VK_KHR_maintenance3) */\n#if defined(VK_KHR_maintenance4)\nextern PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;\nextern PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;\nextern PFN_vkGetDeviceImageSparseMemoryRequirementsKHR 
vkGetDeviceImageSparseMemoryRequirementsKHR;\n#endif /* defined(VK_KHR_maintenance4) */\n#if defined(VK_KHR_maintenance5)\nextern PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;\nextern PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;\nextern PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;\nextern PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;\n#endif /* defined(VK_KHR_maintenance5) */\n#if defined(VK_KHR_maintenance6)\nextern PFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR;\nextern PFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR;\n#endif /* defined(VK_KHR_maintenance6) */\n#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor)\nextern PFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR;\nextern PFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR;\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer)\nextern PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT;\nextern PFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT;\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_KHR_map_memory2)\nextern PFN_vkMapMemory2KHR vkMapMemory2KHR;\nextern PFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;\n#endif /* defined(VK_KHR_map_memory2) */\n#if defined(VK_KHR_performance_query)\nextern PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;\nextern PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR;\nextern PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR;\nextern PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;\n#endif /* defined(VK_KHR_performance_query) */\n#if 
defined(VK_KHR_pipeline_binary)\nextern PFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR;\nextern PFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR;\nextern PFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR;\nextern PFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR;\nextern PFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR;\n#endif /* defined(VK_KHR_pipeline_binary) */\n#if defined(VK_KHR_pipeline_executable_properties)\nextern PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;\nextern PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;\nextern PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;\n#endif /* defined(VK_KHR_pipeline_executable_properties) */\n#if defined(VK_KHR_present_wait)\nextern PFN_vkWaitForPresentKHR vkWaitForPresentKHR;\n#endif /* defined(VK_KHR_present_wait) */\n#if defined(VK_KHR_present_wait2)\nextern PFN_vkWaitForPresent2KHR vkWaitForPresent2KHR;\n#endif /* defined(VK_KHR_present_wait2) */\n#if defined(VK_KHR_push_descriptor)\nextern PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;\n#endif /* defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)\nextern PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;\n#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_ray_tracing_pipeline)\nextern PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;\nextern PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;\nextern PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;\nextern PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;\nextern PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;\nextern PFN_vkGetRayTracingShaderGroupHandlesKHR 
vkGetRayTracingShaderGroupHandlesKHR;\nextern PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;\n#endif /* defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_sampler_ycbcr_conversion)\nextern PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;\nextern PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;\n#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */\n#if defined(VK_KHR_shared_presentable_image)\nextern PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;\n#endif /* defined(VK_KHR_shared_presentable_image) */\n#if defined(VK_KHR_surface)\nextern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;\n#endif /* defined(VK_KHR_surface) */\n#if defined(VK_KHR_swapchain)\nextern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\nextern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\nextern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\nextern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\nextern PFN_vkQueuePresentKHR vkQueuePresentKHR;\n#endif /* defined(VK_KHR_swapchain) */\n#if defined(VK_KHR_swapchain_maintenance1)\nextern PFN_vkReleaseSwapchainImagesKHR vkReleaseSwapchainImagesKHR;\n#endif /* defined(VK_KHR_swapchain_maintenance1) */\n#if defined(VK_KHR_synchronization2)\nextern PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;\nextern PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;\nextern PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;\nextern PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;\nextern PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;\nextern PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;\n#endif /* 
defined(VK_KHR_synchronization2) */\n#if defined(VK_KHR_timeline_semaphore)\nextern PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;\nextern PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;\nextern PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;\n#endif /* defined(VK_KHR_timeline_semaphore) */\n#if defined(VK_KHR_video_decode_queue)\nextern PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;\n#endif /* defined(VK_KHR_video_decode_queue) */\n#if defined(VK_KHR_video_encode_queue)\nextern PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;\nextern PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;\nextern PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR;\n#endif /* defined(VK_KHR_video_encode_queue) */\n#if defined(VK_KHR_video_queue)\nextern PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;\nextern PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;\nextern PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;\nextern PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;\nextern PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;\nextern PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;\nextern PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;\nextern PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;\nextern PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR;\nextern PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR;\nextern PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;\nextern PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;\n#endif /* defined(VK_KHR_video_queue) */\n#if defined(VK_KHR_wayland_surface)\nextern PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR 
vkGetPhysicalDeviceWaylandPresentationSupportKHR;\n#endif /* defined(VK_KHR_wayland_surface) */\n#if defined(VK_KHR_win32_surface)\nextern PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR;\nextern PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR;\n#endif /* defined(VK_KHR_win32_surface) */\n#if defined(VK_KHR_xcb_surface)\nextern PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR;\n#endif /* defined(VK_KHR_xcb_surface) */\n#if defined(VK_KHR_xlib_surface)\nextern PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR;\n#endif /* defined(VK_KHR_xlib_surface) */\n#if defined(VK_MVK_ios_surface)\nextern PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK;\n#endif /* defined(VK_MVK_ios_surface) */\n#if defined(VK_MVK_macos_surface)\nextern PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK;\n#endif /* defined(VK_MVK_macos_surface) */\n#if defined(VK_NN_vi_surface)\nextern PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN;\n#endif /* defined(VK_NN_vi_surface) */\n#if defined(VK_NVX_binary_import)\nextern PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;\nextern PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;\nextern PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;\nextern PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;\nextern PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;\n#endif /* defined(VK_NVX_binary_import) */\n#if defined(VK_NVX_image_view_handle)\nextern PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;\n#endif /* defined(VK_NVX_image_view_handle) */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3\nextern PFN_vkGetImageViewHandle64NVX vkGetImageViewHandle64NVX;\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3 */\n#if 
defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2\nextern PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_acquire_winrt_display)\nextern PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV;\nextern PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV;\n#endif /* defined(VK_NV_acquire_winrt_display) */\n#if defined(VK_NV_clip_space_w_scaling)\nextern PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;\n#endif /* defined(VK_NV_clip_space_w_scaling) */\n#if defined(VK_NV_cluster_acceleration_structure)\nextern PFN_vkCmdBuildClusterAccelerationStructureIndirectNV vkCmdBuildClusterAccelerationStructureIndirectNV;\nextern PFN_vkGetClusterAccelerationStructureBuildSizesNV vkGetClusterAccelerationStructureBuildSizesNV;\n#endif /* defined(VK_NV_cluster_acceleration_structure) */\n#if defined(VK_NV_cooperative_matrix)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV;\n#endif /* defined(VK_NV_cooperative_matrix) */\n#if defined(VK_NV_cooperative_matrix2)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV;\n#endif /* defined(VK_NV_cooperative_matrix2) */\n#if defined(VK_NV_cooperative_vector)\nextern PFN_vkCmdConvertCooperativeVectorMatrixNV vkCmdConvertCooperativeVectorMatrixNV;\nextern PFN_vkConvertCooperativeVectorMatrixNV vkConvertCooperativeVectorMatrixNV;\nextern PFN_vkGetPhysicalDeviceCooperativeVectorPropertiesNV vkGetPhysicalDeviceCooperativeVectorPropertiesNV;\n#endif /* defined(VK_NV_cooperative_vector) */\n#if defined(VK_NV_copy_memory_indirect)\nextern PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;\nextern PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;\n#endif /* defined(VK_NV_copy_memory_indirect) */\n#if 
defined(VK_NV_coverage_reduction_mode)\nextern PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV;\n#endif /* defined(VK_NV_coverage_reduction_mode) */\n#if defined(VK_NV_cuda_kernel_launch)\nextern PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;\nextern PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;\nextern PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;\nextern PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;\nextern PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;\nextern PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;\n#endif /* defined(VK_NV_cuda_kernel_launch) */\n#if defined(VK_NV_device_diagnostic_checkpoints)\nextern PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;\nextern PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) */\n#if defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\nextern PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_NV_device_generated_commands)\nextern PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;\nextern PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;\nextern PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;\nextern PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;\nextern PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;\nextern PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;\n#endif /* defined(VK_NV_device_generated_commands) */\n#if defined(VK_NV_device_generated_commands_compute)\nextern PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;\nextern PFN_vkGetPipelineIndirectDeviceAddressNV 
vkGetPipelineIndirectDeviceAddressNV;\nextern PFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;\n#endif /* defined(VK_NV_device_generated_commands_compute) */\n#if defined(VK_NV_external_compute_queue)\nextern PFN_vkCreateExternalComputeQueueNV vkCreateExternalComputeQueueNV;\nextern PFN_vkDestroyExternalComputeQueueNV vkDestroyExternalComputeQueueNV;\nextern PFN_vkGetExternalComputeQueueDataNV vkGetExternalComputeQueueDataNV;\n#endif /* defined(VK_NV_external_compute_queue) */\n#if defined(VK_NV_external_memory_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV;\n#endif /* defined(VK_NV_external_memory_capabilities) */\n#if defined(VK_NV_external_memory_rdma)\nextern PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;\n#endif /* defined(VK_NV_external_memory_rdma) */\n#if defined(VK_NV_external_memory_win32)\nextern PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;\n#endif /* defined(VK_NV_external_memory_win32) */\n#if defined(VK_NV_fragment_shading_rate_enums)\nextern PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;\n#endif /* defined(VK_NV_fragment_shading_rate_enums) */\n#if defined(VK_NV_low_latency2)\nextern PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;\nextern PFN_vkLatencySleepNV vkLatencySleepNV;\nextern PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;\nextern PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;\nextern PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;\n#endif /* defined(VK_NV_low_latency2) */\n#if defined(VK_NV_memory_decompression)\nextern PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;\nextern PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;\n#endif /* defined(VK_NV_memory_decompression) */\n#if defined(VK_NV_mesh_shader)\nextern PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;\nextern PFN_vkCmdDrawMeshTasksNV 
vkCmdDrawMeshTasksNV;\n#endif /* defined(VK_NV_mesh_shader) */\n#if defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\nextern PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;\n#endif /* defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_NV_optical_flow)\nextern PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV;\nextern PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;\nextern PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;\nextern PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;\nextern PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV vkGetPhysicalDeviceOpticalFlowImageFormatsNV;\n#endif /* defined(VK_NV_optical_flow) */\n#if defined(VK_NV_partitioned_acceleration_structure)\nextern PFN_vkCmdBuildPartitionedAccelerationStructuresNV vkCmdBuildPartitionedAccelerationStructuresNV;\nextern PFN_vkGetPartitionedAccelerationStructuresBuildSizesNV vkGetPartitionedAccelerationStructuresBuildSizesNV;\n#endif /* defined(VK_NV_partitioned_acceleration_structure) */\n#if defined(VK_NV_ray_tracing)\nextern PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;\nextern PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;\nextern PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;\nextern PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;\nextern PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;\nextern PFN_vkCompileDeferredNV vkCompileDeferredNV;\nextern PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;\nextern PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;\nextern PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;\nextern PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;\nextern 
PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;\nextern PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;\n#endif /* defined(VK_NV_ray_tracing) */\n#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2\nextern PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;\n#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_scissor_exclusive)\nextern PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;\n#endif /* defined(VK_NV_scissor_exclusive) */\n#if defined(VK_NV_shading_rate_image)\nextern PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;\nextern PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;\nextern PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;\n#endif /* defined(VK_NV_shading_rate_image) */\n#if defined(VK_OHOS_surface)\nextern PFN_vkCreateSurfaceOHOS vkCreateSurfaceOHOS;\n#endif /* defined(VK_OHOS_surface) */\n#if defined(VK_QCOM_tile_memory_heap)\nextern PFN_vkCmdBindTileMemoryQCOM vkCmdBindTileMemoryQCOM;\n#endif /* defined(VK_QCOM_tile_memory_heap) */\n#if defined(VK_QCOM_tile_properties)\nextern PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;\nextern PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;\n#endif /* defined(VK_QCOM_tile_properties) */\n#if defined(VK_QCOM_tile_shading)\nextern PFN_vkCmdBeginPerTileExecutionQCOM vkCmdBeginPerTileExecutionQCOM;\nextern PFN_vkCmdDispatchTileQCOM vkCmdDispatchTileQCOM;\nextern PFN_vkCmdEndPerTileExecutionQCOM vkCmdEndPerTileExecutionQCOM;\n#endif /* defined(VK_QCOM_tile_shading) */\n#if defined(VK_QNX_external_memory_screen_buffer)\nextern PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;\n#endif /* defined(VK_QNX_external_memory_screen_buffer) */\n#if defined(VK_QNX_screen_surface)\nextern 
PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX;\nextern PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX;\n#endif /* defined(VK_QNX_screen_surface) */\n#if defined(VK_VALVE_descriptor_set_host_mapping)\nextern PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;\nextern PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;\n#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */\n#if (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control))\nextern PFN_vkCmdSetDepthClampRangeEXT vkCmdSetDepthClampRangeEXT;\n#endif /* (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control)) */\n#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;\nextern PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;\nextern PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;\nextern PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;\nextern PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;\nextern PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;\nextern PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;\nextern PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;\nextern PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;\nextern PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;\nextern PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;\nextern PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;\nextern PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;\nextern 
PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;\nextern PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;\nextern PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT;\nextern PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;\nextern PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;\nextern PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;\nextern PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;\nextern PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;\nextern PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;\nextern PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;\nextern PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;\nextern PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback))\nextern PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */\n#if (defined(VK_EXT_extended_dynamic_state3) 
&& defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization))\nextern PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;\nextern PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable))\nextern PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations))\nextern PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced))\nextern PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex))\nextern PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && 
defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization))\nextern PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;\nextern PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control))\nextern PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))\nextern PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))\nextern PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color))\nextern PFN_vkCmdSetCoverageToColorEnableNV 
vkCmdSetCoverageToColorEnableNV;\nextern PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))\nextern PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;\nextern PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;\nextern PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))\nextern PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))\nextern PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))\nextern PFN_vkCmdSetCoverageReductionModeNV 
vkCmdSetCoverageReductionModeNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */\n#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))\nextern PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;\n#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */\n#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))\nextern PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;\n#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */\n#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))\nextern PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;\n#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\nextern PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;\nextern PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR;\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\nextern PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || 
(defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n/* VOLK_GENERATE_PROTOTYPES_H */\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n\n#ifdef VOLK_IMPLEMENTATION\n#undef VOLK_IMPLEMENTATION\n/* Prevent tools like dependency checkers from detecting a cyclic dependency */\n#define VOLK_SOURCE \"volk.c\"\n#include VOLK_SOURCE\n#endif\n\n/**\n * Copyright (c) 2018-2025 Arseny Kapoulkine\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n*/\n/* clang-format on */\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_pipelayout.h", "#pragma once\n\n#include \n\n#include \n\n#include \"dxvk_hash.h\"\n\n#include \"util_math.h\"\n#include \"util_bit.h\"\n#include \"util_flags.h\"\n\nnamespace dxvk {\n\n class DxvkDevice;\n class DxvkPipelineManager;\n\n /**\n * \\brief Order-invariant atomic access operation\n *\n * Information used to optimize barriers when a resource\n * is accessed exlusively via order-invariant stores.\n */\n struct DxvkAccessOp {\n enum OpType : uint16_t {\n None = 0x0u,\n Or = 0x1u,\n And = 0x2u,\n Xor = 0x3u,\n Add = 0x4u,\n IMin = 0x5u,\n IMax = 0x6u,\n UMin = 0x7u,\n UMax = 0x8u,\n\n StoreF = 0xdu,\n StoreUi = 0xeu,\n StoreSi = 0xfu,\n };\n\n DxvkAccessOp() = default;\n DxvkAccessOp(OpType t)\n : op(uint16_t(t)) { }\n\n DxvkAccessOp(OpType t, uint16_t constant)\n : op(uint16_t(t) | (constant << 4u)) { }\n\n uint16_t op = 0u;\n\n bool operator == (const DxvkAccessOp& t) const { return op == t.op; }\n bool operator != (const DxvkAccessOp& t) const { return op != t.op; }\n\n template, bool> = true>\n explicit operator T() const { return op; }\n };\n\n static_assert(sizeof(DxvkAccessOp) == sizeof(uint16_t));\n\n /**\n * \\brief Binding info\n *\n * Stores metadata for a single binding in\n * a given shader, or for the whole pipeline.\n */\n struct DxvkBindingInfo {\n VkDescriptorType descriptorType = VK_DESCRIPTOR_TYPE_MAX_ENUM; ///< Vulkan descriptor type\n uint32_t resourceBinding = 0u; ///< API binding slot for the resource\n VkImageViewType viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM; ///< Image view type\n VkShaderStageFlagBits stage = VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM; ///< Shader stage\n VkAccessFlags access = 
0u; ///< Access mask for the resource\n DxvkAccessOp accessOp = DxvkAccessOp::None; ///< Order-invariant store type, if any\n bool uboSet = false; ///< Whether to include this in the UBO set\n bool isMultisampled = false; ///< Multisampled binding\n\n /**\n * \\brief Computes descriptor set index for the given binding\n *\n * This is determines based on the shader stages that use the binding.\n * \\returns Descriptor set index\n */\n uint32_t computeSetIndex() const;\n\n /**\n * \\brief Numeric value of the binding\n *\n * Used when sorting bindings.\n * \\returns Numeric value\n */\n uint32_t value() const;\n\n /**\n * \\brief Checks for equality\n *\n * \\param [in] other Binding to compare to\n * \\returns \\c true if both bindings are equal\n */\n bool eq(const DxvkBindingInfo& other) const;\n\n /**\n * \\brief Hashes binding info\n * \\returns Binding hash\n */\n size_t hash() const;\n\n };\n\n}\n"], ["/lsfg-vk/src/extract/trans.cpp", "#include \"extract/trans.hpp\"\n\n#include \n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing namespace Extract;\n\nstruct BindingOffsets {\n uint32_t bindingIndex{};\n uint32_t bindingOffset{};\n uint32_t setIndex{};\n uint32_t setOffset{};\n};\n\nstd::vector Extract::translateShader(std::vector bytecode) {\n // compile the shader\n dxvk::DxbcReader reader(reinterpret_cast(bytecode.data()), bytecode.size());\n dxvk::DxbcModule module(reader);\n const dxvk::DxbcModuleInfo info{};\n auto code = module.compile(info, \"CS\");\n\n // find all bindings\n std::vector bindingOffsets;\n std::vector varIds;\n for (auto ins : code) {\n if (ins.opCode() == spv::OpDecorate) {\n if (ins.arg(2) == spv::DecorationBinding) {\n const uint32_t varId = ins.arg(1);\n bindingOffsets.resize(std::max(bindingOffsets.size(), size_t(varId + 1)));\n bindingOffsets[varId].bindingIndex = ins.arg(3);\n bindingOffsets[varId].bindingOffset = ins.offset() + 3;\n varIds.push_back(varId);\n }\n\n if (ins.arg(2) == 
spv::DecorationDescriptorSet) {\n const uint32_t varId = ins.arg(1);\n bindingOffsets.resize(std::max(bindingOffsets.size(), size_t(varId + 1)));\n bindingOffsets[varId].setIndex = ins.arg(3);\n bindingOffsets[varId].setOffset = ins.offset() + 3;\n }\n }\n\n if (ins.opCode() == spv::OpFunction)\n break;\n }\n\n std::vector validBindings;\n for (const auto varId : varIds) {\n auto info = bindingOffsets[varId];\n\n if (info.bindingOffset)\n validBindings.push_back(info);\n }\n\n // patch binding offset\n #pragma clang diagnostic push\n #pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n for (size_t i = 0; i < validBindings.size(); i++)\n code.data()[validBindings.at(i).bindingOffset] // NOLINT\n = static_cast(i);\n #pragma clang diagnostic pop\n\n // return the new bytecode\n std::vector spirvBytecode(code.size());\n std::copy_n(reinterpret_cast(code.data()),\n code.size(), spirvBytecode.data());\n return spirvBytecode;\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/buffer.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n\n// keep this header above \"windows.h\" because it contains many types\n#include \n\n#ifdef _WIN32\n\n#define WIN32_LEAN_AND_MEAN\n#define VC_EXTRALEAN\n\n#include \n#include \n#else\n#include \n#include \n#include \n#include \n#endif\n\nnamespace {\n\ninline std::uint16_t byteSwapUint16(std::uint16_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_ushort(val);\n#else\n return __builtin_bswap16(val);\n#endif\n}\n\ninline std::uint32_t byteSwapUint32(std::uint32_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_ulong(val);\n#else\n return __builtin_bswap32(val);\n#endif\n}\n\ninline uint64_t byteSwapUint64(std::uint64_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_uint64(val);\n#else\n return __builtin_bswap64(val);\n#endif\n}\n\n} // anonymous namespace\n\nnamespace peparse {\n\nextern std::uint32_t err;\nextern std::string err_loc;\n\nstruct buffer_detail {\n#ifdef _WIN32\n HANDLE file;\n HANDLE sec;\n#else\n int fd;\n#endif\n};\n\nbool readByte(bounded_buffer *b, std::uint32_t offset, std::uint8_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (offset >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint8_t *tmp = (b->buf + offset);\n out = *tmp;\n\n return true;\n}\n\nbool readWord(bounded_buffer *b, std::uint32_t offset, std::uint16_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 1 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint16_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint16_t));\n if (b->swapBytes) {\n out = byteSwapUint16(tmp);\n } else 
{\n out = tmp;\n }\n\n return true;\n}\n\nbool readDword(bounded_buffer *b, std::uint32_t offset, std::uint32_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 3 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint32_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint32_t));\n if (b->swapBytes) {\n out = byteSwapUint32(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readQword(bounded_buffer *b, std::uint32_t offset, std::uint64_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 7 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint64_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint64_t));\n if (b->swapBytes) {\n out = byteSwapUint64(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readChar16(bounded_buffer *b, std::uint32_t offset, char16_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 1 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n char16_t tmp;\n if (b->swapBytes) {\n std::uint8_t tmpBuf[2];\n tmpBuf[0] = *(b->buf + offset + 1);\n tmpBuf[1] = *(b->buf + offset);\n memcpy(&tmp, tmpBuf, sizeof(std::uint16_t));\n } else {\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint16_t));\n }\n out = tmp;\n\n return true;\n}\n\nbounded_buffer *readFileToFileBuffer(const char *filePath) {\n#ifdef _WIN32\n HANDLE h = CreateFileA(filePath,\n GENERIC_READ,\n FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,\n nullptr,\n OPEN_EXISTING,\n FILE_ATTRIBUTE_NORMAL,\n nullptr);\n if (h == INVALID_HANDLE_VALUE) {\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n\n DWORD fileSize = GetFileSize(h, nullptr);\n\n if (fileSize == INVALID_FILE_SIZE) {\n CloseHandle(h);\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n\n#else\n // only where we have mmap / open / etc\n int fd = open(filePath, O_RDONLY);\n\n if (fd == -1) {\n 
PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n#endif\n\n // make a buffer object\n bounded_buffer *p = new (std::nothrow) bounded_buffer();\n if (p == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n memset(p, 0, sizeof(bounded_buffer));\n buffer_detail *d = new (std::nothrow) buffer_detail();\n\n if (d == nullptr) {\n delete p;\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n memset(d, 0, sizeof(buffer_detail));\n p->detail = d;\n\n// only where we have mmap / open / etc\n#ifdef _WIN32\n p->detail->file = h;\n\n HANDLE hMap = CreateFileMapping(h, nullptr, PAGE_READONLY, 0, 0, nullptr);\n\n if (hMap == nullptr) {\n CloseHandle(h);\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->detail->sec = hMap;\n\n LPVOID ptr = MapViewOfFile(hMap, FILE_MAP_READ, 0, 0, 0);\n\n if (ptr == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->buf = reinterpret_cast(ptr);\n p->bufLen = fileSize;\n#else\n p->detail->fd = fd;\n\n struct stat s;\n memset(&s, 0, sizeof(struct stat));\n\n if (fstat(fd, &s) != 0) {\n close(fd);\n delete d;\n delete p;\n PE_ERR(PEERR_STAT);\n return nullptr;\n }\n\n void *maddr = mmap(nullptr,\n static_cast(s.st_size),\n PROT_READ,\n MAP_SHARED,\n fd,\n 0);\n\n if (maddr == MAP_FAILED) {\n close(fd);\n delete d;\n delete p;\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->buf = reinterpret_cast(maddr);\n p->bufLen = static_cast(s.st_size);\n#endif\n p->copy = false;\n p->swapBytes = false;\n\n return p;\n}\n\nbounded_buffer *makeBufferFromPointer(std::uint8_t *data, std::uint32_t sz) {\n if (data == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n bounded_buffer *p = new (std::nothrow) bounded_buffer();\n\n if (p == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->copy = true;\n p->detail = nullptr;\n p->buf = data;\n p->bufLen = sz;\n p->swapBytes = false;\n\n return p;\n}\n\n// split buffer inclusively from from to to by offset\nbounded_buffer *\nsplitBuffer(bounded_buffer *b, std::uint32_t from, std::uint32_t to) {\n if 
(b == nullptr) {\n return nullptr;\n }\n\n // safety checks\n if (to < from || to > b->bufLen) {\n return nullptr;\n }\n\n // make a new buffer\n auto newBuff = new (std::nothrow) bounded_buffer();\n if (newBuff == nullptr) {\n return nullptr;\n }\n\n newBuff->copy = true;\n newBuff->buf = b->buf + from;\n newBuff->bufLen = (to - from);\n\n return newBuff;\n}\n\nvoid deleteBuffer(bounded_buffer *b) {\n if (b == nullptr) {\n return;\n }\n\n if (!b->copy) {\n#ifdef _WIN32\n UnmapViewOfFile(b->buf);\n CloseHandle(b->detail->sec);\n CloseHandle(b->detail->file);\n#else\n munmap(b->buf, b->bufLen);\n close(b->detail->fd);\n#endif\n }\n\n delete b->detail;\n delete b;\n}\n\nstd::uint64_t bufLen(bounded_buffer *b) {\n return b->bufLen;\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_module.h", "class DxbcAnalyzer {\n public:\n DxbcModule(DxbcReader& reader) {\n for (uint32_t i = 0; i < m_header.numChunks(); i++) {\n \n // The chunk tag is stored at the beginning of each chunk\n auto chunkReader = reader.clone(m_header.chunkOffset(i));\n auto tag = chunkReader.readTag();\n \n // The chunk size follows right after the four-character\n // code. 
This does not include the eight bytes that are\n // consumed by the FourCC and chunk length entry.\n auto chunkLength = chunkReader.readu32();\n \n chunkReader = chunkReader.clone(8);\n chunkReader = chunkReader.resize(chunkLength);\n \n if ((tag == \"SHDR\") || (tag == \"SHEX\"))\n m_shexChunk = new DxbcShex(chunkReader);\n \n if ((tag == \"ISGN\") || (tag == \"ISG1\"))\n m_isgnChunk = new DxbcIsgn(chunkReader, tag);\n \n if ((tag == \"OSGN\") || (tag == \"OSG5\") || (tag == \"OSG1\"))\n m_osgnChunk = new DxbcIsgn(chunkReader, tag);\n \n if ((tag == \"PCSG\") || (tag == \"PSG1\"))\n m_psgnChunk = new DxbcIsgn(chunkReader, tag);\n }\n }\n ~DxbcModule() {\n \n }\n SpirvCodeBuffer compile(\n const DxbcModuleInfo& moduleInfo,\n const std::string& fileName) {\n if (m_shexChunk == nullptr)\n throw DxvkError(\"DxbcModule::compile: No SHDR/SHEX chunk\");\n \n DxbcAnalysisInfo analysisInfo;\n \n DxbcAnalyzer analyzer(moduleInfo,\n m_shexChunk->programInfo(),\n m_isgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n this->runAnalyzer(analyzer, m_shexChunk->slice());\n\n m_bindings = std::make_optional(analysisInfo.bindings);\n \n DxbcCompiler compiler(\n fileName, moduleInfo,\n m_shexChunk->programInfo(),\n m_isgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n this->runCompiler(compiler, m_shexChunk->slice());\n\n m_icb = compiler.getIcbData();\n\n return compiler.finalize();\n }\n SpirvCodeBuffer compilePassthroughShader(\n const DxbcModuleInfo& moduleInfo,\n const std::string& fileName) const {\n if (m_shexChunk == nullptr)\n throw DxvkError(\"DxbcModule::compile: No SHDR/SHEX chunk\");\n \n DxbcAnalysisInfo analysisInfo;\n\n DxbcCompiler compiler(\n fileName, moduleInfo,\n DxbcProgramType::GeometryShader,\n m_osgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n compiler.processXfbPassthrough();\n return compiler.finalize();\n }\n private:\n DxbcHeader m_header;\n Rc m_isgnChunk;\n Rc m_osgnChunk;\n Rc m_psgnChunk;\n Rc m_shexChunk;\n std::vector 
m_icb;\n std::optional m_bindings;\n void runAnalyzer(\n DxbcAnalyzer& analyzer,\n DxbcCodeSlice slice) const {\n DxbcDecodeContext decoder;\n \n while (!slice.atEnd()) {\n decoder.decodeInstruction(slice);\n \n analyzer.processInstruction(\n decoder.getInstruction());\n }\n }\n void runCompiler(\n DxbcCompiler& compiler,\n DxbcCodeSlice slice) const {\n DxbcDecodeContext decoder;\n \n while (!slice.atEnd()) {\n decoder.decodeInstruction(slice);\n \n compiler.processInstruction(\n decoder.getInstruction());\n }\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_instruction.h", "#pragma once\n\n#include \"spirv_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief SPIR-V instruction\n * \n * Helps parsing a single instruction, providing\n * access to the op code, instruction length and\n * instruction arguments.\n */\n class SpirvInstruction {\n \n public:\n \n SpirvInstruction() { }\n SpirvInstruction(uint32_t* code, uint32_t offset, uint32_t length)\n : m_code(code), m_offset(offset), m_length(length) { }\n \n /**\n * \\brief SPIR-V Op code\n * \\returns The op code\n */\n spv::Op opCode() const {\n return static_cast(\n this->arg(0) & spv::OpCodeMask);\n }\n \n /**\n * \\brief Instruction length\n * \\returns Number of DWORDs\n */\n uint32_t length() const {\n return this->arg(0) >> spv::WordCountShift;\n }\n \n /**\n * \\brief Instruction offset\n * \\returns Offset in DWORDs\n */\n uint32_t offset() const {\n return m_offset;\n }\n \n /**\n * \\brief Argument value\n * \n * Retrieves an argument DWORD. Note that some instructions\n * take 64-bit arguments which require more than one DWORD.\n * Arguments start at index 1. Calling this method with an\n * argument ID of 0 will return the opcode token.\n * \\param [in] idx Argument index, starting at 1\n * \\returns The argument value\n */\n uint32_t arg(uint32_t idx) const {\n const uint32_t index = m_offset + idx;\n return index < m_length ? 
m_code[index] : 0;\n }\n\n /**\n * \\brief Argument string\n *\n * Retrieves a pointer to a UTF-8-encoded string.\n * \\param [in] idx Argument index, starting at 1\n * \\returns Pointer to the literal string\n */\n const char* chr(uint32_t idx) const {\n const uint32_t index = m_offset + idx;\n return index < m_length ? reinterpret_cast(&m_code[index]) : nullptr;\n }\n \n /**\n * \\brief Changes the value of an argument\n * \n * \\param [in] idx Argument index, starting at 1\n * \\param [in] word New argument word\n */\n void setArg(uint32_t idx, uint32_t word) const {\n if (m_offset + idx < m_length)\n m_code[m_offset + idx] = word;\n }\n \n private:\n \n uint32_t* m_code = nullptr;\n uint32_t m_offset = 0;\n uint32_t m_length = 0;\n \n };\n \n \n /**\n * \\brief SPIR-V instruction iterator\n * \n * Convenient iterator that can be used\n * to process raw SPIR-V shader code.\n */\n class SpirvInstructionIterator {\n \n public:\n \n SpirvInstructionIterator() { }\n SpirvInstructionIterator(uint32_t* code, uint32_t offset, uint32_t length)\n : m_code (length != 0 ? code : nullptr),\n m_offset(length != 0 ? 
offset : 0),\n m_length(length) {\n if ((length >= 5) && (offset == 0) && (m_code[0] == spv::MagicNumber))\n this->advance(5);\n }\n \n SpirvInstructionIterator& operator ++ () {\n this->advance(SpirvInstruction(m_code, m_offset, m_length).length());\n return *this;\n }\n \n SpirvInstructionIterator operator ++ (int) {\n SpirvInstructionIterator result = *this;\n this->advance(SpirvInstruction(m_code, m_offset, m_length).length());\n return result;\n }\n \n SpirvInstruction operator * () const {\n return SpirvInstruction(m_code, m_offset, m_length);\n }\n \n bool operator == (const SpirvInstructionIterator& other) const {\n return this->m_code == other.m_code\n && this->m_offset == other.m_offset\n && this->m_length == other.m_length;\n }\n \n bool operator != (const SpirvInstructionIterator& other) const {\n return this->m_code != other.m_code\n || this->m_offset != other.m_offset\n || this->m_length != other.m_length;\n }\n \n private:\n \n uint32_t* m_code = nullptr;\n uint32_t m_offset = 0;\n uint32_t m_length = 0;\n \n void advance(uint32_t n) {\n if (m_offset + n < m_length) {\n m_offset += n;\n } else {\n m_code = nullptr;\n m_offset = 0;\n m_length = 0;\n }\n }\n \n };\n \n}"], ["/lsfg-vk/framegen/src/pool/shaderpool.cpp", "#include \"pool/shaderpool.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/device.hpp\"\n#include \"core/pipeline.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Pool;\n\nCore::ShaderModule ShaderPool::getShader(\n const Core::Device& device, const std::string& name,\n const std::vector>& types) {\n auto it = shaders.find(name);\n if (it != shaders.end())\n return it->second;\n\n // grab the shader\n auto bytecode = this->source(name);\n if (bytecode.empty())\n throw std::runtime_error(\"Shader code is empty: \" + name);\n\n // create the shader module\n Core::ShaderModule shader(device, bytecode, types);\n shaders[name] = shader;\n return 
shader;\n}\n\nCore::Pipeline ShaderPool::getPipeline(\n const Core::Device& device, const std::string& name) {\n auto it = pipelines.find(name);\n if (it != pipelines.end())\n return it->second;\n\n // grab the shader module\n auto shader = this->getShader(device, name, {});\n\n // create the pipeline\n Core::Pipeline pipeline(device, shader);\n pipelines[name] = pipeline;\n return pipeline;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/rc/util_rc_ptr.h", "#pragma once\n\n#include \n#include \n#include \n#include \n#include \n\nnamespace dxvk {\n\n /**\n * \\brief Pointer for reference-counted objects\n * \n * This only requires the given type to implement \\c incRef\n * and \\c decRef methods that adjust the reference count.\n * \\tparam T Object type\n */\n template\n class Rc {\n template\n friend class Rc;\n public:\n\n Rc() = default;\n Rc(std::nullptr_t) { }\n\n Rc(T* object)\n : m_object(object) {\n this->incRef();\n }\n\n Rc(const Rc& other)\n : m_object(other.m_object) {\n this->incRef();\n }\n\n template\n Rc(const Rc& other)\n : m_object(other.m_object) {\n this->incRef();\n }\n\n Rc(Rc&& other)\n : m_object(other.m_object) {\n other.m_object = nullptr;\n }\n\n template\n Rc(Rc&& other)\n : m_object(other.m_object) {\n other.m_object = nullptr;\n }\n\n Rc& operator = (std::nullptr_t) {\n this->decRef();\n m_object = nullptr;\n return *this;\n }\n\n Rc& operator = (const Rc& other) {\n other.incRef();\n this->decRef();\n m_object = other.m_object;\n return *this;\n }\n\n template\n Rc& operator = (const Rc& other) {\n other.incRef();\n this->decRef();\n m_object = other.m_object;\n return *this;\n }\n\n Rc& operator = (Rc&& other) {\n this->decRef();\n this->m_object = other.m_object;\n other.m_object = nullptr;\n return *this;\n }\n\n template\n Rc& operator = (Rc&& other) {\n this->decRef();\n this->m_object = other.m_object;\n other.m_object = nullptr;\n return *this;\n }\n\n ~Rc() {\n this->decRef();\n }\n\n T& operator * () const { return 
*m_object; }\n T* operator -> () const { return m_object; }\n T* ptr() const { return m_object; }\n\n template bool operator == (const Rc& other) const { return m_object == other.m_object; }\n template bool operator != (const Rc& other) const { return m_object != other.m_object; }\n\n template bool operator == (Tx* other) const { return m_object == other; }\n template bool operator != (Tx* other) const { return m_object != other; }\n\n bool operator == (std::nullptr_t) const { return m_object == nullptr; }\n bool operator != (std::nullptr_t) const { return m_object != nullptr; }\n \n explicit operator bool () const {\n return m_object != nullptr;\n }\n\n /**\n * \\brief Sets pointer without acquiring a reference\n *\n * Must only be use when a reference has been taken via\n * other means.\n * \\param [in] object Object pointer\n */\n void unsafeInsert(T* object) {\n this->decRef();\n m_object = object;\n }\n\n /**\n * \\brief Extracts raw pointer\n *\n * Sets the smart pointer to null without decrementing the\n * reference count. 
Must only be used when the reference\n * count is decremented in some other way.\n * \\returns Pointer to owned object\n */\n T* unsafeExtract() {\n return std::exchange(m_object, nullptr);\n }\n\n /**\n * \\brief Creates smart pointer without taking reference\n *\n * Must only be used when a refernece has been obtained via other means.\n * \\param [in] object Pointer to object to take ownership of\n */\n static Rc unsafeCreate(T* object) {\n return Rc(object, false);\n }\n\n private:\n\n T* m_object = nullptr;\n\n explicit Rc(T* object, bool)\n : m_object(object) { }\n\n force_inline void incRef() const {\n if (m_object != nullptr)\n m_object->incRef();\n }\n\n force_inline void decRef() const {\n if (m_object != nullptr) {\n if constexpr (std::is_void_vdecRef())>) {\n m_object->decRef();\n } else {\n // Deprecated, objects should manage themselves now.\n if (!m_object->decRef())\n delete m_object;\n }\n }\n }\n\n };\n\n template\n bool operator == (Tx* a, const Rc& b) { return b == a; }\n\n template\n bool operator != (Tx* a, const Rc& b) { return b != a; }\n\n struct RcHash {\n template\n size_t operator () (const Rc& rc) const {\n return reinterpret_cast(rc.ptr()) / sizeof(T);\n }\n };\n\n}\n\ntemplate\nstd::ostream& operator << (std::ostream& os, const dxvk::Rc& rc) {\n return os << rc.ptr();\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_small_vector.h", "#pragma once\n\n#include \n#include \n#include \n#include \n\n#include \"util_bit.h\"\n#include \"util_likely.h\"\n\nnamespace dxvk {\n\n template\n class small_vector {\n using storage = std::aligned_storage_t;\n public:\n\n constexpr static size_t MinCapacity = N;\n\n small_vector() { }\n\n small_vector(size_t size) {\n resize(size);\n }\n\n small_vector(const small_vector& other) {\n reserve(other.m_size);\n\n for (size_t i = 0; i < other.m_size; i++)\n *ptr(i) = *other.ptr(i);\n\n m_size = other.m_size;\n }\n\n small_vector& operator = (const small_vector& other) {\n for (size_t i = 0; i < m_size; 
i++)\n ptr(i)->~T();\n\n reserve(other.m_size);\n\n for (size_t i = 0; i < other.m_size; i++)\n *ptr(i) = *other.ptr(i);\n\n m_size = other.m_size;\n return *this;\n }\n\n small_vector(small_vector&& other) {\n if (other.m_size <= N) {\n for (size_t i = 0; i < other.m_size; i++)\n new (&u.m_data[i]) T(std::move(*other.ptr(i)));\n } else {\n u.m_ptr = other.u.m_ptr;\n m_capacity = other.m_capacity;\n\n other.u.m_ptr = nullptr;\n other.m_capacity = N;\n }\n\n m_size = other.m_size;\n other.m_size = 0;\n }\n\n small_vector& operator = (small_vector&& other) {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n\n if (other.m_size <= N) {\n m_capacity = N;\n\n for (size_t i = 0; i < other.m_size; i++)\n new (&u.m_data[i]) T(std::move(*other.ptr(i)));\n } else {\n u.m_ptr = other.u.m_ptr;\n m_capacity = other.m_capacity;\n\n other.u.m_ptr = nullptr;\n other.m_capacity = N;\n }\n\n m_size = other.m_size;\n other.m_size = 0;\n return *this;\n }\n\n ~small_vector() {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n }\n\n size_t size() const {\n return m_size;\n }\n\n void reserve(size_t n) {\n if (likely(n <= m_capacity))\n return;\n\n n = pick_capacity(n);\n\n storage* data = new storage[n];\n\n for (size_t i = 0; i < m_size; i++) {\n new (&data[i]) T(std::move(*ptr(i)));\n ptr(i)->~T();\n }\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n\n m_capacity = n;\n u.m_ptr = data;\n }\n\n const T* data() const { return ptr(0); }\n T* data() { return ptr(0); }\n\n void resize(size_t n) {\n reserve(n);\n\n for (size_t i = n; i < m_size; i++)\n ptr(i)->~T();\n\n for (size_t i = m_size; i < n; i++)\n new (ptr(i)) T();\n\n m_size = n;\n }\n\n void push_back(const T& object) {\n reserve(m_size + 1);\n new (ptr(m_size++)) T(object);\n }\n\n void push_back(T&& object) {\n reserve(m_size + 1);\n new (ptr(m_size++)) T(std::move(object));\n }\n\n template\n T& emplace_back(Args... 
args) {\n reserve(m_size + 1);\n return *(new (ptr(m_size++)) T(std::forward(args)...));\n }\n\n void erase(size_t idx) {\n ptr(idx)->~T();\n\n for (size_t i = idx; i < m_size - 1; i++) {\n new (ptr(i)) T(std::move(*ptr(i + 1)));\n ptr(i + 1)->~T();\n }\n }\n\n void pop_back() {\n ptr(--m_size)->~T();\n }\n\n void clear() {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n m_size = 0;\n }\n\n bool empty() const {\n return m_size == 0;\n }\n\n T& operator [] (size_t idx) { return *ptr(idx); }\n const T& operator [] (size_t idx) const { return *ptr(idx); }\n\n T& front() { return *ptr(0); }\n const T& front() const { return *ptr(0); }\n\n T& back() { return *ptr(m_size - 1); }\n const T& back() const { return *ptr(m_size - 1); }\n\n private:\n\n size_t m_capacity = N;\n size_t m_size = 0;\n\n union {\n storage* m_ptr;\n storage m_data[N];\n } u;\n\n size_t pick_capacity(size_t n) {\n // Pick next largest power of two for the new capacity\n return size_t(1u) << ((sizeof(n) * 8u) - bit::lzcnt(n - 1));\n }\n\n T* ptr(size_t idx) {\n return m_capacity == N\n ? reinterpret_cast(&u.m_data[idx])\n : reinterpret_cast(&u.m_ptr[idx]);\n }\n\n const T* ptr(size_t idx) const {\n return m_capacity == N\n ? 
reinterpret_cast(&u.m_data[idx])\n : reinterpret_cast(&u.m_ptr[idx]);\n }\n\n };\n\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/nt-headers.h", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#pragma once\n\n#include \n#include \n#include \n\n// need to pack these structure definitions\n\n// some constant definitions\n// clang-format off\nnamespace peparse {\nconstexpr std::uint32_t RICH_MAGIC_END = 0x68636952;\nconstexpr std::uint32_t RICH_MAGIC_START = 0x536e6144;\nconstexpr std::uint32_t RICH_OFFSET = 0x80;\nconstexpr std::uint16_t MZ_MAGIC = 0x5A4D;\nconstexpr std::uint32_t NT_MAGIC = 0x00004550;\nconstexpr std::uint16_t NUM_DIR_ENTRIES = 16;\nconstexpr std::uint16_t NT_OPTIONAL_32_MAGIC = 0x10B;\nconstexpr std::uint16_t NT_OPTIONAL_64_MAGIC = 0x20B;\nconstexpr std::uint16_t NT_SHORT_NAME_LEN = 8;\nconstexpr std::uint16_t SYMTAB_RECORD_LEN = 18;\n\n#ifndef _PEPARSE_WINDOWS_CONFLICTS\n// Machine Types\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_UNKNOWN = 0x0;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ALPHA = 0x184; // Alpha_AXP\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ALPHA64 = 0x284; // ALPHA64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AM33 = 0x1d3; // Matsushita AM33\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AMD64 = 0x8664; // x64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARM = 0x1c0; // ARM little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARM64 = 0xaa64; // ARM64 little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARMNT = 0x1c4; // ARM Thumb-2 little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AXP64 = 0x284; // ALPHA64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_CEE = 0xc0ee;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_CEF = 0xcef;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_EBC = 0xebc; // EFI byte code\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_I386 = 0x14c; // Intel 386 or later processors and compatible processors\nconstexpr std::uint16_t 
IMAGE_FILE_MACHINE_IA64 = 0x200; // Intel Itanium processor family\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_LOONGARCH32 = 0x6232; // LoongArch 32-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_LOONGARCH64 = 0x6264; // LoongArch 64-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_M32R = 0x9041; // Mitsubishi M32R little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPS16 = 0x266; // MIPS16\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPSFPU = 0x366; // MIPS with FPU\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466; // MIPS16 with FPU\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPC = 0x1f0; // Power PC little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPCFP = 0x1f1; // Power PC with floating point support\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPCBE = 0x1f2; // Power PC big endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R3000 = 0x162; // MIPS little endian, 0x160 big-endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R4000 = 0x166; // MIPS little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R10000 = 0x168; // MIPS little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV32 = 0x5032; // RISC-V 32-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV64 = 0x5064; // RISC-V 64-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV128 = 0x5128; // RISC-V 128-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3 = 0x1a2; // Hitachi SH3\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3DSP = 0x1a3; // Hitachi SH3 DSP\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3E = 0x1a4; // Hitachi SH3E\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH4 = 0x1a6; // Hitachi SH4\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH5 = 0x1a8; // Hitachi SH5\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_THUMB = 0x1c2; // Thumb\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_TRICORE = 0x520; // Infineon\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169; // MIPS 
little-endian WCE v2\n\nconstexpr std::uint16_t IMAGE_FILE_RELOCS_STRIPPED = 0x0001;\nconstexpr std::uint16_t IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002;\nconstexpr std::uint16_t IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004;\nconstexpr std::uint16_t IMAGE_FILE_LOCAL_SYMS_STRIPPED = 0x0008;\nconstexpr std::uint16_t IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x0010;\nconstexpr std::uint16_t IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020;\nconstexpr std::uint16_t IMAGE_FILE_BYTES_REVERSED_LO = 0x0080;\nconstexpr std::uint16_t IMAGE_FILE_32BIT_MACHINE = 0x0100;\nconstexpr std::uint16_t IMAGE_FILE_DEBUG_STRIPPED = 0x0200;\nconstexpr std::uint16_t IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400;\nconstexpr std::uint16_t IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800;\nconstexpr std::uint16_t IMAGE_FILE_SYSTEM = 0x1000;\nconstexpr std::uint16_t IMAGE_FILE_DLL = 0x2000;\nconstexpr std::uint16_t IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000;\nconstexpr std::uint16_t IMAGE_FILE_BYTES_REVERSED_HI = 0x8000;\n\nconstexpr std::uint32_t IMAGE_SCN_TYPE_NO_PAD = 0x00000008;\nconstexpr std::uint32_t IMAGE_SCN_CNT_CODE = 0x00000020;\nconstexpr std::uint32_t IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040;\nconstexpr std::uint32_t IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080;\nconstexpr std::uint32_t IMAGE_SCN_LNK_OTHER = 0x00000100;\nconstexpr std::uint32_t IMAGE_SCN_LNK_INFO = 0x00000200;\nconstexpr std::uint32_t IMAGE_SCN_LNK_REMOVE = 0x00000800;\nconstexpr std::uint32_t IMAGE_SCN_LNK_COMDAT = 0x00001000;\nconstexpr std::uint32_t IMAGE_SCN_NO_DEFER_SPEC_EXC = 0x00004000;\nconstexpr std::uint32_t IMAGE_SCN_GPREL = 0x00008000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_FARDATA = 0x00008000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_PURGEABLE = 0x00020000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_16BIT = 0x00020000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_LOCKED = 0x00040000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_PRELOAD = 0x00080000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_1BYTES = 0x00100000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_2BYTES = 
0x00200000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_4BYTES = 0x00300000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_8BYTES = 0x00400000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_16BYTES = 0x00500000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_32BYTES = 0x00600000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_64BYTES = 0x00700000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_128BYTES = 0x00800000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_256BYTES = 0x00900000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_512BYTES = 0x00A00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_MASK = 0x00F00000;\nconstexpr std::uint32_t IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_DISCARDABLE = 0x02000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_NOT_CACHED = 0x04000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_NOT_PAGED = 0x08000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_SHARED = 0x10000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_EXECUTE = 0x20000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_READ = 0x40000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_WRITE = 0x80000000;\n\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_UNKNOWN = 0;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_NATIVE = 1;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_GUI = 2;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_CUI = 3;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_OS2_CUI = 5;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_POSIX_CUI = 7;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_NATIVE_WINDOWS = 8;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_APPLICATION = 10;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12;\nconstexpr 
std::uint16_t IMAGE_SUBSYSTEM_EFI_ROM = 13;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_XBOX = 14;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_XBOX_CODE_CATALOG = 17;\n\n// Symbol section number values\nconstexpr std::int16_t IMAGE_SYM_UNDEFINED = 0;\nconstexpr std::int16_t IMAGE_SYM_ABSOLUTE = -1;\nconstexpr std::int16_t IMAGE_SYM_DEBUG = -2;\n\n// Symbol table types\nconstexpr std::uint16_t IMAGE_SYM_TYPE_NULL = 0;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_VOID = 1;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_CHAR = 2;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_SHORT = 3;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_INT = 4;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_LONG = 5;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_FLOAT = 6;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_DOUBLE = 7;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_STRUCT = 8;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_UNION = 9;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_ENUM = 10;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_MOE = 11;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_BYTE = 12;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_WORD = 13;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_UINT = 14;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_DWORD = 15;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_NULL = 0;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_POINTER = 1;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_FUNCTION = 2;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_ARRAY = 3;\n\n// Symbol table storage classes\nconstexpr std::uint8_t IMAGE_SYM_CLASS_END_OF_FUNCTION = static_cast(-1);\nconstexpr std::uint8_t IMAGE_SYM_CLASS_NULL = 0;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_AUTOMATIC = 1;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_EXTERNAL = 2;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_STATIC = 3;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_REGISTER = 4;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_EXTERNAL_DEF = 5;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_LABEL = 6;\nconstexpr std::uint8_t 
IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_ARGUMENT = 9;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_STRUCT_TAG = 10;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNION_TAG = 12;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_TYPE_DEFINITION = 13;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_ENUM_TAG = 15;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_REGISTER_PARAM = 17;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_BIT_FIELD = 18;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_BLOCK = 100;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_FUNCTION = 101;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_END_OF_STRUCT = 102;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_FILE = 103;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_SECTION = 104;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_CLR_TOKEN = 107;\n\n// Optional header DLL characteristics\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY = 0x0080;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_ISOLATION = 0x0200;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_SEH = 0x0400;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_BIND = 0x0800;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_APPCONTAINER = 0x1000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 0x2000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_GUARD_CF = 0x4000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000;\n\n// Extended DLL characteristics\nconstexpr std::uint16_t 
IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT = 0x0001;\n#endif\n// clang-format on\n\nstruct dos_header {\n std::uint16_t e_magic;\n std::uint16_t e_cblp;\n std::uint16_t e_cp;\n std::uint16_t e_crlc;\n std::uint16_t e_cparhdr;\n std::uint16_t e_minalloc;\n std::uint16_t e_maxalloc;\n std::uint16_t e_ss;\n std::uint16_t e_sp;\n std::uint16_t e_csum;\n std::uint16_t e_ip;\n std::uint16_t e_cs;\n std::uint16_t e_lfarlc;\n std::uint16_t e_ovno;\n std::uint16_t e_res[4];\n std::uint16_t e_oemid;\n std::uint16_t e_oeminfo;\n std::uint16_t e_res2[10];\n std::uint32_t e_lfanew;\n};\n\nstruct file_header {\n std::uint16_t Machine;\n std::uint16_t NumberOfSections;\n std::uint32_t TimeDateStamp;\n std::uint32_t PointerToSymbolTable;\n std::uint32_t NumberOfSymbols;\n std::uint16_t SizeOfOptionalHeader;\n std::uint16_t Characteristics;\n};\n\nstruct data_directory {\n std::uint32_t VirtualAddress;\n std::uint32_t Size;\n};\n\nenum data_directory_kind {\n DIR_EXPORT = 0,\n DIR_IMPORT = 1,\n DIR_RESOURCE = 2,\n DIR_EXCEPTION = 3,\n DIR_SECURITY = 4,\n DIR_BASERELOC = 5,\n DIR_DEBUG = 6,\n DIR_ARCHITECTURE = 7,\n DIR_GLOBALPTR = 8,\n DIR_TLS = 9,\n DIR_LOAD_CONFIG = 10,\n DIR_BOUND_IMPORT = 11,\n DIR_IAT = 12,\n DIR_DELAY_IMPORT = 13,\n DIR_COM_DESCRIPTOR = 14,\n DIR_RESERVED = 15,\n};\n\nstruct optional_header_32 {\n std::uint16_t Magic;\n std::uint8_t MajorLinkerVersion;\n std::uint8_t MinorLinkerVersion;\n std::uint32_t SizeOfCode;\n std::uint32_t SizeOfInitializedData;\n std::uint32_t SizeOfUninitializedData;\n std::uint32_t AddressOfEntryPoint;\n std::uint32_t BaseOfCode;\n std::uint32_t BaseOfData;\n std::uint32_t ImageBase;\n std::uint32_t SectionAlignment;\n std::uint32_t FileAlignment;\n std::uint16_t MajorOperatingSystemVersion;\n std::uint16_t MinorOperatingSystemVersion;\n std::uint16_t MajorImageVersion;\n std::uint16_t MinorImageVersion;\n std::uint16_t MajorSubsystemVersion;\n std::uint16_t MinorSubsystemVersion;\n std::uint32_t Win32VersionValue;\n std::uint32_t 
SizeOfImage;\n std::uint32_t SizeOfHeaders;\n std::uint32_t CheckSum;\n std::uint16_t Subsystem;\n std::uint16_t DllCharacteristics;\n std::uint32_t SizeOfStackReserve;\n std::uint32_t SizeOfStackCommit;\n std::uint32_t SizeOfHeapReserve;\n std::uint32_t SizeOfHeapCommit;\n std::uint32_t LoaderFlags;\n std::uint32_t NumberOfRvaAndSizes;\n data_directory DataDirectory[NUM_DIR_ENTRIES];\n};\n\n/*\n * This is used for PE32+ binaries. It is similar to optional_header_32\n * except some fields don't exist here (BaseOfData), and others are bigger.\n */\nstruct optional_header_64 {\n std::uint16_t Magic;\n std::uint8_t MajorLinkerVersion;\n std::uint8_t MinorLinkerVersion;\n std::uint32_t SizeOfCode;\n std::uint32_t SizeOfInitializedData;\n std::uint32_t SizeOfUninitializedData;\n std::uint32_t AddressOfEntryPoint;\n std::uint32_t BaseOfCode;\n std::uint64_t ImageBase;\n std::uint32_t SectionAlignment;\n std::uint32_t FileAlignment;\n std::uint16_t MajorOperatingSystemVersion;\n std::uint16_t MinorOperatingSystemVersion;\n std::uint16_t MajorImageVersion;\n std::uint16_t MinorImageVersion;\n std::uint16_t MajorSubsystemVersion;\n std::uint16_t MinorSubsystemVersion;\n std::uint32_t Win32VersionValue;\n std::uint32_t SizeOfImage;\n std::uint32_t SizeOfHeaders;\n std::uint32_t CheckSum;\n std::uint16_t Subsystem;\n std::uint16_t DllCharacteristics;\n std::uint64_t SizeOfStackReserve;\n std::uint64_t SizeOfStackCommit;\n std::uint64_t SizeOfHeapReserve;\n std::uint64_t SizeOfHeapCommit;\n std::uint32_t LoaderFlags;\n std::uint32_t NumberOfRvaAndSizes;\n data_directory DataDirectory[NUM_DIR_ENTRIES];\n};\n\nstruct nt_header_32 {\n std::uint32_t Signature;\n file_header FileHeader;\n optional_header_32 OptionalHeader;\n optional_header_64 OptionalHeader64;\n std::uint16_t OptionalMagic;\n};\n\nstruct rich_entry {\n std::uint16_t ProductId;\n std::uint16_t BuildNumber;\n std::uint32_t Count;\n};\n\nstruct rich_header {\n std::uint32_t StartSignature;\n std::vector Entries;\n 
std::uint32_t EndSignature;\n std::uint32_t DecryptionKey;\n std::uint32_t Checksum;\n bool isPresent;\n bool isValid;\n};\n\n/*\n * This structure is only used to know how far to move the offset\n * when parsing resources. The data is stored in a resource_dir_entry\n * struct but that also has extra information used in the parsing which\n * causes the size to be inaccurate.\n */\nstruct resource_dir_entry_sz {\n std::uint32_t ID;\n std::uint32_t RVA;\n};\n\nstruct resource_dir_entry {\n inline resource_dir_entry(void) : ID(0), RVA(0), type(0), name(0), lang(0) {\n }\n\n std::uint32_t ID;\n std::uint32_t RVA;\n std::uint32_t type;\n std::uint32_t name;\n std::uint32_t lang;\n std::string type_str;\n std::string name_str;\n std::string lang_str;\n};\n\nstruct resource_dir_table {\n std::uint32_t Characteristics;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint16_t NameEntries;\n std::uint16_t IDEntries;\n};\n\nstruct resource_dat_entry {\n std::uint32_t RVA;\n std::uint32_t size;\n std::uint32_t codepage;\n std::uint32_t reserved;\n};\n\nstruct image_section_header {\n std::uint8_t Name[NT_SHORT_NAME_LEN];\n union {\n std::uint32_t PhysicalAddress;\n std::uint32_t VirtualSize;\n } Misc;\n std::uint32_t VirtualAddress;\n std::uint32_t SizeOfRawData;\n std::uint32_t PointerToRawData;\n std::uint32_t PointerToRelocations;\n std::uint32_t PointerToLinenumbers;\n std::uint16_t NumberOfRelocations;\n std::uint16_t NumberOfLinenumbers;\n std::uint32_t Characteristics;\n};\n\nstruct import_dir_entry {\n std::uint32_t LookupTableRVA;\n std::uint32_t TimeStamp;\n std::uint32_t ForwarderChain;\n std::uint32_t NameRVA;\n std::uint32_t AddressRVA;\n};\n\nstruct export_dir_table {\n std::uint32_t ExportFlags;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t NameRVA;\n std::uint32_t OrdinalBase;\n std::uint32_t AddressTableEntries;\n std::uint32_t NumberOfNamePointers;\n 
std::uint32_t ExportAddressTableRVA;\n std::uint32_t NamePointerRVA;\n std::uint32_t OrdinalTableRVA;\n};\n\nstruct debug_dir_entry {\n std::uint32_t Characteristics;\n std::uint32_t TimeStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t Type;\n std::uint32_t SizeOfData;\n std::uint32_t AddressOfRawData;\n std::uint32_t PointerToRawData;\n};\n\nenum reloc_type {\n RELOC_ABSOLUTE = 0,\n RELOC_HIGH = 1,\n RELOC_LOW = 2,\n RELOC_HIGHLOW = 3,\n RELOC_HIGHADJ = 4,\n RELOC_MIPS_JMPADDR = 5, // only valid on MIPS\n RELOC_ARM_MOV32 = 5, // only valid on ARM/Thumb\n RELOC_RISCV_HIGH20 = 5, // only valid on RISC-V\n RELOC_RESERVED = 6,\n RELOC_THUMB_MOV32 = 7, // only valid on Thumb\n RELOC_RISCV_LOW32I = 7, // only valid on RISC-V\n RELOC_RISCV_LOW12S = 8, // only valid on RISC-V\n RELOC_LOONGARCH32_MARK_LA = 8, // only valid on LoongArch 32\n RELOC_LOONGARCH64_MARK_LA = 8, // only valid on LoongArch 64\n RELOC_MIPS_JMPADDR16 = 9, // only valid on MIPS\n RELOC_IA64_IMM64 = 9,\n RELOC_DIR64 = 10\n};\n\nstruct reloc_block {\n std::uint32_t PageRVA;\n std::uint32_t BlockSize;\n};\n\nstruct image_load_config_code_integrity {\n std::uint16_t Flags;\n std::uint16_t Catalog;\n std::uint32_t CatalogOffset;\n std::uint32_t Reserved;\n};\n\nstruct image_load_config_32 {\n std::uint32_t Size;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t GlobalFlagsClear;\n std::uint32_t GlobalFlagsSet;\n std::uint32_t CriticalSectionDefaultTimeout;\n std::uint32_t DeCommitFreeBlockThreshold;\n std::uint32_t DeCommitTotalFreeThreshold;\n std::uint32_t LockPrefixTable;\n std::uint32_t MaximumAllocationSize;\n std::uint32_t VirtualMemoryThreshold;\n std::uint32_t ProcessHeapFlags;\n std::uint32_t ProcessAffinityMask;\n std::uint16_t CSDVersion;\n std::uint16_t DependentLoadFlags;\n std::uint32_t EditList;\n std::uint32_t SecurityCookie;\n std::uint32_t SEHandlerTable;\n std::uint32_t SEHandlerCount;\n 
std::uint32_t GuardCFCheckFunctionPointer;\n std::uint32_t GuardCFDispatchFunctionPointer;\n std::uint32_t GuardCFFunctionTable;\n std::uint32_t GuardCFFunctionCount;\n std::uint32_t GuardFlags;\n image_load_config_code_integrity CodeIntegrity;\n std::uint32_t GuardAddressTakenIatEntryTable;\n std::uint32_t GuardAddressTakenIatEntryCount;\n std::uint32_t GuardLongJumpTargetTable;\n std::uint32_t GuardLongJumpTargetCount;\n std::uint32_t DynamicValueRelocTable;\n std::uint32_t CHPEMetadataPointer;\n std::uint32_t GuardRFFailureRoutine;\n std::uint32_t GuardRFFailureRoutineFunctionPointer;\n std::uint32_t DynamicValueRelocTableOffset;\n std::uint16_t DynamicValueRelocTableSection;\n std::uint16_t Reserved2;\n std::uint32_t GuardRFVerifyStackPointerFunctionPointer;\n std::uint32_t HotPatchTableOffset;\n std::uint32_t Reserved3;\n std::uint32_t EnclaveConfigurationPointer;\n std::uint32_t VolatileMetadataPointer;\n};\n\nstruct image_load_config_64 {\n std::uint32_t Size;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t GlobalFlagsClear;\n std::uint32_t GlobalFlagsSet;\n std::uint32_t CriticalSectionDefaultTimeout;\n std::uint64_t DeCommitFreeBlockThreshold;\n std::uint64_t DeCommitTotalFreeThreshold;\n std::uint64_t LockPrefixTable;\n std::uint64_t MaximumAllocationSize;\n std::uint64_t VirtualMemoryThreshold;\n std::uint64_t ProcessAffinityMask;\n std::uint32_t ProcessHeapFlags;\n std::uint16_t CSDVersion;\n std::uint16_t DependentLoadFlags;\n std::uint64_t EditList;\n std::uint64_t SecurityCookie;\n std::uint64_t SEHandlerTable;\n std::uint64_t SEHandlerCount;\n std::uint64_t GuardCFCheckFunctionPointer;\n std::uint64_t GuardCFDispatchFunctionPointer;\n std::uint64_t GuardCFFunctionTable;\n std::uint64_t GuardCFFunctionCount;\n std::uint32_t GuardFlags;\n image_load_config_code_integrity CodeIntegrity;\n std::uint64_t GuardAddressTakenIatEntryTable;\n std::uint64_t GuardAddressTakenIatEntryCount;\n 
std::uint64_t GuardLongJumpTargetTable;\n std::uint64_t GuardLongJumpTargetCount;\n std::uint64_t DynamicValueRelocTable;\n std::uint64_t CHPEMetadataPointer;\n std::uint64_t GuardRFFailureRoutine;\n std::uint64_t GuardRFFailureRoutineFunctionPointer;\n std::uint32_t DynamicValueRelocTableOffset;\n std::uint16_t DynamicValueRelocTableSection;\n std::uint16_t Reserved2;\n std::uint64_t GuardRFVerifyStackPointerFunctionPointer;\n std::uint32_t HotPatchTableOffset;\n std::uint32_t Reserved3;\n std::uint64_t EnclaveConfigurationPointer;\n std::uint64_t VolatileMetadataPointer;\n};\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_reader.h", "class DxbcReader {\n public:\n template\n auto readEnum() {\n using Tx = std::underlying_type_t;\n return static_cast(this->readNum());\n }\n DxbcTag readTag() {\n DxbcTag tag;\n this->read(&tag, 4);\n return tag;\n }\n std::string readString() {\n std::string result;\n \n while (m_data[m_pos] != '\\0')\n result.push_back(m_data[m_pos++]);\n \n m_pos++;\n return result;\n }\n void read(void* dst, size_t n) {\n if (m_pos + n > m_size)\n throw DxvkError(\"DxbcReader::read: Unexpected end of file\");\n std::memcpy(dst, m_data + m_pos, n);\n m_pos += n;\n }\n void skip(size_t n) {\n if (m_pos + n > m_size)\n throw DxvkError(\"DxbcReader::skip: Unexpected end of file\");\n m_pos += n;\n }\n DxbcReader clone(size_t pos) const {\n if (pos > m_size)\n throw DxvkError(\"DxbcReader::clone: Invalid offset\");\n return DxbcReader(m_data + pos, m_size - pos);\n }\n DxbcReader resize(size_t size) const {\n if (size > m_size)\n throw DxvkError(\"DxbcReader::resize: Invalid size\");\n return DxbcReader(m_data, size, m_pos);\n }\n void store(std::ostream&& stream) const {\n stream.write(m_data, m_size);\n }\n private:\n const char* m_data = nullptr;\n size_t m_size = 0;\n size_t m_pos = 0;\n template\n T readNum() {\n T result;\n this->read(&result, sizeof(result));\n return result;\n }\n};"], 
["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_enums.h", "#pragma once\n\n#include \"dxbc_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Instruction code listing\n */\n enum class DxbcOpcode : uint32_t {\n Add = 0,\n And = 1,\n Break = 2,\n Breakc = 3,\n Call = 4,\n Callc = 5,\n Case = 6,\n Continue = 7,\n Continuec = 8,\n Cut = 9,\n Default = 10,\n DerivRtx = 11,\n DerivRty = 12,\n Discard = 13,\n Div = 14,\n Dp2 = 15,\n Dp3 = 16,\n Dp4 = 17,\n Else = 18,\n Emit = 19,\n EmitThenCut = 20,\n EndIf = 21,\n EndLoop = 22,\n EndSwitch = 23,\n Eq = 24,\n Exp = 25,\n Frc = 26,\n FtoI = 27,\n FtoU = 28,\n Ge = 29,\n IAdd = 30,\n If = 31,\n IEq = 32,\n IGe = 33,\n ILt = 34,\n IMad = 35,\n IMax = 36,\n IMin = 37,\n IMul = 38,\n INe = 39,\n INeg = 40,\n IShl = 41,\n IShr = 42,\n ItoF = 43,\n Label = 44,\n Ld = 45,\n LdMs = 46,\n Log = 47,\n Loop = 48,\n Lt = 49,\n Mad = 50,\n Min = 51,\n Max = 52,\n CustomData = 53,\n Mov = 54,\n Movc = 55,\n Mul = 56,\n Ne = 57,\n Nop = 58,\n Not = 59,\n Or = 60,\n ResInfo = 61,\n Ret = 62,\n Retc = 63,\n RoundNe = 64,\n RoundNi = 65,\n RoundPi = 66,\n RoundZ = 67,\n Rsq = 68,\n Sample = 69,\n SampleC = 70,\n SampleClz = 71,\n SampleL = 72,\n SampleD = 73,\n SampleB = 74,\n Sqrt = 75,\n Switch = 76,\n SinCos = 77,\n UDiv = 78,\n ULt = 79,\n UGe = 80,\n UMul = 81,\n UMad = 82,\n UMax = 83,\n UMin = 84,\n UShr = 85,\n UtoF = 86,\n Xor = 87,\n DclResource = 88,\n DclConstantBuffer = 89,\n DclSampler = 90,\n DclIndexRange = 91,\n DclGsOutputPrimitiveTopology = 92,\n DclGsInputPrimitive = 93,\n DclMaxOutputVertexCount = 94,\n DclInput = 95,\n DclInputSgv = 96,\n DclInputSiv = 97,\n DclInputPs = 98,\n DclInputPsSgv = 99,\n DclInputPsSiv = 100,\n DclOutput = 101,\n DclOutputSgv = 102,\n DclOutputSiv = 103,\n DclTemps = 104,\n DclIndexableTemp = 105,\n DclGlobalFlags = 106,\n Reserved0 = 107,\n Lod = 108,\n Gather4 = 109,\n SamplePos = 110,\n SampleInfo = 111,\n Reserved1 = 112,\n HsDecls = 113,\n HsControlPointPhase = 114,\n HsForkPhase = 
115,\n HsJoinPhase = 116,\n EmitStream = 117,\n CutStream = 118,\n EmitThenCutStream = 119,\n InterfaceCall = 120,\n BufInfo = 121,\n DerivRtxCoarse = 122,\n DerivRtxFine = 123,\n DerivRtyCoarse = 124,\n DerivRtyFine = 125,\n Gather4C = 126,\n Gather4Po = 127,\n Gather4PoC = 128,\n Rcp = 129,\n F32toF16 = 130,\n F16toF32 = 131,\n UAddc = 132,\n USubb = 133,\n CountBits = 134,\n FirstBitHi = 135,\n FirstBitLo = 136,\n FirstBitShi = 137,\n UBfe = 138,\n IBfe = 139,\n Bfi = 140,\n BfRev = 141,\n Swapc = 142,\n DclStream = 143,\n DclFunctionBody = 144,\n DclFunctionTable = 145,\n DclInterface = 146,\n DclInputControlPointCount = 147,\n DclOutputControlPointCount = 148,\n DclTessDomain = 149,\n DclTessPartitioning = 150,\n DclTessOutputPrimitive = 151,\n DclHsMaxTessFactor = 152,\n DclHsForkPhaseInstanceCount = 153,\n DclHsJoinPhaseInstanceCount = 154,\n DclThreadGroup = 155,\n DclUavTyped = 156,\n DclUavRaw = 157,\n DclUavStructured = 158,\n DclThreadGroupSharedMemoryRaw = 159,\n DclThreadGroupSharedMemoryStructured = 160,\n DclResourceRaw = 161,\n DclResourceStructured = 162,\n LdUavTyped = 163,\n StoreUavTyped = 164,\n LdRaw = 165,\n StoreRaw = 166,\n LdStructured = 167,\n StoreStructured = 168,\n AtomicAnd = 169,\n AtomicOr = 170,\n AtomicXor = 171,\n AtomicCmpStore = 172,\n AtomicIAdd = 173,\n AtomicIMax = 174,\n AtomicIMin = 175,\n AtomicUMax = 176,\n AtomicUMin = 177,\n ImmAtomicAlloc = 178,\n ImmAtomicConsume = 179,\n ImmAtomicIAdd = 180,\n ImmAtomicAnd = 181,\n ImmAtomicOr = 182,\n ImmAtomicXor = 183,\n ImmAtomicExch = 184,\n ImmAtomicCmpExch = 185,\n ImmAtomicIMax = 186,\n ImmAtomicIMin = 187,\n ImmAtomicUMax = 188,\n ImmAtomicUMin = 189,\n Sync = 190,\n DAdd = 191,\n DMax = 192,\n DMin = 193,\n DMul = 194,\n DEq = 195,\n DGe = 196,\n DLt = 197,\n DNe = 198,\n DMov = 199,\n DMovc = 200,\n DtoF = 201,\n FtoD = 202,\n EvalSnapped = 203,\n EvalSampleIndex = 204,\n EvalCentroid = 205,\n DclGsInstanceCount = 206,\n Abort = 207,\n DebugBreak = 208,\n 
ReservedBegin11_1 = 209,\n DDiv = 210,\n DFma = 211,\n DRcp = 212,\n Msad = 213,\n DtoI = 214,\n DtoU = 215,\n ItoD = 216,\n UtoD = 217,\n ReservedBegin11_2 = 218,\n Gather4S = 219,\n Gather4CS = 220,\n Gather4PoS = 221,\n Gather4PoCS = 222,\n LdS = 223,\n LdMsS = 224,\n LdUavTypedS = 225,\n LdRawS = 226,\n LdStructuredS = 227,\n SampleLS = 228,\n SampleClzS = 229,\n SampleClampS = 230,\n SampleBClampS = 231,\n SampleDClampS = 232,\n SampleCClampS = 233,\n CheckAccessFullyMapped = 234,\n };\n \n \n /**\n * \\brief Extended opcode\n */\n enum class DxbcExtOpcode : uint32_t {\n Empty = 0,\n SampleControls = 1,\n ResourceDim = 2,\n ResourceReturnType = 3,\n };\n \n \n /**\n * \\brief Operand type\n * \n * Selects the 'register file' from which\n * to retrieve an operand's value.\n */\n enum class DxbcOperandType : uint32_t {\n Temp = 0,\n Input = 1,\n Output = 2,\n IndexableTemp = 3,\n Imm32 = 4,\n Imm64 = 5,\n Sampler = 6,\n Resource = 7,\n ConstantBuffer = 8,\n ImmediateConstantBuffer = 9,\n Label = 10,\n InputPrimitiveId = 11,\n OutputDepth = 12,\n Null = 13,\n Rasterizer = 14,\n OutputCoverageMask = 15,\n Stream = 16,\n FunctionBody = 17,\n FunctionTable = 18,\n Interface = 19,\n FunctionInput = 20,\n FunctionOutput = 21,\n OutputControlPointId = 22,\n InputForkInstanceId = 23,\n InputJoinInstanceId = 24,\n InputControlPoint = 25,\n OutputControlPoint = 26,\n InputPatchConstant = 27,\n InputDomainPoint = 28,\n ThisPointer = 29,\n UnorderedAccessView = 30,\n ThreadGroupSharedMemory = 31,\n InputThreadId = 32,\n InputThreadGroupId = 33,\n InputThreadIdInGroup = 34,\n InputCoverageMask = 35,\n InputThreadIndexInGroup = 36,\n InputGsInstanceId = 37,\n OutputDepthGe = 38,\n OutputDepthLe = 39,\n CycleCounter = 40,\n OutputStencilRef = 41,\n InputInnerCoverage = 42,\n };\n \n \n /**\n * \\brief Number of components\n * \n * Used by operands to determine whether the\n * operand has one, four or zero components.\n */\n enum class DxbcComponentCount : uint32_t {\n 
Component0 = 0,\n Component1 = 1,\n Component4 = 2,\n };\n \n \n /**\n * \\brief Component selection mode\n * \n * When an operand has four components, the\n * component selection mode deterines which\n * components are used for the operation.\n */\n enum class DxbcRegMode : uint32_t {\n Mask = 0,\n Swizzle = 1,\n Select1 = 2,\n };\n \n \n /**\n * \\brief Index representation\n * \n * Determines how an operand\n * register index is stored.\n */\n enum class DxbcOperandIndexRepresentation : uint32_t {\n Imm32 = 0,\n Imm64 = 1,\n Relative = 2,\n Imm32Relative = 3,\n Imm64Relative = 4,\n };\n \n \n /**\n * \\brief Extended operand type\n */\n enum class DxbcOperandExt : uint32_t {\n OperandModifier = 1,\n };\n \n \n /**\n * \\brief Resource dimension\n * The type of a resource.\n */\n enum class DxbcResourceDim : uint32_t {\n Unknown = 0,\n Buffer = 1,\n Texture1D = 2,\n Texture2D = 3,\n Texture2DMs = 4,\n Texture3D = 5,\n TextureCube = 6,\n Texture1DArr = 7,\n Texture2DArr = 8,\n Texture2DMsArr = 9,\n TextureCubeArr = 10,\n RawBuffer = 11,\n StructuredBuffer = 12,\n };\n \n \n /**\n * \\brief Resource return type\n * Data type for resource read ops.\n */\n enum class DxbcResourceReturnType : uint32_t {\n Unorm = 1,\n Snorm = 2,\n Sint = 3,\n Uint = 4,\n Float = 5,\n Mixed = 6, /// ?\n Double = 7,\n Continued = 8, /// ?\n Unused = 9, /// ?\n };\n \n \n /**\n * \\brief Register component type\n * Data type of a register component.\n */\n enum class DxbcRegisterComponentType : uint32_t {\n Unknown = 0,\n Uint32 = 1,\n Sint32 = 2,\n Float32 = 3,\n };\n \n \n /**\n * \\brief Instruction return type\n */\n enum class DxbcInstructionReturnType : uint32_t {\n Float = 0,\n Uint = 1,\n };\n \n \n enum class DxbcSystemValue : uint32_t {\n None = 0,\n Position = 1,\n ClipDistance = 2,\n CullDistance = 3,\n RenderTargetId = 4,\n ViewportId = 5,\n VertexId = 6,\n PrimitiveId = 7,\n InstanceId = 8,\n IsFrontFace = 9,\n SampleIndex = 10,\n FinalQuadUeq0EdgeTessFactor = 11,\n 
FinalQuadVeq0EdgeTessFactor = 12,\n FinalQuadUeq1EdgeTessFactor = 13,\n FinalQuadVeq1EdgeTessFactor = 14,\n FinalQuadUInsideTessFactor = 15,\n FinalQuadVInsideTessFactor = 16,\n FinalTriUeq0EdgeTessFactor = 17,\n FinalTriVeq0EdgeTessFactor = 18,\n FinalTriWeq0EdgeTessFactor = 19,\n FinalTriInsideTessFactor = 20,\n FinalLineDetailTessFactor = 21,\n FinalLineDensityTessFactor = 22,\n Target = 64,\n Depth = 65,\n Coverage = 66,\n DepthGe = 67,\n DepthLe = 68\n };\n \n \n enum class DxbcInterpolationMode : uint32_t {\n Undefined = 0,\n Constant = 1,\n Linear = 2,\n LinearCentroid = 3,\n LinearNoPerspective = 4,\n LinearNoPerspectiveCentroid = 5,\n LinearSample = 6,\n LinearNoPerspectiveSample = 7,\n };\n \n \n enum class DxbcGlobalFlag : uint32_t {\n RefactoringAllowed = 0,\n DoublePrecision = 1,\n EarlyFragmentTests = 2,\n RawStructuredBuffers = 3,\n };\n \n using DxbcGlobalFlags = Flags;\n \n enum class DxbcZeroTest : uint32_t {\n TestZ = 0,\n TestNz = 1,\n };\n \n enum class DxbcResinfoType : uint32_t {\n Float = 0,\n RcpFloat = 1,\n Uint = 2,\n };\n \n enum class DxbcSyncFlag : uint32_t {\n ThreadsInGroup = 0,\n ThreadGroupSharedMemory = 1,\n UavMemoryGroup = 2,\n UavMemoryGlobal = 3,\n };\n \n using DxbcSyncFlags = Flags;\n \n \n /**\n * \\brief Geometry shader input primitive\n */\n enum class DxbcPrimitive : uint32_t {\n Undefined = 0,\n Point = 1,\n Line = 2,\n Triangle = 3,\n LineAdj = 6,\n TriangleAdj = 7,\n Patch1 = 8,\n Patch2 = 9,\n Patch3 = 10,\n Patch4 = 11,\n Patch5 = 12,\n Patch6 = 13,\n Patch7 = 14,\n Patch8 = 15,\n Patch9 = 16,\n Patch10 = 17,\n Patch11 = 18,\n Patch12 = 19,\n Patch13 = 20,\n Patch14 = 21,\n Patch15 = 22,\n Patch16 = 23,\n Patch17 = 24,\n Patch18 = 25,\n Patch19 = 26,\n Patch20 = 27,\n Patch21 = 28,\n Patch22 = 29,\n Patch23 = 30,\n Patch24 = 31,\n Patch25 = 32,\n Patch26 = 33,\n Patch27 = 34,\n Patch28 = 35,\n Patch29 = 36,\n Patch30 = 37,\n Patch31 = 38,\n Patch32 = 39,\n };\n \n \n /**\n * \\brief Geometry shader output topology\n 
*/\n enum class DxbcPrimitiveTopology : uint32_t {\n Undefined = 0,\n PointList = 1,\n LineList = 2,\n LineStrip = 3,\n TriangleList = 4,\n TriangleStrip = 5,\n LineListAdj = 10,\n LineStripAdj = 11,\n TriangleListAdj = 12,\n TriangleStripAdj = 13,\n };\n \n \n /**\n * \\brief Sampler operation mode\n */\n enum class DxbcSamplerMode : uint32_t {\n Default = 0,\n Comparison = 1,\n Mono = 2,\n };\n \n \n /**\n * \\brief Scalar value type\n * \n * Enumerates possible register component\n * types. Scalar types are represented as\n * a one-component vector type.\n */\n enum class DxbcScalarType : uint32_t {\n Uint32 = 0,\n Uint64 = 1,\n Sint32 = 2,\n Sint64 = 3,\n Float32 = 4,\n Float64 = 5,\n Bool = 6,\n };\n \n \n /**\n * \\brief Tessellator domain\n */\n enum class DxbcTessDomain : uint32_t {\n Undefined = 0,\n Isolines = 1,\n Triangles = 2,\n Quads = 3,\n };\n \n /**\n * \\brief Tessellator partitioning\n */\n enum class DxbcTessPartitioning : uint32_t {\n Undefined = 0,\n Integer = 1,\n Pow2 = 2,\n FractOdd = 3,\n FractEven = 4,\n };\n \n /**\n * \\brief UAV definition flags\n */\n enum class DxbcUavFlag : uint32_t {\n GloballyCoherent = 0,\n RasterizerOrdered = 1,\n };\n \n using DxbcUavFlags = Flags;\n \n /**\n * \\brief Tessellator output primitive\n */\n enum class DxbcTessOutputPrimitive : uint32_t {\n Undefined = 0,\n Point = 1,\n Line = 2,\n TriangleCw = 3,\n TriangleCcw = 4,\n };\n \n /**\n * \\brief Custom data class\n * \n * Stores which type of custom data is\n * referenced by the instruction.\n */\n enum class DxbcCustomDataClass : uint32_t {\n Comment = 0,\n DebugInfo = 1,\n Opaque = 2,\n ImmConstBuf = 3,\n };\n \n \n enum class DxbcResourceType : uint32_t {\n Typed = 0,\n Raw = 1,\n Structured = 2,\n };\n\n\n enum class DxbcConstantBufferAccessType : uint32_t {\n StaticallyIndexed = 0,\n DynamicallyIndexed = 1,\n };\n \n}"], ["/lsfg-vk/framegen/src/common/exception.cpp", "#include \"common/exception.hpp\"\n\n#include \n\n#include \n#include \n#include 
\n#include \n#include \n\nusing namespace LSFG;\n\nvulkan_error::vulkan_error(VkResult result, const std::string& message)\n : std::runtime_error(std::format(\"{} (error {})\", message, static_cast(result))),\n result(result) {}\n\nvulkan_error::~vulkan_error() noexcept = default;\n\nrethrowable_error::rethrowable_error(const std::string& message, const std::exception& exe)\n : std::runtime_error(message) {\n this->message = std::format(\"{}\\n- {}\", message, exe.what());\n}\n\nrethrowable_error::~rethrowable_error() noexcept = default;\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/unicode_winapi.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2020 Trail of Bits, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n\nnamespace peparse {\nstd::string from_utf16(const UCharString &u) {\n std::string result;\n std::size_t size = WideCharToMultiByte(CP_UTF8,\n 0,\n u.data(),\n static_cast(u.size()),\n nullptr,\n 0,\n nullptr,\n nullptr);\n\n if (size <= 0) {\n return result;\n }\n\n result.reserve(size);\n WideCharToMultiByte(CP_UTF8,\n 0,\n u.data(),\n static_cast(u.size()),\n &result[0],\n static_cast(result.capacity()),\n nullptr,\n nullptr);\n\n return result;\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/log/log.h", "#pragma once\n\n#include \n#include \n\nnamespace dxvk {\n \n enum class LogLevel : uint32_t {\n Trace = 0,\n Debug = 1,\n Info = 2,\n Warn = 3,\n Error = 4,\n None = 5,\n };\n\n /**\n * \\brief Logger\n * \n * Logger for one DLL. Creates a text file and\n * writes all log messages to that file.\n */\n class Logger {\n \n public:\n \n Logger() {}\n Logger(const std::string& file_name) {}\n ~Logger() {}\n \n static void trace(const std::string& message) {}\n static void debug(const std::string& message) {}\n static void info (const std::string& message) {}\n static void warn (const std::string& message) {}\n static void err (const std::string& message) {}\n static void log (LogLevel level, const std::string& message) {}\n \n static LogLevel logLevel() {\n return LogLevel::Warn;\n }\n\n };\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_flags.h", "#pragma once\n\n#include \n\n#include \"util_bit.h\"\n\nnamespace dxvk {\n \n template\n class Flags {\n \n public:\n \n using IntType = std::underlying_type_t;\n \n Flags() { }\n \n Flags(IntType t)\n : m_bits(t) { }\n \n template\n Flags(T f, Tx... 
fx) {\n this->set(f, fx...);\n }\n \n template\n void set(Tx... fx) {\n m_bits |= bits(fx...);\n }\n \n void set(Flags flags) {\n m_bits |= flags.m_bits;\n }\n \n template\n void clr(Tx... fx) {\n m_bits &= ~bits(fx...);\n }\n \n void clr(Flags flags) {\n m_bits &= ~flags.m_bits;\n }\n \n template\n bool any(Tx... fx) const {\n return (m_bits & bits(fx...)) != 0;\n }\n \n template\n bool all(Tx... fx) const {\n const IntType mask = bits(fx...);\n return (m_bits & mask) == mask;\n }\n \n bool test(T f) const {\n return this->any(f);\n }\n \n bool isClear() const {\n return m_bits == 0;\n }\n \n void clrAll() {\n m_bits = 0;\n }\n \n IntType raw() const {\n return m_bits;\n }\n \n Flags operator & (const Flags& other) const {\n return Flags(m_bits & other.m_bits);\n }\n \n Flags operator | (const Flags& other) const {\n return Flags(m_bits | other.m_bits);\n }\n \n Flags operator ^ (const Flags& other) const {\n return Flags(m_bits ^ other.m_bits);\n }\n\n bool operator == (const Flags& other) const {\n return m_bits == other.m_bits;\n }\n \n bool operator != (const Flags& other) const {\n return m_bits != other.m_bits;\n }\n \n private:\n \n IntType m_bits = 0;\n \n static IntType bit(T f) {\n return IntType(1) << static_cast(f);\n }\n \n template\n static IntType bits(T f, Tx... 
fx) {\n return bit(f) | bits(fx...);\n }\n \n static IntType bits() {\n return 0;\n }\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_tag.h", "#pragma once\n\n#include \"dxbc_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Four-character tag\n * \n * Used to identify chunks in the\n * compiled DXBC file by name.\n */\n class DxbcTag {\n \n public:\n \n DxbcTag() {\n for (size_t i = 0; i < 4; i++)\n m_chars[i] = '\\0';\n }\n \n DxbcTag(const char* tag) {\n for (size_t i = 0; i < 4; i++)\n m_chars[i] = tag[i];\n }\n \n bool operator == (const DxbcTag& other) const {\n bool result = true;\n for (size_t i = 0; i < 4; i++)\n result &= m_chars[i] == other.m_chars[i];\n return result;\n }\n \n bool operator != (const DxbcTag& other) const {\n return !this->operator == (other);\n }\n \n const char* operator & () const { return m_chars; }\n char* operator & () { return m_chars; }\n \n private:\n \n char m_chars[4];\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_options.h", "#pragma once\n\n#include \n\n#include \"util_flags.h\"\n\nnamespace dxvk {\n\n struct D3D11Options;\n\n enum class DxbcFloatControlFlag : uint32_t {\n DenormFlushToZero32,\n DenormPreserve64,\n PreserveNan32,\n PreserveNan64,\n };\n\n using DxbcFloatControlFlags = Flags;\n\n struct DxbcOptions {\n DxbcOptions() {}\n\n // Clamp oDepth in fragment shaders if the depth\n // clip device feature is not supported\n bool useDepthClipWorkaround = false;\n\n /// Determines whether format qualifiers\n /// on typed UAV loads are required\n bool supportsTypedUavLoadR32 = false;\n\n /// Determines whether raw access chains are supported\n bool supportsRawAccessChains = false;\n\n /// Clear thread-group shared memory to zero\n bool zeroInitWorkgroupMemory = false;\n\n /// Declare vertex positions as invariant\n bool invariantPosition = false;\n\n /// Insert memory barriers after TGSM stoes\n bool forceVolatileTgsmAccess = false;\n\n /// Try to detect hazards in UAV access and insert\n 
/// barriers when we know control flow is uniform.\n bool forceComputeUavBarriers = false;\n\n /// Replace ld_ms with ld\n bool disableMsaa = false;\n\n /// Force sample rate shading by using sample\n /// interpolation for fragment shader inputs\n bool forceSampleRateShading = false;\n\n // Enable per-sample interlock if supported\n bool enableSampleShadingInterlock = false;\n\n /// Use tightly packed arrays for immediate\n /// constant buffers if possible\n bool supportsTightIcbPacking = false;\n\n /// Whether exporting point size is required\n bool needsPointSizeExport = true;\n\n /// Whether to enable sincos emulation\n bool sincosEmulation = false;\n\n /// Float control flags\n DxbcFloatControlFlags floatControl;\n\n /// Minimum storage buffer alignment\n VkDeviceSize minSsboAlignment = 0;\n };\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_math.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n \n constexpr size_t CACHE_LINE_SIZE = 64;\n constexpr double pi = 3.14159265359;\n\n template\n constexpr T clamp(T n, T lo, T hi) {\n if (n < lo) return lo;\n if (n > hi) return hi;\n return n;\n }\n \n template\n constexpr T align(T what, U to) {\n return (what + to - 1) & ~(to - 1);\n }\n\n template\n constexpr T alignDown(T what, U to) {\n return (what / to) * to;\n }\n\n // Equivalent of std::clamp for use with floating point numbers\n // Handles (-){INFINITY,NAN} cases.\n // Will return min in cases of NAN, etc.\n inline float fclamp(float value, float min, float max) {\n return std::fmin(\n std::fmax(value, min), max);\n }\n\n template\n inline T divCeil(T dividend, T divisor) {\n return (dividend + divisor - 1) / divisor;\n }\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_header.h", "class DxbcHeader {\n public:\n DxbcHeader(DxbcReader& reader) {\n // FourCC at the start of the file, must be 'DXBC'\n DxbcTag fourcc = reader.readTag();\n \n if (fourcc != \"DXBC\")\n throw DxvkError(\"DxbcHeader::DxbcHeader: Invalid fourcc, expected 
'DXBC'\");\n \n // Stuff we don't actually need to store\n reader.skip(4 * sizeof(uint32_t)); // Check sum\n reader.skip(1 * sizeof(uint32_t)); // Constant 1\n reader.skip(1 * sizeof(uint32_t)); // Bytecode length\n \n // Number of chunks in the file\n uint32_t chunkCount = reader.readu32();\n \n // Chunk offsets are stored immediately after\n for (uint32_t i = 0; i < chunkCount; i++)\n m_chunkOffsets.push_back(reader.readu32());\n }\n ~DxbcHeader() {\n \n }\n private:\n std::vector m_chunkOffsets;\n};"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/unicode_codecvt.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2019 Trail of Bits, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n#include \n\nnamespace peparse {\n// See\n// https://stackoverflow.com/questions/38688417/utf-conversion-functions-in-c11\nstd::string from_utf16(const UCharString &u) {\n std::wstring_convert, char16_t> convert;\n return convert.to_bytes(u);\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/thirdparty/GLSL.std.450.h", "/*\n** Copyright (c) 2014-2024 The Khronos Group Inc.\n**\n** Permission is hereby granted, free of charge, to any person obtaining a copy\n** of this software and/or associated documentation files (the \"Materials\"),\n** to deal in the Materials without restriction, including without limitation\n** the rights to use, copy, modify, merge, publish, distribute, sublicense,\n** and/or sell copies of the Materials, and to permit persons to whom the\n** Materials are furnished to do so, subject to the following conditions:\n**\n** The above copyright notice and this permission notice shall be included in\n** all copies or substantial portions of the Materials.\n**\n** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS\n** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND\n** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ \n**\n** THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS\n** IN THE MATERIALS.\n*/\n\n#ifndef GLSLstd450_H\n#define GLSLstd450_H\n\nstatic const int GLSLstd450Version = 100;\nstatic const int GLSLstd450Revision = 3;\n\nenum GLSLstd450 {\n GLSLstd450Bad = 0, // Don't use\n\n GLSLstd450Round = 1,\n GLSLstd450RoundEven = 2,\n GLSLstd450Trunc = 3,\n GLSLstd450FAbs = 4,\n GLSLstd450SAbs = 5,\n GLSLstd450FSign = 6,\n GLSLstd450SSign = 7,\n GLSLstd450Floor = 8,\n GLSLstd450Ceil = 9,\n GLSLstd450Fract = 10,\n\n GLSLstd450Radians = 11,\n GLSLstd450Degrees = 12,\n GLSLstd450Sin = 13,\n GLSLstd450Cos = 14,\n GLSLstd450Tan = 15,\n GLSLstd450Asin = 16,\n GLSLstd450Acos = 17,\n GLSLstd450Atan = 18,\n GLSLstd450Sinh = 19,\n GLSLstd450Cosh = 20,\n GLSLstd450Tanh = 21,\n GLSLstd450Asinh = 22,\n GLSLstd450Acosh = 23,\n GLSLstd450Atanh = 24,\n GLSLstd450Atan2 = 25,\n\n GLSLstd450Pow = 26,\n GLSLstd450Exp = 27,\n GLSLstd450Log = 28,\n GLSLstd450Exp2 = 29,\n GLSLstd450Log2 = 30,\n GLSLstd450Sqrt = 31,\n GLSLstd450InverseSqrt = 32,\n\n GLSLstd450Determinant = 33,\n GLSLstd450MatrixInverse = 34,\n\n GLSLstd450Modf = 35, // second operand needs an OpVariable to write to\n GLSLstd450ModfStruct = 36, // no OpVariable operand\n GLSLstd450FMin = 37,\n GLSLstd450UMin = 38,\n GLSLstd450SMin = 39,\n GLSLstd450FMax = 40,\n GLSLstd450UMax = 41,\n GLSLstd450SMax = 42,\n GLSLstd450FClamp = 43,\n GLSLstd450UClamp = 44,\n GLSLstd450SClamp = 45,\n GLSLstd450FMix = 46,\n GLSLstd450IMix = 47, // Reserved\n GLSLstd450Step = 48,\n GLSLstd450SmoothStep = 49,\n\n GLSLstd450Fma = 50,\n GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to\n GLSLstd450FrexpStruct = 52, // no OpVariable operand\n GLSLstd450Ldexp = 53,\n\n GLSLstd450PackSnorm4x8 = 54,\n GLSLstd450PackUnorm4x8 = 55,\n 
GLSLstd450PackSnorm2x16 = 56,\n GLSLstd450PackUnorm2x16 = 57,\n GLSLstd450PackHalf2x16 = 58,\n GLSLstd450PackDouble2x32 = 59,\n GLSLstd450UnpackSnorm2x16 = 60,\n GLSLstd450UnpackUnorm2x16 = 61,\n GLSLstd450UnpackHalf2x16 = 62,\n GLSLstd450UnpackSnorm4x8 = 63,\n GLSLstd450UnpackUnorm4x8 = 64,\n GLSLstd450UnpackDouble2x32 = 65,\n\n GLSLstd450Length = 66,\n GLSLstd450Distance = 67,\n GLSLstd450Cross = 68,\n GLSLstd450Normalize = 69,\n GLSLstd450FaceForward = 70,\n GLSLstd450Reflect = 71,\n GLSLstd450Refract = 72,\n\n GLSLstd450FindILsb = 73,\n GLSLstd450FindSMsb = 74,\n GLSLstd450FindUMsb = 75,\n\n GLSLstd450InterpolateAtCentroid = 76,\n GLSLstd450InterpolateAtSample = 77,\n GLSLstd450InterpolateAtOffset = 78,\n\n GLSLstd450NMin = 79,\n GLSLstd450NMax = 80,\n GLSLstd450NClamp = 81,\n\n GLSLstd450Count\n};\n\n#endif // #ifndef GLSLstd450_H\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_common.h", "class DxbcProgramType {\n public:\n VkShaderStageFlagBits shaderStage() const {\n switch (m_type) {\n case DxbcProgramType::PixelShader : return VK_SHADER_STAGE_FRAGMENT_BIT;\n case DxbcProgramType::VertexShader : return VK_SHADER_STAGE_VERTEX_BIT;\n case DxbcProgramType::GeometryShader : return VK_SHADER_STAGE_GEOMETRY_BIT;\n case DxbcProgramType::HullShader : return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;\n case DxbcProgramType::DomainShader : return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;\n case DxbcProgramType::ComputeShader : return VK_SHADER_STAGE_COMPUTE_BIT;\n default: throw DxvkError(\"DxbcProgramInfo::shaderStage: Unsupported program type\");\n }\n }\n spv::ExecutionModel executionModel() const {\n switch (m_type) {\n case DxbcProgramType::PixelShader : return spv::ExecutionModelFragment;\n case DxbcProgramType::VertexShader : return spv::ExecutionModelVertex;\n case DxbcProgramType::GeometryShader : return spv::ExecutionModelGeometry;\n case DxbcProgramType::HullShader : return spv::ExecutionModelTessellationControl;\n case DxbcProgramType::DomainShader : 
return spv::ExecutionModelTessellationEvaluation;\n case DxbcProgramType::ComputeShader : return spv::ExecutionModelGLCompute;\n default: throw DxvkError(\"DxbcProgramInfo::executionModel: Unsupported program type\");\n }\n }\n private:\n DxbcProgramType m_type = DxbcProgramType::PixelShader;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_chunk_shex.h", "class DxbcShex {\n public:\n DxbcShex(DxbcReader reader) {\n // The shader version and type are stored in a 32-bit unit,\n // where the first byte contains the major and minor version\n // numbers, and the high word contains the program type.\n reader.skip(2);\n auto pType = reader.readEnum();\n m_programInfo = DxbcProgramInfo(pType);\n \n // Read the actual shader code as an array of DWORDs.\n auto codeLength = reader.readu32() - 2;\n m_code.resize(codeLength);\n reader.read(m_code.data(), codeLength * sizeof(uint32_t));\n }\n ~DxbcShex() {\n \n }\n private:\n DxbcProgramInfo m_programInfo;\n std::vector m_code;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/log/log_debug.h", "#pragma once\n\n#include \n\n#include \"log/log.h\"\n\n#ifdef _MSC_VER\n#define METHOD_NAME __FUNCSIG__\n#else\n#define METHOD_NAME __PRETTY_FUNCTION__\n#endif\n\n#define TRACE_ENABLED\n\n#ifdef TRACE_ENABLED\n#define TRACE(...) \\\n do { dxvk::debug::trace(METHOD_NAME, ##__VA_ARGS__); } while (0)\n#else\n#define TRACE(...) \\\n do { } while (0)\n#endif\n\nnamespace dxvk::debug {\n \n std::string methodName(const std::string& prettyName);\n \n inline void traceArgs(std::stringstream& stream) { }\n \n template\n void traceArgs(std::stringstream& stream, const Arg1& arg1) {\n stream << arg1;\n }\n \n template\n void traceArgs(std::stringstream& stream, const Arg1& arg1, const Arg2& arg2, const Args&... args) {\n stream << arg1 << \",\";\n traceArgs(stream, arg2, args...);\n }\n \n template\n void trace(const std::string& funcName, const Args&... 
args) {\n std::stringstream stream;\n stream << methodName(funcName) << \"(\";\n traceArgs(stream, args...);\n stream << \")\";\n Logger::trace(stream.str());\n }\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_limits.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n\n enum DxvkLimits : size_t {\n MaxNumRenderTargets = 8,\n MaxNumVertexAttributes = 32,\n MaxNumVertexBindings = 32,\n MaxNumXfbBuffers = 4,\n MaxNumXfbStreams = 4,\n MaxNumViewports = 16,\n MaxNumResourceSlots = 1216,\n MaxNumQueuedCommandBuffers = 32,\n MaxNumQueryCountPerPool = 128,\n MaxNumSpecConstants = 12,\n MaxUniformBufferSize = 65536,\n MaxVertexBindingStride = 2048,\n MaxPushConstantSize = 128,\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/src/util/util_log.cpp", "#include \"log/log_debug.h\"\n\nnamespace dxvk::debug {\n \n std::string methodName(const std::string& prettyName) {\n size_t end = prettyName.find(\"(\");\n size_t begin = prettyName.substr(0, end).rfind(\" \") + 1;\n return prettyName.substr(begin,end - begin);\n }\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/rc/util_rc.h", "#pragma once\n\n#include \n\n#include \"../util_likely.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Reference-counted object\n */\n class RcObject {\n \n public:\n \n /**\n * \\brief Increments reference count\n * \\returns New reference count\n */\n force_inline uint32_t incRef() {\n return ++m_refCount;\n }\n \n /**\n * \\brief Decrements reference count\n * \\returns New reference count\n */\n force_inline uint32_t decRef() {\n return --m_refCount;\n }\n \n private:\n \n std::atomic m_refCount = { 0u };\n \n };\n \n}"], ["/lsfg-vk/thirdparty/toml11/src/parser.cpp", "#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\n\ntemplate result, std::vector> try_parse(std::vector, std::string, spec);\ntemplate result, std::vector> try_parse(std::istream&, std::string, spec);\ntemplate result, std::vector> try_parse(std::string, spec);\ntemplate result, std::vector> try_parse(FILE*, std::string, spec);\ntemplate result, std::vector> try_parse_str(std::string, spec, cxx::source_location);\n\ntemplate basic_value parse(std::vector, std::string, spec);\ntemplate basic_value parse(std::istream&, std::string, spec);\ntemplate basic_value parse(std::string, spec);\ntemplate basic_value parse(FILE*, std::string, spec);\ntemplate basic_value parse_str(std::string, spec, cxx::source_location);\n\ntemplate result, std::vector> try_parse(std::vector, std::string, spec);\ntemplate result, std::vector> try_parse(std::istream&, std::string, spec);\ntemplate result, std::vector> try_parse(std::string, spec);\ntemplate result, std::vector> try_parse(FILE*, std::string, spec);\ntemplate result, std::vector> try_parse_str(std::string, spec, cxx::source_location);\n\ntemplate basic_value parse(std::vector, std::string, spec);\ntemplate basic_value parse(std::istream&, std::string, spec);\ntemplate basic_value parse(std::string, spec);\ntemplate basic_value parse(FILE*, std::string, spec);\ntemplate basic_value parse_str(std::string, spec, cxx::source_location);\n\n#if defined(TOML11_HAS_FILESYSTEM)\ntemplate cxx::enable_if_t::value, result, std::vector>> try_parse(const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, result, std::vector>> try_parse(const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, basic_value > parse (const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, basic_value > parse (const std::filesystem::path&, spec);\n#endif // filesystem\n\n} // toml\n"], 
["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_hash.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n\n struct DxvkEq {\n template\n size_t operator () (const T& a, const T& b) const {\n return a.eq(b);\n }\n };\n\n struct DxvkHash {\n template\n size_t operator () (const T& object) const {\n return object.hash();\n }\n };\n\n class DxvkHashState {\n\n public:\n\n void add(size_t hash) {\n m_value ^= hash + 0x9e3779b9\n + (m_value << 6)\n + (m_value >> 2);\n }\n\n operator size_t () const {\n return m_value;\n }\n\n private:\n\n size_t m_value = 0;\n\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_modinfo.h", "#pragma once\n\n#include \"dxbc_options.h\"\n\nnamespace dxvk {\n\n /**\n * \\brief Tessellation info\n * \n * Stores the maximum tessellation factor\n * to export from tessellation shaders.\n */\n struct DxbcTessInfo {\n float maxTessFactor;\n };\n\n /**\n * \\brief Xfb capture entry\n * \n * Stores an output variable to capture,\n * as well as the buffer to write it to.\n */\n struct DxbcXfbEntry {\n const char* semanticName;\n uint32_t semanticIndex;\n uint32_t componentIndex;\n uint32_t componentCount;\n uint32_t streamId;\n uint32_t bufferId;\n uint32_t offset;\n };\n\n /**\n * \\brief Xfb info\n * \n * Stores capture entries and output buffer\n * strides. 
This structure must only be\n * defined if \\c entryCount is non-zero.\n */\n struct DxbcXfbInfo {\n uint32_t entryCount;\n DxbcXfbEntry entries[128];\n uint32_t strides[4];\n int32_t rasterizedStream;\n };\n\n /**\n * \\brief Shader module info\n * \n * Stores information which may affect shader compilation.\n * This data can be supplied by the client API implementation.\n */\n struct DxbcModuleInfo {\n DxbcOptions options;\n DxbcTessInfo* tess;\n DxbcXfbInfo* xfb;\n };\n\n}"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/to_string.h", "#pragma once\n\n#include \n#include \n\n#if defined(_MSC_VER)\ntypedef std::basic_string UCharString;\n#else\ntypedef std::u16string UCharString;\n#endif\n\nnamespace peparse {\ntemplate \nstatic std::string to_string(T t, std::ios_base &(*f)(std::ios_base &) ) {\n std::ostringstream oss;\n oss << f << t;\n return oss.str();\n}\n\nstd::string from_utf16(const UCharString &u);\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_error.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n \n /**\n * \\brief DXVK error\n * \n * A generic exception class that stores a\n * message. Exceptions should be logged.\n */\n class DxvkError {\n \n public:\n \n DxvkError() { }\n DxvkError(std::string&& message)\n : m_message(std::move(message)) { }\n \n const std::string& message() const {\n return m_message;\n }\n \n private:\n \n std::string m_message;\n \n };\n \n}"], ["/lsfg-vk/thirdparty/toml11/src/serializer.cpp", "#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\nstruct type_config;\nstruct ordered_type_config;\n\ntemplate typename basic_value::string_type\nformat(const basic_value&, const spec);\n\ntemplate typename basic_value::string_type\nformat(const typename basic_value::key_type& k,\n const basic_value& v, const spec);\n\ntemplate typename basic_value::string_type\nformat(const std::vector::key_type>& ks,\n const basic_value& v, const spec s);\n\ntemplate typename basic_value::string_type\nformat(const basic_value&, const spec);\n\ntemplate typename basic_value::string_type\nformat(const typename basic_value::key_type& k,\n const basic_value& v, const spec);\n\ntemplate typename basic_value::string_type\nformat(const std::vector::key_type>& ks,\n const basic_value& v, const spec s);\n\nnamespace detail\n{\ntemplate class serializer<::toml::type_config>;\ntemplate class serializer<::toml::ordered_type_config>;\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/toml11/src/skip.cpp", "#include \n#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\n\nnamespace detail\n{\ntemplate bool skip_whitespace (location& loc, const context&);\ntemplate bool skip_empty_lines (location& loc, const context&);\ntemplate void skip_comment_block (location& loc, const context&);\ntemplate void skip_empty_or_comment_lines(location& loc, const context&);\ntemplate void skip_string_like (location& loc, const context&);\ntemplate void skip_array_like (location& loc, const context&);\ntemplate void skip_inline_table_like (location& loc, const context&);\ntemplate void skip_value (location& loc, const context&);\ntemplate void skip_key_value_pair (location& loc, const context&);\ntemplate void skip_until_next_table (location& loc, const context&);\n\ntemplate bool skip_whitespace (location& loc, const context&);\ntemplate bool skip_empty_lines (location& loc, const context&);\ntemplate void skip_comment_block (location& loc, const context&);\ntemplate void skip_empty_or_comment_lines(location& loc, const context&);\ntemplate void skip_string_like (location& loc, const context&);\ntemplate void skip_array_like (location& loc, const context&);\ntemplate void skip_inline_table_like (location& loc, const context&);\ntemplate void skip_value (location& loc, const context&);\ntemplate void skip_key_value_pair (location& loc, const context&);\ntemplate void skip_until_next_table (location& loc, const context&);\n\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_enum.h", "#pragma once\n\n#define ENUM_NAME(name) \\\n case name: return os << #name\n\n#define ENUM_DEFAULT(name) \\\n default: return os << static_cast(e)\n"], ["/lsfg-vk/thirdparty/toml11/src/context.cpp", "#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\nnamespace detail\n{\ntemplate class context<::toml::type_config>;\ntemplate class context<::toml::ordered_type_config>;\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_include.h", "#pragma once\n\n#include \n#include \n\n#include \"log/log.h\"\n#include \"log/log_debug.h\"\n\n#include \"util_error.h\"\n#include \"util_flags.h\"\n#include \"util_likely.h\"\n#include \"util_string.h\"\n\n#include \"rc/util_rc.h\"\n#include \"rc/util_rc_ptr.h\"\n"], ["/lsfg-vk/thirdparty/toml11/src/types.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\ntemplate class basic_value;\ntemplate class basic_value;\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_likely.h", "#pragma once\n\n#ifdef __GNUC__\n#define likely(x) __builtin_expect(bool(x),1)\n#define unlikely(x) __builtin_expect(bool(x),0)\n#define force_inline inline __attribute__((always_inline))\n#else\n#define likely(x) (x)\n#define unlikely(x) (x)\n#define force_inline inline\n#endif\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_include.h", "#pragma once\n\n#include \n#include \n\n#include \"dxvk_limits.h\"\n#include \"dxvk_pipelayout.h\"\n\n#include \"log/log.h\"\n#include \"log/log_debug.h\"\n\n#include \"rc/util_rc.h\"\n#include \"rc/util_rc_ptr.h\"\n\n#include \"util_bit.h\"\n#include \"util_enum.h\"\n#include \"util_error.h\"\n#include \"util_string.h\"\n#include \"util_flags.h\"\n#include \"util_small_vector.h\"\n"], ["/lsfg-vk/thirdparty/toml11/src/value_t.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/region.cpp", "#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/color.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/comments.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/format.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/location.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/literal.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/datetime.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/syntax.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/source_location.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/scanner.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/error_info.cpp", "#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/parse.h", "class parse {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_string.h", "class util_string {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_util.h", "class dxbc_util {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_names.h", "class dxbc_names {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_defs.h", "class DxbcOperandKind {\n};"]], "task_instance_info": {"created_time": "2025-08-20 20:01:24", "created_task_model": "DeepSeek-R1", "function_skeleton": "std::vector Utils::addExtensions(const char* const* extensions, size_t count,\n const std::vector& requiredExtensions) {}", "code_description": "This code segment functions to: Combine existing extension lists with required entries while preventing duplicate elements through conditional checks and merging operations."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```cpp\n[TASK_BEGIN]\nstd::vector Utils::addExtensions(const char* const* extensions, size_t count,\n const std::vector& requiredExtensions) {\n std::vector result;\n \n // Add existing extensions\n for (size_t i = 0; i < count; ++i) {\n result.push_back(extensions[i]);\n }\n\n // Add required extensions that aren't already present\n for (const auto& reqExt : requiredExtensions) {\n bool exists = false;\n for (size_t i = 0; i < count; ++i) {\n if (strcmp(reqExt, extensions[i]) == 0) {\n exists = true;\n break;\n }\n }\n if (!exists) {\n result.push_back(reqExt);\n }\n }\n\n return result;\n}\n[TASK_END]\n```", "inference_time": "2025-08-20 20-02-29"}, "editdistance_info": {"edit_distance": 44.2308, "calculate_time": "2025-08-20 20:02:29", "true_code_clean": "std::vector Utils::addExtensions(const char* const* extensions, size_t count,\n const std::vector& requiredExtensions) {\n std::vector ext(count);\n 
std::copy_n(extensions, count, ext.data());\n for (const auto& e : requiredExtensions) {\n auto it = std::ranges::find_if(ext,\n [e](const char* extName) {\n return std::string(extName) == std::string(e);\n });\n if (it == ext.end())\n ext.push_back(e);\n }\n return ext;\n}", "predict_code_clean": "std::vector Utils::addExtensions(const char* const* extensions, size_t count,\n const std::vector& requiredExtensions) {\n std::vector result;\n for (size_t i = 0; i < count; ++i) {\n result.push_back(extensions[i]);\n }\n for (const auto& reqExt : requiredExtensions) {\n bool exists = false;\n for (size_t i = 0; i < count; ++i) {\n if (strcmp(reqExt, extensions[i]) == 0) {\n exists = true;\n break;\n }\n }\n if (!exists) {\n result.push_back(reqExt);\n }\n }\n return result;\n}"}} {"repo_name": "lsfg-vk", "file_name": "/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_decoder.h", "inference_info": {"prefix_code": "class DxbcRegModifier {\n public:\n private:\n uint8_t m_mask = 0;\n public:\n private:\n uint8_t m_mask = 0;\n public:\n private:\n uint32_t m_bits;\n public:\n const uint32_t* ptrAt(uint32_t id) const;\n uint32_t at(uint32_t id) const {\n if (m_ptr + id >= m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return m_ptr[id];\n }\n uint32_t read() {\n if (m_ptr >= m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return *(m_ptr++);\n }\n DxbcCodeSlice take(uint32_t n) const {\n if (m_ptr + n > m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return DxbcCodeSlice(m_ptr, m_ptr + n);\n }\n DxbcCodeSlice skip(uint32_t n) const {\n if (m_ptr + n > m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return DxbcCodeSlice(m_ptr + n, m_end);\n }\n private:\n const uint32_t* m_ptr = nullptr;\n const uint32_t* m_end = nullptr;\n public:\n void decodeInstruction(DxbcCodeSlice& code) {\n const uint32_t token0 = code.at(0);\n \n // Initialize the instruction structure. 
Some of these values\n // may not get written otherwise while decoding the instruction.\n m_instruction.op = static_cast(bit::extract(token0, 0, 10));\n m_instruction.opClass = DxbcInstClass::Undefined;\n m_instruction.sampleControls = { 0, 0, 0 };\n m_instruction.dstCount = 0;\n m_instruction.srcCount = 0;\n m_instruction.immCount = 0;\n m_instruction.dst = m_dstOperands.data();\n m_instruction.src = m_srcOperands.data();\n m_instruction.imm = m_immOperands.data();\n m_instruction.customDataType = DxbcCustomDataClass::Comment;\n m_instruction.customDataSize = 0;\n m_instruction.customData = nullptr;\n \n // Reset the index pointer, which may still contain\n // a non-zero value from the previous iteration\n m_indexId = 0;\n \n // Instruction length, in DWORDs. This includes the token\n // itself and any other prefix that an instruction may have.\n uint32_t length = 0;\n \n if (m_instruction.op == DxbcOpcode::CustomData) {\n length = code.at(1);\n this->decodeCustomData(code.take(length));\n } else {\n length = bit::extract(token0, 24, 30);\n this->decodeOperation(code.take(length));\n }\n \n // Advance the caller's slice to the next token so that\n // they can make consecutive calls to decodeInstruction()\n code = code.skip(length);\n }\n private:\n DxbcShaderInstruction m_instruction;\n std::array m_dstOperands;\n std::array m_srcOperands;\n std::array m_immOperands;\n std::array m_indices;\n uint32_t m_indexId = 0;\n void decodeCustomData(DxbcCodeSlice code) {\n const uint32_t blockLength = code.at(1);\n \n if (blockLength < 2) {\n Logger::err(\"DxbcDecodeContext: Invalid custom data block\");\n return;\n }\n \n // Custom data blocks have their own instruction class\n m_instruction.op = DxbcOpcode::CustomData;\n m_instruction.opClass = DxbcInstClass::CustomData;\n \n // We'll point into the code buffer rather than making a copy\n m_instruction.customDataType = static_cast(\n bit::extract(code.at(0), 11, 31));\n m_instruction.customDataSize = blockLength - 2;\n 
m_instruction.customData = code.ptrAt(2);\n }\n void decodeOperation(DxbcCodeSlice code) {\n uint32_t token = code.read();\n \n // Result modifiers, which are applied to common ALU ops\n m_instruction.modifiers.saturate = !!bit::extract(token, 13, 13);\n m_instruction.modifiers.precise = !!bit::extract(token, 19, 22);\n \n // Opcode controls. It will depend on the\n // opcode itself which ones are valid.\n m_instruction.controls = DxbcShaderOpcodeControls(token);\n \n // Process extended opcode tokens\n while (bit::extract(token, 31, 31)) {\n token = code.read();\n \n const DxbcExtOpcode extOpcode\n = static_cast(bit::extract(token, 0, 5));\n \n switch (extOpcode) {\n case DxbcExtOpcode::SampleControls: {\n struct {\n int u : 4;\n int v : 4;\n int w : 4;\n } aoffimmi;\n \n aoffimmi.u = bit::extract(token, 9, 12);\n aoffimmi.v = bit::extract(token, 13, 16);\n aoffimmi.w = bit::extract(token, 17, 20);\n \n // Four-bit signed numbers, sign-extend them\n m_instruction.sampleControls.u = aoffimmi.u;\n m_instruction.sampleControls.v = aoffimmi.v;\n m_instruction.sampleControls.w = aoffimmi.w;\n } break;\n \n case DxbcExtOpcode::ResourceDim:\n case DxbcExtOpcode::ResourceReturnType:\n break; // part of resource description\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled extended opcode: \",\n extOpcode));\n }\n }\n \n // Retrieve the instruction format in order to parse the\n // operands. Doing this mostly automatically means that\n // the compiler can rely on the operands being valid.\n const DxbcInstFormat format = dxbcInstructionFormat(m_instruction.op);\n m_instruction.opClass = format.instructionClass;\n \n for (uint32_t i = 0; i < format.operandCount; i++)\n this->decodeOperand(code, format.operands[i]);\n }\n void decodeComponentSelection(DxbcRegister& reg, uint32_t token) {\n // Pick the correct component selection mode based on the\n // component count. 
We'll simplify this here so that the\n // compiler can assume that everything is a 4D vector.\n reg.componentCount = static_cast(bit::extract(token, 0, 1));\n \n switch (reg.componentCount) {\n // No components - used for samplers etc.\n case DxbcComponentCount::Component0:\n reg.mask = DxbcRegMask(false, false, false, false);\n reg.swizzle = DxbcRegSwizzle(0, 0, 0, 0);\n break;\n \n // One component - used for immediates\n // and a few built-in registers.\n case DxbcComponentCount::Component1:\n reg.mask = DxbcRegMask(true, false, false, false);\n reg.swizzle = DxbcRegSwizzle(0, 0, 0, 0);\n break;\n \n // Four components - everything else. This requires us\n // to actually parse the component selection mode.\n case DxbcComponentCount::Component4: {\n const DxbcRegMode componentMode =\n static_cast(bit::extract(token, 2, 3));\n \n switch (componentMode) {\n // Write mask for destination operands\n case DxbcRegMode::Mask:\n reg.mask = bit::extract(token, 4, 7);\n reg.swizzle = DxbcRegSwizzle(0, 1, 2, 3);\n break;\n \n // Swizzle for source operands (including resources)\n case DxbcRegMode::Swizzle:\n reg.mask = DxbcRegMask(true, true, true, true);\n reg.swizzle = DxbcRegSwizzle(\n bit::extract(token, 4, 5),\n bit::extract(token, 6, 7),\n bit::extract(token, 8, 9),\n bit::extract(token, 10, 11));\n break;\n \n // Selection of one component. 
We can generate both a\n // mask and a swizzle for this so that the compiler\n // won't have to deal with this case specifically.\n case DxbcRegMode::Select1: {\n const uint32_t n = bit::extract(token, 4, 5);\n reg.mask = DxbcRegMask(n == 0, n == 1, n == 2, n == 3);\n reg.swizzle = DxbcRegSwizzle(n, n, n, n);\n } break;\n \n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component selection mode\");\n }\n } break;\n \n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component count\");\n }\n }\n void decodeOperandExtensions(DxbcCodeSlice& code, DxbcRegister& reg, uint32_t token) {\n while (bit::extract(token, 31, 31)) {\n token = code.read();\n \n // Type of the extended operand token\n const DxbcOperandExt extTokenType =\n static_cast(bit::extract(token, 0, 5));\n \n switch (extTokenType) {\n // Operand modifiers, which are used to manipulate the\n // value of a source operand during the load operation\n case DxbcOperandExt::OperandModifier:\n reg.modifiers = bit::extract(token, 6, 13);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled extended operand token: \",\n extTokenType));\n }\n }\n }\n void decodeOperandImmediates(DxbcCodeSlice& code, DxbcRegister& reg) {\n if (reg.type == DxbcOperandType::Imm32\n || reg.type == DxbcOperandType::Imm64) {\n switch (reg.componentCount) {\n // This is commonly used if only one vector\n // component is involved in an operation\n case DxbcComponentCount::Component1: {\n reg.imm.u32_1 = code.read();\n } break;\n \n // Typical four-component vector\n case DxbcComponentCount::Component4: {\n reg.imm.u32_4[0] = code.read();\n reg.imm.u32_4[1] = code.read();\n reg.imm.u32_4[2] = code.read();\n reg.imm.u32_4[3] = code.read();\n } break;\n\n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component count for immediate operand\");\n }\n }\n }\n void decodeOperandIndex(DxbcCodeSlice& code, DxbcRegister& reg, uint32_t token) {\n reg.idxDim = bit::extract(token, 20, 21);\n \n for 
(uint32_t i = 0; i < reg.idxDim; i++) {\n // An index can be encoded in various different ways\n const DxbcOperandIndexRepresentation repr =\n static_cast(\n bit::extract(token, 22 + 3 * i, 24 + 3 * i));\n \n switch (repr) {\n case DxbcOperandIndexRepresentation::Imm32:\n reg.idx[i].offset = static_cast(code.read());\n reg.idx[i].relReg = nullptr;\n break;\n \n case DxbcOperandIndexRepresentation::Relative:\n reg.idx[i].offset = 0;\n reg.idx[i].relReg = &m_indices.at(m_indexId);\n \n this->decodeRegister(code,\n m_indices.at(m_indexId++),\n DxbcScalarType::Sint32);\n break;\n \n case DxbcOperandIndexRepresentation::Imm32Relative:\n reg.idx[i].offset = static_cast(code.read());\n reg.idx[i].relReg = &m_indices.at(m_indexId);\n \n this->decodeRegister(code,\n m_indices.at(m_indexId++),\n DxbcScalarType::Sint32);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled index representation: \",\n repr));\n }\n }\n }\n void decodeRegister(DxbcCodeSlice& code, DxbcRegister& reg, DxbcScalarType type) {\n const uint32_t token = code.read();\n \n reg.type = static_cast(bit::extract(token, 12, 19));\n reg.dataType = type;\n reg.modifiers = 0;\n reg.idxDim = 0;\n \n for (uint32_t i = 0; i < DxbcMaxRegIndexDim; i++) {\n reg.idx[i].relReg = nullptr;\n reg.idx[i].offset = 0;\n }\n \n this->decodeComponentSelection(reg, token);\n this->decodeOperandExtensions(code, reg, token);\n this->decodeOperandImmediates(code, reg);\n this->decodeOperandIndex(code, reg, token);\n }\n void decodeImm32(DxbcCodeSlice& code, DxbcImmediate& imm, DxbcScalarType type) {\n imm.u32 = code.read();\n }\n ", "suffix_code": "\n};", "middle_code": "void decodeOperand(DxbcCodeSlice& code, const DxbcInstOperandFormat& format) {\n switch (format.kind) {\n case DxbcOperandKind::DstReg: {\n const uint32_t operandId = m_instruction.dstCount++;\n this->decodeRegister(code, m_dstOperands.at(operandId), format.type);\n } break;\n case DxbcOperandKind::SrcReg: {\n const uint32_t 
operandId = m_instruction.srcCount++;\n this->decodeRegister(code, m_srcOperands.at(operandId), format.type);\n } break;\n case DxbcOperandKind::Imm32: {\n const uint32_t operandId = m_instruction.immCount++;\n this->decodeImm32(code, m_immOperands.at(operandId), format.type);\n } break;\n default:\n throw DxvkError(\"DxbcDecodeContext: Invalid operand format\");\n }\n }", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "cpp", "sub_task_type": null}, "context_code": [["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_compiler.h", "class DxbcCompilerHsPhase {\n public:\n DxbcCompiler(\n const std::string& fileName,\n const DxbcModuleInfo& moduleInfo,\n const DxbcProgramInfo& programInfo,\n const Rc& isgn,\n const Rc& osgn,\n const Rc& psgn,\n const DxbcAnalysisInfo& analysis) {\n // Declare an entry point ID. We'll need it during the\n // initialization phase where the execution mode is set.\n m_entryPointId = m_module.allocateId();\n \n // Set the shader name so that we recognize it in renderdoc\n m_module.setDebugSource(\n spv::SourceLanguageUnknown, 0,\n m_module.addDebugString(fileName.c_str()),\n nullptr);\n\n // Set the memory model. 
This is the same for all shaders.\n m_module.enableCapability(\n spv::CapabilityVulkanMemoryModel);\n\n m_module.setMemoryModel(\n spv::AddressingModelLogical,\n spv::MemoryModelVulkan);\n \n // Make sure our interface registers are clear\n for (uint32_t i = 0; i < DxbcMaxInterfaceRegs; i++) {\n m_vRegs.at(i) = DxbcRegisterPointer { };\n m_oRegs.at(i) = DxbcRegisterPointer { };\n }\n \n this->emitInit();\n }\n ~DxbcCompiler() {\n \n }\n void processInstruction(\n const DxbcShaderInstruction& ins) {\n m_lastOp = m_currOp;\n m_currOp = ins.op;\n\n switch (ins.opClass) {\n case DxbcInstClass::Declaration:\n return this->emitDcl(ins);\n \n case DxbcInstClass::CustomData:\n return this->emitCustomData(ins);\n \n case DxbcInstClass::Atomic:\n return this->emitAtomic(ins);\n \n case DxbcInstClass::AtomicCounter:\n return this->emitAtomicCounter(ins);\n \n case DxbcInstClass::Barrier:\n return this->emitBarrier(ins);\n \n case DxbcInstClass::BitExtract:\n return this->emitBitExtract(ins);\n \n case DxbcInstClass::BitInsert:\n return this->emitBitInsert(ins);\n \n case DxbcInstClass::BitScan:\n return this->emitBitScan(ins);\n \n case DxbcInstClass::BufferQuery:\n return this->emitBufferQuery(ins);\n \n case DxbcInstClass::BufferLoad:\n return this->emitBufferLoad(ins);\n \n case DxbcInstClass::BufferStore:\n return this->emitBufferStore(ins);\n \n case DxbcInstClass::ConvertFloat16:\n return this->emitConvertFloat16(ins);\n \n case DxbcInstClass::ConvertFloat64:\n return this->emitConvertFloat64(ins);\n \n case DxbcInstClass::ControlFlow:\n return this->emitControlFlow(ins);\n \n case DxbcInstClass::GeometryEmit:\n return this->emitGeometryEmit(ins);\n \n case DxbcInstClass::HullShaderPhase:\n return this->emitHullShaderPhase(ins);\n \n case DxbcInstClass::HullShaderInstCnt:\n return this->emitHullShaderInstCnt(ins);\n \n case DxbcInstClass::Interpolate:\n return this->emitInterpolate(ins);\n \n case DxbcInstClass::NoOperation:\n return;\n\n case 
DxbcInstClass::SparseCheckAccess:\n return this->emitSparseCheckAccess(ins);\n\n case DxbcInstClass::TextureQuery:\n return this->emitTextureQuery(ins);\n \n case DxbcInstClass::TextureQueryLod:\n return this->emitTextureQueryLod(ins);\n \n case DxbcInstClass::TextureQueryMs:\n return this->emitTextureQueryMs(ins);\n \n case DxbcInstClass::TextureQueryMsPos:\n return this->emitTextureQueryMsPos(ins);\n \n case DxbcInstClass::TextureFetch:\n return this->emitTextureFetch(ins);\n \n case DxbcInstClass::TextureGather:\n return this->emitTextureGather(ins);\n \n case DxbcInstClass::TextureSample:\n return this->emitTextureSample(ins);\n \n case DxbcInstClass::TypedUavLoad:\n return this->emitTypedUavLoad(ins);\n \n case DxbcInstClass::TypedUavStore:\n return this->emitTypedUavStore(ins);\n \n case DxbcInstClass::VectorAlu:\n return this->emitVectorAlu(ins);\n \n case DxbcInstClass::VectorCmov:\n return this->emitVectorCmov(ins);\n \n case DxbcInstClass::VectorCmp:\n return this->emitVectorCmp(ins);\n \n case DxbcInstClass::VectorDeriv:\n return this->emitVectorDeriv(ins);\n \n case DxbcInstClass::VectorDot:\n return this->emitVectorDot(ins);\n \n case DxbcInstClass::VectorIdiv:\n return this->emitVectorIdiv(ins);\n \n case DxbcInstClass::VectorImul:\n return this->emitVectorImul(ins);\n \n case DxbcInstClass::VectorMsad:\n return this->emitVectorMsad(ins);\n \n case DxbcInstClass::VectorShift:\n return this->emitVectorShift(ins);\n \n case DxbcInstClass::VectorSinCos:\n return this->emitVectorSinCos(ins);\n \n default:\n Logger::warn(\n str::format(\"DxbcCompiler: Unhandled opcode class: \",\n ins.op));\n }\n }\n void processXfbPassthrough() {\n m_module.setExecutionMode (m_entryPointId, spv::ExecutionModeInputPoints);\n m_module.setExecutionMode (m_entryPointId, spv::ExecutionModeOutputPoints);\n m_module.setOutputVertices(m_entryPointId, 1);\n\n for (auto e = m_isgn->begin(); e != m_isgn->end(); e++) {\n emitDclInput(e->registerId, 1,\n e->componentMask, 
DxbcSystemValue::None,\n DxbcInterpolationMode::Undefined);\n }\n\n // Figure out which streams to enable\n uint32_t streamMask = 0;\n\n for (size_t i = 0; i < m_xfbVars.size(); i++)\n streamMask |= 1u << m_xfbVars[i].streamId;\n \n for (uint32_t streamId : bit::BitMask(streamMask)) {\n emitXfbOutputSetup(streamId, true);\n m_module.opEmitVertex(m_module.constu32(streamId));\n }\n\n // End the main function\n emitFunctionEnd();\n\n // For pass-through we always assume points\n m_inputTopology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;\n }\n SpirvCodeBuffer finalize() {\n // Depending on the shader type, this will prepare\n // input registers, call various shader functions\n // and write back the output registers.\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: this->emitVsFinalize(); break;\n case DxbcProgramType::HullShader: this->emitHsFinalize(); break;\n case DxbcProgramType::DomainShader: this->emitDsFinalize(); break;\n case DxbcProgramType::GeometryShader: this->emitGsFinalize(); break;\n case DxbcProgramType::PixelShader: this->emitPsFinalize(); break;\n case DxbcProgramType::ComputeShader: this->emitCsFinalize(); break;\n default: throw DxvkError(\"Invalid shader stage\");\n }\n\n // Emit float control mode if the extension is supported\n this->emitFloatControl();\n \n // Declare the entry point, we now have all the\n // information we need, including the interfaces\n m_module.addEntryPoint(m_entryPointId,\n m_programInfo.executionModel(), \"main\");\n m_module.setDebugName(m_entryPointId, \"main\");\n\n return m_module.compile();\n }\n private:\n DxbcModuleInfo m_moduleInfo;\n DxbcProgramInfo m_programInfo;\n SpirvModule m_module;\n Rc m_isgn;\n Rc m_osgn;\n Rc m_psgn;\n const DxbcAnalysisInfo* m_analysis;\n std::vector m_bindings;\n std::vector m_rRegs;\n std::vector m_xRegs;\n std::vector m_gRegs;\n std::array<\n DxbcRegisterPointer,\n DxbcMaxInterfaceRegs> m_vRegs;\n std::vector m_vMappings;\n std::array<\n DxbcRegisterPointer,\n 
DxbcMaxInterfaceRegs> m_oRegs;\n std::vector m_oMappings;\n std::vector m_xfbVars;\n std::vector m_indexRanges = { };\n std::array m_constantBuffers;\n std::array m_samplers;\n std::array m_textures;\n std::array m_uavs;\n bool m_hasGloballyCoherentUav = false;\n bool m_hasRasterizerOrderedUav = false;\n std::vector m_controlFlowBlocks;\n bool m_topLevelIsUniform = true;\n uint64_t m_uavRdMask = 0u;\n uint64_t m_uavWrMask = 0u;\n bool m_insideFunction = false;\n uint32_t m_vArrayLength = 0;\n uint32_t m_vArrayLengthId = 0;\n uint32_t m_vArray = 0;\n uint32_t m_positionIn = 0;\n uint32_t m_positionOut = 0;\n uint32_t m_clipDistances = 0;\n uint32_t m_cullDistances = 0;\n uint32_t m_primitiveIdIn = 0;\n uint32_t m_primitiveIdOut = 0;\n uint32_t m_icbArray = 0;\n std::vector m_icbData;\n uint32_t m_icbComponents = 0u;\n uint32_t m_icbSize = 0u;\n uint32_t m_samplePositions = 0;\n uint32_t m_uavCtrStructType = 0;\n uint32_t m_uavCtrPointerType = 0;\n std::unordered_map m_subroutines;\n uint32_t m_entryPointId = 0;\n bool m_hasRawAccessChains = false;\n uint32_t m_inputMask = 0u;\n uint32_t m_outputMask = 0u;\n DxbcCompilerVsPart m_vs;\n DxbcCompilerHsPart m_hs;\n DxbcCompilerDsPart m_ds;\n DxbcCompilerGsPart m_gs;\n DxbcCompilerPsPart m_ps;\n DxbcCompilerCsPart m_cs;\n bool m_precise = true;\n DxbcOpcode m_lastOp = DxbcOpcode::Nop;\n DxbcOpcode m_currOp = DxbcOpcode::Nop;\n VkPrimitiveTopology m_inputTopology = VK_PRIMITIVE_TOPOLOGY_MAX_ENUM;\n VkPrimitiveTopology m_outputTopology = VK_PRIMITIVE_TOPOLOGY_MAX_ENUM;\n void emitDcl(\n const DxbcShaderInstruction& ins) {\n switch (ins.op) {\n case DxbcOpcode::DclGlobalFlags:\n return this->emitDclGlobalFlags(ins);\n \n case DxbcOpcode::DclIndexRange:\n return this->emitDclIndexRange(ins);\n \n case DxbcOpcode::DclTemps:\n return this->emitDclTemps(ins);\n \n case DxbcOpcode::DclIndexableTemp:\n return this->emitDclIndexableTemp(ins);\n \n case DxbcOpcode::DclInput:\n case DxbcOpcode::DclInputSgv:\n case 
DxbcOpcode::DclInputSiv:\n case DxbcOpcode::DclInputPs:\n case DxbcOpcode::DclInputPsSgv:\n case DxbcOpcode::DclInputPsSiv:\n case DxbcOpcode::DclOutput:\n case DxbcOpcode::DclOutputSgv:\n case DxbcOpcode::DclOutputSiv:\n return this->emitDclInterfaceReg(ins);\n \n case DxbcOpcode::DclConstantBuffer:\n return this->emitDclConstantBuffer(ins);\n \n case DxbcOpcode::DclSampler:\n return this->emitDclSampler(ins);\n \n case DxbcOpcode::DclStream:\n return this->emitDclStream(ins);\n \n case DxbcOpcode::DclUavTyped:\n case DxbcOpcode::DclResource:\n return this->emitDclResourceTyped(ins);\n \n case DxbcOpcode::DclUavRaw:\n case DxbcOpcode::DclResourceRaw:\n case DxbcOpcode::DclUavStructured:\n case DxbcOpcode::DclResourceStructured:\n return this->emitDclResourceRawStructured(ins);\n \n case DxbcOpcode::DclThreadGroupSharedMemoryRaw:\n case DxbcOpcode::DclThreadGroupSharedMemoryStructured:\n return this->emitDclThreadGroupSharedMemory(ins);\n \n case DxbcOpcode::DclGsInputPrimitive:\n return this->emitDclGsInputPrimitive(ins);\n \n case DxbcOpcode::DclGsOutputPrimitiveTopology:\n return this->emitDclGsOutputTopology(ins);\n \n case DxbcOpcode::DclMaxOutputVertexCount:\n return this->emitDclMaxOutputVertexCount(ins);\n \n case DxbcOpcode::DclInputControlPointCount:\n return this->emitDclInputControlPointCount(ins);\n \n case DxbcOpcode::DclOutputControlPointCount:\n return this->emitDclOutputControlPointCount(ins);\n \n case DxbcOpcode::DclHsMaxTessFactor:\n return this->emitDclHsMaxTessFactor(ins);\n \n case DxbcOpcode::DclTessDomain:\n return this->emitDclTessDomain(ins);\n \n case DxbcOpcode::DclTessPartitioning:\n return this->emitDclTessPartitioning(ins);\n \n case DxbcOpcode::DclTessOutputPrimitive:\n return this->emitDclTessOutputPrimitive(ins);\n \n case DxbcOpcode::DclThreadGroup:\n return this->emitDclThreadGroup(ins);\n \n case DxbcOpcode::DclGsInstanceCount:\n return this->emitDclGsInstanceCount(ins);\n \n default:\n Logger::warn(\n 
str::format(\"DxbcCompiler: Unhandled opcode: \",\n ins.op));\n }\n }\n void emitDclGlobalFlags(\n const DxbcShaderInstruction& ins) {\n const DxbcGlobalFlags flags = ins.controls.globalFlags();\n \n if (flags.test(DxbcGlobalFlag::RefactoringAllowed))\n m_precise = false;\n\n if (flags.test(DxbcGlobalFlag::EarlyFragmentTests))\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeEarlyFragmentTests);\n }\n void emitDclIndexRange(\n const DxbcShaderInstruction& ins) {\n // dcl_index_range has one operand:\n // (0) Range start, either an input or output register\n // (1) Range end\n uint32_t index = ins.dst[0].idxDim - 1u;\n\n DxbcIndexRange range = { };\n range.type = ins.dst[0].type;\n range.start = ins.dst[0].idx[index].offset;\n range.length = ins.imm[0].u32;\n\n m_indexRanges.push_back(range);\n }\n void emitDclTemps(\n const DxbcShaderInstruction& ins) {\n // dcl_temps has one operand:\n // (imm0) Number of temp registers\n\n // Ignore this and declare temps on demand.\n }\n void emitDclIndexableTemp(\n const DxbcShaderInstruction& ins) {\n // dcl_indexable_temps has three operands:\n // (imm0) Array register index (x#)\n // (imm1) Number of vectors stored in the array\n // (imm2) Component count of each individual vector. 
This is\n // always 4 in fxc-generated binaries and therefore useless.\n const uint32_t regId = ins.imm[0].u32;\n\n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = m_analysis->xRegMasks.at(regId).minComponents();\n info.type.alength = ins.imm[1].u32;\n info.sclass = spv::StorageClassPrivate;\n\n if (regId >= m_xRegs.size())\n m_xRegs.resize(regId + 1);\n \n m_xRegs.at(regId).ccount = info.type.ccount;\n m_xRegs.at(regId).alength = info.type.alength;\n m_xRegs.at(regId).varId = emitNewVariable(info);\n \n m_module.setDebugName(m_xRegs.at(regId).varId,\n str::format(\"x\", regId).c_str());\n }\n void emitDclInterfaceReg(\n const DxbcShaderInstruction& ins) {\n switch (ins.dst[0].type) {\n case DxbcOperandType::InputControlPoint:\n if (m_programInfo.type() != DxbcProgramType::HullShader)\n break;\n [[fallthrough]];\n\n case DxbcOperandType::Input:\n case DxbcOperandType::Output: {\n // dcl_input and dcl_output instructions\n // have the following operands:\n // (dst0) The register to declare\n // (imm0) The system value (optional)\n uint32_t regDim = 0;\n uint32_t regIdx = 0;\n \n // In the vertex and fragment shader stage, the\n // operand indices will have the following format:\n // (0) Register index\n // \n // In other stages, the input and output registers\n // may be declared as arrays of a fixed size:\n // (0) Array length\n // (1) Register index\n if (ins.dst[0].idxDim == 2) {\n regDim = ins.dst[0].idx[0].offset;\n regIdx = ins.dst[0].idx[1].offset;\n } else if (ins.dst[0].idxDim == 1) {\n regIdx = ins.dst[0].idx[0].offset;\n } else {\n Logger::err(str::format(\n \"DxbcCompiler: \", ins.op,\n \": Invalid index dimension\"));\n return;\n }\n \n // This declaration may map an output register to a system\n // value. 
If that is the case, the system value type will\n // be stored in the second operand.\n const bool hasSv =\n ins.op == DxbcOpcode::DclInputSgv\n || ins.op == DxbcOpcode::DclInputSiv\n || ins.op == DxbcOpcode::DclInputPsSgv\n || ins.op == DxbcOpcode::DclInputPsSiv\n || ins.op == DxbcOpcode::DclOutputSgv\n || ins.op == DxbcOpcode::DclOutputSiv;\n \n DxbcSystemValue sv = DxbcSystemValue::None;\n \n if (hasSv)\n sv = static_cast(ins.imm[0].u32);\n \n // In the pixel shader, inputs are declared with an\n // interpolation mode that is part of the op token.\n const bool hasInterpolationMode =\n ins.op == DxbcOpcode::DclInputPs\n || ins.op == DxbcOpcode::DclInputPsSiv;\n \n DxbcInterpolationMode im = DxbcInterpolationMode::Undefined;\n \n if (hasInterpolationMode)\n im = ins.controls.interpolation();\n \n // Declare the actual input/output variable\n switch (ins.op) {\n case DxbcOpcode::DclInput:\n case DxbcOpcode::DclInputSgv:\n case DxbcOpcode::DclInputSiv:\n case DxbcOpcode::DclInputPs:\n case DxbcOpcode::DclInputPsSgv:\n case DxbcOpcode::DclInputPsSiv:\n this->emitDclInput(regIdx, regDim, ins.dst[0].mask, sv, im);\n break;\n \n case DxbcOpcode::DclOutput:\n case DxbcOpcode::DclOutputSgv:\n case DxbcOpcode::DclOutputSiv:\n this->emitDclOutput(regIdx, regDim, ins.dst[0].mask, sv, im);\n break;\n \n default:\n Logger::err(str::format(\n \"DxbcCompiler: Unexpected opcode: \",\n ins.op));\n }\n } break;\n \n case DxbcOperandType::InputThreadId: {\n m_cs.builtinGlobalInvocationId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInGlobalInvocationId,\n \"vThreadId\");\n } break;\n \n case DxbcOperandType::InputThreadGroupId: {\n m_cs.builtinWorkgroupId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInWorkgroupId,\n \"vThreadGroupId\");\n } break;\n \n case DxbcOperandType::InputThreadIdInGroup: {\n m_cs.builtinLocalInvocationId = emitNewBuiltinVariable({\n { 
DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationId,\n \"vThreadIdInGroup\");\n } break;\n \n case DxbcOperandType::InputThreadIndexInGroup: {\n m_cs.builtinLocalInvocationIndex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationIndex,\n \"vThreadIndexInGroup\");\n } break;\n \n case DxbcOperandType::InputCoverageMask: {\n m_ps.builtinSampleMaskIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 1 },\n spv::StorageClassInput },\n spv::BuiltInSampleMask,\n \"vCoverage\");\n } break;\n \n case DxbcOperandType::OutputCoverageMask: {\n m_ps.builtinSampleMaskOut = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 1 },\n spv::StorageClassOutput },\n spv::BuiltInSampleMask,\n \"oMask\");\n } break;\n \n case DxbcOperandType::OutputDepth: {\n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeDepthReplacing);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepth\");\n } break;\n \n case DxbcOperandType::OutputStencilRef: {\n m_module.enableExtension(\"SPV_EXT_shader_stencil_export\");\n m_module.enableCapability(spv::CapabilityStencilExportEXT);\n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeStencilRefReplacingEXT);\n m_ps.builtinStencilRef = emitNewBuiltinVariable({\n { DxbcScalarType::Sint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragStencilRefEXT,\n \"oStencilRef\");\n } break;\n\n case DxbcOperandType::OutputDepthGe: {\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthReplacing);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthGreater);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepthGe\");\n } break;\n \n case DxbcOperandType::OutputDepthLe: {\n 
m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthReplacing);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthLess);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepthLe\");\n } break;\n \n case DxbcOperandType::InputPrimitiveId: {\n m_primitiveIdIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInPrimitiveId,\n \"vPrim\");\n } break;\n \n case DxbcOperandType::InputDomainPoint: {\n m_ds.builtinTessCoord = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInTessCoord,\n \"vDomain\");\n } break;\n \n case DxbcOperandType::InputForkInstanceId:\n case DxbcOperandType::InputJoinInstanceId: {\n auto phase = this->getCurrentHsForkJoinPhase();\n \n phase->instanceIdPtr = m_module.newVar(\n m_module.defPointerType(\n m_module.defIntType(32, 0),\n spv::StorageClassFunction),\n spv::StorageClassFunction);\n \n m_module.opStore(phase->instanceIdPtr, phase->instanceId);\n m_module.setDebugName(phase->instanceIdPtr,\n ins.dst[0].type == DxbcOperandType::InputForkInstanceId\n ? 
\"vForkInstanceId\" : \"vJoinInstanceId\");\n } break;\n \n case DxbcOperandType::OutputControlPointId: {\n // This system value map to the invocation\n // ID, which has been declared already.\n } break;\n \n case DxbcOperandType::InputPatchConstant:\n case DxbcOperandType::OutputControlPoint: {\n // These have been declared as global input and\n // output arrays, so there's nothing left to do.\n } break;\n \n case DxbcOperandType::InputGsInstanceId: {\n m_gs.builtinInvocationId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInvocationId,\n \"vInstanceID\");\n } break;\n \n case DxbcOperandType::InputInnerCoverage: {\n m_module.enableExtension(\"SPV_EXT_fragment_fully_covered\");\n m_module.enableCapability(spv::CapabilityFragmentFullyCoveredEXT);\n\n // This is bool in SPIR-V but uint32 in DXBC. A bool value of\n // false must be 0, and bit 1 must be set to represent true.\n uint32_t builtinId = emitNewBuiltinVariable({\n { DxbcScalarType::Bool, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInFullyCoveredEXT,\n nullptr);\n\n m_ps.builtinInnerCoverageId = emitNewVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassPrivate });\n\n m_module.setDebugName(m_ps.builtinInnerCoverageId, \"vInnerCoverage\");\n\n uint32_t boolTypeId = m_module.defBoolType();\n uint32_t uintTypeId = m_module.defIntType(32, 0);\n\n m_module.opStore(m_ps.builtinInnerCoverageId,\n m_module.opSelect(uintTypeId,\n m_module.opLoad(boolTypeId, builtinId),\n m_module.constu32(1),\n m_module.constu32(0)));\n } break;\n\n default:\n Logger::err(str::format(\n \"DxbcCompiler: Unsupported operand type declaration: \",\n ins.dst[0].type));\n \n }\n }\n void emitDclInput(\n uint32_t regIdx,\n uint32_t regDim,\n DxbcRegMask regMask,\n DxbcSystemValue sv,\n DxbcInterpolationMode im) {\n // Avoid declaring the same variable multiple times.\n // This may happen when multiple system values are\n // mapped to different parts of the same 
register.\n if (m_vRegs.at(regIdx).id == 0 && sv == DxbcSystemValue::None) {\n const DxbcVectorType regType = getInputRegType(regIdx);\n \n DxbcRegisterInfo info;\n info.type.ctype = regType.ctype;\n info.type.ccount = regType.ccount;\n info.type.alength = regDim;\n info.sclass = spv::StorageClassInput;\n \n const uint32_t varId = emitNewVariable(info);\n \n m_module.decorateLocation(varId, regIdx);\n m_module.setDebugName(varId, str::format(\"v\", regIdx).c_str());\n \n m_vRegs.at(regIdx) = { regType, varId };\n \n // Interpolation mode, used in pixel shaders\n if (im == DxbcInterpolationMode::Constant)\n m_module.decorate(varId, spv::DecorationFlat);\n \n if (im == DxbcInterpolationMode::LinearCentroid\n || im == DxbcInterpolationMode::LinearNoPerspectiveCentroid)\n m_module.decorate(varId, spv::DecorationCentroid);\n \n if (im == DxbcInterpolationMode::LinearNoPerspective\n || im == DxbcInterpolationMode::LinearNoPerspectiveCentroid\n || im == DxbcInterpolationMode::LinearNoPerspectiveSample)\n m_module.decorate(varId, spv::DecorationNoPerspective);\n \n if (im == DxbcInterpolationMode::LinearSample\n || im == DxbcInterpolationMode::LinearNoPerspectiveSample) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n m_module.decorate(varId, spv::DecorationSample);\n }\n\n if (m_moduleInfo.options.forceSampleRateShading) {\n if (im == DxbcInterpolationMode::Linear\n || im == DxbcInterpolationMode::LinearNoPerspective) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n m_module.decorate(varId, spv::DecorationSample);\n }\n }\n\n // Declare the input slot as defined\n m_inputMask |= 1u << regIdx;\n m_vArrayLength = std::max(m_vArrayLength, regIdx + 1);\n } else if (sv != DxbcSystemValue::None) {\n // Add a new system value mapping if needed\n bool skipSv = sv == DxbcSystemValue::ClipDistance\n || sv == DxbcSystemValue::CullDistance;\n \n if (!skipSv)\n m_vMappings.push_back({ regIdx, regMask, sv });\n }\n }\n void emitDclOutput(\n uint32_t 
regIdx,\n uint32_t regDim,\n DxbcRegMask regMask,\n DxbcSystemValue sv,\n DxbcInterpolationMode im) {\n // Add a new system value mapping if needed. Clip\n // and cull distances are handled separately.\n if (sv != DxbcSystemValue::None\n && sv != DxbcSystemValue::ClipDistance\n && sv != DxbcSystemValue::CullDistance)\n m_oMappings.push_back({ regIdx, regMask, sv });\n \n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n // Hull shaders don't use standard outputs\n if (getCurrentHsForkJoinPhase() != nullptr)\n m_hs.outputPerPatchMask |= 1 << regIdx;\n } else if (m_oRegs.at(regIdx).id == 0) {\n // Avoid declaring the same variable multiple times.\n // This may happen when multiple system values are\n // mapped to different parts of the same register.\n const DxbcVectorType regType = getOutputRegType(regIdx);\n \n DxbcRegisterInfo info;\n info.type.ctype = regType.ctype;\n info.type.ccount = regType.ccount;\n info.type.alength = regDim;\n info.sclass = spv::StorageClassOutput;\n\n // In xfb mode, we set up the actual\n // output vars when emitting a vertex\n if (m_moduleInfo.xfb != nullptr)\n info.sclass = spv::StorageClassPrivate;\n \n // In geometry shaders, don't duplicate system value outputs\n // to stay within device limits. 
The pixel shader will read\n // all GS system value outputs as system value inputs.\n if (m_programInfo.type() == DxbcProgramType::GeometryShader && sv != DxbcSystemValue::None)\n info.sclass = spv::StorageClassPrivate;\n\n const uint32_t varId = this->emitNewVariable(info);\n m_module.setDebugName(varId, str::format(\"o\", regIdx).c_str());\n \n if (info.sclass == spv::StorageClassOutput) {\n m_module.decorateLocation(varId, regIdx);\n\n // Add index decoration for potential dual-source blending\n if (m_programInfo.type() == DxbcProgramType::PixelShader)\n m_module.decorateIndex(varId, 0);\n\n // Declare vertex positions in all stages as invariant, even if\n // this is not the last stage, to help with potential Z fighting.\n if (sv == DxbcSystemValue::Position && m_moduleInfo.options.invariantPosition)\n m_module.decorate(varId, spv::DecorationInvariant);\n }\n \n m_oRegs.at(regIdx) = { regType, varId };\n \n // Declare the output slot as defined\n m_outputMask |= 1u << regIdx;\n }\n }\n void emitDclConstantBuffer(\n const DxbcShaderInstruction& ins) {\n // dcl_constant_buffer has one operand with two indices:\n // (0) Constant buffer register ID (cb#)\n // (1) Number of constants in the buffer\n uint32_t bufferId = ins.dst[0].idx[0].offset;\n uint32_t elementCount = ins.dst[0].idx[1].offset;\n\n // With dynamic indexing, games will often index constant buffers\n // out of bounds. Declare an upper bound to stay within spec.\n if (ins.controls.accessType() == DxbcConstantBufferAccessType::DynamicallyIndexed)\n elementCount = 4096;\n\n this->emitDclConstantBufferVar(bufferId, elementCount, 4u,\n str::format(\"cb\", bufferId).c_str());\n }\n void emitDclConstantBufferVar(\n uint32_t regIdx,\n uint32_t numConstants,\n uint32_t numComponents,\n const char* name) {\n // Uniform buffer data is stored as a fixed-size array\n // of 4x32-bit vectors. 
SPIR-V requires explicit strides.\n const uint32_t arrayType = m_module.defArrayTypeUnique(\n getVectorTypeId({ DxbcScalarType::Float32, numComponents }),\n m_module.constu32(numConstants));\n m_module.decorateArrayStride(arrayType, sizeof(uint32_t) * numComponents);\n \n // SPIR-V requires us to put that array into a\n // struct and decorate that struct as a block.\n const uint32_t structType = m_module.defStructTypeUnique(1, &arrayType);\n \n m_module.decorate(structType, spv::DecorationBlock);\n m_module.memberDecorateOffset(structType, 0, 0);\n \n m_module.setDebugName (structType, str::format(name, \"_t\").c_str());\n m_module.setDebugMemberName (structType, 0, \"m\");\n \n // Variable that we'll use to access the buffer\n const uint32_t varId = m_module.newVar(\n m_module.defPointerType(structType, spv::StorageClassUniform),\n spv::StorageClassUniform);\n \n m_module.setDebugName(varId, name);\n \n // Compute the DXVK binding slot index for the buffer.\n // D3D11 needs to bind the actual buffers to this slot.\n uint32_t bindingId = computeConstantBufferBinding(\n m_programInfo.type(), regIdx);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n\n DxbcConstantBuffer buf;\n buf.varId = varId;\n buf.size = numConstants;\n m_constantBuffers.at(regIdx) = buf;\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER };\n binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.access = VK_ACCESS_UNIFORM_READ_BIT;\n binding.resourceBinding = bindingId;\n binding.uboSet = true;\n m_bindings.push_back(binding);\n }\n void emitDclSampler(\n const DxbcShaderInstruction& ins) {\n // dclSampler takes one operand:\n // (dst0) The sampler register to declare\n const uint32_t samplerId = ins.dst[0].idx[0].offset;\n \n // The sampler type is opaque, but we still have to\n // define a pointer and a variable in oder to use it\n const uint32_t samplerType = 
      m_module.defSamplerType();
    const uint32_t samplerPtrType = m_module.defPointerType(
      samplerType, spv::StorageClassUniformConstant);
    
    // Define the sampler variable
    const uint32_t varId = m_module.newVar(samplerPtrType,
      spv::StorageClassUniformConstant);
    m_module.setDebugName(varId,
      str::format("s", samplerId).c_str());
    
    // Record both the variable and the opaque type so sample
    // instructions can build OpSampledImage objects later.
    m_samplers.at(samplerId).varId = varId;
    m_samplers.at(samplerId).typeId = samplerType;
    
    // Compute binding slot index for the sampler
    uint32_t bindingId = computeSamplerBinding(
      m_programInfo.type(), samplerId);
    
    m_module.decorateDescriptorSet(varId, 0);
    m_module.decorateBinding(varId, bindingId);
    
    // Store descriptor info for the shader interface
    DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_SAMPLER };
    binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;
    binding.resourceBinding = bindingId;
    m_bindings.push_back(binding);
  }
  
  
  // Only stream 0 is supported unless a transform feedback
  // configuration is provided; anything else is reported.
  void emitDclStream(
    const DxbcShaderInstruction& ins) {
    if (ins.dst[0].idx[0].offset != 0 && m_moduleInfo.xfb == nullptr)
      Logger::err("Dxbc: Multiple streams not supported");
  }
  
  
  // Declares a typed shader resource view (t#) or typed
  // unordered access view (u#) and its descriptor binding.
  void emitDclResourceTyped(
    const DxbcShaderInstruction& ins) {
    // dclResource takes two operands:
    // (dst0) The resource register ID
    // (imm0) The resource return type
    const uint32_t registerId = ins.dst[0].idx[0].offset;
    
    // We also handle unordered access views here
    const bool isUav = ins.op == DxbcOpcode::DclUavTyped;
    
    if (isUav) {
      if (m_moduleInfo.options.supportsTypedUavLoadR32)
        m_module.enableCapability(spv::CapabilityStorageImageReadWithoutFormat);
      m_module.enableCapability(spv::CapabilityStorageImageWriteWithoutFormat);
    }
    
    // Defines the type of the resource (texture2D, ...)
    const DxbcResourceDim resourceType = ins.controls.resourceDim();
    
    // Defines the type of a read operation.
    // DXBC has the ability
    // to define four different types whereas SPIR-V only allows
    // one, but in practice this should not be much of a problem.
    auto xType = static_cast<DxbcResourceReturnType>(
      bit::extract(ins.imm[0].u32, 0, 3));
    auto yType = static_cast<DxbcResourceReturnType>(
      bit::extract(ins.imm[0].u32, 4, 7));
    auto zType = static_cast<DxbcResourceReturnType>(
      bit::extract(ins.imm[0].u32, 8, 11));
    auto wType = static_cast<DxbcResourceReturnType>(
      bit::extract(ins.imm[0].u32, 12, 15));
    
    if ((xType != yType) || (xType != zType) || (xType != wType))
      Logger::warn("DxbcCompiler: dcl_resource: Ignoring resource return types");
    
    // Declare the actual sampled type
    const DxbcScalarType sampledType = [xType] {
      switch (xType) {
        // FIXME is this correct? There's no documentation about it
        case DxbcResourceReturnType::Mixed: return DxbcScalarType::Uint32;
        // FIXME do we have to manually clamp writes to SNORM/UNORM resources?
        case DxbcResourceReturnType::Snorm: return DxbcScalarType::Float32;
        case DxbcResourceReturnType::Unorm: return DxbcScalarType::Float32;
        case DxbcResourceReturnType::Float: return DxbcScalarType::Float32;
        case DxbcResourceReturnType::Sint:  return DxbcScalarType::Sint32;
        case DxbcResourceReturnType::Uint:  return DxbcScalarType::Uint32;
        default: throw DxvkError(str::format("DxbcCompiler: Invalid sampled type: ", xType));
      }
    }();
    
    // Declare the resource type
    const uint32_t sampledTypeId = getScalarTypeId(sampledType);
    const DxbcImageInfo typeInfo = getResourceType(resourceType, isUav);
    
    // Declare additional capabilities if necessary
    switch (resourceType) {
      case DxbcResourceDim::Buffer:
        m_module.enableCapability(isUav
          ? spv::CapabilityImageBuffer
          : spv::CapabilitySampledBuffer);
        break;
      
      case DxbcResourceDim::Texture1D:
      case DxbcResourceDim::Texture1DArr:
        m_module.enableCapability(isUav
          ? spv::CapabilityImage1D
          : spv::CapabilitySampled1D);
        break;
      
      case DxbcResourceDim::TextureCubeArr:
        m_module.enableCapability(
          spv::CapabilitySampledCubeArray);
        break;
      
      default:
        // No additional capabilities required
        break;
    }
    
    // If the read-without-format capability is not set and this
    // image is accessed via a typed load, or if atomic operations
    // are used, we must define the image format explicitly.
    spv::ImageFormat imageFormat = spv::ImageFormatUnknown;
    
    if (isUav) {
      if ((m_analysis->uavInfos[registerId].accessAtomicOp)
       || (m_analysis->uavInfos[registerId].accessTypedLoad
        && !m_moduleInfo.options.supportsTypedUavLoadR32))
        imageFormat = getScalarImageFormat(sampledType);
    }
    
    // We do not know whether the image is going to be used as
    // a color image or a depth image yet, but we can pick the
    // correct type when creating a sampled image object.
    const uint32_t imageTypeId = m_module.defImageType(sampledTypeId,
      typeInfo.dim, 0, typeInfo.array, typeInfo.ms, typeInfo.sampled,
      imageFormat);
    
    // We'll declare the texture variable with the color type
    // and decide which one to use when the texture is sampled.
    const uint32_t resourcePtrType = m_module.defPointerType(
      imageTypeId, spv::StorageClassUniformConstant);
    
    const uint32_t varId = m_module.newVar(resourcePtrType,
      spv::StorageClassUniformConstant);
    
    m_module.setDebugName(varId,
      str::format(isUav ? "u" : "t", registerId).c_str());
    
    // Compute the DXVK binding slot index for the resource.
    // D3D11 needs to bind the actual resource to this slot.
    uint32_t bindingId = isUav
      ? computeUavBinding(m_programInfo.type(), registerId)
      : computeSrvBinding(m_programInfo.type(), registerId);
    
    m_module.decorateDescriptorSet(varId, 0);
    m_module.decorateBinding(varId, bindingId);
    
    // Declare a specialization constant which will
    // store whether or not the resource is bound.
    if (isUav) {
      DxbcUav uav;
      uav.type = DxbcResourceType::Typed;
      uav.imageInfo = typeInfo;
      uav.varId = varId;
      uav.ctrId = 0;
      uav.sampledType = sampledType;
      uav.sampledTypeId = sampledTypeId;
      uav.imageTypeId = imageTypeId;
      uav.structStride = 0;
      uav.coherence = getUavCoherence(registerId, ins.controls.uavFlags());
      uav.isRawSsbo = false;
      m_uavs.at(registerId) = uav;
    } else {
      DxbcShaderResource res;
      res.type = DxbcResourceType::Typed;
      res.imageInfo = typeInfo;
      res.varId = varId;
      res.sampledType = sampledType;
      res.sampledTypeId = sampledTypeId;
      res.imageTypeId = imageTypeId;
      res.colorTypeId = imageTypeId;
      res.depthTypeId = 0;
      res.structStride = 0;
      res.isRawSsbo = false;
      
      // Float textures of sampleable dimensionality also get a
      // depth-image type so depth-compare ops can be emitted.
      if ((sampledType == DxbcScalarType::Float32)
       && (resourceType == DxbcResourceDim::Texture1D
        || resourceType == DxbcResourceDim::Texture1DArr
        || resourceType == DxbcResourceDim::Texture2D
        || resourceType == DxbcResourceDim::Texture2DArr
        || resourceType == DxbcResourceDim::TextureCube
        || resourceType == DxbcResourceDim::TextureCubeArr)) {
        res.depthTypeId = m_module.defImageType(sampledTypeId,
          typeInfo.dim, 1, typeInfo.array, typeInfo.ms, typeInfo.sampled,
          spv::ImageFormatUnknown);
      }
      
      m_textures.at(registerId) = res;
    }
    
    // Store descriptor info for the shader interface
    DxvkBindingInfo binding = { };
    binding.viewType = typeInfo.vtype;
    binding.resourceBinding = bindingId;
    binding.isMultisampled = typeInfo.ms;

    if (isUav) {
      binding.descriptorType = resourceType == DxbcResourceDim::Buffer
        ? VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
        : VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
      binding.access = m_analysis->uavInfos[registerId].accessFlags;

      if (!m_analysis->uavInfos[registerId].nonInvariantAccess)
        binding.accessOp = m_analysis->uavInfos[registerId].accessOp;

      // Tell the driver about one-sided access where possible
      if (!(binding.access & VK_ACCESS_SHADER_WRITE_BIT))
        m_module.decorate(varId, spv::DecorationNonWritable);
      if (!(binding.access & VK_ACCESS_SHADER_READ_BIT))
        m_module.decorate(varId, spv::DecorationNonReadable);
    } else {
      binding.descriptorType = resourceType == DxbcResourceDim::Buffer
        ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
        : VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
      binding.access = VK_ACCESS_SHADER_READ_BIT;
    }

    m_bindings.push_back(binding);
  }
  
  
  // Declares a raw or structured buffer resource (t#/u#).
  // Depending on device options, the buffer is backed either
  // by a raw SSBO or by a 32-bit integer texel buffer.
  void emitDclResourceRawStructured(
    const DxbcShaderInstruction& ins) {
    // dcl_resource_raw and dcl_uav_raw take one argument:
    // (dst0) The resource register ID
    // dcl_resource_structured and dcl_uav_structured take two arguments:
    // (dst0) The resource register ID
    // (imm0) Structure stride, in bytes
    const uint32_t registerId = ins.dst[0].idx[0].offset;
    
    const bool isUav = ins.op == DxbcOpcode::DclUavRaw
                    || ins.op == DxbcOpcode::DclUavStructured;
    
    const bool isStructured = ins.op == DxbcOpcode::DclUavStructured
                           || ins.op == DxbcOpcode::DclResourceStructured;
    
    const DxbcScalarType sampledType = DxbcScalarType::Uint32;
    const uint32_t sampledTypeId = getScalarTypeId(sampledType);
    
    const DxbcImageInfo typeInfo = { spv::DimBuffer, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_MAX_ENUM };
    
    // Declare the resource type
    uint32_t resTypeId = 0;
    uint32_t varId = 0;
    
    // Write back resource info
    DxbcResourceType resType = isStructured
      ? DxbcResourceType::Structured
      : DxbcResourceType::Raw;
    
    uint32_t resStride = isStructured
      ? ins.imm[0].u32
      : 0;
    
    uint32_t resAlign = isStructured
      ?
        // Largest power of two dividing the stride
        (resStride & -resStride)
      : 16;
    
    // Compute the DXVK binding slot index for the resource.
    uint32_t bindingId = isUav
      ? computeUavBinding(m_programInfo.type(), registerId)
      : computeSrvBinding(m_programInfo.type(), registerId);
    
    // Test whether we should use a raw SSBO for this resource
    bool hasSparseFeedback = isUav
      ? m_analysis->uavInfos[registerId].sparseFeedback
      : m_analysis->srvInfos[registerId].sparseFeedback;

    bool useRawSsbo = m_moduleInfo.options.minSsboAlignment <= resAlign && !hasSparseFeedback;
    
    if (useRawSsbo) {
      // Runtime array of 32-bit uints wrapped in a block struct
      uint32_t elemType = getScalarTypeId(DxbcScalarType::Uint32);
      uint32_t arrayType = m_module.defRuntimeArrayTypeUnique(elemType);
      uint32_t structType = m_module.defStructTypeUnique(1, &arrayType);
      uint32_t ptrType = m_module.defPointerType(structType, spv::StorageClassStorageBuffer);

      resTypeId = m_module.defPointerType(elemType, spv::StorageClassStorageBuffer);
      varId = m_module.newVar(ptrType, spv::StorageClassStorageBuffer);
      
      m_module.decorateArrayStride(arrayType, sizeof(uint32_t));
      m_module.decorate(structType, spv::DecorationBlock);
      m_module.memberDecorateOffset(structType, 0, 0);

      m_module.setDebugName(structType,
        str::format(isUav ? "u" : "t", registerId, "_t").c_str());
      m_module.setDebugMemberName(structType, 0, "m");
    } else {
      // Structured and raw buffers are represented as
      // texel buffers consisting of 32-bit integers.
      m_module.enableCapability(isUav
        ? spv::CapabilityImageBuffer
        : spv::CapabilitySampledBuffer);
      
      resTypeId = m_module.defImageType(sampledTypeId,
        typeInfo.dim, 0, typeInfo.array, typeInfo.ms, typeInfo.sampled,
        spv::ImageFormatR32ui);
      
      varId = m_module.newVar(
        m_module.defPointerType(resTypeId, spv::StorageClassUniformConstant),
        spv::StorageClassUniformConstant);
    }

    m_module.setDebugName(varId,
      str::format(isUav ? "u" : "t", registerId).c_str());
    
    m_module.decorateDescriptorSet(varId, 0);
    m_module.decorateBinding(varId, bindingId);
    
    // Record resource info so that load/store instructions
    // can resolve the variable, stride and backing kind.
    if (isUav) {
      DxbcUav uav;
      uav.type = resType;
      uav.imageInfo = typeInfo;
      uav.varId = varId;
      uav.ctrId = 0;
      uav.sampledType = sampledType;
      uav.sampledTypeId = sampledTypeId;
      uav.imageTypeId = resTypeId;
      uav.structStride = resStride;
      uav.coherence = getUavCoherence(registerId, ins.controls.uavFlags());
      uav.isRawSsbo = useRawSsbo;
      m_uavs.at(registerId) = uav;
    } else {
      DxbcShaderResource res;
      res.type = resType;
      res.imageInfo = typeInfo;
      res.varId = varId;
      res.sampledType = sampledType;
      res.sampledTypeId = sampledTypeId;
      res.imageTypeId = resTypeId;
      res.colorTypeId = resTypeId;
      res.depthTypeId = 0;
      res.structStride = resStride;
      res.isRawSsbo = useRawSsbo;
      m_textures.at(registerId) = res;
    }
    
    // Store descriptor info for the shader interface
    DxvkBindingInfo binding = { };
    binding.descriptorType = useRawSsbo
      ? VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
      : (isUav ? VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
    binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;
    binding.resourceBinding = bindingId;
    binding.access = VK_ACCESS_SHADER_READ_BIT;

    if (isUav) {
      binding.access = m_analysis->uavInfos[registerId].accessFlags;

      if (!m_analysis->uavInfos[registerId].nonInvariantAccess)
        binding.accessOp = m_analysis->uavInfos[registerId].accessOp;
    }

    if (useRawSsbo || isUav) {
      if (!(binding.access & VK_ACCESS_SHADER_WRITE_BIT))
        m_module.decorate(varId, spv::DecorationNonWritable);
      if (!(binding.access & VK_ACCESS_SHADER_READ_BIT))
        m_module.decorate(varId, spv::DecorationNonReadable);
    }

    m_bindings.push_back(binding);

    // If supported, we'll be using raw access chains to access this
    if (!m_hasRawAccessChains && m_moduleInfo.options.supportsRawAccessChains) {
      m_module.enableExtension("SPV_NV_raw_access_chains");
      m_module.enableCapability(spv::CapabilityRawAccessChainsNV);

      m_hasRawAccessChains = true;
    }
  }
  
  
  // Declares thread-group shared memory (g#) as a workgroup-
  // storage array of 32-bit uints.
  void emitDclThreadGroupSharedMemory(
    const DxbcShaderInstruction& ins) {
    // dcl_tgsm_raw takes two arguments:
    // (dst0) The resource register ID
    // (imm0) Block size, in bytes
    // dcl_tgsm_structured takes three arguments:
    // (dst0) The resource register ID
    // (imm0) Structure stride, in bytes
    // (imm1) Structure count
    const bool isStructured = ins.op == DxbcOpcode::DclThreadGroupSharedMemoryStructured;
    
    const uint32_t regId = ins.dst[0].idx[0].offset;
    
    if (regId >= m_gRegs.size())
      m_gRegs.resize(regId + 1);
    
    const uint32_t elementStride = isStructured ? ins.imm[0].u32 : 0;
    const uint32_t elementCount = isStructured ? ins.imm[1].u32 : ins.imm[0].u32;
    
    // Array length in DWORDs (sizes above are in bytes)
    DxbcRegisterInfo varInfo;
    varInfo.type.ctype = DxbcScalarType::Uint32;
    varInfo.type.ccount = 1;
    varInfo.type.alength = isStructured
      ?
        elementCount * elementStride / 4
      : elementCount / 4;
    varInfo.sclass = spv::StorageClassWorkgroup;
    
    m_gRegs[regId].type = isStructured
      ? DxbcResourceType::Structured
      : DxbcResourceType::Raw;
    m_gRegs[regId].elementStride = elementStride;
    m_gRegs[regId].elementCount = elementCount;
    m_gRegs[regId].varId = emitNewVariable(varInfo);
    
    m_module.setDebugName(m_gRegs[regId].varId,
      str::format("g", regId).c_str());
  }
  
  
  // Declares the geometry shader input primitive type and
  // sizes the per-vertex input arrays accordingly.
  void emitDclGsInputPrimitive(
    const DxbcShaderInstruction& ins) {
    // The input primitive type is stored within the
    // control bits of the opcode token. In SPIR-V, we
    // have to define an execution mode.
    const auto mode = [&] {
      switch (ins.controls.primitive()) {
        case DxbcPrimitive::Point:       return std::make_pair(VK_PRIMITIVE_TOPOLOGY_POINT_LIST,                   spv::ExecutionModeInputPoints);
        case DxbcPrimitive::Line:        return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST,                    spv::ExecutionModeInputLines);
        case DxbcPrimitive::Triangle:    return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,                spv::ExecutionModeTriangles);
        case DxbcPrimitive::LineAdj:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,     spv::ExecutionModeInputLinesAdjacency);
        case DxbcPrimitive::TriangleAdj: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY, spv::ExecutionModeInputTrianglesAdjacency);
        default: throw DxvkError("DxbcCompiler: Unsupported primitive type");
      }
    }();

    m_gs.inputPrimitive = ins.controls.primitive();
    m_module.setExecutionMode(m_entryPointId, mode.second);
    m_inputTopology = mode.first;
    
    emitDclInputArray(primitiveVertexCount(m_gs.inputPrimitive));
  }
  
  
  // Declares the geometry shader output primitive topology.
  void emitDclGsOutputTopology(
    const DxbcShaderInstruction& ins) {
    // The output primitive topology is stored within the
    // control bits of the opcode token. In SPIR-V, we have
    // to define an execution mode.
    auto mode = [&] {
      switch (ins.controls.primitiveTopology()) {
        case DxbcPrimitiveTopology::PointList:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_POINT_LIST,    spv::ExecutionModeOutputPoints);
        case DxbcPrimitiveTopology::LineStrip:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST,     spv::ExecutionModeOutputLineStrip);
        case DxbcPrimitiveTopology::TriangleStrip: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeOutputTriangleStrip);
        default: throw DxvkError("DxbcCompiler: Unsupported primitive topology");
      }
    }();
    
    m_outputTopology = mode.first;
    m_module.setExecutionMode(m_entryPointId, mode.second);
  }
  
  
  void emitDclMaxOutputVertexCount(
    const DxbcShaderInstruction& ins) {
    // dcl_max_output_vertex_count has one operand:
    // (imm0) The maximum number of vertices
    m_gs.outputVertexCount = ins.imm[0].u32;
    
    m_module.setOutputVertices(m_entryPointId, m_gs.outputVertexCount);
  }
  
  
  // Declares the input patch size for hull and domain shaders;
  // domain shaders additionally set up their tess interfaces.
  void emitDclInputControlPointCount(
    const DxbcShaderInstruction& ins) {
    // dcl_input_control_points has the control point
    // count embedded within the opcode token.
    if (m_programInfo.type() == DxbcProgramType::HullShader) {
      m_hs.vertexCountIn = ins.controls.controlPointCount();
      
      emitDclInputArray(m_hs.vertexCountIn);
    } else {
      m_ds.vertexCountIn = ins.controls.controlPointCount();
      
      m_ds.inputPerPatch = emitTessInterfacePerPatch (spv::StorageClassInput);
      m_ds.inputPerVertex = emitTessInterfacePerVertex(spv::StorageClassInput, m_ds.vertexCountIn);
    }
  }
  
  
  // Declares the hull shader output patch size and its
  // per-patch / per-vertex output interfaces.
  void emitDclOutputControlPointCount(
    const DxbcShaderInstruction& ins) {
    // dcl_output_control_points has the control point
    // count embedded within the opcode token.
    m_hs.vertexCountOut = ins.controls.controlPointCount();
    
    m_hs.outputPerPatch = emitTessInterfacePerPatch(spv::StorageClassPrivate);
    m_hs.outputPerVertex = emitTessInterfacePerVertex(spv::StorageClassOutput, m_hs.vertexCountOut);
    
    m_module.setOutputVertices(m_entryPointId, m_hs.vertexCountOut);
  }
  
  
  void emitDclHsMaxTessFactor(
    const DxbcShaderInstruction& ins) {
    m_hs.maxTessFactor = ins.imm[0].f32;
  }
  
  
  // Declares the tessellation domain as a SPIR-V execution
  // mode and records the resulting primitive topology.
  void emitDclTessDomain(
    const DxbcShaderInstruction& ins) {
    auto mode = [&] {
      switch (ins.controls.tessDomain()) {
        case DxbcTessDomain::Isolines:  return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST,     spv::ExecutionModeIsolines);
        case DxbcTessDomain::Triangles: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeTriangles);
        case DxbcTessDomain::Quads:     return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeQuads);
        default: throw DxvkError("Dxbc: Invalid tess domain");
      }
    }();
    
    m_outputTopology = mode.first;
    m_module.setExecutionMode(m_entryPointId, mode.second);
  }
  
  
  void emitDclTessPartitioning(
    const DxbcShaderInstruction& ins) {
    const spv::ExecutionMode executionMode = [&] {
      switch (ins.controls.tessPartitioning()) {
        // SPIR-V has no pow2 spacing; equal spacing is the closest match
        case DxbcTessPartitioning::Pow2:
        case DxbcTessPartitioning::Integer:   return spv::ExecutionModeSpacingEqual;
        case DxbcTessPartitioning::FractOdd:  return spv::ExecutionModeSpacingFractionalOdd;
        case DxbcTessPartitioning::FractEven: return spv::ExecutionModeSpacingFractionalEven;
        default: throw DxvkError("Dxbc: Invalid tess partitioning");
      }
    }();
    
    m_module.setExecutionMode(m_entryPointId, executionMode);
  }
  
  
  // Declares the tessellator output primitive; Line needs no
  // extra mode since the domain already implies line output.
  void emitDclTessOutputPrimitive(
    const DxbcShaderInstruction& ins) {
    switch (ins.controls.tessOutputPrimitive()) {
      case DxbcTessOutputPrimitive::Point:
        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModePointMode);
        break;
      
      case DxbcTessOutputPrimitive::Line:
        break;
      
      case DxbcTessOutputPrimitive::TriangleCw:
        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeVertexOrderCw);
        break;
      
      case DxbcTessOutputPrimitive::TriangleCcw:
        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeVertexOrderCcw);
        break;
      
      default:
        throw DxvkError("Dxbc: Invalid tess output primitive");
    }
  }
  
  
  void emitDclThreadGroup(
    const DxbcShaderInstruction& ins) {
    // dcl_thread_group has three operands:
    // (imm0) Number of threads in X dimension
    // (imm1) Number of threads in Y dimension
    // (imm2) Number of threads in Z dimension
    m_cs.workgroupSizeX = ins.imm[0].u32;
    m_cs.workgroupSizeY = ins.imm[1].u32;
    m_cs.workgroupSizeZ = ins.imm[2].u32;

    m_module.setLocalSize(m_entryPointId,
      ins.imm[0].u32, ins.imm[1].u32, ins.imm[2].u32);
  }
  
  
  void emitDclGsInstanceCount(
    const DxbcShaderInstruction& ins) {
    // dcl_gs_instance_count has one operand:
    // (imm0) Number of geometry shader invocations
    m_module.setInvocations(m_entryPointId, ins.imm[0].u32);
    m_gs.invocationCount = ins.imm[0].u32;
  }
  
  
  // Declares the storage buffer backing the append/consume
  // counter of a UAV. Returns the new buffer variable ID.
  uint32_t emitDclUavCounter(
    uint32_t regId) {
    // Declare a structure type which holds the UAV counter.
    // The struct type is shared between all UAV counters.
    if (m_uavCtrStructType == 0) {
      const uint32_t t_u32 = m_module.defIntType(32, 0);
      const uint32_t t_struct = m_module.defStructTypeUnique(1, &t_u32);
      
      m_module.decorate(t_struct, spv::DecorationBlock);
      m_module.memberDecorateOffset(t_struct, 0, 0);
      
      m_module.setDebugName (t_struct, "uav_meta");
      m_module.setDebugMemberName(t_struct, 0, "ctr");
      
      m_uavCtrStructType = t_struct;
      m_uavCtrPointerType = m_module.defPointerType(
        t_struct, spv::StorageClassStorageBuffer);
    }
    
    // Declare the buffer variable
    const uint32_t varId = m_module.newVar(
      m_uavCtrPointerType, spv::StorageClassStorageBuffer);
    
    m_module.setDebugName(varId,
      str::format("u", regId, "_meta").c_str());
    
    uint32_t bindingId = computeUavCounterBinding(
      m_programInfo.type(), regId);
    
    m_module.decorateDescriptorSet(varId, 0);
    m_module.decorateBinding(varId, bindingId);
    
    // Declare the storage buffer binding
    DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER };
    binding.resourceBinding = bindingId;
    binding.viewType =
      VK_IMAGE_VIEW_TYPE_MAX_ENUM;
    binding.access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
    m_bindings.push_back(binding);

    return varId;
  }
  
  
  // Declares the shader's immediate constant buffer. Small
  // buffers are baked into the module as a constant array,
  // larger ones are uploaded through a uniform buffer.
  void emitDclImmediateConstantBuffer(
    const DxbcShaderInstruction& ins) {
    if (m_icbArray)
      throw DxvkError("DxbcCompiler: Immediate constant buffer already declared");
    
    if ((ins.customDataSize & 0x3) != 0)
      throw DxvkError("DxbcCompiler: Immediate constant buffer size not a multiple of four DWORDs");

    // A lot of the time we'll be dealing with a scalar or vec2
    // array here, there's no reason to emit all those zeroes.
    // Find the highest component index that is actually non-zero.
    uint32_t componentCount = 1u;

    for (uint32_t i = 0; i < ins.customDataSize; i += 4u) {
      for (uint32_t c = componentCount; c < 4u; c++) {
        if (ins.customData[i + c])
          componentCount = c + 1u;
      }

      if (componentCount == 4u)
        break;
    }

    uint32_t vectorCount = (ins.customDataSize / 4u);
    uint32_t dwordCount = vectorCount * componentCount;

    if (dwordCount <= Icb_MaxBakedDwords) {
      this->emitDclImmediateConstantBufferBaked(
        ins.customDataSize, ins.customData, componentCount);
    } else {
      this->emitDclImmediateConstantBufferUbo(
        ins.customDataSize, ins.customData, componentCount);
    }
  }
  
  
  // Bakes the immediate constant buffer into the SPIR-V module
  // as a private, non-writable constant array.
  void emitDclImmediateConstantBufferBaked(
          uint32_t                dwordCount,
    const uint32_t*               dwordArray,
          uint32_t                componentCount) {
    // Declare individual vector constants as 4x32-bit vectors
    small_vector<uint32_t, 16> vectorIds;
    
    DxbcVectorType vecType;
    vecType.ctype = DxbcScalarType::Uint32;
    vecType.ccount = componentCount;
    
    uint32_t vectorTypeId = getVectorTypeId(vecType);
    
    for (uint32_t i = 0; i < dwordCount; i += 4u) {
      std::array<uint32_t, 4> scalarIds = { };

      for (uint32_t c = 0; c < componentCount; c++)
        scalarIds[c] = m_module.constu32(dwordArray[i + c]);

      uint32_t id = scalarIds[0];

      if (componentCount > 1u)
        id = m_module.constComposite(vectorTypeId, componentCount, scalarIds.data());

      vectorIds.push_back(id);
    }

    // Pad array with one entry of zeroes so that we can
    // handle out-of-bounds accesses more conveniently.
    vectorIds.push_back(emitBuildZeroVector(vecType).id);

    // Declare the array that contains all the vectors
    DxbcArrayType arrInfo;
    arrInfo.ctype = DxbcScalarType::Uint32;
    arrInfo.ccount = componentCount;
    arrInfo.alength = vectorIds.size();

    uint32_t arrayTypeId = getArrayTypeId(arrInfo);
    uint32_t arrayId = m_module.constComposite(
      arrayTypeId, vectorIds.size(), vectorIds.data());

    // Declare the variable that will hold the constant
    // data and initialize it with the constant array.
    uint32_t pointerTypeId = m_module.defPointerType(
      arrayTypeId, spv::StorageClassPrivate);

    m_icbArray = m_module.newVarInit(
      pointerTypeId, spv::StorageClassPrivate,
      arrayId);

    m_module.setDebugName(m_icbArray, "icb");
    m_module.decorate(m_icbArray, spv::DecorationNonWritable);

    m_icbComponents = componentCount;
    m_icbSize = dwordCount / 4u;
  }
  
  
  // Declares the immediate constant buffer as a uniform buffer
  // and stages its data for upload by the backend.
  void emitDclImmediateConstantBufferUbo(
          uint32_t                dwordCount,
    const uint32_t*               dwordArray,
          uint32_t                componentCount) {
    uint32_t vectorCount = dwordCount / 4u;

    // Tightly pack vec2 or scalar arrays if possible. Don't bother with
    // vec3 since we'd rather have properly vectorized loads in that case.
    if (m_moduleInfo.options.supportsTightIcbPacking && componentCount <= 2u)
      m_icbComponents = componentCount;
    else
      m_icbComponents = 4u;

    // Immediate constant buffer can be read out of bounds, declare
    // it with the maximum possible size and rely on robustness.
    this->emitDclConstantBufferVar(Icb_BindingSlotId, 4096u, m_icbComponents, "icb");

    m_icbData.reserve(vectorCount * componentCount);

    for (uint32_t i = 0; i < dwordCount; i += 4u) {
      for (uint32_t c = 0; c < m_icbComponents; c++)
        m_icbData.push_back(dwordArray[i + c]);
    }

    m_icbSize = vectorCount;
  }
  
  
  // Dispatches custom-data blocks; only immediate constant
  // buffers are handled, other classes are logged and ignored.
  void emitCustomData(
    const DxbcShaderInstruction& ins) {
    switch (ins.customDataType) {
      case DxbcCustomDataClass::ImmConstBuf:
        return emitDclImmediateConstantBuffer(ins);
      
      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unsupported custom data block: ",
          ins.customDataType));
    }
  }
  
  
  // Emits component-wise vector ALU instructions: moves,
  // float/int arithmetic, bit ops and numeric conversions.
  void emitVectorAlu(
    const DxbcShaderInstruction& ins) {
    std::array<DxbcRegisterValue, 4> src;
    
    for (uint32_t i = 0; i < ins.srcCount; i++)
      src.at(i) = emitRegisterLoad(ins.src[i], ins.dst[0].mask);
    
    DxbcRegisterValue dst;
    dst.type.ctype = ins.dst[0].dataType;
    dst.type.ccount = ins.dst[0].mask.popCount();

    // Doubles take two mask components per value
    if (isDoubleType(ins.dst[0].dataType))
      dst.type.ccount /= 2;
    
    const uint32_t typeId = getVectorTypeId(dst.type);
    
    switch (ins.op) {
      /////////////////////
      // Move instructions
      case DxbcOpcode::Mov:
      case DxbcOpcode::DMov:
        dst.id = src.at(0).id;
        break;
      
      /////////////////////////////////////
      // ALU operations on float32 numbers
      case DxbcOpcode::Add:
      case DxbcOpcode::DAdd:
        dst.id = m_module.opFAdd(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::Div:
      case DxbcOpcode::DDiv:
        dst.id = m_module.opFDiv(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::Exp:
        dst.id = m_module.opExp2(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::Frc:
        dst.id = m_module.opFract(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::Log:
        dst.id = m_module.opLog2(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::Mad:
      case DxbcOpcode::DFma:
        if (ins.controls.precise()) {
          // FXC only emits precise mad if the shader explicitly uses
          // the HLSL mad()/fma() intrinsics, let's preserve that.
          dst.id = m_module.opFFma(typeId,
            src.at(0).id, src.at(1).id, src.at(2).id);
        } else {
          dst.id = m_module.opFMul(typeId, src.at(0).id, src.at(1).id);
          dst.id = m_module.opFAdd(typeId, dst.id, src.at(2).id);
        }
        break;
      
      // NMax/NMin prefer the non-NaN operand, matching
      // D3D min/max semantics for NaN inputs.
      case DxbcOpcode::Max:
      case DxbcOpcode::DMax:
        dst.id = m_module.opNMax(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::Min:
      case DxbcOpcode::DMin:
        dst.id = m_module.opNMin(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::Mul:
      case DxbcOpcode::DMul:
        dst.id = m_module.opFMul(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::Rcp:
        dst.id = m_module.opFDiv(typeId,
          emitBuildConstVecf32(
            1.0f, 1.0f, 1.0f, 1.0f,
            ins.dst[0].mask).id,
          src.at(0).id);
        break;
      
      case DxbcOpcode::DRcp:
        dst.id = m_module.opFDiv(typeId,
          emitBuildConstVecf64(1.0, 1.0,
            ins.dst[0].mask).id,
          src.at(0).id);
        break;
      
      case DxbcOpcode::RoundNe:
        dst.id = m_module.opRoundEven(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::RoundNi:
        dst.id = m_module.opFloor(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::RoundPi:
        dst.id = m_module.opCeil(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::RoundZ:
        dst.id = m_module.opTrunc(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::Rsq:
        dst.id = m_module.opInverseSqrt(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::Sqrt:
        dst.id = m_module.opSqrt(
          typeId, src.at(0).id);
        break;
      
      /////////////////////////////////////
      // ALU operations on signed integers
      case DxbcOpcode::IAdd:
        dst.id = m_module.opIAdd(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::IMad:
      case DxbcOpcode::UMad:
        dst.id = m_module.opIAdd(typeId,
          m_module.opIMul(typeId,
            src.at(0).id, src.at(1).id),
          src.at(2).id);
        break;
      
      case DxbcOpcode::IMax:
        dst.id = m_module.opSMax(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::IMin:
        dst.id = m_module.opSMin(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::INeg:
        dst.id = m_module.opSNegate(
          typeId, src.at(0).id);
        break;
      
      ///////////////////////////////////////
      // ALU operations on unsigned integers
      case DxbcOpcode::UMax:
        dst.id = m_module.opUMax(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::UMin:
        dst.id = m_module.opUMin(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      ///////////////////////////////////////
      // Bit operations on unsigned integers
      case DxbcOpcode::And:
        dst.id = m_module.opBitwiseAnd(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::Not:
        dst.id = m_module.opNot(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::Or:
        dst.id = m_module.opBitwiseOr(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::Xor:
        dst.id = m_module.opBitwiseXor(typeId,
          src.at(0).id, src.at(1).id);
        break;
      
      case DxbcOpcode::CountBits:
        dst.id = m_module.opBitCount(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::BfRev:
        dst.id = m_module.opBitReverse(
          typeId, src.at(0).id);
        break;
      
      ///////////////////////////
      // Conversion instructions
      case DxbcOpcode::ItoF:
        dst.id = m_module.opConvertStoF(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::UtoF:
        dst.id = m_module.opConvertUtoF(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::FtoI:
        dst.id = m_module.opConvertFtoS(
          typeId, src.at(0).id);
        break;
      
      case DxbcOpcode::FtoU:
        dst.id = m_module.opConvertFtoU(
          typeId, src.at(0).id);
        break;
      
      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }
    
    if (ins.controls.precise() || m_precise)
      m_module.decorate(dst.id, spv::DecorationNoContraction);
    
    // Store computed value
    dst = emitDstOperandModifiers(dst, ins.modifiers);
    emitRegisterStore(ins.dst[0], dst);
  }
  
  
  // Emits conditional moves (movc) and conditional swaps (swapc)
  // using a component-wise OpSelect.
  void emitVectorCmov(
    const DxbcShaderInstruction& ins) {
    // movc and swapc have the following operands:
    // (dst0) The first destination register
    // (dst1) The second destination register (swapc only)
    // (src0) The condition vector
    // (src1) Vector to select from if the condition is not 0
    // (src2) Vector to select from if the condition is 0
    DxbcRegMask condMask = ins.dst[0].mask;

    // For doubles, each condition component covers two mask bits
    if (ins.dst[0].dataType == DxbcScalarType::Float64) {
      condMask = DxbcRegMask(
        condMask[0] && condMask[1],
        condMask[2] && condMask[3],
        false, false);
    }
    
    const DxbcRegisterValue condition = emitRegisterLoad(ins.src[0], condMask);
    const DxbcRegisterValue selectTrue = emitRegisterLoad(ins.src[1], ins.dst[0].mask);
    const DxbcRegisterValue selectFalse = emitRegisterLoad(ins.src[2], ins.dst[0].mask);
    
    uint32_t componentCount = condMask.popCount();
    
    // We'll compare against a vector of zeroes to generate a
    // boolean vector, which in turn will be used by OpSelect
    uint32_t zeroType = m_module.defIntType(32, 0);
    uint32_t boolType = m_module.defBoolType();
    
    uint32_t zero = m_module.constu32(0);
    
    if (componentCount > 1) {
      zeroType = m_module.defVectorType(zeroType, componentCount);
      boolType = m_module.defVectorType(boolType, componentCount);
      
      const std::array<uint32_t, 4> zeroVec = { zero, zero, zero, zero };
      zero = m_module.constComposite(zeroType, componentCount, zeroVec.data());
    }
    
    // In case of swapc, the second destination operand receives
    // the output that a cmov instruction would normally get
    const uint32_t trueIndex = ins.op == DxbcOpcode::Swapc ?
1 : 0;\n \n for (uint32_t i = 0; i < ins.dstCount; i++) {\n DxbcRegisterValue result;\n result.type.ctype = ins.dst[i].dataType;\n result.type.ccount = componentCount;\n result.id = m_module.opSelect(\n getVectorTypeId(result.type),\n m_module.opINotEqual(boolType, condition.id, zero),\n i == trueIndex ? selectTrue.id : selectFalse.id,\n i != trueIndex ? selectTrue.id : selectFalse.id);\n \n result = emitDstOperandModifiers(result, ins.modifiers);\n emitRegisterStore(ins.dst[i], result);\n }\n }\n void emitVectorCmp(\n const DxbcShaderInstruction& ins) {\n // Compare instructions have three operands:\n // (dst0) The destination register\n // (src0) The first vector to compare\n // (src1) The second vector to compare\n uint32_t componentCount = ins.dst[0].mask.popCount();\n\n // For 64-bit operations, we'll return a 32-bit\n // vector, so we have to adjust the read mask\n DxbcRegMask srcMask = ins.dst[0].mask;\n\n if (isDoubleType(ins.src[0].dataType)) {\n srcMask = DxbcRegMask(\n componentCount > 0, componentCount > 0,\n componentCount > 1, componentCount > 1);\n }\n\n const std::array src = {\n emitRegisterLoad(ins.src[0], srcMask),\n emitRegisterLoad(ins.src[1], srcMask),\n };\n \n // Condition, which is a boolean vector used\n // to select between the ~0u and 0u vectors.\n uint32_t condition = 0;\n uint32_t conditionType = m_module.defBoolType();\n \n if (componentCount > 1)\n conditionType = m_module.defVectorType(conditionType, componentCount);\n \n bool invert = false;\n\n switch (ins.op) {\n case DxbcOpcode::Ne:\n case DxbcOpcode::DNe:\n invert = true;\n [[fallthrough]];\n\n case DxbcOpcode::Eq:\n case DxbcOpcode::DEq:\n condition = m_module.opFOrdEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Ge:\n case DxbcOpcode::DGe:\n condition = m_module.opFOrdGreaterThanEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Lt:\n case DxbcOpcode::DLt:\n condition = m_module.opFOrdLessThan(\n 
        conditionType, src.at(0).id, src.at(1).id);
      break;

    case DxbcOpcode::IEq:
      condition = m_module.opIEqual(
        conditionType, src.at(0).id, src.at(1).id);
      break;

    case DxbcOpcode::IGe:
      condition = m_module.opSGreaterThanEqual(
        conditionType, src.at(0).id, src.at(1).id);
      break;

    case DxbcOpcode::ILt:
      condition = m_module.opSLessThan(
        conditionType, src.at(0).id, src.at(1).id);
      break;

    case DxbcOpcode::INe:
      condition = m_module.opINotEqual(
        conditionType, src.at(0).id, src.at(1).id);
      break;

    case DxbcOpcode::UGe:
      condition = m_module.opUGreaterThanEqual(
        conditionType, src.at(0).id, src.at(1).id);
      break;

    case DxbcOpcode::ULt:
      condition = m_module.opULessThan(
        conditionType, src.at(0).id, src.at(1).id);
      break;

    default:
      Logger::warn(str::format(
        "DxbcCompiler: Unhandled instruction: ",
        ins.op));
      return;
  }

  // Generate constant vectors for selection
  uint32_t sFalse = m_module.constu32( 0u);
  uint32_t sTrue  = m_module.constu32(~0u);

  DxbcRegisterValue result;
  result.type.ctype  = DxbcScalarType::Uint32;
  result.type.ccount = componentCount;

  const uint32_t typeId = getVectorTypeId(result.type);

  if (componentCount > 1) {
    const std::array vFalse = { sFalse, sFalse, sFalse, sFalse };
    const std::array vTrue  = { sTrue,  sTrue,  sTrue,  sTrue  };

    sFalse = m_module.constComposite(typeId, componentCount, vFalse.data());
    sTrue  = m_module.constComposite(typeId, componentCount, vTrue .data());
  }

  // For the inverted opcodes (ne/dne), swap the select
  // operands instead of emitting a logical-not.
  if (invert)
    std::swap(sFalse, sTrue);

  // Perform component-wise mask selection
  // based on the condition evaluated above.
  result.id = m_module.opSelect(
    typeId, condition, sTrue, sFalse);

  emitRegisterStore(ins.dst[0], result);
}


/// Handles the screen-space derivative opcodes
/// (deriv_rtx/rty and their coarse/fine variants).
void emitVectorDeriv(
  const DxbcShaderInstruction& ins) {
  // Derivative instructions have two operands:
  // (dst0) Destination register for the derivative
  // (src0) The operand to compute the derivative of
  DxbcRegisterValue value = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
  const uint32_t typeId = getVectorTypeId(value.type);

  switch (ins.op) {
    case DxbcOpcode::DerivRtx:
      value.id = m_module.opDpdx(typeId, value.id);
      break;

    case DxbcOpcode::DerivRty:
      value.id = m_module.opDpdy(typeId, value.id);
      break;

    case DxbcOpcode::DerivRtxCoarse:
      value.id = m_module.opDpdxCoarse(typeId, value.id);
      break;

    case DxbcOpcode::DerivRtyCoarse:
      value.id = m_module.opDpdyCoarse(typeId, value.id);
      break;

    case DxbcOpcode::DerivRtxFine:
      value.id = m_module.opDpdxFine(typeId, value.id);
      break;

    case DxbcOpcode::DerivRtyFine:
      value.id = m_module.opDpdyFine(typeId, value.id);
      break;

    default:
      Logger::warn(str::format(
        "DxbcCompiler: Unhandled instruction: ",
        ins.op));
      return;
  }

  value = emitDstOperandModifiers(value, ins.modifiers);
  emitRegisterStore(ins.dst[0], value);
}


/// Handles dp2/dp3/dp4 as an explicit mul/fma chain rather
/// than OpDot, so the summation order is well-defined.
void emitVectorDot(
  const DxbcShaderInstruction& ins) {
  // Read .x, .xy or .xyzw from both sources depending on the opcode.
  const DxbcRegMask srcMask(true,
    ins.op >= DxbcOpcode::Dp2,
    ins.op >= DxbcOpcode::Dp3,
    ins.op >= DxbcOpcode::Dp4);

  const std::array src = {
    emitRegisterLoad(ins.src[0], srcMask),
    emitRegisterLoad(ins.src[1], srcMask),
  };

  DxbcRegisterValue dst;
  dst.type.ctype  = ins.dst[0].dataType;
  dst.type.ccount = 1;
  dst.id = 0;

  uint32_t componentType  = getVectorTypeId(dst.type);
  uint32_t componentCount = srcMask.popCount();

  for (uint32_t i = 0; i < componentCount; i++) {
    // First component is a plain multiply; subsequent
    // components are folded in via fused multiply-add.
    if (dst.id) {
      dst.id = m_module.opFFma(componentType,
        m_module.opCompositeExtract(componentType, src.at(0).id, 1, &i),
        m_module.opCompositeExtract(componentType, src.at(1).id, 1, &i),
        dst.id);
    } else {
      dst.id = m_module.opFMul(componentType,
        m_module.opCompositeExtract(componentType, src.at(0).id, 1, &i),
        m_module.opCompositeExtract(componentType, src.at(1).id, 1, &i));
    }

    // Unconditionally mark as precise since the exact order of operation
    // matters for some games, even if the instruction itself is not marked
    // as precise.
    m_module.decorate(dst.id, spv::DecorationNoContraction);
  }

  dst = emitDstOperandModifiers(dst, ins.modifiers);
  emitRegisterStore(ins.dst[0], dst);
}


/// Handles udiv: computes quotient and remainder in one pass,
/// returning 0xFFFFFFFF in both on division by zero.
void emitVectorIdiv(
  const DxbcShaderInstruction& ins) {
  // udiv has four operands:
  // (dst0) Quotient destination register
  // (dst1) Remainder destination register
  // (src0) The first vector to compare
  // (src1) The second vector to compare
  if (ins.dst[0].type == DxbcOperandType::Null
   && ins.dst[1].type == DxbcOperandType::Null)
    return;

  // FIXME support this if applications require it
  if (ins.dst[0].type != DxbcOperandType::Null
   && ins.dst[1].type != DxbcOperandType::Null
   && ins.dst[0].mask != ins.dst[1].mask) {
    Logger::warn("DxbcCompiler: Idiv with different destination masks not supported");
    return;
  }

  // Load source operands as integers with the
  // mask of one non-NULL destination operand
  const DxbcRegMask srcMask =
    ins.dst[0].type != DxbcOperandType::Null
      ?
        ins.dst[0].mask
      : ins.dst[1].mask;

  const std::array src = {
    emitRegisterLoad(ins.src[0], srcMask),
    emitRegisterLoad(ins.src[1], srcMask),
  };

  // Division by zero will return 0xffffffff for both results
  auto bvecId = getVectorTypeId({ DxbcScalarType::Bool, srcMask.popCount() });

  DxbcRegisterValue const0  = emitBuildConstVecu32( 0u,  0u,  0u,  0u, srcMask);
  DxbcRegisterValue constff = emitBuildConstVecu32(~0u, ~0u, ~0u, ~0u, srcMask);

  // Per-lane "divisor is non-zero" predicate, reused for both results
  uint32_t cmpValue = m_module.opINotEqual(bvecId, src.at(1).id, const0.id);

  // Compute results only if the destination
  // operands are not NULL.
  if (ins.dst[0].type != DxbcOperandType::Null) {
    DxbcRegisterValue quotient;
    quotient.type.ctype  = ins.dst[0].dataType;
    quotient.type.ccount = ins.dst[0].mask.popCount();

    quotient.id = m_module.opUDiv(
      getVectorTypeId(quotient.type),
      src.at(0).id, src.at(1).id);

    quotient.id = m_module.opSelect(
      getVectorTypeId(quotient.type),
      cmpValue, quotient.id, constff.id);

    quotient = emitDstOperandModifiers(quotient, ins.modifiers);
    emitRegisterStore(ins.dst[0], quotient);
  }

  if (ins.dst[1].type != DxbcOperandType::Null) {
    DxbcRegisterValue remainder;
    remainder.type.ctype  = ins.dst[1].dataType;
    remainder.type.ccount = ins.dst[1].mask.popCount();

    remainder.id = m_module.opUMod(
      getVectorTypeId(remainder.type),
      src.at(0).id, src.at(1).id);

    remainder.id = m_module.opSelect(
      getVectorTypeId(remainder.type),
      cmpValue, remainder.id, constff.id);

    remainder = emitDstOperandModifiers(remainder, ins.modifiers);
    emitRegisterStore(ins.dst[1], remainder);
  }
}


/// Handles imul/umul. Only the low-result form (dst0 == NULL) is
/// implemented; the extended high/low form is a known TODO.
void emitVectorImul(
  const DxbcShaderInstruction& ins) {
  // imul and umul have four operands:
  // (dst0) High destination register
  // (dst1) Low destination register
  // (src0) The first vector to compare
  // (src1) The second vector to compare
  if (ins.dst[0].type == DxbcOperandType::Null) {
    if (ins.dst[1].type == DxbcOperandType::Null)
      return;

    // If dst0 is NULL, this instruction behaves just
    // like any other three-operand ALU instruction
    const std::array src = {
      emitRegisterLoad(ins.src[0], ins.dst[1].mask),
      emitRegisterLoad(ins.src[1], ins.dst[1].mask),
    };

    DxbcRegisterValue result;
    result.type.ctype  = ins.dst[1].dataType;
    result.type.ccount = ins.dst[1].mask.popCount();
    result.id = m_module.opIMul(
      getVectorTypeId(result.type),
      src.at(0).id, src.at(1).id);

    result = emitDstOperandModifiers(result, ins.modifiers);
    emitRegisterStore(ins.dst[1], result);
  } else {
    // TODO implement this
    Logger::warn("DxbcCompiler: Extended Imul not yet supported");
  }
}


/// Handles msad: masked sum of absolute byte differences,
/// accumulating into src2. Bytes are unpacked via BitFieldUExtract;
/// a zero reference byte is treated as masked (not accumulated).
void emitVectorMsad(
  const DxbcShaderInstruction& ins) {
  // msad has four operands:
  // (dst0) Destination
  // (src0) Reference (packed uint8)
  // (src1) Source (packed uint8)
  // (src2) Accumulator
  DxbcRegisterValue refReg = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
  DxbcRegisterValue srcReg = emitRegisterLoad(ins.src[1], ins.dst[0].mask);
  DxbcRegisterValue result = emitRegisterLoad(ins.src[2], ins.dst[0].mask);

  auto typeId = getVectorTypeId(result.type);
  auto bvecId = getVectorTypeId({ DxbcScalarType::Bool, result.type.ccount });

  // Process each of the four packed bytes per component
  for (uint32_t i = 0; i < 4; i++) {
    auto shift = m_module.constu32(8 * i);
    auto count = m_module.constu32(8);

    auto ref = m_module.opBitFieldUExtract(typeId, refReg.id, shift, count);
    auto src = m_module.opBitFieldUExtract(typeId, srcReg.id, shift, count);

    auto zero = emitBuildConstVecu32(0, 0, 0, 0, ins.dst[0].mask);
    auto mask = m_module.opINotEqual(bvecId, ref, zero.id);

    auto diff = m_module.opSAbs(typeId, m_module.opISub(typeId, ref, src));
    result.id = m_module.opSelect(typeId, mask, m_module.opIAdd(typeId, result.id, diff), result.id);
  }

  result = emitDstOperandModifiers(result, ins.modifiers);
  emitRegisterStore(ins.dst[0], result);
}


/// Handles ishl/ishr/ushr. The shift count is masked to the low
/// five bits (D3D semantics) unless it is an immediate.
void emitVectorShift(
  const DxbcShaderInstruction& ins) {
  // Shift operations have three operands:
  // (dst0) The destination register
  // (src0) The register to shift
  // (src1) The shift amount (scalar)
  DxbcRegisterValue shiftReg = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
  DxbcRegisterValue countReg = emitRegisterLoad(ins.src[1], ins.dst[0].mask);

  if (ins.src[1].type != DxbcOperandType::Imm32)
    countReg = emitRegisterMaskBits(countReg, 0x1F);

  // Broadcast a scalar shift count across all shifted components
  if (countReg.type.ccount == 1)
    countReg = emitRegisterExtend(countReg, shiftReg.type.ccount);

  DxbcRegisterValue result;
  result.type.ctype  = ins.dst[0].dataType;
  result.type.ccount = ins.dst[0].mask.popCount();

  switch (ins.op) {
    case DxbcOpcode::IShl:
      result.id = m_module.opShiftLeftLogical(
        getVectorTypeId(result.type),
        shiftReg.id, countReg.id);
      break;

    case DxbcOpcode::IShr:
      result.id = m_module.opShiftRightArithmetic(
        getVectorTypeId(result.type),
        shiftReg.id, countReg.id);
      break;

    case DxbcOpcode::UShr:
      result.id = m_module.opShiftRightLogical(
        getVectorTypeId(result.type),
        shiftReg.id, countReg.id);
      break;

    default:
      Logger::warn(str::format(
        "DxbcCompiler: Unhandled instruction: ",
        ins.op));
      return;
  }

  result = emitDstOperandModifiers(result, ins.modifiers);
  emitRegisterStore(ins.dst[0], result);
}


/// Handles sincos: computes sin(x) and cos(x) per enabled component.
/// NOTE(review): the std::array declarations below lost their template
/// arguments in transit (empty-brace init cannot be CTAD-deduced) --
/// restore the element type/count before compiling.
void emitVectorSinCos(
  const DxbcShaderInstruction& ins) {
  // sincos has three operands:
  // (dst0) Destination register for sin(x)
  // (dst1) Destination register for cos(x)
  // (src0) Source operand x

  // Load source operand as 32-bit float vector.
  const DxbcRegisterValue srcValue = emitRegisterLoad(
    ins.src[0], DxbcRegMask(true, true, true, true));

  uint32_t typeId = getScalarTypeId(srcValue.type.ctype);

  DxbcRegisterValue sinVector = { };
  sinVector.type.ctype = DxbcScalarType::Float32;

  DxbcRegisterValue cosVector = { };
  cosVector.type.ctype = DxbcScalarType::Float32;

  // Only compute sincos for enabled components
  std::array sinIds = { };
  std::array cosIds = { };

  for (uint32_t i = 0; i < 4; i++) {
    // opSinCos packs sin into component 0 and cos into component 1
    const uint32_t sinIndex = 0u;
    const uint32_t cosIndex = 1u;

    if (ins.dst[0].mask[i] || ins.dst[1].mask[i]) {
      uint32_t sincosId = m_module.opSinCos(m_module.opCompositeExtract(typeId, srcValue.id, 1u, &i), !m_moduleInfo.options.sincosEmulation);

      if (ins.dst[0].type != DxbcOperandType::Null && ins.dst[0].mask[i])
        sinIds[sinVector.type.ccount++] = m_module.opCompositeExtract(typeId, sincosId, 1u, &sinIndex);

      if (ins.dst[1].type != DxbcOperandType::Null && ins.dst[1].mask[i])
        cosIds[cosVector.type.ccount++] = m_module.opCompositeExtract(typeId, sincosId, 1u, &cosIndex);
    }
  }

  if (sinVector.type.ccount) {
    sinVector.id = sinVector.type.ccount > 1u
      ? m_module.opCompositeConstruct(getVectorTypeId(sinVector.type), sinVector.type.ccount, sinIds.data())
      : sinIds[0];

    emitRegisterStore(ins.dst[0], sinVector);
  }

  if (cosVector.type.ccount) {
    cosVector.id = cosVector.type.ccount > 1u
      ? m_module.opCompositeConstruct(getVectorTypeId(cosVector.type), cosVector.type.ccount, cosIds.data())
      : cosIds[0];

    emitRegisterStore(ins.dst[1], cosVector);
  }
}


/// Handles emit/cut (and the *stream/emit-then-cut variants)
/// in geometry shaders.
void emitGeometryEmit(
  const DxbcShaderInstruction& ins) {
  // In xfb mode we might have multiple streams, so
  // we have to figure out which stream to write to
  uint32_t streamId  = 0;
  uint32_t streamVar = 0;

  if (m_moduleInfo.xfb != nullptr) {
    streamId = ins.dstCount > 0
      ?
        ins.dst[0].idx[0].offset : 0;
    streamVar = m_module.constu32(streamId);
  }

  // Checking the negation is easier for EmitThenCut/EmitThenCutStream
  bool doEmit = ins.op != DxbcOpcode::Cut  && ins.op != DxbcOpcode::CutStream;
  bool doCut  = ins.op != DxbcOpcode::Emit && ins.op != DxbcOpcode::EmitStream;

  if (doEmit) {
    // Flush pending per-vertex state before emitting the vertex
    if (m_gs.needsOutputSetup)
      emitOutputSetup();
    emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);
    emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);
    emitXfbOutputSetup(streamId, false);
    m_module.opEmitVertex(streamVar);
  }

  if (doCut)
    m_module.opEndPrimitive(streamVar);
}


/// Handles the atomic_* and imm_atomic_* opcodes on UAVs and
/// thread-group shared memory.
/// NOTE(review): 'std::array src;' below lost its template arguments
/// in transit (element type/count, presumably DxbcRegisterValue and 2)
/// -- restore before compiling.
void emitAtomic(
  const DxbcShaderInstruction& ins) {
  // atomic_* operations have the following operands:
  // (dst0) Destination u# or g# register
  // (src0) Index into the texture or buffer
  // (src1) The source value for the operation
  // (src2) Second source operand (optional)
  // imm_atomic_* operations have the following operands:
  // (dst0) Register that receives the result
  // (dst1) Destination u# or g# register
  // (srcX) As above
  const DxbcBufferInfo bufferInfo = getBufferInfo(ins.dst[ins.dstCount - 1]);

  bool isImm  = ins.dstCount == 2;
  bool isUav  = ins.dst[ins.dstCount - 1].type == DxbcOperandType::UnorderedAccessView;
  bool isSsbo = bufferInfo.isSsbo;

  // Retrieve destination pointer for the atomic operation
  const DxbcRegisterPointer pointer = emitGetAtomicPointer(
    ins.dst[ins.dstCount - 1], ins.src[0]);

  // Load source values
  std::array src;

  for (uint32_t i = 1; i < ins.srcCount; i++) {
    src[i - 1] = emitRegisterBitcast(
      emitRegisterLoad(ins.src[i], DxbcRegMask(true, false, false, false)),
      pointer.type.ctype);
  }

  // Define memory scope and semantics based on the operands
  uint32_t scope     = 0;
  uint32_t semantics = 0;

  if (isUav) {
    scope     = spv::ScopeQueueFamily;
    semantics = spv::MemorySemanticsAcquireReleaseMask;

    semantics |= isSsbo
      ? spv::MemorySemanticsUniformMemoryMask
      : spv::MemorySemanticsImageMemoryMask;
  } else {
    scope     = spv::ScopeWorkgroup;
    semantics = spv::MemorySemanticsWorkgroupMemoryMask
              | spv::MemorySemanticsAcquireReleaseMask;
  }

  const uint32_t scopeId     = m_module.constu32(scope);
  const uint32_t semanticsId = m_module.constu32(semantics);

  // Perform the atomic operation on the given pointer
  DxbcRegisterValue value;
  value.type = pointer.type;
  value.id   = 0;

  // The result type, which is a scalar integer
  const uint32_t typeId = getVectorTypeId(value.type);

  switch (ins.op) {
    case DxbcOpcode::AtomicCmpStore:
    case DxbcOpcode::ImmAtomicCmpExch:
      // src[1] is the comparator, src[0] the value to store
      value.id = m_module.opAtomicCompareExchange(
        typeId, pointer.id, scopeId, semanticsId,
        m_module.constu32(spv::MemorySemanticsMaskNone),
        src[1].id, src[0].id);
      break;

    case DxbcOpcode::ImmAtomicExch:
      value.id = m_module.opAtomicExchange(typeId,
        pointer.id, scopeId, semanticsId,
        src[0].id);
      break;

    case DxbcOpcode::AtomicIAdd:
    case DxbcOpcode::ImmAtomicIAdd:
      value.id = m_module.opAtomicIAdd(typeId,
        pointer.id, scopeId, semanticsId,
        src[0].id);
      break;

    case DxbcOpcode::AtomicAnd:
    case DxbcOpcode::ImmAtomicAnd:
      value.id = m_module.opAtomicAnd(typeId,
        pointer.id, scopeId, semanticsId,
        src[0].id);
      break;

    case DxbcOpcode::AtomicOr:
    case DxbcOpcode::ImmAtomicOr:
      value.id = m_module.opAtomicOr(typeId,
        pointer.id, scopeId, semanticsId,
        src[0].id);
      break;

    case DxbcOpcode::AtomicXor:
    case DxbcOpcode::ImmAtomicXor:
      value.id = m_module.opAtomicXor(typeId,
        pointer.id, scopeId, semanticsId,
        src[0].id);
      break;

    case DxbcOpcode::AtomicIMin:
    case DxbcOpcode::ImmAtomicIMin:
      value.id = m_module.opAtomicSMin(typeId,
        pointer.id, scopeId, semanticsId,
        src[0].id);
      break;

    case DxbcOpcode::AtomicIMax:
    case DxbcOpcode::ImmAtomicIMax:
      value.id = m_module.opAtomicSMax(typeId,
        pointer.id, scopeId, semanticsId,
        src[0].id);
      break;

    case DxbcOpcode::AtomicUMin:
    case DxbcOpcode::ImmAtomicUMin:
      value.id = m_module.opAtomicUMin(typeId,
        pointer.id, scopeId, semanticsId,
        src[0].id);
      break;

    case DxbcOpcode::AtomicUMax:
    case DxbcOpcode::ImmAtomicUMax:
      value.id = m_module.opAtomicUMax(typeId,
        pointer.id, scopeId, semanticsId,
        src[0].id);
      break;

    default:
      Logger::warn(str::format(
        "DxbcCompiler: Unhandled instruction: ",
        ins.op));
      return;
  }

  // Write back the result to the destination
  // register if this is an imm_atomic_* opcode.
  if (isImm)
    emitRegisterStore(ins.dst[0], value);
}


/// Handles imm_atomic_alloc / imm_atomic_consume on a UAV's hidden
/// append/consume counter, declaring the counter lazily on first use.
void emitAtomicCounter(
  const DxbcShaderInstruction& ins) {
  // imm_atomic_alloc and imm_atomic_consume have the following operands:
  // (dst0) The register that will hold the old counter value
  // (dst1) The UAV whose counter is going to be modified
  const uint32_t registerId = ins.dst[1].idx[0].offset;

  if (m_uavs.at(registerId).ctrId == 0)
    m_uavs.at(registerId).ctrId = emitDclUavCounter(registerId);

  // Get a pointer to the atomic counter in question
  DxbcRegisterInfo ptrType;
  ptrType.type.ctype   = DxbcScalarType::Uint32;
  ptrType.type.ccount  = 1;
  ptrType.type.alength = 0;
  ptrType.sclass = spv::StorageClassStorageBuffer;

  uint32_t zeroId = m_module.consti32(0);
  uint32_t ptrId  = m_module.opAccessChain(
    getPointerTypeId(ptrType),
    m_uavs.at(registerId).ctrId,
    1, &zeroId);

  // Define memory scope and semantics based on the operands
  uint32_t scope     = spv::ScopeQueueFamily;
  uint32_t semantics = spv::MemorySemanticsUniformMemoryMask
                     | spv::MemorySemanticsAcquireReleaseMask;

  uint32_t scopeId     = m_module.constu32(scope);
  uint32_t semanticsId = m_module.constu32(semantics);

  // Compute the result value
  DxbcRegisterValue value;
  value.type.ctype  = DxbcScalarType::Uint32;
  value.type.ccount = 1;

  uint32_t typeId = getVectorTypeId(value.type);

  switch (ins.op) {
    case DxbcOpcode::ImmAtomicAlloc:
      value.id = m_module.opAtomicIAdd(typeId, ptrId,
        scopeId, semanticsId, m_module.constu32(1));
      break;

    case DxbcOpcode::ImmAtomicConsume:
      // Post-decrement semantics: return the value after the
      // decrement, hence the extra subtract on the old value.
      value.id = m_module.opAtomicISub(typeId, ptrId,
        scopeId, semanticsId, m_module.constu32(1));
      value.id = m_module.opISub(typeId, value.id,
        m_module.constu32(1));
      break;

    default:
      Logger::warn(str::format(
        "DxbcCompiler: Unhandled instruction: ",
        ins.op));
      return;
  }

  // Store the result
  emitRegisterStore(ins.dst[0], value);
}


/// Handles sync: translates the operand control bits into a SPIR-V
/// control or memory barrier with the appropriate scope/semantics.
void emitBarrier(
  const DxbcShaderInstruction& ins) {
  // sync takes no operands. Instead, the synchronization
  // scope is defined by the operand control bits.
  const DxbcSyncFlags flags = ins.controls.syncFlags();

  uint32_t executionScope  = spv::ScopeInvocation;
  uint32_t memoryScope     = spv::ScopeInvocation;
  uint32_t memorySemantics = 0;

  if (flags.test(DxbcSyncFlag::ThreadsInGroup))
    executionScope = spv::ScopeWorkgroup;

  if (flags.test(DxbcSyncFlag::ThreadGroupSharedMemory)) {
    memoryScope = spv::ScopeWorkgroup;
    memorySemantics |= spv::MemorySemanticsWorkgroupMemoryMask
                    | spv::MemorySemanticsAcquireReleaseMask
                    | spv::MemorySemanticsMakeAvailableMask
                    | spv::MemorySemanticsMakeVisibleMask;
  }

  if (flags.test(DxbcSyncFlag::UavMemoryGroup)) {
    memoryScope = spv::ScopeWorkgroup;
    memorySemantics |= spv::MemorySemanticsImageMemoryMask
                    | spv::MemorySemanticsUniformMemoryMask
                    | spv::MemorySemanticsAcquireReleaseMask
                    | spv::MemorySemanticsMakeAvailableMask
                    | spv::MemorySemanticsMakeVisibleMask;
  }

  if (flags.test(DxbcSyncFlag::UavMemoryGlobal)) {
    memoryScope = spv::ScopeQueueFamily;

    // Without a globally coherent UAV, workgroup scope suffices
    // for compute shaders.
    if (m_programInfo.type() == DxbcProgramType::ComputeShader && !m_hasGloballyCoherentUav)
      memoryScope = spv::ScopeWorkgroup;

    memorySemantics |= spv::MemorySemanticsImageMemoryMask
                    | spv::MemorySemanticsUniformMemoryMask
                    | spv::MemorySemanticsAcquireReleaseMask
                    | spv::MemorySemanticsMakeAvailableMask
                    | spv::MemorySemanticsMakeVisibleMask;
  }

  // Emit a control barrier if an execution scope was requested,
  // otherwise a plain memory barrier; warn on a no-op sync.
  if (executionScope != spv::ScopeInvocation) {
    m_module.opControlBarrier(
      m_module.constu32(executionScope),
      m_module.constu32(memoryScope),
      m_module.constu32(memorySemantics));
  } else if (memoryScope != spv::ScopeInvocation) {
    m_module.opMemoryBarrier(
      m_module.constu32(memoryScope),
      m_module.constu32(memorySemantics));
  } else {
    Logger::warn("DxbcCompiler: sync instruction has no effect");
  }
}


/// Handles ibfe/ubfe: per-component bit-field extraction.
void emitBitExtract(
  const DxbcShaderInstruction& ins) {
  // ibfe and ubfe take the following arguments:
  // (dst0) The destination register
  // (src0) Number of bits to extract
  // (src1) Offset of the bits to extract
  // (src2) Register to extract bits from
  const bool isSigned = ins.op == DxbcOpcode::IBfe;

  DxbcRegisterValue bitCnt = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
  DxbcRegisterValue bitOfs = emitRegisterLoad(ins.src[1], ins.dst[0].mask);

  // Non-immediate count/offset are masked to 0..31 per D3D semantics
  if (ins.src[0].type != DxbcOperandType::Imm32)
    bitCnt = emitRegisterMaskBits(bitCnt, 0x1F);

  if (ins.src[1].type != DxbcOperandType::Imm32)
    bitOfs = emitRegisterMaskBits(bitOfs, 0x1F);

  const DxbcRegisterValue src = emitRegisterLoad(ins.src[2], ins.dst[0].mask);

  const uint32_t componentCount = src.type.ccount;
  std::array componentIds = {{ 0, 0, 0, 0 }};

  for (uint32_t i = 0; i < componentCount; i++) {
    const DxbcRegisterValue currBitCnt = emitRegisterExtract(bitCnt, DxbcRegMask::select(i));
    const DxbcRegisterValue currBitOfs = emitRegisterExtract(bitOfs, DxbcRegMask::select(i));
    const DxbcRegisterValue currSrc    = emitRegisterExtract(src,    DxbcRegMask::select(i));

    const uint32_t typeId = getVectorTypeId(currSrc.type);

    componentIds[i] = isSigned
      ? m_module.opBitFieldSExtract(typeId, currSrc.id, currBitOfs.id, currBitCnt.id)
      : m_module.opBitFieldUExtract(typeId, currSrc.id, currBitOfs.id, currBitCnt.id);
  }

  DxbcRegisterValue result;
  result.type = src.type;
  result.id = componentCount > 1
    ? m_module.opCompositeConstruct(
        getVectorTypeId(result.type),
        componentCount, componentIds.data())
    : componentIds[0];
  emitRegisterStore(ins.dst[0], result);
}


/// Handles bfi: per-component bit-field insertion.
void emitBitInsert(
  const DxbcShaderInstruction& ins) {
  // bfi takes the following arguments:
  // (dst0) The destination register
  // (src0) Number of bits to extract
  // (src1) Offset of the bits to extract
  // (src2) Register to take bits from
  // (src3) Register to replace bits in
  DxbcRegisterValue bitCnt = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
  DxbcRegisterValue bitOfs = emitRegisterLoad(ins.src[1], ins.dst[0].mask);

  if (ins.src[0].type != DxbcOperandType::Imm32)
    bitCnt = emitRegisterMaskBits(bitCnt, 0x1F);

  if (ins.src[1].type != DxbcOperandType::Imm32)
    bitOfs = emitRegisterMaskBits(bitOfs, 0x1F);

  const DxbcRegisterValue insert = emitRegisterLoad(ins.src[2], ins.dst[0].mask);
  const DxbcRegisterValue base   = emitRegisterLoad(ins.src[3], ins.dst[0].mask);

  const uint32_t componentCount = base.type.ccount;
  std::array componentIds = {{ 0, 0, 0, 0 }};

  for (uint32_t i = 0; i < componentCount; i++) {
    const DxbcRegisterValue currBitCnt = emitRegisterExtract(bitCnt, DxbcRegMask::select(i));
    const DxbcRegisterValue currBitOfs = emitRegisterExtract(bitOfs, DxbcRegMask::select(i));
    const DxbcRegisterValue currInsert = emitRegisterExtract(insert, DxbcRegMask::select(i));
    const DxbcRegisterValue currBase   = emitRegisterExtract(base,   DxbcRegMask::select(i));

    componentIds[i] = m_module.opBitFieldInsert(
      getVectorTypeId(currBase.type),
      currBase.id, currInsert.id,
      currBitOfs.id, currBitCnt.id);
  }

  DxbcRegisterValue result;
  result.type = base.type;
  result.id = componentCount > 1
    ? m_module.opCompositeConstruct(
        getVectorTypeId(result.type),
        componentCount, componentIds.data())
    : componentIds[0];
  emitRegisterStore(ins.dst[0], result);
}


/// Handles firstbit_lo/hi/shi bit scans.
void emitBitScan(
  const DxbcShaderInstruction& ins) {
  // firstbit(lo|hi|shi) have two operands:
  // (dst0) The destination operand
  // (src0) Source operand to scan
  DxbcRegisterValue src = emitRegisterLoad(ins.src[0], ins.dst[0].mask);

  DxbcRegisterValue dst;
  dst.type.ctype  = ins.dst[0].dataType;
  dst.type.ccount = ins.dst[0].mask.popCount();

  // Result type, should be an unsigned integer
  const uint32_t typeId = getVectorTypeId(dst.type);

  switch (ins.op) {
    case DxbcOpcode::FirstBitLo:  dst.id = m_module.opFindILsb(typeId, src.id); break;
    case DxbcOpcode::FirstBitHi:  dst.id = m_module.opFindUMsb(typeId, src.id); break;
    case DxbcOpcode::FirstBitShi: dst.id = m_module.opFindSMsb(typeId, src.id); break;
    default: Logger::warn(str::format("DxbcCompiler: Unhandled instruction: ", ins.op)); return;
  }

  // The 'Hi' variants are counted from the MSB in DXBC
  // rather than the LSB, so we have to invert the number
  if (ins.op == DxbcOpcode::FirstBitHi || ins.op == DxbcOpcode::FirstBitShi) {
    uint32_t boolTypeId = m_module.defBoolType();

    if (dst.type.ccount > 1)
      boolTypeId = m_module.defVectorType(boolTypeId, dst.type.ccount);

    DxbcRegisterValue const31 = emitBuildConstVecu32(31u, 31u, 31u, 31u, ins.dst[0].mask);
    DxbcRegisterValue constff = emitBuildConstVecu32(~0u, ~0u, ~0u, ~0u, ins.dst[0].mask);

    // ~0u means "no bit found" and is passed through unchanged
    dst.id = m_module.opSelect(typeId,
      m_module.opINotEqual(boolTypeId, dst.id, constff.id),
      m_module.opISub(typeId, const31.id, dst.id),
      constff.id);
  }

  // No modifiers are supported
  emitRegisterStore(ins.dst[0], dst);
}


/// Handles bufinfo: returns the element count of a raw, structured
/// or typed buffer resource.
void emitBufferQuery(
  const DxbcShaderInstruction& ins) {
  // bufinfo takes two arguments
  // (dst0) The destination register
  // (src0) The buffer register to query
  const DxbcBufferInfo bufferInfo = getBufferInfo(ins.src[0]);
  bool isSsbo = bufferInfo.isSsbo;

  // We'll store this as a scalar unsigned integer
  DxbcRegisterValue result = isSsbo
    ? emitQueryBufferSize(ins.src[0])
    : emitQueryTexelBufferSize(ins.src[0]);

  uint32_t typeId = getVectorTypeId(result.type);

  // Adjust returned size if this is a raw or structured
  // buffer, as emitQueryTexelBufferSize only returns the
  // number of typed elements in the buffer.
  if (bufferInfo.type == DxbcResourceType::Raw) {
    result.id = m_module.opIMul(typeId,
      result.id, m_module.constu32(4));
  } else if (bufferInfo.type == DxbcResourceType::Structured) {
    result.id = m_module.opUDiv(typeId, result.id,
      m_module.constu32(bufferInfo.stride / 4));
  }

  // Store the result. The scalar will be extended to a
  // vector if the write mask consists of more than one
  // component, which is the desired behaviour.
  emitRegisterStore(ins.dst[0], result);
}


/// Handles ld_raw / ld_structured (and their sparse variants) from
/// TGSM, SSBO-backed buffers and texel buffer views.
void emitBufferLoad(
  const DxbcShaderInstruction& ins) {
  // ld_raw takes three arguments:
  // (dst0) Destination register
  // (src0) Byte offset
  // (src1) Source register
  // ld_structured takes four arguments:
  // (dst0) Destination register
  // (src0) Structure index
  // (src1) Byte offset
  // (src2) Source register
  const bool isStructured = ins.op == DxbcOpcode::LdStructured
                         || ins.op == DxbcOpcode::LdStructuredS;

  // Source register. The exact way we access
  // the data depends on the register type.
  const DxbcRegister& dstReg = ins.dst[0];
  const DxbcRegister& srcReg = isStructured
    ?
      ins.src[2] : ins.src[1];

  if (dstReg.type == DxbcOperandType::UnorderedAccessView)
    emitUavBarrier(uint64_t(1u) << srcReg.idx[0].offset, 0u);

  // Retrieve common info about the buffer
  const DxbcBufferInfo bufferInfo = getBufferInfo(srcReg);

  // Shared memory is the only type of buffer that
  // is not accessed through a texel buffer view
  bool isTgsm = srcReg.type == DxbcOperandType::ThreadGroupSharedMemory;
  bool isSsbo = bufferInfo.isSsbo;

  // Common types and IDs used while loading the data
  uint32_t bufferId = isTgsm || isSsbo ? 0 : m_module.opLoad(bufferInfo.typeId, bufferInfo.varId);

  uint32_t vectorTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 4 });
  uint32_t scalarTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 1 });

  // Since all data is represented as a sequence of 32-bit
  // integers, we have to load each component individually.
  // NOTE(review): these std::array declarations lost their template
  // arguments in transit -- restore element type/count before compiling.
  std::array ccomps = { 0, 0, 0, 0 };
  std::array scomps = { 0, 0, 0, 0 };
  uint32_t scount = 0;

  // The sparse feedback ID will be non-zero for sparse
  // instructions on input. We need to reset it to 0.
  SpirvMemoryOperands memoryOperands;
  SpirvImageOperands imageOperands;
  imageOperands.sparse = ins.dstCount == 2;

  uint32_t coherence = bufferInfo.coherence;

  if (isTgsm && m_moduleInfo.options.forceVolatileTgsmAccess) {
    memoryOperands.flags |= spv::MemoryAccessVolatileMask;
    coherence = spv::ScopeWorkgroup;
  }

  if (coherence) {
    memoryOperands.flags |= spv::MemoryAccessNonPrivatePointerMask;

    if (coherence != spv::ScopeInvocation) {
      memoryOperands.flags |= spv::MemoryAccessMakePointerVisibleMask;
      memoryOperands.makeVisible = m_module.constu32(coherence);

      imageOperands.flags = spv::ImageOperandsNonPrivateTexelMask
                          | spv::ImageOperandsMakeTexelVisibleMask;
      imageOperands.makeVisible = m_module.constu32(coherence);
    }
  }

  uint32_t sparseFeedbackId = 0;

  // NV raw access chains give direct byte addressing into SSBOs,
  // but cannot express sparse feedback.
  bool useRawAccessChains = m_hasRawAccessChains && isSsbo && !imageOperands.sparse;

  DxbcRegisterValue index  = emitRegisterLoad(ins.src[0], DxbcRegMask(true, false, false, false));
  DxbcRegisterValue offset = index;

  if (isStructured)
    offset = emitRegisterLoad(ins.src[1], DxbcRegMask(true, false, false, false));

  DxbcRegisterValue elementIndex = { };

  uint32_t baseAlignment = sizeof(uint32_t);

  if (useRawAccessChains) {
    memoryOperands.flags |= spv::MemoryAccessAlignedMask;

    // Derive the largest provable power-of-two alignment from the
    // stride and the immediate offset, capped at minSsboAlignment.
    if (isStructured && ins.src[1].type == DxbcOperandType::Imm32) {
      baseAlignment = bufferInfo.stride | ins.src[1].imm.u32_1;
      baseAlignment = baseAlignment & -baseAlignment;
      baseAlignment = std::min(baseAlignment, uint32_t(m_moduleInfo.options.minSsboAlignment));
    }
  } else {
    elementIndex = isStructured
      ? emitCalcBufferIndexStructured(index, offset, bufferInfo.stride)
      : emitCalcBufferIndexRaw(offset);
  }

  // Bit i set = dword i of the source needs to be read
  uint32_t readMask = 0u;

  for (uint32_t i = 0; i < 4; i++) {
    if (dstReg.mask[i])
      readMask |= 1u << srcReg.swizzle[i];
  }

  while (readMask) {
    uint32_t sindex = bit::tzcnt(readMask);
    // Intentionally shadows the outer 'scount': this is the length of
    // the current contiguous run of components, not the result count.
    uint32_t scount = bit::tzcnt(~(readMask >> sindex));
    uint32_t zero = 0;

    if (useRawAccessChains) {
      uint32_t alignment = baseAlignment;
      uint32_t offsetId = offset.id;

      if (sindex) {
        offsetId = m_module.opIAdd(scalarTypeId,
          offsetId, m_module.constu32(sizeof(uint32_t) * sindex));
        alignment |= sizeof(uint32_t) * sindex;
      }

      DxbcRegisterInfo storeInfo;
      storeInfo.type.ctype   = DxbcScalarType::Uint32;
      storeInfo.type.ccount  = scount;
      storeInfo.type.alength = 0;
      storeInfo.sclass = spv::StorageClassStorageBuffer;

      uint32_t loadTypeId = getArrayTypeId(storeInfo.type);
      uint32_t ptrTypeId  = getPointerTypeId(storeInfo);

      uint32_t accessChain = isStructured
        ? m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
            m_module.constu32(bufferInfo.stride), index.id, offsetId,
            spv::RawAccessChainOperandsRobustnessPerElementNVMask)
        : m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
            m_module.constu32(0), m_module.constu32(0), offsetId,
            spv::RawAccessChainOperandsRobustnessPerComponentNVMask);

      memoryOperands.alignment = alignment & -alignment;

      uint32_t vectorId = m_module.opLoad(loadTypeId, accessChain, memoryOperands);

      for (uint32_t i = 0; i < scount; i++) {
        ccomps[sindex + i] = vectorId;

        if (scount > 1) {
          ccomps[sindex + i] = m_module.opCompositeExtract(
            scalarTypeId, vectorId, 1, &i);
        }
      }

      // Clear the whole run we just loaded
      readMask &= ~(((1u << scount) - 1u) << sindex);
    } else {
      uint32_t elementIndexAdjusted = m_module.opIAdd(
        getVectorTypeId(elementIndex.type), elementIndex.id,
        m_module.consti32(sindex));

      if (isTgsm) {
        ccomps[sindex] = m_module.opLoad(scalarTypeId,
          m_module.opAccessChain(bufferInfo.typeId,
            bufferInfo.varId, 1, &elementIndexAdjusted),
          memoryOperands);
      } else if (isSsbo) {
        uint32_t indices[2] = { m_module.constu32(0), elementIndexAdjusted };
        ccomps[sindex] = m_module.opLoad(scalarTypeId,
          m_module.opAccessChain(bufferInfo.typeId,
            bufferInfo.varId, 2, indices),
          memoryOperands);
      } else {
        uint32_t resultTypeId = vectorTypeId;
        uint32_t resultId = 0;

        if (imageOperands.sparse)
          resultTypeId = getSparseResultTypeId(vectorTypeId);

        if (srcReg.type == DxbcOperandType::Resource) {
          resultId = m_module.opImageFetch(resultTypeId,
            bufferId, elementIndexAdjusted, imageOperands);
        } else if (srcReg.type == DxbcOperandType::UnorderedAccessView) {
          resultId = m_module.opImageRead(resultTypeId,
            bufferId, elementIndexAdjusted, imageOperands);
        } else {
          // NOTE(review): "strucured" typo retained -- this is a runtime
          // error string; fix upstream rather than here.
          throw DxvkError("DxbcCompiler: Invalid operand type for strucured/raw load");
        }

        // Only read sparse feedback once. This may be somewhat inaccurate
        // for reads that straddle pages, but we can't easily emulate this.
        if (imageOperands.sparse) {
          imageOperands.sparse = false;
          sparseFeedbackId = resultId;

          resultId = emitExtractSparseTexel(vectorTypeId, resultId);
        }

        ccomps[sindex] = m_module.opCompositeExtract(scalarTypeId, resultId, 1, &zero);
      }

      // Clear only the lowest set bit; texel-path loads one dword at a time
      readMask &= readMask - 1;
    }
  }

  // Apply the source swizzle and pack the enabled components
  for (uint32_t i = 0; i < 4; i++) {
    uint32_t sindex = srcReg.swizzle[i];

    if (dstReg.mask[i])
      scomps[scount++] = ccomps[sindex];
  }

  DxbcRegisterValue result = { };
  result.type.ctype  = DxbcScalarType::Uint32;
  result.type.ccount = scount;
  result.id = scomps[0];

  if (scount > 1) {
    result.id = m_module.opCompositeConstruct(
      getVectorTypeId(result.type),
      scount, scomps.data());
  }

  emitRegisterStore(dstReg, result);

  if (sparseFeedbackId)
    emitStoreSparseFeedback(ins.dst[1], sparseFeedbackId);
}


void emitBufferStore(
  const DxbcShaderInstruction& ins) {
  // store_raw takes three arguments:
  // (dst0) Destination register
  // (src0) Byte offset
// (src1) Source register\n // store_structured takes four arguments:\n // (dst0) Destination register\n // (src0) Structure index\n // (src1) Byte offset\n // (src2) Source register\n const bool isStructured = ins.op == DxbcOpcode::StoreStructured;\n \n // Source register. The exact way we access\n // the data depends on the register type.\n const DxbcRegister& dstReg = ins.dst[0];\n const DxbcRegister& srcReg = isStructured ? ins.src[2] : ins.src[1];\n\n if (dstReg.type == DxbcOperandType::UnorderedAccessView)\n emitUavBarrier(0u, uint64_t(1u) << dstReg.idx[0].offset);\n\n DxbcRegisterValue value = emitRegisterLoad(srcReg, dstReg.mask);\n value = emitRegisterBitcast(value, DxbcScalarType::Uint32);\n\n // Retrieve common info about the buffer\n const DxbcBufferInfo bufferInfo = getBufferInfo(dstReg);\n\n // Thread Group Shared Memory is not accessed through a texel buffer view\n bool isTgsm = dstReg.type == DxbcOperandType::ThreadGroupSharedMemory;\n bool isSsbo = bufferInfo.isSsbo;\n\n uint32_t bufferId = isTgsm || isSsbo ? 
0 : m_module.opLoad(bufferInfo.typeId, bufferInfo.varId);\n\n uint32_t scalarTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 1 });\n uint32_t vectorTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 4 });\n\n // Set memory operands according to resource properties\n SpirvMemoryOperands memoryOperands;\n SpirvImageOperands imageOperands;\n\n uint32_t coherence = bufferInfo.coherence;\n\n if (isTgsm && m_moduleInfo.options.forceVolatileTgsmAccess) {\n memoryOperands.flags |= spv::MemoryAccessVolatileMask;\n coherence = spv::ScopeWorkgroup;\n }\n\n if (coherence) {\n memoryOperands.flags |= spv::MemoryAccessNonPrivatePointerMask;\n\n if (coherence != spv::ScopeInvocation) {\n memoryOperands.flags |= spv::MemoryAccessMakePointerAvailableMask;\n memoryOperands.makeAvailable = m_module.constu32(coherence);\n\n imageOperands.flags = spv::ImageOperandsNonPrivateTexelMask\n | spv::ImageOperandsMakeTexelAvailableMask;\n imageOperands.makeAvailable = m_module.constu32(coherence);\n }\n }\n\n // Compute flat element index as necessary\n bool useRawAccessChains = isSsbo && m_hasRawAccessChains;\n\n DxbcRegisterValue index = emitRegisterLoad(ins.src[0], DxbcRegMask(true, false, false, false));\n DxbcRegisterValue offset = index;\n\n if (isStructured)\n offset = emitRegisterLoad(ins.src[1], DxbcRegMask(true, false, false, false));\n\n DxbcRegisterValue elementIndex = { };\n\n uint32_t baseAlignment = sizeof(uint32_t);\n\n if (useRawAccessChains) {\n memoryOperands.flags |= spv::MemoryAccessAlignedMask;\n\n if (isStructured && ins.src[1].type == DxbcOperandType::Imm32) {\n baseAlignment = bufferInfo.stride | ins.src[1].imm.u32_1;\n baseAlignment = baseAlignment & -baseAlignment;\n baseAlignment = std::min(baseAlignment, uint32_t(m_moduleInfo.options.minSsboAlignment));\n }\n } else {\n elementIndex = isStructured\n ? 
emitCalcBufferIndexStructured(index, offset, bufferInfo.stride)\n : emitCalcBufferIndexRaw(offset);\n }\n\n uint32_t writeMask = dstReg.mask.raw();\n\n while (writeMask) {\n uint32_t sindex = bit::tzcnt(writeMask);\n uint32_t scount = bit::tzcnt(~(writeMask >> sindex));\n\n if (useRawAccessChains) {\n uint32_t alignment = baseAlignment;\n uint32_t offsetId = offset.id;\n\n if (sindex) {\n offsetId = m_module.opIAdd(scalarTypeId,\n offsetId, m_module.constu32(sizeof(uint32_t) * sindex));\n alignment = alignment | (sizeof(uint32_t) * sindex);\n }\n\n DxbcRegisterInfo storeInfo;\n storeInfo.type.ctype = DxbcScalarType::Uint32;\n storeInfo.type.ccount = scount;\n storeInfo.type.alength = 0;\n storeInfo.sclass = spv::StorageClassStorageBuffer;\n\n uint32_t storeTypeId = getArrayTypeId(storeInfo.type);\n uint32_t ptrTypeId = getPointerTypeId(storeInfo);\n\n uint32_t accessChain = isStructured\n ? m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,\n m_module.constu32(bufferInfo.stride), index.id, offsetId,\n spv::RawAccessChainOperandsRobustnessPerElementNVMask)\n : m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,\n m_module.constu32(0), m_module.constu32(0), offsetId,\n spv::RawAccessChainOperandsRobustnessPerComponentNVMask);\n\n uint32_t valueId = value.id;\n\n if (scount < value.type.ccount) {\n if (scount == 1) {\n valueId = m_module.opCompositeExtract(storeTypeId, value.id, 1, &sindex);\n } else {\n std::array indices = { sindex, sindex + 1u, sindex + 2u, sindex + 3u };\n valueId = m_module.opVectorShuffle(storeTypeId, value.id, value.id, scount, indices.data());\n }\n }\n\n memoryOperands.alignment = alignment & -alignment;\n m_module.opStore(accessChain, valueId, memoryOperands);\n\n writeMask &= ~(((1u << scount) - 1u) << sindex);\n } else {\n uint32_t srcComponentId = value.type.ccount > 1\n ? m_module.opCompositeExtract(scalarTypeId,\n value.id, 1, &sindex)\n : value.id;\n\n uint32_t elementIndexAdjusted = sindex != 0\n ? 
m_module.opIAdd(getVectorTypeId(elementIndex.type),\n elementIndex.id, m_module.consti32(sindex))\n : elementIndex.id;\n\n if (isTgsm) {\n m_module.opStore(\n m_module.opAccessChain(bufferInfo.typeId,\n bufferInfo.varId, 1, &elementIndexAdjusted),\n srcComponentId, memoryOperands);\n } else if (isSsbo) {\n uint32_t indices[2] = { m_module.constu32(0), elementIndexAdjusted };\n m_module.opStore(\n m_module.opAccessChain(bufferInfo.typeId,\n bufferInfo.varId, 2, indices),\n srcComponentId, memoryOperands);\n } else if (dstReg.type == DxbcOperandType::UnorderedAccessView) {\n const std::array srcVectorIds = {\n srcComponentId, srcComponentId,\n srcComponentId, srcComponentId,\n };\n\n m_module.opImageWrite(\n bufferId, elementIndexAdjusted,\n m_module.opCompositeConstruct(vectorTypeId,\n 4, srcVectorIds.data()),\n imageOperands);\n } else {\n throw DxvkError(\"DxbcCompiler: Invalid operand type for strucured/raw store\");\n }\n\n writeMask &= writeMask - 1u;\n }\n }\n }\n void emitConvertFloat16(\n const DxbcShaderInstruction& ins) {\n // f32tof16 takes two operands:\n // (dst0) Destination register as a uint32 vector\n // (src0) Source register as a float32 vector\n // f16tof32 takes two operands:\n // (dst0) Destination register as a float32 vector\n // (src0) Source register as a uint32 vector\n const DxbcRegisterValue src = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n \n // We handle both packing and unpacking here\n const bool isPack = ins.op == DxbcOpcode::F32toF16;\n \n // The conversion instructions do not map very well to the\n // SPIR-V pack instructions, which operate on 2D vectors.\n std::array scalarIds = {{ 0, 0, 0, 0 }};\n \n const uint32_t componentCount = src.type.ccount;\n \n // These types are used in both pack and unpack operations\n const uint32_t t_u32 = getVectorTypeId({ DxbcScalarType::Uint32, 1 });\n const uint32_t t_f32 = getVectorTypeId({ DxbcScalarType::Float32, 1 });\n const uint32_t t_f32v2 = getVectorTypeId({ DxbcScalarType::Float32, 
2 });

  // Constant zero-bit pattern, used for packing
  const uint32_t zerof32 = isPack ? m_module.constf32(0.0f) : 0;

  // Convert each component individually: pack {value, 0} into the low
  // half of a uint, or extract the low half of an unpacked pair
  for (uint32_t i = 0; i < componentCount; i++) {
    const DxbcRegisterValue componentValue
      = emitRegisterExtract(src, DxbcRegMask::select(i));

    if (isPack) {  // f32tof16
      const std::array packIds =
        {{ componentValue.id, zerof32 }};

      scalarIds[i] = m_module.opPackHalf2x16(t_u32,
        m_module.opCompositeConstruct(t_f32v2, packIds.size(), packIds.data()));
    } else {  // f16tof32
      const uint32_t zeroIndex = 0;

      scalarIds[i] = m_module.opCompositeExtract(t_f32,
        m_module.opUnpackHalf2x16(t_f32v2, componentValue.id),
        1, &zeroIndex);
    }
  }

  // Assemble converted scalars into the result vector
  DxbcRegisterValue result;
  result.type.ctype = ins.dst[0].dataType;
  result.type.ccount = componentCount;

  uint32_t typeId = getVectorTypeId(result.type);
  result.id = componentCount > 1
    ? m_module.opCompositeConstruct(typeId,
        componentCount, scalarIds.data())
    : scalarIds[0];

  if (isPack) {
    // Some drivers return infinity if the input value is above a certain
    // threshold, but D3D wants us to return infinity only if the input is
    // actually infinite. Fix this up to return the maximum representable
    // 16-bit floating point number instead, but preserve input infinity.
    uint32_t t_bvec = getVectorTypeId({ DxbcScalarType::Bool, componentCount });
    uint32_t f16Infinity = m_module.constuReplicant(0x7C00, componentCount);
    uint32_t f16Unsigned = m_module.constuReplicant(0x7FFF, componentCount);

    uint32_t isInputInf = m_module.opIsInf(t_bvec, src.id);
    uint32_t isValueInf = m_module.opIEqual(t_bvec, f16Infinity,
      m_module.opBitwiseAnd(typeId, result.id, f16Unsigned));

    // 0x7C00 - 1 = 0x7BFF, the largest finite f16 bit pattern;
    // the sign bit is preserved by the subtraction
    result.id = m_module.opSelect(getVectorTypeId(result.type),
      m_module.opLogicalAnd(t_bvec, isValueInf, m_module.opLogicalNot(t_bvec, isInputInf)),
      m_module.opISub(typeId, result.id, m_module.constuReplicant(1, componentCount)),
      result.id);
  }

  // Store result in the destination register
  emitRegisterStore(ins.dst[0], result);
}


/**
 * \brief Handles double-precision conversion instructions
 */
void emitConvertFloat64(
  const DxbcShaderInstruction& ins) {
  // ftod and dtof take the following operands:
  //    (dst0) Destination operand
  //    (src0) Number to convert
  uint32_t dstBits = ins.dst[0].mask.popCount();

  // Doubles occupy two 32-bit components each, so the source
  // mask depends on whether the destination is a double type
  DxbcRegMask srcMask = isDoubleType(ins.dst[0].dataType)
    ? 
DxbcRegMask(dstBits >= 2, dstBits >= 4, false, false)
    : DxbcRegMask(dstBits >= 1, dstBits >= 1, dstBits >= 2, dstBits >= 2);

  // Perform actual conversion, destination modifiers are not applied
  DxbcRegisterValue val = emitRegisterLoad(ins.src[0], srcMask);

  DxbcRegisterValue result;
  result.type.ctype = ins.dst[0].dataType;
  result.type.ccount = val.type.ccount;

  switch (ins.op) {
    // DtoF and FtoD are both plain float conversions,
    // only the operand widths differ
    case DxbcOpcode::DtoF:
    case DxbcOpcode::FtoD:
      result.id = m_module.opFConvert(
        getVectorTypeId(result.type), val.id);
      break;

    case DxbcOpcode::DtoI:
      result.id = m_module.opConvertFtoS(
        getVectorTypeId(result.type), val.id);
      break;

    case DxbcOpcode::DtoU:
      result.id = m_module.opConvertFtoU(
        getVectorTypeId(result.type), val.id);
      break;

    case DxbcOpcode::ItoD:
      result.id = m_module.opConvertStoF(
        getVectorTypeId(result.type), val.id);
      break;

    case DxbcOpcode::UtoD:
      result.id = m_module.opConvertUtoF(
        getVectorTypeId(result.type), val.id);
      break;

    default:
      Logger::warn(str::format("DxbcCompiler: Unhandled instruction: ", ins.op));
      return;
  }

  emitRegisterStore(ins.dst[0], result);
}


/**
 * \brief Handles hull shader phase declarations
 *
 * Creates the function for the new phase and updates
 * the current-phase bookkeeping in m_hs accordingly.
 */
void emitHullShaderPhase(
  const DxbcShaderInstruction& ins) {
  switch (ins.op) {
    case DxbcOpcode::HsDecls: {
      if (m_hs.currPhaseType != DxbcCompilerHsPhase::None)
        Logger::err("DXBC: HsDecls not the first phase in hull shader");

      m_hs.currPhaseType = DxbcCompilerHsPhase::Decl;
    } break;

    case DxbcOpcode::HsControlPointPhase: {
      m_hs.cpPhase = this->emitNewHullShaderControlPointPhase();

      m_hs.currPhaseType = DxbcCompilerHsPhase::ControlPoint;
      m_hs.currPhaseId = 0;

      m_module.setDebugName(m_hs.cpPhase.functionId, "hs_control_point");
    } break;

    case DxbcOpcode::HsForkPhase: {
      auto phase = this->emitNewHullShaderForkJoinPhase();
      m_hs.forkPhases.push_back(phase);

      m_hs.currPhaseType = DxbcCompilerHsPhase::Fork;
      m_hs.currPhaseId = m_hs.forkPhases.size() - 1;

      m_module.setDebugName(phase.functionId,
        str::format("hs_fork_", m_hs.currPhaseId).c_str());
    } break;

    case DxbcOpcode::HsJoinPhase: {
      auto phase = this->emitNewHullShaderForkJoinPhase();
      m_hs.joinPhases.push_back(phase);

      m_hs.currPhaseType = DxbcCompilerHsPhase::Join;
      m_hs.currPhaseId = m_hs.joinPhases.size() - 1;

      m_module.setDebugName(phase.functionId,
        str::format("hs_join_", m_hs.currPhaseId).c_str());
    } break;

    default:
      Logger::warn(str::format(
        "DxbcCompiler: Unhandled instruction: ",
        ins.op));
  }
}


/**
 * \brief Sets the instance count of the current fork/join phase
 */
void emitHullShaderInstCnt(
  const DxbcShaderInstruction& ins) {
  this->getCurrentHsForkJoinPhase()->instanceCount = ins.imm[0].u32;
}


/**
 * \brief Handles eval_centroid, eval_sample_index and eval_snapped
 */
void emitInterpolate(
  const DxbcShaderInstruction& ins) {
  m_module.enableCapability(spv::CapabilityInterpolationFunction);

  // The SPIR-V instructions operate on input variable pointers,
  // which are all declared as four-component float vectors.
  uint32_t registerId = ins.src[0].idx[0].offset;

  DxbcRegisterValue result;
  result.type = getInputRegType(registerId);

  switch (ins.op) {
    case DxbcOpcode::EvalCentroid: {
      result.id = m_module.opInterpolateAtCentroid(
        getVectorTypeId(result.type),
        m_vRegs.at(registerId).id);
    } break;

    case DxbcOpcode::EvalSampleIndex: {
      const DxbcRegisterValue sampleIndex = emitRegisterLoad(
        ins.src[1], DxbcRegMask(true, false, false, false));

      result.id = m_module.opInterpolateAtSample(
        getVectorTypeId(result.type),
        m_vRegs.at(registerId).id,
        sampleIndex.id);
    } break;

    case DxbcOpcode::EvalSnapped: {
      // The offset is encoded as a 4-bit fixed point value
      DxbcRegisterValue offset = emitRegisterLoad(
        ins.src[1], DxbcRegMask(true, true, false, false));
      offset.id = m_module.opBitFieldSExtract(
        getVectorTypeId(offset.type), offset.id,
        m_module.consti32(0), m_module.consti32(4));

      offset.type.ctype = DxbcScalarType::Float32;
      offset.id = m_module.opConvertStoF(
        getVectorTypeId(offset.type), 
offset.id);\n\n offset.id = m_module.opFMul(\n getVectorTypeId(offset.type), offset.id,\n m_module.constvec2f32(1.0f / 16.0f, 1.0f / 16.0f));\n\n result.id = m_module.opInterpolateAtOffset(\n getVectorTypeId(result.type),\n m_vRegs.at(registerId).id,\n offset.id);\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n \n result = emitRegisterSwizzle(result,\n ins.src[0].swizzle, ins.dst[0].mask);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitSparseCheckAccess(\n const DxbcShaderInstruction& ins) {\n // check_access_mapped has two operands:\n // (dst0) The destination register\n // (src0) The residency code\n m_module.enableCapability(spv::CapabilitySparseResidency);\n\n DxbcRegisterValue srcValue = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n\n uint32_t boolId = m_module.opImageSparseTexelsResident(\n m_module.defBoolType(), srcValue.id);\n\n DxbcRegisterValue dstValue;\n dstValue.type = { DxbcScalarType::Uint32, 1 };\n dstValue.id = m_module.opSelect(getScalarTypeId(DxbcScalarType::Uint32),\n boolId, m_module.constu32(~0u), m_module.constu32(0));\n\n emitRegisterStore(ins.dst[0], dstValue);\n }\n void emitTextureQuery(\n const DxbcShaderInstruction& ins) {\n // resinfo has three operands:\n // (dst0) The destination register\n // (src0) Resource LOD to query\n // (src1) Resource to query\n const DxbcBufferInfo resourceInfo = getBufferInfo(ins.src[1]);\n const DxbcResinfoType resinfoType = ins.controls.resinfoType();\n \n // Read the exact LOD for the image query\n const DxbcRegisterValue mipLod = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n const DxbcScalarType returnType = resinfoType == DxbcResinfoType::Uint\n ? DxbcScalarType::Uint32 : DxbcScalarType::Float32;\n \n // Query the size of the selected mip level, as well as the\n // total number of mip levels. 
We will have to combine the
  // result into a four-component vector later.
  DxbcRegisterValue imageSize   = emitQueryTextureSize(ins.src[1], mipLod);
  DxbcRegisterValue imageLevels = emitQueryTextureLods(ins.src[1]);

  // If the mip level is out of bounds, D3D requires us to return
  // zero before applying modifiers, whereas SPIR-V is undefined,
  // so we need to fix it up manually here.
  imageSize.id = m_module.opSelect(getVectorTypeId(imageSize.type),
    m_module.opULessThan(m_module.defBoolType(), mipLod.id, imageLevels.id),
    imageSize.id, emitBuildZeroVector(imageSize.type).id);

  // Convert intermediates to the requested type
  if (returnType == DxbcScalarType::Float32) {
    imageSize.type.ctype = DxbcScalarType::Float32;
    imageSize.id = m_module.opConvertUtoF(
      getVectorTypeId(imageSize.type),
      imageSize.id);

    imageLevels.type.ctype = DxbcScalarType::Float32;
    imageLevels.id = m_module.opConvertUtoF(
      getVectorTypeId(imageLevels.type),
      imageLevels.id);
  }

  // If the selected return type is rcpFloat, we need
  // to compute the reciprocal of the image dimensions,
  // but not the array size, so we need to separate it.
  const uint32_t imageCoordDim = imageSize.type.ccount;

  DxbcRegisterValue imageLayers;
  imageLayers.type = imageSize.type;
  imageLayers.id = 0;

  if (resinfoType == DxbcResinfoType::RcpFloat && resourceInfo.image.array) {
    imageLayers = emitRegisterExtract(imageSize, DxbcRegMask::select(imageCoordDim - 1));
    imageSize = emitRegisterExtract(imageSize, DxbcRegMask::firstN(imageCoordDim - 1));
  }

  if (resinfoType == DxbcResinfoType::RcpFloat) {
    imageSize.id = m_module.opFDiv(
      getVectorTypeId(imageSize.type),
      emitBuildConstVecf32(1.0f, 1.0f, 1.0f, 1.0f,
        DxbcRegMask::firstN(imageSize.type.ccount)).id,
      imageSize.id);
  }

  // Concatenate result vectors and scalars to form a
  // 4D vector. Unused components will be set to zero.
  std::array vectorIds = { imageSize.id, 0, 0, 0 };
  uint32_t numVectorIds = 1;

  if (imageLayers.id != 0)
    vectorIds[numVectorIds++] = imageLayers.id;

  if (imageCoordDim < 3) {
    const uint32_t zero = returnType == DxbcScalarType::Uint32
      ? m_module.constu32(0)
      : m_module.constf32(0.0f);

    for (uint32_t i = imageCoordDim; i < 3; i++)
      vectorIds[numVectorIds++] = zero;
  }

  vectorIds[numVectorIds++] = imageLevels.id;

  // Create the actual result vector
  DxbcRegisterValue result;
  result.type.ctype = returnType;
  result.type.ccount = 4;
  result.id = m_module.opCompositeConstruct(
    getVectorTypeId(result.type),
    numVectorIds, vectorIds.data());

  // Swizzle components using the resource swizzle
  // and the destination operand's write mask
  result = emitRegisterSwizzle(result,
    ins.src[1].swizzle, ins.dst[0].mask);
  emitRegisterStore(ins.dst[0], result);
}


/**
 * \brief Handles lod (texture LOD query)
 */
void emitTextureQueryLod(
  const DxbcShaderInstruction& ins) {
  // All sample instructions have at least these operands:
  //    (dst0) The destination register
  //    (src0) Texture coordinates
  //    (src1) The texture itself
  //    (src2) The sampler object
  const DxbcRegister& texCoordReg = ins.src[0];
  const DxbcRegister& textureReg  = ins.src[1];
  const DxbcRegister& samplerReg  = ins.src[2];

  // Texture and sampler register IDs
  const auto& texture = m_textures.at(textureReg.idx[0].offset);
  const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);

  // Load texture coordinates
  const DxbcRegisterValue coord = emitRegisterLoad(texCoordReg,
    DxbcRegMask::firstN(getTexLayerDim(texture.imageInfo)));

  // Query the LOD. 
The result is a two-dimensional float32\n // vector containing the mip level and virtual LOD numbers.\n const uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, false);\n const uint32_t queriedLodId = m_module.opImageQueryLod(\n getVectorTypeId({ DxbcScalarType::Float32, 2 }),\n sampledImageId, coord.id);\n \n // Build the result array vector by filling up\n // the remaining two components with zeroes.\n const uint32_t zero = m_module.constf32(0.0f);\n const std::array resultIds\n = {{ queriedLodId, zero, zero }};\n \n DxbcRegisterValue result;\n result.type = DxbcVectorType { DxbcScalarType::Float32, 4 };\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n resultIds.size(), resultIds.data());\n \n result = emitRegisterSwizzle(result, ins.src[1].swizzle, ins.dst[0].mask);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitTextureQueryMs(\n const DxbcShaderInstruction& ins) {\n // sampleinfo has two operands:\n // (dst0) The destination register\n // (src0) Resource to query\n DxbcRegisterValue sampleCount = emitQueryTextureSamples(ins.src[0]);\n \n if (ins.controls.returnType() != DxbcInstructionReturnType::Uint) {\n sampleCount.type = { DxbcScalarType::Float32, 1 };\n sampleCount.id = m_module.opConvertUtoF(\n getVectorTypeId(sampleCount.type),\n sampleCount.id);\n }\n \n emitRegisterStore(ins.dst[0], sampleCount);\n }\n void emitTextureQueryMsPos(\n const DxbcShaderInstruction& ins) {\n // samplepos has three operands:\n // (dst0) The destination register\n // (src0) Resource to query \n // (src1) Sample index\n if (m_samplePositions == 0)\n m_samplePositions = emitSamplePosArray();\n \n // The lookup index is qual to the sample count plus the\n // sample index, or 0 if the resource cannot be queried.\n DxbcRegisterValue sampleCount = emitQueryTextureSamples(ins.src[0]);\n DxbcRegisterValue sampleIndex = emitRegisterLoad(\n ins.src[1], DxbcRegMask(true, false, false, false));\n \n uint32_t lookupIndex = 
m_module.opIAdd(
    getVectorTypeId(sampleCount.type),
    sampleCount.id, sampleIndex.id);

  // Validate the parameters
  uint32_t sampleCountValid = m_module.opULessThanEqual(
    m_module.defBoolType(),
    sampleCount.id,
    m_module.constu32(16));

  uint32_t sampleIndexValid = m_module.opULessThan(
    m_module.defBoolType(),
    sampleIndex.id,
    sampleCount.id);

  // If the lookup cannot be performed, set the lookup
  // index to zero, which will return a zero vector.
  lookupIndex = m_module.opSelect(
    getVectorTypeId(sampleCount.type),
    m_module.opLogicalAnd(
      m_module.defBoolType(),
      sampleCountValid,
      sampleIndexValid),
    lookupIndex,
    m_module.constu32(0));

  // Load sample pos vector and write the masked
  // components to the destination register.
  // NOTE(review): assumes the array built by emitSamplePosArray is laid
  // out so that (sampleCount + sampleIndex) addresses the right entry,
  // with a zero vector at index 0 — verify against emitSamplePosArray.
  DxbcRegisterPointer samplePos;
  samplePos.type.ctype = DxbcScalarType::Float32;
  samplePos.type.ccount = 2;
  samplePos.id = m_module.opAccessChain(
    m_module.defPointerType(
      getVectorTypeId(samplePos.type),
      spv::StorageClassPrivate),
    m_samplePositions, 1, &lookupIndex);

  // Expand to vec4 by appending zeroes
  DxbcRegisterValue result = emitValueLoad(samplePos);

  DxbcRegisterValue zero;
  zero.type.ctype = DxbcScalarType::Float32;
  zero.type.ccount = 2;
  zero.id = m_module.constvec2f32(0.0f, 0.0f);

  result = emitRegisterConcat(result, zero);

  emitRegisterStore(ins.dst[0],
    emitRegisterSwizzle(result,
      ins.src[0].swizzle,
      ins.dst[0].mask));
}


/**
 * \brief Handles ld and ld2dms
 */
void emitTextureFetch(
  const DxbcShaderInstruction& ins) {
  // ld has three operands:
  //    (dst0) The destination register
  //    (src0) Source address
  //    (src1) Source texture
  // ld2dms has four operands:
  //    (dst0) The destination register
  //    (src0) Source address
  //    (src1) Source texture
  //    (src2) Sample number
  const auto& texture = m_textures.at(ins.src[1].idx[0].offset);
  const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);

  bool isMultisampled = ins.op == DxbcOpcode::LdMs
                     || ins.op == DxbcOpcode::LdMsS;

  // Load the texture coordinates. The last component
  // contains the LOD if the resource is an image.
  const DxbcRegisterValue address = emitRegisterLoad(
    ins.src[0], DxbcRegMask(true, true, true, true));

  // Additional image operands. This will store
  // the LOD and the address offset if present.
  SpirvImageOperands imageOperands;
  imageOperands.sparse = ins.dstCount == 2;

  if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {
    const std::array offsetIds = {
      imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,
      imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,
      imageLayerDim >= 3 ? m_module.consti32(ins.sampleControls.w) : 0,
    };

    imageOperands.flags |= spv::ImageOperandsConstOffsetMask;
    imageOperands.sConstOffset = offsetIds[0];

    if (imageLayerDim > 1) {
      imageOperands.sConstOffset = m_module.constComposite(
        getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),
        imageLayerDim, offsetIds.data());
    }
  }

  // The LOD is not present when reading from
  // a buffer or from a multisample texture.
  if (texture.imageInfo.dim != spv::DimBuffer && texture.imageInfo.ms == 0) {
    DxbcRegisterValue imageLod;

    if (!isMultisampled) {
      imageLod = emitRegisterExtract(
        address, DxbcRegMask(false, false, false, true));
    } else {
      // If we force-disabled MSAA, fetch from LOD 0
      imageLod.type = { DxbcScalarType::Uint32, 1 };
      imageLod.id = m_module.constu32(0);
    }

    imageOperands.flags |= spv::ImageOperandsLodMask;
    imageOperands.sLod = imageLod.id;
  }

  // The ld2dms instruction has a sample index, but we
  // are only allowed to set it for multisample views
  if (isMultisampled && texture.imageInfo.ms == 1) {
    DxbcRegisterValue sampleId = emitRegisterLoad(
      ins.src[2], DxbcRegMask(true, false, false, false));

    imageOperands.flags |= spv::ImageOperandsSampleMask;
    imageOperands.sSampleId = sampleId.id;
  }

  // Extract 
textureReg = ins.src[1 + isExtendedGather];
  const DxbcRegister& samplerReg = ins.src[2 + isExtendedGather];

  // Texture and sampler register IDs
  const auto& texture = m_textures.at(textureReg.idx[0].offset);
  const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);

  // Image type, which stores the image dimensions etc.
  const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);

  // Load the texture coordinates. SPIR-V allows these
  // to be float4 even if not all components are used.
  DxbcRegisterValue coord = emitLoadTexCoord(texCoordReg, texture.imageInfo);

  // Load reference value for depth-compare operations
  const bool isDepthCompare = ins.op == DxbcOpcode::Gather4C
                           || ins.op == DxbcOpcode::Gather4PoC
                           || ins.op == DxbcOpcode::Gather4CS
                           || ins.op == DxbcOpcode::Gather4PoCS;

  const DxbcRegisterValue referenceValue = isDepthCompare
    ? emitRegisterLoad(ins.src[3 + isExtendedGather],
        DxbcRegMask(true, false, false, false))
    : DxbcRegisterValue();

  // Accumulate additional image operands.
  SpirvImageOperands imageOperands;
  imageOperands.sparse = ins.dstCount == 2;

  if (isExtendedGather) {
    // Gather4Po variants provide a dynamic offset in src1
    m_module.enableCapability(spv::CapabilityImageGatherExtended);

    DxbcRegisterValue gatherOffset = emitRegisterLoad(
      ins.src[1], DxbcRegMask::firstN(imageLayerDim));

    imageOperands.flags |= spv::ImageOperandsOffsetMask;
    imageOperands.gOffset = gatherOffset.id;
  } else if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {
    const std::array offsetIds = {
      imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,
      imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,
      imageLayerDim >= 3 ? m_module.consti32(ins.sampleControls.w) : 0,
    };

    imageOperands.flags |= spv::ImageOperandsConstOffsetMask;
    imageOperands.sConstOffset = offsetIds[0];

    if (imageLayerDim > 1) {
      imageOperands.sConstOffset = m_module.constComposite(
        getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),
        imageLayerDim, offsetIds.data());
    }
  }

  // Gathering texels always returns a four-component
  // vector, even for the depth-compare variants.
  uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, isDepthCompare);

  DxbcVectorType texelType;
  texelType.ctype = texture.sampledType;
  texelType.ccount = 4;

  uint32_t texelTypeId = getVectorTypeId(texelType);
  uint32_t resultTypeId = texelTypeId;
  uint32_t resultId = 0;

  if (imageOperands.sparse)
    resultTypeId = getSparseResultTypeId(texelTypeId);

  if (sampledImageId) {
    switch (ins.op) {
      // Simple image gather operation
      case DxbcOpcode::Gather4:
      case DxbcOpcode::Gather4S:
      case DxbcOpcode::Gather4Po:
      case DxbcOpcode::Gather4PoS: {
        resultId = m_module.opImageGather(
          resultTypeId, sampledImageId, coord.id,
          m_module.consti32(samplerReg.swizzle[0]),
          imageOperands);
      } break;

      // Depth-compare operation
      case DxbcOpcode::Gather4C:
      case DxbcOpcode::Gather4CS:
      case DxbcOpcode::Gather4PoC:
      case DxbcOpcode::Gather4PoCS: {
        resultId = m_module.opImageDrefGather(
          resultTypeId, sampledImageId, coord.id,
          referenceValue.id, imageOperands);
      } break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }
  } else {
    Logger::warn(str::format("DxbcCompiler: ", ins.op, ": Unsupported image type"));
    resultId = m_module.constNull(resultTypeId);
  }

  // If necessary, deal with the sparse result
  DxbcRegisterValue result;
  result.type = texelType;
  result.id = imageOperands.sparse
    ? emitExtractSparseTexel(texelTypeId, resultId)
    : resultId;

  // Swizzle components using the texture swizzle
  // and the destination operand's write mask
  result = emitRegisterSwizzle(result,
    textureReg.swizzle, ins.dst[0].mask);

  emitRegisterStore(ins.dst[0], result);

  if (imageOperands.sparse)
    emitStoreSparseFeedback(ins.dst[1], resultId);
}


/**
 * \brief Handles all sample instruction variants
 */
void emitTextureSample(
  const DxbcShaderInstruction& ins) {
  // All sample instructions have at least these operands:
  //    (dst0) The destination register
  //    (src0) Texture coordinates
  //    (src1) The texture itself
  //    (src2) The sampler object
  const DxbcRegister& texCoordReg = ins.src[0];
  const DxbcRegister& textureReg = ins.src[1];
  const DxbcRegister& samplerReg = ins.src[2];

  // Texture and sampler register IDs
  const auto& texture = m_textures.at(textureReg.idx[0].offset);
  const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);
  const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);

  // Load the texture coordinates. SPIR-V allows these
  // to be float4 even if not all components are used.
  DxbcRegisterValue coord = emitLoadTexCoord(texCoordReg, texture.imageInfo);

  // Load reference value for depth-compare operations
  const bool isDepthCompare = ins.op == DxbcOpcode::SampleC
    || ins.op == DxbcOpcode::SampleClz
    || ins.op == DxbcOpcode::SampleCClampS
    || ins.op == DxbcOpcode::SampleClzS;

  const DxbcRegisterValue referenceValue = isDepthCompare
    ? emitRegisterLoad(ins.src[3], DxbcRegMask(true, false, false, false))
    : DxbcRegisterValue();

  // Load explicit gradients for sample operations that require them
  const bool hasExplicitGradients = ins.op == DxbcOpcode::SampleD
    || ins.op == DxbcOpcode::SampleDClampS;

  const DxbcRegisterValue explicitGradientX = hasExplicitGradients
    ? 
emitRegisterLoad(ins.src[3], DxbcRegMask::firstN(imageLayerDim))\n : DxbcRegisterValue();\n \n const DxbcRegisterValue explicitGradientY = hasExplicitGradients\n ? emitRegisterLoad(ins.src[4], DxbcRegMask::firstN(imageLayerDim))\n : DxbcRegisterValue();\n \n // LOD for certain sample operations\n const bool hasLod = ins.op == DxbcOpcode::SampleL\n || ins.op == DxbcOpcode::SampleLS\n || ins.op == DxbcOpcode::SampleB\n || ins.op == DxbcOpcode::SampleBClampS;\n \n const DxbcRegisterValue lod = hasLod\n ? emitRegisterLoad(ins.src[3], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n\n // Min LOD for certain sparse operations\n const bool hasMinLod = ins.op == DxbcOpcode::SampleClampS\n || ins.op == DxbcOpcode::SampleBClampS\n || ins.op == DxbcOpcode::SampleDClampS\n || ins.op == DxbcOpcode::SampleCClampS;\n\n const DxbcRegisterValue minLod = hasMinLod && ins.src[ins.srcCount - 1].type != DxbcOperandType::Null\n ? emitRegisterLoad(ins.src[ins.srcCount - 1], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n\n // Accumulate additional image operands. These are\n // not part of the actual operand token in SPIR-V.\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n\n if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {\n const std::array offsetIds = {\n imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,\n imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,\n imageLayerDim >= 3 ? 
m_module.consti32(ins.sampleControls.w) : 0,
      };

      imageOperands.flags |= spv::ImageOperandsConstOffsetMask;
      imageOperands.sConstOffset = offsetIds[0];

      // For multi-dimensional images the offsets form
      // a signed integer vector constant instead
      if (imageLayerDim > 1) {
        imageOperands.sConstOffset = m_module.constComposite(
          getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),
          imageLayerDim, offsetIds.data());
      }
    }

    if (hasMinLod) {
      m_module.enableCapability(spv::CapabilityMinLod);

      imageOperands.flags |= spv::ImageOperandsMinLodMask;
      imageOperands.sMinLod = minLod.id;
    }

    // Combine the texture and the sampler into a sampled image
    uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, isDepthCompare);

    // Sampling an image always returns a four-component
    // vector, whereas depth-compare ops return a scalar.
    DxbcVectorType texelType;
    texelType.ctype = texture.sampledType;
    texelType.ccount = isDepthCompare ? 1 : 4;

    uint32_t texelTypeId = getVectorTypeId(texelType);
    uint32_t resultTypeId = texelTypeId;
    uint32_t resultId = 0;

    // Sparse variants return a (residency code, texel) struct
    if (imageOperands.sparse)
      resultTypeId = getSparseResultTypeId(texelTypeId);

    if (sampledImageId) {
      switch (ins.op) {
        // Simple image sample operation
        case DxbcOpcode::Sample:
        case DxbcOpcode::SampleClampS: {
          resultId = m_module.opImageSampleImplicitLod(
            resultTypeId, sampledImageId, coord.id,
            imageOperands);
        } break;

        // Depth-compare operation
        case DxbcOpcode::SampleC:
        case DxbcOpcode::SampleCClampS: {
          resultId = m_module.opImageSampleDrefImplicitLod(
            resultTypeId, sampledImageId, coord.id,
            referenceValue.id, imageOperands);
        } break;

        // Depth-compare operation on mip level zero
        case DxbcOpcode::SampleClz:
        case DxbcOpcode::SampleClzS: {
          imageOperands.flags |= spv::ImageOperandsLodMask;
          imageOperands.sLod = m_module.constf32(0.0f);

          resultId = m_module.opImageSampleDrefExplicitLod(
            resultTypeId, sampledImageId, coord.id,
            referenceValue.id, imageOperands);
        } break;

        // Sample operation with explicit gradients
        case DxbcOpcode::SampleD:
        case DxbcOpcode::SampleDClampS: {
          imageOperands.flags |= spv::ImageOperandsGradMask;
          imageOperands.sGradX = explicitGradientX.id;
          imageOperands.sGradY = explicitGradientY.id;

          resultId = m_module.opImageSampleExplicitLod(
            resultTypeId, sampledImageId, coord.id,
            imageOperands);
        } break;

        // Sample operation with explicit LOD
        case DxbcOpcode::SampleL:
        case DxbcOpcode::SampleLS: {
          imageOperands.flags |= spv::ImageOperandsLodMask;
          imageOperands.sLod = lod.id;

          resultId = m_module.opImageSampleExplicitLod(
            resultTypeId, sampledImageId, coord.id,
            imageOperands);
        } break;

        // Sample operation with LOD bias
        case DxbcOpcode::SampleB:
        case DxbcOpcode::SampleBClampS: {
          imageOperands.flags |= spv::ImageOperandsBiasMask;
          imageOperands.sLodBias = lod.id;

          resultId = m_module.opImageSampleImplicitLod(
            resultTypeId, sampledImageId, coord.id,
            imageOperands);
        } break;

        default:
          Logger::warn(str::format(
            "DxbcCompiler: Unhandled instruction: ",
            ins.op));
          return;
      }
    } else {
      Logger::warn(str::format("DxbcCompiler: ", ins.op, ": Unsupported image type"));
      resultId = m_module.constNull(resultTypeId);
    }

    DxbcRegisterValue result;
    result.type = texelType;
    result.id = imageOperands.sparse
      ? emitExtractSparseTexel(texelTypeId, resultId)
      : resultId;

    // Swizzle components using the texture swizzle
    // and the destination operand's write mask
    // (scalar depth-compare results are stored as-is)
    if (result.type.ccount != 1) {
      result = emitRegisterSwizzle(result,
        textureReg.swizzle, ins.dst[0].mask);
    }

    emitRegisterStore(ins.dst[0], result);

    if (imageOperands.sparse)
      emitStoreSparseFeedback(ins.dst[1], resultId);
  }


  // Compiles load_uav_typed into OpImageRead, with coherence
  // operands and sparse-feedback handling.
  void emitTypedUavLoad(
    const DxbcShaderInstruction& ins) {
    // load_uav_typed has three operands:
    // (dst0) The destination register
    // (src0) The texture or buffer coordinates
    // (src1) The UAV to load from
    const uint32_t registerId = ins.src[1].idx[0].offset;
    const DxbcUav uavInfo = m_uavs.at(registerId);

    // Order this read against prior writes to the same UAV
    emitUavBarrier(uint64_t(1u) << registerId, 0u);

    // Load texture coordinates
    DxbcRegisterValue texCoord = emitLoadTexCoord(
      ins.src[0], uavInfo.imageInfo);

    SpirvImageOperands imageOperands;
    imageOperands.sparse = ins.dstCount == 2;

    // Coherent UAVs need non-private, visible texel access
    if (uavInfo.coherence) {
      imageOperands.flags |= spv::ImageOperandsNonPrivateTexelMask
        | spv::ImageOperandsMakeTexelVisibleMask;
      imageOperands.makeVisible = m_module.constu32(uavInfo.coherence);
    }

    DxbcVectorType texelType;
    texelType.ctype = uavInfo.sampledType;
    texelType.ccount = 4;

    uint32_t texelTypeId = getVectorTypeId(texelType);
    uint32_t resultTypeId = texelTypeId;
    uint32_t resultId = 0;

    if (imageOperands.sparse)
      resultTypeId = getSparseResultTypeId(texelTypeId);

    // Load source value from the UAV
    resultId = m_module.opImageRead(resultTypeId,
      m_module.opLoad(uavInfo.imageTypeId, uavInfo.varId),
      texCoord.id, imageOperands);

    // Apply component swizzle and mask
    DxbcRegisterValue uavValue;
    uavValue.type = texelType;
    uavValue.id = imageOperands.sparse
      ?
emitExtractSparseTexel(texelTypeId, resultId)
      : resultId;

    uavValue = emitRegisterSwizzle(uavValue,
      ins.src[1].swizzle, ins.dst[0].mask);

    emitRegisterStore(ins.dst[0], uavValue);

    if (imageOperands.sparse)
      emitStoreSparseFeedback(ins.dst[1], resultId);
  }


  // Compiles store_uav_typed into OpImageWrite.
  void emitTypedUavStore(
    const DxbcShaderInstruction& ins) {
    // store_uav_typed has three operands:
    // (dst0) The destination UAV
    // (src0) The texture or buffer coordinates
    // (src1) The value to store
    const DxbcBufferInfo uavInfo = getBufferInfo(ins.dst[0]);
    // Order this write against prior accesses to the same UAV
    emitUavBarrier(0u, uint64_t(1u) << ins.dst[0].idx[0].offset);

    // Set image operands for coherent access if necessary 
    SpirvImageOperands imageOperands;

    if (uavInfo.coherence) {
      imageOperands.flags |= spv::ImageOperandsNonPrivateTexelMask
        | spv::ImageOperandsMakeTexelAvailableMask;
      imageOperands.makeAvailable = m_module.constu32(uavInfo.coherence);
    }

    // Load texture coordinates
    DxbcRegisterValue texCoord = emitLoadTexCoord(ins.src[0], uavInfo.image);

    // Load the value that will be written to the image. We'll
    // have to cast it to the component type of the image.
    const DxbcRegisterValue texValue = emitRegisterBitcast(
      emitRegisterLoad(ins.src[1], DxbcRegMask(true, true, true, true)),
      uavInfo.stype);

    // Write the given value to the image
    m_module.opImageWrite(
      m_module.opLoad(uavInfo.typeId, uavInfo.varId),
      texCoord.id, texValue.id, imageOperands);
  }


  // Opens a structured 'if' construct. The header branch is
  // emitted later, once we know whether an 'else' exists.
  void emitControlFlowIf(
    const DxbcShaderInstruction& ins) {
    // Load the first component of the condition
    // operand and perform a zero test on it.
    const DxbcRegisterValue condition = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, false, false, false));

    // Declare the 'if' block. We do not know if there
    // will be an 'else' block or not, so we'll assume
    // that there is one and leave it empty otherwise.
    DxbcCfgBlock block;
    block.type = DxbcCfgBlockType::If;
    block.b_if.ztestId = emitRegisterZeroTest(condition, ins.controls.zeroTest()).id;
    block.b_if.labelIf = m_module.allocateId();
    block.b_if.labelElse = 0;
    block.b_if.labelEnd = m_module.allocateId();
    block.b_if.headerPtr = m_module.getInsertionPtr();
    m_controlFlowBlocks.push_back(block);

    // We'll insert the branch instruction when closing
    // the block, since we don't know whether or not an
    // else block is needed right now.
    m_module.opLabel(block.b_if.labelIf);
  }


  // Opens the 'else' branch of the innermost 'if' construct.
  void emitControlFlowElse(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() == 0
      || m_controlFlowBlocks.back().type != DxbcCfgBlockType::If
      || m_controlFlowBlocks.back().b_if.labelElse != 0)
      throw DxvkError("DxbcCompiler: 'Else' without 'If' found");

    // Set the 'Else' flag so that we do
    // not insert a dummy block on 'EndIf'
    DxbcCfgBlock& block = m_controlFlowBlocks.back();
    block.b_if.labelElse = m_module.allocateId();

    // Close the 'If' block by branching to
    // the merge block we declared earlier
    m_module.opBranch(block.b_if.labelEnd);
    m_module.opLabel (block.b_if.labelElse);
  }


  // Closes the innermost 'if' construct and emits its deferred
  // header (selection merge + conditional branch).
  void emitControlFlowEndIf(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() == 0
      || m_controlFlowBlocks.back().type != DxbcCfgBlockType::If)
      throw DxvkError("DxbcCompiler: 'EndIf' without 'If' found");

    // Remove the block from the stack, it's closed
    DxbcCfgBlock block = m_controlFlowBlocks.back();
    m_controlFlowBlocks.pop_back();

    // Write out the 'if' header
    m_module.beginInsertion(block.b_if.headerPtr);

    m_module.opSelectionMerge(
      block.b_if.labelEnd,
      spv::SelectionControlMaskNone);

    m_module.opBranchConditional(
      block.b_if.ztestId,
      block.b_if.labelIf,
      block.b_if.labelElse != 0
        ? block.b_if.labelElse
        : block.b_if.labelEnd);

    m_module.endInsertion();

    // End the active 'if' or 'else' block
    m_module.opBranch(block.b_if.labelEnd);
    m_module.opLabel (block.b_if.labelEnd);
  }


  // Opens a 'switch' construct. The OpSwitch itself is
  // emitted on 'EndSwitch', once all case labels are known.
  void emitControlFlowSwitch(
    const DxbcShaderInstruction& ins) {
    // Load the selector as a scalar unsigned integer
    const DxbcRegisterValue selector = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, false, false, false));

    // Declare switch block. We cannot insert the switch
    // instruction itself yet because the number of case
    // statements and blocks is unknown at this point.
    DxbcCfgBlock block;
    block.type = DxbcCfgBlockType::Switch;
    block.b_switch.insertPtr = m_module.getInsertionPtr();
    block.b_switch.selectorId = selector.id;
    block.b_switch.labelBreak = m_module.allocateId();
    block.b_switch.labelCase = m_module.allocateId();
    block.b_switch.labelDefault = 0;
    block.b_switch.labelCases = nullptr;
    m_controlFlowBlocks.push_back(block);

    // Define the first 'case' label
    m_module.opLabel(block.b_switch.labelCase);
  }


  // Records a 'case' label for the innermost 'switch' construct.
  void emitControlFlowCase(
    const DxbcShaderInstruction& ins) {
    if (m_controlFlowBlocks.size() == 0
      || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)
      throw DxvkError("DxbcCompiler: 'Case' without 'Switch' found");

    // The source operand must be a 32-bit immediate.
    if (ins.src[0].type != DxbcOperandType::Imm32)
      throw DxvkError("DxbcCompiler: Invalid operand type for 'Case'");

    // Use the last label allocated for 'case'.
    DxbcCfgBlockSwitch* block = &m_controlFlowBlocks.back().b_switch;

    // Open a fresh block if the previous case falls through
    if (caseBlockIsFallthrough()) {
      block->labelCase = m_module.allocateId();

      m_module.opBranch(block->labelCase);
      m_module.opLabel (block->labelCase);
    }

    // Prepend the literal-label pair to the linked list;
    // original order is restored when the switch is closed.
    DxbcSwitchLabel label;
    label.desc.literal = ins.src[0].imm.u32_1;
    label.desc.labelId = block->labelCase;
    label.next = block->labelCases;
    block->labelCases = new DxbcSwitchLabel(label);
  }


  void
emitControlFlowDefault(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)\n throw DxvkError(\"DxbcCompiler: 'Default' without 'Switch' found\");\n \n DxbcCfgBlockSwitch* block = &m_controlFlowBlocks.back().b_switch;\n\n if (caseBlockIsFallthrough()) {\n block->labelCase = m_module.allocateId();\n\n m_module.opBranch(block->labelCase);\n m_module.opLabel (block->labelCase);\n }\n\n // Set the last label allocated for 'case' as the default label.\n block->labelDefault = block->labelCase;\n }\n void emitControlFlowEndSwitch(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)\n throw DxvkError(\"DxbcCompiler: 'EndSwitch' without 'Switch' found\");\n \n // Remove the block from the stack, it's closed\n DxbcCfgBlock block = m_controlFlowBlocks.back();\n m_controlFlowBlocks.pop_back();\n\n if (!block.b_switch.labelDefault) {\n block.b_switch.labelDefault = caseBlockIsFallthrough()\n ? block.b_switch.labelBreak\n : block.b_switch.labelCase;\n }\n \n // Close the current 'case' block\n m_module.opBranch(block.b_switch.labelBreak);\n \n // Insert the 'switch' statement. 
For that, we need to\n // gather all the literal-label pairs for the construct.\n m_module.beginInsertion(block.b_switch.insertPtr);\n m_module.opSelectionMerge(\n block.b_switch.labelBreak,\n spv::SelectionControlMaskNone);\n \n // We'll restore the original order of the case labels here\n std::vector jumpTargets;\n for (auto i = block.b_switch.labelCases; i != nullptr; i = i->next)\n jumpTargets.insert(jumpTargets.begin(), i->desc);\n \n m_module.opSwitch(\n block.b_switch.selectorId,\n block.b_switch.labelDefault,\n jumpTargets.size(),\n jumpTargets.data());\n m_module.endInsertion();\n \n // Destroy the list of case labels\n // FIXME we're leaking memory if compilation fails.\n DxbcSwitchLabel* caseLabel = block.b_switch.labelCases;\n \n while (caseLabel != nullptr)\n delete std::exchange(caseLabel, caseLabel->next);\n\n // Begin new block after switch blocks\n m_module.opLabel(block.b_switch.labelBreak);\n }\n void emitControlFlowLoop(\n const DxbcShaderInstruction& ins) {\n // Declare the 'loop' block\n DxbcCfgBlock block;\n block.type = DxbcCfgBlockType::Loop;\n block.b_loop.labelHeader = m_module.allocateId();\n block.b_loop.labelBegin = m_module.allocateId();\n block.b_loop.labelContinue = m_module.allocateId();\n block.b_loop.labelBreak = m_module.allocateId();\n m_controlFlowBlocks.push_back(block);\n \n m_module.opBranch(block.b_loop.labelHeader);\n m_module.opLabel (block.b_loop.labelHeader);\n \n m_module.opLoopMerge(\n block.b_loop.labelBreak,\n block.b_loop.labelContinue,\n spv::LoopControlMaskNone);\n \n m_module.opBranch(block.b_loop.labelBegin);\n m_module.opLabel (block.b_loop.labelBegin);\n }\n void emitControlFlowEndLoop(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Loop)\n throw DxvkError(\"DxbcCompiler: 'EndLoop' without 'Loop' found\");\n \n // Remove the block from the stack, it's closed\n const DxbcCfgBlock block = m_controlFlowBlocks.back();\n 
m_controlFlowBlocks.pop_back();\n \n // Declare the continue block\n m_module.opBranch(block.b_loop.labelContinue);\n m_module.opLabel (block.b_loop.labelContinue);\n \n // Declare the merge block\n m_module.opBranch(block.b_loop.labelHeader);\n m_module.opLabel (block.b_loop.labelBreak);\n }\n void emitControlFlowBreak(\n const DxbcShaderInstruction& ins) {\n const bool isBreak = ins.op == DxbcOpcode::Break;\n \n DxbcCfgBlock* cfgBlock = isBreak\n ? cfgFindBlock({ DxbcCfgBlockType::Loop, DxbcCfgBlockType::Switch })\n : cfgFindBlock({ DxbcCfgBlockType::Loop });\n \n if (cfgBlock == nullptr)\n throw DxvkError(\"DxbcCompiler: 'Break' or 'Continue' outside 'Loop' or 'Switch' found\");\n \n if (cfgBlock->type == DxbcCfgBlockType::Loop) {\n m_module.opBranch(isBreak\n ? cfgBlock->b_loop.labelBreak\n : cfgBlock->b_loop.labelContinue);\n } else /* if (cfgBlock->type == DxbcCfgBlockType::Switch) */ {\n m_module.opBranch(cfgBlock->b_switch.labelBreak);\n }\n \n // Subsequent instructions assume that there is an open block\n const uint32_t labelId = m_module.allocateId();\n m_module.opLabel(labelId);\n \n // If this is on the same level as a switch-case construct,\n // rather than being nested inside an 'if' statement, close\n // the current 'case' block.\n if (m_controlFlowBlocks.back().type == DxbcCfgBlockType::Switch)\n cfgBlock->b_switch.labelCase = labelId;\n }\n void emitControlFlowBreakc(\n const DxbcShaderInstruction& ins) {\n const bool isBreak = ins.op == DxbcOpcode::Breakc;\n \n DxbcCfgBlock* cfgBlock = isBreak\n ? 
cfgFindBlock({ DxbcCfgBlockType::Loop, DxbcCfgBlockType::Switch })\n : cfgFindBlock({ DxbcCfgBlockType::Loop });\n \n if (cfgBlock == nullptr)\n throw DxvkError(\"DxbcCompiler: 'Breakc' or 'Continuec' outside 'Loop' or 'Switch' found\");\n \n // Perform zero test on the first component of the condition\n const DxbcRegisterValue condition = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n const DxbcRegisterValue zeroTest = emitRegisterZeroTest(\n condition, ins.controls.zeroTest());\n \n // We basically have to wrap this into an 'if' block\n const uint32_t breakBlock = m_module.allocateId();\n const uint32_t mergeBlock = m_module.allocateId();\n \n m_module.opSelectionMerge(mergeBlock,\n spv::SelectionControlMaskNone);\n \n m_module.opBranchConditional(\n zeroTest.id, breakBlock, mergeBlock);\n \n m_module.opLabel(breakBlock);\n \n if (cfgBlock->type == DxbcCfgBlockType::Loop) {\n m_module.opBranch(isBreak\n ? cfgBlock->b_loop.labelBreak\n : cfgBlock->b_loop.labelContinue);\n } else /* if (cfgBlock->type == DxbcCfgBlockType::Switch) */ {\n m_module.opBranch(cfgBlock->b_switch.labelBreak);\n }\n \n m_module.opLabel(mergeBlock);\n }\n void emitControlFlowRet(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() != 0) {\n uint32_t labelId = m_module.allocateId();\n \n m_module.opReturn();\n m_module.opLabel(labelId);\n\n // return can be used in place of break to terminate a case block\n if (m_controlFlowBlocks.back().type == DxbcCfgBlockType::Switch)\n m_controlFlowBlocks.back().b_switch.labelCase = labelId;\n\n m_topLevelIsUniform = false;\n } else {\n // Last instruction in the current function\n this->emitFunctionEnd();\n }\n }\n void emitControlFlowRetc(\n const DxbcShaderInstruction& ins) {\n // Perform zero test on the first component of the condition\n const DxbcRegisterValue condition = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n const DxbcRegisterValue zeroTest = 
emitRegisterZeroTest(\n condition, ins.controls.zeroTest());\n \n // We basically have to wrap this into an 'if' block\n const uint32_t returnLabel = m_module.allocateId();\n const uint32_t continueLabel = m_module.allocateId();\n \n m_module.opSelectionMerge(continueLabel,\n spv::SelectionControlMaskNone);\n \n m_module.opBranchConditional(\n zeroTest.id, returnLabel, continueLabel);\n \n m_module.opLabel(returnLabel);\n m_module.opReturn();\n\n m_module.opLabel(continueLabel);\n\n // The return condition may be non-uniform\n m_topLevelIsUniform = false;\n }\n void emitControlFlowDiscard(\n const DxbcShaderInstruction& ins) {\n // Discard actually has an operand that determines\n // whether or not the fragment should be discarded\n const DxbcRegisterValue condition = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n const DxbcRegisterValue zeroTest = emitRegisterZeroTest(\n condition, ins.controls.zeroTest());\n \n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n \n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(zeroTest.id, cond.labelIf, cond.labelEnd);\n \n m_module.opLabel(cond.labelIf);\n m_module.opDemoteToHelperInvocation();\n m_module.opBranch(cond.labelEnd);\n \n m_module.opLabel(cond.labelEnd);\n\n m_module.enableCapability(spv::CapabilityDemoteToHelperInvocation);\n\n // Discard is just retc in a trenchcoat\n m_topLevelIsUniform = false;\n }\n void emitControlFlowLabel(\n const DxbcShaderInstruction& ins) {\n uint32_t functionNr = ins.dst[0].idx[0].offset;\n uint32_t functionId = getFunctionId(functionNr);\n \n this->emitFunctionBegin(\n functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n \n m_module.opLabel(m_module.allocateId());\n m_module.setDebugName(functionId, str::format(\"label\", functionNr).c_str());\n \n m_insideFunction = true;\n\n // We have to 
assume that this function gets\n // called from non-uniform control flow\n m_topLevelIsUniform = false;\n }\n void emitControlFlowCall(\n const DxbcShaderInstruction& ins) {\n uint32_t functionNr = ins.src[0].idx[0].offset;\n uint32_t functionId = getFunctionId(functionNr);\n\n m_module.opFunctionCall(\n m_module.defVoidType(),\n functionId, 0, nullptr);\n }\n void emitControlFlowCallc(\n const DxbcShaderInstruction& ins) {\n uint32_t functionNr = ins.src[1].idx[0].offset;\n uint32_t functionId = getFunctionId(functionNr);\n\n // Perform zero test on the first component of the condition\n const DxbcRegisterValue condition = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n const DxbcRegisterValue zeroTest = emitRegisterZeroTest(\n condition, ins.controls.zeroTest());\n \n // We basically have to wrap this into an 'if' block\n const uint32_t callLabel = m_module.allocateId();\n const uint32_t skipLabel = m_module.allocateId();\n \n m_module.opSelectionMerge(skipLabel,\n spv::SelectionControlMaskNone);\n \n m_module.opBranchConditional(\n zeroTest.id, callLabel, skipLabel);\n \n m_module.opLabel(callLabel);\n m_module.opFunctionCall(\n m_module.defVoidType(),\n functionId, 0, nullptr);\n\n m_module.opBranch(skipLabel);\n m_module.opLabel(skipLabel);\n }\n void emitControlFlow(\n const DxbcShaderInstruction& ins) {\n switch (ins.op) {\n case DxbcOpcode::If:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowIf(ins);\n break;\n \n case DxbcOpcode::Else:\n this->emitControlFlowElse(ins);\n break;\n \n case DxbcOpcode::EndIf:\n this->emitControlFlowEndIf(ins);\n this->emitUavBarrier(0, 0);\n break;\n \n case DxbcOpcode::Switch:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowSwitch(ins);\n break;\n \n case DxbcOpcode::Case:\n this->emitControlFlowCase(ins);\n break;\n \n case DxbcOpcode::Default:\n this->emitControlFlowDefault(ins);\n break;\n \n case DxbcOpcode::EndSwitch:\n this->emitControlFlowEndSwitch(ins);\n 
this->emitUavBarrier(0, 0);\n break;\n\n case DxbcOpcode::Loop:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowLoop(ins);\n break;\n \n case DxbcOpcode::EndLoop:\n this->emitControlFlowEndLoop(ins);\n this->emitUavBarrier(0, 0);\n break;\n\n case DxbcOpcode::Break:\n case DxbcOpcode::Continue:\n this->emitControlFlowBreak(ins);\n break;\n \n case DxbcOpcode::Breakc:\n case DxbcOpcode::Continuec:\n this->emitControlFlowBreakc(ins);\n break;\n\n case DxbcOpcode::Ret:\n this->emitControlFlowRet(ins);\n break;\n\n case DxbcOpcode::Retc:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowRetc(ins);\n break;\n \n case DxbcOpcode::Discard:\n this->emitControlFlowDiscard(ins);\n break;\n \n case DxbcOpcode::Label:\n this->emitControlFlowLabel(ins);\n break;\n\n case DxbcOpcode::Call:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowCall(ins);\n this->emitUavBarrier(-1, -1);\n break;\n\n case DxbcOpcode::Callc:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowCallc(ins);\n this->emitUavBarrier(-1, -1);\n break;\n\n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n }\n }\n DxbcRegisterValue emitBuildConstVecf32(\n float x,\n float y,\n float z,\n float w,\n const DxbcRegMask& writeMask) {\n // TODO refactor these functions into one single template\n std::array ids = { 0, 0, 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0]) ids[componentIndex++] = m_module.constf32(x);\n if (writeMask[1]) ids[componentIndex++] = m_module.constf32(y);\n if (writeMask[2]) ids[componentIndex++] = m_module.constf32(z);\n if (writeMask[3]) ids[componentIndex++] = m_module.constf32(w);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? 
m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildConstVecu32(\n uint32_t x,\n uint32_t y,\n uint32_t z,\n uint32_t w,\n const DxbcRegMask& writeMask) {\n std::array ids = { 0, 0, 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0]) ids[componentIndex++] = m_module.constu32(x);\n if (writeMask[1]) ids[componentIndex++] = m_module.constu32(y);\n if (writeMask[2]) ids[componentIndex++] = m_module.constu32(z);\n if (writeMask[3]) ids[componentIndex++] = m_module.constu32(w);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildConstVeci32(\n int32_t x,\n int32_t y,\n int32_t z,\n int32_t w,\n const DxbcRegMask& writeMask) {\n std::array ids = { 0, 0, 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0]) ids[componentIndex++] = m_module.consti32(x);\n if (writeMask[1]) ids[componentIndex++] = m_module.consti32(y);\n if (writeMask[2]) ids[componentIndex++] = m_module.consti32(z);\n if (writeMask[3]) ids[componentIndex++] = m_module.consti32(w);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? 
m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildConstVecf64(\n double xy,\n double zw,\n const DxbcRegMask& writeMask) {\n std::array ids = { 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0] && writeMask[1]) ids[componentIndex++] = m_module.constf64(xy);\n if (writeMask[2] && writeMask[3]) ids[componentIndex++] = m_module.constf64(zw);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Float64;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildVector(\n DxbcRegisterValue scalar,\n uint32_t count) {\n if (count == 1)\n return scalar;\n\n std::array scalarIds =\n { scalar.id, scalar.id, scalar.id, scalar.id };\n\n DxbcRegisterValue result;\n result.type.ctype = scalar.type.ctype;\n result.type.ccount = count;\n result.id = m_module.constComposite(\n getVectorTypeId(result.type),\n count, scalarIds.data());\n return result;\n }\n DxbcRegisterValue emitBuildZeroVector(\n DxbcVectorType type) {\n DxbcRegisterValue result;\n result.type.ctype = type.ctype;\n result.type.ccount = 1;\n\n switch (type.ctype) {\n case DxbcScalarType::Float32: result.id = m_module.constf32(0.0f); break;\n case DxbcScalarType::Uint32: result.id = m_module.constu32(0u); break;\n case DxbcScalarType::Sint32: result.id = m_module.consti32(0); break;\n default: throw DxvkError(\"DxbcCompiler: Invalid scalar type\");\n }\n\n return emitBuildVector(result, type.ccount);\n }\n DxbcRegisterValue emitRegisterBitcast(\n DxbcRegisterValue srcValue,\n DxbcScalarType dstType) {\n DxbcScalarType srcType = srcValue.type.ctype;\n\n if (srcType == dstType)\n return srcValue;\n \n DxbcRegisterValue result;\n result.type.ctype = dstType;\n result.type.ccount = srcValue.type.ccount;\n\n if (isDoubleType(srcType)) 
result.type.ccount *= 2;\n if (isDoubleType(dstType)) result.type.ccount /= 2;\n\n result.id = m_module.opBitcast(\n getVectorTypeId(result.type),\n srcValue.id);\n return result;\n }\n DxbcRegisterValue emitRegisterSwizzle(\n DxbcRegisterValue value,\n DxbcRegSwizzle swizzle,\n DxbcRegMask writeMask) {\n if (value.type.ccount == 1)\n return emitRegisterExtend(value, writeMask.popCount());\n \n std::array indices;\n \n uint32_t dstIndex = 0;\n \n for (uint32_t i = 0; i < 4; i++) {\n if (writeMask[i])\n indices[dstIndex++] = swizzle[i];\n }\n \n // If the swizzle combined with the mask can be reduced\n // to a no-op, we don't need to insert any instructions.\n bool isIdentitySwizzle = dstIndex == value.type.ccount;\n \n for (uint32_t i = 0; i < dstIndex && isIdentitySwizzle; i++)\n isIdentitySwizzle &= indices[i] == i;\n \n if (isIdentitySwizzle)\n return value;\n \n // Use OpCompositeExtract if the resulting vector contains\n // only one component, and OpVectorShuffle if it is a vector.\n DxbcRegisterValue result;\n result.type.ctype = value.type.ctype;\n result.type.ccount = dstIndex;\n \n const uint32_t typeId = getVectorTypeId(result.type);\n \n if (dstIndex == 1) {\n result.id = m_module.opCompositeExtract(\n typeId, value.id, 1, indices.data());\n } else {\n result.id = m_module.opVectorShuffle(\n typeId, value.id, value.id,\n dstIndex, indices.data());\n }\n \n return result;\n }\n DxbcRegisterValue emitRegisterExtract(\n DxbcRegisterValue value,\n DxbcRegMask mask) {\n return emitRegisterSwizzle(value,\n DxbcRegSwizzle(0, 1, 2, 3), mask);\n }\n DxbcRegisterValue emitRegisterInsert(\n DxbcRegisterValue dstValue,\n DxbcRegisterValue srcValue,\n DxbcRegMask srcMask) {\n DxbcRegisterValue result;\n result.type = dstValue.type;\n \n const uint32_t typeId = getVectorTypeId(result.type);\n \n if (srcMask.popCount() == 0) {\n // Nothing to do if the insertion mask is empty\n result.id = dstValue.id;\n } else if (dstValue.type.ccount == 1) {\n // Both values are 
scalar, so the first component\n // of the write mask decides which one to take.\n result.id = srcMask[0] ? srcValue.id : dstValue.id;\n } else if (srcValue.type.ccount == 1) {\n // The source value is scalar. Since OpVectorShuffle\n // requires both arguments to be vectors, we have to\n // use OpCompositeInsert to modify the vector instead.\n const uint32_t componentId = srcMask.firstSet();\n \n result.id = m_module.opCompositeInsert(typeId,\n srcValue.id, dstValue.id, 1, &componentId);\n } else {\n // Both arguments are vectors. We can determine which\n // components to take from which vector and use the\n // OpVectorShuffle instruction.\n std::array components;\n uint32_t srcComponentId = dstValue.type.ccount;\n \n for (uint32_t i = 0; i < dstValue.type.ccount; i++)\n components.at(i) = srcMask[i] ? srcComponentId++ : i;\n \n result.id = m_module.opVectorShuffle(\n typeId, dstValue.id, srcValue.id,\n dstValue.type.ccount, components.data());\n }\n \n return result;\n }\n DxbcRegisterValue emitRegisterConcat(\n DxbcRegisterValue value1,\n DxbcRegisterValue value2) {\n std::array ids =\n {{ value1.id, value2.id }};\n \n DxbcRegisterValue result;\n result.type.ctype = value1.type.ctype;\n result.type.ccount = value1.type.ccount + value2.type.ccount;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n ids.size(), ids.data());\n return result;\n }\n DxbcRegisterValue emitRegisterExtend(\n DxbcRegisterValue value,\n uint32_t size) {\n if (size == 1)\n return value;\n \n std::array ids = {{\n value.id, value.id,\n value.id, value.id, \n }};\n \n DxbcRegisterValue result;\n result.type.ctype = value.type.ctype;\n result.type.ccount = size;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n size, ids.data());\n return result;\n }\n DxbcRegisterValue emitRegisterAbsolute(\n DxbcRegisterValue value) {\n const uint32_t typeId = getVectorTypeId(value.type);\n \n switch (value.type.ctype) {\n case DxbcScalarType::Float32: 
value.id = m_module.opFAbs(typeId, value.id); break;\n case DxbcScalarType::Float64: value.id = m_module.opFAbs(typeId, value.id); break;\n case DxbcScalarType::Sint32: value.id = m_module.opSAbs(typeId, value.id); break;\n case DxbcScalarType::Sint64: value.id = m_module.opSAbs(typeId, value.id); break;\n default: Logger::warn(\"DxbcCompiler: Cannot get absolute value for given type\");\n }\n \n return value;\n }\n DxbcRegisterValue emitRegisterNegate(\n DxbcRegisterValue value) {\n const uint32_t typeId = getVectorTypeId(value.type);\n \n switch (value.type.ctype) {\n case DxbcScalarType::Float32: value.id = m_module.opFNegate(typeId, value.id); break;\n case DxbcScalarType::Float64: value.id = m_module.opFNegate(typeId, value.id); break;\n case DxbcScalarType::Sint32: value.id = m_module.opSNegate(typeId, value.id); break;\n case DxbcScalarType::Sint64: value.id = m_module.opSNegate(typeId, value.id); break;\n default: Logger::warn(\"DxbcCompiler: Cannot negate given type\");\n }\n \n return value;\n }\n DxbcRegisterValue emitRegisterZeroTest(\n DxbcRegisterValue value,\n DxbcZeroTest test) {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Bool;\n result.type.ccount = 1;\n \n const uint32_t zeroId = m_module.constu32(0u);\n const uint32_t typeId = getVectorTypeId(result.type);\n \n result.id = test == DxbcZeroTest::TestZ\n ? 
m_module.opIEqual (typeId, value.id, zeroId)\n : m_module.opINotEqual(typeId, value.id, zeroId);\n return result;\n }\n DxbcRegisterValue emitRegisterMaskBits(\n DxbcRegisterValue value,\n uint32_t mask) {\n DxbcRegisterValue maskVector = emitBuildConstVecu32(\n mask, mask, mask, mask, DxbcRegMask::firstN(value.type.ccount));\n \n DxbcRegisterValue result;\n result.type = value.type;\n result.id = m_module.opBitwiseAnd(\n getVectorTypeId(result.type),\n value.id, maskVector.id);\n return result;\n }\n DxbcRegisterValue emitSrcOperandModifiers(\n DxbcRegisterValue value,\n DxbcRegModifiers modifiers) {\n if (modifiers.test(DxbcRegModifier::Abs))\n value = emitRegisterAbsolute(value);\n \n if (modifiers.test(DxbcRegModifier::Neg))\n value = emitRegisterNegate(value);\n return value;\n }\n DxbcRegisterValue emitDstOperandModifiers(\n DxbcRegisterValue value,\n DxbcOpModifiers modifiers) {\n const uint32_t typeId = getVectorTypeId(value.type);\n \n if (modifiers.saturate) {\n DxbcRegMask mask;\n DxbcRegisterValue vec0, vec1;\n\n if (value.type.ctype == DxbcScalarType::Float32) {\n mask = DxbcRegMask::firstN(value.type.ccount);\n vec0 = emitBuildConstVecf32(0.0f, 0.0f, 0.0f, 0.0f, mask);\n vec1 = emitBuildConstVecf32(1.0f, 1.0f, 1.0f, 1.0f, mask);\n } else if (value.type.ctype == DxbcScalarType::Float64) {\n mask = DxbcRegMask::firstN(value.type.ccount * 2);\n vec0 = emitBuildConstVecf64(0.0, 0.0, mask);\n vec1 = emitBuildConstVecf64(1.0, 1.0, mask);\n }\n\n if (mask)\n value.id = m_module.opNClamp(typeId, value.id, vec0.id, vec1.id);\n }\n \n return value;\n }\n uint32_t emitExtractSparseTexel(\n uint32_t texelTypeId,\n uint32_t resultId) {\n uint32_t index = 1;\n\n return m_module.opCompositeExtract(\n texelTypeId, resultId, 1, &index);\n }\n void emitStoreSparseFeedback(\n const DxbcRegister& feedbackRegister,\n uint32_t resultId) {\n if (feedbackRegister.type != DxbcOperandType::Null) {\n uint32_t index = 0;\n\n DxbcRegisterValue result;\n result.type = { 
DxbcScalarType::Uint32, 1 };\n result.id = m_module.opCompositeExtract(\n getScalarTypeId(DxbcScalarType::Uint32),\n resultId, 1, &index);\n\n emitRegisterStore(feedbackRegister, result);\n }\n }\n DxbcRegisterPointer emitArrayAccess(\n DxbcRegisterPointer pointer,\n spv::StorageClass sclass,\n uint32_t index) {\n uint32_t ptrTypeId = m_module.defPointerType(\n getVectorTypeId(pointer.type), sclass);\n \n DxbcRegisterPointer result;\n result.type = pointer.type;\n result.id = m_module.opAccessChain(\n ptrTypeId, pointer.id, 1, &index);\n return result;\n }\n uint32_t emitLoadSampledImage(\n const DxbcShaderResource& textureResource,\n const DxbcSampler& samplerResource,\n bool isDepthCompare) {\n uint32_t baseId = isDepthCompare\n ? textureResource.depthTypeId\n : textureResource.colorTypeId;\n\n if (!baseId)\n return 0;\n\n uint32_t sampledImageType = m_module.defSampledImageType(baseId);\n\n return m_module.opSampledImage(sampledImageType,\n m_module.opLoad(textureResource.imageTypeId, textureResource.varId),\n m_module.opLoad(samplerResource.typeId, samplerResource.varId));\n }\n DxbcRegisterPointer emitGetTempPtr(\n const DxbcRegister& operand) {\n // r# regs are indexed as follows:\n // (0) register index (immediate)\n uint32_t regIdx = operand.idx[0].offset;\n\n if (regIdx >= m_rRegs.size())\n m_rRegs.resize(regIdx + 1, 0u);\n\n if (!m_rRegs.at(regIdx)) {\n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = 4;\n info.type.alength = 0;\n info.sclass = spv::StorageClassPrivate;\n\n uint32_t varId = emitNewVariable(info);\n m_rRegs.at(regIdx) = varId;\n\n m_module.setDebugName(varId,\n str::format(\"r\", regIdx).c_str());\n }\n\n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = 4;\n result.id = m_rRegs.at(regIdx);\n return result;\n }\n DxbcRegisterPointer emitGetIndexableTempPtr(\n const DxbcRegister& operand) {\n return getIndexableTempPtr(operand, 
emitIndexLoad(operand.idx[1]));\n }\n DxbcRegisterPointer emitGetInputPtr(\n const DxbcRegister& operand) {\n // In the vertex and pixel stages,\n // v# regs are indexed as follows:\n // (0) register index (relative)\n // \n // In the tessellation and geometry\n // stages, the index has two dimensions:\n // (0) vertex index (relative)\n // (1) register index (relative)\n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = 4;\n \n std::array indices = {{ 0, 0 }};\n \n for (uint32_t i = 0; i < operand.idxDim; i++)\n indices.at(i) = emitIndexLoad(operand.idx[i]).id;\n \n // Pick the input array depending on\n // the program type and operand type\n struct InputArray {\n uint32_t id;\n spv::StorageClass sclass;\n };\n \n const InputArray array = [&] () -> InputArray {\n switch (operand.type) {\n case DxbcOperandType::InputControlPoint:\n return m_programInfo.type() == DxbcProgramType::HullShader\n ? InputArray { m_vArray, spv::StorageClassPrivate }\n : InputArray { m_ds.inputPerVertex, spv::StorageClassInput };\n case DxbcOperandType::InputPatchConstant:\n return m_programInfo.type() == DxbcProgramType::HullShader\n ? 
InputArray { m_hs.outputPerPatch, spv::StorageClassPrivate }\n : InputArray { m_ds.inputPerPatch, spv::StorageClassInput };\n case DxbcOperandType::OutputControlPoint:\n return InputArray { m_hs.outputPerVertex, spv::StorageClassOutput };\n default:\n return { m_vArray, spv::StorageClassPrivate };\n }\n }();\n \n DxbcRegisterInfo info;\n info.type.ctype = result.type.ctype;\n info.type.ccount = result.type.ccount;\n info.type.alength = 0;\n info.sclass = array.sclass;\n \n result.id = m_module.opAccessChain(\n getPointerTypeId(info), array.id,\n operand.idxDim, indices.data());\n \n return result;\n }\n DxbcRegisterPointer emitGetOutputPtr(\n const DxbcRegister& operand) {\n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n // Hull shaders are special in that they have two sets of\n // output registers, one for per-patch values and one for\n // per-vertex values.\n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = 4;\n \n uint32_t registerId = emitIndexLoad(operand.idx[0]).id;\n\n if (m_hs.currPhaseType == DxbcCompilerHsPhase::ControlPoint) {\n std::array indices = {{\n m_module.opLoad(m_module.defIntType(32, 0), m_hs.builtinInvocationId),\n registerId,\n }};\n \n uint32_t ptrTypeId = m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassOutput);\n \n result.id = m_module.opAccessChain(\n ptrTypeId, m_hs.outputPerVertex,\n indices.size(), indices.data());\n } else {\n uint32_t ptrTypeId = m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassPrivate);\n \n result.id = m_module.opAccessChain(\n ptrTypeId, m_hs.outputPerPatch,\n 1, ®isterId);\n }\n\n return result;\n } else {\n // Regular shaders have their output\n // registers set up at declaration time\n return m_oRegs.at(operand.idx[0].offset);\n }\n }\n DxbcRegisterPointer emitGetConstBufPtr(\n const DxbcRegister& operand);\n DxbcRegisterPointer emitGetImmConstBufPtr(\n const DxbcRegister& operand) {\n 
DxbcRegisterValue constId = emitIndexLoad(operand.idx[0]);\n\n if (m_icbArray) {\n // We pad the icb array with an extra zero vector, so we can\n // clamp the index and get correct robustness behaviour.\n constId.id = m_module.opUMin(getVectorTypeId(constId.type),\n constId.id, m_module.constu32(m_icbSize));\n\n DxbcRegisterInfo ptrInfo;\n ptrInfo.type.ctype = DxbcScalarType::Uint32;\n ptrInfo.type.ccount = m_icbComponents;\n ptrInfo.type.alength = 0;\n ptrInfo.sclass = spv::StorageClassPrivate;\n\n DxbcRegisterPointer result;\n result.type.ctype = ptrInfo.type.ctype;\n result.type.ccount = ptrInfo.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(ptrInfo),\n m_icbArray, 1, &constId.id);\n return result;\n } else if (m_constantBuffers.at(Icb_BindingSlotId).varId != 0) {\n const std::array indices =\n {{ m_module.consti32(0), constId.id }};\n \n DxbcRegisterInfo ptrInfo;\n ptrInfo.type.ctype = DxbcScalarType::Float32;\n ptrInfo.type.ccount = m_icbComponents;\n ptrInfo.type.alength = 0;\n ptrInfo.sclass = spv::StorageClassUniform;\n\n DxbcRegisterPointer result;\n result.type.ctype = ptrInfo.type.ctype;\n result.type.ccount = ptrInfo.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(ptrInfo),\n m_constantBuffers.at(Icb_BindingSlotId).varId,\n indices.size(), indices.data());\n return result;\n } else {\n throw DxvkError(\"DxbcCompiler: Immediate constant buffer not defined\");\n }\n }\n DxbcRegisterPointer emitGetOperandPtr(\n const DxbcRegister& operand) {\n switch (operand.type) {\n case DxbcOperandType::Temp:\n return emitGetTempPtr(operand);\n \n case DxbcOperandType::IndexableTemp:\n return emitGetIndexableTempPtr(operand);\n \n case DxbcOperandType::Input:\n case DxbcOperandType::InputControlPoint:\n case DxbcOperandType::InputPatchConstant:\n case DxbcOperandType::OutputControlPoint:\n return emitGetInputPtr(operand);\n \n case DxbcOperandType::Output:\n return emitGetOutputPtr(operand);\n \n case 
DxbcOperandType::ImmediateConstantBuffer:\n return emitGetImmConstBufPtr(operand);\n\n case DxbcOperandType::InputThreadId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinGlobalInvocationId };\n \n case DxbcOperandType::InputThreadGroupId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinWorkgroupId };\n \n case DxbcOperandType::InputThreadIdInGroup:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinLocalInvocationId };\n \n case DxbcOperandType::InputThreadIndexInGroup:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_cs.builtinLocalInvocationIndex };\n \n case DxbcOperandType::InputCoverageMask: {\n const std::array indices\n = {{ m_module.constu32(0) }};\n \n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassInput),\n m_ps.builtinSampleMaskIn,\n indices.size(), indices.data());\n return result;\n }\n \n case DxbcOperandType::OutputCoverageMask: {\n const std::array indices\n = {{ m_module.constu32(0) }};\n \n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassOutput),\n m_ps.builtinSampleMaskOut,\n indices.size(), indices.data());\n return result;\n }\n \n case DxbcOperandType::OutputDepth:\n case DxbcOperandType::OutputDepthGe:\n case DxbcOperandType::OutputDepthLe:\n return DxbcRegisterPointer {\n { DxbcScalarType::Float32, 1 },\n m_ps.builtinDepth };\n \n case DxbcOperandType::OutputStencilRef:\n return DxbcRegisterPointer {\n { DxbcScalarType::Sint32, 1 },\n m_ps.builtinStencilRef };\n\n case DxbcOperandType::InputPrimitiveId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_primitiveIdIn };\n \n case 
DxbcOperandType::InputDomainPoint:\n return DxbcRegisterPointer {\n { DxbcScalarType::Float32, 3 },\n m_ds.builtinTessCoord };\n \n case DxbcOperandType::OutputControlPointId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_hs.builtinInvocationId };\n \n case DxbcOperandType::InputForkInstanceId:\n case DxbcOperandType::InputJoinInstanceId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n getCurrentHsForkJoinPhase()->instanceIdPtr };\n \n case DxbcOperandType::InputGsInstanceId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_gs.builtinInvocationId };\n \n case DxbcOperandType::InputInnerCoverage:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_ps.builtinInnerCoverageId };\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled operand type: \",\n operand.type));\n }\n }\n DxbcRegisterPointer emitGetAtomicPointer(\n const DxbcRegister& operand,\n const DxbcRegister& address) {\n // Query information about the resource itself\n const uint32_t registerId = operand.idx[0].offset;\n const DxbcBufferInfo resourceInfo = getBufferInfo(operand);\n \n // For UAVs and shared memory, different methods\n // of obtaining the final pointer are used.\n bool isTgsm = operand.type == DxbcOperandType::ThreadGroupSharedMemory;\n bool isSsbo = resourceInfo.isSsbo;\n \n // Compute the actual address into the resource\n const DxbcRegisterValue addressValue = [&] {\n switch (resourceInfo.type) {\n case DxbcResourceType::Raw:\n return emitCalcBufferIndexRaw(emitRegisterLoad(\n address, DxbcRegMask(true, false, false, false)));\n \n case DxbcResourceType::Structured: {\n const DxbcRegisterValue addressComponents = emitRegisterLoad(\n address, DxbcRegMask(true, true, false, false));\n \n return emitCalcBufferIndexStructured(\n emitRegisterExtract(addressComponents, DxbcRegMask(true, false, false, false)),\n emitRegisterExtract(addressComponents, DxbcRegMask(false, true, false, false)),\n 
resourceInfo.stride);\n };\n \n case DxbcResourceType::Typed: {\n if (isTgsm)\n throw DxvkError(\"DxbcCompiler: TGSM cannot be typed\");\n \n return emitLoadTexCoord(address,\n m_uavs.at(registerId).imageInfo);\n }\n \n default:\n throw DxvkError(\"DxbcCompiler: Unhandled resource type\");\n }\n }();\n \n // Compute the actual pointer\n DxbcRegisterPointer result;\n result.type.ctype = resourceInfo.stype;\n result.type.ccount = 1;\n\n if (isTgsm) {\n result.id = m_module.opAccessChain(resourceInfo.typeId,\n resourceInfo.varId, 1, &addressValue.id);\n } else if (isSsbo) {\n uint32_t indices[2] = { m_module.constu32(0), addressValue.id };\n result.id = m_module.opAccessChain(resourceInfo.typeId,\n resourceInfo.varId, 2, indices);\n } else {\n result.id = m_module.opImageTexelPointer(\n m_module.defPointerType(getVectorTypeId(result.type), spv::StorageClassImage),\n resourceInfo.varId, addressValue.id, m_module.constu32(0));\n }\n\n return result;\n }\n DxbcRegisterValue emitQueryBufferSize(\n const DxbcRegister& resource) {\n const DxbcBufferInfo bufferInfo = getBufferInfo(resource);\n\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opArrayLength(\n getVectorTypeId(result.type),\n bufferInfo.varId, 0);\n\n return result;\n }\n DxbcRegisterValue emitQueryTexelBufferSize(\n const DxbcRegister& resource) {\n // Load the texel buffer object. 
This cannot be used with\n // constant buffers or any other type of resource.\n const DxbcBufferInfo bufferInfo = getBufferInfo(resource);\n \n const uint32_t bufferId = m_module.opLoad(\n bufferInfo.typeId, bufferInfo.varId);\n \n // We'll store this as a scalar unsigned integer\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opImageQuerySize(\n getVectorTypeId(result.type), bufferId);\n\n return result;\n }\n DxbcRegisterValue emitQueryTextureLods(\n const DxbcRegister& resource) {\n const DxbcBufferInfo info = getBufferInfo(resource);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n \n if (info.image.ms == 0 && info.image.sampled == 1) {\n result.id = m_module.opImageQueryLevels(\n getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId));\n } else {\n // Report one LOD in case of UAVs or multisampled images\n result.id = m_module.constu32(1);\n }\n\n return result;\n }\n DxbcRegisterValue emitQueryTextureSamples(\n const DxbcRegister& resource) {\n if (resource.type == DxbcOperandType::Rasterizer) {\n // SPIR-V has no gl_NumSamples equivalent, so we\n // have to work around it using a push constant\n if (!m_ps.pushConstantId)\n m_ps.pushConstantId = emitPushConstants();\n\n uint32_t uintTypeId = m_module.defIntType(32, 0);\n uint32_t ptrTypeId = m_module.defPointerType(uintTypeId, spv::StorageClassPushConstant);\n uint32_t index = m_module.constu32(0);\n\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opLoad(uintTypeId,\n m_module.opAccessChain(ptrTypeId, m_ps.pushConstantId, 1, &index));\n return result;\n } else {\n DxbcBufferInfo info = getBufferInfo(resource);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n\n if (info.image.ms) {\n result.id = m_module.opImageQuerySamples(\n 
getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId));\n } else {\n // OpImageQuerySamples requires MSAA images\n result.id = m_module.constu32(1);\n }\n \n return result;\n }\n }\n DxbcRegisterValue emitQueryTextureSize(\n const DxbcRegister& resource,\n DxbcRegisterValue lod) {\n const DxbcBufferInfo info = getBufferInfo(resource);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = getTexSizeDim(info.image);\n \n if (info.image.ms == 0 && info.image.sampled == 1) {\n result.id = m_module.opImageQuerySizeLod(\n getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId),\n lod.id);\n } else {\n result.id = m_module.opImageQuerySize(\n getVectorTypeId(result.type),\n m_module.opLoad(info.typeId, info.varId));\n }\n\n return result;\n }\n DxbcRegisterValue emitCalcBufferIndexStructured(\n DxbcRegisterValue structId,\n DxbcRegisterValue structOffset,\n uint32_t structStride) {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = 1;\n \n uint32_t typeId = getVectorTypeId(result.type);\n uint32_t offset = m_module.opShiftRightLogical(typeId, structOffset.id, m_module.consti32(2));\n \n result.id = m_module.opIAdd(typeId,\n m_module.opIMul(typeId, structId.id, m_module.consti32(structStride / 4)),\n offset);\n return result;\n }\n DxbcRegisterValue emitCalcBufferIndexRaw(\n DxbcRegisterValue byteOffset) {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = 1;\n \n uint32_t typeId = getVectorTypeId(result.type);\n result.id = m_module.opShiftRightLogical(typeId, byteOffset.id, m_module.consti32(2));\n return result;\n }\n DxbcRegisterValue emitCalcTexCoord(\n DxbcRegisterValue coordVector,\n const DxbcImageInfo& imageInfo) {\n const uint32_t dim = getTexCoordDim(imageInfo);\n \n if (dim != coordVector.type.ccount) {\n coordVector = emitRegisterExtract(\n coordVector, DxbcRegMask::firstN(dim)); \n }\n \n 
return coordVector;\n }\n DxbcRegisterValue emitLoadTexCoord(\n const DxbcRegister& coordReg,\n const DxbcImageInfo& imageInfo) {\n return emitCalcTexCoord(emitRegisterLoad(coordReg,\n DxbcRegMask(true, true, true, true)), imageInfo);\n }\n DxbcRegisterValue emitIndexLoad(\n DxbcRegIndex index) {\n if (index.relReg != nullptr) {\n DxbcRegisterValue result = emitRegisterLoad(\n *index.relReg, DxbcRegMask(true, false, false, false));\n \n if (index.offset != 0) {\n result.id = m_module.opIAdd(\n getVectorTypeId(result.type), result.id,\n m_module.consti32(index.offset));\n }\n \n return result;\n } else {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = 1;\n result.id = m_module.consti32(index.offset);\n return result;\n }\n }\n DxbcRegisterValue emitValueLoad(\n DxbcRegisterPointer ptr) {\n DxbcRegisterValue result;\n result.type = ptr.type;\n result.id = m_module.opLoad(\n getVectorTypeId(result.type),\n ptr.id);\n return result;\n }\n void emitValueStore(\n DxbcRegisterPointer ptr,\n DxbcRegisterValue value,\n DxbcRegMask writeMask) {\n // If the component types are not compatible,\n // we need to bit-cast the source variable.\n if (value.type.ctype != ptr.type.ctype)\n value = emitRegisterBitcast(value, ptr.type.ctype);\n \n // If the source value consists of only one component,\n // it is stored in all components of the destination.\n if (value.type.ccount == 1)\n value = emitRegisterExtend(value, writeMask.popCount());\n \n if (ptr.type.ccount == writeMask.popCount()) {\n // Simple case: We write to the entire register\n m_module.opStore(ptr.id, value.id);\n } else {\n // We only write to part of the destination\n // register, so we need to load and modify it\n DxbcRegisterValue tmp = emitValueLoad(ptr);\n tmp = emitRegisterInsert(tmp, value, writeMask);\n \n m_module.opStore(ptr.id, tmp.id);\n }\n }\n DxbcRegisterValue emitRegisterLoadRaw(\n const DxbcRegister& reg) {\n // Try to find index range for the given 
register\n const DxbcIndexRange* indexRange = nullptr;\n\n if (reg.idxDim && reg.idx[reg.idxDim - 1u].relReg) {\n uint32_t offset = reg.idx[reg.idxDim - 1u].offset;\n\n for (const auto& range : m_indexRanges) {\n if (reg.type == range.type && offset >= range.start && offset < range.start + range.length)\n indexRange = ⦥\n }\n }\n\n if (reg.type == DxbcOperandType::IndexableTemp || indexRange) {\n bool doBoundsCheck = reg.idx[reg.idxDim - 1u].relReg != nullptr;\n\n if (doBoundsCheck) {\n DxbcRegisterValue indexId = emitIndexLoad(reg.idx[reg.idxDim - 1u]);\n uint32_t boundsCheck = 0u;\n\n if (reg.type == DxbcOperandType::IndexableTemp) {\n boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), indexId.id,\n m_module.constu32(m_xRegs.at(reg.idx[0].offset).alength));\n } else {\n uint32_t adjustedId = m_module.opISub(getVectorTypeId(indexId.type),\n indexId.id, m_module.consti32(indexRange->start));\n\n boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), adjustedId,\n m_module.constu32(indexRange->length));\n }\n\n // Kind of ugly to have an empty else block here but there's no\n // way for us to know the current block ID for the phi below\n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelElse = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n\n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(boundsCheck, cond.labelIf, cond.labelElse);\n\n m_module.opLabel(cond.labelIf);\n\n DxbcRegisterValue returnValue = emitValueLoad(emitGetOperandPtr(reg));\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelElse);\n\n DxbcRegisterValue zeroValue = emitBuildZeroVector(returnValue.type);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n\n std::array phiLabels = {{\n { returnValue.id, cond.labelIf },\n { zeroValue.id, cond.labelElse },\n }};\n\n returnValue.id = m_module.opPhi(\n getVectorTypeId(returnValue.type),\n 
phiLabels.size(), phiLabels.data());\n return returnValue;\n }\n }\n\n DxbcRegisterValue value = emitValueLoad(emitGetOperandPtr(reg));\n\n // Pad icb values to a vec4 since the app may access components that are always 0\n if (reg.type == DxbcOperandType::ImmediateConstantBuffer && value.type.ccount < 4u) {\n DxbcVectorType zeroType;\n zeroType.ctype = value.type.ctype;\n zeroType.ccount = 4u - value.type.ccount;\n\n uint32_t zeroVector = emitBuildZeroVector(zeroType).id;\n\n std::array constituents = { value.id, zeroVector };\n\n value.type.ccount = 4u;\n value.id = m_module.opCompositeConstruct(getVectorTypeId(value.type),\n constituents.size(), constituents.data());\n }\n\n return value;\n }\n DxbcRegisterValue emitConstantBufferLoad(\n const DxbcRegister& reg,\n DxbcRegMask writeMask) {\n // Constant buffers take a two-dimensional index:\n // (0) register index (immediate)\n // (1) constant offset (relative)\n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = 4;\n info.type.alength = 0;\n info.sclass = spv::StorageClassUniform;\n \n uint32_t regId = reg.idx[0].offset;\n DxbcRegisterValue constId = emitIndexLoad(reg.idx[1]);\n \n uint32_t ptrTypeId = getPointerTypeId(info);\n \n const std::array indices =\n {{ m_module.consti32(0), constId.id }};\n \n DxbcRegisterPointer ptr;\n ptr.type.ctype = info.type.ctype;\n ptr.type.ccount = info.type.ccount;\n ptr.id = m_module.opAccessChain(ptrTypeId,\n m_constantBuffers.at(regId).varId,\n indices.size(), indices.data());\n\n // Load individual components from buffer\n std::array ccomps = { 0, 0, 0, 0 };\n std::array scomps = { 0, 0, 0, 0 };\n uint32_t scount = 0;\n\n for (uint32_t i = 0; i < 4; i++) {\n uint32_t sindex = reg.swizzle[i];\n\n if (!writeMask[i] || ccomps[sindex])\n continue;\n \n uint32_t componentId = m_module.constu32(sindex);\n uint32_t componentPtr = m_module.opAccessChain(\n m_module.defPointerType(\n getScalarTypeId(DxbcScalarType::Float32),\n 
spv::StorageClassUniform),\n ptr.id, 1, &componentId);\n \n ccomps[sindex] = m_module.opLoad(\n getScalarTypeId(DxbcScalarType::Float32),\n componentPtr);\n }\n\n for (uint32_t i = 0; i < 4; i++) {\n uint32_t sindex = reg.swizzle[i];\n \n if (writeMask[i])\n scomps[scount++] = ccomps[sindex];\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = scount;\n result.id = scomps[0];\n \n if (scount > 1) {\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n scount, scomps.data());\n }\n\n // Apply any post-processing that might be necessary\n result = emitRegisterBitcast(result, reg.dataType);\n result = emitSrcOperandModifiers(result, reg.modifiers);\n return result;\n }\n DxbcRegisterValue emitRegisterLoad(\n const DxbcRegister& reg,\n DxbcRegMask writeMask) {\n if (reg.type == DxbcOperandType::Imm32\n || reg.type == DxbcOperandType::Imm64) {\n DxbcRegisterValue result;\n \n if (reg.componentCount == DxbcComponentCount::Component1) {\n // Create one single u32 constant\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.constu32(reg.imm.u32_1);\n\n result = emitRegisterExtend(result, writeMask.popCount());\n } else if (reg.componentCount == DxbcComponentCount::Component4) {\n // Create a u32 vector with as many components as needed\n std::array indices = { };\n uint32_t indexId = 0;\n \n for (uint32_t i = 0; i < indices.size(); i++) {\n if (writeMask[i]) {\n indices.at(indexId++) =\n m_module.constu32(reg.imm.u32_4[i]);\n }\n }\n \n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = writeMask.popCount();\n result.id = indices.at(0);\n \n if (indexId > 1) {\n result.id = m_module.constComposite(\n getVectorTypeId(result.type),\n result.type.ccount, indices.data());\n }\n \n } else {\n // Something went horribly wrong in the decoder or the shader is broken\n throw DxvkError(\"DxbcCompiler: Invalid component count for immediate 
operand\");\n }\n \n // Cast constants to the requested type\n return emitRegisterBitcast(result, reg.dataType);\n } else if (reg.type == DxbcOperandType::ConstantBuffer) {\n return emitConstantBufferLoad(reg, writeMask);\n } else {\n // Load operand from the operand pointer\n DxbcRegisterValue result = emitRegisterLoadRaw(reg);\n \n // Apply operand swizzle to the operand value\n result = emitRegisterSwizzle(result, reg.swizzle, writeMask);\n \n // Cast it to the requested type. We need to do\n // this after the swizzling for 64-bit types.\n result = emitRegisterBitcast(result, reg.dataType);\n \n // Apply operand modifiers\n result = emitSrcOperandModifiers(result, reg.modifiers);\n return result;\n }\n }\n void emitRegisterStore(\n const DxbcRegister& reg,\n DxbcRegisterValue value) {\n if (reg.type == DxbcOperandType::IndexableTemp) {\n bool doBoundsCheck = reg.idx[1].relReg != nullptr;\n DxbcRegisterValue vectorId = emitIndexLoad(reg.idx[1]);\n\n if (doBoundsCheck) {\n uint32_t boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), vectorId.id,\n m_module.constu32(m_xRegs.at(reg.idx[0].offset).alength));\n \n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n \n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(boundsCheck, cond.labelIf, cond.labelEnd);\n \n m_module.opLabel(cond.labelIf);\n\n emitValueStore(getIndexableTempPtr(reg, vectorId), value, reg.mask);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n } else {\n emitValueStore(getIndexableTempPtr(reg, vectorId), value, reg.mask);\n }\n } else {\n emitValueStore(emitGetOperandPtr(reg), value, reg.mask);\n }\n }\n void emitInputSetup() {\n m_module.setLateConst(m_vArrayLengthId, &m_vArrayLength);\n\n // Copy all defined v# registers into the input array. 
Note\n // that the outer index of the array is the vertex index.\n const uint32_t vecTypeId = m_module.defVectorType(m_module.defFloatType(32), 4);\n const uint32_t dstPtrTypeId = m_module.defPointerType(vecTypeId, spv::StorageClassPrivate);\n \n for (uint32_t i = 0; i < m_vRegs.size(); i++) {\n if (m_vRegs.at(i).id != 0) {\n const uint32_t registerId = m_module.consti32(i);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n std::array indices\n = {{ m_module.consti32(v), registerId }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i).id, 1, indices.data());\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n dstPtrTypeId, m_vArray, 2, indices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n }\n }\n \n // Copy all system value registers into the array,\n // preserving any previously written contents.\n for (const DxbcSvMapping& map : m_vMappings) {\n const uint32_t registerId = m_module.consti32(map.regId);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n const DxbcRegisterValue value = [&] {\n switch (m_programInfo.type()) {\n case DxbcProgramType::GeometryShader: return emitGsSystemValueLoad(map.sv, map.regMask, v);\n default: throw DxvkError(str::format(\"DxbcCompiler: Unexpected stage: \", m_programInfo.type()));\n }\n }();\n \n std::array indices = {\n m_module.consti32(v), registerId,\n };\n \n DxbcRegisterPointer inputReg;\n inputReg.type.ctype = DxbcScalarType::Float32;\n inputReg.type.ccount = 4;\n inputReg.id = m_module.opAccessChain(dstPtrTypeId,\n m_vArray, indices.size(), indices.data());\n emitValueStore(inputReg, value, map.regMask);\n }\n }\n }\n void emitInputSetup(uint32_t 
vertexCount) {\n m_module.setLateConst(m_vArrayLengthId, &m_vArrayLength);\n\n // Copy all defined v# registers into the input array. Note\n // that the outer index of the array is the vertex index.\n const uint32_t vecTypeId = m_module.defVectorType(m_module.defFloatType(32), 4);\n const uint32_t dstPtrTypeId = m_module.defPointerType(vecTypeId, spv::StorageClassPrivate);\n \n for (uint32_t i = 0; i < m_vRegs.size(); i++) {\n if (m_vRegs.at(i).id != 0) {\n const uint32_t registerId = m_module.consti32(i);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n std::array indices\n = {{ m_module.consti32(v), registerId }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i).id, 1, indices.data());\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n dstPtrTypeId, m_vArray, 2, indices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n }\n }\n \n // Copy all system value registers into the array,\n // preserving any previously written contents.\n for (const DxbcSvMapping& map : m_vMappings) {\n const uint32_t registerId = m_module.consti32(map.regId);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n const DxbcRegisterValue value = [&] {\n switch (m_programInfo.type()) {\n case DxbcProgramType::GeometryShader: return emitGsSystemValueLoad(map.sv, map.regMask, v);\n default: throw DxvkError(str::format(\"DxbcCompiler: Unexpected stage: \", m_programInfo.type()));\n }\n }();\n \n std::array indices = {\n m_module.consti32(v), registerId,\n };\n \n DxbcRegisterPointer inputReg;\n inputReg.type.ctype = DxbcScalarType::Float32;\n inputReg.type.ccount = 4;\n inputReg.id = m_module.opAccessChain(dstPtrTypeId,\n 
m_vArray, indices.size(), indices.data());\n emitValueStore(inputReg, value, map.regMask);\n }\n }\n }\n void emitOutputSetup() {\n for (const DxbcSvMapping& svMapping : m_oMappings) {\n DxbcRegisterPointer outputReg = m_oRegs.at(svMapping.regId);\n \n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n uint32_t registerIndex = m_module.constu32(svMapping.regId);\n \n outputReg.type = { DxbcScalarType::Float32, 4 };\n outputReg.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(outputReg.type),\n spv::StorageClassPrivate),\n m_hs.outputPerPatch,\n 1, ®isterIndex);\n }\n \n auto sv = svMapping.sv;\n auto mask = svMapping.regMask;\n auto value = emitValueLoad(outputReg);\n \n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: emitVsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::GeometryShader: emitGsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::HullShader: emitHsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::DomainShader: emitDsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::PixelShader: emitPsSystemValueStore(sv, mask, value); break;\n default: break;\n }\n }\n }\n void emitOutputDepthClamp() {\n // HACK: Some drivers do not clamp FragDepth to [minDepth..maxDepth]\n // before writing to the depth attachment, but we do not have acccess\n // to those. 
Clamp to [0..1] instead.\n if (m_ps.builtinDepth) {\n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Float32, 1 };\n ptr.id = m_ps.builtinDepth;\n\n DxbcRegisterValue value = emitValueLoad(ptr);\n\n value.id = m_module.opNClamp(\n getVectorTypeId(ptr.type),\n value.id,\n m_module.constf32(0.0f),\n m_module.constf32(1.0f));\n \n emitValueStore(ptr, value,\n DxbcRegMask::firstN(1));\n }\n }\n void emitInitWorkgroupMemory() {\n bool hasTgsm = false;\n\n SpirvMemoryOperands memoryOperands;\n memoryOperands.flags = spv::MemoryAccessNonPrivatePointerMask;\n\n for (uint32_t i = 0; i < m_gRegs.size(); i++) {\n if (!m_gRegs[i].varId)\n continue;\n \n if (!m_cs.builtinLocalInvocationIndex) {\n m_cs.builtinLocalInvocationIndex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationIndex,\n \"vThreadIndexInGroup\");\n }\n\n uint32_t intTypeId = getScalarTypeId(DxbcScalarType::Uint32);\n uint32_t ptrTypeId = m_module.defPointerType(\n intTypeId, spv::StorageClassWorkgroup);\n\n uint32_t numElements = m_gRegs[i].type == DxbcResourceType::Structured\n ? 
m_gRegs[i].elementCount * m_gRegs[i].elementStride / 4\n : m_gRegs[i].elementCount / 4;\n \n uint32_t numThreads = m_cs.workgroupSizeX *\n m_cs.workgroupSizeY * m_cs.workgroupSizeZ;\n \n uint32_t numElementsPerThread = numElements / numThreads;\n uint32_t numElementsRemaining = numElements % numThreads;\n\n uint32_t threadId = m_module.opLoad(\n intTypeId, m_cs.builtinLocalInvocationIndex);\n uint32_t zeroId = m_module.constu32(0);\n\n for (uint32_t e = 0; e < numElementsPerThread; e++) {\n uint32_t ofsId = m_module.opIAdd(intTypeId, threadId,\n m_module.constu32(numThreads * e));\n \n uint32_t ptrId = m_module.opAccessChain(\n ptrTypeId, m_gRegs[i].varId, 1, &ofsId);\n\n m_module.opStore(ptrId, zeroId, memoryOperands);\n }\n\n if (numElementsRemaining) {\n uint32_t condition = m_module.opULessThan(\n m_module.defBoolType(), threadId,\n m_module.constu32(numElementsRemaining));\n \n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n\n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(condition, cond.labelIf, cond.labelEnd);\n\n m_module.opLabel(cond.labelIf);\n\n uint32_t ofsId = m_module.opIAdd(intTypeId, threadId,\n m_module.constu32(numThreads * numElementsPerThread));\n \n uint32_t ptrId = m_module.opAccessChain(\n ptrTypeId, m_gRegs[i].varId, 1, &ofsId);\n \n m_module.opStore(ptrId, zeroId, memoryOperands);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n }\n\n hasTgsm = true;\n }\n\n if (hasTgsm) {\n m_module.opControlBarrier(\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask));\n }\n }\n DxbcRegisterValue emitVsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask) {\n switch (sv) {\n case 
DxbcSystemValue::VertexId: {\n const uint32_t typeId = getScalarTypeId(DxbcScalarType::Uint32);\n \n if (m_vs.builtinVertexId == 0) {\n m_vs.builtinVertexId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInVertexIndex,\n \"vs_vertex_index\");\n }\n \n if (m_vs.builtinBaseVertex == 0) {\n m_vs.builtinBaseVertex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInBaseVertex,\n \"vs_base_vertex\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opISub(typeId,\n m_module.opLoad(typeId, m_vs.builtinVertexId),\n m_module.opLoad(typeId, m_vs.builtinBaseVertex));\n return result;\n } break;\n \n case DxbcSystemValue::InstanceId: {\n const uint32_t typeId = getScalarTypeId(DxbcScalarType::Uint32);\n \n if (m_vs.builtinInstanceId == 0) {\n m_vs.builtinInstanceId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInstanceIndex,\n \"vs_instance_index\");\n }\n \n if (m_vs.builtinBaseInstance == 0) {\n m_vs.builtinBaseInstance = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInBaseInstance,\n \"vs_base_instance\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opISub(typeId,\n m_module.opLoad(typeId, m_vs.builtinInstanceId),\n m_module.opLoad(typeId, m_vs.builtinBaseInstance));\n return result;\n } break;\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled VS SV input: \", sv));\n }\n }\n DxbcRegisterValue emitGsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n uint32_t vertexId) {\n switch (sv) {\n case DxbcSystemValue::Position: {\n uint32_t arrayIndex = m_module.consti32(vertexId);\n\n if (!m_positionIn) {\n m_positionIn = emitNewBuiltinVariable({\n { 
DxbcScalarType::Float32, 4, primitiveVertexCount(m_gs.inputPrimitive) },\n spv::StorageClassInput },\n spv::BuiltInPosition,\n \"in_position\");\n }\n\n DxbcRegisterPointer ptrIn;\n ptrIn.type.ctype = DxbcScalarType::Float32;\n ptrIn.type.ccount = 4;\n ptrIn.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(ptrIn.type), spv::StorageClassInput),\n m_positionIn, 1, &arrayIndex);\n \n return emitRegisterExtract(emitValueLoad(ptrIn), mask);\n } break;\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled GS SV input: \", sv));\n }\n }\n DxbcRegisterValue emitPsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask) {\n switch (sv) {\n case DxbcSystemValue::Position: {\n if (m_ps.builtinFragCoord == 0) {\n m_ps.builtinFragCoord = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 4, 0 },\n spv::StorageClassInput },\n spv::BuiltInFragCoord,\n \"ps_frag_coord\");\n }\n \n DxbcRegisterPointer ptrIn;\n ptrIn.type = { DxbcScalarType::Float32, 4 };\n ptrIn.id = m_ps.builtinFragCoord;\n \n // The X, Y and Z components of the SV_POSITION semantic\n // are identical to Vulkan's FragCoord builtin, but we\n // need to compute the reciprocal of the W component.\n DxbcRegisterValue fragCoord = emitValueLoad(ptrIn);\n \n uint32_t componentIndex = 3;\n uint32_t t_f32 = m_module.defFloatType(32);\n uint32_t v_wComp = m_module.opCompositeExtract(t_f32, fragCoord.id, 1, &componentIndex);\n v_wComp = m_module.opFDiv(t_f32, m_module.constf32(1.0f), v_wComp);\n \n fragCoord.id = m_module.opCompositeInsert(\n getVectorTypeId(fragCoord.type),\n v_wComp, fragCoord.id,\n 1, &componentIndex);\n \n return emitRegisterExtract(fragCoord, mask);\n } break;\n \n case DxbcSystemValue::IsFrontFace: {\n if (m_ps.builtinIsFrontFace == 0) {\n m_ps.builtinIsFrontFace = emitNewBuiltinVariable({\n { DxbcScalarType::Bool, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInFrontFacing,\n \"ps_is_front_face\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = 
DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opSelect(\n getVectorTypeId(result.type),\n m_module.opLoad(\n m_module.defBoolType(),\n m_ps.builtinIsFrontFace),\n m_module.constu32(0xFFFFFFFF),\n m_module.constu32(0x00000000));\n return result;\n } break;\n \n case DxbcSystemValue::PrimitiveId: {\n if (m_primitiveIdIn == 0) {\n m_module.enableCapability(spv::CapabilityGeometry);\n \n m_primitiveIdIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInPrimitiveId,\n \"ps_primitive_id\");\n }\n \n DxbcRegisterPointer ptrIn;\n ptrIn.type = { DxbcScalarType::Uint32, 1 };\n ptrIn.id = m_primitiveIdIn;\n \n return emitValueLoad(ptrIn);\n } break;\n \n case DxbcSystemValue::SampleIndex: {\n if (m_ps.builtinSampleId == 0) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n \n m_ps.builtinSampleId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInSampleId,\n \"ps_sample_id\");\n }\n \n DxbcRegisterPointer ptrIn;\n ptrIn.type.ctype = DxbcScalarType::Uint32;\n ptrIn.type.ccount = 1;\n ptrIn.id = m_ps.builtinSampleId;\n \n return emitValueLoad(ptrIn);\n } break;\n \n case DxbcSystemValue::RenderTargetId: {\n if (m_ps.builtinLayer == 0) {\n m_module.enableCapability(spv::CapabilityGeometry);\n \n m_ps.builtinLayer = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLayer,\n \"v_layer\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Uint32;\n ptr.type.ccount = 1;\n ptr.id = m_ps.builtinLayer;\n \n return emitValueLoad(ptr);\n } break;\n \n case DxbcSystemValue::ViewportId: {\n if (m_ps.builtinViewportId == 0) {\n m_module.enableCapability(spv::CapabilityMultiViewport);\n \n m_ps.builtinViewportId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInViewportIndex,\n \"v_viewport\");\n }\n \n 
DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Uint32;\n ptr.type.ccount = 1;\n ptr.id = m_ps.builtinViewportId;\n \n return emitValueLoad(ptr);\n } break;\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled PS SV input: \", sv));\n }\n }\n void emitVsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position: {\n if (!m_positionOut) {\n m_positionOut = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 4, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPosition,\n \"out_position\");\n }\n\n DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Float32;\n ptr.type.ccount = 4;\n ptr.id = m_positionOut;\n \n emitValueStore(ptr, value, mask);\n } break;\n \n case DxbcSystemValue::RenderTargetId: {\n if (m_programInfo.type() != DxbcProgramType::GeometryShader)\n m_module.enableCapability(spv::CapabilityShaderLayer);\n\n if (m_gs.builtinLayer == 0) {\n m_module.enableCapability(spv::CapabilityGeometry);\n\n m_gs.builtinLayer = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInLayer,\n \"o_layer\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Uint32, 1 };\n ptr.id = m_gs.builtinLayer;\n \n emitValueStore(\n ptr, emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n case DxbcSystemValue::ViewportId: {\n if (m_programInfo.type() != DxbcProgramType::GeometryShader)\n m_module.enableCapability(spv::CapabilityShaderViewportIndex);\n\n if (m_gs.builtinViewportId == 0) {\n m_module.enableCapability(spv::CapabilityMultiViewport);\n \n m_gs.builtinViewportId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInViewportIndex,\n \"o_viewport\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Uint32, 1};\n ptr.id = m_gs.builtinViewportId;\n \n emitValueStore(\n ptr, 
emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled VS SV output: \", sv));\n }\n }\n void emitHsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n if (sv >= DxbcSystemValue::FinalQuadUeq0EdgeTessFactor\n && sv <= DxbcSystemValue::FinalLineDensityTessFactor) {\n struct TessFactor {\n uint32_t array = 0;\n uint32_t index = 0;\n };\n \n static const std::array s_tessFactors = {{\n { m_hs.builtinTessLevelOuter, 0 }, // FinalQuadUeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalQuadVeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 2 }, // FinalQuadUeq1EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 3 }, // FinalQuadVeq1EdgeTessFactor\n { m_hs.builtinTessLevelInner, 0 }, // FinalQuadUInsideTessFactor\n { m_hs.builtinTessLevelInner, 1 }, // FinalQuadVInsideTessFactor\n { m_hs.builtinTessLevelOuter, 0 }, // FinalTriUeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalTriVeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 2 }, // FinalTriWeq0EdgeTessFactor\n { m_hs.builtinTessLevelInner, 0 }, // FinalTriInsideTessFactor\n { m_hs.builtinTessLevelOuter, 0 }, // FinalLineDensityTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalLineDetailTessFactor\n }};\n \n const TessFactor tessFactor = s_tessFactors.at(uint32_t(sv)\n - uint32_t(DxbcSystemValue::FinalQuadUeq0EdgeTessFactor));\n \n const uint32_t tessFactorArrayIndex\n = m_module.constu32(tessFactor.index);\n \n // Apply global tess factor limit\n float maxTessFactor = m_hs.maxTessFactor;\n\n if (m_moduleInfo.tess != nullptr) {\n if (m_moduleInfo.tess->maxTessFactor < maxTessFactor)\n maxTessFactor = m_moduleInfo.tess->maxTessFactor;\n }\n\n DxbcRegisterValue tessValue = emitRegisterExtract(value, mask);\n tessValue.id = m_module.opNClamp(getVectorTypeId(tessValue.type),\n tessValue.id, m_module.constf32(0.0f),\n 
m_module.constf32(maxTessFactor));\n \n DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Float32;\n ptr.type.ccount = 1;\n ptr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(ptr.type),\n spv::StorageClassOutput),\n tessFactor.array, 1,\n &tessFactorArrayIndex);\n \n emitValueStore(ptr, tessValue,\n DxbcRegMask(true, false, false, false));\n } else {\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled HS SV output: \", sv));\n }\n }\n void emitDsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::CullDistance:\n case DxbcSystemValue::ClipDistance:\n case DxbcSystemValue::RenderTargetId:\n case DxbcSystemValue::ViewportId:\n emitVsSystemValueStore(sv, mask, value);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled DS SV output: \", sv));\n }\n }\n void emitGsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::CullDistance:\n case DxbcSystemValue::ClipDistance:\n case DxbcSystemValue::RenderTargetId:\n case DxbcSystemValue::ViewportId:\n emitVsSystemValueStore(sv, mask, value);\n break;\n \n case DxbcSystemValue::PrimitiveId: {\n if (m_primitiveIdOut == 0) {\n m_primitiveIdOut = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPrimitiveId,\n \"gs_primitive_id\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Uint32, 1};\n ptr.id = m_primitiveIdOut;\n \n emitValueStore(\n ptr, emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled GS SV output: \", sv));\n }\n }\n void emitPsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n 
Logger::warn(str::format(\n \"DxbcCompiler: Unhandled PS SV output: \", sv));\n }\n void emitClipCullStore(\n DxbcSystemValue sv,\n uint32_t dstArray) {\n uint32_t offset = 0;\n \n if (dstArray == 0)\n return;\n \n for (auto e = m_osgn->begin(); e != m_osgn->end(); e++) {\n if (e->systemValue == sv) {\n DxbcRegisterPointer srcPtr = m_oRegs.at(e->registerId);\n DxbcRegisterValue srcValue = emitValueLoad(srcPtr);\n \n for (uint32_t i = 0; i < 4; i++) {\n if (e->componentMask[i]) {\n uint32_t offsetId = m_module.consti32(offset++);\n \n DxbcRegisterValue component = emitRegisterExtract(\n srcValue, DxbcRegMask::select(i));\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 1 };\n dstPtr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(dstPtr.type),\n spv::StorageClassOutput),\n dstArray, 1, &offsetId);\n \n emitValueStore(dstPtr, component,\n DxbcRegMask(true, false, false, false));\n }\n }\n }\n }\n }\n void emitClipCullLoad(\n DxbcSystemValue sv,\n uint32_t srcArray) {\n uint32_t offset = 0;\n \n if (srcArray == 0)\n return;\n \n for (auto e = m_isgn->begin(); e != m_isgn->end(); e++) {\n if (e->systemValue == sv) {\n // Load individual components from the source array\n uint32_t componentIndex = 0;\n std::array componentIds = {{ 0, 0, 0, 0 }};\n \n for (uint32_t i = 0; i < 4; i++) {\n if (e->componentMask[i]) {\n uint32_t offsetId = m_module.consti32(offset++);\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = { DxbcScalarType::Float32, 1 };\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(srcPtr.type),\n spv::StorageClassInput),\n srcArray, 1, &offsetId);\n \n componentIds[componentIndex++]\n = emitValueLoad(srcPtr).id;\n }\n }\n \n // Put everything into one vector\n DxbcRegisterValue dstValue;\n dstValue.type = { DxbcScalarType::Float32, componentIndex };\n dstValue.id = componentIds[0];\n \n if (componentIndex > 1) {\n dstValue.id = m_module.opCompositeConstruct(\n 
getVectorTypeId(dstValue.type),\n componentIndex, componentIds.data());\n }\n \n // Store vector to the input array\n uint32_t registerId = m_module.consti32(e->registerId);\n \n DxbcRegisterPointer dstInput;\n dstInput.type = { DxbcScalarType::Float32, 4 };\n dstInput.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(dstInput.type),\n spv::StorageClassPrivate),\n m_vArray, 1, ®isterId);\n \n emitValueStore(dstInput, dstValue, e->componentMask);\n }\n }\n }\n void emitPointSizeStore() {\n if (m_moduleInfo.options.needsPointSizeExport) {\n uint32_t pointSizeId = emitNewBuiltinVariable(DxbcRegisterInfo {\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPointSize,\n \"point_size\");\n\n m_module.opStore(pointSizeId, m_module.constf32(1.0f));\n }\n }\n void emitInit() {\n // Set up common capabilities for all shaders\n m_module.enableCapability(spv::CapabilityShader);\n m_module.enableCapability(spv::CapabilityImageQuery);\n \n // Initialize the shader module with capabilities\n // etc. 
Each shader type has its own peculiarities.\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: emitVsInit(); break;\n case DxbcProgramType::HullShader: emitHsInit(); break;\n case DxbcProgramType::DomainShader: emitDsInit(); break;\n case DxbcProgramType::GeometryShader: emitGsInit(); break;\n case DxbcProgramType::PixelShader: emitPsInit(); break;\n case DxbcProgramType::ComputeShader: emitCsInit(); break;\n default: throw DxvkError(\"Invalid shader stage\");\n }\n }\n void emitFunctionBegin(\n uint32_t entryPoint,\n uint32_t returnType,\n uint32_t funcType) {\n this->emitFunctionEnd();\n \n m_module.functionBegin(\n returnType, entryPoint, funcType,\n spv::FunctionControlMaskNone);\n \n m_insideFunction = true;\n }\n void emitFunctionEnd() {\n if (m_insideFunction) {\n m_module.opReturn();\n m_module.functionEnd();\n }\n \n m_insideFunction = false;\n }\n void emitFunctionLabel() {\n m_module.opLabel(m_module.allocateId());\n }\n void emitMainFunctionBegin() {\n this->emitFunctionBegin(\n m_entryPointId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitVsInit() {\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n m_module.enableCapability(spv::CapabilityDrawParameters);\n \n // Standard input array\n emitDclInputArray(0);\n \n // Cull/clip distances as outputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassOutput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassOutput);\n \n // Main function of the vertex shader\n m_vs.functionId = m_module.allocateId();\n m_module.setDebugName(m_vs.functionId, \"vs_main\");\n \n this->emitFunctionBegin(\n m_vs.functionId,\n m_module.defVoidType(),\n 
m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitHsInit() {\n m_module.enableCapability(spv::CapabilityTessellation);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n \n m_hs.builtinInvocationId = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInvocationId,\n \"vOutputControlPointId\");\n \n m_hs.builtinTessLevelOuter = emitBuiltinTessLevelOuter(spv::StorageClassOutput);\n m_hs.builtinTessLevelInner = emitBuiltinTessLevelInner(spv::StorageClassOutput);\n }\n void emitDsInit() {\n m_module.enableCapability(spv::CapabilityTessellation);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n \n m_ds.builtinTessLevelOuter = emitBuiltinTessLevelOuter(spv::StorageClassInput);\n m_ds.builtinTessLevelInner = emitBuiltinTessLevelInner(spv::StorageClassInput);\n \n // Cull/clip distances as outputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassOutput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassOutput);\n \n // Main function of the domain shader\n m_ds.functionId = m_module.allocateId();\n m_module.setDebugName(m_ds.functionId, \"ds_main\");\n \n this->emitFunctionBegin(\n m_ds.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitGsInit() {\n m_module.enableCapability(spv::CapabilityGeometry);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n\n // Enable capabilities for xfb mode if necessary\n if (m_moduleInfo.xfb) {\n 
m_module.enableCapability(spv::CapabilityGeometryStreams);\n m_module.enableCapability(spv::CapabilityTransformFeedback);\n \n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeXfb);\n }\n\n // We only need outputs if rasterization is enabled\n m_gs.needsOutputSetup = !m_moduleInfo.xfb\n || m_moduleInfo.xfb->rasterizedStream >= 0;\n \n // Cull/clip distances as outputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassOutput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassOutput);\n \n // Emit Xfb variables if necessary\n if (m_moduleInfo.xfb)\n emitXfbOutputDeclarations();\n\n // Main function of the vertex shader\n m_gs.functionId = m_module.allocateId();\n m_module.setDebugName(m_gs.functionId, \"gs_main\");\n \n this->emitFunctionBegin(\n m_gs.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitPsInit() {\n m_module.enableCapability(spv::CapabilityDerivativeControl);\n \n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeOriginUpperLeft);\n \n // Standard input array\n emitDclInputArray(0);\n \n // Cull/clip distances as inputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullIn.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassInput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullIn.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassInput);\n \n // Main function of the pixel shader\n m_ps.functionId = m_module.allocateId();\n m_module.setDebugName(m_ps.functionId, \"ps_main\");\n \n this->emitFunctionBegin(\n m_ps.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitCsInit() {\n // Main 
function of the compute shader\n m_cs.functionId = m_module.allocateId();\n m_module.setDebugName(m_cs.functionId, \"cs_main\");\n \n this->emitFunctionBegin(\n m_cs.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitVsFinalize() {\n this->emitMainFunctionBegin();\n this->emitInputSetup();\n m_module.opFunctionCall(\n m_module.defVoidType(),\n m_vs.functionId, 0, nullptr);\n this->emitOutputSetup();\n this->emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);\n this->emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);\n this->emitPointSizeStore();\n this->emitFunctionEnd();\n }\n void emitHsFinalize() {\n if (m_hs.cpPhase.functionId == 0)\n m_hs.cpPhase = this->emitNewHullShaderPassthroughPhase();\n \n // Control point phase\n this->emitMainFunctionBegin();\n this->emitInputSetup(m_hs.vertexCountIn);\n this->emitHsControlPointPhase(m_hs.cpPhase);\n this->emitHsPhaseBarrier();\n \n // Fork-join phases and output setup\n this->emitHsInvocationBlockBegin(1);\n \n for (const auto& phase : m_hs.forkPhases)\n this->emitHsForkJoinPhase(phase);\n \n for (const auto& phase : m_hs.joinPhases)\n this->emitHsForkJoinPhase(phase);\n \n this->emitOutputSetup();\n this->emitHsOutputSetup();\n this->emitHsInvocationBlockEnd();\n this->emitFunctionEnd();\n }\n void emitDsFinalize() {\n this->emitMainFunctionBegin();\n m_module.opFunctionCall(\n m_module.defVoidType(),\n m_ds.functionId, 0, nullptr);\n this->emitOutputSetup();\n this->emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);\n this->emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);\n this->emitFunctionEnd();\n }\n void emitGsFinalize() {\n if (!m_gs.invocationCount)\n m_module.setInvocations(m_entryPointId, 1);\n\n this->emitMainFunctionBegin();\n this->emitInputSetup(\n primitiveVertexCount(m_gs.inputPrimitive));\n m_module.opFunctionCall(\n 
m_module.defVoidType(),\n m_gs.functionId, 0, nullptr);\n // No output setup at this point as that was\n // already done during the EmitVertex step\n this->emitFunctionEnd();\n }\n void emitPsFinalize() {\n this->emitMainFunctionBegin();\n this->emitInputSetup();\n this->emitClipCullLoad(DxbcSystemValue::ClipDistance, m_clipDistances);\n this->emitClipCullLoad(DxbcSystemValue::CullDistance, m_cullDistances);\n\n if (m_hasRasterizerOrderedUav) {\n // For simplicity, just lock the entire fragment shader\n // if there are any rasterizer ordered views.\n m_module.enableExtension(\"SPV_EXT_fragment_shader_interlock\");\n\n if (m_module.hasCapability(spv::CapabilitySampleRateShading)\n && m_moduleInfo.options.enableSampleShadingInterlock) {\n m_module.enableCapability(spv::CapabilityFragmentShaderSampleInterlockEXT);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSampleInterlockOrderedEXT);\n } else {\n m_module.enableCapability(spv::CapabilityFragmentShaderPixelInterlockEXT);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModePixelInterlockOrderedEXT);\n }\n\n m_module.opBeginInvocationInterlock();\n }\n\n m_module.opFunctionCall(\n m_module.defVoidType(),\n m_ps.functionId, 0, nullptr);\n\n if (m_hasRasterizerOrderedUav)\n m_module.opEndInvocationInterlock();\n\n this->emitOutputSetup();\n\n if (m_moduleInfo.options.useDepthClipWorkaround)\n this->emitOutputDepthClamp();\n \n this->emitFunctionEnd();\n }\n void emitCsFinalize() {\n this->emitMainFunctionBegin();\n\n if (m_moduleInfo.options.zeroInitWorkgroupMemory)\n this->emitInitWorkgroupMemory();\n\n m_module.opFunctionCall(\n m_module.defVoidType(),\n m_cs.functionId, 0, nullptr);\n \n this->emitFunctionEnd();\n }\n void emitXfbOutputDeclarations() {\n for (uint32_t i = 0; i < m_moduleInfo.xfb->entryCount; i++) {\n const DxbcXfbEntry* xfbEntry = m_moduleInfo.xfb->entries + i;\n const DxbcSgnEntry* sigEntry = m_osgn->find(\n xfbEntry->semanticName,\n xfbEntry->semanticIndex,\n 
xfbEntry->streamId);\n\n if (sigEntry == nullptr)\n continue;\n \n DxbcRegisterInfo varInfo;\n varInfo.type.ctype = DxbcScalarType::Float32;\n varInfo.type.ccount = xfbEntry->componentCount;\n varInfo.type.alength = 0;\n varInfo.sclass = spv::StorageClassOutput;\n \n uint32_t dstComponentMask = (1 << xfbEntry->componentCount) - 1;\n uint32_t srcComponentMask = dstComponentMask\n << sigEntry->componentMask.firstSet()\n << xfbEntry->componentIndex;\n \n DxbcXfbVar xfbVar;\n xfbVar.varId = emitNewVariable(varInfo);\n xfbVar.streamId = xfbEntry->streamId;\n xfbVar.outputId = sigEntry->registerId;\n xfbVar.srcMask = DxbcRegMask(srcComponentMask);\n xfbVar.dstMask = DxbcRegMask(dstComponentMask);\n m_xfbVars.push_back(xfbVar);\n\n m_module.setDebugName(xfbVar.varId,\n str::format(\"xfb\", i).c_str());\n \n m_module.decorateXfb(xfbVar.varId,\n xfbEntry->streamId, xfbEntry->bufferId, xfbEntry->offset,\n m_moduleInfo.xfb->strides[xfbEntry->bufferId]);\n }\n\n // TODO Compact location/component assignment\n for (uint32_t i = 0; i < m_xfbVars.size(); i++) {\n m_xfbVars[i].location = i;\n m_xfbVars[i].component = 0;\n }\n\n for (uint32_t i = 0; i < m_xfbVars.size(); i++) {\n const DxbcXfbVar* var = &m_xfbVars[i];\n\n m_module.decorateLocation (var->varId, var->location);\n m_module.decorateComponent(var->varId, var->component);\n }\n }\n void emitXfbOutputSetup(\n uint32_t streamId,\n bool passthrough) {\n for (size_t i = 0; i < m_xfbVars.size(); i++) {\n if (m_xfbVars[i].streamId == streamId) {\n DxbcRegisterPointer srcPtr = passthrough\n ? 
m_vRegs[m_xfbVars[i].outputId]\n : m_oRegs[m_xfbVars[i].outputId];\n\n if (passthrough) {\n srcPtr = emitArrayAccess(srcPtr,\n spv::StorageClassInput,\n m_module.constu32(0));\n }\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type.ctype = DxbcScalarType::Float32;\n dstPtr.type.ccount = m_xfbVars[i].dstMask.popCount();\n dstPtr.id = m_xfbVars[i].varId;\n\n DxbcRegisterValue value = emitRegisterExtract(\n emitValueLoad(srcPtr), m_xfbVars[i].srcMask);\n emitValueStore(dstPtr, value, m_xfbVars[i].dstMask);\n }\n }\n }\n void emitHsControlPointPhase(\n const DxbcCompilerHsControlPointPhase& phase) {\n m_module.opFunctionCall(\n m_module.defVoidType(),\n phase.functionId, 0, nullptr);\n }\n void emitHsForkJoinPhase(\n const DxbcCompilerHsForkJoinPhase& phase) {\n for (uint32_t i = 0; i < phase.instanceCount; i++) {\n uint32_t invocationId = m_module.constu32(i);\n \n m_module.opFunctionCall(\n m_module.defVoidType(),\n phase.functionId, 1,\n &invocationId);\n }\n }\n void emitHsPhaseBarrier() {\n uint32_t exeScopeId = m_module.constu32(spv::ScopeWorkgroup);\n uint32_t memScopeId = m_module.constu32(spv::ScopeWorkgroup);\n uint32_t semanticId = m_module.constu32(\n spv::MemorySemanticsOutputMemoryMask |\n spv::MemorySemanticsAcquireReleaseMask |\n spv::MemorySemanticsMakeAvailableMask |\n spv::MemorySemanticsMakeVisibleMask);\n \n m_module.opControlBarrier(exeScopeId, memScopeId, semanticId);\n }\n void emitHsInvocationBlockBegin(\n uint32_t count) {\n uint32_t invocationId = m_module.opLoad(\n getScalarTypeId(DxbcScalarType::Uint32),\n m_hs.builtinInvocationId);\n \n uint32_t condition = m_module.opULessThan(\n m_module.defBoolType(), invocationId,\n m_module.constu32(count));\n \n m_hs.invocationBlockBegin = m_module.allocateId();\n m_hs.invocationBlockEnd = m_module.allocateId();\n \n m_module.opSelectionMerge(\n m_hs.invocationBlockEnd,\n spv::SelectionControlMaskNone);\n \n m_module.opBranchConditional(\n condition,\n m_hs.invocationBlockBegin,\n 
m_hs.invocationBlockEnd);\n \n m_module.opLabel(\n m_hs.invocationBlockBegin);\n }\n void emitHsInvocationBlockEnd() {\n m_module.opBranch (m_hs.invocationBlockEnd);\n m_module.opLabel (m_hs.invocationBlockEnd);\n \n m_hs.invocationBlockBegin = 0;\n m_hs.invocationBlockEnd = 0;\n }\n void emitHsOutputSetup() {\n uint32_t outputPerPatch = emitTessInterfacePerPatch(spv::StorageClassOutput);\n\n if (!outputPerPatch)\n return;\n\n uint32_t vecType = getVectorTypeId({ DxbcScalarType::Float32, 4 });\n\n uint32_t srcPtrType = m_module.defPointerType(vecType, spv::StorageClassPrivate);\n uint32_t dstPtrType = m_module.defPointerType(vecType, spv::StorageClassOutput);\n\n for (uint32_t i = 0; i < 32; i++) {\n if (m_hs.outputPerPatchMask & (1 << i)) {\n uint32_t index = m_module.constu32(i);\n\n uint32_t srcPtr = m_module.opAccessChain(srcPtrType, m_hs.outputPerPatch, 1, &index);\n uint32_t dstPtr = m_module.opAccessChain(dstPtrType, outputPerPatch, 1, &index);\n\n m_module.opStore(dstPtr, m_module.opLoad(vecType, srcPtr));\n }\n }\n }\n uint32_t emitTessInterfacePerPatch(\n spv::StorageClass storageClass) {\n const char* name = \"vPatch\";\n\n if (storageClass == spv::StorageClassPrivate)\n name = \"rPatch\";\n if (storageClass == spv::StorageClassOutput)\n name = \"oPatch\";\n \n uint32_t arrLen = m_psgn != nullptr ? 
m_psgn->maxRegisterCount() : 0;\n\n if (!arrLen)\n return 0;\n\n uint32_t vecType = m_module.defVectorType (m_module.defFloatType(32), 4);\n uint32_t arrType = m_module.defArrayType (vecType, m_module.constu32(arrLen));\n uint32_t ptrType = m_module.defPointerType(arrType, storageClass);\n uint32_t varId = m_module.newVar (ptrType, storageClass);\n \n m_module.setDebugName (varId, name);\n \n if (storageClass != spv::StorageClassPrivate) {\n m_module.decorate (varId, spv::DecorationPatch);\n m_module.decorateLocation (varId, 0);\n }\n\n return varId;\n }\n uint32_t emitTessInterfacePerVertex(\n spv::StorageClass storageClass,\n uint32_t vertexCount) {\n const bool isInput = storageClass == spv::StorageClassInput;\n \n uint32_t arrLen = isInput\n ? (m_isgn != nullptr ? m_isgn->maxRegisterCount() : 0)\n : (m_osgn != nullptr ? m_osgn->maxRegisterCount() : 0);\n \n if (!arrLen)\n return 0;\n \n uint32_t locIdx = m_psgn != nullptr\n ? m_psgn->maxRegisterCount()\n : 0;\n \n uint32_t vecType = m_module.defVectorType (m_module.defFloatType(32), 4);\n uint32_t arrTypeInner = m_module.defArrayType (vecType, m_module.constu32(arrLen));\n uint32_t arrTypeOuter = m_module.defArrayType (arrTypeInner, m_module.constu32(vertexCount));\n uint32_t ptrType = m_module.defPointerType(arrTypeOuter, storageClass);\n uint32_t varId = m_module.newVar (ptrType, storageClass);\n \n m_module.setDebugName (varId, isInput ? \"vVertex\" : \"oVertex\");\n m_module.decorateLocation (varId, locIdx);\n return varId;\n }\n void emitDclInputArray(\n uint32_t vertexCount) {\n DxbcVectorType info;\n info.ctype = DxbcScalarType::Float32;\n info.ccount = 4;\n\n // Define the array type. This will be two-dimensional\n // in some shaders, with the outer index representing\n // the vertex ID within an invocation.\n m_vArrayLength = m_isgn != nullptr ? 
std::max(1u, m_isgn->maxRegisterCount()) : 1;\n m_vArrayLengthId = m_module.lateConst32(getScalarTypeId(DxbcScalarType::Uint32));\n\n uint32_t vectorTypeId = getVectorTypeId(info);\n uint32_t arrayTypeId = m_module.defArrayType(vectorTypeId, m_vArrayLengthId);\n \n if (vertexCount != 0) {\n arrayTypeId = m_module.defArrayType(\n arrayTypeId, m_module.constu32(vertexCount));\n }\n \n // Define the actual variable. Note that this is private\n // because we will copy input registers and some system\n // variables to the array during the setup phase.\n const uint32_t ptrTypeId = m_module.defPointerType(\n arrayTypeId, spv::StorageClassPrivate);\n \n const uint32_t varId = m_module.newVar(\n ptrTypeId, spv::StorageClassPrivate);\n \n m_module.setDebugName(varId, \"shader_in\");\n m_vArray = varId;\n }\n uint32_t emitDclClipCullDistanceArray(\n uint32_t length,\n spv::BuiltIn builtIn,\n spv::StorageClass storageClass) {\n if (length == 0)\n return 0;\n \n uint32_t t_f32 = m_module.defFloatType(32);\n uint32_t t_arr = m_module.defArrayType(t_f32, m_module.constu32(length));\n uint32_t t_ptr = m_module.defPointerType(t_arr, storageClass);\n uint32_t varId = m_module.newVar(t_ptr, storageClass);\n \n m_module.decorateBuiltIn(varId, builtIn);\n m_module.setDebugName(varId,\n builtIn == spv::BuiltInClipDistance\n ? 
\"clip_distances\"\n : \"cull_distances\");\n \n return varId;\n }\n DxbcCompilerHsControlPointPhase emitNewHullShaderControlPointPhase() {\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr);\n \n uint32_t funId = m_module.allocateId();\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n this->emitFunctionLabel();\n \n DxbcCompilerHsControlPointPhase result;\n result.functionId = funId;\n return result;\n }\n DxbcCompilerHsControlPointPhase emitNewHullShaderPassthroughPhase() {\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr);\n \n // Begin passthrough function\n uint32_t funId = m_module.allocateId();\n m_module.setDebugName(funId, \"hs_passthrough\");\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n this->emitFunctionLabel();\n \n // We'll basically copy each input variable to the corresponding\n // output, using the shader's invocation ID as the array index.\n const uint32_t invocationId = m_module.opLoad(\n getScalarTypeId(DxbcScalarType::Uint32),\n m_hs.builtinInvocationId);\n \n for (auto i = m_isgn->begin(); i != m_isgn->end(); i++) {\n this->emitDclInput(\n i->registerId, m_hs.vertexCountIn,\n i->componentMask,\n DxbcSystemValue::None,\n DxbcInterpolationMode::Undefined);\n \n // Vector type index\n const std::array dstIndices\n = {{ invocationId, m_module.constu32(i->registerId) }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i->registerId).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i->registerId).id, 1, &invocationId);\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n\n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(dstPtr.type), 
spv::StorageClassOutput),\n m_hs.outputPerVertex, dstIndices.size(), dstIndices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n \n // End function\n this->emitFunctionEnd();\n \n DxbcCompilerHsControlPointPhase result;\n result.functionId = funId;\n return result;\n }\n DxbcCompilerHsForkJoinPhase emitNewHullShaderForkJoinPhase() {\n uint32_t argTypeId = m_module.defIntType(32, 0);\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 1, &argTypeId);\n \n uint32_t funId = m_module.allocateId();\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n \n uint32_t argId = m_module.functionParameter(argTypeId);\n this->emitFunctionLabel();\n \n DxbcCompilerHsForkJoinPhase result;\n result.functionId = funId;\n result.instanceId = argId;\n return result;\n }\n uint32_t emitSamplePosArray() {\n const std::array samplePosVectors = {{\n // Invalid sample count / unbound resource\n m_module.constvec2f32( 0.0f, 0.0f),\n // VK_SAMPLE_COUNT_1_BIT\n m_module.constvec2f32( 0.0f, 0.0f),\n // VK_SAMPLE_COUNT_2_BIT\n m_module.constvec2f32( 0.25f, 0.25f),\n m_module.constvec2f32(-0.25f,-0.25f),\n // VK_SAMPLE_COUNT_4_BIT\n m_module.constvec2f32(-0.125f,-0.375f),\n m_module.constvec2f32( 0.375f,-0.125f),\n m_module.constvec2f32(-0.375f, 0.125f),\n m_module.constvec2f32( 0.125f, 0.375f),\n // VK_SAMPLE_COUNT_8_BIT\n m_module.constvec2f32( 0.0625f,-0.1875f),\n m_module.constvec2f32(-0.0625f, 0.1875f),\n m_module.constvec2f32( 0.3125f, 0.0625f),\n m_module.constvec2f32(-0.1875f,-0.3125f),\n m_module.constvec2f32(-0.3125f, 0.3125f),\n m_module.constvec2f32(-0.4375f,-0.0625f),\n m_module.constvec2f32( 0.1875f, 0.4375f),\n m_module.constvec2f32( 0.4375f,-0.4375f),\n // VK_SAMPLE_COUNT_16_BIT\n m_module.constvec2f32( 0.0625f, 0.0625f),\n m_module.constvec2f32(-0.0625f,-0.1875f),\n m_module.constvec2f32(-0.1875f, 0.1250f),\n m_module.constvec2f32( 0.2500f,-0.0625f),\n 
m_module.constvec2f32(-0.3125f,-0.1250f),\n m_module.constvec2f32( 0.1250f, 0.3125f),\n m_module.constvec2f32( 0.3125f, 0.1875f),\n m_module.constvec2f32( 0.1875f,-0.3125f),\n m_module.constvec2f32(-0.1250f, 0.3750f),\n m_module.constvec2f32( 0.0000f,-0.4375f),\n m_module.constvec2f32(-0.2500f,-0.3750f),\n m_module.constvec2f32(-0.3750f, 0.2500f),\n m_module.constvec2f32(-0.5000f, 0.0000f),\n m_module.constvec2f32( 0.4375f,-0.2500f),\n m_module.constvec2f32( 0.3750f, 0.4375f),\n m_module.constvec2f32(-0.4375f,-0.5000f),\n }};\n \n uint32_t arrayTypeId = getArrayTypeId({\n DxbcScalarType::Float32, 2,\n static_cast(samplePosVectors.size()) });\n \n uint32_t samplePosArray = m_module.constComposite(\n arrayTypeId,\n samplePosVectors.size(),\n samplePosVectors.data());\n \n uint32_t varId = m_module.newVarInit(\n m_module.defPointerType(arrayTypeId, spv::StorageClassPrivate),\n spv::StorageClassPrivate, samplePosArray);\n \n m_module.setDebugName(varId, \"g_sample_pos\");\n m_module.decorate(varId, spv::DecorationNonWritable);\n return varId;\n }\n void emitFloatControl() {\n DxbcFloatControlFlags flags = m_moduleInfo.options.floatControl;\n\n if (flags.isClear())\n return;\n\n const uint32_t width32 = 32;\n const uint32_t width64 = 64;\n\n if (flags.test(DxbcFloatControlFlag::DenormFlushToZero32)) {\n m_module.enableCapability(spv::CapabilityDenormFlushToZero);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDenormFlushToZero, 1, &width32);\n }\n\n if (flags.test(DxbcFloatControlFlag::PreserveNan32)) {\n m_module.enableCapability(spv::CapabilitySignedZeroInfNanPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSignedZeroInfNanPreserve, 1, &width32);\n }\n\n if (m_module.hasCapability(spv::CapabilityFloat64)) {\n if (flags.test(DxbcFloatControlFlag::DenormPreserve64)) {\n m_module.enableCapability(spv::CapabilityDenormPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDenormPreserve, 1, &width64);\n }\n\n if 
(flags.test(DxbcFloatControlFlag::PreserveNan64)) {\n m_module.enableCapability(spv::CapabilitySignedZeroInfNanPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSignedZeroInfNanPreserve, 1, &width64);\n }\n }\n }\n uint32_t emitNewVariable(\n const DxbcRegisterInfo& info) {\n const uint32_t ptrTypeId = this->getPointerTypeId(info);\n return m_module.newVar(ptrTypeId, info.sclass);\n }\n uint32_t emitNewBuiltinVariable(\n const DxbcRegisterInfo& info,\n spv::BuiltIn builtIn,\n const char* name) {\n const uint32_t varId = emitNewVariable(info);\n \n if (name)\n m_module.setDebugName(varId, name);\n\n m_module.decorateBuiltIn(varId, builtIn);\n\n if (m_programInfo.type() == DxbcProgramType::PixelShader\n && info.type.ctype != DxbcScalarType::Float32\n && info.type.ctype != DxbcScalarType::Bool\n && info.sclass == spv::StorageClassInput)\n m_module.decorate(varId, spv::DecorationFlat);\n\n return varId;\n }\n uint32_t emitBuiltinTessLevelOuter(\n spv::StorageClass storageClass) {\n uint32_t id = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Float32, 0, 4 },\n storageClass },\n spv::BuiltInTessLevelOuter,\n \"bTessLevelOuter\");\n \n m_module.decorate(id, spv::DecorationPatch);\n return id;\n }\n uint32_t emitBuiltinTessLevelInner(\n spv::StorageClass storageClass) {\n uint32_t id = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Float32, 0, 2 },\n storageClass },\n spv::BuiltInTessLevelInner,\n \"bTessLevelInner\");\n \n m_module.decorate(id, spv::DecorationPatch);\n return id;\n }\n uint32_t emitPushConstants() {\n uint32_t uintTypeId = m_module.defIntType(32, 0);\n uint32_t structTypeId = m_module.defStructTypeUnique(1, &uintTypeId);\n\n m_module.setDebugName(structTypeId, \"pc_t\");\n m_module.setDebugMemberName(structTypeId, 0, \"RasterizerSampleCount\");\n m_module.memberDecorateOffset(structTypeId, 0, 0);\n\n uint32_t ptrTypeId = m_module.defPointerType(structTypeId, spv::StorageClassPushConstant);\n 
uint32_t varId = m_module.newVar(ptrTypeId, spv::StorageClassPushConstant);\n\n m_module.setDebugName(varId, \"pc\");\n return varId;\n }\n DxbcCfgBlock* cfgFindBlock(\n const std::initializer_list& types);\n DxbcBufferInfo getBufferInfo(\n const DxbcRegister& reg) {\n const uint32_t registerId = reg.idx[0].offset;\n \n switch (reg.type) {\n case DxbcOperandType::Resource: {\n const auto& texture = m_textures.at(registerId);\n\n DxbcBufferInfo result;\n result.image = texture.imageInfo;\n result.stype = texture.sampledType;\n result.type = texture.type;\n result.typeId = texture.imageTypeId;\n result.varId = texture.varId;\n result.stride = texture.structStride;\n result.coherence = 0;\n result.isSsbo = texture.isRawSsbo;\n return result;\n } break;\n \n case DxbcOperandType::UnorderedAccessView: {\n const auto& uav = m_uavs.at(registerId);\n\n DxbcBufferInfo result;\n result.image = uav.imageInfo;\n result.stype = uav.sampledType;\n result.type = uav.type;\n result.typeId = uav.imageTypeId;\n result.varId = uav.varId;\n result.stride = uav.structStride;\n result.coherence = uav.coherence;\n result.isSsbo = uav.isRawSsbo;\n return result;\n } break;\n \n case DxbcOperandType::ThreadGroupSharedMemory: {\n DxbcBufferInfo result;\n result.image = { spv::DimBuffer, 0, 0, 0 };\n result.stype = DxbcScalarType::Uint32;\n result.type = m_gRegs.at(registerId).type;\n result.typeId = m_module.defPointerType(\n getScalarTypeId(DxbcScalarType::Uint32),\n spv::StorageClassWorkgroup);\n result.varId = m_gRegs.at(registerId).varId;\n result.stride = m_gRegs.at(registerId).elementStride;\n result.coherence = spv::ScopeInvocation;\n result.isSsbo = false;\n return result;\n } break;\n \n default:\n throw DxvkError(str::format(\"DxbcCompiler: Invalid operand type for buffer: \", reg.type));\n }\n }\n uint32_t getTexSizeDim(\n const DxbcImageInfo& imageType) const {\n switch (imageType.dim) {\n case spv::DimBuffer: return 1 + imageType.array;\n case spv::Dim1D: return 1 + 
imageType.array;\n case spv::Dim2D: return 2 + imageType.array;\n case spv::Dim3D: return 3 + imageType.array;\n case spv::DimCube: return 2 + imageType.array;\n default: throw DxvkError(\"DxbcCompiler: getTexLayerDim: Unsupported image dimension\");\n }\n }\n uint32_t getTexLayerDim(\n const DxbcImageInfo& imageType) const {\n switch (imageType.dim) {\n case spv::DimBuffer: return 1;\n case spv::Dim1D: return 1;\n case spv::Dim2D: return 2;\n case spv::Dim3D: return 3;\n case spv::DimCube: return 3;\n default: throw DxvkError(\"DxbcCompiler: getTexLayerDim: Unsupported image dimension\");\n }\n }\n uint32_t getTexCoordDim(\n const DxbcImageInfo& imageType) const {\n return getTexLayerDim(imageType) + imageType.array;\n }\n DxbcRegMask getTexCoordMask(\n const DxbcImageInfo& imageType) const {\n return DxbcRegMask::firstN(getTexCoordDim(imageType));\n }\n DxbcVectorType getInputRegType(\n uint32_t regIdx) const {\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: {\n const DxbcSgnEntry* entry = m_isgn->findByRegister(regIdx);\n \n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n \n if (entry != nullptr) {\n result.ctype = entry->componentType;\n result.ccount = entry->componentMask.popCount();\n }\n \n return result;\n }\n\n case DxbcProgramType::DomainShader: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n return result;\n }\n\n default: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n\n if (m_isgn == nullptr || !m_isgn->findByRegister(regIdx))\n return result;\n\n DxbcRegMask mask(0u);\n DxbcRegMask used(0u);\n\n for (const auto& e : *m_isgn) {\n if (e.registerId == regIdx && !ignoreInputSystemValue(e.systemValue)) {\n mask |= e.componentMask;\n used |= e.componentUsed;\n }\n }\n\n if (m_programInfo.type() == DxbcProgramType::PixelShader) {\n if ((used.raw() & mask.raw()) == used.raw())\n mask = used;\n }\n\n 
result.ccount = mask.minComponents();\n return result;\n }\n }\n }\n DxbcVectorType getOutputRegType(\n uint32_t regIdx) const {\n switch (m_programInfo.type()) {\n case DxbcProgramType::PixelShader: {\n const DxbcSgnEntry* entry = m_osgn->findByRegister(regIdx);\n\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n \n if (entry != nullptr) {\n result.ctype = entry->componentType;\n result.ccount = entry->componentMask.popCount();\n }\n\n return result;\n }\n\n case DxbcProgramType::HullShader: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n return result;\n }\n\n default: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n\n if (m_osgn->findByRegister(regIdx))\n result.ccount = m_osgn->regMask(regIdx).minComponents();\n return result;\n }\n }\n }\n DxbcImageInfo getResourceType(\n DxbcResourceDim resourceType,\n bool isUav) const {\n uint32_t ms = m_moduleInfo.options.disableMsaa ? 0 : 1;\n\n switch (resourceType) {\n case DxbcResourceDim::Buffer: return { spv::DimBuffer, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_MAX_ENUM };\n case DxbcResourceDim::Texture1D: return { spv::Dim1D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_1D };\n case DxbcResourceDim::Texture1DArr: return { spv::Dim1D, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_1D_ARRAY };\n case DxbcResourceDim::Texture2D: return { spv::Dim2D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D };\n case DxbcResourceDim::Texture2DArr: return { spv::Dim2D, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D_ARRAY };\n case DxbcResourceDim::Texture2DMs: return { spv::Dim2D, 0, ms,isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D };\n case DxbcResourceDim::Texture2DMsArr: return { spv::Dim2D, 1, ms,isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D_ARRAY };\n case DxbcResourceDim::Texture3D: return { spv::Dim3D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_3D };\n case DxbcResourceDim::TextureCube: return { spv::DimCube, 0, 0, isUav ? 
2u : 1u, VK_IMAGE_VIEW_TYPE_CUBE };\n case DxbcResourceDim::TextureCubeArr: return { spv::DimCube, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_CUBE_ARRAY };\n default: throw DxvkError(str::format(\"DxbcCompiler: Unsupported resource type: \", resourceType));\n }\n }\n spv::ImageFormat getScalarImageFormat(\n DxbcScalarType type) const {\n switch (type) {\n case DxbcScalarType::Float32: return spv::ImageFormatR32f;\n case DxbcScalarType::Sint32: return spv::ImageFormatR32i;\n case DxbcScalarType::Uint32: return spv::ImageFormatR32ui;\n default: throw DxvkError(\"DxbcCompiler: Unhandled scalar resource type\");\n }\n }\n bool isDoubleType(\n DxbcScalarType type) const {\n return type == DxbcScalarType::Sint64\n || type == DxbcScalarType::Uint64\n || type == DxbcScalarType::Float64;\n }\n DxbcRegisterPointer getIndexableTempPtr(\n const DxbcRegister& operand,\n DxbcRegisterValue vectorId) {\n // x# regs are indexed as follows:\n // (0) register index (immediate)\n // (1) element index (relative)\n const uint32_t regId = operand.idx[0].offset;\n \n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = m_xRegs[regId].ccount;\n info.type.alength = 0;\n info.sclass = spv::StorageClassPrivate;\n \n DxbcRegisterPointer result;\n result.type.ctype = info.type.ctype;\n result.type.ccount = info.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(info),\n m_xRegs.at(regId).varId,\n 1, &vectorId.id);\n\n return result;\n }\n bool caseBlockIsFallthrough() const {\n return m_lastOp != DxbcOpcode::Case\n && m_lastOp != DxbcOpcode::Default\n && m_lastOp != DxbcOpcode::Break\n && m_lastOp != DxbcOpcode::Ret;\n }\n uint32_t getUavCoherence(\n uint32_t registerId,\n DxbcUavFlags flags) {\n // For any ROV with write access, we must ensure that\n // availability operations happen within the locked scope.\n if (flags.test(DxbcUavFlag::RasterizerOrdered)\n && (m_analysis->uavInfos[registerId].accessFlags & VK_ACCESS_SHADER_WRITE_BIT)) {\n 
m_hasGloballyCoherentUav = true;\n m_hasRasterizerOrderedUav = true;\n return spv::ScopeQueueFamily;\n }\n\n // Ignore any resources that can't both be read and written in\n // the current shader, explicit availability/visibility operands\n // are not useful in that case.\n if (m_analysis->uavInfos[registerId].accessFlags != (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT))\n return 0;\n\n // If the globally coherent flag is set, the resource must be\n // coherent across multiple workgroups of the same dispatch\n if (flags.test(DxbcUavFlag::GloballyCoherent)) {\n m_hasGloballyCoherentUav = true;\n return spv::ScopeQueueFamily;\n }\n\n // In compute shaders, UAVs are implicitly workgroup coherent,\n // but we can rely on memory barrier instructions to make any\n // access available and visible to the entire workgroup.\n if (m_programInfo.type() == DxbcProgramType::ComputeShader)\n return spv::ScopeInvocation;\n\n return 0;\n }\n bool ignoreInputSystemValue(\n DxbcSystemValue sv) const {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::IsFrontFace:\n case DxbcSystemValue::SampleIndex:\n case DxbcSystemValue::PrimitiveId:\n case DxbcSystemValue::Coverage:\n return m_programInfo.type() == DxbcProgramType::PixelShader;\n\n default:\n return false;\n }\n }\n void emitUavBarrier(\n uint64_t readMask,\n uint64_t writeMask) {\n if (!m_moduleInfo.options.forceComputeUavBarriers\n || m_programInfo.type() != DxbcProgramType::ComputeShader)\n return;\n\n // If both masks are 0, emit a barrier in case at least one read-write UAV\n // has a pending unsynchronized access. Only consider read-after-write and\n // write-after-read hazards, assume that back-to-back stores are safe and\n // do not overlap in memory. 
Atomics are also completely ignored here.\n uint64_t rdMask = m_uavRdMask;\n uint64_t wrMask = m_uavWrMask;\n\n bool insertBarrier = bool(rdMask & wrMask);\n\n if (readMask || writeMask) {\n rdMask &= m_uavWrMask;\n wrMask &= m_uavRdMask;\n }\n\n for (auto uav : bit::BitMask(rdMask | wrMask)) {\n constexpr VkAccessFlags rwAccess = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n insertBarrier |= (m_analysis->uavInfos[uav].accessFlags & rwAccess) == rwAccess;\n }\n\n // Need to be in uniform top-level control flow, or otherwise\n // it is not safe to insert control barriers.\n if (insertBarrier && m_controlFlowBlocks.empty() && m_topLevelIsUniform) {\n m_module.opControlBarrier(\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(m_hasGloballyCoherentUav ? spv::ScopeQueueFamily : spv::ScopeWorkgroup),\n m_module.constu32(spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsImageMemoryMask\n | spv::MemorySemanticsUniformMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask));\n\n m_uavWrMask = 0u;\n m_uavRdMask = 0u;\n }\n\n // Mark pending accesses\n m_uavWrMask |= writeMask;\n m_uavRdMask |= readMask;\n }\n uint32_t getScalarTypeId(\n DxbcScalarType type) {\n if (type == DxbcScalarType::Float64)\n m_module.enableCapability(spv::CapabilityFloat64);\n \n if (type == DxbcScalarType::Sint64 || type == DxbcScalarType::Uint64)\n m_module.enableCapability(spv::CapabilityInt64);\n \n switch (type) {\n case DxbcScalarType::Uint32: return m_module.defIntType(32, 0);\n case DxbcScalarType::Uint64: return m_module.defIntType(64, 0);\n case DxbcScalarType::Sint32: return m_module.defIntType(32, 1);\n case DxbcScalarType::Sint64: return m_module.defIntType(64, 1);\n case DxbcScalarType::Float32: return m_module.defFloatType(32);\n case DxbcScalarType::Float64: return m_module.defFloatType(64);\n case DxbcScalarType::Bool: return m_module.defBoolType();\n }\n\n throw 
DxvkError(\"DxbcCompiler: Invalid scalar type\");\n }\n uint32_t getVectorTypeId(\n const DxbcVectorType& type) {\n uint32_t typeId = this->getScalarTypeId(type.ctype);\n \n if (type.ccount > 1)\n typeId = m_module.defVectorType(typeId, type.ccount);\n \n return typeId;\n }\n uint32_t getArrayTypeId(\n const DxbcArrayType& type) {\n DxbcVectorType vtype;\n vtype.ctype = type.ctype;\n vtype.ccount = type.ccount;\n \n uint32_t typeId = this->getVectorTypeId(vtype);\n \n if (type.alength != 0) {\n typeId = m_module.defArrayType(typeId,\n m_module.constu32(type.alength));\n }\n \n return typeId;\n }\n uint32_t getPointerTypeId(\n const DxbcRegisterInfo& type) {\n return m_module.defPointerType(\n this->getArrayTypeId(type.type),\n type.sclass);\n }\n uint32_t getSparseResultTypeId(\n uint32_t baseType) {\n m_module.enableCapability(spv::CapabilitySparseResidency);\n\n uint32_t uintType = getScalarTypeId(DxbcScalarType::Uint32);\n std::array typeIds = { uintType, baseType };\n return m_module.defStructType(typeIds.size(), typeIds.data());\n }\n uint32_t getFunctionId(\n uint32_t functionNr) {\n auto entry = m_subroutines.find(functionNr);\n if (entry != m_subroutines.end())\n return entry->second;\n \n uint32_t functionId = m_module.allocateId();\n m_subroutines.insert({ functionNr, functionId });\n return functionId;\n }\n DxbcCompilerHsForkJoinPhase* getCurrentHsForkJoinPhase();\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_analysis.h", "class DxbcAnalyzer {\n public:\n DxbcAnalyzer(\n const DxbcModuleInfo& moduleInfo,\n const DxbcProgramInfo& programInfo,\n const Rc& isgn,\n const Rc& osgn,\n const Rc& psgn,\n DxbcAnalysisInfo& analysis) {\n // Get number of clipping and culling planes from the\n // input and output signatures. 
We will need this to\n // declare the shader input and output interfaces.\n m_analysis->clipCullIn = getClipCullInfo(m_isgn);\n m_analysis->clipCullOut = getClipCullInfo(m_osgn);\n }\n ~DxbcAnalyzer() {\n \n }\n void processInstruction(\n const DxbcShaderInstruction& ins) {\n switch (ins.opClass) {\n case DxbcInstClass::Atomic: {\n const uint32_t operandId = ins.dstCount - 1;\n\n if (ins.dst[operandId].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.dst[operandId].idx[0].offset;\n m_analysis->uavInfos[registerId].accessAtomicOp = true;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n\n // Check whether the atomic operation is order-invariant\n DxvkAccessOp op = DxvkAccessOp::None;\n\n switch (ins.op) {\n case DxbcOpcode::AtomicAnd: op = DxvkAccessOp::And; break;\n case DxbcOpcode::AtomicOr: op = DxvkAccessOp::Or; break;\n case DxbcOpcode::AtomicXor: op = DxvkAccessOp::Xor; break;\n case DxbcOpcode::AtomicIAdd: op = DxvkAccessOp::Add; break;\n case DxbcOpcode::AtomicIMax: op = DxvkAccessOp::IMax; break;\n case DxbcOpcode::AtomicIMin: op = DxvkAccessOp::IMin; break;\n case DxbcOpcode::AtomicUMax: op = DxvkAccessOp::UMax; break;\n case DxbcOpcode::AtomicUMin: op = DxvkAccessOp::UMin; break;\n default: break;\n }\n\n setUavAccessOp(registerId, op);\n }\n } break;\n\n case DxbcInstClass::TextureSample:\n case DxbcInstClass::TextureGather:\n case DxbcInstClass::TextureQueryLod:\n case DxbcInstClass::VectorDeriv: {\n m_analysis->usesDerivatives = true;\n } break;\n\n case DxbcInstClass::ControlFlow: {\n if (ins.op == DxbcOpcode::Discard)\n m_analysis->usesKill = true;\n } break;\n\n case DxbcInstClass::BufferLoad: {\n uint32_t operandId = ins.op == DxbcOpcode::LdStructured ? 
2 : 1;\n bool sparseFeedback = ins.dstCount == 2;\n\n if (ins.src[operandId].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.src[operandId].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT;\n m_analysis->uavInfos[registerId].sparseFeedback |= sparseFeedback;\n\n setUavAccessOp(registerId, DxvkAccessOp::None);\n } else if (ins.src[operandId].type == DxbcOperandType::Resource) {\n const uint32_t registerId = ins.src[operandId].idx[0].offset;\n m_analysis->srvInfos[registerId].sparseFeedback |= sparseFeedback;\n }\n } break;\n\n case DxbcInstClass::BufferStore: {\n if (ins.dst[0].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_WRITE_BIT;\n\n setUavAccessOp(registerId, getStoreAccessOp(ins.dst[0].mask, ins.src[ins.srcCount - 1u]));\n }\n } break;\n\n case DxbcInstClass::TypedUavLoad: {\n const uint32_t registerId = ins.src[1].idx[0].offset;\n m_analysis->uavInfos[registerId].accessTypedLoad = true;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT;\n\n setUavAccessOp(registerId, DxvkAccessOp::None);\n } break;\n\n case DxbcInstClass::TypedUavStore: {\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_WRITE_BIT;\n\n // The UAV format may change between dispatches, so be conservative here\n // and only allow this optimization when the app is writing zeroes.\n DxvkAccessOp storeOp = getStoreAccessOp(DxbcRegMask(0xf), ins.src[1u]);\n\n if (storeOp != DxvkAccessOp(DxvkAccessOp::OpType::StoreUi, 0u))\n storeOp = DxvkAccessOp::None;\n\n setUavAccessOp(registerId, storeOp);\n } break;\n\n case DxbcInstClass::Declaration: {\n switch (ins.op) {\n case DxbcOpcode::DclConstantBuffer: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcConstBufBindingCount)\n 
m_analysis->bindings.cbvMask |= 1u << registerId;\n } break;\n\n case DxbcOpcode::DclSampler: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcSamplerBindingCount)\n m_analysis->bindings.samplerMask |= 1u << registerId;\n } break;\n\n case DxbcOpcode::DclResource:\n case DxbcOpcode::DclResourceRaw:\n case DxbcOpcode::DclResourceStructured: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n uint32_t idx = registerId / 64u;\n uint32_t bit = registerId % 64u;\n\n if (registerId < DxbcResourceBindingCount)\n m_analysis->bindings.srvMask[idx] |= uint64_t(1u) << bit;\n } break;\n\n case DxbcOpcode::DclUavTyped:\n case DxbcOpcode::DclUavRaw:\n case DxbcOpcode::DclUavStructured: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcUavBindingCount)\n m_analysis->bindings.uavMask |= uint64_t(1u) << registerId;\n } break;\n\n default: ;\n }\n } break;\n\n default:\n break;\n }\n\n for (uint32_t i = 0; i < ins.dstCount; i++) {\n if (ins.dst[i].type == DxbcOperandType::IndexableTemp) {\n uint32_t index = ins.dst[i].idx[0].offset;\n m_analysis->xRegMasks[index] |= ins.dst[i].mask;\n }\n }\n }\n private:\n Rc m_isgn;\n Rc m_osgn;\n Rc m_psgn;\n DxbcAnalysisInfo* m_analysis = nullptr;\n DxbcClipCullInfo getClipCullInfo(\n const Rc& sgn) const {\n DxbcClipCullInfo result;\n \n if (sgn != nullptr) {\n for (auto e = sgn->begin(); e != sgn->end(); e++) {\n const uint32_t componentCount = e->componentMask.popCount();\n \n if (e->systemValue == DxbcSystemValue::ClipDistance)\n result.numClipPlanes += componentCount;\n if (e->systemValue == DxbcSystemValue::CullDistance)\n result.numCullPlanes += componentCount;\n }\n }\n \n return result;\n }\n void setUavAccessOp(uint32_t uav, DxvkAccessOp op) {\n if (m_analysis->uavInfos[uav].accessOp == DxvkAccessOp::None)\n m_analysis->uavInfos[uav].accessOp = op;\n\n // Maintain ordering if the UAV is accessed via other operations as well\n if (op == DxvkAccessOp::None || 
m_analysis->uavInfos[uav].accessOp != op)\n m_analysis->uavInfos[uav].nonInvariantAccess = true;\n }\n static DxvkAccessOp getStoreAccessOp(DxbcRegMask writeMask, const DxbcRegister& src) {\n if (src.type != DxbcOperandType::Imm32)\n return DxvkAccessOp::None;\n\n // Trivial case, same value is written to all components\n if (src.componentCount == DxbcComponentCount::Component1)\n return getConstantStoreOp(src.imm.u32_1);\n\n if (src.componentCount != DxbcComponentCount::Component4)\n return DxvkAccessOp::None;\n\n // Otherwise, make sure that all written components are equal\n DxvkAccessOp op = DxvkAccessOp::None;\n\n for (uint32_t i = 0u; i < 4u; i++) {\n if (!writeMask[i])\n continue;\n\n // If the written value can't be represented, skip\n DxvkAccessOp scalarOp = getConstantStoreOp(src.imm.u32_4[i]);\n\n if (scalarOp == DxvkAccessOp::None)\n return DxvkAccessOp::None;\n\n // First component written\n if (op == DxvkAccessOp::None)\n op = scalarOp;\n\n // Conflicting store ops\n if (op != scalarOp)\n return DxvkAccessOp::None;\n }\n\n return op;\n }\n static DxvkAccessOp getConstantStoreOp(uint32_t value) {\n constexpr uint32_t mask = 0xfffu;\n\n uint32_t ubits = value & mask;\n uint32_t fbits = (value >> 20u);\n\n if (value == ubits)\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreUi, ubits);\n\n if (value == (ubits | ~mask))\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreSi, ubits);\n\n if (value == (fbits << 20u))\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreF, fbits);\n\n return DxvkAccessOp::None;\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_chunk_isgn.h", "class DxbcIsgn {\n public:\n DxbcIsgn(DxbcReader reader, DxbcTag tag) {\n uint32_t elementCount = reader.readu32();\n reader.skip(sizeof(uint32_t));\n \n std::array componentTypes = {\n DxbcScalarType::Uint32, DxbcScalarType::Uint32,\n DxbcScalarType::Sint32, DxbcScalarType::Float32,\n };\n\n // https://github.com/DarkStarSword/3d-fixes/blob/master/dx11shaderanalyse.py#L101\n bool 
hasStream = (tag == \"ISG1\") || (tag == \"OSG1\") || (tag == \"PSG1\") || (tag == \"OSG5\");\n bool hasPrecision = (tag == \"ISG1\") || (tag == \"OSG1\") || (tag == \"PSG1\");\n \n for (uint32_t i = 0; i < elementCount; i++) {\n DxbcSgnEntry entry;\n entry.streamId = hasStream ? reader.readu32() : 0;\n entry.semanticName = reader.clone(reader.readu32()).readString();\n entry.semanticIndex = reader.readu32();\n entry.systemValue = static_cast(reader.readu32());\n entry.componentType = componentTypes.at(reader.readu32());\n entry.registerId = reader.readu32();\n\n uint32_t mask = reader.readu32();\n\n entry.componentMask = bit::extract(mask, 0, 3);\n entry.componentUsed = bit::extract(mask, 8, 11);\n\n if (hasPrecision)\n reader.readu32();\n\n m_entries.push_back(entry);\n }\n }\n ~DxbcIsgn() {\n \n }\n const DxbcSgnEntry* findByRegister(\n uint32_t registerId) const;\n const DxbcSgnEntry* find(\n const std::string& semanticName,\n uint32_t semanticIndex,\n uint32_t streamIndex) const;\n DxbcRegMask regMask(\n uint32_t registerId) const {\n DxbcRegMask mask;\n\n for (auto e = this->begin(); e != this->end(); e++) {\n if (e->registerId == registerId)\n mask |= e->componentMask;\n }\n\n return mask;\n }\n uint32_t maxRegisterCount() const {\n uint32_t result = 0;\n for (auto e = this->begin(); e != this->end(); e++)\n result = std::max(result, e->registerId + 1);\n return result;\n }\n static bool compareSemanticNames(\n const std::string& a,\n const std::string& b) {\n if (a.size() != b.size())\n return false;\n \n for (size_t i = 0; i < a.size(); i++) {\n char ac = a[i];\n char bc = b[i];\n\n if (ac != bc) {\n if (ac >= 'A' && ac <= 'Z') ac += 'a' - 'A';\n if (bc >= 'A' && bc <= 'Z') bc += 'a' - 'A';\n\n if (ac != bc)\n return false;\n }\n }\n \n return true;\n }\n private:\n std::vector m_entries;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_module.h", "class DxbcAnalyzer {\n public:\n DxbcModule(DxbcReader& reader) {\n for (uint32_t i = 0; i < 
m_header.numChunks(); i++) {\n \n // The chunk tag is stored at the beginning of each chunk\n auto chunkReader = reader.clone(m_header.chunkOffset(i));\n auto tag = chunkReader.readTag();\n \n // The chunk size follows right after the four-character\n // code. This does not include the eight bytes that are\n // consumed by the FourCC and chunk length entry.\n auto chunkLength = chunkReader.readu32();\n \n chunkReader = chunkReader.clone(8);\n chunkReader = chunkReader.resize(chunkLength);\n \n if ((tag == \"SHDR\") || (tag == \"SHEX\"))\n m_shexChunk = new DxbcShex(chunkReader);\n \n if ((tag == \"ISGN\") || (tag == \"ISG1\"))\n m_isgnChunk = new DxbcIsgn(chunkReader, tag);\n \n if ((tag == \"OSGN\") || (tag == \"OSG5\") || (tag == \"OSG1\"))\n m_osgnChunk = new DxbcIsgn(chunkReader, tag);\n \n if ((tag == \"PCSG\") || (tag == \"PSG1\"))\n m_psgnChunk = new DxbcIsgn(chunkReader, tag);\n }\n }\n ~DxbcModule() {\n \n }\n SpirvCodeBuffer compile(\n const DxbcModuleInfo& moduleInfo,\n const std::string& fileName) {\n if (m_shexChunk == nullptr)\n throw DxvkError(\"DxbcModule::compile: No SHDR/SHEX chunk\");\n \n DxbcAnalysisInfo analysisInfo;\n \n DxbcAnalyzer analyzer(moduleInfo,\n m_shexChunk->programInfo(),\n m_isgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n this->runAnalyzer(analyzer, m_shexChunk->slice());\n\n m_bindings = std::make_optional(analysisInfo.bindings);\n \n DxbcCompiler compiler(\n fileName, moduleInfo,\n m_shexChunk->programInfo(),\n m_isgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n this->runCompiler(compiler, m_shexChunk->slice());\n\n m_icb = compiler.getIcbData();\n\n return compiler.finalize();\n }\n SpirvCodeBuffer compilePassthroughShader(\n const DxbcModuleInfo& moduleInfo,\n const std::string& fileName) const {\n if (m_shexChunk == nullptr)\n throw DxvkError(\"DxbcModule::compile: No SHDR/SHEX chunk\");\n \n DxbcAnalysisInfo analysisInfo;\n\n DxbcCompiler compiler(\n fileName, moduleInfo,\n 
DxbcProgramType::GeometryShader,\n m_osgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n compiler.processXfbPassthrough();\n return compiler.finalize();\n }\n private:\n DxbcHeader m_header;\n Rc m_isgnChunk;\n Rc m_osgnChunk;\n Rc m_psgnChunk;\n Rc m_shexChunk;\n std::vector m_icb;\n std::optional m_bindings;\n void runAnalyzer(\n DxbcAnalyzer& analyzer,\n DxbcCodeSlice slice) const {\n DxbcDecodeContext decoder;\n \n while (!slice.atEnd()) {\n decoder.decodeInstruction(slice);\n \n analyzer.processInstruction(\n decoder.getInstruction());\n }\n }\n void runCompiler(\n DxbcCompiler& compiler,\n DxbcCodeSlice slice) const {\n DxbcDecodeContext decoder;\n \n while (!slice.atEnd()) {\n decoder.decodeInstruction(slice);\n \n compiler.processInstruction(\n decoder.getInstruction());\n }\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_module.h", "class generates {\n public:\n explicit SpirvModule(uint32_t version) {\n this->instImportGlsl450();\n }\n ~SpirvModule() {\n \n }\n SpirvCodeBuffer compile() {\n SpirvCodeBuffer result;\n result.putHeader(m_version, m_id);\n result.append(m_capabilities);\n result.append(m_extensions);\n result.append(m_instExt);\n result.append(m_memoryModel);\n result.append(m_entryPoints);\n result.append(m_execModeInfo);\n result.append(m_debugNames);\n result.append(m_annotations);\n result.append(m_typeConstDefs);\n result.append(m_variables);\n\n // Perform some crude dead code elimination. 
In some cases, our compilers\n // may emit invalid code, such as an unreachable block branching to a loop's\n // continue block, but those cases cannot be reasonably detected up-front.\n std::unordered_set reachableBlocks;\n std::unordered_set mergeBlocks;\n\n classifyBlocks(reachableBlocks, mergeBlocks);\n\n bool reachable = true;\n\n for (auto ins : m_code) {\n if (ins.opCode() == spv::OpFunctionEnd) {\n reachable = true;\n result.append(ins);\n } else if (ins.opCode() == spv::OpLabel) {\n uint32_t labelId = ins.arg(1);\n\n if ((reachable = reachableBlocks.find(labelId) != reachableBlocks.end())) {\n result.append(ins);\n } else if (mergeBlocks.find(labelId) != mergeBlocks.end()) {\n result.append(ins);\n result.putIns(spv::OpUnreachable, 1);\n }\n } else if (reachable) {\n result.append(ins);\n }\n }\n\n return result;\n }\n uint32_t allocateId() {\n return m_id++;\n }\n bool hasCapability(\n spv::Capability capability) {\n for (auto ins : m_capabilities) {\n if (ins.opCode() == spv::OpCapability && ins.arg(1) == capability)\n return true;\n }\n\n return false;\n }\n void enableCapability(\n spv::Capability capability) {\n // Scan the generated instructions to check\n // whether we already enabled the capability.\n if (!hasCapability(capability)) {\n m_capabilities.putIns (spv::OpCapability, 2);\n m_capabilities.putWord(capability);\n }\n }\n void enableExtension(\n const char* extensionName) {\n m_extensions.putIns (spv::OpExtension, 1 + m_extensions.strLen(extensionName));\n m_extensions.putStr (extensionName);\n }\n void addEntryPoint(\n uint32_t entryPointId,\n spv::ExecutionModel executionModel,\n const char* name) {\n m_entryPoints.putIns (spv::OpEntryPoint, 3 + m_entryPoints.strLen(name) + m_interfaceVars.size());\n m_entryPoints.putWord (executionModel);\n m_entryPoints.putWord (entryPointId);\n m_entryPoints.putStr (name);\n \n for (uint32_t varId : m_interfaceVars)\n m_entryPoints.putWord(varId);\n }\n void setMemoryModel(\n spv::AddressingModel 
addressModel,\n spv::MemoryModel memoryModel) {\n m_memoryModel.putIns (spv::OpMemoryModel, 3);\n m_memoryModel.putWord (addressModel);\n m_memoryModel.putWord (memoryModel);\n }\n void setExecutionMode(\n uint32_t entryPointId,\n spv::ExecutionMode executionMode) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 3 + argCount);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(executionMode);\n\n for (uint32_t i = 0; i < argCount; i++)\n m_execModeInfo.putWord(args[i]);\n }\n void setExecutionMode(\n uint32_t entryPointId,\n spv::ExecutionMode executionMode,\n uint32_t argCount,\n const uint32_t* args) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 3 + argCount);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(executionMode);\n\n for (uint32_t i = 0; i < argCount; i++)\n m_execModeInfo.putWord(args[i]);\n }\n void setInvocations(\n uint32_t entryPointId,\n uint32_t invocations) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 4);\n m_execModeInfo.putWord (entryPointId);\n m_execModeInfo.putWord (spv::ExecutionModeInvocations);\n m_execModeInfo.putInt32(invocations);\n }\n void setLocalSize(\n uint32_t entryPointId,\n uint32_t x,\n uint32_t y,\n uint32_t z) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 6);\n m_execModeInfo.putWord (entryPointId);\n m_execModeInfo.putWord (spv::ExecutionModeLocalSize);\n m_execModeInfo.putInt32(x);\n m_execModeInfo.putInt32(y);\n m_execModeInfo.putInt32(z);\n }\n void setOutputVertices(\n uint32_t entryPointId,\n uint32_t vertexCount) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 4);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(spv::ExecutionModeOutputVertices);\n m_execModeInfo.putWord(vertexCount);\n }\n uint32_t addDebugString(\n const char* string) {\n uint32_t resultId = this->allocateId();\n \n m_debugNames.putIns (spv::OpString,\n 2 + m_debugNames.strLen(string));\n m_debugNames.putWord(resultId);\n m_debugNames.putStr (string);\n return resultId;\n }\n void 
setDebugSource(\n spv::SourceLanguage language,\n uint32_t version,\n uint32_t file,\n const char* source) {\n uint32_t strLen = source != nullptr\n ? m_debugNames.strLen(source) : 0;\n \n m_debugNames.putIns (spv::OpSource, 4 + strLen);\n m_debugNames.putWord(language);\n m_debugNames.putWord(version);\n m_debugNames.putWord(file);\n \n if (source != nullptr)\n m_debugNames.putStr(source);\n }\n void setDebugName(\n uint32_t expressionId,\n const char* debugName) {\n m_debugNames.putIns (spv::OpName, 2 + m_debugNames.strLen(debugName));\n m_debugNames.putWord(expressionId);\n m_debugNames.putStr (debugName);\n }\n void setDebugMemberName(\n uint32_t structId,\n uint32_t memberId,\n const char* debugName) {\n m_debugNames.putIns (spv::OpMemberName, 3 + m_debugNames.strLen(debugName));\n m_debugNames.putWord(structId);\n m_debugNames.putWord(memberId);\n m_debugNames.putStr (debugName);\n }\n uint32_t constBool(\n bool v) {\n return this->defConst(v\n ? spv::OpConstantTrue\n : spv::OpConstantFalse,\n this->defBoolType(),\n 0, nullptr);\n }\n uint32_t consti32(\n int32_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(32, 1),\n data.size(),\n data.data());\n }\n uint32_t consti64(\n int64_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(64, 1),\n data.size(),\n data.data());\n }\n uint32_t constu32(\n uint32_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(32, 0),\n data.size(),\n data.data());\n }\n uint32_t constu64(\n uint64_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(64, 0),\n data.size(),\n data.data());\n }\n uint32_t constf32(\n float v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return 
this->defConst(\n spv::OpConstant,\n this->defFloatType(32),\n data.size(),\n data.data());\n }\n uint32_t constf64(\n double v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defFloatType(64),\n data.size(),\n data.data());\n }\n uint32_t constvec4i32(\n int32_t x,\n int32_t y,\n int32_t z,\n int32_t w) {\n std::array args = {{\n this->consti32(x), this->consti32(y),\n this->consti32(z), this->consti32(w),\n }};\n \n uint32_t scalarTypeId = this->defIntType(32, 1);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4b32(\n bool x,\n bool y,\n bool z,\n bool w) {\n std::array args = {{\n this->constBool(x), this->constBool(y),\n this->constBool(z), this->constBool(w),\n }};\n \n uint32_t scalarTypeId = this->defBoolType();\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4u32(\n uint32_t x,\n uint32_t y,\n uint32_t z,\n uint32_t w) {\n std::array args = {{\n this->constu32(x), this->constu32(y),\n this->constu32(z), this->constu32(w),\n }};\n \n uint32_t scalarTypeId = this->defIntType(32, 0);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec2f32(\n float x,\n float y) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 2);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec3f32(\n float x,\n float y,\n float z) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n this->constf32(z),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = 
this->defVectorType(scalarTypeId, 3);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4f32(\n float x,\n float y,\n float z,\n float w) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n this->constf32(z), this->constf32(w),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constfReplicant(\n float replicant,\n uint32_t count) {\n uint32_t value = this->constf32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constbReplicant(\n bool replicant,\n uint32_t count) {\n uint32_t value = this->constBool(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defBoolType();\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constiReplicant(\n int32_t replicant,\n uint32_t count) {\n uint32_t value = this->consti32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defIntType(32, 1);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constuReplicant(\n int32_t replicant,\n uint32_t count) {\n uint32_t value = this->constu32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar 
composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defIntType(32, 0);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constComposite(\n uint32_t typeId,\n uint32_t constCount,\n const uint32_t* constIds) {\n return this->defConst(\n spv::OpConstantComposite,\n typeId, constCount, constIds);\n }\n uint32_t constUndef(\n uint32_t typeId) {\n return this->defConst(spv::OpUndef,\n typeId, 0, nullptr);\n }\n uint32_t constNull(\n uint32_t typeId) {\n return this->defConst(spv::OpConstantNull,\n typeId, 0, nullptr);\n }\n uint32_t lateConst32(\n uint32_t typeId) {\n uint32_t resultId = this->allocateId();\n m_lateConsts.insert(resultId);\n\n m_typeConstDefs.putIns (spv::OpConstant, 4);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(0);\n return resultId;\n }\n void setLateConst(\n uint32_t constId,\n const uint32_t* argIds) {\n for (auto ins : m_typeConstDefs) {\n if (ins.opCode() != spv::OpConstant\n && ins.opCode() != spv::OpConstantComposite)\n continue;\n \n if (ins.arg(2) != constId)\n continue;\n\n for (uint32_t i = 3; i < ins.length(); i++)\n ins.setArg(i, argIds[i - 3]);\n\n return;\n }\n }\n uint32_t specConstBool(\n bool v) {\n uint32_t typeId = this->defBoolType();\n uint32_t resultId = this->allocateId();\n \n const spv::Op op = v\n ? 
spv::OpSpecConstantTrue\n : spv::OpSpecConstantFalse;\n \n m_typeConstDefs.putIns (op, 3);\n m_typeConstDefs.putWord (typeId);\n m_typeConstDefs.putWord (resultId);\n return resultId;\n }\n uint32_t specConst32(\n uint32_t typeId,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpSpecConstant, 4);\n m_typeConstDefs.putWord (typeId);\n m_typeConstDefs.putWord (resultId);\n m_typeConstDefs.putWord (value);\n return resultId;\n }\n void decorate(\n uint32_t object,\n spv::Decoration decoration) {\n m_annotations.putIns (spv::OpDecorate, 3);\n m_annotations.putWord (object);\n m_annotations.putWord (decoration);\n }\n void decorateArrayStride(\n uint32_t object,\n uint32_t stride) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationArrayStride);\n m_annotations.putInt32(stride);\n }\n void decorateBinding(\n uint32_t object,\n uint32_t binding) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBinding);\n m_annotations.putInt32(binding);\n }\n void decorateBlock(\n uint32_t object) {\n m_annotations.putIns (spv::OpDecorate, 3);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBlock);\n }\n void decorateBuiltIn(\n uint32_t object,\n spv::BuiltIn builtIn) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBuiltIn);\n m_annotations.putWord (builtIn);\n }\n void decorateComponent(\n uint32_t object,\n uint32_t location) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationComponent);\n m_annotations.putInt32(location);\n }\n void decorateDescriptorSet(\n uint32_t object,\n uint32_t set) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationDescriptorSet);\n 
m_annotations.putInt32(set);\n }\n void decorateIndex(\n uint32_t object,\n uint32_t index) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationIndex);\n m_annotations.putInt32(index);\n }\n void decorateLocation(\n uint32_t object,\n uint32_t location) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationLocation);\n m_annotations.putInt32(location);\n }\n void decorateSpecId(\n uint32_t object,\n uint32_t specId) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationSpecId);\n m_annotations.putInt32(specId);\n }\n void decorateXfb(\n uint32_t object,\n uint32_t streamId,\n uint32_t bufferId,\n uint32_t offset,\n uint32_t stride) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationStream);\n m_annotations.putInt32(streamId);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationXfbBuffer);\n m_annotations.putInt32(bufferId);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationXfbStride);\n m_annotations.putInt32(stride);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationOffset);\n m_annotations.putInt32(offset);\n }\n void memberDecorateBuiltIn(\n uint32_t structId,\n uint32_t memberId,\n spv::BuiltIn builtIn) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationBuiltIn);\n m_annotations.putWord (builtIn);\n }\n void memberDecorate(\n uint32_t structId,\n uint32_t memberId,\n spv::Decoration decoration) {\n m_annotations.putIns (spv::OpMemberDecorate, 4);\n m_annotations.putWord 
(structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (decoration);\n }\n void memberDecorateMatrixStride(\n uint32_t structId,\n uint32_t memberId,\n uint32_t stride) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationMatrixStride);\n m_annotations.putWord (stride);\n }\n void memberDecorateOffset(\n uint32_t structId,\n uint32_t memberId,\n uint32_t offset) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationOffset);\n m_annotations.putWord (offset);\n }\n uint32_t defVoidType() {\n return this->defType(spv::OpTypeVoid, 0, nullptr);\n }\n uint32_t defBoolType() {\n return this->defType(spv::OpTypeBool, 0, nullptr);\n }\n uint32_t defIntType(\n uint32_t width,\n uint32_t isSigned) {\n std::array args = {{ width, isSigned }};\n return this->defType(spv::OpTypeInt,\n args.size(), args.data());\n }\n uint32_t defFloatType(\n uint32_t width) {\n std::array args = {{ width }};\n return this->defType(spv::OpTypeFloat,\n args.size(), args.data());\n }\n uint32_t defVectorType(\n uint32_t elementType,\n uint32_t elementCount) {\n std::array args =\n {{ elementType, elementCount }};\n \n return this->defType(spv::OpTypeVector,\n args.size(), args.data());\n }\n uint32_t defMatrixType(\n uint32_t columnType,\n uint32_t columnCount) {\n std::array args =\n {{ columnType, columnCount }};\n \n return this->defType(spv::OpTypeMatrix,\n args.size(), args.data());\n }\n uint32_t defArrayType(\n uint32_t typeId,\n uint32_t length) {\n std::array args = {{ typeId, length }};\n \n return this->defType(spv::OpTypeArray,\n args.size(), args.data());\n }\n uint32_t defArrayTypeUnique(\n uint32_t typeId,\n uint32_t length) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeArray, 4);\n 
m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(length);\n return resultId;\n }\n uint32_t defRuntimeArrayType(\n uint32_t typeId) {\n std::array args = { typeId };\n \n return this->defType(spv::OpTypeRuntimeArray,\n args.size(), args.data());\n }\n uint32_t defRuntimeArrayTypeUnique(\n uint32_t typeId) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeRuntimeArray, 3);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(typeId);\n return resultId;\n }\n uint32_t defFunctionType(\n uint32_t returnType,\n uint32_t argCount,\n const uint32_t* argTypes) {\n std::vector args;\n args.push_back(returnType);\n \n for (uint32_t i = 0; i < argCount; i++)\n args.push_back(argTypes[i]);\n \n return this->defType(spv::OpTypeFunction,\n args.size(), args.data());\n }\n uint32_t defStructType(\n uint32_t memberCount,\n const uint32_t* memberTypes) {\n return this->defType(spv::OpTypeStruct,\n memberCount, memberTypes);\n }\n uint32_t defStructTypeUnique(\n uint32_t memberCount,\n const uint32_t* memberTypes) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeStruct, 2 + memberCount);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < memberCount; i++)\n m_typeConstDefs.putWord(memberTypes[i]);\n return resultId;\n }\n uint32_t defPointerType(\n uint32_t variableType,\n spv::StorageClass storageClass) {\n std::array args = {{\n static_cast(storageClass),\n variableType,\n }};\n \n return this->defType(spv::OpTypePointer,\n args.size(), args.data());\n }\n uint32_t defSamplerType() {\n return this->defType(spv::OpTypeSampler, 0, nullptr);\n }\n uint32_t defImageType(\n uint32_t sampledType,\n spv::Dim dimensionality,\n uint32_t depth,\n uint32_t arrayed,\n uint32_t multisample,\n uint32_t sampled,\n spv::ImageFormat format) {\n std::array args = {{\n sampledType,\n static_cast(dimensionality),\n depth, arrayed,\n multisample,\n sampled,\n 
static_cast(format)\n }};\n \n return this->defType(spv::OpTypeImage,\n args.size(), args.data());\n }\n uint32_t defSampledImageType(\n uint32_t imageType) {\n return this->defType(spv::OpTypeSampledImage, 1, &imageType);\n }\n uint32_t newVar(\n uint32_t pointerType,\n spv::StorageClass storageClass) {\n uint32_t resultId = this->allocateId();\n \n if (isInterfaceVar(storageClass))\n m_interfaceVars.push_back(resultId);\n\n auto& code = storageClass != spv::StorageClassFunction\n ? m_variables : m_code;\n\n code.putIns (spv::OpVariable, 4);\n code.putWord (pointerType);\n code.putWord (resultId);\n code.putWord (storageClass);\n return resultId;\n }\n uint32_t newVarInit(\n uint32_t pointerType,\n spv::StorageClass storageClass,\n uint32_t initialValue) {\n uint32_t resultId = this->allocateId();\n \n if (isInterfaceVar(storageClass))\n m_interfaceVars.push_back(resultId);\n\n auto& code = storageClass != spv::StorageClassFunction\n ? m_variables : m_code;\n \n code.putIns (spv::OpVariable, 5);\n code.putWord (pointerType);\n code.putWord (resultId);\n code.putWord (storageClass);\n code.putWord (initialValue);\n return resultId;\n }\n void functionBegin(\n uint32_t returnType,\n uint32_t functionId,\n uint32_t functionType,\n spv::FunctionControlMask functionControl) {\n m_code.putIns (spv::OpFunction, 5);\n m_code.putWord(returnType);\n m_code.putWord(functionId);\n m_code.putWord(functionControl);\n m_code.putWord(functionType);\n }\n uint32_t functionParameter(\n uint32_t parameterType) {\n uint32_t parameterId = this->allocateId();\n \n m_code.putIns (spv::OpFunctionParameter, 3);\n m_code.putWord(parameterType);\n m_code.putWord(parameterId);\n return parameterId;\n }\n void functionEnd() {\n m_code.putIns (spv::OpFunctionEnd, 1);\n }\n uint32_t opAccessChain(\n uint32_t resultType,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAccessChain, 4 + indexCount);\n 
m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(composite);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opArrayLength(\n uint32_t resultType,\n uint32_t structure,\n uint32_t memberId) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpArrayLength, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(structure);\n m_code.putWord(memberId);\n return resultId;\n }\n uint32_t opAny(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAny, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opAll(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAll, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opAtomicLoad(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicLoad, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n void opAtomicStore(\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n m_code.putIns (spv::OpAtomicStore, 5);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n }\n uint32_t opAtomicExchange(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicExchange, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n 
return resultId;\n }\n uint32_t opAtomicCompareExchange(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t equal,\n uint32_t unequal,\n uint32_t value,\n uint32_t comparator) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicCompareExchange, 9);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(equal);\n m_code.putWord(unequal);\n m_code.putWord(value);\n m_code.putWord(comparator);\n return resultId;\n }\n uint32_t opAtomicIIncrement(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIIncrement, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n uint32_t opAtomicIDecrement(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIDecrement, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n uint32_t opAtomicIAdd(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIAdd, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicISub(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicISub, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n 
m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicSMin(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicSMin, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicSMax(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicSMax, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicUMin(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicUMin, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicUMax(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicUMax, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicAnd(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicAnd, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n 
m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicOr(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicOr, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicXor(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicXor, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opBitcast(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitcast, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opBitCount(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitCount, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opBitReverse(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitReverse, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFindILsb(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FindILsb);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t 
opFindUMsb(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FindUMsb);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFindSMsb(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FindSMsb);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opBitFieldInsert(\n uint32_t resultType,\n uint32_t base,\n uint32_t insert,\n uint32_t offset,\n uint32_t count) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitFieldInsert, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(insert);\n m_code.putWord(offset);\n m_code.putWord(count);\n return resultId;\n }\n uint32_t opBitFieldSExtract(\n uint32_t resultType,\n uint32_t base,\n uint32_t offset,\n uint32_t count) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitFieldSExtract, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(offset);\n m_code.putWord(count);\n return resultId;\n }\n uint32_t opBitFieldUExtract(\n uint32_t resultType,\n uint32_t base,\n uint32_t offset,\n uint32_t count) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitFieldUExtract, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(offset);\n m_code.putWord(count);\n return resultId;\n }\n uint32_t opBitwiseAnd(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitwiseAnd, 5);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opBitwiseOr(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitwiseOr, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opBitwiseXor(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpBitwiseXor, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opNot(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpNot, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opShiftLeftLogical(\n uint32_t resultType,\n uint32_t base,\n uint32_t shift) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpShiftLeftLogical, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(shift);\n return resultId;\n }\n uint32_t opShiftRightArithmetic(\n uint32_t resultType,\n uint32_t base,\n uint32_t shift) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpShiftRightArithmetic, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(shift);\n return resultId;\n }\n uint32_t opShiftRightLogical(\n uint32_t resultType,\n uint32_t base,\n uint32_t shift) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpShiftRightLogical, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(shift);\n return resultId;\n }\n uint32_t opConvertFtoS(\n uint32_t resultType,\n uint32_t operand) {\n 
uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertFToS, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opConvertFtoU(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertFToU, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opConvertStoF(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertSToF, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opConvertUtoF(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpConvertUToF, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opCompositeConstruct(\n uint32_t resultType,\n uint32_t valueCount,\n const uint32_t* valueArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpCompositeConstruct, 3 + valueCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n \n for (uint32_t i = 0; i < valueCount; i++)\n m_code.putWord(valueArray[i]);\n return resultId;\n }\n uint32_t opCompositeExtract(\n uint32_t resultType,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpCompositeExtract, 4 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(composite);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opCompositeInsert(\n uint32_t resultType,\n uint32_t object,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n 
m_code.putIns (spv::OpCompositeInsert, 5 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(object);\n m_code.putWord(composite);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opDpdx(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdx, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdy(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdy, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdxCoarse(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdxCoarse, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdyCoarse(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdyCoarse, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdxFine(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdxFine, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDpdyFine(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDPdyFine, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opVectorExtractDynamic(\n uint32_t resultType,\n uint32_t vector,\n uint32_t index) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns 
(spv::OpVectorExtractDynamic, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n m_code.putWord(index);\n return resultId;\n }\n uint32_t opVectorShuffle(\n uint32_t resultType,\n uint32_t vectorLeft,\n uint32_t vectorRight,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpVectorShuffle, 5 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vectorLeft);\n m_code.putWord(vectorRight);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opSNegate(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSNegate, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFNegate(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFNegate, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opSAbs(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450SAbs);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFAbs(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FAbs);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFSign(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FSign);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFMix(\n uint32_t resultType,\n uint32_t x,\n uint32_t y,\n uint32_t a) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FMix);\n m_code.putWord(x);\n m_code.putWord(y);\n m_code.putWord(a);\n return resultId;\n }\n uint32_t opCross(\n uint32_t resultType,\n uint32_t x,\n uint32_t y) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Cross);\n m_code.putWord(x);\n m_code.putWord(y);\n return resultId;\n }\n uint32_t opIAdd(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIAdd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opISub(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpISub, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFAdd(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFAdd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFSub(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFSub, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSDiv(\n 
uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSDiv, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUDiv(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUDiv, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSRem(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSRem, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUMod(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUMod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFDiv(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFDiv, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opIMul(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIMul, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFMul(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFMul, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opVectorTimesScalar(\n uint32_t resultType,\n uint32_t vector,\n uint32_t scalar) {\n 
uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpVectorTimesScalar, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n m_code.putWord(scalar);\n return resultId;\n }\n uint32_t opMatrixTimesMatrix(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpMatrixTimesMatrix, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opMatrixTimesVector(\n uint32_t resultType,\n uint32_t matrix,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpMatrixTimesVector, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(matrix);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opVectorTimesMatrix(\n uint32_t resultType,\n uint32_t vector,\n uint32_t matrix) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpVectorTimesMatrix, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n m_code.putWord(matrix);\n return resultId;\n }\n uint32_t opTranspose(\n uint32_t resultType,\n uint32_t matrix) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpTranspose, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(matrix);\n return resultId;\n }\n uint32_t opInverse(\n uint32_t resultType,\n uint32_t matrix) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450MatrixInverse);\n m_code.putWord(matrix);\n return resultId;\n }\n uint32_t opFFma(\n uint32_t resultType,\n uint32_t a,\n uint32_t b,\n uint32_t c) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n 
m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Fma);\n m_code.putWord(a);\n m_code.putWord(b);\n m_code.putWord(c);\n return resultId;\n }\n uint32_t opFMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opNMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450NMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opNMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450NMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450SMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opSMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = 
this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450SMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUMax(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UMax);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opUMin(\n uint32_t resultType,\n uint32_t a,\n uint32_t b) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UMin);\n m_code.putWord(a);\n m_code.putWord(b);\n return resultId;\n }\n uint32_t opFClamp(\n uint32_t resultType,\n uint32_t x,\n uint32_t minVal,\n uint32_t maxVal) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450FClamp);\n m_code.putWord(x);\n m_code.putWord(minVal);\n m_code.putWord(maxVal);\n return resultId;\n }\n uint32_t opNClamp(\n uint32_t resultType,\n uint32_t x,\n uint32_t minVal,\n uint32_t maxVal) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 8);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450NClamp);\n m_code.putWord(x);\n m_code.putWord(minVal);\n m_code.putWord(maxVal);\n return resultId;\n }\n uint32_t opIEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n 
m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opINotEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpINotEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSLessThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSLessThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSLessThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSLessThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSGreaterThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSGreaterThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSGreaterThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSGreaterThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opULessThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpULessThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opULessThanEqual(\n uint32_t 
resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpULessThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opUGreaterThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUGreaterThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opUGreaterThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpUGreaterThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFOrdEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFUnordNotEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFUnordNotEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdLessThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFOrdLessThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdLessThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n 
m_code.putIns (spv::OpFOrdLessThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdGreaterThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFOrdGreaterThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdGreaterThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFOrdGreaterThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opLogicalEqual(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalNotEqual(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalNotEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalAnd(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalAnd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalOr(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalOr, 5);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalNot(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalNot, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDot(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDot, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSin(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Sin);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opCos(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Cos);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opSqrt(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Sqrt);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opInverseSqrt(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InverseSqrt);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opNormalize(\n 
uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Normalize);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRawAccessChain(\n uint32_t resultType,\n uint32_t base,\n uint32_t stride,\n uint32_t index,\n uint32_t offset,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpRawAccessChainNV, operand ? 8 : 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(stride);\n m_code.putWord(index);\n m_code.putWord(offset);\n\n if (operand)\n m_code.putWord(operand);\n\n return resultId;\n }\n uint32_t opReflect(\n uint32_t resultType,\n uint32_t incident,\n uint32_t normal) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Reflect);\n m_code.putWord(incident);\n m_code.putWord(normal);\n return resultId;\n }\n uint32_t opLength(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Length);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opExp2(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Exp2);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opExp(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Exp);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opLog2(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Log2);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opPow(\n uint32_t resultType,\n uint32_t base,\n uint32_t exponent) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Pow);\n m_code.putWord(base);\n m_code.putWord(exponent);\n return resultId;\n }\n uint32_t opFract(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Fract);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opCeil(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Ceil);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFloor(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Floor);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRound(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Round);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRoundEven(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450RoundEven);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opTrunc(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Trunc);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFConvert(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpFConvert, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opPackHalf2x16(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450PackHalf2x16);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opUnpackHalf2x16(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UnpackHalf2x16);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opSelect(\n uint32_t resultType,\n uint32_t condition,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSelect, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n 
m_code.putWord(condition);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opIsNan(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIsNan, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opIsInf(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIsInf, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFunctionCall(\n uint32_t resultType,\n uint32_t functionId,\n uint32_t argCount,\n const uint32_t* argIds) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFunctionCall, 4 + argCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(functionId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_code.putWord(argIds[i]);\n return resultId;\n }\n void opLabel(\n uint32_t labelId) {\n m_code.putIns (spv::OpLabel, 2);\n m_code.putWord(labelId);\n\n m_blockId = labelId;\n }\n uint32_t opLoad(\n uint32_t typeId,\n uint32_t pointerId) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLoad, 4 + getMemoryOperandWordCount(operands));\n m_code.putWord(typeId);\n m_code.putWord(resultId);\n m_code.putWord(pointerId);\n\n putMemoryOperands(operands);\n return resultId;\n }\n uint32_t opLoad(\n uint32_t typeId,\n uint32_t pointerId,\n const SpirvMemoryOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLoad, 4 + getMemoryOperandWordCount(operands));\n m_code.putWord(typeId);\n m_code.putWord(resultId);\n m_code.putWord(pointerId);\n\n putMemoryOperands(operands);\n return resultId;\n }\n void opStore(\n uint32_t pointerId,\n uint32_t valueId) {\n m_code.putIns (spv::OpStore, 3 + getMemoryOperandWordCount(operands));\n m_code.putWord(pointerId);\n 
m_code.putWord(valueId);\n\n putMemoryOperands(operands);\n }\n void opStore(\n uint32_t pointerId,\n uint32_t valueId,\n const SpirvMemoryOperands& operands) {\n m_code.putIns (spv::OpStore, 3 + getMemoryOperandWordCount(operands));\n m_code.putWord(pointerId);\n m_code.putWord(valueId);\n\n putMemoryOperands(operands);\n }\n uint32_t opInterpolateAtCentroid(\n uint32_t resultType,\n uint32_t interpolant) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtCentroid);\n m_code.putWord(interpolant);\n return resultId;\n }\n uint32_t opInterpolateAtSample(\n uint32_t resultType,\n uint32_t interpolant,\n uint32_t sample) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtSample);\n m_code.putWord(interpolant);\n m_code.putWord(sample);\n return resultId;\n }\n uint32_t opInterpolateAtOffset(\n uint32_t resultType,\n uint32_t interpolant,\n uint32_t offset) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtOffset);\n m_code.putWord(interpolant);\n m_code.putWord(offset);\n return resultId;\n }\n uint32_t opImage(\n uint32_t resultType,\n uint32_t sampledImage) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpImage, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n return resultId;\n }\n uint32_t opImageRead(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseRead\n : spv::OpImageRead;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n void opImageWrite(\n uint32_t image,\n uint32_t coordinates,\n uint32_t texel,\n const SpirvImageOperands& operands) {\n m_code.putIns (spv::OpImageWrite,\n 4 + getImageOperandWordCount(operands));\n m_code.putWord(image);\n m_code.putWord(coordinates);\n m_code.putWord(texel);\n \n putImageOperands(operands);\n }\n uint32_t opImageSparseTexelsResident(\n uint32_t resultType,\n uint32_t residentCode) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpImageSparseTexelsResident, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(residentCode);\n\n return resultId;\n }\n uint32_t opImageTexelPointer(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n uint32_t sample) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageTexelPointer, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n m_code.putWord(sample);\n return resultId;\n }\n uint32_t opSampledImage(\n uint32_t resultType,\n uint32_t image,\n uint32_t sampler) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSampledImage, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(sampler);\n return resultId;\n }\n uint32_t opImageQuerySizeLod(\n uint32_t resultType,\n uint32_t image,\n uint32_t lod) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySizeLod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(lod);\n return resultId;\n }\n uint32_t opImageQuerySize(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = 
this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySize, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageQueryLevels(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQueryLevels, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageQueryLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQueryLod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n return resultId;\n }\n uint32_t opImageQuerySamples(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySamples, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageFetch(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n\n spv::Op op = operands.sparse\n ? spv::OpImageSparseFetch\n : spv::OpImageFetch;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageGather(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t component,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseGather\n : spv::OpImageGather;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(component);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageDrefGather(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseDrefGather\n : spv::OpImageDrefGather;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleImplicitLod\n : spv::OpImageSampleImplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleExplicitLod\n : spv::OpImageSampleExplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjImplicitLod\n : spv::OpImageSampleProjImplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjExplicitLod\n : spv::OpImageSampleProjExplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleDrefImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleDrefImplicitLod\n : spv::OpImageSampleDrefImplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleDrefExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleDrefExplicitLod\n : spv::OpImageSampleDrefExplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjDrefImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjDrefImplicitLod\n : spv::OpImageSampleProjDrefImplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjDrefExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleProjDrefExplicitLod\n : spv::OpImageSampleProjDrefExplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opGroupNonUniformBallot(\n uint32_t resultType,\n uint32_t execution,\n uint32_t predicate) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBallot, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(predicate);\n return resultId;\n }\n uint32_t opGroupNonUniformBallotBitCount(\n uint32_t resultType,\n uint32_t execution,\n uint32_t operation,\n uint32_t ballot) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBallotBitCount, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(operation);\n m_code.putWord(ballot);\n return resultId;\n }\n uint32_t opGroupNonUniformElect(\n uint32_t resultType,\n uint32_t execution) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformElect, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n return resultId;\n }\n uint32_t opGroupNonUniformBroadcastFirst(\n uint32_t resultType,\n uint32_t execution,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBroadcastFirst, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(value);\n return resultId;\n }\n void opControlBarrier(\n uint32_t execution,\n uint32_t memory,\n uint32_t semantics) {\n m_code.putIns (spv::OpControlBarrier, 4);\n m_code.putWord(execution);\n m_code.putWord(memory);\n m_code.putWord(semantics);\n }\n void opMemoryBarrier(\n uint32_t memory,\n uint32_t 
semantics) {\n m_code.putIns (spv::OpMemoryBarrier, 3);\n m_code.putWord(memory);\n m_code.putWord(semantics);\n }\n void opLoopMerge(\n uint32_t mergeBlock,\n uint32_t continueTarget,\n uint32_t loopControl) {\n m_code.putIns (spv::OpLoopMerge, 4);\n m_code.putWord(mergeBlock);\n m_code.putWord(continueTarget);\n m_code.putWord(loopControl);\n }\n void opSelectionMerge(\n uint32_t mergeBlock,\n uint32_t selectionControl) {\n m_code.putIns (spv::OpSelectionMerge, 3);\n m_code.putWord(mergeBlock);\n m_code.putWord(selectionControl);\n }\n void opBranch(\n uint32_t label) {\n m_code.putIns (spv::OpBranch, 2);\n m_code.putWord(label);\n\n m_blockId = 0;\n }\n void opBranchConditional(\n uint32_t condition,\n uint32_t trueLabel,\n uint32_t falseLabel) {\n m_code.putIns (spv::OpBranchConditional, 4);\n m_code.putWord(condition);\n m_code.putWord(trueLabel);\n m_code.putWord(falseLabel);\n\n m_blockId = 0;\n }\n void opSwitch(\n uint32_t selector,\n uint32_t jumpDefault,\n uint32_t caseCount,\n const SpirvSwitchCaseLabel* caseLabels) {\n m_code.putIns (spv::OpSwitch, 3 + 2 * caseCount);\n m_code.putWord(selector);\n m_code.putWord(jumpDefault);\n \n for (uint32_t i = 0; i < caseCount; i++) {\n m_code.putWord(caseLabels[i].literal);\n m_code.putWord(caseLabels[i].labelId);\n }\n\n m_blockId = 0;\n }\n uint32_t opPhi(\n uint32_t resultType,\n uint32_t sourceCount,\n const SpirvPhiLabel* sourceLabels) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpPhi, 3 + 2 * sourceCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n \n for (uint32_t i = 0; i < sourceCount; i++) {\n m_code.putWord(sourceLabels[i].varId);\n m_code.putWord(sourceLabels[i].labelId);\n }\n \n return resultId;\n }\n void opReturn() {\n m_code.putIns (spv::OpReturn, 1);\n m_blockId = 0;\n }\n void opDemoteToHelperInvocation() {\n m_code.putIns (spv::OpDemoteToHelperInvocation, 1);\n }\n void opEmitVertex(\n uint32_t streamId) {\n if (streamId == 0) {\n m_code.putIns 
(spv::OpEmitVertex, 1);\n } else {\n m_code.putIns (spv::OpEmitStreamVertex, 2);\n m_code.putWord(streamId);\n }\n }\n void opEndPrimitive(\n uint32_t streamId) {\n if (streamId == 0) {\n m_code.putIns (spv::OpEndPrimitive, 1);\n } else {\n m_code.putIns (spv::OpEndStreamPrimitive, 2);\n m_code.putWord(streamId);\n }\n }\n void opBeginInvocationInterlock() {\n m_code.putIns(spv::OpBeginInvocationInterlockEXT, 1);\n }\n void opEndInvocationInterlock() {\n m_code.putIns(spv::OpEndInvocationInterlockEXT, 1);\n }\n uint32_t opSinCos(\n uint32_t x,\n bool useBuiltIn) {\n // We only operate on 32-bit floats here\n uint32_t floatType = defFloatType(32);\n uint32_t resultType = defVectorType(floatType, 2u);\n\n if (useBuiltIn) {\n std::array members = { opSin(floatType, x), opCos(floatType, x) };\n return opCompositeConstruct(resultType, members.size(), members.data());\n } else {\n uint32_t uintType = defIntType(32, false);\n uint32_t sintType = defIntType(32, true);\n uint32_t boolType = defBoolType();\n\n // Normalize input to multiple of pi/4\n uint32_t xNorm = opFMul(floatType, opFAbs(floatType, x), constf32(4.0 / pi));\n\n uint32_t xTrunc = opTrunc(floatType, xNorm);\n uint32_t xFract = opFSub(floatType, xNorm, xTrunc);\n\n uint32_t xInt = opConvertFtoU(uintType, xTrunc);\n\n // Mirror input along x axis as necessary\n uint32_t mirror = opINotEqual(boolType, opBitwiseAnd(uintType, xInt, constu32(1u)), constu32(0u));\n xFract = opSelect(floatType, mirror, opFSub(floatType, constf32(1.0f), xFract), xFract);\n\n // Compute taylor series for fractional part\n uint32_t xFract_2 = opFMul(floatType, xFract, xFract);\n uint32_t xFract_4 = opFMul(floatType, xFract_2, xFract_2);\n uint32_t xFract_6 = opFMul(floatType, xFract_4, xFract_2);\n\n uint32_t taylor = opFMul(floatType, xFract_6, constf32(-sincosTaylorFactor(7)));\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFFma(floatType, xFract_4, constf32(sincosTaylorFactor(5)), taylor);\n decorate(taylor, 
spv::DecorationNoContraction);\n\n taylor = opFFma(floatType, xFract_2, constf32(-sincosTaylorFactor(3)), taylor);\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFAdd(floatType, constf32(sincosTaylorFactor(1)), taylor);\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFMul(floatType, taylor, xFract);\n decorate(taylor, spv::DecorationNoContraction);\n\n // Compute co-function based on sin^2 + cos^2 = 1\n uint32_t coFunc = opSqrt(floatType, opFSub(floatType, constf32(1.0f), opFMul(floatType, taylor, taylor)));\n\n // Determine whether the taylor series was used for sine or cosine and assign the correct result\n uint32_t funcIsSin = opIEqual(boolType, opBitwiseAnd(uintType, opIAdd(uintType, xInt, constu32(1u)), constu32(2u)), constu32(0u));\n\n uint32_t sin = opSelect(floatType, funcIsSin, taylor, coFunc);\n uint32_t cos = opSelect(floatType, funcIsSin, coFunc, taylor);\n\n // Determine whether sine is negative. Interpret the input as a\n // signed integer in order to propagate signed zeroes properly.\n uint32_t inputNeg = opSLessThan(boolType, opBitcast(sintType, x), consti32(0));\n\n uint32_t sinNeg = opINotEqual(boolType, opBitwiseAnd(uintType, xInt, constu32(4u)), constu32(0u));\n sinNeg = opLogicalNotEqual(boolType, sinNeg, inputNeg);\n\n // Determine whether cosine is negative\n uint32_t cosNeg = opINotEqual(boolType, opBitwiseAnd(uintType, opIAdd(uintType, xInt, constu32(2u)), constu32(4u)), constu32(0u));\n\n sin = opSelect(floatType, sinNeg, opFNegate(floatType, sin), sin);\n cos = opSelect(floatType, cosNeg, opFNegate(floatType, cos), cos);\n\n std::array members = { sin, cos };\n return opCompositeConstruct(resultType, members.size(), members.data());\n }\n }\n private:\n uint32_t m_version;\n uint32_t m_id = 1;\n uint32_t m_instExtGlsl450 = 0;\n uint32_t m_blockId = 0;\n SpirvCodeBuffer m_capabilities;\n SpirvCodeBuffer m_extensions;\n SpirvCodeBuffer m_instExt;\n SpirvCodeBuffer m_memoryModel;\n SpirvCodeBuffer 
m_entryPoints;\n SpirvCodeBuffer m_execModeInfo;\n SpirvCodeBuffer m_debugNames;\n SpirvCodeBuffer m_annotations;\n SpirvCodeBuffer m_typeConstDefs;\n SpirvCodeBuffer m_variables;\n SpirvCodeBuffer m_code;\n std::unordered_set m_lateConsts;\n std::vector m_interfaceVars;\n uint32_t defType(\n spv::Op op, \n uint32_t argCount,\n const uint32_t* argIds) {\n // Since the type info is stored in the code buffer,\n // we can use the code buffer to look up type IDs as\n // well. Result IDs are always stored as argument 1.\n for (auto ins : m_typeConstDefs) {\n bool match = ins.opCode() == op\n && ins.length() == 2 + argCount;\n \n for (uint32_t i = 0; i < argCount && match; i++)\n match &= ins.arg(2 + i) == argIds[i];\n \n if (match)\n return ins.arg(1);\n }\n \n // Type not yet declared, create a new one.\n uint32_t resultId = this->allocateId();\n m_typeConstDefs.putIns (op, 2 + argCount);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_typeConstDefs.putWord(argIds[i]);\n return resultId;\n }\n uint32_t defConst(\n spv::Op op,\n uint32_t typeId,\n uint32_t argCount,\n const uint32_t* argIds) {\n // Avoid declaring constants multiple times\n for (auto ins : m_typeConstDefs) {\n bool match = ins.opCode() == op\n && ins.length() == 3 + argCount\n && ins.arg(1) == typeId;\n \n for (uint32_t i = 0; i < argCount && match; i++)\n match &= ins.arg(3 + i) == argIds[i];\n \n if (!match)\n continue;\n \n uint32_t id = ins.arg(2);\n\n if (m_lateConsts.find(id) == m_lateConsts.end())\n return id;\n }\n \n // Constant not yet declared, make a new one\n uint32_t resultId = this->allocateId();\n m_typeConstDefs.putIns (op, 3 + argCount);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_typeConstDefs.putWord(argIds[i]);\n return resultId;\n }\n void instImportGlsl450() {\n m_instExtGlsl450 = this->allocateId();\n const char* name = \"GLSL.std.450\";\n \n m_instExt.putIns 
(spv::OpExtInstImport, 2 + m_instExt.strLen(name));\n m_instExt.putWord(m_instExtGlsl450);\n m_instExt.putStr (name);\n }\n uint32_t getMemoryOperandWordCount(\n const SpirvMemoryOperands& op) const {\n const uint32_t result\n = ((op.flags & spv::MemoryAccessAlignedMask) ? 1 : 0)\n + ((op.flags & spv::MemoryAccessMakePointerAvailableMask) ? 1 : 0)\n + ((op.flags & spv::MemoryAccessMakePointerVisibleMask) ? 1 : 0);\n\n return op.flags ? result + 1 : 0;\n }\n void putMemoryOperands(\n const SpirvMemoryOperands& op) {\n if (op.flags) {\n m_code.putWord(op.flags);\n\n if (op.flags & spv::MemoryAccessAlignedMask)\n m_code.putWord(op.alignment);\n\n if (op.flags & spv::MemoryAccessMakePointerAvailableMask)\n m_code.putWord(op.makeAvailable);\n\n if (op.flags & spv::MemoryAccessMakePointerVisibleMask)\n m_code.putWord(op.makeVisible);\n }\n }\n uint32_t getImageOperandWordCount(\n const SpirvImageOperands& op) const {\n // Each flag may add one or more operands\n const uint32_t result\n = ((op.flags & spv::ImageOperandsBiasMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsLodMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsConstOffsetMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsGradMask) ? 2 : 0)\n + ((op.flags & spv::ImageOperandsOffsetMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsConstOffsetsMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsSampleMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMinLodMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMakeTexelAvailableMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMakeTexelVisibleMask) ? 1 : 0);\n \n // Add a DWORD for the operand mask if it is non-zero\n return op.flags ? 
result + 1 : 0;\n }\n void putImageOperands(\n const SpirvImageOperands& op) {\n if (op.flags) {\n m_code.putWord(op.flags);\n \n if (op.flags & spv::ImageOperandsBiasMask)\n m_code.putWord(op.sLodBias);\n \n if (op.flags & spv::ImageOperandsLodMask)\n m_code.putWord(op.sLod);\n\n if (op.flags & spv::ImageOperandsGradMask) {\n m_code.putWord(op.sGradX);\n m_code.putWord(op.sGradY);\n }\n\n if (op.flags & spv::ImageOperandsConstOffsetMask)\n m_code.putWord(op.sConstOffset);\n\n if (op.flags & spv::ImageOperandsOffsetMask)\n m_code.putWord(op.gOffset);\n \n if (op.flags & spv::ImageOperandsConstOffsetsMask)\n m_code.putWord(op.gConstOffsets);\n \n if (op.flags & spv::ImageOperandsSampleMask)\n m_code.putWord(op.sSampleId);\n \n if (op.flags & spv::ImageOperandsMinLodMask)\n m_code.putWord(op.sMinLod);\n\n if (op.flags & spv::ImageOperandsMakeTexelAvailableMask)\n m_code.putWord(op.makeAvailable);\n\n if (op.flags & spv::ImageOperandsMakeTexelVisibleMask)\n m_code.putWord(op.makeVisible);\n }\n }\n bool isInterfaceVar(\n spv::StorageClass sclass) const {\n if (m_version < spvVersion(1, 4)) {\n return sclass == spv::StorageClassInput\n || sclass == spv::StorageClassOutput;\n } else {\n // All global variables need to be declared\n return sclass != spv::StorageClassFunction;\n }\n }\n void classifyBlocks(\n std::unordered_set& reachableBlocks,\n std::unordered_set& mergeBlocks) {\n std::unordered_multimap branches;\n std::queue blockQueue;\n\n uint32_t blockId = 0;\n\n for (auto ins : m_code) {\n switch (ins.opCode()) {\n case spv::OpLabel: {\n uint32_t id = ins.arg(1);\n\n if (!blockId)\n branches.insert({ 0u, id });\n\n blockId = id;\n } break;\n\n case spv::OpFunction: {\n blockId = 0u;\n } break;\n\n case spv::OpBranch: {\n branches.insert({ blockId, ins.arg(1) });\n } break;\n\n case spv::OpBranchConditional: {\n branches.insert({ blockId, ins.arg(2) });\n branches.insert({ blockId, ins.arg(3) });\n } break;\n\n case spv::OpSwitch: {\n branches.insert({ blockId, 
ins.arg(2) });\n\n for (uint32_t i = 4; i < ins.length(); i += 2)\n branches.insert({ blockId, ins.arg(i) });\n } break;\n\n case spv::OpSelectionMerge: {\n mergeBlocks.insert(ins.arg(1));\n } break;\n\n case spv::OpLoopMerge: {\n mergeBlocks.insert(ins.arg(1));\n\n // It is possible for the continue block to be unreachable in\n // practice, but we still need to emit it if we are not going\n // to eliminate this loop. Since the current block dominates\n // the loop, use it to keep the continue block intact.\n branches.insert({ blockId, ins.arg(2) });\n } break;\n\n default:;\n }\n }\n\n blockQueue.push(0);\n\n while (!blockQueue.empty()) {\n uint32_t id = blockQueue.front();\n\n auto range = branches.equal_range(id);\n\n for (auto i = range.first; i != range.second; i++) {\n if (reachableBlocks.insert(i->second).second)\n blockQueue.push(i->second);\n }\n\n blockQueue.pop();\n }\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_bit.h", "#pragma once\n\n#if (defined(__x86_64__) && !defined(__arm64ec__)) || (defined(_M_X64) && !defined(_M_ARM64EC)) \\\n || defined(__i386__) || defined(_M_IX86) || defined(__e2k__)\n #define DXVK_ARCH_X86\n #if defined(__x86_64__) || defined(_M_X64) || defined(__e2k__)\n #define DXVK_ARCH_X86_64\n #endif\n#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)\n #define DXVK_ARCH_ARM64\n#endif\n\n#ifdef DXVK_ARCH_X86\n #ifndef _MSC_VER\n #if defined(_WIN32) && (defined(__AVX__) || defined(__AVX2__))\n #error \"AVX-enabled builds not supported due to stack alignment issues.\"\n #endif\n #if defined(__WINE__) && defined(__clang__)\n #pragma push_macro(\"_WIN32\")\n #undef _WIN32\n #endif\n #include \n #if defined(__WINE__) && defined(__clang__)\n #pragma pop_macro(\"_WIN32\")\n #endif\n #else\n #include \n #endif\n#endif\n\n#include \"util_likely.h\"\n#include \"util_math.h\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace dxvk::bit {\n\n template\n T cast(const J& src) {\n 
static_assert(sizeof(T) == sizeof(J));\n static_assert(std::is_trivially_copyable::value && std::is_trivial::value);\n\n T dst;\n std::memcpy(&dst, &src, sizeof(T));\n return dst;\n }\n \n template\n T extract(T value, uint32_t fst, uint32_t lst) {\n return (value >> fst) & ~(~T(0) << (lst - fst + 1));\n }\n\n template\n T popcnt(T n) {\n n -= ((n >> 1u) & T(0x5555555555555555ull));\n n = (n & T(0x3333333333333333ull)) + ((n >> 2u) & T(0x3333333333333333ull));\n n = (n + (n >> 4u)) & T(0x0f0f0f0f0f0f0f0full);\n n *= T(0x0101010101010101ull);\n return n >> (8u * (sizeof(T) - 1u));\n }\n\n inline uint32_t tzcnt(uint32_t n) {\n #if defined(_MSC_VER) && !defined(__clang__)\n if(n == 0)\n return 32;\n return _tzcnt_u32(n);\n #elif defined(__BMI__)\n return __tzcnt_u32(n);\n #elif defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__))\n // tzcnt is encoded as rep bsf, so we can use it on all\n // processors, but the behaviour of zero inputs differs:\n // - bsf: zf = 1, cf = ?, result = ?\n // - tzcnt: zf = 0, cf = 1, result = 32\n // We'll have to handle this case manually.\n uint32_t res;\n uint32_t tmp;\n asm (\n \"tzcnt %2, %0;\"\n \"mov $32, %1;\"\n \"test %2, %2;\"\n \"cmovz %1, %0;\"\n : \"=&r\" (res), \"=&r\" (tmp)\n : \"r\" (n)\n : \"cc\");\n return res;\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? __builtin_ctz(n) : 32;\n #else\n uint32_t r = 31;\n n &= -n;\n r -= (n & 0x0000FFFF) ? 16 : 0;\n r -= (n & 0x00FF00FF) ? 8 : 0;\n r -= (n & 0x0F0F0F0F) ? 4 : 0;\n r -= (n & 0x33333333) ? 2 : 0;\n r -= (n & 0x55555555) ? 1 : 0;\n return n != 0 ? 
r : 32;\n #endif\n }\n\n inline uint32_t tzcnt(uint64_t n) {\n #if defined(DXVK_ARCH_X86_64) && defined(_MSC_VER) && !defined(__clang__)\n if(n == 0)\n return 64;\n return (uint32_t)_tzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && defined(__BMI__)\n return __tzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && (defined(__GNUC__) || defined(__clang__))\n uint64_t res;\n uint64_t tmp;\n asm (\n \"tzcnt %2, %0;\"\n \"mov $64, %1;\"\n \"test %2, %2;\"\n \"cmovz %1, %0;\"\n : \"=&r\" (res), \"=&r\" (tmp)\n : \"r\" (n)\n : \"cc\");\n return res;\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? __builtin_ctzll(n) : 64;\n #else\n uint32_t lo = uint32_t(n);\n if (lo) {\n return tzcnt(lo);\n } else {\n uint32_t hi = uint32_t(n >> 32);\n return tzcnt(hi) + 32;\n }\n #endif\n }\n\n inline uint32_t bsf(uint32_t n) {\n #if (defined(__GNUC__) || defined(__clang__)) && !defined(__BMI__) && defined(DXVK_ARCH_X86)\n uint32_t res;\n asm (\"tzcnt %1,%0\"\n : \"=r\" (res)\n : \"r\" (n)\n : \"cc\");\n return res;\n #else\n return tzcnt(n);\n #endif\n }\n\n inline uint32_t bsf(uint64_t n) {\n #if (defined(__GNUC__) || defined(__clang__)) && !defined(__BMI__) && defined(DXVK_ARCH_X86_64)\n uint64_t res;\n asm (\"tzcnt %1,%0\"\n : \"=r\" (res)\n : \"r\" (n)\n : \"cc\");\n return res;\n #else\n return tzcnt(n);\n #endif\n }\n\n inline uint32_t lzcnt(uint32_t n) {\n #if defined(_MSC_VER) && !defined(__clang__) && !defined(__LZCNT__)\n unsigned long bsr;\n if(n == 0)\n return 32;\n _BitScanReverse(&bsr, n);\n return 31-bsr;\n #elif (defined(_MSC_VER) && !defined(__clang__)) || defined(__LZCNT__)\n return _lzcnt_u32(n);\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? 
__builtin_clz(n) : 32;\n #else\n uint32_t r = 0;\n\n if (n == 0)\treturn 32;\n\n if (n <= 0x0000FFFF) { r += 16; n <<= 16; }\n if (n <= 0x00FFFFFF) { r += 8; n <<= 8; }\n if (n <= 0x0FFFFFFF) { r += 4; n <<= 4; }\n if (n <= 0x3FFFFFFF) { r += 2; n <<= 2; }\n if (n <= 0x7FFFFFFF) { r += 1; n <<= 1; }\n\n return r;\n #endif\n }\n\n inline uint32_t lzcnt(uint64_t n) {\n #if defined(_MSC_VER) && !defined(__clang__) && !defined(__LZCNT__) && defined(DXVK_ARCH_X86_64)\n unsigned long bsr;\n if(n == 0)\n return 64;\n _BitScanReverse64(&bsr, n);\n return 63-bsr;\n #elif defined(DXVK_ARCH_X86_64) && ((defined(_MSC_VER) && !defined(__clang__)) && defined(__LZCNT__))\n return _lzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && (defined(__GNUC__) || defined(__clang__))\n return n != 0 ? __builtin_clzll(n) : 64;\n #else\n uint32_t lo = uint32_t(n);\n uint32_t hi = uint32_t(n >> 32u);\n return hi ? lzcnt(hi) : lzcnt(lo) + 32u;\n #endif\n }\n\n template\n uint32_t pack(T& dst, uint32_t& shift, T src, uint32_t count) {\n constexpr uint32_t Bits = 8 * sizeof(T);\n if (likely(shift < Bits))\n dst |= src << shift;\n shift += count;\n return shift > Bits ? shift - Bits : 0;\n }\n\n template\n uint32_t unpack(T& dst, T src, uint32_t& shift, uint32_t count) {\n constexpr uint32_t Bits = 8 * sizeof(T);\n if (likely(shift < Bits))\n dst = (src >> shift) & ((T(1) << count) - 1);\n shift += count;\n return shift > Bits ? shift - Bits : 0;\n }\n\n\n /**\n * \\brief Clears cache lines of memory\n *\n * Uses non-temporal stores. 
The memory region offset\n * and size are assumed to be aligned to 64 bytes.\n * \\param [in] mem Memory region to clear\n * \\param [in] size Number of bytes to clear\n */\n inline void bclear(void* mem, size_t size) {\n #if defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))\n auto zero = _mm_setzero_si128();\n\n #if defined(__clang__)\n #pragma nounroll\n #elif defined(__GNUC__)\n #pragma GCC unroll 0\n #endif\n for (size_t i = 0; i < size; i += 64u) {\n auto* ptr = reinterpret_cast<__m128i*>(mem) + i / sizeof(zero);\n _mm_stream_si128(ptr + 0u, zero);\n _mm_stream_si128(ptr + 1u, zero);\n _mm_stream_si128(ptr + 2u, zero);\n _mm_stream_si128(ptr + 3u, zero);\n }\n #else\n std::memset(mem, 0, size);\n #endif\n }\n\n\n /**\n * \\brief Compares two aligned structs bit by bit\n *\n * \\param [in] a First struct\n * \\param [in] b Second struct\n * \\returns \\c true if the structs are equal\n */\n template\n bool bcmpeq(const T* a, const T* b) {\n static_assert(alignof(T) >= 16);\n #if defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))\n auto ai = reinterpret_cast(a);\n auto bi = reinterpret_cast(b);\n\n size_t i = 0;\n\n #if defined(__clang__)\n #pragma nounroll\n #elif defined(__GNUC__)\n #pragma GCC unroll 0\n #endif\n\n for ( ; i < 2 * (sizeof(T) / 32); i += 2) {\n __m128i eq0 = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i),\n _mm_load_si128(bi + i));\n __m128i eq1 = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i + 1),\n _mm_load_si128(bi + i + 1));\n __m128i eq = _mm_and_si128(eq0, eq1);\n\n int mask = _mm_movemask_epi8(eq);\n if (mask != 0xFFFF)\n return false;\n }\n\n for ( ; i < sizeof(T) / 16; i++) {\n __m128i eq = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i),\n _mm_load_si128(bi + i));\n\n int mask = _mm_movemask_epi8(eq);\n if (mask != 0xFFFF)\n return false;\n }\n\n return true;\n #else\n return !std::memcmp(a, b, sizeof(T));\n #endif\n }\n\n template \n class bitset {\n static constexpr size_t 
Dwords = align(Bits, 32) / 32;\n public:\n\n constexpr bitset()\n : m_dwords() {\n\n }\n\n constexpr bool get(uint32_t idx) const {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n return m_dwords[dword] & (1u << bit);\n }\n\n constexpr void set(uint32_t idx, bool value) {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n if (value)\n m_dwords[dword] |= 1u << bit;\n else\n m_dwords[dword] &= ~(1u << bit);\n }\n\n constexpr bool exchange(uint32_t idx, bool value) {\n bool oldValue = get(idx);\n set(idx, value);\n return oldValue;\n }\n\n constexpr void flip(uint32_t idx) {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n m_dwords[dword] ^= 1u << bit;\n }\n\n constexpr void setAll() {\n if constexpr (Bits % 32 == 0) {\n for (size_t i = 0; i < Dwords; i++)\n m_dwords[i] = std::numeric_limits::max();\n }\n else {\n for (size_t i = 0; i < Dwords - 1; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n m_dwords[Dwords - 1] = (1u << (Bits % 32)) - 1;\n }\n }\n\n constexpr void clearAll() {\n for (size_t i = 0; i < Dwords; i++)\n m_dwords[i] = 0;\n }\n\n constexpr bool any() const {\n for (size_t i = 0; i < Dwords; i++) {\n if (m_dwords[i] != 0)\n return true;\n }\n\n return false;\n }\n\n constexpr uint32_t& dword(uint32_t idx) {\n return m_dwords[idx];\n }\n\n constexpr size_t bitCount() {\n return Bits;\n }\n\n constexpr size_t dwordCount() {\n return Dwords;\n }\n\n constexpr bool operator [] (uint32_t idx) const {\n return get(idx);\n }\n\n constexpr void setN(uint32_t bits) {\n uint32_t fullDwords = bits / 32;\n uint32_t offset = bits % 32;\n\n for (size_t i = 0; i < fullDwords; i++)\n m_dwords[i] = 
std::numeric_limits::max();\n \n if (offset > 0)\n m_dwords[fullDwords] = (1u << offset) - 1;\n }\n\n private:\n\n uint32_t m_dwords[Dwords];\n\n };\n\n class bitvector {\n public:\n\n bool get(uint32_t idx) const {\n uint32_t dword = idx / 32;\n uint32_t bit = idx % 32;\n\n return m_dwords[dword] & (1u << bit);\n }\n\n void ensureSize(uint32_t bitCount) {\n uint32_t dword = bitCount / 32;\n if (unlikely(dword >= m_dwords.size())) {\n m_dwords.resize(dword + 1);\n }\n m_bitCount = std::max(m_bitCount, bitCount);\n }\n\n void set(uint32_t idx, bool value) {\n ensureSize(idx + 1);\n\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n if (value)\n m_dwords[dword] |= 1u << bit;\n else\n m_dwords[dword] &= ~(1u << bit);\n }\n\n bool exchange(uint32_t idx, bool value) {\n ensureSize(idx + 1);\n\n bool oldValue = get(idx);\n set(idx, value);\n return oldValue;\n }\n\n void flip(uint32_t idx) {\n ensureSize(idx + 1);\n\n uint32_t dword = idx / 32;\n uint32_t bit = idx % 32;\n\n m_dwords[dword] ^= 1u << bit;\n }\n\n void setAll() {\n if (m_bitCount % 32 == 0) {\n for (size_t i = 0; i < m_dwords.size(); i++)\n m_dwords[i] = std::numeric_limits::max();\n }\n else {\n for (size_t i = 0; i < m_dwords.size() - 1; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n m_dwords[m_dwords.size() - 1] = (1u << (m_bitCount % 32)) - 1;\n }\n }\n\n void clearAll() {\n for (size_t i = 0; i < m_dwords.size(); i++)\n m_dwords[i] = 0;\n }\n\n bool any() const {\n for (size_t i = 0; i < m_dwords.size(); i++) {\n if (m_dwords[i] != 0)\n return true;\n }\n\n return false;\n }\n\n uint32_t& dword(uint32_t idx) {\n return m_dwords[idx];\n }\n\n size_t bitCount() const {\n return m_bitCount;\n }\n\n size_t dwordCount() const {\n return m_dwords.size();\n }\n\n bool operator [] (uint32_t idx) const {\n return get(idx);\n }\n\n void setN(uint32_t bits) {\n ensureSize(bits);\n\n uint32_t fullDwords = bits / 32;\n uint32_t offset = bits % 32;\n\n for (size_t i = 0; i < fullDwords; i++)\n m_dwords[i] = 
std::numeric_limits::max();\n\n if (offset > 0)\n m_dwords[fullDwords] = (1u << offset) - 1;\n }\n\n private:\n\n std::vector m_dwords;\n uint32_t m_bitCount = 0;\n\n };\n\n template\n class BitMask {\n\n public:\n\n class iterator {\n public:\n using iterator_category = std::input_iterator_tag;\n using value_type = T;\n using difference_type = T;\n using pointer = const T*;\n using reference = T;\n\n explicit iterator(T flags)\n : m_mask(flags) { }\n\n iterator& operator ++ () {\n m_mask &= m_mask - 1;\n return *this;\n }\n\n iterator operator ++ (int) {\n iterator retval = *this;\n m_mask &= m_mask - 1;\n return retval;\n }\n\n T operator * () const {\n return bsf(m_mask);\n }\n\n bool operator == (iterator other) const { return m_mask == other.m_mask; }\n bool operator != (iterator other) const { return m_mask != other.m_mask; }\n\n private:\n\n T m_mask;\n\n };\n\n BitMask()\n : m_mask(0) { }\n\n explicit BitMask(T n)\n : m_mask(n) { }\n\n iterator begin() {\n return iterator(m_mask);\n }\n\n iterator end() {\n return iterator(0);\n }\n\n private:\n\n T m_mask;\n\n };\n\n\n /**\n * \\brief Encodes float as fixed point\n *\n * Rounds away from zero. If this is not suitable for\n * certain use cases, implement round to nearest even.\n * \\tparam T Integer type, may be signed\n * \\tparam I Integer bits\n * \\tparam F Fractional bits\n * \\param n Float to encode\n * \\returns Encoded fixed-point value\n */\n template\n T encodeFixed(float n) {\n if (n != n)\n return 0u;\n\n n *= float(1u << F);\n\n if constexpr (std::is_signed_v) {\n n = std::max(n, -float(1u << (I + F - 1u)));\n n = std::min(n, float(1u << (I + F - 1u)) - 1.0f);\n n += n < 0.0f ? 
-0.5f : 0.5f;\n } else {\n n = std::max(n, 0.0f);\n n = std::min(n, float(1u << (I + F)) - 1.0f);\n n += 0.5f;\n }\n\n T result = T(n);\n\n if constexpr (std::is_signed_v)\n result &= ((T(1u) << (I + F)) - 1u);\n\n return result;\n }\n\n\n /**\n * \\brief Decodes fixed-point integer to float\n *\n * \\tparam T Integer type, may be signed\n * \\tparam I Integer bits\n * \\tparam F Fractional bits\n * \\param n Number to decode\n * \\returns Decoded number\n */\n template\n float decodeFixed(T n) {\n // Sign-extend as necessary\n if constexpr (std::is_signed_v)\n n -= (n & (T(1u) << (I + F - 1u))) << 1u;\n\n return float(n) / float(1u << F);\n }\n\n\n /**\n * \\brief Inserts one null bit after each bit\n */\n inline uint32_t split2(uint32_t c) {\n c = (c ^ (c << 8u)) & 0x00ff00ffu;\n c = (c ^ (c << 4u)) & 0x0f0f0f0fu;\n c = (c ^ (c << 2u)) & 0x33333333u;\n c = (c ^ (c << 1u)) & 0x55555555u;\n return c;\n }\n\n\n /**\n * \\brief Inserts two null bits after each bit\n */\n inline uint64_t split3(uint64_t c) {\n c = (c | c << 32u) & 0x001f00000000ffffull;\n c = (c | c << 16u) & 0x001f0000ff0000ffull;\n c = (c | c << 8u) & 0x100f00f00f00f00full;\n c = (c | c << 4u) & 0x10c30c30c30c30c3ull;\n c = (c | c << 2u) & 0x1249249249249249ull;\n return c;\n }\n\n\n /**\n * \\brief Interleaves bits from two integers\n *\n * Both numbers must fit into 16 bits.\n * \\param [in] x X coordinate\n * \\param [in] y Y coordinate\n * \\returns Morton code of x and y\n */\n inline uint32_t interleave(uint16_t x, uint16_t y) {\n return split2(x) | (split2(y) << 1u);\n }\n\n\n /**\n * \\brief Interleaves bits from three integers\n *\n * All three numbers must fit into 16 bits.\n */\n inline uint64_t interleave(uint16_t x, uint16_t y, uint16_t z) {\n return split3(x) | (split3(y) << 1u) | (split3(z) << 2u);\n }\n\n\n /**\n * \\brief 48-bit integer storage type\n */\n struct uint48_t {\n explicit uint48_t(uint64_t n)\n : a(uint16_t(n)), b(uint16_t(n >> 16)), c(uint16_t(n >> 32)) { }\n\n 
uint16_t a;\n uint16_t b;\n uint16_t c;\n\n explicit operator uint64_t () const {\n // GCC generates worse code if we promote to uint64 directly\n uint32_t lo = uint32_t(a) | (uint32_t(b) << 16);\n return uint64_t(lo) | (uint64_t(c) << 32);\n }\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_instruction.h", "#pragma once\n\n#include \"spirv_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief SPIR-V instruction\n * \n * Helps parsing a single instruction, providing\n * access to the op code, instruction length and\n * instruction arguments.\n */\n class SpirvInstruction {\n \n public:\n \n SpirvInstruction() { }\n SpirvInstruction(uint32_t* code, uint32_t offset, uint32_t length)\n : m_code(code), m_offset(offset), m_length(length) { }\n \n /**\n * \\brief SPIR-V Op code\n * \\returns The op code\n */\n spv::Op opCode() const {\n return static_cast(\n this->arg(0) & spv::OpCodeMask);\n }\n \n /**\n * \\brief Instruction length\n * \\returns Number of DWORDs\n */\n uint32_t length() const {\n return this->arg(0) >> spv::WordCountShift;\n }\n \n /**\n * \\brief Instruction offset\n * \\returns Offset in DWORDs\n */\n uint32_t offset() const {\n return m_offset;\n }\n \n /**\n * \\brief Argument value\n * \n * Retrieves an argument DWORD. Note that some instructions\n * take 64-bit arguments which require more than one DWORD.\n * Arguments start at index 1. Calling this method with an\n * argument ID of 0 will return the opcode token.\n * \\param [in] idx Argument index, starting at 1\n * \\returns The argument value\n */\n uint32_t arg(uint32_t idx) const {\n const uint32_t index = m_offset + idx;\n return index < m_length ? m_code[index] : 0;\n }\n\n /**\n * \\brief Argument string\n *\n * Retrieves a pointer to a UTF-8-encoded string.\n * \\param [in] idx Argument index, starting at 1\n * \\returns Pointer to the literal string\n */\n const char* chr(uint32_t idx) const {\n const uint32_t index = m_offset + idx;\n return index < m_length ? 
reinterpret_cast(&m_code[index]) : nullptr;\n }\n \n /**\n * \\brief Changes the value of an argument\n * \n * \\param [in] idx Argument index, starting at 1\n * \\param [in] word New argument word\n */\n void setArg(uint32_t idx, uint32_t word) const {\n if (m_offset + idx < m_length)\n m_code[m_offset + idx] = word;\n }\n \n private:\n \n uint32_t* m_code = nullptr;\n uint32_t m_offset = 0;\n uint32_t m_length = 0;\n \n };\n \n \n /**\n * \\brief SPIR-V instruction iterator\n * \n * Convenient iterator that can be used\n * to process raw SPIR-V shader code.\n */\n class SpirvInstructionIterator {\n \n public:\n \n SpirvInstructionIterator() { }\n SpirvInstructionIterator(uint32_t* code, uint32_t offset, uint32_t length)\n : m_code (length != 0 ? code : nullptr),\n m_offset(length != 0 ? offset : 0),\n m_length(length) {\n if ((length >= 5) && (offset == 0) && (m_code[0] == spv::MagicNumber))\n this->advance(5);\n }\n \n SpirvInstructionIterator& operator ++ () {\n this->advance(SpirvInstruction(m_code, m_offset, m_length).length());\n return *this;\n }\n \n SpirvInstructionIterator operator ++ (int) {\n SpirvInstructionIterator result = *this;\n this->advance(SpirvInstruction(m_code, m_offset, m_length).length());\n return result;\n }\n \n SpirvInstruction operator * () const {\n return SpirvInstruction(m_code, m_offset, m_length);\n }\n \n bool operator == (const SpirvInstructionIterator& other) const {\n return this->m_code == other.m_code\n && this->m_offset == other.m_offset\n && this->m_length == other.m_length;\n }\n \n bool operator != (const SpirvInstructionIterator& other) const {\n return this->m_code != other.m_code\n || this->m_offset != other.m_offset\n || this->m_length != other.m_length;\n }\n \n private:\n \n uint32_t* m_code = nullptr;\n uint32_t m_offset = 0;\n uint32_t m_length = 0;\n \n void advance(uint32_t n) {\n if (m_offset + n < m_length) {\n m_offset += n;\n } else {\n m_code = nullptr;\n m_offset = 0;\n m_length = 0;\n }\n }\n \n };\n 
\n}"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_enums.h", "#pragma once\n\n#include \"dxbc_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Instruction code listing\n */\n enum class DxbcOpcode : uint32_t {\n Add = 0,\n And = 1,\n Break = 2,\n Breakc = 3,\n Call = 4,\n Callc = 5,\n Case = 6,\n Continue = 7,\n Continuec = 8,\n Cut = 9,\n Default = 10,\n DerivRtx = 11,\n DerivRty = 12,\n Discard = 13,\n Div = 14,\n Dp2 = 15,\n Dp3 = 16,\n Dp4 = 17,\n Else = 18,\n Emit = 19,\n EmitThenCut = 20,\n EndIf = 21,\n EndLoop = 22,\n EndSwitch = 23,\n Eq = 24,\n Exp = 25,\n Frc = 26,\n FtoI = 27,\n FtoU = 28,\n Ge = 29,\n IAdd = 30,\n If = 31,\n IEq = 32,\n IGe = 33,\n ILt = 34,\n IMad = 35,\n IMax = 36,\n IMin = 37,\n IMul = 38,\n INe = 39,\n INeg = 40,\n IShl = 41,\n IShr = 42,\n ItoF = 43,\n Label = 44,\n Ld = 45,\n LdMs = 46,\n Log = 47,\n Loop = 48,\n Lt = 49,\n Mad = 50,\n Min = 51,\n Max = 52,\n CustomData = 53,\n Mov = 54,\n Movc = 55,\n Mul = 56,\n Ne = 57,\n Nop = 58,\n Not = 59,\n Or = 60,\n ResInfo = 61,\n Ret = 62,\n Retc = 63,\n RoundNe = 64,\n RoundNi = 65,\n RoundPi = 66,\n RoundZ = 67,\n Rsq = 68,\n Sample = 69,\n SampleC = 70,\n SampleClz = 71,\n SampleL = 72,\n SampleD = 73,\n SampleB = 74,\n Sqrt = 75,\n Switch = 76,\n SinCos = 77,\n UDiv = 78,\n ULt = 79,\n UGe = 80,\n UMul = 81,\n UMad = 82,\n UMax = 83,\n UMin = 84,\n UShr = 85,\n UtoF = 86,\n Xor = 87,\n DclResource = 88,\n DclConstantBuffer = 89,\n DclSampler = 90,\n DclIndexRange = 91,\n DclGsOutputPrimitiveTopology = 92,\n DclGsInputPrimitive = 93,\n DclMaxOutputVertexCount = 94,\n DclInput = 95,\n DclInputSgv = 96,\n DclInputSiv = 97,\n DclInputPs = 98,\n DclInputPsSgv = 99,\n DclInputPsSiv = 100,\n DclOutput = 101,\n DclOutputSgv = 102,\n DclOutputSiv = 103,\n DclTemps = 104,\n DclIndexableTemp = 105,\n DclGlobalFlags = 106,\n Reserved0 = 107,\n Lod = 108,\n Gather4 = 109,\n SamplePos = 110,\n SampleInfo = 111,\n Reserved1 = 112,\n HsDecls = 113,\n HsControlPointPhase = 114,\n HsForkPhase 
= 115,\n HsJoinPhase = 116,\n EmitStream = 117,\n CutStream = 118,\n EmitThenCutStream = 119,\n InterfaceCall = 120,\n BufInfo = 121,\n DerivRtxCoarse = 122,\n DerivRtxFine = 123,\n DerivRtyCoarse = 124,\n DerivRtyFine = 125,\n Gather4C = 126,\n Gather4Po = 127,\n Gather4PoC = 128,\n Rcp = 129,\n F32toF16 = 130,\n F16toF32 = 131,\n UAddc = 132,\n USubb = 133,\n CountBits = 134,\n FirstBitHi = 135,\n FirstBitLo = 136,\n FirstBitShi = 137,\n UBfe = 138,\n IBfe = 139,\n Bfi = 140,\n BfRev = 141,\n Swapc = 142,\n DclStream = 143,\n DclFunctionBody = 144,\n DclFunctionTable = 145,\n DclInterface = 146,\n DclInputControlPointCount = 147,\n DclOutputControlPointCount = 148,\n DclTessDomain = 149,\n DclTessPartitioning = 150,\n DclTessOutputPrimitive = 151,\n DclHsMaxTessFactor = 152,\n DclHsForkPhaseInstanceCount = 153,\n DclHsJoinPhaseInstanceCount = 154,\n DclThreadGroup = 155,\n DclUavTyped = 156,\n DclUavRaw = 157,\n DclUavStructured = 158,\n DclThreadGroupSharedMemoryRaw = 159,\n DclThreadGroupSharedMemoryStructured = 160,\n DclResourceRaw = 161,\n DclResourceStructured = 162,\n LdUavTyped = 163,\n StoreUavTyped = 164,\n LdRaw = 165,\n StoreRaw = 166,\n LdStructured = 167,\n StoreStructured = 168,\n AtomicAnd = 169,\n AtomicOr = 170,\n AtomicXor = 171,\n AtomicCmpStore = 172,\n AtomicIAdd = 173,\n AtomicIMax = 174,\n AtomicIMin = 175,\n AtomicUMax = 176,\n AtomicUMin = 177,\n ImmAtomicAlloc = 178,\n ImmAtomicConsume = 179,\n ImmAtomicIAdd = 180,\n ImmAtomicAnd = 181,\n ImmAtomicOr = 182,\n ImmAtomicXor = 183,\n ImmAtomicExch = 184,\n ImmAtomicCmpExch = 185,\n ImmAtomicIMax = 186,\n ImmAtomicIMin = 187,\n ImmAtomicUMax = 188,\n ImmAtomicUMin = 189,\n Sync = 190,\n DAdd = 191,\n DMax = 192,\n DMin = 193,\n DMul = 194,\n DEq = 195,\n DGe = 196,\n DLt = 197,\n DNe = 198,\n DMov = 199,\n DMovc = 200,\n DtoF = 201,\n FtoD = 202,\n EvalSnapped = 203,\n EvalSampleIndex = 204,\n EvalCentroid = 205,\n DclGsInstanceCount = 206,\n Abort = 207,\n DebugBreak = 208,\n 
ReservedBegin11_1 = 209,\n DDiv = 210,\n DFma = 211,\n DRcp = 212,\n Msad = 213,\n DtoI = 214,\n DtoU = 215,\n ItoD = 216,\n UtoD = 217,\n ReservedBegin11_2 = 218,\n Gather4S = 219,\n Gather4CS = 220,\n Gather4PoS = 221,\n Gather4PoCS = 222,\n LdS = 223,\n LdMsS = 224,\n LdUavTypedS = 225,\n LdRawS = 226,\n LdStructuredS = 227,\n SampleLS = 228,\n SampleClzS = 229,\n SampleClampS = 230,\n SampleBClampS = 231,\n SampleDClampS = 232,\n SampleCClampS = 233,\n CheckAccessFullyMapped = 234,\n };\n \n \n /**\n * \\brief Extended opcode\n */\n enum class DxbcExtOpcode : uint32_t {\n Empty = 0,\n SampleControls = 1,\n ResourceDim = 2,\n ResourceReturnType = 3,\n };\n \n \n /**\n * \\brief Operand type\n * \n * Selects the 'register file' from which\n * to retrieve an operand's value.\n */\n enum class DxbcOperandType : uint32_t {\n Temp = 0,\n Input = 1,\n Output = 2,\n IndexableTemp = 3,\n Imm32 = 4,\n Imm64 = 5,\n Sampler = 6,\n Resource = 7,\n ConstantBuffer = 8,\n ImmediateConstantBuffer = 9,\n Label = 10,\n InputPrimitiveId = 11,\n OutputDepth = 12,\n Null = 13,\n Rasterizer = 14,\n OutputCoverageMask = 15,\n Stream = 16,\n FunctionBody = 17,\n FunctionTable = 18,\n Interface = 19,\n FunctionInput = 20,\n FunctionOutput = 21,\n OutputControlPointId = 22,\n InputForkInstanceId = 23,\n InputJoinInstanceId = 24,\n InputControlPoint = 25,\n OutputControlPoint = 26,\n InputPatchConstant = 27,\n InputDomainPoint = 28,\n ThisPointer = 29,\n UnorderedAccessView = 30,\n ThreadGroupSharedMemory = 31,\n InputThreadId = 32,\n InputThreadGroupId = 33,\n InputThreadIdInGroup = 34,\n InputCoverageMask = 35,\n InputThreadIndexInGroup = 36,\n InputGsInstanceId = 37,\n OutputDepthGe = 38,\n OutputDepthLe = 39,\n CycleCounter = 40,\n OutputStencilRef = 41,\n InputInnerCoverage = 42,\n };\n \n \n /**\n * \\brief Number of components\n * \n * Used by operands to determine whether the\n * operand has one, four or zero components.\n */\n enum class DxbcComponentCount : uint32_t {\n 
Component0 = 0,\n Component1 = 1,\n Component4 = 2,\n };\n \n \n /**\n * \\brief Component selection mode\n * \n * When an operand has four components, the\n * component selection mode deterines which\n * components are used for the operation.\n */\n enum class DxbcRegMode : uint32_t {\n Mask = 0,\n Swizzle = 1,\n Select1 = 2,\n };\n \n \n /**\n * \\brief Index representation\n * \n * Determines how an operand\n * register index is stored.\n */\n enum class DxbcOperandIndexRepresentation : uint32_t {\n Imm32 = 0,\n Imm64 = 1,\n Relative = 2,\n Imm32Relative = 3,\n Imm64Relative = 4,\n };\n \n \n /**\n * \\brief Extended operand type\n */\n enum class DxbcOperandExt : uint32_t {\n OperandModifier = 1,\n };\n \n \n /**\n * \\brief Resource dimension\n * The type of a resource.\n */\n enum class DxbcResourceDim : uint32_t {\n Unknown = 0,\n Buffer = 1,\n Texture1D = 2,\n Texture2D = 3,\n Texture2DMs = 4,\n Texture3D = 5,\n TextureCube = 6,\n Texture1DArr = 7,\n Texture2DArr = 8,\n Texture2DMsArr = 9,\n TextureCubeArr = 10,\n RawBuffer = 11,\n StructuredBuffer = 12,\n };\n \n \n /**\n * \\brief Resource return type\n * Data type for resource read ops.\n */\n enum class DxbcResourceReturnType : uint32_t {\n Unorm = 1,\n Snorm = 2,\n Sint = 3,\n Uint = 4,\n Float = 5,\n Mixed = 6, /// ?\n Double = 7,\n Continued = 8, /// ?\n Unused = 9, /// ?\n };\n \n \n /**\n * \\brief Register component type\n * Data type of a register component.\n */\n enum class DxbcRegisterComponentType : uint32_t {\n Unknown = 0,\n Uint32 = 1,\n Sint32 = 2,\n Float32 = 3,\n };\n \n \n /**\n * \\brief Instruction return type\n */\n enum class DxbcInstructionReturnType : uint32_t {\n Float = 0,\n Uint = 1,\n };\n \n \n enum class DxbcSystemValue : uint32_t {\n None = 0,\n Position = 1,\n ClipDistance = 2,\n CullDistance = 3,\n RenderTargetId = 4,\n ViewportId = 5,\n VertexId = 6,\n PrimitiveId = 7,\n InstanceId = 8,\n IsFrontFace = 9,\n SampleIndex = 10,\n FinalQuadUeq0EdgeTessFactor = 11,\n 
FinalQuadVeq0EdgeTessFactor = 12,\n FinalQuadUeq1EdgeTessFactor = 13,\n FinalQuadVeq1EdgeTessFactor = 14,\n FinalQuadUInsideTessFactor = 15,\n FinalQuadVInsideTessFactor = 16,\n FinalTriUeq0EdgeTessFactor = 17,\n FinalTriVeq0EdgeTessFactor = 18,\n FinalTriWeq0EdgeTessFactor = 19,\n FinalTriInsideTessFactor = 20,\n FinalLineDetailTessFactor = 21,\n FinalLineDensityTessFactor = 22,\n Target = 64,\n Depth = 65,\n Coverage = 66,\n DepthGe = 67,\n DepthLe = 68\n };\n \n \n enum class DxbcInterpolationMode : uint32_t {\n Undefined = 0,\n Constant = 1,\n Linear = 2,\n LinearCentroid = 3,\n LinearNoPerspective = 4,\n LinearNoPerspectiveCentroid = 5,\n LinearSample = 6,\n LinearNoPerspectiveSample = 7,\n };\n \n \n enum class DxbcGlobalFlag : uint32_t {\n RefactoringAllowed = 0,\n DoublePrecision = 1,\n EarlyFragmentTests = 2,\n RawStructuredBuffers = 3,\n };\n \n using DxbcGlobalFlags = Flags;\n \n enum class DxbcZeroTest : uint32_t {\n TestZ = 0,\n TestNz = 1,\n };\n \n enum class DxbcResinfoType : uint32_t {\n Float = 0,\n RcpFloat = 1,\n Uint = 2,\n };\n \n enum class DxbcSyncFlag : uint32_t {\n ThreadsInGroup = 0,\n ThreadGroupSharedMemory = 1,\n UavMemoryGroup = 2,\n UavMemoryGlobal = 3,\n };\n \n using DxbcSyncFlags = Flags;\n \n \n /**\n * \\brief Geometry shader input primitive\n */\n enum class DxbcPrimitive : uint32_t {\n Undefined = 0,\n Point = 1,\n Line = 2,\n Triangle = 3,\n LineAdj = 6,\n TriangleAdj = 7,\n Patch1 = 8,\n Patch2 = 9,\n Patch3 = 10,\n Patch4 = 11,\n Patch5 = 12,\n Patch6 = 13,\n Patch7 = 14,\n Patch8 = 15,\n Patch9 = 16,\n Patch10 = 17,\n Patch11 = 18,\n Patch12 = 19,\n Patch13 = 20,\n Patch14 = 21,\n Patch15 = 22,\n Patch16 = 23,\n Patch17 = 24,\n Patch18 = 25,\n Patch19 = 26,\n Patch20 = 27,\n Patch21 = 28,\n Patch22 = 29,\n Patch23 = 30,\n Patch24 = 31,\n Patch25 = 32,\n Patch26 = 33,\n Patch27 = 34,\n Patch28 = 35,\n Patch29 = 36,\n Patch30 = 37,\n Patch31 = 38,\n Patch32 = 39,\n };\n \n \n /**\n * \\brief Geometry shader output topology\n 
*/\n enum class DxbcPrimitiveTopology : uint32_t {\n Undefined = 0,\n PointList = 1,\n LineList = 2,\n LineStrip = 3,\n TriangleList = 4,\n TriangleStrip = 5,\n LineListAdj = 10,\n LineStripAdj = 11,\n TriangleListAdj = 12,\n TriangleStripAdj = 13,\n };\n \n \n /**\n * \\brief Sampler operation mode\n */\n enum class DxbcSamplerMode : uint32_t {\n Default = 0,\n Comparison = 1,\n Mono = 2,\n };\n \n \n /**\n * \\brief Scalar value type\n * \n * Enumerates possible register component\n * types. Scalar types are represented as\n * a one-component vector type.\n */\n enum class DxbcScalarType : uint32_t {\n Uint32 = 0,\n Uint64 = 1,\n Sint32 = 2,\n Sint64 = 3,\n Float32 = 4,\n Float64 = 5,\n Bool = 6,\n };\n \n \n /**\n * \\brief Tessellator domain\n */\n enum class DxbcTessDomain : uint32_t {\n Undefined = 0,\n Isolines = 1,\n Triangles = 2,\n Quads = 3,\n };\n \n /**\n * \\brief Tessellator partitioning\n */\n enum class DxbcTessPartitioning : uint32_t {\n Undefined = 0,\n Integer = 1,\n Pow2 = 2,\n FractOdd = 3,\n FractEven = 4,\n };\n \n /**\n * \\brief UAV definition flags\n */\n enum class DxbcUavFlag : uint32_t {\n GloballyCoherent = 0,\n RasterizerOrdered = 1,\n };\n \n using DxbcUavFlags = Flags;\n \n /**\n * \\brief Tessellator output primitive\n */\n enum class DxbcTessOutputPrimitive : uint32_t {\n Undefined = 0,\n Point = 1,\n Line = 2,\n TriangleCw = 3,\n TriangleCcw = 4,\n };\n \n /**\n * \\brief Custom data class\n * \n * Stores which type of custom data is\n * referenced by the instruction.\n */\n enum class DxbcCustomDataClass : uint32_t {\n Comment = 0,\n DebugInfo = 1,\n Opaque = 2,\n ImmConstBuf = 3,\n };\n \n \n enum class DxbcResourceType : uint32_t {\n Typed = 0,\n Raw = 1,\n Structured = 2,\n };\n\n\n enum class DxbcConstantBufferAccessType : uint32_t {\n StaticallyIndexed = 0,\n DynamicallyIndexed = 1,\n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_code_buffer.h", "class for {\n public:\n SpirvCodeBuffer() {\n 
stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n explicit SpirvCodeBuffer(uint32_t size) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n SpirvCodeBuffer(uint32_t size, const uint32_t* data) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n SpirvCodeBuffer(std::istream& stream) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n template\n SpirvCodeBuffer(const uint32_t (&data)[N])\n : SpirvCodeBuffer(N, data) { }\n ~SpirvCodeBuffer() { 
}\n uint32_t allocId() {\n constexpr size_t BoundIdsOffset = 3;\n\n if (m_code.size() <= BoundIdsOffset)\n return 0;\n\n return m_code[BoundIdsOffset]++;\n }\n void append(const SpirvInstruction& ins) {\n if (other.size() != 0) {\n const size_t size = m_code.size();\n m_code.resize(size + other.m_code.size());\n \n uint32_t* dst = this->m_code.data();\n const uint32_t* src = other.m_code.data();\n \n std::memcpy(dst + size, src, other.size());\n m_ptr += other.m_code.size();\n }\n }\n void append(const SpirvCodeBuffer& other) {\n if (other.size() != 0) {\n const size_t size = m_code.size();\n m_code.resize(size + other.m_code.size());\n \n uint32_t* dst = this->m_code.data();\n const uint32_t* src = other.m_code.data();\n \n std::memcpy(dst + size, src, other.size());\n m_ptr += other.m_code.size();\n }\n }\n void putWord(uint32_t word) {\n m_code.insert(m_code.begin() + m_ptr, word);\n m_ptr += 1;\n }\n void putIns(spv::Op opCode, uint16_t wordCount) {\n this->putWord(\n (static_cast(opCode) << 0)\n | (static_cast(wordCount) << 16));\n }\n void putInt32(uint32_t word) {\n this->putWord(word);\n }\n void putInt64(uint64_t value) {\n this->putWord(value >> 0);\n this->putWord(value >> 32);\n }\n void putFloat32(float value) {\n uint32_t tmp;\n static_assert(sizeof(tmp) == sizeof(value));\n std::memcpy(&tmp, &value, sizeof(value));\n this->putInt32(tmp);\n }\n void putFloat64(double value) {\n uint64_t tmp;\n static_assert(sizeof(tmp) == sizeof(value));\n std::memcpy(&tmp, &value, sizeof(value));\n this->putInt64(tmp);\n }\n void putStr(const char* str) {\n uint32_t word = 0;\n uint32_t nbit = 0;\n \n for (uint32_t i = 0; str[i] != '\\0'; str++) {\n word |= (static_cast(str[i]) & 0xFF) << nbit;\n \n if ((nbit += 8) == 32) {\n this->putWord(word);\n word = 0;\n nbit = 0;\n }\n }\n \n // Commit current word\n this->putWord(word);\n }\n void putHeader(uint32_t version, uint32_t boundIds) {\n this->putWord(spv::MagicNumber);\n this->putWord(version);\n this->putWord(0); 
// Generator\n this->putWord(boundIds);\n this->putWord(0); // Schema\n }\n void erase(size_t size) {\n m_code.erase(\n m_code.begin() + m_ptr,\n m_code.begin() + m_ptr + size);\n }\n uint32_t strLen(const char* str) {\n // Null-termination plus padding\n return (std::strlen(str) + 4) / 4;\n }\n void store(std::ostream& stream) const {\n stream.write(\n reinterpret_cast(m_code.data()),\n sizeof(uint32_t) * m_code.size());\n }\n private:\n std::vector m_code;\n size_t m_ptr = 0;\n};"], ["/lsfg-vk/thirdparty/pe-parse/dump-pe/main.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n\n#include \"vendor/argh.h\"\n\nusing namespace peparse;\n\nint printExps(void *N,\n const VA &funcAddr,\n std::uint16_t ordinal,\n const std::string &mod,\n const std::string &func,\n const std::string &fwd) {\n static_cast(N);\n\n auto address = static_cast(funcAddr);\n\n // save default formatting\n std::ios initial(nullptr);\n initial.copyfmt(std::cout);\n\n std::cout << \"EXP #\";\n std::cout << ordinal;\n std::cout << \": \";\n std::cout << mod;\n std::cout << \"!\";\n std::cout << func;\n std::cout << \": \";\n if (!fwd.empty()) {\n std::cout << fwd;\n } else {\n std::cout << std::showbase << std::hex << address;\n }\n std::cout << \"\\n\";\n\n // restore default formatting\n std::cout.copyfmt(initial);\n return 0;\n}\n\nint printImports(void *N,\n const VA &impAddr,\n const std::string &modName,\n const std::string &symName) {\n static_cast(N);\n\n auto address = static_cast(impAddr);\n\n std::cout << \"0x\" << std::hex << address << \" \" << modName << \"!\" << symName;\n std::cout << \"\\n\";\n return 0;\n}\n\nint printRelocs(void *N, const VA &relocAddr, const reloc_type &type) {\n static_cast(N);\n\n std::cout << \"TYPE: \";\n switch (type) {\n case RELOC_ABSOLUTE:\n std::cout << \"ABSOLUTE\";\n break;\n case RELOC_HIGH:\n std::cout << \"HIGH\";\n break;\n case RELOC_LOW:\n std::cout << \"LOW\";\n break;\n case RELOC_HIGHLOW:\n std::cout << \"HIGHLOW\";\n break;\n case RELOC_HIGHADJ:\n std::cout << \"HIGHADJ\";\n break;\n case RELOC_MIPS_JMPADDR:\n std::cout << \"MIPS_JMPADDR\";\n break;\n case RELOC_MIPS_JMPADDR16:\n std::cout << \"MIPS_JMPADD16\";\n break;\n case RELOC_DIR64:\n std::cout << \"DIR64\";\n break;\n 
default:\n std::cout << \"UNKNOWN\";\n break;\n }\n\n std::cout << \" VA: 0x\" << std::hex << relocAddr << \"\\n\";\n\n return 0;\n}\n\nint printDebugs(void *N,\n const std::uint32_t &type,\n const bounded_buffer *data) {\n static_cast(N);\n\n std::cout << \"Debug Directory Type: \";\n switch (type) {\n case 0:\n std::cout << \"IMAGE_DEBUG_TYPE_UNKNOWN\";\n break;\n case 1:\n std::cout << \"IMAGE_DEBUG_TYPE_COFF\";\n break;\n case 2:\n std::cout << \"IMAGE_DEBUG_TYPE_CODEVIEW\";\n break;\n case 3:\n std::cout << \"IMAGE_DEBUG_TYPE_FPO\";\n break;\n case 4:\n std::cout << \"IMAGE_DEBUG_TYPE_MISC\";\n break;\n case 5:\n std::cout << \"IMAGE_DEBUG_TYPE_EXCEPTION\";\n break;\n case 6:\n std::cout << \"IMAGE_DEBUG_TYPE_FIXUP\";\n break;\n case 7:\n std::cout << \"IMAGE_DEBUG_TYPE_OMAP_TO_SRC\";\n break;\n case 8:\n std::cout << \"IMAGE_DEBUG_TYPE_OMAP_FROM_SRC\";\n break;\n case 9:\n std::cout << \"IMAGE_DEBUG_TYPE_BORLAND\";\n break;\n case 10:\n std::cout << \"IMAGE_DEBUG_TYPE_RESERVED10\";\n break;\n case 11:\n std::cout << \"IMAGE_DEBUG_TYPE_CLSID\";\n break;\n case 12:\n std::cout << \"IMAGE_DEBUG_TYPE_VC_FEATURE\";\n break;\n case 13:\n std::cout << \"IMAGE_DEBUG_TYPE_POGO\";\n break;\n case 14:\n std::cout << \"IMAGE_DEBUG_TYPE_ILTCG\";\n break;\n case 15:\n std::cout << \"IMAGE_DEBUG_TYPE_MPX\";\n break;\n case 16:\n std::cout << \"IMAGE_DEBUG_TYPE_REPRO\";\n break;\n case 20:\n std::cout << \"IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS\";\n break;\n default:\n std::cout << \"INVALID\";\n break;\n }\n std::cout << \"\\n\";\n std::cout << \"Debug Directory Data: \";\n for (uint32_t i = 0; i < data->bufLen; i++) {\n std::cout << \" 0x\" << std::hex << static_cast(data->buf[i]);\n }\n std::cout << \"\\n\";\n\n return 0;\n}\n\nint printSymbols(void *N,\n const std::string &strName,\n const uint32_t &value,\n const int16_t §ionNumber,\n const uint16_t &type,\n const uint8_t &storageClass,\n const uint8_t &numberOfAuxSymbols) {\n static_cast(N);\n\n std::cout << \"Symbol 
Name: \" << strName << \"\\n\";\n std::cout << \"Symbol Value: 0x\" << std::hex << value << \"\\n\";\n\n std::cout << \"Symbol Section Number: \";\n switch (sectionNumber) {\n case IMAGE_SYM_UNDEFINED:\n std::cout << \"UNDEFINED\";\n break;\n case IMAGE_SYM_ABSOLUTE:\n std::cout << \"ABSOLUTE\";\n break;\n case IMAGE_SYM_DEBUG:\n std::cout << \"DEBUG\";\n break;\n default:\n std::cout << sectionNumber;\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Type: \";\n switch (type) {\n case IMAGE_SYM_TYPE_NULL:\n std::cout << \"NULL\";\n break;\n case IMAGE_SYM_TYPE_VOID:\n std::cout << \"VOID\";\n break;\n case IMAGE_SYM_TYPE_CHAR:\n std::cout << \"CHAR\";\n break;\n case IMAGE_SYM_TYPE_SHORT:\n std::cout << \"SHORT\";\n break;\n case IMAGE_SYM_TYPE_INT:\n std::cout << \"INT\";\n break;\n case IMAGE_SYM_TYPE_LONG:\n std::cout << \"LONG\";\n break;\n case IMAGE_SYM_TYPE_FLOAT:\n std::cout << \"FLOAT\";\n break;\n case IMAGE_SYM_TYPE_DOUBLE:\n std::cout << \"DOUBLE\";\n break;\n case IMAGE_SYM_TYPE_STRUCT:\n std::cout << \"STRUCT\";\n break;\n case IMAGE_SYM_TYPE_UNION:\n std::cout << \"UNION\";\n break;\n case IMAGE_SYM_TYPE_ENUM:\n std::cout << \"ENUM\";\n break;\n case IMAGE_SYM_TYPE_MOE:\n std::cout << \"IMAGE_SYM_TYPE_MOE\";\n break;\n case IMAGE_SYM_TYPE_BYTE:\n std::cout << \"BYTE\";\n break;\n case IMAGE_SYM_TYPE_WORD:\n std::cout << \"WORD\";\n break;\n case IMAGE_SYM_TYPE_UINT:\n std::cout << \"UINT\";\n break;\n case IMAGE_SYM_TYPE_DWORD:\n std::cout << \"DWORD\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Storage Class: \";\n switch (storageClass) {\n case IMAGE_SYM_CLASS_END_OF_FUNCTION:\n std::cout << \"FUNCTION\";\n break;\n case IMAGE_SYM_CLASS_NULL:\n std::cout << \"NULL\";\n break;\n case IMAGE_SYM_CLASS_AUTOMATIC:\n std::cout << \"AUTOMATIC\";\n break;\n case IMAGE_SYM_CLASS_EXTERNAL:\n std::cout << \"EXTERNAL\";\n break;\n case IMAGE_SYM_CLASS_STATIC:\n std::cout << 
\"STATIC\";\n break;\n case IMAGE_SYM_CLASS_REGISTER:\n std::cout << \"REGISTER\";\n break;\n case IMAGE_SYM_CLASS_EXTERNAL_DEF:\n std::cout << \"EXTERNAL DEF\";\n break;\n case IMAGE_SYM_CLASS_LABEL:\n std::cout << \"LABEL\";\n break;\n case IMAGE_SYM_CLASS_UNDEFINED_LABEL:\n std::cout << \"UNDEFINED LABEL\";\n break;\n case IMAGE_SYM_CLASS_MEMBER_OF_STRUCT:\n std::cout << \"MEMBER OF STRUCT\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Number of Aux Symbols: \"\n << static_cast(numberOfAuxSymbols) << \"\\n\";\n\n return 0;\n}\n\nint printRich(void *N, const rich_entry &r) {\n static_cast(N);\n std::cout << std::dec;\n std::cout << std::setw(10) << \"ProdId:\" << std::setw(7) << r.ProductId;\n std::cout << std::setw(10) << \"Build:\" << std::setw(7) << r.BuildNumber;\n std::cout << std::setw(10) << \"Name:\" << std::setw(40)\n << GetRichProductName(r.BuildNumber) << \" \"\n << GetRichObjectType(r.ProductId);\n std::cout << std::setw(10) << \"Count:\" << std::setw(7) << r.Count << \"\\n\";\n return 0;\n}\n\nint printRsrc(void *N, const resource &r) {\n static_cast(N);\n\n if (r.type_str.length())\n std::cout << \"Type (string): \" << r.type_str << \"\\n\";\n else\n std::cout << \"Type: 0x\" << std::hex << r.type << \"\\n\";\n\n if (r.name_str.length())\n std::cout << \"Name (string): \" << r.name_str << \"\\n\";\n else\n std::cout << \"Name: 0x\" << std::hex << r.name << \"\\n\";\n\n if (r.lang_str.length())\n std::cout << \"Lang (string): \" << r.lang_str << \"\\n\";\n else\n std::cout << \"Lang: 0x\" << std::hex << r.lang << \"\\n\";\n\n std::cout << \"Codepage: 0x\" << std::hex << r.codepage << \"\\n\";\n std::cout << \"RVA: \" << std::dec << r.RVA << \"\\n\";\n std::cout << \"Size: \" << std::dec << r.size << \"\\n\";\n return 0;\n}\n\nint printSecs(void *N,\n const VA &secBase,\n const std::string &secName,\n const image_section_header &s,\n const bounded_buffer *data) {\n static_cast(N);\n 
static_cast(s);\n\n std::cout << \"Sec Name: \" << secName << \"\\n\";\n std::cout << \"Sec Base: 0x\" << std::hex << secBase << \"\\n\";\n if (data)\n std::cout << \"Sec Size: \" << std::dec << data->bufLen << \"\\n\";\n else\n std::cout << \"Sec Size: 0\"\n << \"\\n\";\n return 0;\n}\n\n#define DUMP_FIELD(x) \\\n std::cout << \"\" #x << \": 0x\"; \\\n std::cout << std::hex << static_cast(p->peHeader.x) << \"\\n\";\n#define DUMP_DEC_FIELD(x) \\\n std::cout << \"\" #x << \": \"; \\\n std::cout << std::dec << static_cast(p->peHeader.x) << \"\\n\";\n#define DUMP_BOOL_FIELD(x) \\\n std::cout << \"\" #x << \": \"; \\\n std::cout << std::boolalpha << static_cast(p->peHeader.x) << \"\\n\";\n\nint main(int argc, char *argv[]) {\n\n argh::parser cmdl(argv);\n\n if (cmdl[{\"-h\", \"--help\"}] || argc <= 1) {\n std::cout << \"dump-pe utility from Trail of Bits\\n\";\n std::cout << \"Repository: https://github.com/trailofbits/pe-parse\\n\\n\";\n std::cout << \"Usage:\\n\\tdump-pe /path/to/executable.exe\\n\";\n return 0;\n } else if (cmdl[{\"-v\", \"--version\"}]) {\n std::cout << \"dump-pe (pe-parse) version \" << PEPARSE_VERSION << \"\\n\";\n return 0;\n }\n\n parsed_pe *p = ParsePEFromFile(cmdl[1].c_str());\n\n if (p == nullptr) {\n std::cout << \"Error: \" << GetPEErr() << \" (\" << GetPEErrString() << \")\"\n << \"\\n\";\n std::cout << \"Location: \" << GetPEErrLoc() << \"\\n\";\n return 1;\n }\n\n if (p != NULL) {\n // Print DOS header\n DUMP_FIELD(dos.e_magic);\n DUMP_FIELD(dos.e_cp);\n DUMP_FIELD(dos.e_crlc);\n DUMP_FIELD(dos.e_cparhdr);\n DUMP_FIELD(dos.e_minalloc);\n DUMP_FIELD(dos.e_maxalloc);\n DUMP_FIELD(dos.e_ss);\n DUMP_FIELD(dos.e_sp);\n DUMP_FIELD(dos.e_csum);\n DUMP_FIELD(dos.e_ip);\n DUMP_FIELD(dos.e_cs);\n DUMP_FIELD(dos.e_lfarlc);\n DUMP_FIELD(dos.e_ovno);\n DUMP_FIELD(dos.e_res[0]);\n DUMP_FIELD(dos.e_res[1]);\n DUMP_FIELD(dos.e_res[2]);\n DUMP_FIELD(dos.e_res[3]);\n DUMP_FIELD(dos.e_oemid);\n DUMP_FIELD(dos.e_oeminfo);\n DUMP_FIELD(dos.e_res2[0]);\n 
DUMP_FIELD(dos.e_res2[1]);\n DUMP_FIELD(dos.e_res2[2]);\n DUMP_FIELD(dos.e_res2[3]);\n DUMP_FIELD(dos.e_res2[4]);\n DUMP_FIELD(dos.e_res2[5]);\n DUMP_FIELD(dos.e_res2[6]);\n DUMP_FIELD(dos.e_res2[7]);\n DUMP_FIELD(dos.e_res2[8]);\n DUMP_FIELD(dos.e_res2[9]);\n DUMP_FIELD(dos.e_lfanew);\n // Print Rich header info\n DUMP_BOOL_FIELD(rich.isPresent);\n if (p->peHeader.rich.isPresent) {\n DUMP_FIELD(rich.DecryptionKey);\n DUMP_FIELD(rich.Checksum);\n DUMP_BOOL_FIELD(rich.isValid);\n IterRich(p, printRich, NULL);\n }\n // print out some things\n DUMP_FIELD(nt.Signature);\n DUMP_FIELD(nt.FileHeader.Machine);\n DUMP_FIELD(nt.FileHeader.NumberOfSections);\n DUMP_DEC_FIELD(nt.FileHeader.TimeDateStamp);\n DUMP_FIELD(nt.FileHeader.PointerToSymbolTable);\n DUMP_DEC_FIELD(nt.FileHeader.NumberOfSymbols);\n DUMP_FIELD(nt.FileHeader.SizeOfOptionalHeader);\n DUMP_FIELD(nt.FileHeader.Characteristics);\n if (p->peHeader.nt.OptionalMagic == NT_OPTIONAL_32_MAGIC) {\n DUMP_FIELD(nt.OptionalHeader.Magic);\n DUMP_DEC_FIELD(nt.OptionalHeader.MajorLinkerVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.MinorLinkerVersion);\n DUMP_FIELD(nt.OptionalHeader.SizeOfCode);\n DUMP_FIELD(nt.OptionalHeader.SizeOfInitializedData);\n DUMP_FIELD(nt.OptionalHeader.SizeOfUninitializedData);\n DUMP_FIELD(nt.OptionalHeader.AddressOfEntryPoint);\n DUMP_FIELD(nt.OptionalHeader.BaseOfCode);\n DUMP_FIELD(nt.OptionalHeader.BaseOfData);\n DUMP_FIELD(nt.OptionalHeader.ImageBase);\n DUMP_FIELD(nt.OptionalHeader.SectionAlignment);\n DUMP_FIELD(nt.OptionalHeader.FileAlignment);\n DUMP_DEC_FIELD(nt.OptionalHeader.MajorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.MinorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.Win32VersionValue);\n DUMP_FIELD(nt.OptionalHeader.SizeOfImage);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeaders);\n DUMP_FIELD(nt.OptionalHeader.CheckSum);\n DUMP_FIELD(nt.OptionalHeader.Subsystem);\n DUMP_FIELD(nt.OptionalHeader.DllCharacteristics);\n 
DUMP_FIELD(nt.OptionalHeader.SizeOfStackReserve);\n DUMP_FIELD(nt.OptionalHeader.SizeOfStackCommit);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeapReserve);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeapCommit);\n DUMP_FIELD(nt.OptionalHeader.LoaderFlags);\n DUMP_DEC_FIELD(nt.OptionalHeader.NumberOfRvaAndSizes);\n } else {\n DUMP_FIELD(nt.OptionalHeader64.Magic);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MajorLinkerVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MinorLinkerVersion);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfCode);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfInitializedData);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfUninitializedData);\n DUMP_FIELD(nt.OptionalHeader64.AddressOfEntryPoint);\n DUMP_FIELD(nt.OptionalHeader64.BaseOfCode);\n DUMP_FIELD(nt.OptionalHeader64.ImageBase);\n DUMP_FIELD(nt.OptionalHeader64.SectionAlignment);\n DUMP_FIELD(nt.OptionalHeader64.FileAlignment);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MajorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MinorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.Win32VersionValue);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfImage);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeaders);\n DUMP_FIELD(nt.OptionalHeader64.CheckSum);\n DUMP_FIELD(nt.OptionalHeader64.Subsystem);\n DUMP_FIELD(nt.OptionalHeader64.DllCharacteristics);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfStackReserve);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfStackCommit);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeapReserve);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeapCommit);\n DUMP_FIELD(nt.OptionalHeader64.LoaderFlags);\n DUMP_DEC_FIELD(nt.OptionalHeader64.NumberOfRvaAndSizes);\n }\n\n#undef DUMP_FIELD\n#undef DUMP_DEC_FIELD\n\n std::cout << \"Imports: \"\n << \"\\n\";\n IterImpVAString(p, printImports, NULL);\n std::cout << \"Relocations: \"\n << \"\\n\";\n IterRelocs(p, printRelocs, NULL);\n std::cout << \"Debug Directories: \"\n << \"\\n\";\n IterDebugs(p, printDebugs, NULL);\n std::cout << \"Symbols (symbol table): \"\n << \"\\n\";\n 
IterSymbols(p, printSymbols, NULL);\n std::cout << \"Sections: \"\n << \"\\n\";\n IterSec(p, printSecs, NULL);\n std::cout << \"Exports: \"\n << \"\\n\";\n IterExpFull(p, printExps, NULL);\n\n // read the first 8 bytes from the entry point and print them\n VA entryPoint;\n if (GetEntryPoint(p, entryPoint)) {\n std::cout << \"First 8 bytes from entry point (0x\";\n std::cout << std::hex << entryPoint << \"):\"\n << \"\\n\";\n for (std::size_t i = 0; i < 8; i++) {\n std::uint8_t b;\n if (!ReadByteAtVA(p, i + entryPoint, b)) {\n std::cout << \" ERR\";\n } else {\n std::cout << \" 0x\" << std::hex << static_cast(b);\n }\n }\n\n std::cout << \"\\n\";\n }\n\n std::cout << \"Resources: \"\n << \"\\n\";\n IterRsrc(p, printRsrc, NULL);\n\n DestructParsedPE(p);\n\n return 0;\n }\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_reader.h", "class DxbcReader {\n public:\n template\n auto readEnum() {\n using Tx = std::underlying_type_t;\n return static_cast(this->readNum());\n }\n DxbcTag readTag() {\n DxbcTag tag;\n this->read(&tag, 4);\n return tag;\n }\n std::string readString() {\n std::string result;\n \n while (m_data[m_pos] != '\\0')\n result.push_back(m_data[m_pos++]);\n \n m_pos++;\n return result;\n }\n void read(void* dst, size_t n) {\n if (m_pos + n > m_size)\n throw DxvkError(\"DxbcReader::read: Unexpected end of file\");\n std::memcpy(dst, m_data + m_pos, n);\n m_pos += n;\n }\n void skip(size_t n) {\n if (m_pos + n > m_size)\n throw DxvkError(\"DxbcReader::skip: Unexpected end of file\");\n m_pos += n;\n }\n DxbcReader clone(size_t pos) const {\n if (pos > m_size)\n throw DxvkError(\"DxbcReader::clone: Invalid offset\");\n return DxbcReader(m_data + pos, m_size - pos);\n }\n DxbcReader resize(size_t size) const {\n if (size > m_size)\n throw DxvkError(\"DxbcReader::resize: Invalid size\");\n return DxbcReader(m_data, size, m_pos);\n }\n void store(std::ostream&& stream) const {\n stream.write(m_data, m_size);\n }\n private:\n const char* m_data = nullptr;\n 
size_t m_size = 0;\n size_t m_pos = 0;\n template\n T readNum() {\n T result;\n this->read(&result, sizeof(result));\n return result;\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_header.h", "class DxbcHeader {\n public:\n DxbcHeader(DxbcReader& reader) {\n // FourCC at the start of the file, must be 'DXBC'\n DxbcTag fourcc = reader.readTag();\n \n if (fourcc != \"DXBC\")\n throw DxvkError(\"DxbcHeader::DxbcHeader: Invalid fourcc, expected 'DXBC'\");\n \n // Stuff we don't actually need to store\n reader.skip(4 * sizeof(uint32_t)); // Check sum\n reader.skip(1 * sizeof(uint32_t)); // Constant 1\n reader.skip(1 * sizeof(uint32_t)); // Bytecode length\n \n // Number of chunks in the file\n uint32_t chunkCount = reader.readu32();\n \n // Chunk offsets are stored immediately after\n for (uint32_t i = 0; i < chunkCount; i++)\n m_chunkOffsets.push_back(reader.readu32());\n }\n ~DxbcHeader() {\n \n }\n private:\n std::vector m_chunkOffsets;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_chunk_shex.h", "class DxbcShex {\n public:\n DxbcShex(DxbcReader reader) {\n // The shader version and type are stored in a 32-bit unit,\n // where the first byte contains the major and minor version\n // numbers, and the high word contains the program type.\n reader.skip(2);\n auto pType = reader.readEnum();\n m_programInfo = DxbcProgramInfo(pType);\n \n // Read the actual shader code as an array of DWORDs.\n auto codeLength = reader.readu32() - 2;\n m_code.resize(codeLength);\n reader.read(m_code.data(), codeLength * sizeof(uint32_t));\n }\n ~DxbcShex() {\n \n }\n private:\n DxbcProgramInfo m_programInfo;\n std::vector m_code;\n};"], ["/lsfg-vk/src/context.cpp", "#include \"context.hpp\"\n#include \"config/config.hpp\"\n#include \"common/exception.hpp\"\n#include \"extract/extract.hpp\"\n#include \"extract/trans.hpp\"\n#include \"utils/utils.hpp\"\n#include \"hooks.hpp\"\n#include \"layer.hpp\"\n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include 
\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nLsContext::LsContext(const Hooks::DeviceInfo& info, VkSwapchainKHR swapchain,\n VkExtent2D extent, const std::vector& swapchainImages)\n : swapchain(swapchain), swapchainImages(swapchainImages),\n extent(extent) {\n // get updated configuration\n auto& conf = Config::activeConf;\n if (!conf.config_file.empty()\n && (\n !std::filesystem::exists(conf.config_file)\n || conf.timestamp != std::filesystem::last_write_time(conf.config_file)\n )) {\n std::cerr << \"lsfg-vk: Rereading configuration, as it is no longer valid.\\n\";\n std::this_thread::sleep_for(std::chrono::milliseconds(100));\n\n // reread configuration\n const std::string file = Utils::getConfigFile();\n const auto name = Utils::getProcessName();\n try {\n Config::updateConfig(file);\n conf = Config::getConfig(name);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: Failed to update configuration, continuing using old:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n }\n\n LSFG_3_1P::finalize();\n LSFG_3_1::finalize();\n\n // print config\n std::cerr << \"lsfg-vk: Reloaded configuration for \" << name.second << \":\\n\";\n if (!conf.dll.empty()) std::cerr << \" Using DLL from: \" << conf.dll << '\\n';\n std::cerr << \" Multiplier: \" << conf.multiplier << '\\n';\n std::cerr << \" Flow Scale: \" << conf.flowScale << '\\n';\n std::cerr << \" Performance Mode: \" << (conf.performance ? \"Enabled\" : \"Disabled\") << '\\n';\n std::cerr << \" HDR Mode: \" << (conf.hdr ? \"Enabled\" : \"Disabled\") << '\\n';\n if (conf.e_present != 2) std::cerr << \" ! Present Mode: \" << conf.e_present << '\\n';\n\n if (conf.multiplier <= 1) return;\n }\n // we could take the format from the swapchain,\n // but honestly this is safer.\n const VkFormat format = conf.hdr\n ? 
VK_FORMAT_R8G8B8A8_UNORM\n : VK_FORMAT_R16G16B16A16_SFLOAT;\n\n // prepare textures for lsfg\n std::array fds{};\n this->frame_0 = Mini::Image(info.device, info.physicalDevice,\n extent, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &fds.at(0));\n this->frame_1 = Mini::Image(info.device, info.physicalDevice,\n extent, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &fds.at(1));\n\n std::vector outFds(conf.multiplier - 1);\n for (size_t i = 0; i < (conf.multiplier - 1); ++i)\n this->out_n.emplace_back(info.device, info.physicalDevice,\n extent, format,\n VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &outFds.at(i));\n\n // initialize lsfg\n auto* lsfgInitialize = LSFG_3_1::initialize;\n auto* lsfgCreateContext = LSFG_3_1::createContext;\n auto* lsfgDeleteContext = LSFG_3_1::deleteContext;\n if (conf.performance) {\n lsfgInitialize = LSFG_3_1P::initialize;\n lsfgCreateContext = LSFG_3_1P::createContext;\n lsfgDeleteContext = LSFG_3_1P::deleteContext;\n }\n\n setenv(\"DISABLE_LSFG\", \"1\", 1); // NOLINT\n\n lsfgInitialize(\n Utils::getDeviceUUID(info.physicalDevice),\n conf.hdr, 1.0F / conf.flowScale, conf.multiplier - 1,\n [](const std::string& name) {\n auto dxbc = Extract::getShader(name);\n auto spirv = Extract::translateShader(dxbc);\n return spirv;\n }\n );\n\n this->lsfgCtxId = std::shared_ptr(\n new int32_t(lsfgCreateContext(fds.at(0), fds.at(1), outFds, extent, format)),\n [lsfgDeleteContext = lsfgDeleteContext](const int32_t* id) {\n lsfgDeleteContext(*id);\n }\n );\n\n unsetenv(\"DISABLE_LSFG\"); // NOLINT\n\n // prepare render passes\n this->cmdPool = Mini::CommandPool(info.device, info.queue.first);\n for (size_t i = 0; i < 8; i++) {\n auto& pass = this->passInfos.at(i);\n pass.renderSemaphores.resize(conf.multiplier - 1);\n pass.acquireSemaphores.resize(conf.multiplier - 1);\n pass.postCopyBufs.resize(conf.multiplier - 1);\n pass.postCopySemaphores.resize(conf.multiplier - 1);\n 
pass.prevPostCopySemaphores.resize(conf.multiplier - 1);\n }\n}\n\nVkResult LsContext::present(const Hooks::DeviceInfo& info, const void* pNext, VkQueue queue,\n const std::vector& gameRenderSemaphores, uint32_t presentIdx) {\n const auto& conf = Config::activeConf;\n auto& pass = this->passInfos.at(this->frameIdx % 8);\n\n // 1. copy swapchain image to frame_0/frame_1\n int preCopySemaphoreFd{};\n pass.preCopySemaphores.at(0) = Mini::Semaphore(info.device, &preCopySemaphoreFd);\n pass.preCopySemaphores.at(1) = Mini::Semaphore(info.device);\n pass.preCopyBuf = Mini::CommandBuffer(info.device, this->cmdPool);\n pass.preCopyBuf.begin();\n\n Utils::copyImage(pass.preCopyBuf.handle(),\n this->swapchainImages.at(presentIdx),\n this->frameIdx % 2 == 0 ? this->frame_0.handle() : this->frame_1.handle(),\n this->extent.width, this->extent.height,\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n true, false);\n\n pass.preCopyBuf.end();\n\n std::vector gameRenderSemaphores2 = gameRenderSemaphores;\n if (this->frameIdx > 0)\n gameRenderSemaphores2.emplace_back(this->passInfos.at((this->frameIdx - 1) % 8)\n .preCopySemaphores.at(1).handle());\n pass.preCopyBuf.submit(info.queue.second,\n gameRenderSemaphores2,\n { pass.preCopySemaphores.at(0).handle(),\n pass.preCopySemaphores.at(1).handle() });\n\n // 2. render intermediary frames\n std::vector renderSemaphoreFds(conf.multiplier - 1);\n for (size_t i = 0; i < (conf.multiplier - 1); ++i)\n pass.renderSemaphores.at(i) = Mini::Semaphore(info.device, &renderSemaphoreFds.at(i));\n\n if (conf.performance)\n LSFG_3_1P::presentContext(*this->lsfgCtxId,\n preCopySemaphoreFd,\n renderSemaphoreFds);\n else\n LSFG_3_1::presentContext(*this->lsfgCtxId,\n preCopySemaphoreFd,\n renderSemaphoreFds);\n\n for (size_t i = 0; i < (conf.multiplier - 1); i++) {\n // 3. 
acquire next swapchain image\n pass.acquireSemaphores.at(i) = Mini::Semaphore(info.device);\n uint32_t imageIdx{};\n auto res = Layer::ovkAcquireNextImageKHR(info.device, this->swapchain, UINT64_MAX,\n pass.acquireSemaphores.at(i).handle(), VK_NULL_HANDLE, &imageIdx);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to acquire next swapchain image\");\n\n // 4. copy output image to swapchain image\n pass.postCopySemaphores.at(i) = Mini::Semaphore(info.device);\n pass.prevPostCopySemaphores.at(i) = Mini::Semaphore(info.device);\n pass.postCopyBufs.at(i) = Mini::CommandBuffer(info.device, this->cmdPool);\n pass.postCopyBufs.at(i).begin();\n\n Utils::copyImage(pass.postCopyBufs.at(i).handle(),\n this->out_n.at(i).handle(),\n this->swapchainImages.at(imageIdx),\n this->extent.width, this->extent.height,\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n false, true);\n\n pass.postCopyBufs.at(i).end();\n pass.postCopyBufs.at(i).submit(info.queue.second,\n { pass.acquireSemaphores.at(i).handle(),\n pass.renderSemaphores.at(i).handle() },\n { pass.postCopySemaphores.at(i).handle(),\n pass.prevPostCopySemaphores.at(i).handle() });\n\n // 5. present swapchain image\n std::vector waitSemaphores{ pass.postCopySemaphores.at(i).handle() };\n if (i != 0) waitSemaphores.emplace_back(pass.prevPostCopySemaphores.at(i - 1).handle());\n\n const VkPresentInfoKHR presentInfo{\n .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,\n .pNext = i == 0 ? pNext : nullptr, // only set on first present\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphores.data(),\n .swapchainCount = 1,\n .pSwapchains = &this->swapchain,\n .pImageIndices = &imageIdx,\n };\n res = Layer::ovkQueuePresentKHR(queue, &presentInfo);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to present swapchain image\");\n }\n\n // 6. 
present actual next frame\n VkSemaphore lastPrevPostCopySemaphore =\n pass.prevPostCopySemaphores.at(conf.multiplier - 1 - 1).handle();\n const VkPresentInfoKHR presentInfo{\n .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,\n .waitSemaphoreCount = 1,\n .pWaitSemaphores = &lastPrevPostCopySemaphore,\n .swapchainCount = 1,\n .pSwapchains = &this->swapchain,\n .pImageIndices = &presentIdx,\n };\n auto res = Layer::ovkQueuePresentKHR(queue, &presentInfo);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to present swapchain image\");\n\n this->frameIdx++;\n return res;\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pepy/pepy.cpp", "/*\n * Copyright (c) 2013, Wesley Shields . All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n#include \n#include \n#include \n\nusing namespace peparse;\n\n/* NOTE(ww): These don't necessarily have to be the same, but currently are.\n */\n#define PEPY_VERSION PEPARSE_VERSION\n\n/* These are used to across multiple objects. */\n#define PEPY_OBJECT_GET(OBJ, ATTR) \\\n static PyObject *pepy_##OBJ##_get_##ATTR(PyObject *self, void *closure) { \\\n Py_INCREF(((pepy_##OBJ *) self)->ATTR); \\\n return ((pepy_##OBJ *) self)->ATTR; \\\n }\n\n#define OBJECTGETTER(OBJ, ATTR, DOC) \\\n { \\\n (char *) #ATTR, (getter) pepy_##OBJ##_get_##ATTR, \\\n (setter) pepy_attr_not_writable, (char *) #DOC, NULL \\\n }\n\n/* 'OPTIONAL' references the fact that these are from the Optional Header */\n#define OBJECTGETTER_OPTIONAL(ATTR, DOC) \\\n { \\\n (char *) #ATTR, (getter) pepy_parsed_get_optional_##ATTR, \\\n (setter) pepy_attr_not_writable, (char *) #DOC, NULL \\\n }\n\nstatic PyObject *pepy_error;\n\nstruct pepy {\n PyObject_HEAD\n};\n\nstruct pepy_parsed {\n PyObject_HEAD parsed_pe *pe;\n};\n\nstruct pepy_section {\n PyObject_HEAD PyObject *name;\n PyObject *base;\n PyObject *length;\n PyObject *virtaddr;\n PyObject *virtsize;\n PyObject *numrelocs;\n PyObject *numlinenums;\n PyObject *characteristics;\n PyObject *data;\n};\n\nstruct pepy_resource {\n PyObject_HEAD PyObject *type_str;\n PyObject *name_str;\n PyObject *lang_str;\n PyObject *type;\n PyObject *name;\n PyObject *lang;\n PyObject *codepage;\n PyObject *RVA;\n PyObject *size;\n 
PyObject *data;\n};\n\nstruct pepy_import {\n PyObject_HEAD PyObject *name;\n PyObject *sym;\n PyObject *addr;\n};\n\nstruct pepy_export {\n PyObject_HEAD PyObject *mod;\n PyObject *func;\n PyObject *addr;\n};\n\nstruct pepy_relocation {\n PyObject_HEAD PyObject *type;\n PyObject *addr;\n};\n\n/* None of the attributes in these objects are writable. */\nstatic int\npepy_attr_not_writable(PyObject *self, PyObject *value, void *closure) {\n PyErr_SetString(PyExc_TypeError, \"Attribute not writable.\");\n return -1;\n}\n\nstatic PyObject *\npepy_import_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_import *self;\n\n self = (pepy_import *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_import_init(pepy_import *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OOO:pepy_import_init\", &self->name, &self->sym, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_import_dealloc(pepy_import *self) {\n Py_XDECREF(self->name);\n Py_XDECREF(self->sym);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(import, name);\nPEPY_OBJECT_GET(import, sym);\nPEPY_OBJECT_GET(import, addr);\n\nstatic PyGetSetDef pepy_import_getseters[] = {\n OBJECTGETTER(import, name, \"Name\"),\n OBJECTGETTER(import, sym, \"Symbol\"),\n OBJECTGETTER(import, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_import_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.import\", /* tp_name */\n sizeof(pepy_import), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_import_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy import 
object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_import_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_import_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_import_new /* tp_new */\n};\n\nstatic PyObject *\npepy_export_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_export *self;\n\n self = (pepy_export *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_export_init(pepy_export *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OOO:pepy_export_init\", &self->mod, &self->func, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_export_dealloc(pepy_export *self) {\n Py_XDECREF(self->mod);\n Py_XDECREF(self->func);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(export, mod);\nPEPY_OBJECT_GET(export, func);\nPEPY_OBJECT_GET(export, addr);\n\nstatic PyGetSetDef pepy_export_getseters[] = {\n OBJECTGETTER(export, mod, \"Module\"),\n OBJECTGETTER(export, func, \"Function\"),\n OBJECTGETTER(export, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_export_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.export\", /* tp_name */\n sizeof(pepy_export), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_export_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy export object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear 
*/\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_export_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_export_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_export_new /* tp_new */\n};\n\nstatic PyObject *\npepy_relocation_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_relocation *self;\n\n self = (pepy_relocation *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_relocation_init(pepy_relocation *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OO:pepy_relocation_init\", &self->type, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_relocation_dealloc(pepy_relocation *self) {\n Py_XDECREF(self->type);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(relocation, type);\nPEPY_OBJECT_GET(relocation, addr);\n\nstatic PyGetSetDef pepy_relocation_getseters[] = {\n OBJECTGETTER(relocation, type, \"Type\"),\n OBJECTGETTER(relocation, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_relocation_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.relocation\", /* tp_name */\n sizeof(pepy_relocation), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_relocation_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy relocation object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, 
/* tp_methods */\n 0, /* tp_members */\n pepy_relocation_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_relocation_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_relocation_new /* tp_new */\n};\n\nstatic PyObject *\npepy_section_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_section *self;\n\n self = (pepy_section *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_section_init(pepy_section *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(args,\n \"OOOOOOOOO:pepy_section_init\",\n &self->name,\n &self->base,\n &self->length,\n &self->virtaddr,\n &self->virtsize,\n &self->numrelocs,\n &self->numlinenums,\n &self->characteristics,\n &self->data))\n return -1;\n return 0;\n}\n\nstatic void pepy_section_dealloc(pepy_section *self) {\n Py_XDECREF(self->name);\n Py_XDECREF(self->base);\n Py_XDECREF(self->length);\n Py_XDECREF(self->virtaddr);\n Py_XDECREF(self->virtsize);\n Py_XDECREF(self->numrelocs);\n Py_XDECREF(self->numlinenums);\n Py_XDECREF(self->characteristics);\n Py_XDECREF(self->data);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(section, name);\nPEPY_OBJECT_GET(section, base);\nPEPY_OBJECT_GET(section, length);\nPEPY_OBJECT_GET(section, virtaddr);\nPEPY_OBJECT_GET(section, virtsize);\nPEPY_OBJECT_GET(section, numrelocs);\nPEPY_OBJECT_GET(section, numlinenums);\nPEPY_OBJECT_GET(section, characteristics);\nPEPY_OBJECT_GET(section, data);\n\nstatic PyGetSetDef pepy_section_getseters[] = {\n OBJECTGETTER(section, name, \"Name\"),\n OBJECTGETTER(section, base, \"Base address\"),\n OBJECTGETTER(section, length, \"Length\"),\n OBJECTGETTER(section, virtaddr, \"Virtual address\"),\n OBJECTGETTER(section, virtsize, \"Virtual size\"),\n OBJECTGETTER(section, numrelocs, \"Number of relocations\"),\n OBJECTGETTER(section, numlinenums, \"Number of line numbers\"),\n 
OBJECTGETTER(section, characteristics, \"Characteristics\"),\n OBJECTGETTER(section, data, \"Section data\"),\n {NULL}};\n\nstatic PyTypeObject pepy_section_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.section\", /* tp_name */\n sizeof(pepy_section), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_section_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy section object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_section_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_section_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_section_new /* tp_new */\n};\n\nstatic PyObject *\npepy_resource_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_resource *self;\n\n self = (pepy_resource *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_resource_init(pepy_resource *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(args,\n \"OOOOOOOOOO:pepy_resource_init\",\n &self->type_str,\n &self->name_str,\n &self->lang_str,\n &self->type,\n &self->name,\n &self->lang,\n &self->codepage,\n &self->RVA,\n &self->size,\n &self->data))\n return -1;\n\n return 0;\n}\n\nstatic void pepy_resource_dealloc(pepy_resource *self) {\n Py_XDECREF(self->type_str);\n Py_XDECREF(self->name_str);\n Py_XDECREF(self->lang_str);\n Py_XDECREF(self->type);\n Py_XDECREF(self->name);\n Py_XDECREF(self->lang);\n 
Py_XDECREF(self->codepage);\n Py_XDECREF(self->RVA);\n Py_XDECREF(self->size);\n Py_XDECREF(self->data);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(resource, type_str);\nPEPY_OBJECT_GET(resource, name_str);\nPEPY_OBJECT_GET(resource, lang_str);\nPEPY_OBJECT_GET(resource, type);\nPEPY_OBJECT_GET(resource, name);\nPEPY_OBJECT_GET(resource, lang);\nPEPY_OBJECT_GET(resource, codepage);\nPEPY_OBJECT_GET(resource, RVA);\nPEPY_OBJECT_GET(resource, size);\nPEPY_OBJECT_GET(resource, data);\n\nstatic PyObject *pepy_resource_type_as_str(PyObject *self, PyObject *args) {\n PyObject *ret;\n char *str;\n long type;\n\n type = PyLong_AsUnsignedLong(((pepy_resource *) self)->type);\n if (type == -1) {\n if (PyErr_Occurred()) {\n PyErr_PrintEx(0);\n return NULL;\n }\n }\n switch ((resource_type) type) {\n case (RT_CURSOR):\n str = (char *) \"CURSOR\";\n break;\n case (RT_BITMAP):\n str = (char *) \"BITMAP\";\n break;\n case (RT_ICON):\n str = (char *) \"ICON\";\n break;\n case (RT_MENU):\n str = (char *) \"MENU\";\n break;\n case (RT_DIALOG):\n str = (char *) \"DIALOG\";\n break;\n case (RT_STRING):\n str = (char *) \"STRING\";\n break;\n case (RT_FONTDIR):\n str = (char *) \"FONTDIR\";\n break;\n case (RT_FONT):\n str = (char *) \"FONT\";\n break;\n case (RT_ACCELERATOR):\n str = (char *) \"ACCELERATOR\";\n break;\n case (RT_RCDATA):\n str = (char *) \"RCDATA\";\n break;\n case (RT_MESSAGETABLE):\n str = (char *) \"MESSAGETABLE\";\n break;\n case (RT_GROUP_CURSOR):\n str = (char *) \"GROUP_CURSOR\";\n break;\n case (RT_GROUP_ICON):\n str = (char *) \"GROUP_ICON\";\n break;\n case (RT_VERSION):\n str = (char *) \"VERSION\";\n break;\n case (RT_DLGINCLUDE):\n str = (char *) \"DLGINCLUDE\";\n break;\n case (RT_PLUGPLAY):\n str = (char *) \"PLUGPLAY\";\n break;\n case (RT_VXD):\n str = (char *) \"VXD\";\n break;\n case (RT_ANICURSOR):\n str = (char *) \"ANICURSOR\";\n break;\n case (RT_ANIICON):\n str = (char *) \"ANIICON\";\n break;\n case (RT_HTML):\n str = 
(char *) \"HTML\";\n break;\n case (RT_MANIFEST):\n str = (char *) \"MANIFEST\";\n break;\n default:\n str = (char *) \"UNKNOWN\";\n break;\n }\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyMethodDef pepy_resource_methods[] = {\n {\"type_as_str\",\n pepy_resource_type_as_str,\n METH_NOARGS,\n \"Return the resource type as a string.\"},\n {NULL}};\n\nstatic PyGetSetDef pepy_resource_getseters[] = {\n OBJECTGETTER(resource, type_str, \"Type string\"),\n OBJECTGETTER(resource, name_str, \"Name string\"),\n OBJECTGETTER(resource, lang_str, \"Lang string\"),\n OBJECTGETTER(resource, type, \"Type\"),\n OBJECTGETTER(resource, name, \"Name\"),\n OBJECTGETTER(resource, lang, \"Language\"),\n OBJECTGETTER(resource, codepage, \"Codepage\"),\n OBJECTGETTER(resource, RVA, \"RVA\"),\n OBJECTGETTER(resource, size, \"Size (specified in RDAT)\"),\n OBJECTGETTER(resource, data, \"Resource data\"),\n {NULL}};\n\nstatic PyTypeObject pepy_resource_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.resource\", /* tp_name */\n sizeof(pepy_resource), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_resource_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy resource object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n pepy_resource_methods, /* tp_methods */\n 0, /* tp_members */\n pepy_resource_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, 
/* tp_dictoffset */\n (initproc) pepy_resource_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_resource_new /* tp_new */\n};\n\nstatic PyObject *\npepy_parsed_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_parsed *self;\n\n self = (pepy_parsed *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_parsed_init(pepy_parsed *self, PyObject *args, PyObject *kwds) {\n char *pe_path;\n\n if (!PyArg_ParseTuple(args, \"s:pepy_parse\", &pe_path))\n return -1;\n\n if (!pe_path)\n return -1;\n\n self->pe = ParsePEFromFile(pe_path);\n if (!self->pe) {\n return -2;\n }\n\n return 0;\n}\n\nstatic void pepy_parsed_dealloc(pepy_parsed *self) {\n DestructParsedPE(self->pe);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nstatic PyObject *pepy_parsed_get_entry_point(PyObject *self, PyObject *args) {\n VA entrypoint;\n PyObject *ret;\n\n if (!GetEntryPoint(((pepy_parsed *) self)->pe, entrypoint))\n Py_RETURN_NONE;\n\n ret = PyLong_FromUnsignedLongLong(entrypoint);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return object.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_machine_as_str(PyObject *self,\n PyObject *args) {\n PyObject *ret;\n const char *str;\n\n str = GetMachineAsString(((pepy_parsed *) self)->pe);\n if (!str)\n Py_RETURN_NONE;\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_subsystem_as_str(PyObject *self,\n PyObject *args) {\n PyObject *ret;\n const char *str;\n\n str = GetSubsystemAsString(((pepy_parsed *) self)->pe);\n if (!str)\n Py_RETURN_NONE;\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_bytes(PyObject *self, PyObject *args) {\n uint64_t start;\n Py_ssize_t len, idx;\n PyObject 
*ret;\n\n if (!PyArg_ParseTuple(args, \"KK:pepy_parsed_get_bytes\", &start, &len))\n return NULL;\n\n /*\n * XXX: a new implementation read all bytes in char* and use\n * PybyteArray_FromStringAndSize\n */\n\n uint8_t *buf = new (std::nothrow) uint8_t[len];\n if (!buf) {\n /* in case allocation failed */\n PyErr_SetString(pepy_error,\n \"Unable to create initial buffer (allocation failure).\");\n return NULL;\n }\n\n for (idx = 0; idx < len; idx++) {\n if (!ReadByteAtVA(((pepy_parsed *) self)->pe, start + idx, buf[idx]))\n break;\n }\n\n /* use idx as content length, if we get less than asked for */\n ret = PyByteArray_FromStringAndSize(reinterpret_cast(buf), idx);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new byte array.\");\n return NULL;\n }\n\n delete[] buf;\n return ret;\n}\n\n/*\n * This is used to convert bounded buffers into python byte array objects.\n * In case the buffer is NULL, return an empty bytearray.\n */\nstatic PyObject *pepy_data_converter(bounded_buffer *data) {\n PyObject *ret;\n const char *str;\n Py_ssize_t len;\n\n if (!data || !data->buf) {\n str = \"\";\n len = 0;\n } else {\n str = (const char *) data->buf;\n len = data->bufLen;\n }\n\n ret = PyByteArray_FromStringAndSize(str, len);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to convert data to byte array.\");\n return NULL;\n }\n\n return ret;\n}\n\nint section_callback(void *cbd,\n const VA &base,\n const std::string &name,\n const image_section_header &s,\n const bounded_buffer *data) {\n uint32_t buflen;\n PyObject *sect;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * I've seen some interesting binaries with a section where the\n * PointerToRawData and SizeOfRawData are invalid. 
The parser library\n * handles this by setting sectionData to NULL as returned by splitBuffer().\n * The sectionData (passed in to us as *data) is converted using\n * pepy_data_converter() which will return an empty string object.\n * However, we need to address the fact that we pass an invalid length\n * via data->bufLen.\n */\n if (!data) {\n buflen = 0;\n } else {\n buflen = data->bufLen;\n }\n\n /*\n * The tuple item order is important here. It is passed into the\n * section type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"sKKIIHHIO&\",\n name.c_str(),\n base,\n buflen,\n s.VirtualAddress,\n s.Misc.VirtualSize,\n s.NumberOfRelocations,\n s.NumberOfLinenumbers,\n s.Characteristics,\n pepy_data_converter,\n data);\n if (!tuple)\n return 1;\n\n sect = pepy_section_new(&pepy_section_type, NULL, NULL);\n if (!sect) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_section_init((pepy_section *) sect, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, sect) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(sect);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_sections(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterSec(((pepy_parsed *) self)->pe, section_callback, ret);\n\n return ret;\n}\n\nint resource_callback(void *cbd, const resource &r) {\n PyObject *rsrc;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * section type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"s#s#s#IIIIIIO&\",\n r.type_str.c_str(),\n r.type_str.length(),\n r.name_str.c_str(),\n r.name_str.length(),\n r.lang_str.c_str(),\n r.lang_str.length(),\n r.type,\n r.name,\n r.lang,\n r.codepage,\n r.RVA,\n r.size,\n pepy_data_converter,\n r.buf);\n if (!tuple)\n return 1;\n\n rsrc = pepy_resource_new(&pepy_resource_type, NULL, NULL);\n if (!rsrc) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_resource_init((pepy_resource *) rsrc, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new resource.\");\n return 1;\n }\n\n if (PyList_Append(list, rsrc) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(rsrc);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_resources(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterRsrc(((pepy_parsed *) self)->pe, resource_callback, ret);\n\n return ret;\n}\n\nint import_callback(void *cbd,\n const VA &addr,\n const std::string &name,\n const std::string &sym) {\n PyObject *imp;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * import type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"ssI\", name.c_str(), sym.c_str(), addr);\n if (!tuple)\n return 1;\n\n imp = pepy_import_new(&pepy_import_type, NULL, NULL);\n if (!imp) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_import_init((pepy_import *) imp, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, imp) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(imp);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_imports(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterImpVAString(((pepy_parsed *) self)->pe, import_callback, ret);\n\n return ret;\n}\n\nint export_callback(void *cbd,\n const VA &addr,\n const std::string &mod,\n const std::string &func) {\n PyObject *exp;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * export type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"ssI\", mod.c_str(), func.c_str(), addr);\n if (!tuple)\n return 1;\n\n exp = pepy_export_new(&pepy_export_type, NULL, NULL);\n if (!exp) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_export_init((pepy_export *) exp, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, exp) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(exp);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_exports(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n /*\n * This could use the same callback and object as imports but the names\n * of the attributes would be slightly off.\n */\n IterExpVA(((pepy_parsed *) self)->pe, export_callback, ret);\n\n return ret;\n}\n\nint reloc_callback(void *cbd, const VA &addr, const reloc_type &type) {\n PyObject *reloc;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * relocation type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"II\", type, addr);\n if (!tuple)\n return 1;\n\n reloc = pepy_relocation_new(&pepy_relocation_type, NULL, NULL);\n if (!reloc) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_relocation_init((pepy_relocation *) reloc, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, reloc) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(reloc);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_relocations(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterRelocs(((pepy_parsed *) self)->pe, reloc_callback, ret);\n\n return ret;\n}\n\n#define PEPY_PARSED_GET(ATTR, VAL) \\\n static PyObject *pepy_parsed_get_##ATTR(PyObject *self, void *closure) { \\\n PyObject *ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n return ret; \\\n }\n\nPEPY_PARSED_GET(signature, Signature);\nPEPY_PARSED_GET(machine, FileHeader.Machine);\nPEPY_PARSED_GET(numberofsections, FileHeader.NumberOfSections);\nPEPY_PARSED_GET(timedatestamp, FileHeader.TimeDateStamp);\nPEPY_PARSED_GET(numberofsymbols, FileHeader.NumberOfSymbols);\nPEPY_PARSED_GET(characteristics, FileHeader.Characteristics);\nPEPY_PARSED_GET(magic, OptionalMagic);\n\n/*\n * This is used to get things from the optional header, which can be either\n * the PE32 or PE32+ version, depending upon the magic value. 
Technically\n * the magic is stored in the OptionalHeader, but to make life easier pe-parse\n * stores the value in nt_header_32 along with the appropriate optional header.\n * This is why \"magic\" is handled above, and not here.\n */\n#define PEPY_PARSED_GET_OPTIONAL(ATTR, VAL) \\\n static PyObject *pepy_parsed_get_optional_##ATTR(PyObject *self, \\\n void *closure) { \\\n PyObject *ret = NULL; \\\n if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic == \\\n NT_OPTIONAL_32_MAGIC) { \\\n ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n } else if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic == \\\n NT_OPTIONAL_64_MAGIC) { \\\n ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader64.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n } else { \\\n PyErr_SetString(pepy_error, \"Bad magic value.\"); \\\n } \\\n return ret; \\\n }\n\nPEPY_PARSED_GET_OPTIONAL(majorlinkerver, MajorLinkerVersion);\nPEPY_PARSED_GET_OPTIONAL(minorlinkerver, MinorLinkerVersion);\nPEPY_PARSED_GET_OPTIONAL(codesize, SizeOfCode);\nPEPY_PARSED_GET_OPTIONAL(initdatasize, SizeOfInitializedData);\nPEPY_PARSED_GET_OPTIONAL(uninitdatasize, SizeOfUninitializedData);\nPEPY_PARSED_GET_OPTIONAL(entrypointaddr, AddressOfEntryPoint);\nPEPY_PARSED_GET_OPTIONAL(baseofcode, BaseOfCode);\nPEPY_PARSED_GET_OPTIONAL(imagebase, ImageBase);\nPEPY_PARSED_GET_OPTIONAL(sectionalignement, SectionAlignment);\nPEPY_PARSED_GET_OPTIONAL(filealignment, FileAlignment);\nPEPY_PARSED_GET_OPTIONAL(majorosver, MajorOperatingSystemVersion);\nPEPY_PARSED_GET_OPTIONAL(minorosver, MinorOperatingSystemVersion);\nPEPY_PARSED_GET_OPTIONAL(win32ver, Win32VersionValue);\nPEPY_PARSED_GET_OPTIONAL(imagesize, SizeOfImage);\nPEPY_PARSED_GET_OPTIONAL(headersize, 
SizeOfHeaders);\nPEPY_PARSED_GET_OPTIONAL(checksum, CheckSum);\nPEPY_PARSED_GET_OPTIONAL(subsystem, Subsystem);\nPEPY_PARSED_GET_OPTIONAL(dllcharacteristics, DllCharacteristics);\nPEPY_PARSED_GET_OPTIONAL(stackreservesize, SizeOfStackReserve);\nPEPY_PARSED_GET_OPTIONAL(stackcommitsize, SizeOfStackCommit);\nPEPY_PARSED_GET_OPTIONAL(heapreservesize, SizeOfHeapReserve);\nPEPY_PARSED_GET_OPTIONAL(heapcommitsize, SizeOfHeapCommit);\nPEPY_PARSED_GET_OPTIONAL(loaderflags, LoaderFlags);\nPEPY_PARSED_GET_OPTIONAL(rvasandsize, NumberOfRvaAndSizes);\n\n/*\n * BaseOfData is only in PE32, not PE32+. Thus, it uses a non-standard\n * getter function compared to the other shared fields.\n */\nstatic PyObject *pepy_parsed_get_optional_baseofdata(PyObject *self,\n void *closure) {\n PyObject *ret = NULL;\n if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic ==\n NT_OPTIONAL_32_MAGIC) {\n ret = PyLong_FromUnsignedLong(\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader.BaseOfData);\n if (!ret)\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\");\n } else if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic ==\n NT_OPTIONAL_64_MAGIC) {\n PyErr_SetString(PyExc_AttributeError, \"Not available on PE32+.\");\n } else {\n PyErr_SetString(pepy_error, \"Bad magic value.\");\n }\n return ret;\n}\n\nstatic PyGetSetDef pepy_parsed_getseters[] = {\n OBJECTGETTER(parsed, signature, \"PE Signature\"),\n OBJECTGETTER(parsed, machine, \"Machine\"),\n OBJECTGETTER(parsed, numberofsections, \"Number of sections\"),\n OBJECTGETTER(parsed, timedatestamp, \"Timedate stamp\"),\n OBJECTGETTER(parsed, numberofsymbols, \"Number of symbols\"),\n OBJECTGETTER(parsed, characteristics, \"Characteristics\"),\n OBJECTGETTER(parsed, magic, \"Magic\"),\n OBJECTGETTER_OPTIONAL(majorlinkerver, \"Major linker version\"),\n OBJECTGETTER_OPTIONAL(minorlinkerver, \"Minor linker version\"),\n OBJECTGETTER_OPTIONAL(codesize, \"Size of code\"),\n OBJECTGETTER_OPTIONAL(initdatasize, \"Size 
of initialized data\"),\n OBJECTGETTER_OPTIONAL(uninitdatasize, \"Size of uninitialized data\"),\n OBJECTGETTER_OPTIONAL(entrypointaddr, \"Address of entry point\"),\n OBJECTGETTER_OPTIONAL(baseofcode, \"Base address of code\"),\n OBJECTGETTER_OPTIONAL(imagebase, \"Image base address\"),\n OBJECTGETTER_OPTIONAL(sectionalignement, \"Section alignment\"),\n OBJECTGETTER_OPTIONAL(filealignment, \"File alignment\"),\n OBJECTGETTER_OPTIONAL(majorosver, \"Major OS version\"),\n OBJECTGETTER_OPTIONAL(minorosver, \"Minor OS version\"),\n OBJECTGETTER_OPTIONAL(win32ver, \"Win32 version\"),\n OBJECTGETTER_OPTIONAL(imagesize, \"Size of image\"),\n OBJECTGETTER_OPTIONAL(headersize, \"Size of headers\"),\n OBJECTGETTER_OPTIONAL(checksum, \"Checksum\"),\n OBJECTGETTER_OPTIONAL(subsystem, \"Subsystem\"),\n OBJECTGETTER_OPTIONAL(dllcharacteristics, \"DLL characteristics\"),\n OBJECTGETTER_OPTIONAL(stackreservesize, \"Size of stack reserve\"),\n OBJECTGETTER_OPTIONAL(stackcommitsize, \"Size of stack commit\"),\n OBJECTGETTER_OPTIONAL(heapreservesize, \"Size of heap reserve\"),\n OBJECTGETTER_OPTIONAL(heapcommitsize, \"Size of heap commit\"),\n OBJECTGETTER_OPTIONAL(loaderflags, \"Loader flags\"),\n OBJECTGETTER_OPTIONAL(rvasandsize, \"Number of RVA and sizes\"),\n /* Base of data is only available in PE32, not PE32+. 
*/\n {(char *) \"baseofdata\",\n (getter) pepy_parsed_get_optional_baseofdata,\n (setter) pepy_attr_not_writable,\n (char *) \"Base address of data\",\n NULL},\n {NULL}};\n\nstatic PyMethodDef pepy_parsed_methods[] = {\n {\"get_entry_point\",\n pepy_parsed_get_entry_point,\n METH_NOARGS,\n \"Return the entry point address.\"},\n {\"get_machine_as_str\",\n pepy_parsed_get_machine_as_str,\n METH_NOARGS,\n \"Return the machine as a human readable string.\"},\n {\"get_subsystem_as_str\",\n pepy_parsed_get_subsystem_as_str,\n METH_NOARGS,\n \"Return the subsystem as a human readable string.\"},\n {\"get_bytes\",\n pepy_parsed_get_bytes,\n METH_VARARGS,\n \"Return the first N bytes at a given address.\"},\n {\"get_sections\",\n pepy_parsed_get_sections,\n METH_NOARGS,\n \"Return a list of section objects.\"},\n {\"get_imports\",\n pepy_parsed_get_imports,\n METH_NOARGS,\n \"Return a list of import objects.\"},\n {\"get_exports\",\n pepy_parsed_get_exports,\n METH_NOARGS,\n \"Return a list of export objects.\"},\n {\"get_relocations\",\n pepy_parsed_get_relocations,\n METH_NOARGS,\n \"Return a list of relocation objects.\"},\n {\"get_resources\",\n pepy_parsed_get_resources,\n METH_NOARGS,\n \"Return a list of resource objects.\"},\n {NULL}};\n\nstatic PyTypeObject pepy_parsed_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.parsed\", /* tp_name */\n sizeof(pepy_parsed), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_parsed_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */\n \"pepy parsed object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset 
*/\n 0, /* tp_iter */\n 0, /* tp_iternext */\n pepy_parsed_methods, /* tp_methods */\n 0, /* tp_members */\n pepy_parsed_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_parsed_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_parsed_new /* tp_new */\n};\n\nstatic PyObject *pepy_parse(PyObject *self, PyObject *args) {\n PyObject *parsed;\n int ret;\n char *err_str = NULL;\n\n parsed = pepy_parsed_new(&pepy_parsed_type, NULL, NULL);\n if (!parsed) {\n PyErr_SetString(pepy_error, \"Unable to make new parsed object.\");\n return NULL;\n }\n\n ret = pepy_parsed_init((pepy_parsed *) parsed, args, NULL);\n if (ret < 0) {\n if (ret == -2) {\n // error (loc)\n size_t len = GetPEErrString().length() + GetPEErrLoc().length() + 4;\n err_str = (char *) malloc(len);\n if (!err_str)\n return PyErr_NoMemory();\n snprintf(err_str,\n len,\n \"%s (%s)\",\n GetPEErrString().c_str(),\n GetPEErrLoc().c_str());\n PyErr_SetString(pepy_error, err_str);\n } else\n PyErr_SetString(pepy_error, \"Unable to init new parsed object.\");\n return NULL;\n }\n\n return parsed;\n}\n\nstatic PyMethodDef pepy_methods[] = {\n {\"parse\", pepy_parse, METH_VARARGS, \"Parse PE from file.\"}, {NULL}};\n\nPyMODINIT_FUNC PyInit_pepy(void) {\n PyObject *m;\n\n if (PyType_Ready(&pepy_parsed_type) < 0 ||\n PyType_Ready(&pepy_section_type) < 0 ||\n PyType_Ready(&pepy_import_type) < 0 ||\n PyType_Ready(&pepy_export_type) < 0 ||\n PyType_Ready(&pepy_relocation_type) < 0 ||\n PyType_Ready(&pepy_resource_type) < 0)\n return NULL;\n\n static struct PyModuleDef moduledef = {\n PyModuleDef_HEAD_INIT,\n \"pepy\",\n \"Python interface to pe-parse.\",\n -1,\n pepy_methods,\n NULL,\n NULL,\n NULL,\n NULL,\n };\n\n m = PyModule_Create(&moduledef);\n if (!m)\n return NULL;\n\n pepy_error = PyErr_NewException((char *) \"pepy.error\", NULL, NULL);\n Py_INCREF(pepy_error);\n PyModule_AddObject(m, \"error\", pepy_error);\n\n 
Py_INCREF(&pepy_parsed_type);\n PyModule_AddObject(m, \"pepy_parsed\", (PyObject *) &pepy_parsed_type);\n\n Py_INCREF(&pepy_section_type);\n PyModule_AddObject(m, \"pepy_section\", (PyObject *) &pepy_section_type);\n\n Py_INCREF(&pepy_import_type);\n PyModule_AddObject(m, \"pepy_import\", (PyObject *) &pepy_import_type);\n\n Py_INCREF(&pepy_export_type);\n PyModule_AddObject(m, \"pepy_export\", (PyObject *) &pepy_export_type);\n\n Py_INCREF(&pepy_relocation_type);\n PyModule_AddObject(m, \"pepy_relocation\", (PyObject *) &pepy_relocation_type);\n\n Py_INCREF(&pepy_resource_type);\n PyModule_AddObject(m, \"pepy_resource\", (PyObject *) &pepy_resource_type);\n\n PyModule_AddStringMacro(m, PEPY_VERSION);\n PyModule_AddStringMacro(m, PEPARSE_VERSION);\n PyModule_AddStringConstant(m, \"__version__\", PEPY_VERSION);\n\n PyModule_AddIntMacro(m, MZ_MAGIC);\n PyModule_AddIntMacro(m, NT_MAGIC);\n PyModule_AddIntMacro(m, NUM_DIR_ENTRIES);\n PyModule_AddIntMacro(m, NT_OPTIONAL_32_MAGIC);\n PyModule_AddIntMacro(m, NT_SHORT_NAME_LEN);\n PyModule_AddIntMacro(m, DIR_EXPORT);\n PyModule_AddIntMacro(m, DIR_IMPORT);\n PyModule_AddIntMacro(m, DIR_RESOURCE);\n PyModule_AddIntMacro(m, DIR_EXCEPTION);\n PyModule_AddIntMacro(m, DIR_SECURITY);\n PyModule_AddIntMacro(m, DIR_BASERELOC);\n PyModule_AddIntMacro(m, DIR_DEBUG);\n PyModule_AddIntMacro(m, DIR_ARCHITECTURE);\n PyModule_AddIntMacro(m, DIR_GLOBALPTR);\n PyModule_AddIntMacro(m, DIR_TLS);\n PyModule_AddIntMacro(m, DIR_LOAD_CONFIG);\n PyModule_AddIntMacro(m, DIR_BOUND_IMPORT);\n PyModule_AddIntMacro(m, DIR_IAT);\n PyModule_AddIntMacro(m, DIR_DELAY_IMPORT);\n PyModule_AddIntMacro(m, DIR_COM_DESCRIPTOR);\n\n PyModule_AddIntMacro(m, IMAGE_SCN_TYPE_NO_PAD);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_CODE);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_INITIALIZED_DATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_UNINITIALIZED_DATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_OTHER);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_INFO);\n PyModule_AddIntMacro(m, 
IMAGE_SCN_LNK_REMOVE);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_COMDAT);\n PyModule_AddIntMacro(m, IMAGE_SCN_NO_DEFER_SPEC_EXC);\n PyModule_AddIntMacro(m, IMAGE_SCN_GPREL);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_FARDATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_PURGEABLE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_16BIT);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_LOCKED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_PRELOAD);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_1BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_2BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_4BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_8BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_16BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_32BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_64BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_128BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_256BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_512BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_1024BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_2048BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_4096BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_8192BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_MASK);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_NRELOC_OVFL);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_DISCARDABLE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_NOT_CACHED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_NOT_PAGED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_SHARED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_EXECUTE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_READ);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_WRITE);\n\n return m;\n}\n"], ["/lsfg-vk/src/extract/trans.cpp", "#include \"extract/trans.hpp\"\n\n#include \n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing namespace Extract;\n\nstruct BindingOffsets {\n uint32_t bindingIndex{};\n uint32_t bindingOffset{};\n uint32_t setIndex{};\n uint32_t setOffset{};\n};\n\nstd::vector 
Extract::translateShader(std::vector bytecode) {\n // compile the shader\n dxvk::DxbcReader reader(reinterpret_cast(bytecode.data()), bytecode.size());\n dxvk::DxbcModule module(reader);\n const dxvk::DxbcModuleInfo info{};\n auto code = module.compile(info, \"CS\");\n\n // find all bindings\n std::vector bindingOffsets;\n std::vector varIds;\n for (auto ins : code) {\n if (ins.opCode() == spv::OpDecorate) {\n if (ins.arg(2) == spv::DecorationBinding) {\n const uint32_t varId = ins.arg(1);\n bindingOffsets.resize(std::max(bindingOffsets.size(), size_t(varId + 1)));\n bindingOffsets[varId].bindingIndex = ins.arg(3);\n bindingOffsets[varId].bindingOffset = ins.offset() + 3;\n varIds.push_back(varId);\n }\n\n if (ins.arg(2) == spv::DecorationDescriptorSet) {\n const uint32_t varId = ins.arg(1);\n bindingOffsets.resize(std::max(bindingOffsets.size(), size_t(varId + 1)));\n bindingOffsets[varId].setIndex = ins.arg(3);\n bindingOffsets[varId].setOffset = ins.offset() + 3;\n }\n }\n\n if (ins.opCode() == spv::OpFunction)\n break;\n }\n\n std::vector validBindings;\n for (const auto varId : varIds) {\n auto info = bindingOffsets[varId];\n\n if (info.bindingOffset)\n validBindings.push_back(info);\n }\n\n // patch binding offset\n #pragma clang diagnostic push\n #pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n for (size_t i = 0; i < validBindings.size(); i++)\n code.data()[validBindings.at(i).bindingOffset] // NOLINT\n = static_cast(i);\n #pragma clang diagnostic pop\n\n // return the new bytecode\n std::vector spirvBytecode(code.size());\n std::copy_n(reinterpret_cast(code.data()),\n code.size(), spirvBytecode.data());\n return spirvBytecode;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_tag.h", "#pragma once\n\n#include \"dxbc_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Four-character tag\n * \n * Used to identify chunks in the\n * compiled DXBC file by name.\n */\n class DxbcTag {\n \n public:\n \n DxbcTag() {\n for (size_t i = 0; i < 4; 
i++)\n m_chars[i] = '\\0';\n }\n \n DxbcTag(const char* tag) {\n for (size_t i = 0; i < 4; i++)\n m_chars[i] = tag[i];\n }\n \n bool operator == (const DxbcTag& other) const {\n bool result = true;\n for (size_t i = 0; i < 4; i++)\n result &= m_chars[i] == other.m_chars[i];\n return result;\n }\n \n bool operator != (const DxbcTag& other) const {\n return !this->operator == (other);\n }\n \n const char* operator & () const { return m_chars; }\n char* operator & () { return m_chars; }\n \n private:\n \n char m_chars[4];\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_small_vector.h", "#pragma once\n\n#include \n#include \n#include \n#include \n\n#include \"util_bit.h\"\n#include \"util_likely.h\"\n\nnamespace dxvk {\n\n template\n class small_vector {\n using storage = std::aligned_storage_t;\n public:\n\n constexpr static size_t MinCapacity = N;\n\n small_vector() { }\n\n small_vector(size_t size) {\n resize(size);\n }\n\n small_vector(const small_vector& other) {\n reserve(other.m_size);\n\n for (size_t i = 0; i < other.m_size; i++)\n *ptr(i) = *other.ptr(i);\n\n m_size = other.m_size;\n }\n\n small_vector& operator = (const small_vector& other) {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n reserve(other.m_size);\n\n for (size_t i = 0; i < other.m_size; i++)\n *ptr(i) = *other.ptr(i);\n\n m_size = other.m_size;\n return *this;\n }\n\n small_vector(small_vector&& other) {\n if (other.m_size <= N) {\n for (size_t i = 0; i < other.m_size; i++)\n new (&u.m_data[i]) T(std::move(*other.ptr(i)));\n } else {\n u.m_ptr = other.u.m_ptr;\n m_capacity = other.m_capacity;\n\n other.u.m_ptr = nullptr;\n other.m_capacity = N;\n }\n\n m_size = other.m_size;\n other.m_size = 0;\n }\n\n small_vector& operator = (small_vector&& other) {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n\n if (other.m_size <= N) {\n m_capacity = N;\n\n for (size_t i = 0; i < other.m_size; i++)\n new (&u.m_data[i]) 
T(std::move(*other.ptr(i)));\n } else {\n u.m_ptr = other.u.m_ptr;\n m_capacity = other.m_capacity;\n\n other.u.m_ptr = nullptr;\n other.m_capacity = N;\n }\n\n m_size = other.m_size;\n other.m_size = 0;\n return *this;\n }\n\n ~small_vector() {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n }\n\n size_t size() const {\n return m_size;\n }\n\n void reserve(size_t n) {\n if (likely(n <= m_capacity))\n return;\n\n n = pick_capacity(n);\n\n storage* data = new storage[n];\n\n for (size_t i = 0; i < m_size; i++) {\n new (&data[i]) T(std::move(*ptr(i)));\n ptr(i)->~T();\n }\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n\n m_capacity = n;\n u.m_ptr = data;\n }\n\n const T* data() const { return ptr(0); }\n T* data() { return ptr(0); }\n\n void resize(size_t n) {\n reserve(n);\n\n for (size_t i = n; i < m_size; i++)\n ptr(i)->~T();\n\n for (size_t i = m_size; i < n; i++)\n new (ptr(i)) T();\n\n m_size = n;\n }\n\n void push_back(const T& object) {\n reserve(m_size + 1);\n new (ptr(m_size++)) T(object);\n }\n\n void push_back(T&& object) {\n reserve(m_size + 1);\n new (ptr(m_size++)) T(std::move(object));\n }\n\n template\n T& emplace_back(Args... 
args) {\n reserve(m_size + 1);\n return *(new (ptr(m_size++)) T(std::forward(args)...));\n }\n\n void erase(size_t idx) {\n ptr(idx)->~T();\n\n for (size_t i = idx; i < m_size - 1; i++) {\n new (ptr(i)) T(std::move(*ptr(i + 1)));\n ptr(i + 1)->~T();\n }\n }\n\n void pop_back() {\n ptr(--m_size)->~T();\n }\n\n void clear() {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n m_size = 0;\n }\n\n bool empty() const {\n return m_size == 0;\n }\n\n T& operator [] (size_t idx) { return *ptr(idx); }\n const T& operator [] (size_t idx) const { return *ptr(idx); }\n\n T& front() { return *ptr(0); }\n const T& front() const { return *ptr(0); }\n\n T& back() { return *ptr(m_size - 1); }\n const T& back() const { return *ptr(m_size - 1); }\n\n private:\n\n size_t m_capacity = N;\n size_t m_size = 0;\n\n union {\n storage* m_ptr;\n storage m_data[N];\n } u;\n\n size_t pick_capacity(size_t n) {\n // Pick next largest power of two for the new capacity\n return size_t(1u) << ((sizeof(n) * 8u) - bit::lzcnt(n - 1));\n }\n\n T* ptr(size_t idx) {\n return m_capacity == N\n ? reinterpret_cast(&u.m_data[idx])\n : reinterpret_cast(&u.m_ptr[idx]);\n }\n\n const T* ptr(size_t idx) const {\n return m_capacity == N\n ? 
reinterpret_cast(&u.m_data[idx])\n : reinterpret_cast(&u.m_ptr[idx]);\n }\n\n };\n\n}\n"], ["/lsfg-vk/src/utils/benchmark.cpp", "#include \"utils/benchmark.hpp\"\n#include \"config/config.hpp\"\n#include \"extract/extract.hpp\"\n#include \"extract/trans.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Benchmark;\n\nvoid Benchmark::run(uint32_t width, uint32_t height) {\n const auto& conf = Config::activeConf;\n\n auto* lsfgInitialize = LSFG_3_1::initialize;\n auto* lsfgCreateContext = LSFG_3_1::createContext;\n auto* lsfgPresentContext = LSFG_3_1::presentContext;\n if (conf.performance) {\n lsfgInitialize = LSFG_3_1P::initialize;\n lsfgCreateContext = LSFG_3_1P::createContext;\n lsfgPresentContext = LSFG_3_1P::presentContext;\n }\n\n // create the benchmark context\n const char* lsfgDeviceUUID = std::getenv(\"LSFG_DEVICE_UUID\");\n const uint64_t deviceUUID = lsfgDeviceUUID\n ? std::stoull(std::string(lsfgDeviceUUID), nullptr, 16) : 0x1463ABAC;\n\n setenv(\"DISABLE_LSFG\", \"1\", 1); // NOLINT\n\n Extract::extractShaders();\n lsfgInitialize(\n deviceUUID, // some magic number if not given\n conf.hdr, 1.0F / conf.flowScale, conf.multiplier - 1,\n [](const std::string& name) -> std::vector {\n auto dxbc = Extract::getShader(name);\n auto spirv = Extract::translateShader(dxbc);\n return spirv;\n }\n );\n const int32_t ctx = lsfgCreateContext(-1, -1, {},\n { .width = width, .height = height },\n conf.hdr ? 
VK_FORMAT_R16G16B16A16_SFLOAT : VK_FORMAT_R8G8B8A8_UNORM\n );\n\n unsetenv(\"DISABLE_LSFG\"); // NOLINT\n\n // run the benchmark (run 8*n + 1 so the fences are waited on)\n const auto now = std::chrono::high_resolution_clock::now();\n const uint64_t iterations = 8 * 500UL;\n\n std::cerr << \"lsfg-vk: Benchmark started, running \" << iterations << \" iterations...\\n\";\n for (uint64_t count = 0; count < iterations + 1; count++) {\n lsfgPresentContext(ctx, -1, {});\n\n if (count % 50 == 0 && count > 0)\n std::cerr << \"lsfg-vk: \"\n << std::setprecision(2) << std::fixed\n << static_cast(count) / static_cast(iterations) * 100.0F\n << \"% done (\" << count + 1 << \"/\" << iterations << \")\\r\";\n }\n const auto then = std::chrono::high_resolution_clock::now();\n\n // print results\n const auto ms = std::chrono::duration_cast(then - now).count();\n\n const auto perIteration = static_cast(ms) / static_cast(iterations);\n\n const uint64_t totalGen = (conf.multiplier - 1) * iterations;\n const auto genFps = static_cast(totalGen) / (static_cast(ms) / 1000.0F);\n\n const uint64_t totalFrames = iterations * conf.multiplier;\n const auto totalFps = static_cast(totalFrames) / (static_cast(ms) / 1000.0F);\n\n std::cerr << \"lsfg-vk: Benchmark completed in \" << ms << \" ms\\n\";\n std::cerr << \" Time taken per real frame: \"\n << std::setprecision(2) << std::fixed << perIteration << \" ms\\n\";\n std::cerr << \" Generated \" << totalGen << \" frames in total at \"\n << std::setprecision(2) << std::fixed << genFps << \" FPS\\n\";\n std::cerr << \" Total of \" << totalFrames << \" frames presented at \"\n << std::setprecision(2) << std::fixed << totalFps << \" FPS\\n\";\n\n // sleep for a second, then exit\n std::this_thread::sleep_for(std::chrono::seconds(1));\n _exit(0);\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_common.h", "class DxbcProgramType {\n public:\n VkShaderStageFlagBits shaderStage() const {\n switch (m_type) {\n case DxbcProgramType::PixelShader : 
return VK_SHADER_STAGE_FRAGMENT_BIT;\n case DxbcProgramType::VertexShader : return VK_SHADER_STAGE_VERTEX_BIT;\n case DxbcProgramType::GeometryShader : return VK_SHADER_STAGE_GEOMETRY_BIT;\n case DxbcProgramType::HullShader : return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;\n case DxbcProgramType::DomainShader : return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;\n case DxbcProgramType::ComputeShader : return VK_SHADER_STAGE_COMPUTE_BIT;\n default: throw DxvkError(\"DxbcProgramInfo::shaderStage: Unsupported program type\");\n }\n }\n spv::ExecutionModel executionModel() const {\n switch (m_type) {\n case DxbcProgramType::PixelShader : return spv::ExecutionModelFragment;\n case DxbcProgramType::VertexShader : return spv::ExecutionModelVertex;\n case DxbcProgramType::GeometryShader : return spv::ExecutionModelGeometry;\n case DxbcProgramType::HullShader : return spv::ExecutionModelTessellationControl;\n case DxbcProgramType::DomainShader : return spv::ExecutionModelTessellationEvaluation;\n case DxbcProgramType::ComputeShader : return spv::ExecutionModelGLCompute;\n default: throw DxvkError(\"DxbcProgramInfo::executionModel: Unsupported program type\");\n }\n }\n private:\n DxbcProgramType m_type = DxbcProgramType::PixelShader;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/rc/util_rc_ptr.h", "#pragma once\n\n#include \n#include \n#include \n#include \n#include \n\nnamespace dxvk {\n\n /**\n * \\brief Pointer for reference-counted objects\n * \n * This only requires the given type to implement \\c incRef\n * and \\c decRef methods that adjust the reference count.\n * \\tparam T Object type\n */\n template\n class Rc {\n template\n friend class Rc;\n public:\n\n Rc() = default;\n Rc(std::nullptr_t) { }\n\n Rc(T* object)\n : m_object(object) {\n this->incRef();\n }\n\n Rc(const Rc& other)\n : m_object(other.m_object) {\n this->incRef();\n }\n\n template\n Rc(const Rc& other)\n : m_object(other.m_object) {\n this->incRef();\n }\n\n Rc(Rc&& other)\n : 
m_object(other.m_object) {\n other.m_object = nullptr;\n }\n\n template\n Rc(Rc&& other)\n : m_object(other.m_object) {\n other.m_object = nullptr;\n }\n\n Rc& operator = (std::nullptr_t) {\n this->decRef();\n m_object = nullptr;\n return *this;\n }\n\n Rc& operator = (const Rc& other) {\n other.incRef();\n this->decRef();\n m_object = other.m_object;\n return *this;\n }\n\n template\n Rc& operator = (const Rc& other) {\n other.incRef();\n this->decRef();\n m_object = other.m_object;\n return *this;\n }\n\n Rc& operator = (Rc&& other) {\n this->decRef();\n this->m_object = other.m_object;\n other.m_object = nullptr;\n return *this;\n }\n\n template\n Rc& operator = (Rc&& other) {\n this->decRef();\n this->m_object = other.m_object;\n other.m_object = nullptr;\n return *this;\n }\n\n ~Rc() {\n this->decRef();\n }\n\n T& operator * () const { return *m_object; }\n T* operator -> () const { return m_object; }\n T* ptr() const { return m_object; }\n\n template bool operator == (const Rc& other) const { return m_object == other.m_object; }\n template bool operator != (const Rc& other) const { return m_object != other.m_object; }\n\n template bool operator == (Tx* other) const { return m_object == other; }\n template bool operator != (Tx* other) const { return m_object != other; }\n\n bool operator == (std::nullptr_t) const { return m_object == nullptr; }\n bool operator != (std::nullptr_t) const { return m_object != nullptr; }\n \n explicit operator bool () const {\n return m_object != nullptr;\n }\n\n /**\n * \\brief Sets pointer without acquiring a reference\n *\n * Must only be use when a reference has been taken via\n * other means.\n * \\param [in] object Object pointer\n */\n void unsafeInsert(T* object) {\n this->decRef();\n m_object = object;\n }\n\n /**\n * \\brief Extracts raw pointer\n *\n * Sets the smart pointer to null without decrementing the\n * reference count. 
Must only be used when the reference\n * count is decremented in some other way.\n * \\returns Pointer to owned object\n */\n T* unsafeExtract() {\n return std::exchange(m_object, nullptr);\n }\n\n /**\n * \\brief Creates smart pointer without taking reference\n *\n * Must only be used when a refernece has been obtained via other means.\n * \\param [in] object Pointer to object to take ownership of\n */\n static Rc unsafeCreate(T* object) {\n return Rc(object, false);\n }\n\n private:\n\n T* m_object = nullptr;\n\n explicit Rc(T* object, bool)\n : m_object(object) { }\n\n force_inline void incRef() const {\n if (m_object != nullptr)\n m_object->incRef();\n }\n\n force_inline void decRef() const {\n if (m_object != nullptr) {\n if constexpr (std::is_void_vdecRef())>) {\n m_object->decRef();\n } else {\n // Deprecated, objects should manage themselves now.\n if (!m_object->decRef())\n delete m_object;\n }\n }\n }\n\n };\n\n template\n bool operator == (Tx* a, const Rc& b) { return b == a; }\n\n template\n bool operator != (Tx* a, const Rc& b) { return b != a; }\n\n struct RcHash {\n template\n size_t operator () (const Rc& rc) const {\n return reinterpret_cast(rc.ptr()) / sizeof(T);\n }\n };\n\n}\n\ntemplate\nstd::ostream& operator << (std::ostream& os, const dxvk::Rc& rc) {\n return os << rc.ptr();\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_pipelayout.h", "#pragma once\n\n#include \n\n#include \n\n#include \"dxvk_hash.h\"\n\n#include \"util_math.h\"\n#include \"util_bit.h\"\n#include \"util_flags.h\"\n\nnamespace dxvk {\n\n class DxvkDevice;\n class DxvkPipelineManager;\n\n /**\n * \\brief Order-invariant atomic access operation\n *\n * Information used to optimize barriers when a resource\n * is accessed exlusively via order-invariant stores.\n */\n struct DxvkAccessOp {\n enum OpType : uint16_t {\n None = 0x0u,\n Or = 0x1u,\n And = 0x2u,\n Xor = 0x3u,\n Add = 0x4u,\n IMin = 0x5u,\n IMax = 0x6u,\n UMin = 0x7u,\n UMax = 0x8u,\n\n StoreF = 0xdu,\n StoreUi = 
0xeu,\n StoreSi = 0xfu,\n };\n\n DxvkAccessOp() = default;\n DxvkAccessOp(OpType t)\n : op(uint16_t(t)) { }\n\n DxvkAccessOp(OpType t, uint16_t constant)\n : op(uint16_t(t) | (constant << 4u)) { }\n\n uint16_t op = 0u;\n\n bool operator == (const DxvkAccessOp& t) const { return op == t.op; }\n bool operator != (const DxvkAccessOp& t) const { return op != t.op; }\n\n template, bool> = true>\n explicit operator T() const { return op; }\n };\n\n static_assert(sizeof(DxvkAccessOp) == sizeof(uint16_t));\n\n /**\n * \\brief Binding info\n *\n * Stores metadata for a single binding in\n * a given shader, or for the whole pipeline.\n */\n struct DxvkBindingInfo {\n VkDescriptorType descriptorType = VK_DESCRIPTOR_TYPE_MAX_ENUM; ///< Vulkan descriptor type\n uint32_t resourceBinding = 0u; ///< API binding slot for the resource\n VkImageViewType viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM; ///< Image view type\n VkShaderStageFlagBits stage = VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM; ///< Shader stage\n VkAccessFlags access = 0u; ///< Access mask for the resource\n DxvkAccessOp accessOp = DxvkAccessOp::None; ///< Order-invariant store type, if any\n bool uboSet = false; ///< Whether to include this in the UBO set\n bool isMultisampled = false; ///< Multisampled binding\n\n /**\n * \\brief Computes descriptor set index for the given binding\n *\n * This is determines based on the shader stages that use the binding.\n * \\returns Descriptor set index\n */\n uint32_t computeSetIndex() const;\n\n /**\n * \\brief Numeric value of the binding\n *\n * Used when sorting bindings.\n * \\returns Numeric value\n */\n uint32_t value() const;\n\n /**\n * \\brief Checks for equality\n *\n * \\param [in] other Binding to compare to\n * \\returns \\c true if both bindings are equal\n */\n bool eq(const DxvkBindingInfo& other) const;\n\n /**\n * \\brief Hashes binding info\n * \\returns Binding hash\n */\n size_t hash() const;\n\n };\n\n}\n"], ["/lsfg-vk/src/utils/utils.cpp", "#include 
\"utils/utils.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n#include \n#include // NOLINT\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Utils;\n\nstd::pair Utils::findQueue(VkDevice device, VkPhysicalDevice physicalDevice,\n VkDeviceCreateInfo* desc, VkQueueFlags flags) {\n std::vector enabledQueues(desc->queueCreateInfoCount);\n std::copy_n(desc->pQueueCreateInfos, enabledQueues.size(), enabledQueues.data());\n\n uint32_t familyCount{};\n Layer::ovkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount, nullptr);\n std::vector families(familyCount);\n Layer::ovkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount,\n families.data());\n\n std::optional idx;\n for (const auto& queueInfo : enabledQueues) {\n if ((queueInfo.queueFamilyIndex < families.size()) &&\n (families[queueInfo.queueFamilyIndex].queueFlags & flags)) {\n idx = queueInfo.queueFamilyIndex;\n break;\n }\n }\n if (!idx.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"No suitable queue found\");\n\n VkQueue queue{};\n Layer::ovkGetDeviceQueue(device, *idx, 0, &queue);\n\n auto res = Layer::ovkSetDeviceLoaderData(device, queue);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to set device loader data for queue\");\n\n return { *idx, queue };\n}\n\nuint64_t Utils::getDeviceUUID(VkPhysicalDevice physicalDevice) {\n VkPhysicalDeviceProperties properties{};\n Layer::ovkGetPhysicalDeviceProperties(physicalDevice, &properties);\n\n return static_cast(properties.vendorID) << 32 | properties.deviceID;\n}\n\nuint32_t Utils::getMaxImageCount(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface) {\n VkSurfaceCapabilitiesKHR capabilities{};\n auto res = Layer::ovkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice,\n surface, &capabilities);\n if (res != VK_SUCCESS)\n throw 
LSFG::vulkan_error(res, \"Failed to get surface capabilities\");\n if (capabilities.maxImageCount == 0)\n return 999; // :3\n return capabilities.maxImageCount;\n}\n\nstd::vector Utils::addExtensions(const char* const* extensions, size_t count,\n const std::vector& requiredExtensions) {\n std::vector ext(count);\n std::copy_n(extensions, count, ext.data());\n\n for (const auto& e : requiredExtensions) {\n auto it = std::ranges::find_if(ext,\n [e](const char* extName) {\n return std::string(extName) == std::string(e);\n });\n if (it == ext.end())\n ext.push_back(e);\n }\n\n return ext;\n}\n\nvoid Utils::copyImage(VkCommandBuffer buf,\n VkImage src, VkImage dst,\n uint32_t width, uint32_t height,\n VkPipelineStageFlags pre, VkPipelineStageFlags post,\n bool makeSrcPresentable, bool makeDstPresentable) {\n const VkImageMemoryBarrier srcBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,\n .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n .image = src,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const VkImageMemoryBarrier dstBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = dst,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const std::vector barriers = { srcBarrier, dstBarrier };\n Layer::ovkCmdPipelineBarrier(buf,\n pre, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,\n 0, nullptr, 0, nullptr,\n static_cast(barriers.size()), barriers.data());\n\n const VkImageBlit imageBlit{\n .srcSubresource = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .layerCount = 1\n },\n .srcOffsets = {\n { 0, 0, 0 },\n { static_cast(width), static_cast(height), 1 }\n },\n .dstSubresource = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n 
.layerCount = 1\n },\n .dstOffsets = {\n { 0, 0, 0 },\n { static_cast(width), static_cast(height), 1 }\n }\n };\n Layer::ovkCmdBlitImage(\n buf,\n src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n 1, &imageBlit,\n VK_FILTER_NEAREST\n );\n\n if (makeSrcPresentable) {\n const VkImageMemoryBarrier presentBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .image = src,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n Layer::ovkCmdPipelineBarrier(buf,\n VK_PIPELINE_STAGE_TRANSFER_BIT, post, 0,\n 0, nullptr, 0, nullptr,\n 1, &presentBarrier);\n }\n\n if (makeDstPresentable) {\n const VkImageMemoryBarrier presentBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT,\n .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .image = dst,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n Layer::ovkCmdPipelineBarrier(buf,\n VK_PIPELINE_STAGE_TRANSFER_BIT, post, 0,\n 0, nullptr, 0, nullptr,\n 1, &presentBarrier);\n }\n}\n\nnamespace {\n auto& logCounts() {\n static std::unordered_map map;\n return map;\n }\n}\n\nvoid Utils::logLimitN(const std::string& id, size_t n, const std::string& message) {\n auto& count = logCounts()[id];\n if (count <= n)\n std::cerr << \"lsfg-vk: \" << message << '\\n';\n if (count == n)\n std::cerr << \"(above message has been repeated \" << n << \" times, suppressing further)\\n\";\n count++;\n}\n\nvoid Utils::resetLimitN(const std::string& id) noexcept {\n logCounts().erase(id);\n}\n\nstd::pair Utils::getProcessName() {\n const char* process_name = std::getenv(\"LSFG_PROCESS\");\n if (process_name && *process_name != '\\0')\n 
return { process_name, process_name };\n\n const char* benchmark_flag = std::getenv(\"LSFG_BENCHMARK\");\n if (benchmark_flag)\n return { \"benchmark\", \"benchmark\" };\n std::array exe{};\n\n const ssize_t exe_len = readlink(\"/proc/self/exe\", exe.data(), exe.size() - 1);\n if (exe_len <= 0)\n return { \"Unknown Process\", \"unknown\" };\n exe.at(static_cast(exe_len)) = '\\0';\n\n std::ifstream comm_file(\"/proc/self/comm\");\n if (!comm_file.is_open())\n return { std::string(exe.data()), \"unknown\" };\n std::array comm{};\n comm_file.read(comm.data(), 256);\n comm.at(static_cast(comm_file.gcount())) = '\\0';\n std::string comm_str(comm.data());\n if (comm_str.back() == '\\n')\n comm_str.pop_back();\n\n return{ std::string(exe.data()), comm_str };\n}\n\nstd::string Utils::getConfigFile() {\n const char* configFile = std::getenv(\"LSFG_CONFIG\");\n if (configFile && *configFile != '\\0')\n return{configFile};\n const char* xdgPath = std::getenv(\"XDG_CONFIG_HOME\");\n if (xdgPath && *xdgPath != '\\0')\n return std::string(xdgPath) + \"/lsfg-vk/conf.toml\";\n const char* homePath = std::getenv(\"HOME\");\n if (homePath && *homePath != '\\0')\n return std::string(homePath) + \"/.config/lsfg-vk/conf.toml\";\n return \"/etc/lsfg-vk/conf.toml\";\n}\n"], ["/lsfg-vk/src/hooks.cpp", "#include \"hooks.hpp\"\n#include \"common/exception.hpp\"\n#include \"config/config.hpp\"\n#include \"utils/utils.hpp\"\n#include \"context.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Hooks;\n\nnamespace {\n\n ///\n /// Add extensions to the instance create info.\n ///\n VkResult myvkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n auto extensions = Utils::addExtensions(\n pCreateInfo->ppEnabledExtensionNames,\n pCreateInfo->enabledExtensionCount,\n {\n 
\"VK_KHR_get_physical_device_properties2\",\n \"VK_KHR_external_memory_capabilities\",\n \"VK_KHR_external_semaphore_capabilities\"\n }\n );\n VkInstanceCreateInfo createInfo = *pCreateInfo;\n createInfo.enabledExtensionCount = static_cast(extensions.size());\n createInfo.ppEnabledExtensionNames = extensions.data();\n auto res = Layer::ovkCreateInstance(&createInfo, pAllocator, pInstance);\n if (res == VK_ERROR_EXTENSION_NOT_PRESENT)\n throw std::runtime_error(\n \"Required Vulkan instance extensions are not present.\"\n \"Your GPU driver is not supported.\");\n return res;\n }\n\n /// Map of devices to related information.\n std::unordered_map deviceToInfo;\n\n ///\n /// Add extensions to the device create info.\n /// (function pointers are not initialized yet)\n ///\n VkResult myvkCreateDevicePre(\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n // add extensions\n auto extensions = Utils::addExtensions(\n pCreateInfo->ppEnabledExtensionNames,\n pCreateInfo->enabledExtensionCount,\n {\n \"VK_KHR_external_memory\",\n \"VK_KHR_external_memory_fd\",\n \"VK_KHR_external_semaphore\",\n \"VK_KHR_external_semaphore_fd\"\n }\n );\n VkDeviceCreateInfo createInfo = *pCreateInfo;\n createInfo.enabledExtensionCount = static_cast(extensions.size());\n createInfo.ppEnabledExtensionNames = extensions.data();\n auto res = Layer::ovkCreateDevice(physicalDevice, &createInfo, pAllocator, pDevice);\n if (res == VK_ERROR_EXTENSION_NOT_PRESENT)\n throw std::runtime_error(\n \"Required Vulkan device extensions are not present.\"\n \"Your GPU driver is not supported.\");\n return res;\n }\n\n ///\n /// Add related device information after the device is created.\n ///\n VkResult myvkCreateDevicePost(\n VkPhysicalDevice physicalDevice,\n VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks*,\n VkDevice* pDevice) {\n deviceToInfo.emplace(*pDevice, DeviceInfo {\n .device = *pDevice,\n 
.physicalDevice = physicalDevice,\n .queue = Utils::findQueue(*pDevice, physicalDevice, pCreateInfo, VK_QUEUE_GRAPHICS_BIT)\n });\n return VK_SUCCESS;\n }\n\n /// Erase the device information when the device is destroyed.\n void myvkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator) noexcept {\n deviceToInfo.erase(device);\n Layer::ovkDestroyDevice(device, pAllocator);\n }\n\n std::unordered_map swapchains;\n std::unordered_map swapchainToDeviceTable;\n std::unordered_map swapchainToPresent;\n\n ///\n /// Adjust swapchain creation parameters and create a swapchain context.\n ///\n VkResult myvkCreateSwapchainKHR(\n VkDevice device,\n const VkSwapchainCreateInfoKHR* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSwapchainKHR* pSwapchain) noexcept {\n // find device\n auto it = deviceToInfo.find(device);\n if (it == deviceToInfo.end()) {\n Utils::logLimitN(\"swapMap\", 5, \"Device not found in map\");\n return Layer::ovkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);\n }\n Utils::resetLimitN(\"swapMap\");\n auto& deviceInfo = it->second;\n\n // increase amount of images in swapchain\n VkSwapchainCreateInfoKHR createInfo = *pCreateInfo;\n const auto maxImages = Utils::getMaxImageCount(\n deviceInfo.physicalDevice, pCreateInfo->surface);\n createInfo.minImageCount = createInfo.minImageCount + 1\n + static_cast(deviceInfo.queue.first);\n if (createInfo.minImageCount > maxImages) {\n createInfo.minImageCount = maxImages;\n Utils::logLimitN(\"swapCount\", 10,\n \"Requested image count (\" +\n std::to_string(pCreateInfo->minImageCount) + \") \"\n \"exceeds maximum allowed (\" +\n std::to_string(maxImages) + \"). \"\n \"Continuing with maximum allowed image count. 
\"\n \"This might lead to performance degradation.\");\n } else {\n Utils::resetLimitN(\"swapCount\");\n }\n\n // allow copy operations on swapchain images\n createInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;\n createInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;\n\n // enforce present mode\n createInfo.presentMode = Config::activeConf.e_present;\n\n // retire potential old swapchain\n if (pCreateInfo->oldSwapchain) {\n swapchains.erase(pCreateInfo->oldSwapchain);\n swapchainToDeviceTable.erase(pCreateInfo->oldSwapchain);\n }\n\n // create swapchain\n auto res = Layer::ovkCreateSwapchainKHR(device, &createInfo, pAllocator, pSwapchain);\n if (res != VK_SUCCESS)\n return res; // can't be caused by lsfg-vk (yet)\n\n try {\n swapchainToPresent.emplace(*pSwapchain, createInfo.presentMode);\n\n // get all swapchain images\n uint32_t imageCount{};\n res = Layer::ovkGetSwapchainImagesKHR(device, *pSwapchain, &imageCount, nullptr);\n if (res != VK_SUCCESS || imageCount == 0)\n throw LSFG::vulkan_error(res, \"Failed to get swapchain image count\");\n\n std::vector swapchainImages(imageCount);\n res = Layer::ovkGetSwapchainImagesKHR(device, *pSwapchain,\n &imageCount, swapchainImages.data());\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get swapchain images\");\n\n // create swapchain context\n swapchainToDeviceTable.emplace(*pSwapchain, device);\n swapchains.emplace(*pSwapchain, LsContext(\n deviceInfo, *pSwapchain, pCreateInfo->imageExtent,\n swapchainImages\n ));\n\n std::cerr << \"lsfg-vk: Swapchain context \" <<\n (createInfo.oldSwapchain ? 
\"recreated\" : \"created\")\n << \" (using \" << imageCount << \" images).\\n\";\n\n Utils::resetLimitN(\"swapCtxCreate\");\n } catch (const std::exception& e) {\n Utils::logLimitN(\"swapCtxCreate\", 5,\n \"An error occurred while creating the swapchain wrapper:\\n\"\n \"- \" + std::string(e.what()));\n return VK_SUCCESS; // swapchain is still valid\n }\n return VK_SUCCESS;\n }\n\n ///\n /// Update presentation parameters and present the next frame(s).\n ///\n VkResult myvkQueuePresentKHR(\n VkQueue queue,\n const VkPresentInfoKHR* pPresentInfo) noexcept {\n // find swapchain device\n auto it = swapchainToDeviceTable.find(*pPresentInfo->pSwapchains);\n if (it == swapchainToDeviceTable.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n\n // find device info\n auto it2 = deviceToInfo.find(it->second);\n if (it2 == deviceToInfo.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Device not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& deviceInfo = it2->second;\n\n // find swapchain context\n auto it3 = swapchains.find(*pPresentInfo->pSwapchains);\n if (it3 == swapchains.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain context not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& swapchain = it3->second;\n\n // find present mode\n auto it4 = swapchainToPresent.find(*pPresentInfo->pSwapchains);\n if (it4 == swapchainToPresent.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain present mode not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& present = it4->second;\n\n // enforce present mode | NOLINTBEGIN\n #pragma clang diagnostic push\n #pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n const VkSwapchainPresentModeInfoEXT* presentModeInfo =\n reinterpret_cast(pPresentInfo->pNext);\n while (presentModeInfo) {\n if (presentModeInfo->sType == 
VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODE_INFO_EXT) {\n for (size_t i = 0; i < presentModeInfo->swapchainCount; i++)\n const_cast(presentModeInfo->pPresentModes)[i] =\n present;\n }\n presentModeInfo =\n reinterpret_cast(presentModeInfo->pNext);\n }\n #pragma clang diagnostic pop\n\n // NOLINTEND | present the next frame\n VkResult res{}; // might return VK_SUBOPTIMAL_KHR\n try {\n // ensure config is valid\n auto& conf = Config::activeConf;\n if (!conf.config_file.empty()\n && (\n !std::filesystem::exists(conf.config_file)\n || conf.timestamp != std::filesystem::last_write_time(conf.config_file)\n )) {\n Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n return VK_ERROR_OUT_OF_DATE_KHR;\n }\n\n // ensure present mode is still valid\n if (present != conf.e_present) {\n Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n return VK_ERROR_OUT_OF_DATE_KHR;\n }\n\n // skip if disabled\n if (conf.multiplier <= 1)\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n\n // present the swapchain\n std::vector semaphores(pPresentInfo->waitSemaphoreCount);\n std::copy_n(pPresentInfo->pWaitSemaphores, semaphores.size(), semaphores.data());\n\n res = swapchain.present(deviceInfo, pPresentInfo->pNext,\n queue, semaphores, *pPresentInfo->pImageIndices);\n\n Utils::resetLimitN(\"swapPresent\");\n } catch (const std::exception& e) {\n Utils::logLimitN(\"swapPresent\", 5,\n \"An error occurred while presenting the swapchain:\\n\"\n \"- \" + std::string(e.what()));\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return res;\n }\n\n /// Erase the swapchain context and mapping when the swapchain is destroyed.\n void myvkDestroySwapchainKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n const VkAllocationCallbacks* pAllocator) noexcept {\n swapchains.erase(swapchain);\n swapchainToDeviceTable.erase(swapchain);\n swapchainToPresent.erase(swapchain);\n Layer::ovkDestroySwapchainKHR(device, swapchain, pAllocator);\n }\n}\n\nstd::unordered_map Hooks::hooks = {\n // instance hooks\n 
{\"vkCreateInstance\", reinterpret_cast(myvkCreateInstance)},\n\n // device hooks\n {\"vkCreateDevicePre\", reinterpret_cast(myvkCreateDevicePre)},\n {\"vkCreateDevicePost\", reinterpret_cast(myvkCreateDevicePost)},\n {\"vkDestroyDevice\", reinterpret_cast(myvkDestroyDevice)},\n\n // swapchain hooks\n {\"vkCreateSwapchainKHR\", reinterpret_cast(myvkCreateSwapchainKHR)},\n {\"vkQueuePresentKHR\", reinterpret_cast(myvkQueuePresentKHR)},\n {\"vkDestroySwapchainKHR\", reinterpret_cast(myvkDestroySwapchainKHR)}\n};\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_modinfo.h", "#pragma once\n\n#include \"dxbc_options.h\"\n\nnamespace dxvk {\n\n /**\n * \\brief Tessellation info\n * \n * Stores the maximum tessellation factor\n * to export from tessellation shaders.\n */\n struct DxbcTessInfo {\n float maxTessFactor;\n };\n\n /**\n * \\brief Xfb capture entry\n * \n * Stores an output variable to capture,\n * as well as the buffer to write it to.\n */\n struct DxbcXfbEntry {\n const char* semanticName;\n uint32_t semanticIndex;\n uint32_t componentIndex;\n uint32_t componentCount;\n uint32_t streamId;\n uint32_t bufferId;\n uint32_t offset;\n };\n\n /**\n * \\brief Xfb info\n * \n * Stores capture entries and output buffer\n * strides. 
This structure must only be\n * defined if \\c entryCount is non-zero.\n */\n struct DxbcXfbInfo {\n uint32_t entryCount;\n DxbcXfbEntry entries[128];\n uint32_t strides[4];\n int32_t rasterizedStream;\n };\n\n /**\n * \\brief Shader module info\n * \n * Stores information which may affect shader compilation.\n * This data can be supplied by the client API implementation.\n */\n struct DxbcModuleInfo {\n DxbcOptions options;\n DxbcTessInfo* tess;\n DxbcXfbInfo* xfb;\n };\n\n}"], ["/lsfg-vk/src/extract/extract.cpp", "#include \"extract/extract.hpp\"\n#include \"config/config.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Extract;\n\nconst std::unordered_map nameIdxTable = {{\n { \"mipmaps\", 255 },\n { \"alpha[0]\", 267 },\n { \"alpha[1]\", 268 },\n { \"alpha[2]\", 269 },\n { \"alpha[3]\", 270 },\n { \"beta[0]\", 275 },\n { \"beta[1]\", 276 },\n { \"beta[2]\", 277 },\n { \"beta[3]\", 278 },\n { \"beta[4]\", 279 },\n { \"gamma[0]\", 257 },\n { \"gamma[1]\", 259 },\n { \"gamma[2]\", 260 },\n { \"gamma[3]\", 261 },\n { \"gamma[4]\", 262 },\n { \"delta[0]\", 257 },\n { \"delta[1]\", 263 },\n { \"delta[2]\", 264 },\n { \"delta[3]\", 265 },\n { \"delta[4]\", 266 },\n { \"delta[5]\", 258 },\n { \"delta[6]\", 271 },\n { \"delta[7]\", 272 },\n { \"delta[8]\", 273 },\n { \"delta[9]\", 274 },\n { \"generate\", 256 },\n { \"p_mipmaps\", 255 },\n { \"p_alpha[0]\", 290 },\n { \"p_alpha[1]\", 291 },\n { \"p_alpha[2]\", 292 },\n { \"p_alpha[3]\", 293 },\n { \"p_beta[0]\", 298 },\n { \"p_beta[1]\", 299 },\n { \"p_beta[2]\", 300 },\n { \"p_beta[3]\", 301 },\n { \"p_beta[4]\", 302 },\n { \"p_gamma[0]\", 280 },\n { \"p_gamma[1]\", 282 },\n { \"p_gamma[2]\", 283 },\n { \"p_gamma[3]\", 284 },\n { \"p_gamma[4]\", 285 },\n { \"p_delta[0]\", 280 },\n { \"p_delta[1]\", 286 },\n { \"p_delta[2]\", 287 },\n { \"p_delta[3]\", 288 },\n { \"p_delta[4]\", 289 },\n { \"p_delta[5]\", 281 },\n { \"p_delta[6]\", 294 },\n { 
\"p_delta[7]\", 295 },\n { \"p_delta[8]\", 296 },\n { \"p_delta[9]\", 297 },\n { \"p_generate\", 256 },\n}};\n\nnamespace {\n auto& shaders() {\n static std::unordered_map> shaderData;\n return shaderData;\n }\n\n int on_resource(void*, const peparse::resource& res) {\n if (res.type != peparse::RT_RCDATA || res.buf == nullptr || res.buf->bufLen <= 0)\n return 0;\n std::vector resource_data(res.buf->bufLen);\n std::copy_n(res.buf->buf, res.buf->bufLen, resource_data.data());\n shaders()[res.name] = resource_data;\n return 0;\n }\n\n const std::vector PATHS{{\n \".local/share/Steam/steamapps/common\",\n \".steam/steam/steamapps/common\",\n \".steam/debian-installation/steamapps/common\",\n \".var/app/com.valvesoftware.Steam/.local/share/Steam/steamapps/common\",\n \"snap/steam/common/.local/share/Steam/steamapps/common\"\n }};\n\n std::string getDllPath() {\n // overriden path\n std::string dllPath = Config::activeConf.dll;\n if (!dllPath.empty())\n return dllPath;\n // home based paths\n const char* home = getenv(\"HOME\");\n const std::string homeStr = home ? 
home : \"\";\n for (const auto& base : PATHS) {\n const std::filesystem::path path =\n std::filesystem::path(homeStr) / base / \"Lossless Scaling\" / \"Lossless.dll\";\n if (std::filesystem::exists(path))\n return path.string();\n }\n // xdg home\n const char* dataDir = getenv(\"XDG_DATA_HOME\");\n if (dataDir && *dataDir != '\\0')\n return std::string(dataDir) + \"/Steam/steamapps/common/Lossless Scaling/Lossless.dll\";\n // final fallback\n return \"Lossless.dll\";\n }\n}\n\nvoid Extract::extractShaders() {\n if (!shaders().empty())\n return;\n\n // parse the dll\n peparse::parsed_pe* dll = peparse::ParsePEFromFile(getDllPath().c_str());\n if (!dll)\n throw std::runtime_error(\"Unable to read Lossless.dll, is it installed?\");\n peparse::IterRsrc(dll, on_resource, nullptr);\n peparse::DestructParsedPE(dll);\n\n // ensure all shaders are present\n for (const auto& [name, idx] : nameIdxTable)\n if (shaders().find(idx) == shaders().end())\n throw std::runtime_error(\"Shader not found: \" + name + \".\\n- Is Lossless Scaling up to date?\");\n}\n\nstd::vector Extract::getShader(const std::string& name) {\n if (shaders().empty())\n throw std::runtime_error(\"Shaders are not loaded.\");\n\n auto hit = nameIdxTable.find(name);\n if (hit == nameIdxTable.end())\n throw std::runtime_error(\"Shader hash not found: \" + name);\n\n auto sit = shaders().find(hit->second);\n if (sit == shaders().end())\n throw std::runtime_error(\"Shader not found: \" + name);\n\n return sit->second;\n}\n"], ["/lsfg-vk/framegen/src/core/image.cpp", "#include \n#include \n\n#include \"core/image.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nImage::Image(const Core::Device& device, VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkImageCreateInfo desc{\n .sType = 
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = vkCreateImage(device.handle(), &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetImageMemoryRequirements(device.handle(), imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = vkBindImageMemory(device.handle(), imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // create image view\n const VkImageViewCreateInfo 
viewDesc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n .image = imageHandle,\n .viewType = VK_IMAGE_VIEW_TYPE_2D,\n .format = format,\n .components = {\n .r = VK_COMPONENT_SWIZZLE_IDENTITY,\n .g = VK_COMPONENT_SWIZZLE_IDENTITY,\n .b = VK_COMPONENT_SWIZZLE_IDENTITY,\n .a = VK_COMPONENT_SWIZZLE_IDENTITY\n },\n .subresourceRange = {\n .aspectMask = aspectFlags,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n\n VkImageView viewHandle{};\n res = vkCreateImageView(device.handle(), &viewDesc, nullptr, &viewHandle);\n if (res != VK_SUCCESS || viewHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create image view\");\n\n // store objects in shared ptr\n this->layout = std::make_shared(VK_IMAGE_LAYOUT_UNDEFINED);\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device.handle()](VkImage* img) {\n vkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n this->view = std::shared_ptr(\n new VkImageView(viewHandle),\n [dev = device.handle()](VkImageView* imgView) {\n vkDestroyImageView(dev, *imgView, nullptr);\n }\n );\n}\n\n// shared memory constructor\n\nImage::Image(const Core::Device& device, VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags, int fd)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkExternalMemoryImageCreateInfo externalInfo{\n .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .pNext = &externalInfo,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n 
.usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = vkCreateImage(device.handle(), &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetImageMemoryRequirements(device.handle(), imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // ~~allocate~~ and bind memory\n const VkMemoryDedicatedAllocateInfoKHR dedicatedInfo2{\n .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,\n .image = imageHandle,\n };\n const VkImportMemoryFdInfoKHR importInfo{\n .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,\n .pNext = &dedicatedInfo2,\n .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,\n .fd = fd // closes the fd\n };\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .pNext = fd == -1 ? 
nullptr : &importInfo,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = vkBindImageMemory(device.handle(), imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // create image view\n const VkImageViewCreateInfo viewDesc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n .image = imageHandle,\n .viewType = VK_IMAGE_VIEW_TYPE_2D,\n .format = format,\n .components = {\n .r = VK_COMPONENT_SWIZZLE_IDENTITY,\n .g = VK_COMPONENT_SWIZZLE_IDENTITY,\n .b = VK_COMPONENT_SWIZZLE_IDENTITY,\n .a = VK_COMPONENT_SWIZZLE_IDENTITY\n },\n .subresourceRange = {\n .aspectMask = aspectFlags,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n\n VkImageView viewHandle{};\n res = vkCreateImageView(device.handle(), &viewDesc, nullptr, &viewHandle);\n if (res != VK_SUCCESS || viewHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create image view\");\n\n // store objects in shared ptr\n this->layout = std::make_shared(VK_IMAGE_LAYOUT_UNDEFINED);\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device.handle()](VkImage* img) {\n vkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n this->view = std::shared_ptr(\n new VkImageView(viewHandle),\n [dev = device.handle()](VkImageView* imgView) {\n vkDestroyImageView(dev, *imgView, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/generate.cpp", "#include \n#include \n\n#include \"v3_1/shaders/generate.hpp\"\n#include \"common/utils.hpp\"\n#include 
\"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nGenerate::Generate(Vulkan& vk,\n Core::Image inImg1, Core::Image inImg2,\n Core::Image inImg3, Core::Image inImg4, Core::Image inImg5,\n const std::vector& fds, VkFormat format)\n : inImg1(std::move(inImg1)), inImg2(std::move(inImg2)),\n inImg3(std::move(inImg3)), inImg4(std::move(inImg4)),\n inImg5(std::move(inImg5)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"generate\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"generate\");\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg1.getExtent();\n for (size_t i = 0; i < vk.generationCount; i++)\n this->outImgs.emplace_back(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, fds.empty() ? -1 : fds.at(i));\n\n // hook up shaders\n for (size_t i = 0; i < vk.generationCount; i++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(i + 1) / static_cast(vk.generationCount + 1));\n for (size_t j = 0; j < 2; j++) {\n pass.descriptorSet.at(j) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModule);\n pass.descriptorSet.at(j).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg2 : this->inImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? 
this->inImg1 : this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg3)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg4)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg5)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n }\n }\n}\n\nvoid Generate::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first pass\n const auto extent = this->inImg1.getExtent();\n const uint32_t threadsX = (extent.width + 15) >> 4;\n const uint32_t threadsY = (extent.height + 15) >> 4;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg1)\n .addW2R(this->inImg2)\n .addW2R(this->inImg3)\n .addW2R(this->inImg4)\n .addW2R(this->inImg5)\n .addR2W(this->outImgs.at(pass_idx))\n .build();\n\n this->pipeline.bind(buf);\n pass.descriptorSet.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/generate.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/generate.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nGenerate::Generate(Vulkan& vk,\n Core::Image inImg1, Core::Image inImg2,\n Core::Image inImg3, Core::Image inImg4, Core::Image inImg5,\n const std::vector& fds, VkFormat format)\n : inImg1(std::move(inImg1)), inImg2(std::move(inImg2)),\n inImg3(std::move(inImg3)), inImg4(std::move(inImg4)),\n inImg5(std::move(inImg5)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"p_generate\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"p_generate\");\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = 
vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg1.getExtent();\n for (size_t i = 0; i < vk.generationCount; i++)\n this->outImgs.emplace_back(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, fds.empty() ? -1 : fds.at(i));\n\n // hook up shaders\n for (size_t i = 0; i < vk.generationCount; i++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(i + 1) / static_cast(vk.generationCount + 1));\n for (size_t j = 0; j < 2; j++) {\n pass.descriptorSet.at(j) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModule);\n pass.descriptorSet.at(j).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg2 : this->inImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? 
this->inImg1 : this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg3)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg4)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg5)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n }\n }\n}\n\nvoid Generate::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first pass\n const auto extent = this->inImg1.getExtent();\n const uint32_t threadsX = (extent.width + 15) >> 4;\n const uint32_t threadsY = (extent.height + 15) >> 4;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg1)\n .addW2R(this->inImg2)\n .addW2R(this->inImg3)\n .addW2R(this->inImg4)\n .addW2R(this->inImg5)\n .addR2W(this->outImgs.at(pass_idx))\n .build();\n\n this->pipeline.bind(buf);\n pass.descriptorSet.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/buffer.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n\n// keep this header above \"windows.h\" because it contains many types\n#include \n\n#ifdef _WIN32\n\n#define WIN32_LEAN_AND_MEAN\n#define VC_EXTRALEAN\n\n#include \n#include \n#else\n#include \n#include \n#include \n#include \n#endif\n\nnamespace {\n\ninline std::uint16_t byteSwapUint16(std::uint16_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_ushort(val);\n#else\n return __builtin_bswap16(val);\n#endif\n}\n\ninline std::uint32_t byteSwapUint32(std::uint32_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_ulong(val);\n#else\n return __builtin_bswap32(val);\n#endif\n}\n\ninline uint64_t byteSwapUint64(std::uint64_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_uint64(val);\n#else\n return __builtin_bswap64(val);\n#endif\n}\n\n} // anonymous namespace\n\nnamespace peparse {\n\nextern std::uint32_t err;\nextern std::string err_loc;\n\nstruct buffer_detail {\n#ifdef _WIN32\n HANDLE file;\n HANDLE sec;\n#else\n int fd;\n#endif\n};\n\nbool readByte(bounded_buffer *b, std::uint32_t offset, std::uint8_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (offset >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint8_t *tmp = (b->buf + offset);\n out = *tmp;\n\n return true;\n}\n\nbool readWord(bounded_buffer *b, std::uint32_t offset, std::uint16_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 1 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint16_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint16_t));\n if (b->swapBytes) {\n out = byteSwapUint16(tmp);\n } else 
{\n out = tmp;\n }\n\n return true;\n}\n\nbool readDword(bounded_buffer *b, std::uint32_t offset, std::uint32_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 3 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint32_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint32_t));\n if (b->swapBytes) {\n out = byteSwapUint32(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readQword(bounded_buffer *b, std::uint32_t offset, std::uint64_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 7 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint64_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint64_t));\n if (b->swapBytes) {\n out = byteSwapUint64(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readChar16(bounded_buffer *b, std::uint32_t offset, char16_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 1 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n char16_t tmp;\n if (b->swapBytes) {\n std::uint8_t tmpBuf[2];\n tmpBuf[0] = *(b->buf + offset + 1);\n tmpBuf[1] = *(b->buf + offset);\n memcpy(&tmp, tmpBuf, sizeof(std::uint16_t));\n } else {\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint16_t));\n }\n out = tmp;\n\n return true;\n}\n\nbounded_buffer *readFileToFileBuffer(const char *filePath) {\n#ifdef _WIN32\n HANDLE h = CreateFileA(filePath,\n GENERIC_READ,\n FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,\n nullptr,\n OPEN_EXISTING,\n FILE_ATTRIBUTE_NORMAL,\n nullptr);\n if (h == INVALID_HANDLE_VALUE) {\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n\n DWORD fileSize = GetFileSize(h, nullptr);\n\n if (fileSize == INVALID_FILE_SIZE) {\n CloseHandle(h);\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n\n#else\n // only where we have mmap / open / etc\n int fd = open(filePath, O_RDONLY);\n\n if (fd == -1) {\n 
PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n#endif\n\n // make a buffer object\n bounded_buffer *p = new (std::nothrow) bounded_buffer();\n if (p == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n memset(p, 0, sizeof(bounded_buffer));\n buffer_detail *d = new (std::nothrow) buffer_detail();\n\n if (d == nullptr) {\n delete p;\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n memset(d, 0, sizeof(buffer_detail));\n p->detail = d;\n\n// only where we have mmap / open / etc\n#ifdef _WIN32\n p->detail->file = h;\n\n HANDLE hMap = CreateFileMapping(h, nullptr, PAGE_READONLY, 0, 0, nullptr);\n\n if (hMap == nullptr) {\n CloseHandle(h);\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->detail->sec = hMap;\n\n LPVOID ptr = MapViewOfFile(hMap, FILE_MAP_READ, 0, 0, 0);\n\n if (ptr == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->buf = reinterpret_cast(ptr);\n p->bufLen = fileSize;\n#else\n p->detail->fd = fd;\n\n struct stat s;\n memset(&s, 0, sizeof(struct stat));\n\n if (fstat(fd, &s) != 0) {\n close(fd);\n delete d;\n delete p;\n PE_ERR(PEERR_STAT);\n return nullptr;\n }\n\n void *maddr = mmap(nullptr,\n static_cast(s.st_size),\n PROT_READ,\n MAP_SHARED,\n fd,\n 0);\n\n if (maddr == MAP_FAILED) {\n close(fd);\n delete d;\n delete p;\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->buf = reinterpret_cast(maddr);\n p->bufLen = static_cast(s.st_size);\n#endif\n p->copy = false;\n p->swapBytes = false;\n\n return p;\n}\n\nbounded_buffer *makeBufferFromPointer(std::uint8_t *data, std::uint32_t sz) {\n if (data == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n bounded_buffer *p = new (std::nothrow) bounded_buffer();\n\n if (p == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->copy = true;\n p->detail = nullptr;\n p->buf = data;\n p->bufLen = sz;\n p->swapBytes = false;\n\n return p;\n}\n\n// split buffer inclusively from from to to by offset\nbounded_buffer *\nsplitBuffer(bounded_buffer *b, std::uint32_t from, std::uint32_t to) {\n if 
(b == nullptr) {\n return nullptr;\n }\n\n // safety checks\n if (to < from || to > b->bufLen) {\n return nullptr;\n }\n\n // make a new buffer\n auto newBuff = new (std::nothrow) bounded_buffer();\n if (newBuff == nullptr) {\n return nullptr;\n }\n\n newBuff->copy = true;\n newBuff->buf = b->buf + from;\n newBuff->bufLen = (to - from);\n\n return newBuff;\n}\n\nvoid deleteBuffer(bounded_buffer *b) {\n if (b == nullptr) {\n return;\n }\n\n if (!b->copy) {\n#ifdef _WIN32\n UnmapViewOfFile(b->buf);\n CloseHandle(b->detail->sec);\n CloseHandle(b->detail->file);\n#else\n munmap(b->buf, b->bufLen);\n close(b->detail->fd);\n#endif\n }\n\n delete b->detail;\n delete b;\n}\n\nstd::uint64_t bufLen(bounded_buffer *b) {\n return b->bufLen;\n}\n} // namespace peparse\n"], ["/lsfg-vk/framegen/v3.1p_src/context.cpp", "#include \n#include \n\n#include \"v3_1p/context.hpp\"\n#include \"common/utils.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1P;\n\nContext::Context(Vulkan& vk,\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n // import input images\n this->inImg_0 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in0);\n this->inImg_1 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in1);\n\n // prepare render data\n for (size_t i = 0; i < 8; i++) {\n auto& data = this->data.at(i);\n data.internalSemaphores.resize(vk.generationCount);\n data.outSemaphores.resize(vk.generationCount);\n data.completionFences.resize(vk.generationCount);\n data.cmdBuffers2.resize(vk.generationCount);\n }\n\n // create shader chains\n this->mipmaps = Shaders::Mipmaps(vk, this->inImg_0, this->inImg_1);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(i) = Shaders::Alpha(vk, 
this->mipmaps.getOutImages().at(i));\n this->beta = Shaders::Beta(vk, this->alpha.at(0).getOutImages());\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i) = Shaders::Gamma(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(std::min(6 - i, 5)),\n (i == 0) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()));\n if (i < 4) continue;\n\n this->delta.at(i - 4) = Shaders::Delta(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(6 - i),\n (i == 4) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage1()));\n }\n this->generate = Shaders::Generate(vk,\n this->inImg_0, this->inImg_1,\n this->gamma.at(6).getOutImage(),\n this->delta.at(2).getOutImage1(),\n this->delta.at(2).getOutImage2(),\n outN, format);\n}\n\nvoid Context::present(Vulkan& vk,\n int inSem, const std::vector& outSem) {\n auto& data = this->data.at(this->frameIdx % 8);\n\n // 3. wait for completion of previous frame in this slot\n if (data.shouldWait)\n for (auto& fence : data.completionFences)\n if (!fence.wait(vk.device, UINT64_MAX))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Fence wait timed out\");\n data.shouldWait = true;\n\n // 1. 
create mipmaps and process input image\n if (inSem >= 0) data.inSemaphore = Core::Semaphore(vk.device, inSem);\n for (size_t i = 0; i < vk.generationCount; i++)\n data.internalSemaphores.at(i) = Core::Semaphore(vk.device);\n\n data.cmdBuffer1 = Core::CommandBuffer(vk.device, vk.commandPool);\n data.cmdBuffer1.begin();\n\n this->mipmaps.Dispatch(data.cmdBuffer1, this->frameIdx);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(6 - i).Dispatch(data.cmdBuffer1, this->frameIdx);\n this->beta.Dispatch(data.cmdBuffer1, this->frameIdx);\n\n data.cmdBuffer1.end();\n std::vector waits = { data.inSemaphore };\n if (inSem < 0) waits.clear();\n data.cmdBuffer1.submit(vk.device.getComputeQueue(), std::nullopt,\n waits, std::nullopt,\n data.internalSemaphores, std::nullopt);\n\n // 2. generate intermediary frames\n for (size_t pass = 0; pass < vk.generationCount; pass++) {\n auto& internalSemaphore = data.internalSemaphores.at(pass);\n auto& outSemaphore = data.outSemaphores.at(pass);\n if (inSem >= 0) outSemaphore = Core::Semaphore(vk.device, outSem.empty() ? 
-1 : outSem.at(pass));\n auto& completionFence = data.completionFences.at(pass);\n completionFence = Core::Fence(vk.device);\n\n auto& buf2 = data.cmdBuffers2.at(pass);\n buf2 = Core::CommandBuffer(vk.device, vk.commandPool);\n buf2.begin();\n\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i).Dispatch(buf2, this->frameIdx, pass);\n if (i >= 4)\n this->delta.at(i - 4).Dispatch(buf2, this->frameIdx, pass, i == 6);\n }\n this->generate.Dispatch(buf2, this->frameIdx, pass);\n\n buf2.end();\n std::vector signals = { outSemaphore };\n if (inSem < 0) signals.clear();\n buf2.submit(vk.device.getComputeQueue(), completionFence,\n { internalSemaphore }, std::nullopt,\n signals, std::nullopt);\n }\n\n this->frameIdx++;\n}\n"], ["/lsfg-vk/src/main.cpp", "#include \"config/config.hpp\"\n#include \"extract/extract.hpp\"\n#include \"utils/benchmark.hpp\"\n#include \"utils/utils.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace {\n __attribute__((constructor)) void lsfgvk_init() {\n std::cerr << std::unitbuf;\n\n // read configuration\n const std::string file = Utils::getConfigFile();\n try {\n Config::updateConfig(file);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occured while trying to parse the configuration, IGNORING:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return; // default configuration will unload\n }\n\n const auto name = Utils::getProcessName();\n try {\n Config::activeConf = Config::getConfig(name);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: The configuration for \" << name.second << \" is invalid, IGNORING:\\n\";\n std::cerr << e.what() << '\\n';\n return; // default configuration will unload\n }\n\n // exit silently if not enabled\n auto& conf = Config::activeConf;\n if (!conf.enable && name.second != \"benchmark\")\n return; // default configuration will unload\n\n // print config\n std::cerr << \"lsfg-vk: Loaded configuration 
for \" << name.second << \":\\n\";\n if (!conf.dll.empty()) std::cerr << \" Using DLL from: \" << conf.dll << '\\n';\n std::cerr << \" Multiplier: \" << conf.multiplier << '\\n';\n std::cerr << \" Flow Scale: \" << conf.flowScale << '\\n';\n std::cerr << \" Performance Mode: \" << (conf.performance ? \"Enabled\" : \"Disabled\") << '\\n';\n std::cerr << \" HDR Mode: \" << (conf.hdr ? \"Enabled\" : \"Disabled\") << '\\n';\n if (conf.e_present != 2) std::cerr << \" ! Present Mode: \" << conf.e_present << '\\n';\n\n // remove mesa var in favor of config\n unsetenv(\"MESA_VK_WSI_PRESENT_MODE\"); // NOLINT\n\n // write latest file\n try {\n std::ofstream latest(\"/tmp/lsfg-vk_last\", std::ios::trunc);\n if (!latest.is_open())\n throw std::runtime_error(\"Failed to open /tmp/lsfg-vk_last for writing\");\n latest << \"exe: \" << name.first << '\\n';\n latest << \"comm: \" << name.second << '\\n';\n latest << \"pid: \" << getpid() << '\\n';\n if (!latest.good())\n throw std::runtime_error(\"Failed to write to /tmp/lsfg-vk_last\");\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to write the latest file, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n\n // load shaders\n try {\n Extract::extractShaders();\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to extract the shaders, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n std::cerr << \"lsfg-vk: Shaders extracted successfully.\\n\";\n\n // run benchmark if requested\n const char* benchmark_flag = std::getenv(\"LSFG_BENCHMARK\");\n if (!benchmark_flag)\n return;\n\n const std::string resolution(benchmark_flag);\n uint32_t width{};\n uint32_t height{};\n try {\n const size_t x = resolution.find('x');\n if (x == std::string::npos)\n throw std::runtime_error(\"Unable to find 'x' in benchmark string\");\n\n const std::string width_str = resolution.substr(0, 
x);\n const std::string height_str = resolution.substr(x + 1);\n if (width_str.empty() || height_str.empty())\n throw std::runtime_error(\"Invalid resolution\");\n\n const int32_t w = std::stoi(width_str);\n const int32_t h = std::stoi(height_str);\n if (w < 0 || h < 0)\n throw std::runtime_error(\"Resolution cannot be negative\");\n\n width = static_cast(w);\n height = static_cast(h);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to parse the resolution, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n\n std::thread benchmark([width, height]() {\n try {\n Benchmark::run(width, height);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred during the benchmark:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n });\n benchmark.detach();\n conf.enable = false;\n }\n}\n"], ["/lsfg-vk/framegen/v3.1_src/context.cpp", "#include \n#include \n\n#include \"v3_1/context.hpp\"\n#include \"common/utils.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1;\n\nContext::Context(Vulkan& vk,\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n // import input images\n this->inImg_0 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in0);\n this->inImg_1 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in1);\n\n // prepare render data\n for (size_t i = 0; i < 8; i++) {\n auto& data = this->data.at(i);\n data.internalSemaphores.resize(vk.generationCount);\n data.outSemaphores.resize(vk.generationCount);\n data.completionFences.resize(vk.generationCount);\n data.cmdBuffers2.resize(vk.generationCount);\n }\n\n // create shader chains\n this->mipmaps = Shaders::Mipmaps(vk, 
this->inImg_0, this->inImg_1);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(i) = Shaders::Alpha(vk, this->mipmaps.getOutImages().at(i));\n this->beta = Shaders::Beta(vk, this->alpha.at(0).getOutImages());\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i) = Shaders::Gamma(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(std::min(6 - i, 5)),\n (i == 0) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()));\n if (i < 4) continue;\n\n this->delta.at(i - 4) = Shaders::Delta(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(6 - i),\n (i == 4) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage1()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage2()));\n }\n this->generate = Shaders::Generate(vk,\n this->inImg_0, this->inImg_1,\n this->gamma.at(6).getOutImage(),\n this->delta.at(2).getOutImage1(),\n this->delta.at(2).getOutImage2(),\n outN, format);\n}\n\nvoid Context::present(Vulkan& vk,\n int inSem, const std::vector& outSem) {\n auto& data = this->data.at(this->frameIdx % 8);\n\n // 3. wait for completion of previous frame in this slot\n if (data.shouldWait)\n for (auto& fence : data.completionFences)\n if (!fence.wait(vk.device, UINT64_MAX))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Fence wait timed out\");\n data.shouldWait = true;\n\n // 1. 
create mipmaps and process input image\n if (inSem >= 0) data.inSemaphore = Core::Semaphore(vk.device, inSem);\n for (size_t i = 0; i < vk.generationCount; i++)\n data.internalSemaphores.at(i) = Core::Semaphore(vk.device);\n\n data.cmdBuffer1 = Core::CommandBuffer(vk.device, vk.commandPool);\n data.cmdBuffer1.begin();\n\n this->mipmaps.Dispatch(data.cmdBuffer1, this->frameIdx);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(6 - i).Dispatch(data.cmdBuffer1, this->frameIdx);\n this->beta.Dispatch(data.cmdBuffer1, this->frameIdx);\n\n data.cmdBuffer1.end();\n std::vector waits = { data.inSemaphore };\n if (inSem < 0) waits.clear();\n data.cmdBuffer1.submit(vk.device.getComputeQueue(), std::nullopt,\n waits, std::nullopt,\n data.internalSemaphores, std::nullopt);\n\n // 2. generate intermediary frames\n for (size_t pass = 0; pass < vk.generationCount; pass++) {\n auto& internalSemaphore = data.internalSemaphores.at(pass);\n auto& outSemaphore = data.outSemaphores.at(pass);\n if (inSem >= 0) outSemaphore = Core::Semaphore(vk.device, outSem.empty() ? 
-1 : outSem.at(pass));\n auto& completionFence = data.completionFences.at(pass);\n completionFence = Core::Fence(vk.device);\n\n auto& buf2 = data.cmdBuffers2.at(pass);\n buf2 = Core::CommandBuffer(vk.device, vk.commandPool);\n buf2.begin();\n\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i).Dispatch(buf2, this->frameIdx, pass);\n if (i >= 4)\n this->delta.at(i - 4).Dispatch(buf2, this->frameIdx, pass);\n }\n this->generate.Dispatch(buf2, this->frameIdx, pass);\n\n buf2.end();\n std::vector signals = { outSemaphore };\n if (inSem < 0) signals.clear();\n buf2.submit(vk.device.getComputeQueue(), completionFence,\n { internalSemaphore }, std::nullopt,\n signals, std::nullopt);\n }\n\n this->frameIdx++;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_flags.h", "#pragma once\n\n#include \n\n#include \"util_bit.h\"\n\nnamespace dxvk {\n \n template\n class Flags {\n \n public:\n \n using IntType = std::underlying_type_t;\n \n Flags() { }\n \n Flags(IntType t)\n : m_bits(t) { }\n \n template\n Flags(T f, Tx... fx) {\n this->set(f, fx...);\n }\n \n template\n void set(Tx... fx) {\n m_bits |= bits(fx...);\n }\n \n void set(Flags flags) {\n m_bits |= flags.m_bits;\n }\n \n template\n void clr(Tx... fx) {\n m_bits &= ~bits(fx...);\n }\n \n void clr(Flags flags) {\n m_bits &= ~flags.m_bits;\n }\n \n template\n bool any(Tx... fx) const {\n return (m_bits & bits(fx...)) != 0;\n }\n \n template\n bool all(Tx... 
fx) const {\n const IntType mask = bits(fx...);\n return (m_bits & mask) == mask;\n }\n \n bool test(T f) const {\n return this->any(f);\n }\n \n bool isClear() const {\n return m_bits == 0;\n }\n \n void clrAll() {\n m_bits = 0;\n }\n \n IntType raw() const {\n return m_bits;\n }\n \n Flags operator & (const Flags& other) const {\n return Flags(m_bits & other.m_bits);\n }\n \n Flags operator | (const Flags& other) const {\n return Flags(m_bits | other.m_bits);\n }\n \n Flags operator ^ (const Flags& other) const {\n return Flags(m_bits ^ other.m_bits);\n }\n\n bool operator == (const Flags& other) const {\n return m_bits == other.m_bits;\n }\n \n bool operator != (const Flags& other) const {\n return m_bits != other.m_bits;\n }\n \n private:\n \n IntType m_bits = 0;\n \n static IntType bit(T f) {\n return IntType(1) << static_cast(f);\n }\n \n template\n static IntType bits(T f, Tx... fx) {\n return bit(f) | bits(fx...);\n }\n \n static IntType bits() {\n return 0;\n }\n \n };\n \n}"], ["/lsfg-vk/framegen/v3.1_src/shaders/mipmaps.cpp", "#include \n#include \n\n#include \"v3_1/shaders/mipmaps.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/image.hpp\"\n#include \"core/commandbuffer.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nMipmaps::Mipmaps(Vulkan& vk,\n Core::Image inImg_0, Core::Image inImg_1)\n : inImg_0(std::move(inImg_0)), inImg_1(std::move(inImg_1)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"mipmaps\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 7, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"mipmaps\");\n this->buffer = vk.resources.getBuffer(vk.device);\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 2; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModule);\n\n 
// create outputs\n const VkExtent2D flowExtent{\n .width = static_cast(\n static_cast(this->inImg_0.getExtent().width) / vk.flowScale),\n .height = static_cast(\n static_cast(this->inImg_0.getExtent().height) / vk.flowScale)\n };\n for (size_t i = 0; i < 7; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { flowExtent.width >> i, flowExtent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t fc = 0; fc < 2; fc++)\n this->descriptorSets.at(fc).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, (fc % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Mipmaps::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto flowExtent = this->outImgs.at(0).getExtent();\n const uint32_t threadsX = (flowExtent.width + 63) >> 6;\n const uint32_t threadsY = (flowExtent.height + 63) >> 6;\n\n Utils::BarrierBuilder(buf)\n .addW2R((frameCount % 2 == 0) ? 
this->inImg_0 : this->inImg_1)\n .addR2W(this->outImgs)\n .build();\n\n this->pipeline.bind(buf);\n this->descriptorSets.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/gamma.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/gamma.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nGamma::Gamma(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg(std::move(optImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_gamma[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_gamma[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[3]\"),\n 
vk.shaders.getPipeline(vk.device, \"p_gamma[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 3; i++)\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n for (size_t i = 0; i < 2; i++)\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n\n this->outImg = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n !this->optImg.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n 
.add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg)\n .build();\n }\n}\n\nvoid Gamma::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg)\n .addR2W(this->tempImgs1)\n .build();\n\n 
this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg)\n .addW2R(this->inImg2)\n .addR2W(this->outImg)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/nt-headers.h", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of 
the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#pragma once\n\n#include \n#include \n#include \n\n// need to pack these structure definitions\n\n// some constant definitions\n// clang-format off\nnamespace peparse {\nconstexpr std::uint32_t RICH_MAGIC_END = 0x68636952;\nconstexpr std::uint32_t RICH_MAGIC_START = 0x536e6144;\nconstexpr std::uint32_t RICH_OFFSET = 0x80;\nconstexpr std::uint16_t MZ_MAGIC = 0x5A4D;\nconstexpr std::uint32_t NT_MAGIC = 0x00004550;\nconstexpr std::uint16_t NUM_DIR_ENTRIES = 16;\nconstexpr std::uint16_t NT_OPTIONAL_32_MAGIC = 0x10B;\nconstexpr std::uint16_t NT_OPTIONAL_64_MAGIC = 0x20B;\nconstexpr std::uint16_t NT_SHORT_NAME_LEN = 8;\nconstexpr std::uint16_t SYMTAB_RECORD_LEN = 18;\n\n#ifndef _PEPARSE_WINDOWS_CONFLICTS\n// Machine Types\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_UNKNOWN = 0x0;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ALPHA = 0x184; // Alpha_AXP\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ALPHA64 = 0x284; // ALPHA64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AM33 = 0x1d3; // Matsushita AM33\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AMD64 = 0x8664; // x64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARM = 0x1c0; // ARM little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARM64 = 0xaa64; // ARM64 little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARMNT = 0x1c4; // ARM Thumb-2 little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AXP64 = 0x284; // ALPHA64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_CEE = 0xc0ee;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_CEF = 
0xcef;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_EBC = 0xebc; // EFI byte code\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_I386 = 0x14c; // Intel 386 or later processors and compatible processors\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_IA64 = 0x200; // Intel Itanium processor family\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_LOONGARCH32 = 0x6232; // LoongArch 32-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_LOONGARCH64 = 0x6264; // LoongArch 64-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_M32R = 0x9041; // Mitsubishi M32R little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPS16 = 0x266; // MIPS16\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPSFPU = 0x366; // MIPS with FPU\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466; // MIPS16 with FPU\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPC = 0x1f0; // Power PC little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPCFP = 0x1f1; // Power PC with floating point support\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPCBE = 0x1f2; // Power PC big endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R3000 = 0x162; // MIPS little endian, 0x160 big-endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R4000 = 0x166; // MIPS little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R10000 = 0x168; // MIPS little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV32 = 0x5032; // RISC-V 32-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV64 = 0x5064; // RISC-V 64-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV128 = 0x5128; // RISC-V 128-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3 = 0x1a2; // Hitachi SH3\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3DSP = 0x1a3; // Hitachi SH3 DSP\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3E = 0x1a4; // Hitachi SH3E\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH4 = 0x1a6; // Hitachi SH4\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH5 = 0x1a8; // Hitachi 
SH5\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_THUMB = 0x1c2; // Thumb\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_TRICORE = 0x520; // Infineon\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169; // MIPS little-endian WCE v2\n\nconstexpr std::uint16_t IMAGE_FILE_RELOCS_STRIPPED = 0x0001;\nconstexpr std::uint16_t IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002;\nconstexpr std::uint16_t IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004;\nconstexpr std::uint16_t IMAGE_FILE_LOCAL_SYMS_STRIPPED = 0x0008;\nconstexpr std::uint16_t IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x0010;\nconstexpr std::uint16_t IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020;\nconstexpr std::uint16_t IMAGE_FILE_BYTES_REVERSED_LO = 0x0080;\nconstexpr std::uint16_t IMAGE_FILE_32BIT_MACHINE = 0x0100;\nconstexpr std::uint16_t IMAGE_FILE_DEBUG_STRIPPED = 0x0200;\nconstexpr std::uint16_t IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400;\nconstexpr std::uint16_t IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800;\nconstexpr std::uint16_t IMAGE_FILE_SYSTEM = 0x1000;\nconstexpr std::uint16_t IMAGE_FILE_DLL = 0x2000;\nconstexpr std::uint16_t IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000;\nconstexpr std::uint16_t IMAGE_FILE_BYTES_REVERSED_HI = 0x8000;\n\nconstexpr std::uint32_t IMAGE_SCN_TYPE_NO_PAD = 0x00000008;\nconstexpr std::uint32_t IMAGE_SCN_CNT_CODE = 0x00000020;\nconstexpr std::uint32_t IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040;\nconstexpr std::uint32_t IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080;\nconstexpr std::uint32_t IMAGE_SCN_LNK_OTHER = 0x00000100;\nconstexpr std::uint32_t IMAGE_SCN_LNK_INFO = 0x00000200;\nconstexpr std::uint32_t IMAGE_SCN_LNK_REMOVE = 0x00000800;\nconstexpr std::uint32_t IMAGE_SCN_LNK_COMDAT = 0x00001000;\nconstexpr std::uint32_t IMAGE_SCN_NO_DEFER_SPEC_EXC = 0x00004000;\nconstexpr std::uint32_t IMAGE_SCN_GPREL = 0x00008000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_FARDATA = 0x00008000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_PURGEABLE = 0x00020000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_16BIT = 0x00020000;\nconstexpr 
std::uint32_t IMAGE_SCN_MEM_LOCKED = 0x00040000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_PRELOAD = 0x00080000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_1BYTES = 0x00100000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_2BYTES = 0x00200000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_4BYTES = 0x00300000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_8BYTES = 0x00400000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_16BYTES = 0x00500000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_32BYTES = 0x00600000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_64BYTES = 0x00700000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_128BYTES = 0x00800000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_256BYTES = 0x00900000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_512BYTES = 0x00A00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_MASK = 0x00F00000;\nconstexpr std::uint32_t IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_DISCARDABLE = 0x02000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_NOT_CACHED = 0x04000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_NOT_PAGED = 0x08000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_SHARED = 0x10000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_EXECUTE = 0x20000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_READ = 0x40000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_WRITE = 0x80000000;\n\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_UNKNOWN = 0;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_NATIVE = 1;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_GUI = 2;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_CUI = 3;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_OS2_CUI = 5;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_POSIX_CUI = 7;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_NATIVE_WINDOWS = 8;\nconstexpr std::uint16_t 
IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_APPLICATION = 10;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_ROM = 13;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_XBOX = 14;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_XBOX_CODE_CATALOG = 17;\n\n// Symbol section number values\nconstexpr std::int16_t IMAGE_SYM_UNDEFINED = 0;\nconstexpr std::int16_t IMAGE_SYM_ABSOLUTE = -1;\nconstexpr std::int16_t IMAGE_SYM_DEBUG = -2;\n\n// Symbol table types\nconstexpr std::uint16_t IMAGE_SYM_TYPE_NULL = 0;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_VOID = 1;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_CHAR = 2;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_SHORT = 3;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_INT = 4;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_LONG = 5;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_FLOAT = 6;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_DOUBLE = 7;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_STRUCT = 8;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_UNION = 9;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_ENUM = 10;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_MOE = 11;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_BYTE = 12;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_WORD = 13;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_UINT = 14;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_DWORD = 15;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_NULL = 0;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_POINTER = 1;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_FUNCTION = 2;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_ARRAY = 3;\n\n// Symbol table storage classes\nconstexpr std::uint8_t IMAGE_SYM_CLASS_END_OF_FUNCTION = static_cast(-1);\nconstexpr std::uint8_t IMAGE_SYM_CLASS_NULL = 0;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_AUTOMATIC = 1;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_EXTERNAL = 2;\nconstexpr 
std::uint8_t IMAGE_SYM_CLASS_STATIC = 3;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_REGISTER = 4;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_EXTERNAL_DEF = 5;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_LABEL = 6;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_ARGUMENT = 9;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_STRUCT_TAG = 10;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNION_TAG = 12;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_TYPE_DEFINITION = 13;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_ENUM_TAG = 15;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_REGISTER_PARAM = 17;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_BIT_FIELD = 18;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_BLOCK = 100;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_FUNCTION = 101;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_END_OF_STRUCT = 102;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_FILE = 103;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_SECTION = 104;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_CLR_TOKEN = 107;\n\n// Optional header DLL characteristics\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY = 0x0080;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_ISOLATION = 0x0200;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_SEH = 0x0400;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_BIND = 0x0800;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_APPCONTAINER = 0x1000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 
0x2000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_GUARD_CF = 0x4000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000;\n\n// Extended DLL characteristics\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT = 0x0001;\n#endif\n// clang-format on\n\nstruct dos_header {\n std::uint16_t e_magic;\n std::uint16_t e_cblp;\n std::uint16_t e_cp;\n std::uint16_t e_crlc;\n std::uint16_t e_cparhdr;\n std::uint16_t e_minalloc;\n std::uint16_t e_maxalloc;\n std::uint16_t e_ss;\n std::uint16_t e_sp;\n std::uint16_t e_csum;\n std::uint16_t e_ip;\n std::uint16_t e_cs;\n std::uint16_t e_lfarlc;\n std::uint16_t e_ovno;\n std::uint16_t e_res[4];\n std::uint16_t e_oemid;\n std::uint16_t e_oeminfo;\n std::uint16_t e_res2[10];\n std::uint32_t e_lfanew;\n};\n\nstruct file_header {\n std::uint16_t Machine;\n std::uint16_t NumberOfSections;\n std::uint32_t TimeDateStamp;\n std::uint32_t PointerToSymbolTable;\n std::uint32_t NumberOfSymbols;\n std::uint16_t SizeOfOptionalHeader;\n std::uint16_t Characteristics;\n};\n\nstruct data_directory {\n std::uint32_t VirtualAddress;\n std::uint32_t Size;\n};\n\nenum data_directory_kind {\n DIR_EXPORT = 0,\n DIR_IMPORT = 1,\n DIR_RESOURCE = 2,\n DIR_EXCEPTION = 3,\n DIR_SECURITY = 4,\n DIR_BASERELOC = 5,\n DIR_DEBUG = 6,\n DIR_ARCHITECTURE = 7,\n DIR_GLOBALPTR = 8,\n DIR_TLS = 9,\n DIR_LOAD_CONFIG = 10,\n DIR_BOUND_IMPORT = 11,\n DIR_IAT = 12,\n DIR_DELAY_IMPORT = 13,\n DIR_COM_DESCRIPTOR = 14,\n DIR_RESERVED = 15,\n};\n\nstruct optional_header_32 {\n std::uint16_t Magic;\n std::uint8_t MajorLinkerVersion;\n std::uint8_t MinorLinkerVersion;\n std::uint32_t SizeOfCode;\n std::uint32_t SizeOfInitializedData;\n std::uint32_t SizeOfUninitializedData;\n std::uint32_t AddressOfEntryPoint;\n std::uint32_t BaseOfCode;\n std::uint32_t BaseOfData;\n std::uint32_t ImageBase;\n std::uint32_t SectionAlignment;\n std::uint32_t FileAlignment;\n std::uint16_t MajorOperatingSystemVersion;\n std::uint16_t 
MinorOperatingSystemVersion;\n std::uint16_t MajorImageVersion;\n std::uint16_t MinorImageVersion;\n std::uint16_t MajorSubsystemVersion;\n std::uint16_t MinorSubsystemVersion;\n std::uint32_t Win32VersionValue;\n std::uint32_t SizeOfImage;\n std::uint32_t SizeOfHeaders;\n std::uint32_t CheckSum;\n std::uint16_t Subsystem;\n std::uint16_t DllCharacteristics;\n std::uint32_t SizeOfStackReserve;\n std::uint32_t SizeOfStackCommit;\n std::uint32_t SizeOfHeapReserve;\n std::uint32_t SizeOfHeapCommit;\n std::uint32_t LoaderFlags;\n std::uint32_t NumberOfRvaAndSizes;\n data_directory DataDirectory[NUM_DIR_ENTRIES];\n};\n\n/*\n * This is used for PE32+ binaries. It is similar to optional_header_32\n * except some fields don't exist here (BaseOfData), and others are bigger.\n */\nstruct optional_header_64 {\n std::uint16_t Magic;\n std::uint8_t MajorLinkerVersion;\n std::uint8_t MinorLinkerVersion;\n std::uint32_t SizeOfCode;\n std::uint32_t SizeOfInitializedData;\n std::uint32_t SizeOfUninitializedData;\n std::uint32_t AddressOfEntryPoint;\n std::uint32_t BaseOfCode;\n std::uint64_t ImageBase;\n std::uint32_t SectionAlignment;\n std::uint32_t FileAlignment;\n std::uint16_t MajorOperatingSystemVersion;\n std::uint16_t MinorOperatingSystemVersion;\n std::uint16_t MajorImageVersion;\n std::uint16_t MinorImageVersion;\n std::uint16_t MajorSubsystemVersion;\n std::uint16_t MinorSubsystemVersion;\n std::uint32_t Win32VersionValue;\n std::uint32_t SizeOfImage;\n std::uint32_t SizeOfHeaders;\n std::uint32_t CheckSum;\n std::uint16_t Subsystem;\n std::uint16_t DllCharacteristics;\n std::uint64_t SizeOfStackReserve;\n std::uint64_t SizeOfStackCommit;\n std::uint64_t SizeOfHeapReserve;\n std::uint64_t SizeOfHeapCommit;\n std::uint32_t LoaderFlags;\n std::uint32_t NumberOfRvaAndSizes;\n data_directory DataDirectory[NUM_DIR_ENTRIES];\n};\n\nstruct nt_header_32 {\n std::uint32_t Signature;\n file_header FileHeader;\n optional_header_32 OptionalHeader;\n optional_header_64 
OptionalHeader64;\n std::uint16_t OptionalMagic;\n};\n\nstruct rich_entry {\n std::uint16_t ProductId;\n std::uint16_t BuildNumber;\n std::uint32_t Count;\n};\n\nstruct rich_header {\n std::uint32_t StartSignature;\n std::vector Entries;\n std::uint32_t EndSignature;\n std::uint32_t DecryptionKey;\n std::uint32_t Checksum;\n bool isPresent;\n bool isValid;\n};\n\n/*\n * This structure is only used to know how far to move the offset\n * when parsing resources. The data is stored in a resource_dir_entry\n * struct but that also has extra information used in the parsing which\n * causes the size to be inaccurate.\n */\nstruct resource_dir_entry_sz {\n std::uint32_t ID;\n std::uint32_t RVA;\n};\n\nstruct resource_dir_entry {\n inline resource_dir_entry(void) : ID(0), RVA(0), type(0), name(0), lang(0) {\n }\n\n std::uint32_t ID;\n std::uint32_t RVA;\n std::uint32_t type;\n std::uint32_t name;\n std::uint32_t lang;\n std::string type_str;\n std::string name_str;\n std::string lang_str;\n};\n\nstruct resource_dir_table {\n std::uint32_t Characteristics;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint16_t NameEntries;\n std::uint16_t IDEntries;\n};\n\nstruct resource_dat_entry {\n std::uint32_t RVA;\n std::uint32_t size;\n std::uint32_t codepage;\n std::uint32_t reserved;\n};\n\nstruct image_section_header {\n std::uint8_t Name[NT_SHORT_NAME_LEN];\n union {\n std::uint32_t PhysicalAddress;\n std::uint32_t VirtualSize;\n } Misc;\n std::uint32_t VirtualAddress;\n std::uint32_t SizeOfRawData;\n std::uint32_t PointerToRawData;\n std::uint32_t PointerToRelocations;\n std::uint32_t PointerToLinenumbers;\n std::uint16_t NumberOfRelocations;\n std::uint16_t NumberOfLinenumbers;\n std::uint32_t Characteristics;\n};\n\nstruct import_dir_entry {\n std::uint32_t LookupTableRVA;\n std::uint32_t TimeStamp;\n std::uint32_t ForwarderChain;\n std::uint32_t NameRVA;\n std::uint32_t AddressRVA;\n};\n\nstruct export_dir_table {\n 
std::uint32_t ExportFlags;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t NameRVA;\n std::uint32_t OrdinalBase;\n std::uint32_t AddressTableEntries;\n std::uint32_t NumberOfNamePointers;\n std::uint32_t ExportAddressTableRVA;\n std::uint32_t NamePointerRVA;\n std::uint32_t OrdinalTableRVA;\n};\n\nstruct debug_dir_entry {\n std::uint32_t Characteristics;\n std::uint32_t TimeStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t Type;\n std::uint32_t SizeOfData;\n std::uint32_t AddressOfRawData;\n std::uint32_t PointerToRawData;\n};\n\nenum reloc_type {\n RELOC_ABSOLUTE = 0,\n RELOC_HIGH = 1,\n RELOC_LOW = 2,\n RELOC_HIGHLOW = 3,\n RELOC_HIGHADJ = 4,\n RELOC_MIPS_JMPADDR = 5, // only valid on MIPS\n RELOC_ARM_MOV32 = 5, // only valid on ARM/Thumb\n RELOC_RISCV_HIGH20 = 5, // only valid on RISC-V\n RELOC_RESERVED = 6,\n RELOC_THUMB_MOV32 = 7, // only valid on Thumb\n RELOC_RISCV_LOW32I = 7, // only valid on RISC-V\n RELOC_RISCV_LOW12S = 8, // only valid on RISC-V\n RELOC_LOONGARCH32_MARK_LA = 8, // only valid on LoongArch 32\n RELOC_LOONGARCH64_MARK_LA = 8, // only valid on LoongArch 64\n RELOC_MIPS_JMPADDR16 = 9, // only valid on MIPS\n RELOC_IA64_IMM64 = 9,\n RELOC_DIR64 = 10\n};\n\nstruct reloc_block {\n std::uint32_t PageRVA;\n std::uint32_t BlockSize;\n};\n\nstruct image_load_config_code_integrity {\n std::uint16_t Flags;\n std::uint16_t Catalog;\n std::uint32_t CatalogOffset;\n std::uint32_t Reserved;\n};\n\nstruct image_load_config_32 {\n std::uint32_t Size;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t GlobalFlagsClear;\n std::uint32_t GlobalFlagsSet;\n std::uint32_t CriticalSectionDefaultTimeout;\n std::uint32_t DeCommitFreeBlockThreshold;\n std::uint32_t DeCommitTotalFreeThreshold;\n std::uint32_t LockPrefixTable;\n std::uint32_t MaximumAllocationSize;\n std::uint32_t VirtualMemoryThreshold;\n std::uint32_t 
ProcessHeapFlags;\n std::uint32_t ProcessAffinityMask;\n std::uint16_t CSDVersion;\n std::uint16_t DependentLoadFlags;\n std::uint32_t EditList;\n std::uint32_t SecurityCookie;\n std::uint32_t SEHandlerTable;\n std::uint32_t SEHandlerCount;\n std::uint32_t GuardCFCheckFunctionPointer;\n std::uint32_t GuardCFDispatchFunctionPointer;\n std::uint32_t GuardCFFunctionTable;\n std::uint32_t GuardCFFunctionCount;\n std::uint32_t GuardFlags;\n image_load_config_code_integrity CodeIntegrity;\n std::uint32_t GuardAddressTakenIatEntryTable;\n std::uint32_t GuardAddressTakenIatEntryCount;\n std::uint32_t GuardLongJumpTargetTable;\n std::uint32_t GuardLongJumpTargetCount;\n std::uint32_t DynamicValueRelocTable;\n std::uint32_t CHPEMetadataPointer;\n std::uint32_t GuardRFFailureRoutine;\n std::uint32_t GuardRFFailureRoutineFunctionPointer;\n std::uint32_t DynamicValueRelocTableOffset;\n std::uint16_t DynamicValueRelocTableSection;\n std::uint16_t Reserved2;\n std::uint32_t GuardRFVerifyStackPointerFunctionPointer;\n std::uint32_t HotPatchTableOffset;\n std::uint32_t Reserved3;\n std::uint32_t EnclaveConfigurationPointer;\n std::uint32_t VolatileMetadataPointer;\n};\n\nstruct image_load_config_64 {\n std::uint32_t Size;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t GlobalFlagsClear;\n std::uint32_t GlobalFlagsSet;\n std::uint32_t CriticalSectionDefaultTimeout;\n std::uint64_t DeCommitFreeBlockThreshold;\n std::uint64_t DeCommitTotalFreeThreshold;\n std::uint64_t LockPrefixTable;\n std::uint64_t MaximumAllocationSize;\n std::uint64_t VirtualMemoryThreshold;\n std::uint64_t ProcessAffinityMask;\n std::uint32_t ProcessHeapFlags;\n std::uint16_t CSDVersion;\n std::uint16_t DependentLoadFlags;\n std::uint64_t EditList;\n std::uint64_t SecurityCookie;\n std::uint64_t SEHandlerTable;\n std::uint64_t SEHandlerCount;\n std::uint64_t GuardCFCheckFunctionPointer;\n std::uint64_t GuardCFDispatchFunctionPointer;\n std::uint64_t 
GuardCFFunctionTable;\n std::uint64_t GuardCFFunctionCount;\n std::uint32_t GuardFlags;\n image_load_config_code_integrity CodeIntegrity;\n std::uint64_t GuardAddressTakenIatEntryTable;\n std::uint64_t GuardAddressTakenIatEntryCount;\n std::uint64_t GuardLongJumpTargetTable;\n std::uint64_t GuardLongJumpTargetCount;\n std::uint64_t DynamicValueRelocTable;\n std::uint64_t CHPEMetadataPointer;\n std::uint64_t GuardRFFailureRoutine;\n std::uint64_t GuardRFFailureRoutineFunctionPointer;\n std::uint32_t DynamicValueRelocTableOffset;\n std::uint16_t DynamicValueRelocTableSection;\n std::uint16_t Reserved2;\n std::uint64_t GuardRFVerifyStackPointerFunctionPointer;\n std::uint32_t HotPatchTableOffset;\n std::uint32_t Reserved3;\n std::uint64_t EnclaveConfigurationPointer;\n std::uint64_t VolatileMetadataPointer;\n};\n} // namespace peparse\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/delta.cpp", "#include \n#include \n\n#include \"v3_1/shaders/delta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nDelta::Delta(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg1,\n std::optional optImg2,\n std::optional optImg3)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg1(std::move(optImg1)), optImg2(std::move(optImg2)),\n optImg3(std::move(optImg3)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"delta[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 9, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, 
VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[5]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 10, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[6]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[7]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[8]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[9]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"delta[0]\"),\n vk.shaders.getPipeline(vk.device, \"delta[1]\"),\n vk.shaders.getPipeline(vk.device, \"delta[2]\"),\n vk.shaders.getPipeline(vk.device, \"delta[3]\"),\n vk.shaders.getPipeline(vk.device, \"delta[4]\"),\n vk.shaders.getPipeline(vk.device, \"delta[5]\"),\n vk.shaders.getPipeline(vk.device, \"delta[6]\"),\n vk.shaders.getPipeline(vk.device, \"delta[7]\"),\n vk.shaders.getPipeline(vk.device, \"delta[8]\"),\n vk.shaders.getPipeline(vk.device, \"delta[9]\")\n }};\n 
this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n this->outImg1 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n this->outImg2 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n false, !this->optImg1.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(2))\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n 
pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(2))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg1)\n .build();\n for (size_t i = 0; i < 3; i++) {\n pass.sixthDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(5));\n pass.sixthDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(1))\n .build();\n }\n pass.descriptorSets.at(4) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(6));\n pass.descriptorSets.at(4).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(5) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(7));\n pass.descriptorSets.at(5).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(1))\n .build();\n pass.descriptorSets.at(6) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(8));\n pass.descriptorSets.at(6).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(7) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(9));\n pass.descriptorSets.at(7).update(vk.device)\n 
.add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg2)\n .build();\n }\n}\n\nvoid Delta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .addR2W(this->tempImgs1.at(2))\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->tempImgs1.at(2))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 
1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg1)\n .addW2R(this->inImg2)\n .addR2W(this->outImg1)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n\n // sixth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addW2R(this->optImg2)\n .addR2W(this->tempImgs2.at(0))\n .addR2W(this->tempImgs2.at(1))\n .build();\n\n this->pipelines.at(5).bind(buf);\n pass.sixthDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(5));\n buf.dispatch(threadsX, threadsY, 1);\n\n // seventh shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2.at(0))\n .addW2R(this->tempImgs2.at(1))\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(6).bind(buf);\n pass.descriptorSets.at(4).bind(buf, this->pipelines.at(6));\n buf.dispatch(threadsX, threadsY, 1);\n\n // eighth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2.at(0))\n .addR2W(this->tempImgs2.at(1))\n .build();\n this->pipelines.at(7).bind(buf);\n pass.descriptorSets.at(5).bind(buf, this->pipelines.at(7));\n buf.dispatch(threadsX, threadsY, 1);\n\n // ninth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2.at(0))\n .addW2R(this->tempImgs2.at(1))\n .addW2R(this->optImg3)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(8).bind(buf);\n pass.descriptorSets.at(6).bind(buf, this->pipelines.at(8));\n buf.dispatch(threadsX, threadsY, 1);\n\n // tenth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->optImg3)\n .addR2W(this->outImg2)\n .build();\n\n this->pipelines.at(9).bind(buf);\n 
pass.descriptorSets.at(7).bind(buf, this->pipelines.at(9));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/gamma.cpp", "#include \n#include \n\n#include \"v3_1/shaders/gamma.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nGamma::Gamma(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg(std::move(optImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"gamma[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 9, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"gamma[0]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[1]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[2]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[3]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = 
vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n this->outImg = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n !this->optImg.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(2))\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(2))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg)\n .build();\n }\n}\n\nvoid Gamma::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .addR2W(this->tempImgs1.at(2))\n .build();\n\n 
this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->tempImgs1.at(2))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg)\n .addW2R(this->inImg2)\n .addR2W(this->outImg)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/delta.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/delta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nDelta::Delta(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg1,\n std::optional optImg2)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg1(std::move(optImg1)), optImg2(std::move(optImg2)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_delta[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, 
VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[5]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[6]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[7]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[8]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[9]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_delta[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[1]\"),\n 
vk.shaders.getPipeline(vk.device, \"p_delta[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[4]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[5]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[6]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[7]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[8]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[9]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 3; i++)\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n for (size_t i = 0; i < 2; i++)\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n\n this->outImg1 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n this->outImg2 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n false, !this->optImg1.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg1)\n .build();\n for (size_t i = 0; i < 3; i++) {\n pass.sixthDescriptorSet.at(i) = 
Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(5));\n pass.sixthDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .build();\n }\n pass.descriptorSets.at(4) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(6));\n pass.descriptorSets.at(4).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .build();\n pass.descriptorSets.at(5) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(7));\n pass.descriptorSets.at(5).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .build();\n pass.descriptorSets.at(6) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(8));\n pass.descriptorSets.at(6).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .build();\n pass.descriptorSets.at(7) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(9));\n pass.descriptorSets.at(7).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, 
this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg2)\n .build();\n }\n}\n\nvoid Delta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx,\n bool last) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg1)\n .addW2R(this->inImg2)\n .addR2W(this->outImg1)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n\n // sixth shader\n Utils::BarrierBuilder(buf)\n 
.addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addW2R(this->optImg2)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(5).bind(buf);\n pass.sixthDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(5));\n buf.dispatch(threadsX, threadsY, 1);\n\n if (!last)\n return;\n\n // seventh shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(6).bind(buf);\n pass.descriptorSets.at(4).bind(buf, this->pipelines.at(6));\n buf.dispatch(threadsX, threadsY, 1);\n\n // eighth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2)\n .build();\n this->pipelines.at(7).bind(buf);\n pass.descriptorSets.at(5).bind(buf, this->pipelines.at(7));\n buf.dispatch(threadsX, threadsY, 1);\n\n // ninth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(8).bind(buf);\n pass.descriptorSets.at(6).bind(buf, this->pipelines.at(8));\n buf.dispatch(threadsX, threadsY, 1);\n\n // tenth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->outImg2)\n .build();\n\n this->pipelines.at(9).bind(buf);\n pass.descriptorSets.at(7).bind(buf, this->pipelines.at(9));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/src/mini/image.cpp", "#include \"mini/image.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n\nusing namespace Mini;\n\nImage::Image(VkDevice device, VkPhysicalDevice physicalDevice,\n VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags, int* fd)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const 
VkExternalMemoryImageCreateInfo externalInfo{\n .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .pNext = &externalInfo,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = Layer::ovkCreateImage(device, &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n Layer::ovkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);\n\n VkMemoryRequirements memReqs;\n Layer::ovkGetImageMemoryRequirements(device, imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryDedicatedAllocateInfoKHR dedicatedInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,\n .image = imageHandle,\n };\n const VkExportMemoryAllocateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,\n .pNext = &dedicatedInfo,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkMemoryAllocateInfo allocInfo{\n .sType = 
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .pNext = &exportInfo,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = Layer::ovkAllocateMemory(device, &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = Layer::ovkBindImageMemory(device, imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // obtain the sharing fd\n const VkMemoryGetFdInfoKHR fdInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,\n .memory = memoryHandle,\n .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,\n };\n res = Layer::ovkGetMemoryFdKHR(device, &fdInfo, fd);\n if (res != VK_SUCCESS || *fd < 0)\n throw LSFG::vulkan_error(res, \"Failed to obtain sharing fd for Vulkan image\");\n\n // store objects in shared ptr\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device](VkImage* img) {\n Layer::ovkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device](VkDeviceMemory* mem) {\n Layer::ovkFreeMemory(dev, *mem, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/common/utils.cpp", "#include \n#include \n\n#include \"common/utils.hpp\"\n#include \"core/buffer.hpp\"\n#include \"core/image.hpp\"\n#include \"core/device.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/fence.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Utils;\n\nBarrierBuilder& BarrierBuilder::addR2W(Core::Image& image) {\n this->barriers.emplace_back(VkImageMemoryBarrier2 {\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .srcStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .srcAccessMask = 
VK_ACCESS_2_SHADER_READ_BIT,\n .dstStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .dstAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_GENERAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n });\n image.setLayout(VK_IMAGE_LAYOUT_GENERAL);\n\n return *this;\n}\n\nBarrierBuilder& BarrierBuilder::addW2R(Core::Image& image) {\n this->barriers.emplace_back(VkImageMemoryBarrier2 {\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .srcStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .srcAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT,\n .dstStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .dstAccessMask = VK_ACCESS_2_SHADER_READ_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_GENERAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n });\n image.setLayout(VK_IMAGE_LAYOUT_GENERAL);\n\n return *this;\n}\n\nvoid BarrierBuilder::build() const {\n const VkDependencyInfo dependencyInfo = {\n .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n .imageMemoryBarrierCount = static_cast(this->barriers.size()),\n .pImageMemoryBarriers = this->barriers.data()\n };\n vkCmdPipelineBarrier2(this->commandBuffer->handle(), &dependencyInfo);\n}\n\nvoid Utils::uploadImage(const Core::Device& device, const Core::CommandPool& commandPool,\n Core::Image& image, const std::string& path) {\n // read image bytecode\n std::ifstream file(path.data(), std::ios::binary | std::ios::ate);\n if (!file.is_open())\n throw std::system_error(errno, std::generic_category(), \"Failed to open image: \" + path);\n\n std::streamsize size = file.tellg();\n size -= 124 + 4; // dds header and magic bytes\n std::vector code(static_cast(size));\n\n file.seekg(124 + 4, std::ios::beg);\n if (!file.read(code.data(), size))\n throw std::system_error(errno, 
std::generic_category(), \"Failed to read image: \" + path);\n\n file.close();\n\n // copy data to buffer\n const Core::Buffer stagingBuffer(\n device, code.data(), static_cast(code.size()),\n VK_BUFFER_USAGE_TRANSFER_SRC_BIT\n );\n\n // perform the upload\n Core::CommandBuffer commandBuffer(device, commandPool);\n commandBuffer.begin();\n\n const VkImageMemoryBarrier barrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .srcAccessMask = VK_ACCESS_NONE,\n .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n };\n image.setLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n vkCmdPipelineBarrier(\n commandBuffer.handle(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,\n 0, 0, nullptr, 0, nullptr, 1, &barrier\n );\n\n auto extent = image.getExtent();\n const VkBufferImageCopy region{\n .bufferImageHeight = 0,\n .imageSubresource = {\n .aspectMask = image.getAspectFlags(),\n .layerCount = 1\n },\n .imageExtent = { extent.width, extent.height, 1 }\n };\n vkCmdCopyBufferToImage(\n commandBuffer.handle(),\n stagingBuffer.handle(), image.handle(),\n VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion\n );\n\n commandBuffer.end();\n\n Core::Fence fence(device);\n commandBuffer.submit(device.getComputeQueue(), fence);\n\n // wait for the upload to complete\n if (!fence.wait(device))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Upload operation timed out\");\n}\n\nvoid Utils::clearImage(const Core::Device& device, Core::Image& image, bool white) {\n Core::Fence fence(device);\n const Core::CommandPool cmdPool(device);\n Core::CommandBuffer cmdBuf(device, cmdPool);\n cmdBuf.begin();\n\n const VkImageMemoryBarrier2 barrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,\n .dstAccessMask = 
VK_ACCESS_2_TRANSFER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const VkDependencyInfo dependencyInfo = {\n .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n .imageMemoryBarrierCount = 1,\n .pImageMemoryBarriers = &barrier\n };\n image.setLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n vkCmdPipelineBarrier2(cmdBuf.handle(), &dependencyInfo);\n\n const float clearValue = white ? 1.0F : 0.0F;\n const VkClearColorValue clearColor = {{ clearValue, clearValue, clearValue, clearValue }};\n const VkImageSubresourceRange subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n };\n vkCmdClearColorImage(cmdBuf.handle(),\n image.handle(), image.getLayout(),\n &clearColor,\n 1, &subresourceRange);\n\n cmdBuf.end();\n\n cmdBuf.submit(device.getComputeQueue(), fence);\n if (!fence.wait(device))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Failed to wait for clearing fence.\");\n}\n"], ["/lsfg-vk/framegen/src/core/device.cpp", "#include \n#include \n\n#include \"core/device.hpp\"\n#include \"core/instance.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nconst std::vector requiredExtensions = {\n \"VK_KHR_external_memory_fd\",\n \"VK_KHR_external_semaphore_fd\",\n \"VK_EXT_robustness2\",\n};\n\nDevice::Device(const Instance& instance, uint64_t deviceUUID) {\n // get all physical devices\n uint32_t deviceCount{};\n auto res = vkEnumeratePhysicalDevices(instance.handle(), &deviceCount, nullptr);\n if (res != VK_SUCCESS || deviceCount == 0)\n throw LSFG::vulkan_error(res, \"Failed to enumerate physical devices\");\n\n std::vector devices(deviceCount);\n res = vkEnumeratePhysicalDevices(instance.handle(), &deviceCount, devices.data());\n if (res != VK_SUCCESS)\n throw 
LSFG::vulkan_error(res, \"Failed to get physical devices\");\n\n // get device by uuid\n std::optional physicalDevice;\n for (const auto& device : devices) {\n VkPhysicalDeviceProperties properties;\n vkGetPhysicalDeviceProperties(device, &properties);\n\n const uint64_t uuid =\n static_cast(properties.vendorID) << 32 | properties.deviceID;\n if (deviceUUID == uuid || deviceUUID == 0x1463ABAC) {\n physicalDevice = device;\n break;\n }\n }\n if (!physicalDevice)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Could not find physical device with UUID\");\n\n // find queue family indices\n uint32_t familyCount{};\n vkGetPhysicalDeviceQueueFamilyProperties(*physicalDevice, &familyCount, nullptr);\n\n std::vector queueFamilies(familyCount);\n vkGetPhysicalDeviceQueueFamilyProperties(*physicalDevice, &familyCount, queueFamilies.data());\n\n std::optional computeFamilyIdx;\n for (uint32_t i = 0; i < familyCount; ++i) {\n if (queueFamilies[i].queueFlags & VK_QUEUE_COMPUTE_BIT)\n computeFamilyIdx = i;\n }\n if (!computeFamilyIdx)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"No compute queue family found\");\n\n // create logical device\n const float queuePriority{1.0F}; // highest priority\n VkPhysicalDeviceRobustness2FeaturesEXT robustness2{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT,\n .nullDescriptor = VK_TRUE,\n };\n VkPhysicalDeviceVulkan13Features features13{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES,\n .pNext = &robustness2,\n .synchronization2 = VK_TRUE\n };\n const VkPhysicalDeviceVulkan12Features features12{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,\n .pNext = &features13,\n .timelineSemaphore = VK_TRUE,\n .vulkanMemoryModel = VK_TRUE\n };\n const VkDeviceQueueCreateInfo computeQueueDesc{\n .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,\n .queueFamilyIndex = *computeFamilyIdx,\n .queueCount = 1,\n .pQueuePriorities = &queuePriority\n };\n const 
VkDeviceCreateInfo deviceCreateInfo{\n .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,\n .pNext = &features12,\n .queueCreateInfoCount = 1,\n .pQueueCreateInfos = &computeQueueDesc,\n .enabledExtensionCount = static_cast(requiredExtensions.size()),\n .ppEnabledExtensionNames = requiredExtensions.data()\n };\n VkDevice deviceHandle{};\n res = vkCreateDevice(*physicalDevice, &deviceCreateInfo, nullptr, &deviceHandle);\n if (res != VK_SUCCESS | deviceHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create logical device\");\n\n volkLoadDevice(deviceHandle);\n\n // get compute queue\n VkQueue queueHandle{};\n vkGetDeviceQueue(deviceHandle, *computeFamilyIdx, 0, &queueHandle);\n\n // store in shared ptr\n this->computeQueue = queueHandle;\n this->computeFamilyIdx = *computeFamilyIdx;\n this->physicalDevice = *physicalDevice;\n this->device = std::shared_ptr(\n new VkDevice(deviceHandle),\n [](VkDevice* device) {\n vkDestroyDevice(*device, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/mipmaps.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/mipmaps.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/image.hpp\"\n#include \"core/commandbuffer.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nMipmaps::Mipmaps(Vulkan& vk,\n Core::Image inImg_0, Core::Image inImg_1)\n : inImg_0(std::move(inImg_0)), inImg_1(std::move(inImg_1)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"p_mipmaps\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 7, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"p_mipmaps\");\n this->buffer = vk.resources.getBuffer(vk.device);\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 2; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModule);\n\n 
// create outputs\n const VkExtent2D flowExtent{\n .width = static_cast(\n static_cast(this->inImg_0.getExtent().width) / vk.flowScale),\n .height = static_cast(\n static_cast(this->inImg_0.getExtent().height) / vk.flowScale)\n };\n for (size_t i = 0; i < 7; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { flowExtent.width >> i, flowExtent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t fc = 0; fc < 2; fc++)\n this->descriptorSets.at(fc).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, (fc % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Mipmaps::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto flowExtent = this->outImgs.at(0).getExtent();\n const uint32_t threadsX = (flowExtent.width + 63) >> 6;\n const uint32_t threadsY = (flowExtent.height + 63) >> 6;\n\n Utils::BarrierBuilder(buf)\n .addW2R((frameCount % 2 == 0) ? 
this->inImg_0 : this->inImg_1)\n .addR2W(this->outImgs)\n .build();\n\n this->pipeline.bind(buf);\n this->descriptorSets.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_options.h", "#pragma once\n\n#include \n\n#include \"util_flags.h\"\n\nnamespace dxvk {\n\n struct D3D11Options;\n\n enum class DxbcFloatControlFlag : uint32_t {\n DenormFlushToZero32,\n DenormPreserve64,\n PreserveNan32,\n PreserveNan64,\n };\n\n using DxbcFloatControlFlags = Flags;\n\n struct DxbcOptions {\n DxbcOptions() {}\n\n // Clamp oDepth in fragment shaders if the depth\n // clip device feature is not supported\n bool useDepthClipWorkaround = false;\n\n /// Determines whether format qualifiers\n /// on typed UAV loads are required\n bool supportsTypedUavLoadR32 = false;\n\n /// Determines whether raw access chains are supported\n bool supportsRawAccessChains = false;\n\n /// Clear thread-group shared memory to zero\n bool zeroInitWorkgroupMemory = false;\n\n /// Declare vertex positions as invariant\n bool invariantPosition = false;\n\n /// Insert memory barriers after TGSM stoes\n bool forceVolatileTgsmAccess = false;\n\n /// Try to detect hazards in UAV access and insert\n /// barriers when we know control flow is uniform.\n bool forceComputeUavBarriers = false;\n\n /// Replace ld_ms with ld\n bool disableMsaa = false;\n\n /// Force sample rate shading by using sample\n /// interpolation for fragment shader inputs\n bool forceSampleRateShading = false;\n\n // Enable per-sample interlock if supported\n bool enableSampleShadingInterlock = false;\n\n /// Use tightly packed arrays for immediate\n /// constant buffers if possible\n bool supportsTightIcbPacking = false;\n\n /// Whether exporting point size is required\n bool needsPointSizeExport = true;\n\n /// Whether to enable sincos emulation\n bool sincosEmulation = false;\n\n /// Float control flags\n DxbcFloatControlFlags floatControl;\n\n /// 
Minimum storage buffer alignment\n VkDeviceSize minSsboAlignment = 0;\n };\n \n}\n"], ["/lsfg-vk/framegen/src/core/buffer.cpp", "#include \n#include \n\n#include \"core/buffer.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nvoid Buffer::construct(const Core::Device& device, const void* data, VkBufferUsageFlags usage) {\n // create buffer\n const VkBufferCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n .size = this->size,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkBuffer bufferHandle{};\n auto res = vkCreateBuffer(device.handle(), &desc, nullptr, &bufferHandle);\n if (res != VK_SUCCESS || bufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan buffer\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetBufferMemoryRequirements(device.handle(), bufferHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags &\n (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for buffer\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS 
|| memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan buffer\");\n\n res = vkBindBufferMemory(device.handle(), bufferHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan buffer\");\n\n // upload data to buffer\n uint8_t* buf{};\n res = vkMapMemory(device.handle(), memoryHandle, 0, this->size, 0, reinterpret_cast(&buf));\n if (res != VK_SUCCESS || buf == nullptr)\n throw LSFG::vulkan_error(res, \"Failed to map memory for Vulkan buffer\");\n std::copy_n(reinterpret_cast(data), this->size, buf);\n vkUnmapMemory(device.handle(), memoryHandle);\n\n // store buffer and memory in shared ptr\n this->buffer = std::shared_ptr(\n new VkBuffer(bufferHandle),\n [dev = device.handle()](VkBuffer* img) {\n vkDestroyBuffer(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/config/config.cpp", "#include \"config/config.hpp\"\n#include \"common/exception.hpp\"\n\n#include \"config/default_conf.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Config;\n\nnamespace {\n Configuration globalConf{};\n std::optional> gameConfs;\n}\n\nConfiguration Config::activeConf{};\n\nnamespace {\n /// Turn a string into a VkPresentModeKHR enum value.\n VkPresentModeKHR into_present(const std::string& mode) {\n if (mode == \"fifo\" || mode == \"vsync\")\n return VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR;\n if (mode == \"mailbox\")\n return VkPresentModeKHR::VK_PRESENT_MODE_MAILBOX_KHR;\n if (mode == \"immediate\")\n return VkPresentModeKHR::VK_PRESENT_MODE_IMMEDIATE_KHR;\n return VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR;\n }\n}\n\nvoid Config::updateConfig(const std::string& file) 
{\n if (!std::filesystem::exists(file)) {\n std::cerr << \"lsfg-vk: Placing default configuration file at \" << file << '\\n';\n const auto parent = std::filesystem::path(file).parent_path();\n if (!std::filesystem::exists(parent))\n if (!std::filesystem::create_directories(parent))\n throw std::runtime_error(\"Unable to create configuration directory at \" + parent.string());\n\n std::ofstream out(file);\n if (!out.is_open())\n throw std::runtime_error(\"Unable to create configuration file at \" + file);\n out << DEFAULT_CONFIG;\n out.close();\n }\n\n // parse config file\n std::optional parsed;\n try {\n parsed.emplace(toml::parse(file));\n if (!parsed->contains(\"version\"))\n throw std::runtime_error(\"Configuration file is missing 'version' field\");\n if (parsed->at(\"version\").as_integer() != 1)\n throw std::runtime_error(\"Configuration file version is not supported, expected 1\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Unable to parse configuration file\", e);\n }\n auto& toml = *parsed;\n\n // parse global configuration\n const toml::value globalTable = toml::find_or_default(toml, \"global\");\n const Configuration global{\n .dll = toml::find_or(globalTable, \"dll\", std::string()),\n .config_file = file,\n .timestamp = std::filesystem::last_write_time(file)\n };\n\n // validate global configuration\n if (global.multiplier < 2)\n throw std::runtime_error(\"Global Multiplier cannot be less than 2\");\n if (global.flowScale < 0.25F || global.flowScale > 1.0F)\n throw std::runtime_error(\"Flow scale must be between 0.25 and 1.0\");\n\n // parse game-specific configuration\n std::unordered_map games;\n const toml::value gamesList = toml::find_or_default(toml, \"game\");\n for (const auto& gameTable : gamesList.as_array()) {\n if (!gameTable.is_table())\n throw std::runtime_error(\"Invalid game configuration entry\");\n if (!gameTable.contains(\"exe\"))\n throw std::runtime_error(\"Game override missing 'exe' field\");\n\n 
const std::string exe = toml::find(gameTable, \"exe\");\n Configuration game{\n .enable = true,\n .dll = global.dll,\n .multiplier = toml::find_or(gameTable, \"multiplier\", 2U),\n .flowScale = toml::find_or(gameTable, \"flow_scale\", 1.0F),\n .performance = toml::find_or(gameTable, \"performance_mode\", false),\n .hdr = toml::find_or(gameTable, \"hdr_mode\", false),\n .e_present = into_present(toml::find_or(gameTable, \"experimental_present_mode\", \"\")),\n .config_file = file,\n .timestamp = global.timestamp\n };\n\n // validate the configuration\n if (game.multiplier < 1)\n throw std::runtime_error(\"Multiplier cannot be less than 1\");\n if (game.flowScale < 0.25F || game.flowScale > 1.0F)\n throw std::runtime_error(\"Flow scale must be between 0.25 and 1.0\");\n games[exe] = std::move(game);\n }\n\n // store configurations\n globalConf = global;\n gameConfs = std::move(games);\n}\n\nConfiguration Config::getConfig(const std::pair& name) {\n // process legacy environment variables\n if (std::getenv(\"LSFG_LEGACY\")) {\n Configuration conf{\n .enable = true,\n .multiplier = 2,\n .flowScale = 1.0F,\n .e_present = VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR\n };\n\n const char* dll = std::getenv(\"LSFG_DLL_PATH\");\n if (dll) conf.dll = std::string(dll);\n const char* multiplier = std::getenv(\"LSFG_MULTIPLIER\");\n if (multiplier) conf.multiplier = std::stoul(multiplier);\n const char* flow_scale = std::getenv(\"LSFG_FLOW_SCALE\");\n if (flow_scale) conf.flowScale = std::stof(flow_scale);\n const char* performance = std::getenv(\"LSFG_PERFORMANCE_MODE\");\n if (performance) conf.performance = std::string(performance) == \"1\";\n const char* hdr = std::getenv(\"LSFG_HDR_MODE\");\n if (hdr) conf.hdr = std::string(hdr) == \"1\";\n const char* e_present = std::getenv(\"LSFG_EXPERIMENTAL_PRESENT_MODE\");\n if (e_present) conf.e_present = into_present(std::string(e_present));\n\n return conf;\n }\n\n // process new configuration system\n if (!gameConfs.has_value())\n 
return globalConf;\n\n const auto& games = *gameConfs;\n auto it = std::ranges::find_if(games, [&name](const auto& pair) {\n return name.first.ends_with(pair.first) || (name.second == pair.first);\n });\n if (it != games.end())\n return it->second;\n\n return globalConf;\n}\n"], ["/lsfg-vk/src/layer.cpp", "#include \"layer.hpp\"\n#include \"common/exception.hpp\"\n#include \"config/config.hpp\"\n#include \"hooks.hpp\"\n\n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace {\n PFN_vkCreateInstance next_vkCreateInstance{};\n PFN_vkDestroyInstance next_vkDestroyInstance{};\n\n PFN_vkCreateDevice next_vkCreateDevice{};\n PFN_vkDestroyDevice next_vkDestroyDevice{};\n\n PFN_vkSetDeviceLoaderData next_vSetDeviceLoaderData{};\n\n PFN_vkGetInstanceProcAddr next_vkGetInstanceProcAddr{};\n PFN_vkGetDeviceProcAddr next_vkGetDeviceProcAddr{};\n\n PFN_vkGetPhysicalDeviceQueueFamilyProperties next_vkGetPhysicalDeviceQueueFamilyProperties{};\n PFN_vkGetPhysicalDeviceMemoryProperties next_vkGetPhysicalDeviceMemoryProperties{};\n PFN_vkGetPhysicalDeviceProperties next_vkGetPhysicalDeviceProperties{};\n PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR{};\n\n PFN_vkCreateSwapchainKHR next_vkCreateSwapchainKHR{};\n PFN_vkQueuePresentKHR next_vkQueuePresentKHR{};\n PFN_vkDestroySwapchainKHR next_vkDestroySwapchainKHR{};\n PFN_vkGetSwapchainImagesKHR next_vkGetSwapchainImagesKHR{};\n PFN_vkAllocateCommandBuffers next_vkAllocateCommandBuffers{};\n PFN_vkFreeCommandBuffers next_vkFreeCommandBuffers{};\n PFN_vkBeginCommandBuffer next_vkBeginCommandBuffer{};\n PFN_vkEndCommandBuffer next_vkEndCommandBuffer{};\n PFN_vkCreateCommandPool next_vkCreateCommandPool{};\n PFN_vkDestroyCommandPool next_vkDestroyCommandPool{};\n PFN_vkCreateImage next_vkCreateImage{};\n PFN_vkDestroyImage next_vkDestroyImage{};\n PFN_vkGetImageMemoryRequirements next_vkGetImageMemoryRequirements{};\n PFN_vkBindImageMemory 
next_vkBindImageMemory{};\n PFN_vkAllocateMemory next_vkAllocateMemory{};\n PFN_vkFreeMemory next_vkFreeMemory{};\n PFN_vkCreateSemaphore next_vkCreateSemaphore{};\n PFN_vkDestroySemaphore next_vkDestroySemaphore{};\n PFN_vkGetMemoryFdKHR next_vkGetMemoryFdKHR{};\n PFN_vkGetSemaphoreFdKHR next_vkGetSemaphoreFdKHR{};\n PFN_vkGetDeviceQueue next_vkGetDeviceQueue{};\n PFN_vkQueueSubmit next_vkQueueSubmit{};\n PFN_vkCmdPipelineBarrier next_vkCmdPipelineBarrier{};\n PFN_vkCmdBlitImage next_vkCmdBlitImage{};\n PFN_vkAcquireNextImageKHR next_vkAcquireNextImageKHR{};\n\n template\n bool initInstanceFunc(VkInstance instance, const char* name, T* func) {\n *func = reinterpret_cast(next_vkGetInstanceProcAddr(instance, name));\n if (!*func) {\n std::cerr << \"(no function pointer for \" << name << \")\\n\";\n return false;\n }\n return true;\n }\n\n template\n bool initDeviceFunc(VkDevice device, const char* name, T* func) {\n *func = reinterpret_cast(next_vkGetDeviceProcAddr(device, name));\n if (!*func) {\n std::cerr << \"(no function pointer for \" << name << \")\\n\";\n return false;\n }\n return true;\n }\n}\n\nnamespace {\n VkResult layer_vkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n try {\n // prepare layer | NOLINTBEGIN\n auto* layerDesc = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc && (layerDesc->sType != VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO\n || layerDesc->function != VK_LAYER_LINK_INFO)) {\n layerDesc = const_cast(\n reinterpret_cast(layerDesc->pNext));\n }\n if (!layerDesc)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer creation info found in pNext chain\");\n\n next_vkGetInstanceProcAddr = layerDesc->u.pLayerInfo->pfnNextGetInstanceProcAddr;\n layerDesc->u.pLayerInfo = layerDesc->u.pLayerInfo->pNext;\n\n bool success = initInstanceFunc(nullptr, \"vkCreateInstance\", &next_vkCreateInstance);\n if (!success)\n throw 
LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get instance function pointer for vkCreateInstance\");\n\n // NOLINTEND | skip initialization if the layer is disabled\n if (!Config::activeConf.enable) {\n auto res = next_vkCreateInstance(pCreateInfo, pAllocator, pInstance);\n initInstanceFunc(*pInstance, \"vkCreateDevice\", &next_vkCreateDevice);\n return res;\n }\n\n // create instance\n try {\n auto* createInstanceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateInstance\"]);\n auto res = createInstanceHook(pCreateInfo, pAllocator, pInstance);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Failed to create Vulkan instance\", e);\n }\n\n // get relevant function pointers from the next layer\n success = true;\n success &= initInstanceFunc(*pInstance,\n \"vkDestroyInstance\", &next_vkDestroyInstance);\n success &= initInstanceFunc(*pInstance,\n \"vkCreateDevice\", &next_vkCreateDevice); // workaround mesa bug\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceQueueFamilyProperties\", &next_vkGetPhysicalDeviceQueueFamilyProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceMemoryProperties\", &next_vkGetPhysicalDeviceMemoryProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceProperties\", &next_vkGetPhysicalDeviceProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceSurfaceCapabilitiesKHR\", &next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get instance function pointers\");\n\n std::cerr << \"lsfg-vk: Vulkan instance layer initialized successfully.\\n\";\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while initializing the Vulkan instance layer:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n 
return VK_SUCCESS;\n }\n\n VkResult layer_vkCreateDevice( // NOLINTBEGIN\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n try {\n // prepare layer | NOLINTBEGIN\n auto* layerDesc = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc && (layerDesc->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO\n || layerDesc->function != VK_LAYER_LINK_INFO)) {\n layerDesc = const_cast(\n reinterpret_cast(layerDesc->pNext));\n }\n if (!layerDesc)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer creation info found in pNext chain\");\n\n next_vkGetDeviceProcAddr = layerDesc->u.pLayerInfo->pfnNextGetDeviceProcAddr;\n layerDesc->u.pLayerInfo = layerDesc->u.pLayerInfo->pNext;\n\n auto* layerDesc2 = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc2 && (layerDesc2->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO\n || layerDesc2->function != VK_LOADER_DATA_CALLBACK)) {\n layerDesc2 = const_cast(\n reinterpret_cast(layerDesc2->pNext));\n }\n if (!layerDesc2)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer device loader data found in pNext chain\");\n\n next_vSetDeviceLoaderData = layerDesc2->u.pfnSetDeviceLoaderData;\n\n // NOLINTEND | skip initialization if the layer is disabled\n if (!Config::activeConf.enable)\n return next_vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);\n\n // create device\n try {\n auto* createDeviceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateDevicePre\"]);\n auto res = createDeviceHook(physicalDevice, pCreateInfo, pAllocator, pDevice);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Failed to create Vulkan device\", e);\n }\n\n // get relevant function pointers from the next layer\n bool success = true;\n success &= initDeviceFunc(*pDevice, 
\"vkDestroyDevice\", &next_vkDestroyDevice);\n success &= initDeviceFunc(*pDevice, \"vkCreateSwapchainKHR\", &next_vkCreateSwapchainKHR);\n success &= initDeviceFunc(*pDevice, \"vkQueuePresentKHR\", &next_vkQueuePresentKHR);\n success &= initDeviceFunc(*pDevice, \"vkDestroySwapchainKHR\", &next_vkDestroySwapchainKHR);\n success &= initDeviceFunc(*pDevice, \"vkGetSwapchainImagesKHR\", &next_vkGetSwapchainImagesKHR);\n success &= initDeviceFunc(*pDevice, \"vkAllocateCommandBuffers\", &next_vkAllocateCommandBuffers);\n success &= initDeviceFunc(*pDevice, \"vkFreeCommandBuffers\", &next_vkFreeCommandBuffers);\n success &= initDeviceFunc(*pDevice, \"vkBeginCommandBuffer\", &next_vkBeginCommandBuffer);\n success &= initDeviceFunc(*pDevice, \"vkEndCommandBuffer\", &next_vkEndCommandBuffer);\n success &= initDeviceFunc(*pDevice, \"vkCreateCommandPool\", &next_vkCreateCommandPool);\n success &= initDeviceFunc(*pDevice, \"vkDestroyCommandPool\", &next_vkDestroyCommandPool);\n success &= initDeviceFunc(*pDevice, \"vkCreateImage\", &next_vkCreateImage);\n success &= initDeviceFunc(*pDevice, \"vkDestroyImage\", &next_vkDestroyImage);\n success &= initDeviceFunc(*pDevice, \"vkGetImageMemoryRequirements\", &next_vkGetImageMemoryRequirements);\n success &= initDeviceFunc(*pDevice, \"vkBindImageMemory\", &next_vkBindImageMemory);\n success &= initDeviceFunc(*pDevice, \"vkGetMemoryFdKHR\", &next_vkGetMemoryFdKHR);\n success &= initDeviceFunc(*pDevice, \"vkAllocateMemory\", &next_vkAllocateMemory);\n success &= initDeviceFunc(*pDevice, \"vkFreeMemory\", &next_vkFreeMemory);\n success &= initDeviceFunc(*pDevice, \"vkCreateSemaphore\", &next_vkCreateSemaphore);\n success &= initDeviceFunc(*pDevice, \"vkDestroySemaphore\", &next_vkDestroySemaphore);\n success &= initDeviceFunc(*pDevice, \"vkGetSemaphoreFdKHR\", &next_vkGetSemaphoreFdKHR);\n success &= initDeviceFunc(*pDevice, \"vkGetDeviceQueue\", &next_vkGetDeviceQueue);\n success &= initDeviceFunc(*pDevice, \"vkQueueSubmit\", 
&next_vkQueueSubmit);\n success &= initDeviceFunc(*pDevice, \"vkCmdPipelineBarrier\", &next_vkCmdPipelineBarrier);\n success &= initDeviceFunc(*pDevice, \"vkCmdBlitImage\", &next_vkCmdBlitImage);\n success &= initDeviceFunc(*pDevice, \"vkAcquireNextImageKHR\", &next_vkAcquireNextImageKHR);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get device function pointers\");\n\n auto postCreateDeviceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateDevicePost\"]);\n auto res = postCreateDeviceHook(physicalDevice, pCreateInfo, pAllocator, pDevice);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n\n std::cerr << \"lsfg-vk: Vulkan device layer initialized successfully.\\n\";\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while initializing the Vulkan device layer:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return VK_SUCCESS;\n } // NOLINTEND\n}\n\nconst std::unordered_map layerFunctions = {\n { \"vkCreateInstance\",\n reinterpret_cast(&layer_vkCreateInstance) },\n { \"vkCreateDevice\",\n reinterpret_cast(&layer_vkCreateDevice) },\n { \"vkGetInstanceProcAddr\",\n reinterpret_cast(&layer_vkGetInstanceProcAddr) },\n { \"vkGetDeviceProcAddr\",\n reinterpret_cast(&layer_vkGetDeviceProcAddr) },\n};\n\nPFN_vkVoidFunction layer_vkGetInstanceProcAddr(VkInstance instance, const char* pName) {\n const std::string name(pName);\n auto it = layerFunctions.find(name);\n if (it != layerFunctions.end())\n return it->second;\n\n it = Hooks::hooks.find(name);\n if (it != Hooks::hooks.end() && Config::activeConf.enable)\n return it->second;\n\n return next_vkGetInstanceProcAddr(instance, pName);\n}\n\nPFN_vkVoidFunction layer_vkGetDeviceProcAddr(VkDevice device, const char* pName) {\n const std::string name(pName);\n auto it = layerFunctions.find(name);\n if (it != layerFunctions.end())\n return it->second;\n\n it = 
Hooks::hooks.find(name);\n if (it != Hooks::hooks.end() && Config::activeConf.enable)\n return it->second;\n\n return next_vkGetDeviceProcAddr(device, pName);\n}\n\n// original functions\nnamespace Layer {\n VkResult ovkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n return next_vkCreateInstance(pCreateInfo, pAllocator, pInstance);\n }\n void ovkDestroyInstance(\n VkInstance instance,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyInstance(instance, pAllocator);\n }\n\n VkResult ovkCreateDevice(\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n return next_vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);\n }\n void ovkDestroyDevice(\n VkDevice device,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyDevice(device, pAllocator);\n }\n\n VkResult ovkSetDeviceLoaderData(VkDevice device, void* object) {\n return next_vSetDeviceLoaderData(device, object);\n }\n\n PFN_vkVoidFunction ovkGetInstanceProcAddr(\n VkInstance instance,\n const char* pName) {\n return next_vkGetInstanceProcAddr(instance, pName);\n }\n PFN_vkVoidFunction ovkGetDeviceProcAddr(\n VkDevice device,\n const char* pName) {\n return next_vkGetDeviceProcAddr(device, pName);\n }\n\n void ovkGetPhysicalDeviceQueueFamilyProperties(\n VkPhysicalDevice physicalDevice,\n uint32_t* pQueueFamilyPropertyCount,\n VkQueueFamilyProperties* pQueueFamilyProperties) {\n next_vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);\n }\n void ovkGetPhysicalDeviceMemoryProperties(\n VkPhysicalDevice physicalDevice,\n VkPhysicalDeviceMemoryProperties* pMemoryProperties) {\n next_vkGetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);\n }\n void ovkGetPhysicalDeviceProperties(\n VkPhysicalDevice physicalDevice,\n VkPhysicalDeviceProperties* 
pProperties) {\n next_vkGetPhysicalDeviceProperties(physicalDevice, pProperties);\n }\n VkResult ovkGetPhysicalDeviceSurfaceCapabilitiesKHR(\n VkPhysicalDevice physicalDevice,\n VkSurfaceKHR surface,\n VkSurfaceCapabilitiesKHR* pSurfaceCapabilities) {\n return next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);\n }\n\n VkResult ovkCreateSwapchainKHR(\n VkDevice device,\n const VkSwapchainCreateInfoKHR* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSwapchainKHR* pSwapchain) {\n return next_vkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);\n }\n VkResult ovkQueuePresentKHR(\n VkQueue queue,\n const VkPresentInfoKHR* pPresentInfo) {\n return next_vkQueuePresentKHR(queue, pPresentInfo);\n }\n void ovkDestroySwapchainKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroySwapchainKHR(device, swapchain, pAllocator);\n }\n\n VkResult ovkGetSwapchainImagesKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n uint32_t* pSwapchainImageCount,\n VkImage* pSwapchainImages) {\n return next_vkGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);\n }\n\n VkResult ovkAllocateCommandBuffers(\n VkDevice device,\n const VkCommandBufferAllocateInfo* pAllocateInfo,\n VkCommandBuffer* pCommandBuffers) {\n return next_vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);\n }\n void ovkFreeCommandBuffers(\n VkDevice device,\n VkCommandPool commandPool,\n uint32_t commandBufferCount,\n const VkCommandBuffer* pCommandBuffers) {\n next_vkFreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);\n }\n\n VkResult ovkBeginCommandBuffer(\n VkCommandBuffer commandBuffer,\n const VkCommandBufferBeginInfo* pBeginInfo) {\n return next_vkBeginCommandBuffer(commandBuffer, pBeginInfo);\n }\n VkResult ovkEndCommandBuffer(\n VkCommandBuffer commandBuffer) {\n return next_vkEndCommandBuffer(commandBuffer);\n }\n\n 
VkResult ovkCreateCommandPool(\n VkDevice device,\n const VkCommandPoolCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkCommandPool* pCommandPool) {\n return next_vkCreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);\n }\n void ovkDestroyCommandPool(\n VkDevice device,\n VkCommandPool commandPool,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyCommandPool(device, commandPool, pAllocator);\n }\n\n VkResult ovkCreateImage(\n VkDevice device,\n const VkImageCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkImage* pImage) {\n return next_vkCreateImage(device, pCreateInfo, pAllocator, pImage);\n }\n void ovkDestroyImage(\n VkDevice device,\n VkImage image,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyImage(device, image, pAllocator);\n }\n\n void ovkGetImageMemoryRequirements(\n VkDevice device,\n VkImage image,\n VkMemoryRequirements* pMemoryRequirements) {\n next_vkGetImageMemoryRequirements(device, image, pMemoryRequirements);\n }\n VkResult ovkBindImageMemory(\n VkDevice device,\n VkImage image,\n VkDeviceMemory memory,\n VkDeviceSize memoryOffset) {\n return next_vkBindImageMemory(device, image, memory, memoryOffset);\n }\n\n VkResult ovkAllocateMemory(\n VkDevice device,\n const VkMemoryAllocateInfo* pAllocateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDeviceMemory* pMemory) {\n return next_vkAllocateMemory(device, pAllocateInfo, pAllocator, pMemory);\n }\n void ovkFreeMemory(\n VkDevice device,\n VkDeviceMemory memory,\n const VkAllocationCallbacks* pAllocator) {\n next_vkFreeMemory(device, memory, pAllocator);\n }\n\n VkResult ovkCreateSemaphore(\n VkDevice device,\n const VkSemaphoreCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSemaphore* pSemaphore) {\n return next_vkCreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);\n }\n void ovkDestroySemaphore(\n VkDevice device,\n VkSemaphore semaphore,\n const VkAllocationCallbacks* 
pAllocator) {\n next_vkDestroySemaphore(device, semaphore, pAllocator);\n }\n\n VkResult ovkGetMemoryFdKHR(\n VkDevice device,\n const VkMemoryGetFdInfoKHR* pGetFdInfo,\n int* pFd) {\n return next_vkGetMemoryFdKHR(device, pGetFdInfo, pFd);\n }\n VkResult ovkGetSemaphoreFdKHR(\n VkDevice device,\n const VkSemaphoreGetFdInfoKHR* pGetFdInfo,\n int* pFd) {\n return next_vkGetSemaphoreFdKHR(device, pGetFdInfo, pFd);\n }\n\n void ovkGetDeviceQueue(\n VkDevice device,\n uint32_t queueFamilyIndex,\n uint32_t queueIndex,\n VkQueue* pQueue) {\n next_vkGetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);\n }\n VkResult ovkQueueSubmit(\n VkQueue queue,\n uint32_t submitCount,\n const VkSubmitInfo* pSubmits,\n VkFence fence) {\n return next_vkQueueSubmit(queue, submitCount, pSubmits, fence);\n }\n\n void ovkCmdPipelineBarrier(\n VkCommandBuffer commandBuffer,\n VkPipelineStageFlags srcStageMask,\n VkPipelineStageFlags dstStageMask,\n VkDependencyFlags dependencyFlags,\n uint32_t memoryBarrierCount,\n const VkMemoryBarrier* pMemoryBarriers,\n uint32_t bufferMemoryBarrierCount,\n const VkBufferMemoryBarrier* pBufferMemoryBarriers,\n uint32_t imageMemoryBarrierCount,\n const VkImageMemoryBarrier* pImageMemoryBarriers) {\n next_vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,\n memoryBarrierCount, pMemoryBarriers,\n bufferMemoryBarrierCount, pBufferMemoryBarriers,\n imageMemoryBarrierCount, pImageMemoryBarriers);\n }\n void ovkCmdBlitImage(\n VkCommandBuffer commandBuffer,\n VkImage srcImage,\n VkImageLayout srcImageLayout,\n VkImage dstImage,\n VkImageLayout dstImageLayout,\n uint32_t regionCount,\n const VkImageBlit* pRegions,\n VkFilter filter) {\n next_vkCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);\n }\n\n VkResult ovkAcquireNextImageKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n uint64_t timeout,\n VkSemaphore semaphore,\n VkFence fence,\n uint32_t* pImageIndex) 
{\n return next_vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);\n }\n}\n"], ["/lsfg-vk/framegen/src/core/shadermodule.cpp", "#include \n#include \n\n#include \"core/shadermodule.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nShaderModule::ShaderModule(const Core::Device& device, const std::vector& code,\n const std::vector>& descriptorTypes) {\n // create shader module\n const uint8_t* data_ptr = code.data();\n const VkShaderModuleCreateInfo createInfo{\n .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,\n .codeSize = code.size(),\n .pCode = reinterpret_cast(data_ptr)\n };\n VkShaderModule shaderModuleHandle{};\n auto res = vkCreateShaderModule(device.handle(), &createInfo, nullptr, &shaderModuleHandle);\n if (res != VK_SUCCESS || !shaderModuleHandle)\n throw LSFG::vulkan_error(res, \"Failed to create shader module\");\n\n // create descriptor set layout\n std::vector layoutBindings;\n size_t bindIdx = 0;\n for (const auto &[count, type] : descriptorTypes)\n for (size_t i = 0; i < count; i++, bindIdx++)\n layoutBindings.emplace_back(VkDescriptorSetLayoutBinding {\n .binding = static_cast(bindIdx),\n .descriptorType = type,\n .descriptorCount = 1,\n .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT\n });\n\n const VkDescriptorSetLayoutCreateInfo layoutDesc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,\n .bindingCount = static_cast(layoutBindings.size()),\n .pBindings = layoutBindings.data()\n };\n VkDescriptorSetLayout descriptorSetLayout{};\n res = vkCreateDescriptorSetLayout(device.handle(), &layoutDesc, nullptr, &descriptorSetLayout);\n if (res != VK_SUCCESS || !descriptorSetLayout)\n throw LSFG::vulkan_error(res, \"Failed to create descriptor set layout\");\n\n // store module and layout in shared ptr\n this->shaderModule = std::shared_ptr(\n new VkShaderModule(shaderModuleHandle),\n [dev = 
device.handle()](VkShaderModule* shaderModuleHandle) {\n vkDestroyShaderModule(dev, *shaderModuleHandle, nullptr);\n }\n );\n this->descriptorSetLayout = std::shared_ptr(\n new VkDescriptorSetLayout(descriptorSetLayout),\n [dev = device.handle()](VkDescriptorSetLayout* layout) {\n vkDestroyDescriptorSetLayout(dev, *layout, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/beta.cpp", "#include \n#include \n\n#include \"v3_1/shaders/beta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nBeta::Beta(Vulkan& vk, std::array, 3> inImgs)\n : inImgs(std::move(inImgs)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"beta[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 12, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 6, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"beta[0]\"),\n vk.shaders.getPipeline(vk.device, \"beta[1]\"),\n vk.shaders.getPipeline(vk.device, \"beta[2]\"),\n vk.shaders.getPipeline(vk.device, \"beta[3]\"),\n vk.shaders.getPipeline(vk.device, \"beta[4]\")\n }};\n this->samplers.at(0) = 
vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n for (size_t i = 0; i < 3; i++)\n this->firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(0));\n for (size_t i = 0; i < 4; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i + 1));\n this->buffer = vk.resources.getBuffer(vk.device, 0.5F);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs.at(0).at(0).getExtent();\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n for (size_t i = 0; i < 6; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { extent.width >> i, extent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t i = 0; i < 3; i++) {\n this->firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 1) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Beta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto extent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (extent.width + 7) >> 3;\n uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs.at(0))\n .addW2R(this->inImgs.at(1))\n .addW2R(this->inImgs.at(2))\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth pass\n threadsX = (extent.width + 31) >> 5;\n threadsY = (extent.height + 31) >> 5;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->outImgs)\n .build();\n\n this->pipelines.at(4).bind(buf);\n this->descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n 
buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/beta.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/beta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nBeta::Beta(Vulkan& vk, std::array, 3> inImgs)\n : inImgs(std::move(inImgs)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_beta[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 6, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_beta[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n for (size_t i = 0; i < 3; i++)\n this->firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, 
this->shaderModules.at(0));\n for (size_t i = 0; i < 4; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i + 1));\n this->buffer = vk.resources.getBuffer(vk.device, 0.5F);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs.at(0).at(0).getExtent();\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n for (size_t i = 0; i < 6; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { extent.width >> i, extent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t i = 0; i < 3; i++) {\n this->firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 1) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Beta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto extent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (extent.width + 7) >> 3;\n uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs.at(0))\n .addW2R(this->inImgs.at(1))\n .addW2R(this->inImgs.at(2))\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth pass\n threadsX = (extent.width + 31) >> 5;\n threadsY = (extent.height + 31) >> 5;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->outImgs)\n .build();\n\n this->pipelines.at(4).bind(buf);\n this->descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/thirdparty/toml11/tools/expand/main.cpp", "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nstd::optional\nis_include(const std::string& line, const std::filesystem::path& 
filepath)\n{\n // [ws] # [ws] include [ws] \\\".+\\\"\n auto iter = line.begin();\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != '#') {return std::nullopt;}\n\n assert(*iter == '#');\n ++iter;\n\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != 'i') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'n') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'c') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'l') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'u') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'd') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'e') {return std::nullopt;} else {++iter;}\n\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != '\"') {return std::nullopt;} else {++iter;}\n\n std::string filename;\n while(iter < line.end())\n {\n if(*iter == '\"') {break;}\n filename += *iter;\n ++iter;\n }\n if(iter == line.end() || *iter != '\"') {return std::nullopt;} else {++iter;}\n\n return std::filesystem::canonical(filepath.parent_path() / std::filesystem::path(filename));\n}\n\nstruct File\n{\n File() = default;\n\n explicit File(std::filesystem::path f)\n : filename(std::move(f))\n {\n std::ifstream ifs(filename);\n if( ! 
ifs.good())\n {\n throw std::runtime_error(\"file open error: \" + filename.string());\n }\n\n std::string line;\n while(std::getline(ifs, line))\n {\n if(const auto incl = is_include(line, filename))\n {\n includes.push_back(incl.value());\n }\n else\n {\n content.push_back(line);\n }\n }\n }\n\n File(std::filesystem::path f, std::vector c,\n std::vector i)\n : filename(std::move(f)), content(std::move(c)), includes(std::move(i))\n {}\n\n std::filesystem::path filename;\n std::vector content; // w/o include\n std::vector includes;\n};\n\nstruct Graph\n{\n struct Node\n {\n std::vector included;\n std::vector includes;\n };\n\n std::map nodes;\n};\n\nint main(int argc, char** argv)\n{\n using namespace std::literals::string_literals;\n if(argc != 2)\n {\n std::cerr << \"Usage: ./a.out path/to/toml.hpp > single_include/toml.hpp\" << std::endl;\n return 1;\n }\n\n const auto input_file = std::filesystem::path(std::string(argv[1]));\n assert(input_file.filename() == \"toml.hpp\");\n\n const auto include_path = input_file.parent_path();\n\n // -------------------------------------------------------------------------\n // load files and detect `include \"xxx.hpp\"`.\n // If the file has `_fwd` and `_impl`, expand those files first.\n\n std::set fwd_impl_files;\n for(const auto& entities : std::filesystem::directory_iterator(include_path/\"toml11\"/\"fwd\"))\n {\n if( ! entities.is_regular_file()) {continue;}\n std::string fname = entities.path().filename().string();\n if(fname.ends_with(\"_fwd.hpp\"))\n {\n for(const auto c : \"_fwd.hpp\"s) {fname.pop_back(); (void)c;}\n fwd_impl_files.insert(std::move(fname));\n }\n }\n for(const auto& entities : std::filesystem::directory_iterator(include_path/\"toml11\"/\"impl\"))\n {\n if( ! 
entities.is_regular_file()) {continue;}\n std::string fname = entities.path().filename().string();\n if(fname.ends_with(\"_impl.hpp\"))\n {\n for(const auto c : \"_impl.hpp\"s) {fname.pop_back(); (void)c;}\n // all impl files has fwd file\n assert(fwd_impl_files.contains(fname));\n }\n }\n\n const auto input = File(input_file);\n\n std::map files;\n files[input_file] = input;\n\n for(const auto& fname : input.includes)\n {\n if(fwd_impl_files.contains(fname.stem().string()))\n {\n std::cerr << \"expanding fwd/impl file of \" << fname.string() << std::endl;\n\n // expand the first include\n std::ifstream ifs(fname);\n\n std::vector content;\n std::vector includes;\n\n std::string line;\n while(std::getline(ifs, line))\n {\n // expand _fwd and _impl files first.\n const auto incl = is_include(line, fname);\n if(incl.has_value())\n {\n // if a file has _fwd/_impl files, it only includes fwd/impl files.\n assert(incl.value().string().ends_with(\"_impl.hpp\") ||\n incl.value().string().ends_with(\"_fwd.hpp\") );\n\n const File included(incl.value());\n for(const auto& l : included.content)\n {\n content.push_back(l);\n }\n for(const auto& i : included.includes)\n {\n includes.push_back(i);\n }\n }\n else\n {\n content.push_back(line);\n }\n }\n files[fname] = File(fname, std::move(content), std::move(includes));\n }\n else\n {\n files[fname] = File(fname);\n }\n std::cerr << \"file \" << fname << \" has \" << files.at(fname).content.size() << \" lines.\" << std::endl;\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"files have been read. 
next: constructing dependency graph...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // construct dependency graph\n\n Graph g;\n for(const auto& [k, v] : files)\n {\n g.nodes[k] = Graph::Node{};\n }\n\n for(const auto& [fname, file] : files)\n {\n for(auto incl : file.includes)\n {\n auto incl_stem = incl.stem().string();\n if(incl_stem.ends_with(\"_fwd\"))\n {\n for(const char c : \"_fwd\"s) {incl_stem.pop_back(); (void)c;}\n\n // include original file instaed\n incl = incl.parent_path() / \"..\" / std::filesystem::path(incl_stem + \".hpp\");\n }\n else if(incl_stem.ends_with(\"_impl\"))\n {\n for(const char c : \"_impl\"s) {incl_stem.pop_back(); (void)c;}\n\n // include original file instaed\n incl = incl.parent_path() / \"..\" / std::filesystem::path(incl_stem + \".hpp\");\n }\n incl = std::filesystem::canonical(incl);\n\n // avoid self include loop\n if(fname != incl)\n {\n std::cerr << fname << \" includes \" << incl << std::endl;\n\n g.nodes.at(fname).includes.push_back(incl);\n g.nodes.at(incl) .included.push_back(fname);\n }\n }\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"graph has been constructed. flattening...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // flatten graph by topological sort\n\n // collect files that does not include\n std::vector sources;\n for(const auto& [fname, node] : g.nodes)\n {\n if(node.includes.empty())\n {\n sources.push_back(fname);\n }\n }\n assert( ! sources.empty());\n\n std::vector sorted;\n while( ! 
sources.empty())\n {\n const auto file = sources.back();\n sorted.push_back(sources.back());\n sources.pop_back();\n\n for(const auto& included : g.nodes.at(file).included)\n {\n auto found = std::find(g.nodes.at(included).includes.begin(),\n g.nodes.at(included).includes.end(), file);\n g.nodes.at(included).includes.erase(found);\n\n if(g.nodes.at(included).includes.empty())\n {\n sources.push_back(included);\n }\n }\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"flattened. outputting...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // output all the file in the sorted order\n\n for(const auto& fname : sorted)\n {\n std::cerr << \"expanding: \" << fname << std::endl;\n for(const auto& line : files.at(fname).content)\n {\n std::cout << line << '\\n';\n }\n }\n\n return 0;\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/alpha.cpp", "#include \n#include \n\n#include \"v3_1/shaders/alpha.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nAlpha::Alpha(Vulkan& vk, Core::Image inImg) : inImg(std::move(inImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"alpha[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER 
},\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"alpha[0]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[1]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[2]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[3]\")\n }};\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 3; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i));\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(3));\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg.getExtent();\n const VkExtent2D halfExtent = {\n .width = (extent.width + 1) >> 1,\n .height = (extent.height + 1) >> 1\n };\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, halfExtent);\n this->tempImgs2.at(i) = Core::Image(vk.device, halfExtent);\n }\n\n const VkExtent2D quarterExtent = {\n .width = (halfExtent.width + 1) >> 1,\n .height = (halfExtent.height + 1) >> 1\n };\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs3.at(i) = Core::Image(vk.device, quarterExtent);\n for (size_t j = 0; j < 3; j++)\n this->outImgs.at(j).at(i) = Core::Image(vk.device, quarterExtent);\n }\n\n // hook up shaders\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n 
.add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs3)\n .build();\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n}\n\nvoid Alpha::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto halfExtent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (halfExtent.width + 7) >> 3;\n uint32_t threadsY = (halfExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n const auto quarterExtent = this->tempImgs3.at(0).getExtent();\n threadsX = (quarterExtent.width + 7) >> 3;\n threadsY = (quarterExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs3)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs3)\n .addR2W(this->outImgs.at(frameCount % 3))\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->lastDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/alpha.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/alpha.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include 
\"core/image.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nAlpha::Alpha(Vulkan& vk, Core::Image inImg) : inImg(std::move(inImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_alpha[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_alpha[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[3]\")\n }};\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 3; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i));\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(3));\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg.getExtent();\n const VkExtent2D halfExtent = {\n .width = (extent.width + 1) >> 1,\n .height = (extent.height + 1) >> 1\n };\n this->tempImg1 = Core::Image(vk.device, halfExtent);\n this->tempImg2 = Core::Image(vk.device, halfExtent);\n\n const VkExtent2D quarterExtent = {\n .width = (halfExtent.width + 1) >> 1,\n .height = (halfExtent.height + 1) >> 1\n };\n for (size_t i = 0; i < 2; i++) {\n 
this->tempImgs3.at(i) = Core::Image(vk.device, quarterExtent);\n for (size_t j = 0; j < 3; j++)\n this->outImgs.at(j).at(i) = Core::Image(vk.device, quarterExtent);\n }\n\n // hook up shaders\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImg1)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImg1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImg2)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs3)\n .build();\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n}\n\nvoid Alpha::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto halfExtent = this->tempImg1.getExtent();\n uint32_t threadsX = (halfExtent.width + 7) >> 3;\n uint32_t threadsY = (halfExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg)\n .addR2W(this->tempImg1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImg1)\n .addR2W(this->tempImg2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n const auto quarterExtent = this->tempImgs3.at(0).getExtent();\n threadsX = (quarterExtent.width + 7) >> 3;\n threadsY 
= (quarterExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImg2)\n .addR2W(this->tempImgs3)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs3)\n .addR2W(this->outImgs.at(frameCount % 3))\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->lastDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/src/pool/resourcepool.cpp", "#include \"pool/resourcepool.hpp\"\n#include \"core/buffer.hpp\"\n#include \"core/device.hpp\"\n#include \"core/sampler.hpp\"\n\n#include \n\n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Pool;\n\nstruct ConstantBuffer {\n std::array inputOffset;\n uint32_t firstIter;\n uint32_t firstIterS;\n uint32_t advancedColorKind;\n uint32_t hdrSupport;\n float resolutionInvScale;\n float timestamp;\n float uiThreshold;\n std::array pad;\n};\n\nCore::Buffer ResourcePool::getBuffer(\n const Core::Device& device,\n float timestamp, bool firstIter, bool firstIterS) {\n uint64_t hash = 0;\n const union { float f; uint32_t i; } u{\n .f = timestamp };\n hash |= u.i;\n hash |= static_cast(firstIter) << 32;\n hash |= static_cast(firstIterS) << 33;\n\n auto it = buffers.find(hash);\n if (it != buffers.end())\n return it->second;\n\n // create the buffer\n const ConstantBuffer data{\n .inputOffset = { 0, 0 },\n .advancedColorKind = this->isHdr ? 
2U : 0U,\n .hdrSupport = this->isHdr,\n .resolutionInvScale = this->flowScale,\n .timestamp = timestamp,\n .uiThreshold = 0.5F,\n };\n Core::Buffer buffer(device, data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);\n buffers[hash] = buffer;\n return buffer;\n}\n\nCore::Sampler ResourcePool::getSampler(\n const Core::Device& device,\n VkSamplerAddressMode type,\n VkCompareOp compare,\n bool isWhite) {\n uint64_t hash = 0;\n hash |= static_cast(type) << 0;\n hash |= static_cast(compare) << 8;\n hash |= static_cast(isWhite) << 16;\n\n auto it = samplers.find(hash);\n if (it != samplers.end())\n return it->second;\n\n // create the sampler\n Core::Sampler sampler(device, type, compare, isWhite);\n samplers[hash] = sampler;\n return sampler;\n}\n"], ["/lsfg-vk/framegen/src/core/commandbuffer.cpp", "#include \n#include \n\n#include \"core/commandbuffer.hpp\"\n#include \"core/device.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/fence.hpp\"\n#include \"core/semaphore.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nCommandBuffer::CommandBuffer(const Core::Device& device, const CommandPool& pool) {\n // create command buffer\n const VkCommandBufferAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n .commandPool = pool.handle(),\n .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n .commandBufferCount = 1\n };\n VkCommandBuffer commandBufferHandle{};\n auto res = vkAllocateCommandBuffers(device.handle(), &desc, &commandBufferHandle);\n if (res != VK_SUCCESS || commandBufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate command buffer\");\n\n // store command buffer in shared ptr\n this->state = std::make_shared(CommandBufferState::Empty);\n this->commandBuffer = std::shared_ptr(\n new VkCommandBuffer(commandBufferHandle),\n [dev = device.handle(), pool = pool.handle()](VkCommandBuffer* cmdBuffer) {\n vkFreeCommandBuffers(dev, pool, 1, 
cmdBuffer);\n }\n );\n}\n\nvoid CommandBuffer::begin() {\n if (*this->state != CommandBufferState::Empty)\n throw std::logic_error(\"Command buffer is not in Empty state\");\n\n const VkCommandBufferBeginInfo beginInfo = {\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT\n };\n auto res = vkBeginCommandBuffer(*this->commandBuffer, &beginInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to begin command buffer\");\n\n *this->state = CommandBufferState::Recording;\n}\n\nvoid CommandBuffer::dispatch(uint32_t x, uint32_t y, uint32_t z) const {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n vkCmdDispatch(*this->commandBuffer, x, y, z);\n}\n\nvoid CommandBuffer::end() {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n auto res = vkEndCommandBuffer(*this->commandBuffer);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to end command buffer\");\n\n *this->state = CommandBufferState::Full;\n}\n\nvoid CommandBuffer::submit(VkQueue queue, std::optional fence,\n const std::vector& waitSemaphores,\n std::optional> waitSemaphoreValues,\n const std::vector& signalSemaphores,\n std::optional> signalSemaphoreValues) {\n if (*this->state != CommandBufferState::Full)\n throw std::logic_error(\"Command buffer is not in Full state\");\n\n const std::vector waitStages(waitSemaphores.size(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);\n VkTimelineSemaphoreSubmitInfo timelineInfo{\n .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,\n };\n if (waitSemaphoreValues.has_value()) {\n timelineInfo.waitSemaphoreValueCount =\n static_cast(waitSemaphoreValues->size());\n timelineInfo.pWaitSemaphoreValues = waitSemaphoreValues->data();\n }\n if (signalSemaphoreValues.has_value()) {\n timelineInfo.signalSemaphoreValueCount =\n 
static_cast(signalSemaphoreValues->size());\n timelineInfo.pSignalSemaphoreValues = signalSemaphoreValues->data();\n }\n\n std::vector waitSemaphoresHandles;\n waitSemaphoresHandles.reserve(waitSemaphores.size());\n for (const auto& semaphore : waitSemaphores)\n waitSemaphoresHandles.push_back(semaphore.handle());\n std::vector signalSemaphoresHandles;\n signalSemaphoresHandles.reserve(signalSemaphores.size());\n for (const auto& semaphore : signalSemaphores)\n signalSemaphoresHandles.push_back(semaphore.handle());\n\n const VkSubmitInfo submitInfo{\n .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,\n .pNext = (waitSemaphoreValues.has_value() || signalSemaphoreValues.has_value())\n ? &timelineInfo : nullptr,\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphoresHandles.data(),\n .pWaitDstStageMask = waitStages.data(),\n .commandBufferCount = 1,\n .pCommandBuffers = &(*this->commandBuffer),\n .signalSemaphoreCount = static_cast(signalSemaphores.size()),\n .pSignalSemaphores = signalSemaphoresHandles.data()\n };\n auto res = vkQueueSubmit(queue, 1, &submitInfo, fence ? fence->handle() : VK_NULL_HANDLE);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to submit command buffer\");\n\n *this->state = CommandBufferState::Submitted;\n}\n"], ["/lsfg-vk/framegen/src/core/semaphore.cpp", "#include \n#include \n\n#include \"core/semaphore.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nSemaphore::Semaphore(const Core::Device& device, std::optional initial) {\n // create semaphore\n const VkSemaphoreTypeCreateInfo typeInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,\n .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,\n .initialValue = initial.value_or(0)\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = initial.has_value() ? 
&typeInfo : nullptr,\n };\n VkSemaphore semaphoreHandle{};\n auto res = vkCreateSemaphore(device.handle(), &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // store semaphore in shared ptr\n this->isTimeline = initial.has_value();\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device.handle()](VkSemaphore* semaphoreHandle) {\n vkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nSemaphore::Semaphore(const Core::Device& device, int fd) {\n // create semaphore\n const VkExportSemaphoreCreateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = &exportInfo\n };\n VkSemaphore semaphoreHandle{};\n auto res = vkCreateSemaphore(device.handle(), &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // import semaphore from fd\n auto vkImportSemaphoreFdKHR = reinterpret_cast(\n vkGetDeviceProcAddr(device.handle(), \"vkImportSemaphoreFdKHR\"));\n\n const VkImportSemaphoreFdInfoKHR importInfo{\n .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,\n .semaphore = semaphoreHandle,\n .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,\n .fd = fd // closes the fd\n };\n res = vkImportSemaphoreFdKHR(device.handle(), &importInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to import semaphore from fd\");\n\n // store semaphore in shared ptr\n this->isTimeline = false;\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device.handle()](VkSemaphore* semaphoreHandle) {\n vkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nvoid 
Semaphore::signal(const Core::Device& device, uint64_t value) const {\n if (!this->isTimeline)\n throw std::logic_error(\"Invalid timeline semaphore\");\n\n const VkSemaphoreSignalInfo signalInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,\n .semaphore = this->handle(),\n .value = value\n };\n auto res = vkSignalSemaphore(device.handle(), &signalInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to signal semaphore\");\n}\n\nbool Semaphore::wait(const Core::Device& device, uint64_t value, uint64_t timeout) const {\n if (!this->isTimeline)\n throw std::logic_error(\"Invalid timeline semaphore\");\n\n VkSemaphore semaphore = this->handle();\n const VkSemaphoreWaitInfo waitInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,\n .semaphoreCount = 1,\n .pSemaphores = &semaphore,\n .pValues = &value\n };\n auto res = vkWaitSemaphores(device.handle(), &waitInfo, timeout);\n if (res != VK_SUCCESS && res != VK_TIMEOUT)\n throw LSFG::vulkan_error(res, \"Unable to wait for semaphore\");\n\n return res == VK_SUCCESS;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/log/log.h", "#pragma once\n\n#include \n#include \n\nnamespace dxvk {\n \n enum class LogLevel : uint32_t {\n Trace = 0,\n Debug = 1,\n Info = 2,\n Warn = 3,\n Error = 4,\n None = 5,\n };\n\n /**\n * \\brief Logger\n * \n * Logger for one DLL. 
Creates a text file and\n * writes all log messages to that file.\n */\n class Logger {\n \n public:\n \n Logger() {}\n Logger(const std::string& file_name) {}\n ~Logger() {}\n \n static void trace(const std::string& message) {}\n static void debug(const std::string& message) {}\n static void info (const std::string& message) {}\n static void warn (const std::string& message) {}\n static void err (const std::string& message) {}\n static void log (LogLevel level, const std::string& message) {}\n \n static LogLevel logLevel() {\n return LogLevel::Warn;\n }\n\n };\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/thirdparty/GLSL.std.450.h", "/*\n** Copyright (c) 2014-2024 The Khronos Group Inc.\n**\n** Permission is hereby granted, free of charge, to any person obtaining a copy\n** of this software and/or associated documentation files (the \"Materials\"),\n** to deal in the Materials without restriction, including without limitation\n** the rights to use, copy, modify, merge, publish, distribute, sublicense,\n** and/or sell copies of the Materials, and to permit persons to whom the\n** Materials are furnished to do so, subject to the following conditions:\n**\n** The above copyright notice and this permission notice shall be included in\n** all copies or substantial portions of the Materials.\n**\n** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS\n** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND\n** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ \n**\n** THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS\n** IN THE MATERIALS.\n*/\n\n#ifndef GLSLstd450_H\n#define GLSLstd450_H\n\nstatic const int GLSLstd450Version = 100;\nstatic const int GLSLstd450Revision = 3;\n\nenum GLSLstd450 {\n GLSLstd450Bad = 0, // Don't use\n\n GLSLstd450Round = 1,\n GLSLstd450RoundEven = 2,\n GLSLstd450Trunc = 3,\n GLSLstd450FAbs = 4,\n GLSLstd450SAbs = 5,\n GLSLstd450FSign = 6,\n GLSLstd450SSign = 7,\n GLSLstd450Floor = 8,\n GLSLstd450Ceil = 9,\n GLSLstd450Fract = 10,\n\n GLSLstd450Radians = 11,\n GLSLstd450Degrees = 12,\n GLSLstd450Sin = 13,\n GLSLstd450Cos = 14,\n GLSLstd450Tan = 15,\n GLSLstd450Asin = 16,\n GLSLstd450Acos = 17,\n GLSLstd450Atan = 18,\n GLSLstd450Sinh = 19,\n GLSLstd450Cosh = 20,\n GLSLstd450Tanh = 21,\n GLSLstd450Asinh = 22,\n GLSLstd450Acosh = 23,\n GLSLstd450Atanh = 24,\n GLSLstd450Atan2 = 25,\n\n GLSLstd450Pow = 26,\n GLSLstd450Exp = 27,\n GLSLstd450Log = 28,\n GLSLstd450Exp2 = 29,\n GLSLstd450Log2 = 30,\n GLSLstd450Sqrt = 31,\n GLSLstd450InverseSqrt = 32,\n\n GLSLstd450Determinant = 33,\n GLSLstd450MatrixInverse = 34,\n\n GLSLstd450Modf = 35, // second operand needs an OpVariable to write to\n GLSLstd450ModfStruct = 36, // no OpVariable operand\n GLSLstd450FMin = 37,\n GLSLstd450UMin = 38,\n GLSLstd450SMin = 39,\n GLSLstd450FMax = 40,\n GLSLstd450UMax = 41,\n GLSLstd450SMax = 42,\n GLSLstd450FClamp = 43,\n GLSLstd450UClamp = 44,\n GLSLstd450SClamp = 45,\n GLSLstd450FMix = 46,\n GLSLstd450IMix = 47, // Reserved\n GLSLstd450Step = 48,\n GLSLstd450SmoothStep = 49,\n\n GLSLstd450Fma = 50,\n GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to\n GLSLstd450FrexpStruct = 52, // no OpVariable operand\n GLSLstd450Ldexp = 53,\n\n GLSLstd450PackSnorm4x8 = 54,\n GLSLstd450PackUnorm4x8 = 55,\n 
GLSLstd450PackSnorm2x16 = 56,\n GLSLstd450PackUnorm2x16 = 57,\n GLSLstd450PackHalf2x16 = 58,\n GLSLstd450PackDouble2x32 = 59,\n GLSLstd450UnpackSnorm2x16 = 60,\n GLSLstd450UnpackUnorm2x16 = 61,\n GLSLstd450UnpackHalf2x16 = 62,\n GLSLstd450UnpackSnorm4x8 = 63,\n GLSLstd450UnpackUnorm4x8 = 64,\n GLSLstd450UnpackDouble2x32 = 65,\n\n GLSLstd450Length = 66,\n GLSLstd450Distance = 67,\n GLSLstd450Cross = 68,\n GLSLstd450Normalize = 69,\n GLSLstd450FaceForward = 70,\n GLSLstd450Reflect = 71,\n GLSLstd450Refract = 72,\n\n GLSLstd450FindILsb = 73,\n GLSLstd450FindSMsb = 74,\n GLSLstd450FindUMsb = 75,\n\n GLSLstd450InterpolateAtCentroid = 76,\n GLSLstd450InterpolateAtSample = 77,\n GLSLstd450InterpolateAtOffset = 78,\n\n GLSLstd450NMin = 79,\n GLSLstd450NMax = 80,\n GLSLstd450NClamp = 81,\n\n GLSLstd450Count\n};\n\n#endif // #ifndef GLSLstd450_H\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_hash.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n\n struct DxvkEq {\n template\n size_t operator () (const T& a, const T& b) const {\n return a.eq(b);\n }\n };\n\n struct DxvkHash {\n template\n size_t operator () (const T& object) const {\n return object.hash();\n }\n };\n\n class DxvkHashState {\n\n public:\n\n void add(size_t hash) {\n m_value ^= hash + 0x9e3779b9\n + (m_value << 6)\n + (m_value >> 2);\n }\n\n operator size_t () const {\n return m_value;\n }\n\n private:\n\n size_t m_value = 0;\n\n };\n\n}\n"], ["/lsfg-vk/framegen/src/core/descriptorset.cpp", "#include \n#include \n\n#include \"core/descriptorset.hpp\"\n#include \"core/device.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/pipeline.hpp\"\n#include \"core/image.hpp\"\n#include \"core/sampler.hpp\"\n#include \"core/buffer.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n\nusing namespace LSFG::Core;\n\nDescriptorSet::DescriptorSet(const Core::Device& device,\n const DescriptorPool& pool, const ShaderModule& 
shaderModule) {\n // create descriptor set\n VkDescriptorSetLayout layout = shaderModule.getLayout();\n const VkDescriptorSetAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,\n .descriptorPool = pool.handle(),\n .descriptorSetCount = 1,\n .pSetLayouts = &layout\n };\n VkDescriptorSet descriptorSetHandle{};\n auto res = vkAllocateDescriptorSets(device.handle(), &desc, &descriptorSetHandle);\n if (res != VK_SUCCESS || descriptorSetHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate descriptor set\");\n\n /// store set in shared ptr\n this->descriptorSet = std::shared_ptr(\n new VkDescriptorSet(descriptorSetHandle),\n [dev = device.handle(), pool = pool](VkDescriptorSet* setHandle) {\n vkFreeDescriptorSets(dev, pool.handle(), 1, setHandle);\n }\n );\n}\n\nDescriptorSetUpdateBuilder DescriptorSet::update(const Core::Device& device) const {\n return { *this, device };\n}\n\nvoid DescriptorSet::bind(const CommandBuffer& commandBuffer, const Pipeline& pipeline) const {\n VkDescriptorSet descriptorSetHandle = this->handle();\n vkCmdBindDescriptorSets(commandBuffer.handle(),\n VK_PIPELINE_BIND_POINT_COMPUTE, pipeline.getLayout(),\n 0, 1, &descriptorSetHandle, 0, nullptr);\n}\n\n// updater class\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Image& image) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n .imageView = image.getView(),\n .imageLayout = VK_IMAGE_LAYOUT_GENERAL\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Sampler& sampler) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n 
.dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n .sampler = sampler.handle(),\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Buffer& buffer) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = nullptr,\n .pBufferInfo = new VkDescriptorBufferInfo {\n .buffer = buffer.handle(),\n .range = buffer.getSize()\n }\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nvoid DescriptorSetUpdateBuilder::build() {\n vkUpdateDescriptorSets(this->device->handle(),\n static_cast(this->entries.size()),\n this->entries.data(), 0, nullptr);\n\n // NOLINTBEGIN\n for (const auto& entry : this->entries) {\n delete entry.pImageInfo;\n delete entry.pBufferInfo;\n }\n // NOLINTEND\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/rc/util_rc.h", "#pragma once\n\n#include \n\n#include \"../util_likely.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Reference-counted object\n */\n class RcObject {\n \n public:\n \n /**\n * \\brief Increments reference count\n * \\returns New reference count\n */\n force_inline uint32_t incRef() {\n return ++m_refCount;\n }\n \n /**\n * \\brief Decrements reference count\n * \\returns New reference count\n */\n force_inline uint32_t decRef() {\n return --m_refCount;\n }\n \n private:\n \n std::atomic 
m_refCount = { 0u };\n \n };\n \n}"], ["/lsfg-vk/src/mini/commandbuffer.cpp", "#include \"mini/commandbuffer.hpp\"\n#include \"mini/commandpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n\nusing namespace Mini;\n\nCommandBuffer::CommandBuffer(VkDevice device, const CommandPool& pool) {\n // create command buffer\n const VkCommandBufferAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n .commandPool = pool.handle(),\n .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n .commandBufferCount = 1\n };\n VkCommandBuffer commandBufferHandle{};\n auto res = Layer::ovkAllocateCommandBuffers(device, &desc, &commandBufferHandle);\n if (res != VK_SUCCESS || commandBufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate command buffer\");\n res = Layer::ovkSetDeviceLoaderData(device, commandBufferHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to set device loader data for command buffer\");\n\n // store command buffer in shared ptr\n this->state = std::make_shared(CommandBufferState::Empty);\n this->commandBuffer = std::shared_ptr(\n new VkCommandBuffer(commandBufferHandle),\n [dev = device, pool = pool.handle()](VkCommandBuffer* cmdBuffer) {\n Layer::ovkFreeCommandBuffers(dev, pool, 1, cmdBuffer);\n }\n );\n}\n\nvoid CommandBuffer::begin() {\n if (*this->state != CommandBufferState::Empty)\n throw std::logic_error(\"Command buffer is not in Empty state\");\n\n const VkCommandBufferBeginInfo beginInfo = {\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT\n };\n auto res = Layer::ovkBeginCommandBuffer(*this->commandBuffer, &beginInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to begin command buffer\");\n\n *this->state = CommandBufferState::Recording;\n}\n\nvoid CommandBuffer::end() {\n if (*this->state != 
CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n auto res = Layer::ovkEndCommandBuffer(*this->commandBuffer);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to end command buffer\");\n\n *this->state = CommandBufferState::Full;\n}\n\nvoid CommandBuffer::submit(VkQueue queue,\n const std::vector& waitSemaphores,\n const std::vector& signalSemaphores) {\n if (*this->state != CommandBufferState::Full)\n throw std::logic_error(\"Command buffer is not in Full state\");\n\n const std::vector waitStages(waitSemaphores.size(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);\n\n const VkSubmitInfo submitInfo{\n .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphores.data(),\n .pWaitDstStageMask = waitStages.data(),\n .commandBufferCount = 1,\n .pCommandBuffers = &(*this->commandBuffer),\n .signalSemaphoreCount = static_cast(signalSemaphores.size()),\n .pSignalSemaphores = signalSemaphores.data()\n };\n auto res = Layer::ovkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to submit command buffer\");\n\n *this->state = CommandBufferState::Submitted;\n}\n"], ["/lsfg-vk/framegen/src/core/descriptorpool.cpp", "#include \n#include \n\n#include \"core/descriptorpool.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nDescriptorPool::DescriptorPool(const Core::Device& device) {\n // create descriptor pool\n const std::array pools{{ // arbitrary limits\n { .type = VK_DESCRIPTOR_TYPE_SAMPLER, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 4096 }\n }};\n const VkDescriptorPoolCreateInfo desc{\n 
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,\n .flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,\n .maxSets = 16384,\n .poolSizeCount = static_cast(pools.size()),\n .pPoolSizes = pools.data()\n };\n VkDescriptorPool poolHandle{};\n auto res = vkCreateDescriptorPool(device.handle(), &desc, nullptr, &poolHandle);\n if (res != VK_SUCCESS || poolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create descriptor pool\");\n\n // store pool in shared ptr\n this->descriptorPool = std::shared_ptr(\n new VkDescriptorPool(poolHandle),\n [dev = device.handle()](VkDescriptorPool* poolHandle) {\n vkDestroyDescriptorPool(dev, *poolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/v3.1_src/lsfg.cpp", "#include \n#include \n\n#include \"lsfg_3_1.hpp\"\n#include \"v3_1/context.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/instance.hpp\"\n#include \"pool/shaderpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"common/utils.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1;\n\nnamespace {\n std::optional instance;\n std::optional device;\n std::unordered_map contexts;\n}\n\nvoid LSFG_3_1::initialize(uint64_t deviceUUID,\n bool isHdr, float flowScale, uint64_t generationCount,\n const std::function(const std::string&)>& loader) {\n if (instance.has_value() || device.has_value())\n return;\n\n instance.emplace();\n device.emplace(Vulkan {\n .device{*instance, deviceUUID},\n .generationCount = generationCount,\n .flowScale = flowScale,\n .isHdr = isHdr\n });\n contexts = std::unordered_map();\n\n device->commandPool = Core::CommandPool(device->device);\n device->descriptorPool = Core::DescriptorPool(device->device);\n\n device->resources = Pool::ResourcePool(device->isHdr, device->flowScale);\n device->shaders = Pool::ShaderPool(loader);\n\n 
std::srand(static_cast(std::time(nullptr)));\n}\n\nint32_t LSFG_3_1::createContext(\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n const int32_t id = std::rand();\n contexts.emplace(id, Context(*device, in0, in1, outN, extent, format));\n return id;\n}\n\nvoid LSFG_3_1::presentContext(int32_t id, int inSem, const std::vector& outSem) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Context not found\");\n\n it->second.present(*device, inSem, outSem);\n}\n\nvoid LSFG_3_1::deleteContext(int32_t id) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_DEVICE_LOST, \"No such context\");\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.erase(it);\n}\n\nvoid LSFG_3_1::finalize() {\n if (!instance.has_value() || !device.has_value())\n return;\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.clear();\n device.reset();\n instance.reset();\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/lsfg.cpp", "#include \n#include \n\n#include \"lsfg_3_1p.hpp\"\n#include \"v3_1p/context.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/instance.hpp\"\n#include \"pool/shaderpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"common/utils.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1P;\n\nnamespace {\n std::optional instance;\n std::optional device;\n std::unordered_map 
contexts;\n}\n\nvoid LSFG_3_1P::initialize(uint64_t deviceUUID,\n bool isHdr, float flowScale, uint64_t generationCount,\n const std::function(const std::string&)>& loader) {\n if (instance.has_value() || device.has_value())\n return;\n\n instance.emplace();\n device.emplace(Vulkan {\n .device{*instance, deviceUUID},\n .generationCount = generationCount,\n .flowScale = flowScale,\n .isHdr = isHdr\n });\n contexts = std::unordered_map();\n\n device->commandPool = Core::CommandPool(device->device);\n device->descriptorPool = Core::DescriptorPool(device->device);\n\n device->resources = Pool::ResourcePool(device->isHdr, device->flowScale);\n device->shaders = Pool::ShaderPool(loader);\n\n std::srand(static_cast(std::time(nullptr)));\n}\n\nint32_t LSFG_3_1P::createContext(\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n const int32_t id = std::rand();\n contexts.emplace(id, Context(*device, in0, in1, outN, extent, format));\n return id;\n}\n\nvoid LSFG_3_1P::presentContext(int32_t id, int inSem, const std::vector& outSem) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Context not found\");\n\n it->second.present(*device, inSem, outSem);\n}\n\nvoid LSFG_3_1P::deleteContext(int32_t id) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_DEVICE_LOST, \"No such context\");\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.erase(it);\n}\n\nvoid LSFG_3_1P::finalize() {\n if (!instance.has_value() || 
!device.has_value())\n return;\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.clear();\n device.reset();\n instance.reset();\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_math.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n \n constexpr size_t CACHE_LINE_SIZE = 64;\n constexpr double pi = 3.14159265359;\n\n template\n constexpr T clamp(T n, T lo, T hi) {\n if (n < lo) return lo;\n if (n > hi) return hi;\n return n;\n }\n \n template\n constexpr T align(T what, U to) {\n return (what + to - 1) & ~(to - 1);\n }\n\n template\n constexpr T alignDown(T what, U to) {\n return (what / to) * to;\n }\n\n // Equivalent of std::clamp for use with floating point numbers\n // Handles (-){INFINITY,NAN} cases.\n // Will return min in cases of NAN, etc.\n inline float fclamp(float value, float min, float max) {\n return std::fmin(\n std::fmax(value, min), max);\n }\n\n template\n inline T divCeil(T dividend, T divisor) {\n return (dividend + divisor - 1) / divisor;\n }\n \n}\n"], ["/lsfg-vk/framegen/src/core/instance.cpp", "#include \n#include \n\n#include \"core/instance.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nconst std::vector requiredExtensions = {\n\n};\n\nInstance::Instance() {\n volkInitialize();\n\n // create Vulkan instance\n const VkApplicationInfo appInfo{\n .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,\n .pApplicationName = \"lsfg-vk-base\",\n .applicationVersion = VK_MAKE_VERSION(0, 0, 1),\n .pEngineName = \"lsfg-vk-base\",\n .engineVersion = VK_MAKE_VERSION(0, 0, 1),\n .apiVersion = VK_API_VERSION_1_3\n };\n const VkInstanceCreateInfo createInfo{\n .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,\n .pApplicationInfo = &appInfo,\n .enabledExtensionCount = static_cast(requiredExtensions.size()),\n .ppEnabledExtensionNames = requiredExtensions.data()\n };\n VkInstance instanceHandle{};\n auto res = vkCreateInstance(&createInfo, nullptr, &instanceHandle);\n if (res != VK_SUCCESS)\n 
throw LSFG::vulkan_error(res, \"Failed to create Vulkan instance\");\n\n volkLoadInstance(instanceHandle);\n\n // store in shared ptr\n this->instance = std::shared_ptr(\n new VkInstance(instanceHandle),\n [](VkInstance* instance) {\n vkDestroyInstance(*instance, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_error.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n \n /**\n * \\brief DXVK error\n * \n * A generic exception class that stores a\n * message. Exceptions should be logged.\n */\n class DxvkError {\n \n public:\n \n DxvkError() { }\n DxvkError(std::string&& message)\n : m_message(std::move(message)) { }\n \n const std::string& message() const {\n return m_message;\n }\n \n private:\n \n std::string m_message;\n \n };\n \n}"], ["/lsfg-vk/framegen/src/core/fence.cpp", "#include \n#include \n\n#include \"core/fence.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n\nusing namespace LSFG::Core;\n\nFence::Fence(const Core::Device& device) {\n // create fence\n const VkFenceCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO\n };\n VkFence fenceHandle{};\n auto res = vkCreateFence(device.handle(), &desc, nullptr, &fenceHandle);\n if (res != VK_SUCCESS || fenceHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create fence\");\n\n // store fence in shared ptr\n this->fence = std::shared_ptr(\n new VkFence(fenceHandle),\n [dev = device.handle()](VkFence* fenceHandle) {\n vkDestroyFence(dev, *fenceHandle, nullptr);\n }\n );\n}\n\nvoid Fence::reset(const Core::Device& device) const {\n VkFence fenceHandle = this->handle();\n auto res = vkResetFences(device.handle(), 1, &fenceHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to reset fence\");\n}\n\nbool Fence::wait(const Core::Device& device, uint64_t timeout) const {\n VkFence fenceHandle = this->handle();\n auto res = vkWaitForFences(device.handle(), 1, &fenceHandle, VK_TRUE, 
timeout);\n if (res != VK_SUCCESS && res != VK_TIMEOUT)\n throw LSFG::vulkan_error(res, \"Unable to wait for fence\");\n\n return res == VK_SUCCESS;\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/unicode_winapi.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2020 Trail of Bits, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n\nnamespace peparse {\nstd::string from_utf16(const UCharString &u) {\n std::string result;\n std::size_t size = WideCharToMultiByte(CP_UTF8,\n 0,\n u.data(),\n static_cast(u.size()),\n nullptr,\n 0,\n nullptr,\n nullptr);\n\n if (size <= 0) {\n return result;\n }\n\n result.reserve(size);\n WideCharToMultiByte(CP_UTF8,\n 0,\n u.data(),\n static_cast(u.size()),\n &result[0],\n static_cast(result.capacity()),\n nullptr,\n nullptr);\n\n return result;\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/volk/volk.h", "/**\n * volk\n *\n * Copyright (C) 2018-2025, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)\n * Report bugs and download new versions at https://github.com/zeux/volk\n *\n * This library is distributed under the MIT License. 
See notice at the end of this file.\n */\n/* clang-format off */\n#ifndef VOLK_H_\n#define VOLK_H_\n\n#if defined(VULKAN_H_) && !defined(VK_NO_PROTOTYPES)\n#\terror To use volk, you need to define VK_NO_PROTOTYPES before including vulkan.h\n#endif\n\n/* VOLK_GENERATE_VERSION_DEFINE */\n#define VOLK_HEADER_VERSION 323\n/* VOLK_GENERATE_VERSION_DEFINE */\n\n#ifndef VK_NO_PROTOTYPES\n#\tdefine VK_NO_PROTOTYPES\n#endif\n\n#ifndef VULKAN_H_\n#\tifdef VOLK_VULKAN_H_PATH\n#\t\tinclude VOLK_VULKAN_H_PATH\n#\telse /* Platform headers included below */\n#\t\tinclude \n#\t\tinclude \n#\tendif\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nstruct VolkDeviceTable;\n\n/**\n * Initialize library by loading Vulkan loader; call this function before creating the Vulkan instance.\n *\n * Returns VK_SUCCESS on success and VK_ERROR_INITIALIZATION_FAILED otherwise.\n */\nVkResult volkInitialize(void);\n\n/**\n * Initialize library by providing a custom handler to load global symbols.\n *\n * This function can be used instead of volkInitialize.\n * The handler function pointer will be asked to load global Vulkan symbols which require no instance\n * (such as vkCreateInstance, vkEnumerateInstance* and vkEnumerateInstanceVersion if available).\n */\nvoid volkInitializeCustom(PFN_vkGetInstanceProcAddr handler);\n\n/**\n * Finalize library by unloading Vulkan loader and resetting global symbols to NULL.\n *\n * This function does not need to be called on process exit (as loader will be unloaded automatically) or if volkInitialize failed.\n * In general this function is optional to call but may be useful in rare cases eg if volk needs to be reinitialized multiple times.\n */\nvoid volkFinalize(void);\n\n/**\n * Get Vulkan instance version supported by the Vulkan loader, or 0 if Vulkan isn't supported\n *\n * Returns 0 if volkInitialize wasn't called or failed.\n */\nuint32_t volkGetInstanceVersion(void);\n\n/**\n * Load global function pointers using application-created VkInstance; 
call this function after creating the Vulkan instance.\n */\nvoid volkLoadInstance(VkInstance instance);\n\n/**\n * Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.\n * Skips loading device-based function pointers, requires usage of volkLoadDevice afterwards.\n */\nvoid volkLoadInstanceOnly(VkInstance instance);\n\n/**\n * Load global function pointers using application-created VkDevice; call this function after creating the Vulkan device.\n *\n * Note: this is not suitable for applications that want to use multiple VkDevice objects concurrently.\n */\nvoid volkLoadDevice(VkDevice device);\n\n/**\n * Return last VkInstance for which global function pointers have been loaded via volkLoadInstance(),\n * or VK_NULL_HANDLE if volkLoadInstance() has not been called.\n */\nVkInstance volkGetLoadedInstance(void);\n\n/**\n * Return last VkDevice for which global function pointers have been loaded via volkLoadDevice(),\n * or VK_NULL_HANDLE if volkLoadDevice() has not been called.\n */\nVkDevice volkGetLoadedDevice(void);\n\n/**\n * Load function pointers using application-created VkDevice into a table.\n * Application should use function pointers from that table instead of using global function pointers.\n */\nvoid volkLoadDeviceTable(struct VolkDeviceTable* table, VkDevice device);\n\n#ifdef __cplusplus\n}\n#endif\n\n/* Instead of directly including vulkan.h, we include platform-specific parts of the SDK manually\n * This is necessary to avoid including platform headers in some cases (which vulkan.h does unconditionally)\n * and replace them with forward declarations, which makes build times faster and avoids macro conflicts.\n *\n * Note that we only replace platform-specific headers when the headers are known to be problematic: very large\n * or slow to compile (Windows), or introducing unprefixed macros which can cause conflicts (Windows, Xlib).\n */\n#if !defined(VULKAN_H_) && 
!defined(VOLK_VULKAN_H_PATH)\n\n#ifdef VK_USE_PLATFORM_ANDROID_KHR\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_FUCHSIA\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_IOS_MVK\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_MACOS_MVK\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_METAL_EXT\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_VI_NN\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_WAYLAND_KHR\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_WIN32_KHR\ntypedef unsigned long DWORD;\ntypedef const wchar_t* LPCWSTR;\ntypedef void* HANDLE;\ntypedef struct HINSTANCE__* HINSTANCE;\ntypedef struct HWND__* HWND;\ntypedef struct HMONITOR__* HMONITOR;\ntypedef struct _SECURITY_ATTRIBUTES SECURITY_ATTRIBUTES;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XCB_KHR\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XLIB_KHR\ntypedef struct _XDisplay Display;\ntypedef unsigned long Window;\ntypedef unsigned long VisualID;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_DIRECTFB_EXT\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT\ntypedef struct _XDisplay Display;\ntypedef unsigned long RROutput;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_GGP\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_SCREEN_QNX\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_SCI\n#include \n#include \n#include \n#endif\n\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n#include \n#endif\n\n#endif\n\n/**\n * Device-specific function pointer table\n */\nstruct VolkDeviceTable\n{\n\t/* VOLK_GENERATE_DEVICE_TABLE */\n#if defined(VK_VERSION_1_0)\n\tPFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\n\tPFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\n\tPFN_vkAllocateMemory vkAllocateMemory;\n\tPFN_vkBeginCommandBuffer vkBeginCommandBuffer;\n\tPFN_vkBindBufferMemory vkBindBufferMemory;\n\tPFN_vkBindImageMemory vkBindImageMemory;\n\tPFN_vkCmdBeginQuery vkCmdBeginQuery;\n\tPFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\n\tPFN_vkCmdBindDescriptorSets 
vkCmdBindDescriptorSets;\n\tPFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\n\tPFN_vkCmdBindPipeline vkCmdBindPipeline;\n\tPFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\n\tPFN_vkCmdBlitImage vkCmdBlitImage;\n\tPFN_vkCmdClearAttachments vkCmdClearAttachments;\n\tPFN_vkCmdClearColorImage vkCmdClearColorImage;\n\tPFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\n\tPFN_vkCmdCopyBuffer vkCmdCopyBuffer;\n\tPFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\n\tPFN_vkCmdCopyImage vkCmdCopyImage;\n\tPFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\n\tPFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\n\tPFN_vkCmdDispatch vkCmdDispatch;\n\tPFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\n\tPFN_vkCmdDraw vkCmdDraw;\n\tPFN_vkCmdDrawIndexed vkCmdDrawIndexed;\n\tPFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;\n\tPFN_vkCmdDrawIndirect vkCmdDrawIndirect;\n\tPFN_vkCmdEndQuery vkCmdEndQuery;\n\tPFN_vkCmdEndRenderPass vkCmdEndRenderPass;\n\tPFN_vkCmdExecuteCommands vkCmdExecuteCommands;\n\tPFN_vkCmdFillBuffer vkCmdFillBuffer;\n\tPFN_vkCmdNextSubpass vkCmdNextSubpass;\n\tPFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\n\tPFN_vkCmdPushConstants vkCmdPushConstants;\n\tPFN_vkCmdResetEvent vkCmdResetEvent;\n\tPFN_vkCmdResetQueryPool vkCmdResetQueryPool;\n\tPFN_vkCmdResolveImage vkCmdResolveImage;\n\tPFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\n\tPFN_vkCmdSetDepthBias vkCmdSetDepthBias;\n\tPFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\n\tPFN_vkCmdSetEvent vkCmdSetEvent;\n\tPFN_vkCmdSetLineWidth vkCmdSetLineWidth;\n\tPFN_vkCmdSetScissor vkCmdSetScissor;\n\tPFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\n\tPFN_vkCmdSetStencilReference vkCmdSetStencilReference;\n\tPFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\n\tPFN_vkCmdSetViewport vkCmdSetViewport;\n\tPFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\n\tPFN_vkCmdWaitEvents vkCmdWaitEvents;\n\tPFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\n\tPFN_vkCreateBuffer 
vkCreateBuffer;\n\tPFN_vkCreateBufferView vkCreateBufferView;\n\tPFN_vkCreateCommandPool vkCreateCommandPool;\n\tPFN_vkCreateComputePipelines vkCreateComputePipelines;\n\tPFN_vkCreateDescriptorPool vkCreateDescriptorPool;\n\tPFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\n\tPFN_vkCreateEvent vkCreateEvent;\n\tPFN_vkCreateFence vkCreateFence;\n\tPFN_vkCreateFramebuffer vkCreateFramebuffer;\n\tPFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\n\tPFN_vkCreateImage vkCreateImage;\n\tPFN_vkCreateImageView vkCreateImageView;\n\tPFN_vkCreatePipelineCache vkCreatePipelineCache;\n\tPFN_vkCreatePipelineLayout vkCreatePipelineLayout;\n\tPFN_vkCreateQueryPool vkCreateQueryPool;\n\tPFN_vkCreateRenderPass vkCreateRenderPass;\n\tPFN_vkCreateSampler vkCreateSampler;\n\tPFN_vkCreateSemaphore vkCreateSemaphore;\n\tPFN_vkCreateShaderModule vkCreateShaderModule;\n\tPFN_vkDestroyBuffer vkDestroyBuffer;\n\tPFN_vkDestroyBufferView vkDestroyBufferView;\n\tPFN_vkDestroyCommandPool vkDestroyCommandPool;\n\tPFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\n\tPFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\n\tPFN_vkDestroyDevice vkDestroyDevice;\n\tPFN_vkDestroyEvent vkDestroyEvent;\n\tPFN_vkDestroyFence vkDestroyFence;\n\tPFN_vkDestroyFramebuffer vkDestroyFramebuffer;\n\tPFN_vkDestroyImage vkDestroyImage;\n\tPFN_vkDestroyImageView vkDestroyImageView;\n\tPFN_vkDestroyPipeline vkDestroyPipeline;\n\tPFN_vkDestroyPipelineCache vkDestroyPipelineCache;\n\tPFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\n\tPFN_vkDestroyQueryPool vkDestroyQueryPool;\n\tPFN_vkDestroyRenderPass vkDestroyRenderPass;\n\tPFN_vkDestroySampler vkDestroySampler;\n\tPFN_vkDestroySemaphore vkDestroySemaphore;\n\tPFN_vkDestroyShaderModule vkDestroyShaderModule;\n\tPFN_vkDeviceWaitIdle vkDeviceWaitIdle;\n\tPFN_vkEndCommandBuffer vkEndCommandBuffer;\n\tPFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\n\tPFN_vkFreeCommandBuffers vkFreeCommandBuffers;\n\tPFN_vkFreeDescriptorSets 
vkFreeDescriptorSets;\n\tPFN_vkFreeMemory vkFreeMemory;\n\tPFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\n\tPFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\n\tPFN_vkGetDeviceQueue vkGetDeviceQueue;\n\tPFN_vkGetEventStatus vkGetEventStatus;\n\tPFN_vkGetFenceStatus vkGetFenceStatus;\n\tPFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\n\tPFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\n\tPFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\n\tPFN_vkGetPipelineCacheData vkGetPipelineCacheData;\n\tPFN_vkGetQueryPoolResults vkGetQueryPoolResults;\n\tPFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\n\tPFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\n\tPFN_vkMapMemory vkMapMemory;\n\tPFN_vkMergePipelineCaches vkMergePipelineCaches;\n\tPFN_vkQueueBindSparse vkQueueBindSparse;\n\tPFN_vkQueueSubmit vkQueueSubmit;\n\tPFN_vkQueueWaitIdle vkQueueWaitIdle;\n\tPFN_vkResetCommandBuffer vkResetCommandBuffer;\n\tPFN_vkResetCommandPool vkResetCommandPool;\n\tPFN_vkResetDescriptorPool vkResetDescriptorPool;\n\tPFN_vkResetEvent vkResetEvent;\n\tPFN_vkResetFences vkResetFences;\n\tPFN_vkSetEvent vkSetEvent;\n\tPFN_vkUnmapMemory vkUnmapMemory;\n\tPFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\n\tPFN_vkWaitForFences vkWaitForFences;\n#else\n\tPFN_vkVoidFunction padding_6ce80d51[120];\n#endif /* defined(VK_VERSION_1_0) */\n#if defined(VK_VERSION_1_1)\n\tPFN_vkBindBufferMemory2 vkBindBufferMemory2;\n\tPFN_vkBindImageMemory2 vkBindImageMemory2;\n\tPFN_vkCmdDispatchBase vkCmdDispatchBase;\n\tPFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;\n\tPFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;\n\tPFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;\n\tPFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;\n\tPFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;\n\tPFN_vkGetBufferMemoryRequirements2 
vkGetBufferMemoryRequirements2;\n\tPFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;\n\tPFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;\n\tPFN_vkGetDeviceQueue2 vkGetDeviceQueue2;\n\tPFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\n\tPFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;\n\tPFN_vkTrimCommandPool vkTrimCommandPool;\n\tPFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;\n#else\n\tPFN_vkVoidFunction padding_1ec56847[16];\n#endif /* defined(VK_VERSION_1_1) */\n#if defined(VK_VERSION_1_2)\n\tPFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;\n\tPFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;\n\tPFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;\n\tPFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;\n\tPFN_vkCmdNextSubpass2 vkCmdNextSubpass2;\n\tPFN_vkCreateRenderPass2 vkCreateRenderPass2;\n\tPFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;\n\tPFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;\n\tPFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;\n\tPFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;\n\tPFN_vkResetQueryPool vkResetQueryPool;\n\tPFN_vkSignalSemaphore vkSignalSemaphore;\n\tPFN_vkWaitSemaphores vkWaitSemaphores;\n#else\n\tPFN_vkVoidFunction padding_a3e00662[13];\n#endif /* defined(VK_VERSION_1_2) */\n#if defined(VK_VERSION_1_3)\n\tPFN_vkCmdBeginRendering vkCmdBeginRendering;\n\tPFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;\n\tPFN_vkCmdBlitImage2 vkCmdBlitImage2;\n\tPFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;\n\tPFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;\n\tPFN_vkCmdCopyImage2 vkCmdCopyImage2;\n\tPFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;\n\tPFN_vkCmdEndRendering vkCmdEndRendering;\n\tPFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\n\tPFN_vkCmdResetEvent2 vkCmdResetEvent2;\n\tPFN_vkCmdResolveImage2 vkCmdResolveImage2;\n\tPFN_vkCmdSetCullMode 
vkCmdSetCullMode;\n\tPFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;\n\tPFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;\n\tPFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;\n\tPFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;\n\tPFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;\n\tPFN_vkCmdSetEvent2 vkCmdSetEvent2;\n\tPFN_vkCmdSetFrontFace vkCmdSetFrontFace;\n\tPFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;\n\tPFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;\n\tPFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;\n\tPFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;\n\tPFN_vkCmdSetStencilOp vkCmdSetStencilOp;\n\tPFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;\n\tPFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;\n\tPFN_vkCmdWaitEvents2 vkCmdWaitEvents2;\n\tPFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;\n\tPFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;\n\tPFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;\n\tPFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;\n\tPFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;\n\tPFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;\n\tPFN_vkGetPrivateData vkGetPrivateData;\n\tPFN_vkQueueSubmit2 vkQueueSubmit2;\n\tPFN_vkSetPrivateData vkSetPrivateData;\n#else\n\tPFN_vkVoidFunction padding_ee798a88[36];\n#endif /* defined(VK_VERSION_1_3) */\n#if defined(VK_VERSION_1_4)\n\tPFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2;\n\tPFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2;\n\tPFN_vkCmdPushConstants2 vkCmdPushConstants2;\n\tPFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet;\n\tPFN_vkCmdPushDescriptorSet2 vkCmdPushDescriptorSet2;\n\tPFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate;\n\tPFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2;\n\tPFN_vkCmdSetLineStipple 
vkCmdSetLineStipple;\n\tPFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations;\n\tPFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices;\n\tPFN_vkCopyImageToImage vkCopyImageToImage;\n\tPFN_vkCopyImageToMemory vkCopyImageToMemory;\n\tPFN_vkCopyMemoryToImage vkCopyMemoryToImage;\n\tPFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout;\n\tPFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2;\n\tPFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity;\n\tPFN_vkMapMemory2 vkMapMemory2;\n\tPFN_vkTransitionImageLayout vkTransitionImageLayout;\n\tPFN_vkUnmapMemory2 vkUnmapMemory2;\n#else\n\tPFN_vkVoidFunction padding_82585fa3[19];\n#endif /* defined(VK_VERSION_1_4) */\n#if defined(VK_AMDX_shader_enqueue)\n\tPFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;\n\tPFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;\n\tPFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;\n\tPFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;\n\tPFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;\n\tPFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;\n\tPFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;\n#else\n\tPFN_vkVoidFunction padding_9d3e2bba[7];\n#endif /* defined(VK_AMDX_shader_enqueue) */\n#if defined(VK_AMD_anti_lag)\n\tPFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD;\n#else\n\tPFN_vkVoidFunction padding_cf792fb4[1];\n#endif /* defined(VK_AMD_anti_lag) */\n#if defined(VK_AMD_buffer_marker)\n\tPFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;\n#else\n\tPFN_vkVoidFunction padding_7836e92f[1];\n#endif /* defined(VK_AMD_buffer_marker) */\n#if defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\n\tPFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;\n#else\n\tPFN_vkVoidFunction 
padding_bbf9b7bb[1];\n#endif /* defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_AMD_display_native_hdr)\n\tPFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;\n#else\n\tPFN_vkVoidFunction padding_6b81b2fb[1];\n#endif /* defined(VK_AMD_display_native_hdr) */\n#if defined(VK_AMD_draw_indirect_count)\n\tPFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;\n\tPFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;\n#else\n\tPFN_vkVoidFunction padding_fbfa9964[2];\n#endif /* defined(VK_AMD_draw_indirect_count) */\n#if defined(VK_AMD_shader_info)\n\tPFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;\n#else\n\tPFN_vkVoidFunction padding_bfb754b[1];\n#endif /* defined(VK_AMD_shader_info) */\n#if defined(VK_ANDROID_external_memory_android_hardware_buffer)\n\tPFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;\n\tPFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;\n#else\n\tPFN_vkVoidFunction padding_c67b1beb[2];\n#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */\n#if defined(VK_ARM_data_graph)\n\tPFN_vkBindDataGraphPipelineSessionMemoryARM vkBindDataGraphPipelineSessionMemoryARM;\n\tPFN_vkCmdDispatchDataGraphARM vkCmdDispatchDataGraphARM;\n\tPFN_vkCreateDataGraphPipelineSessionARM vkCreateDataGraphPipelineSessionARM;\n\tPFN_vkCreateDataGraphPipelinesARM vkCreateDataGraphPipelinesARM;\n\tPFN_vkDestroyDataGraphPipelineSessionARM vkDestroyDataGraphPipelineSessionARM;\n\tPFN_vkGetDataGraphPipelineAvailablePropertiesARM vkGetDataGraphPipelineAvailablePropertiesARM;\n\tPFN_vkGetDataGraphPipelinePropertiesARM vkGetDataGraphPipelinePropertiesARM;\n\tPFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM vkGetDataGraphPipelineSessionBindPointRequirementsARM;\n\tPFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM vkGetDataGraphPipelineSessionMemoryRequirementsARM;\n#else\n\tPFN_vkVoidFunction 
padding_894d85d8[9];\n#endif /* defined(VK_ARM_data_graph) */\n#if defined(VK_ARM_tensors)\n\tPFN_vkBindTensorMemoryARM vkBindTensorMemoryARM;\n\tPFN_vkCmdCopyTensorARM vkCmdCopyTensorARM;\n\tPFN_vkCreateTensorARM vkCreateTensorARM;\n\tPFN_vkCreateTensorViewARM vkCreateTensorViewARM;\n\tPFN_vkDestroyTensorARM vkDestroyTensorARM;\n\tPFN_vkDestroyTensorViewARM vkDestroyTensorViewARM;\n\tPFN_vkGetDeviceTensorMemoryRequirementsARM vkGetDeviceTensorMemoryRequirementsARM;\n\tPFN_vkGetTensorMemoryRequirementsARM vkGetTensorMemoryRequirementsARM;\n#else\n\tPFN_vkVoidFunction padding_df67a729[8];\n#endif /* defined(VK_ARM_tensors) */\n#if defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer)\n\tPFN_vkGetTensorOpaqueCaptureDescriptorDataARM vkGetTensorOpaqueCaptureDescriptorDataARM;\n\tPFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM vkGetTensorViewOpaqueCaptureDescriptorDataARM;\n#else\n\tPFN_vkVoidFunction padding_9483bf7e[2];\n#endif /* defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)\n\tPFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_760a41f5[1];\n#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */\n#if defined(VK_EXT_buffer_device_address)\n\tPFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;\n#else\n\tPFN_vkVoidFunction padding_3b69d885[1];\n#endif /* defined(VK_EXT_buffer_device_address) */\n#if defined(VK_EXT_calibrated_timestamps)\n\tPFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;\n#else\n\tPFN_vkVoidFunction padding_d0981c89[1];\n#endif /* defined(VK_EXT_calibrated_timestamps) */\n#if defined(VK_EXT_color_write_enable)\n\tPFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_d301ecc3[1];\n#endif /* defined(VK_EXT_color_write_enable) */\n#if defined(VK_EXT_conditional_rendering)\n\tPFN_vkCmdBeginConditionalRenderingEXT 
vkCmdBeginConditionalRenderingEXT;\n\tPFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;\n#else\n\tPFN_vkVoidFunction padding_ab532c18[2];\n#endif /* defined(VK_EXT_conditional_rendering) */\n#if defined(VK_EXT_debug_marker)\n\tPFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;\n\tPFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;\n\tPFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;\n\tPFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;\n\tPFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;\n#else\n\tPFN_vkVoidFunction padding_89986968[5];\n#endif /* defined(VK_EXT_debug_marker) */\n#if defined(VK_EXT_depth_bias_control)\n\tPFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;\n#else\n\tPFN_vkVoidFunction padding_bcddab4d[1];\n#endif /* defined(VK_EXT_depth_bias_control) */\n#if defined(VK_EXT_descriptor_buffer)\n\tPFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;\n\tPFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;\n\tPFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;\n\tPFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetDescriptorEXT vkGetDescriptorEXT;\n\tPFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;\n\tPFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;\n\tPFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;\n#else\n\tPFN_vkVoidFunction padding_80aa973c[10];\n#endif /* defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))\n\tPFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT 
vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;\n#else\n\tPFN_vkVoidFunction padding_98d0fb33[1];\n#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */\n#if defined(VK_EXT_device_fault)\n\tPFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;\n#else\n\tPFN_vkVoidFunction padding_55095419[1];\n#endif /* defined(VK_EXT_device_fault) */\n#if defined(VK_EXT_device_generated_commands)\n\tPFN_vkCmdExecuteGeneratedCommandsEXT vkCmdExecuteGeneratedCommandsEXT;\n\tPFN_vkCmdPreprocessGeneratedCommandsEXT vkCmdPreprocessGeneratedCommandsEXT;\n\tPFN_vkCreateIndirectCommandsLayoutEXT vkCreateIndirectCommandsLayoutEXT;\n\tPFN_vkCreateIndirectExecutionSetEXT vkCreateIndirectExecutionSetEXT;\n\tPFN_vkDestroyIndirectCommandsLayoutEXT vkDestroyIndirectCommandsLayoutEXT;\n\tPFN_vkDestroyIndirectExecutionSetEXT vkDestroyIndirectExecutionSetEXT;\n\tPFN_vkGetGeneratedCommandsMemoryRequirementsEXT vkGetGeneratedCommandsMemoryRequirementsEXT;\n\tPFN_vkUpdateIndirectExecutionSetPipelineEXT vkUpdateIndirectExecutionSetPipelineEXT;\n\tPFN_vkUpdateIndirectExecutionSetShaderEXT vkUpdateIndirectExecutionSetShaderEXT;\n#else\n\tPFN_vkVoidFunction padding_7ba7ebaa[9];\n#endif /* defined(VK_EXT_device_generated_commands) */\n#if defined(VK_EXT_discard_rectangles)\n\tPFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;\n#else\n\tPFN_vkVoidFunction padding_d6355c2[1];\n#endif /* defined(VK_EXT_discard_rectangles) */\n#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2\n\tPFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;\n\tPFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;\n#else\n\tPFN_vkVoidFunction padding_7bb44f77[2];\n#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */\n#if defined(VK_EXT_display_control)\n\tPFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;\n\tPFN_vkGetSwapchainCounterEXT 
vkGetSwapchainCounterEXT;\n\tPFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT;\n\tPFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;\n#else\n\tPFN_vkVoidFunction padding_d30dfaaf[4];\n#endif /* defined(VK_EXT_display_control) */\n#if defined(VK_EXT_external_memory_host)\n\tPFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_357656e9[1];\n#endif /* defined(VK_EXT_external_memory_host) */\n#if defined(VK_EXT_external_memory_metal)\n\tPFN_vkGetMemoryMetalHandleEXT vkGetMemoryMetalHandleEXT;\n\tPFN_vkGetMemoryMetalHandlePropertiesEXT vkGetMemoryMetalHandlePropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_37d43fb[2];\n#endif /* defined(VK_EXT_external_memory_metal) */\n#if defined(VK_EXT_fragment_density_map_offset)\n\tPFN_vkCmdEndRendering2EXT vkCmdEndRendering2EXT;\n#else\n\tPFN_vkVoidFunction padding_9c90cf11[1];\n#endif /* defined(VK_EXT_fragment_density_map_offset) */\n#if defined(VK_EXT_full_screen_exclusive)\n\tPFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;\n\tPFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;\n#else\n\tPFN_vkVoidFunction padding_3859df46[2];\n#endif /* defined(VK_EXT_full_screen_exclusive) */\n#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1))\n\tPFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;\n#else\n\tPFN_vkVoidFunction padding_e5b48b5b[1];\n#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */\n#if defined(VK_EXT_hdr_metadata)\n\tPFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;\n#else\n\tPFN_vkVoidFunction padding_ca6d733c[1];\n#endif /* defined(VK_EXT_hdr_metadata) */\n#if defined(VK_EXT_host_image_copy)\n\tPFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;\n\tPFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;\n\tPFN_vkCopyMemoryToImageEXT 
vkCopyMemoryToImageEXT;\n\tPFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;\n#else\n\tPFN_vkVoidFunction padding_dd6d9b61[4];\n#endif /* defined(VK_EXT_host_image_copy) */\n#if defined(VK_EXT_host_query_reset)\n\tPFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;\n#else\n\tPFN_vkVoidFunction padding_34e58bd3[1];\n#endif /* defined(VK_EXT_host_query_reset) */\n#if defined(VK_EXT_image_drm_format_modifier)\n\tPFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_eb50dc14[1];\n#endif /* defined(VK_EXT_image_drm_format_modifier) */\n#if defined(VK_EXT_line_rasterization)\n\tPFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;\n#else\n\tPFN_vkVoidFunction padding_8a212c37[1];\n#endif /* defined(VK_EXT_line_rasterization) */\n#if defined(VK_EXT_mesh_shader)\n\tPFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;\n\tPFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;\n#else\n\tPFN_vkVoidFunction padding_f65e838[2];\n#endif /* defined(VK_EXT_mesh_shader) */\n#if defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\n\tPFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;\n#else\n\tPFN_vkVoidFunction padding_dcbaac2f[1];\n#endif /* defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_EXT_metal_objects)\n\tPFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;\n#else\n\tPFN_vkVoidFunction padding_df21f735[1];\n#endif /* defined(VK_EXT_metal_objects) */\n#if defined(VK_EXT_multi_draw)\n\tPFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;\n\tPFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;\n#else\n\tPFN_vkVoidFunction padding_ce8b93b6[2];\n#endif /* defined(VK_EXT_multi_draw) */\n#if defined(VK_EXT_opacity_micromap)\n\tPFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;\n\tPFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;\n\tPFN_vkCmdCopyMemoryToMicromapEXT 
vkCmdCopyMemoryToMicromapEXT;\n\tPFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;\n\tPFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT;\n\tPFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;\n\tPFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;\n\tPFN_vkCopyMicromapEXT vkCopyMicromapEXT;\n\tPFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;\n\tPFN_vkCreateMicromapEXT vkCreateMicromapEXT;\n\tPFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;\n\tPFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;\n\tPFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;\n\tPFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_fa41e53c[14];\n#endif /* defined(VK_EXT_opacity_micromap) */\n#if defined(VK_EXT_pageable_device_local_memory)\n\tPFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;\n#else\n\tPFN_vkVoidFunction padding_b2d2c2d7[1];\n#endif /* defined(VK_EXT_pageable_device_local_memory) */\n#if defined(VK_EXT_pipeline_properties)\n\tPFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_11313020[1];\n#endif /* defined(VK_EXT_pipeline_properties) */\n#if defined(VK_EXT_private_data)\n\tPFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;\n\tPFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;\n\tPFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;\n\tPFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;\n#else\n\tPFN_vkVoidFunction padding_108010f[4];\n#endif /* defined(VK_EXT_private_data) */\n#if defined(VK_EXT_sample_locations)\n\tPFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;\n#else\n\tPFN_vkVoidFunction padding_26f9079f[1];\n#endif /* defined(VK_EXT_sample_locations) */\n#if defined(VK_EXT_shader_module_identifier)\n\tPFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;\n\tPFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;\n#else\n\tPFN_vkVoidFunction 
padding_e10c8f86[2];\n#endif /* defined(VK_EXT_shader_module_identifier) */\n#if defined(VK_EXT_shader_object)\n\tPFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;\n\tPFN_vkCreateShadersEXT vkCreateShadersEXT;\n\tPFN_vkDestroyShaderEXT vkDestroyShaderEXT;\n\tPFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;\n#else\n\tPFN_vkVoidFunction padding_374f3e18[4];\n#endif /* defined(VK_EXT_shader_object) */\n#if defined(VK_EXT_swapchain_maintenance1)\n\tPFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;\n#else\n\tPFN_vkVoidFunction padding_ea55bf74[1];\n#endif /* defined(VK_EXT_swapchain_maintenance1) */\n#if defined(VK_EXT_transform_feedback)\n\tPFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;\n\tPFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;\n\tPFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;\n\tPFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;\n\tPFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;\n\tPFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;\n#else\n\tPFN_vkVoidFunction padding_36980658[6];\n#endif /* defined(VK_EXT_transform_feedback) */\n#if defined(VK_EXT_validation_cache)\n\tPFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;\n\tPFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;\n\tPFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;\n\tPFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;\n#else\n\tPFN_vkVoidFunction padding_b4f2df29[4];\n#endif /* defined(VK_EXT_validation_cache) */\n#if defined(VK_FUCHSIA_buffer_collection)\n\tPFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;\n\tPFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;\n\tPFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;\n\tPFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;\n\tPFN_vkSetBufferCollectionImageConstraintsFUCHSIA 
vkSetBufferCollectionImageConstraintsFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_8eaa27bc[5];\n#endif /* defined(VK_FUCHSIA_buffer_collection) */\n#if defined(VK_FUCHSIA_external_memory)\n\tPFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;\n\tPFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_e3cb8a67[2];\n#endif /* defined(VK_FUCHSIA_external_memory) */\n#if defined(VK_FUCHSIA_external_semaphore)\n\tPFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;\n\tPFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_3df6f656[2];\n#endif /* defined(VK_FUCHSIA_external_semaphore) */\n#if defined(VK_GOOGLE_display_timing)\n\tPFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;\n\tPFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;\n#else\n\tPFN_vkVoidFunction padding_2a6f50cd[2];\n#endif /* defined(VK_GOOGLE_display_timing) */\n#if defined(VK_HUAWEI_cluster_culling_shader)\n\tPFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;\n\tPFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_75b97be6[2];\n#endif /* defined(VK_HUAWEI_cluster_culling_shader) */\n#if defined(VK_HUAWEI_invocation_mask)\n\tPFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_c3a4569f[1];\n#endif /* defined(VK_HUAWEI_invocation_mask) */\n#if defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2\n\tPFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_2e923f32[1];\n#endif /* defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2 */\n#if defined(VK_HUAWEI_subpass_shading)\n\tPFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;\n#else\n\tPFN_vkVoidFunction 
padding_f766fdf5[1];\n#endif /* defined(VK_HUAWEI_subpass_shading) */\n#if defined(VK_INTEL_performance_query)\n\tPFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;\n\tPFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;\n\tPFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;\n\tPFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;\n\tPFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;\n\tPFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;\n\tPFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;\n\tPFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;\n\tPFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;\n#else\n\tPFN_vkVoidFunction padding_495a0a0b[9];\n#endif /* defined(VK_INTEL_performance_query) */\n#if defined(VK_KHR_acceleration_structure)\n\tPFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;\n\tPFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;\n\tPFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;\n\tPFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;\n\tPFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;\n\tPFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;\n\tPFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;\n\tPFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;\n\tPFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;\n\tPFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;\n\tPFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;\n\tPFN_vkDestroyAccelerationStructureKHR 
vkDestroyAccelerationStructureKHR;\n\tPFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;\n\tPFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;\n\tPFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;\n\tPFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_5a999b78[16];\n#endif /* defined(VK_KHR_acceleration_structure) */\n#if defined(VK_KHR_bind_memory2)\n\tPFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;\n\tPFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;\n#else\n\tPFN_vkVoidFunction padding_ed8481f5[2];\n#endif /* defined(VK_KHR_bind_memory2) */\n#if defined(VK_KHR_buffer_device_address)\n\tPFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;\n\tPFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;\n\tPFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;\n#else\n\tPFN_vkVoidFunction padding_178fdf81[3];\n#endif /* defined(VK_KHR_buffer_device_address) */\n#if defined(VK_KHR_calibrated_timestamps)\n\tPFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR;\n#else\n\tPFN_vkVoidFunction padding_8fd6f40d[1];\n#endif /* defined(VK_KHR_calibrated_timestamps) */\n#if defined(VK_KHR_copy_commands2)\n\tPFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;\n\tPFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;\n\tPFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;\n\tPFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;\n\tPFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;\n\tPFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;\n#else\n\tPFN_vkVoidFunction padding_4c841ff2[6];\n#endif /* defined(VK_KHR_copy_commands2) */\n#if defined(VK_KHR_create_renderpass2)\n\tPFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;\n\tPFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;\n\tPFN_vkCmdNextSubpass2KHR 
vkCmdNextSubpass2KHR;\n\tPFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;\n#else\n\tPFN_vkVoidFunction padding_2a0a8727[4];\n#endif /* defined(VK_KHR_create_renderpass2) */\n#if defined(VK_KHR_deferred_host_operations)\n\tPFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;\n\tPFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;\n\tPFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;\n\tPFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;\n\tPFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;\n#else\n\tPFN_vkVoidFunction padding_346287bb[5];\n#endif /* defined(VK_KHR_deferred_host_operations) */\n#if defined(VK_KHR_descriptor_update_template)\n\tPFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;\n\tPFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;\n\tPFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;\n#else\n\tPFN_vkVoidFunction padding_3d63aec0[3];\n#endif /* defined(VK_KHR_descriptor_update_template) */\n#if defined(VK_KHR_device_group)\n\tPFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;\n\tPFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;\n\tPFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;\n#else\n\tPFN_vkVoidFunction padding_5ebe16bd[3];\n#endif /* defined(VK_KHR_device_group) */\n#if defined(VK_KHR_display_swapchain)\n\tPFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;\n#else\n\tPFN_vkVoidFunction padding_12099367[1];\n#endif /* defined(VK_KHR_display_swapchain) */\n#if defined(VK_KHR_draw_indirect_count)\n\tPFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;\n\tPFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;\n#else\n\tPFN_vkVoidFunction padding_7b5bc4c1[2];\n#endif /* defined(VK_KHR_draw_indirect_count) */\n#if defined(VK_KHR_dynamic_rendering)\n\tPFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;\n\tPFN_vkCmdEndRenderingKHR 
vkCmdEndRenderingKHR;\n#else\n\tPFN_vkVoidFunction padding_b80f75a5[2];\n#endif /* defined(VK_KHR_dynamic_rendering) */\n#if defined(VK_KHR_dynamic_rendering_local_read)\n\tPFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR;\n\tPFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR;\n#else\n\tPFN_vkVoidFunction padding_b1510532[2];\n#endif /* defined(VK_KHR_dynamic_rendering_local_read) */\n#if defined(VK_KHR_external_fence_fd)\n\tPFN_vkGetFenceFdKHR vkGetFenceFdKHR;\n\tPFN_vkImportFenceFdKHR vkImportFenceFdKHR;\n#else\n\tPFN_vkVoidFunction padding_a2c787d5[2];\n#endif /* defined(VK_KHR_external_fence_fd) */\n#if defined(VK_KHR_external_fence_win32)\n\tPFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;\n\tPFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;\n#else\n\tPFN_vkVoidFunction padding_55d8e6a9[2];\n#endif /* defined(VK_KHR_external_fence_win32) */\n#if defined(VK_KHR_external_memory_fd)\n\tPFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;\n\tPFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_982d9e19[2];\n#endif /* defined(VK_KHR_external_memory_fd) */\n#if defined(VK_KHR_external_memory_win32)\n\tPFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;\n\tPFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_4af9e25a[2];\n#endif /* defined(VK_KHR_external_memory_win32) */\n#if defined(VK_KHR_external_semaphore_fd)\n\tPFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;\n\tPFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;\n#else\n\tPFN_vkVoidFunction padding_2237b7cf[2];\n#endif /* defined(VK_KHR_external_semaphore_fd) */\n#if defined(VK_KHR_external_semaphore_win32)\n\tPFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;\n\tPFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;\n#else\n\tPFN_vkVoidFunction padding_c18dea52[2];\n#endif /* 
defined(VK_KHR_external_semaphore_win32) */\n#if defined(VK_KHR_fragment_shading_rate)\n\tPFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;\n#else\n\tPFN_vkVoidFunction padding_f91b0a90[1];\n#endif /* defined(VK_KHR_fragment_shading_rate) */\n#if defined(VK_KHR_get_memory_requirements2)\n\tPFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;\n\tPFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;\n\tPFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;\n#else\n\tPFN_vkVoidFunction padding_79d9c5c4[3];\n#endif /* defined(VK_KHR_get_memory_requirements2) */\n#if defined(VK_KHR_line_rasterization)\n\tPFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR;\n#else\n\tPFN_vkVoidFunction padding_83c2939[1];\n#endif /* defined(VK_KHR_line_rasterization) */\n#if defined(VK_KHR_maintenance1)\n\tPFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;\n#else\n\tPFN_vkVoidFunction padding_4b372c56[1];\n#endif /* defined(VK_KHR_maintenance1) */\n#if defined(VK_KHR_maintenance3)\n\tPFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;\n#else\n\tPFN_vkVoidFunction padding_5ea7858d[1];\n#endif /* defined(VK_KHR_maintenance3) */\n#if defined(VK_KHR_maintenance4)\n\tPFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;\n\tPFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;\n\tPFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR;\n#else\n\tPFN_vkVoidFunction padding_8e2d4198[3];\n#endif /* defined(VK_KHR_maintenance4) */\n#if defined(VK_KHR_maintenance5)\n\tPFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;\n\tPFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;\n\tPFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;\n\tPFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;\n#else\n\tPFN_vkVoidFunction padding_37040339[4];\n#endif /* 
defined(VK_KHR_maintenance5) */\n#if defined(VK_KHR_maintenance6)\n\tPFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR;\n\tPFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR;\n#else\n\tPFN_vkVoidFunction padding_442955d8[2];\n#endif /* defined(VK_KHR_maintenance6) */\n#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor)\n\tPFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR;\n\tPFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR;\n#else\n\tPFN_vkVoidFunction padding_80e8513f[2];\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer)\n\tPFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT;\n\tPFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT;\n#else\n\tPFN_vkVoidFunction padding_2816b9cd[2];\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_KHR_map_memory2)\n\tPFN_vkMapMemory2KHR vkMapMemory2KHR;\n\tPFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;\n#else\n\tPFN_vkVoidFunction padding_5a6d8986[2];\n#endif /* defined(VK_KHR_map_memory2) */\n#if defined(VK_KHR_performance_query)\n\tPFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;\n\tPFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;\n#else\n\tPFN_vkVoidFunction padding_76f2673b[2];\n#endif /* defined(VK_KHR_performance_query) */\n#if defined(VK_KHR_pipeline_binary)\n\tPFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR;\n\tPFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR;\n\tPFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR;\n\tPFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR;\n\tPFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR;\n#else\n\tPFN_vkVoidFunction padding_65232810[5];\n#endif /* defined(VK_KHR_pipeline_binary) */\n#if 
defined(VK_KHR_pipeline_executable_properties)\n\tPFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;\n\tPFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;\n\tPFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;\n#else\n\tPFN_vkVoidFunction padding_f7629b1e[3];\n#endif /* defined(VK_KHR_pipeline_executable_properties) */\n#if defined(VK_KHR_present_wait)\n\tPFN_vkWaitForPresentKHR vkWaitForPresentKHR;\n#else\n\tPFN_vkVoidFunction padding_b16cbe03[1];\n#endif /* defined(VK_KHR_present_wait) */\n#if defined(VK_KHR_present_wait2)\n\tPFN_vkWaitForPresent2KHR vkWaitForPresent2KHR;\n#else\n\tPFN_vkVoidFunction padding_7401483a[1];\n#endif /* defined(VK_KHR_present_wait2) */\n#if defined(VK_KHR_push_descriptor)\n\tPFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;\n#else\n\tPFN_vkVoidFunction padding_8f7712ad[1];\n#endif /* defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)\n\tPFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;\n#else\n\tPFN_vkVoidFunction padding_dd5f9b4a[1];\n#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_ray_tracing_pipeline)\n\tPFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;\n\tPFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;\n\tPFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;\n\tPFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;\n\tPFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;\n\tPFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;\n\tPFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;\n#else\n\tPFN_vkVoidFunction padding_af99aedc[7];\n#endif /* defined(VK_KHR_ray_tracing_pipeline) */\n#if 
defined(VK_KHR_sampler_ycbcr_conversion)\n\tPFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;\n\tPFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;\n#else\n\tPFN_vkVoidFunction padding_88e61b30[2];\n#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */\n#if defined(VK_KHR_shared_presentable_image)\n\tPFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;\n#else\n\tPFN_vkVoidFunction padding_1ff3379[1];\n#endif /* defined(VK_KHR_shared_presentable_image) */\n#if defined(VK_KHR_swapchain)\n\tPFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\n\tPFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\n\tPFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\n\tPFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\n\tPFN_vkQueuePresentKHR vkQueuePresentKHR;\n#else\n\tPFN_vkVoidFunction padding_a1de893b[5];\n#endif /* defined(VK_KHR_swapchain) */\n#if defined(VK_KHR_swapchain_maintenance1)\n\tPFN_vkReleaseSwapchainImagesKHR vkReleaseSwapchainImagesKHR;\n#else\n\tPFN_vkVoidFunction padding_e032d5c4[1];\n#endif /* defined(VK_KHR_swapchain_maintenance1) */\n#if defined(VK_KHR_synchronization2)\n\tPFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;\n\tPFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;\n\tPFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;\n\tPFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;\n\tPFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;\n\tPFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;\n#else\n\tPFN_vkVoidFunction padding_e85bf128[6];\n#endif /* defined(VK_KHR_synchronization2) */\n#if defined(VK_KHR_timeline_semaphore)\n\tPFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;\n\tPFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;\n\tPFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;\n#else\n\tPFN_vkVoidFunction padding_c799d931[3];\n#endif /* defined(VK_KHR_timeline_semaphore) */\n#if defined(VK_KHR_video_decode_queue)\n\tPFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;\n#else\n\tPFN_vkVoidFunction padding_7a7cc7ad[1];\n#endif /* 
defined(VK_KHR_video_decode_queue) */\n#if defined(VK_KHR_video_encode_queue)\n\tPFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;\n\tPFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;\n#else\n\tPFN_vkVoidFunction padding_f2997fb4[2];\n#endif /* defined(VK_KHR_video_encode_queue) */\n#if defined(VK_KHR_video_queue)\n\tPFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;\n\tPFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;\n\tPFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;\n\tPFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;\n\tPFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;\n\tPFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;\n\tPFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;\n\tPFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;\n\tPFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;\n\tPFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;\n#else\n\tPFN_vkVoidFunction padding_98fb7016[10];\n#endif /* defined(VK_KHR_video_queue) */\n#if defined(VK_NVX_binary_import)\n\tPFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;\n\tPFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;\n\tPFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;\n\tPFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;\n\tPFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;\n#else\n\tPFN_vkVoidFunction padding_eb54309b[5];\n#endif /* defined(VK_NVX_binary_import) */\n#if defined(VK_NVX_image_view_handle)\n\tPFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;\n#else\n\tPFN_vkVoidFunction padding_887f6736[1];\n#endif /* defined(VK_NVX_image_view_handle) */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3\n\tPFN_vkGetImageViewHandle64NVX vkGetImageViewHandle64NVX;\n#else\n\tPFN_vkVoidFunction padding_64ad40e2[1];\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3 */\n#if 
defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2\n\tPFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;\n#else\n\tPFN_vkVoidFunction padding_d290479a[1];\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_clip_space_w_scaling)\n\tPFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;\n#else\n\tPFN_vkVoidFunction padding_88d7eb2e[1];\n#endif /* defined(VK_NV_clip_space_w_scaling) */\n#if defined(VK_NV_cluster_acceleration_structure)\n\tPFN_vkCmdBuildClusterAccelerationStructureIndirectNV vkCmdBuildClusterAccelerationStructureIndirectNV;\n\tPFN_vkGetClusterAccelerationStructureBuildSizesNV vkGetClusterAccelerationStructureBuildSizesNV;\n#else\n\tPFN_vkVoidFunction padding_60e35395[2];\n#endif /* defined(VK_NV_cluster_acceleration_structure) */\n#if defined(VK_NV_cooperative_vector)\n\tPFN_vkCmdConvertCooperativeVectorMatrixNV vkCmdConvertCooperativeVectorMatrixNV;\n\tPFN_vkConvertCooperativeVectorMatrixNV vkConvertCooperativeVectorMatrixNV;\n#else\n\tPFN_vkVoidFunction padding_f4a887d0[2];\n#endif /* defined(VK_NV_cooperative_vector) */\n#if defined(VK_NV_copy_memory_indirect)\n\tPFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;\n\tPFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;\n#else\n\tPFN_vkVoidFunction padding_9536230e[2];\n#endif /* defined(VK_NV_copy_memory_indirect) */\n#if defined(VK_NV_cuda_kernel_launch)\n\tPFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;\n\tPFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;\n\tPFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;\n\tPFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;\n\tPFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;\n\tPFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;\n#else\n\tPFN_vkVoidFunction padding_2eabdf3b[6];\n#endif /* defined(VK_NV_cuda_kernel_launch) */\n#if defined(VK_NV_device_diagnostic_checkpoints)\n\tPFN_vkCmdSetCheckpointNV 
vkCmdSetCheckpointNV;\n\tPFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;\n#else\n\tPFN_vkVoidFunction padding_adaa5a21[2];\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) */\n#if defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\n\tPFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;\n#else\n\tPFN_vkVoidFunction padding_c776633d[1];\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_NV_device_generated_commands)\n\tPFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;\n\tPFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;\n\tPFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;\n\tPFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;\n\tPFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;\n\tPFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;\n#else\n\tPFN_vkVoidFunction padding_4c7e4395[6];\n#endif /* defined(VK_NV_device_generated_commands) */\n#if defined(VK_NV_device_generated_commands_compute)\n\tPFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;\n\tPFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV;\n\tPFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;\n#else\n\tPFN_vkVoidFunction padding_5195094c[3];\n#endif /* defined(VK_NV_device_generated_commands_compute) */\n#if defined(VK_NV_external_compute_queue)\n\tPFN_vkCreateExternalComputeQueueNV vkCreateExternalComputeQueueNV;\n\tPFN_vkDestroyExternalComputeQueueNV vkDestroyExternalComputeQueueNV;\n\tPFN_vkGetExternalComputeQueueDataNV vkGetExternalComputeQueueDataNV;\n#else\n\tPFN_vkVoidFunction padding_4f947e0b[3];\n#endif /* defined(VK_NV_external_compute_queue) */\n#if 
defined(VK_NV_external_memory_rdma)\n\tPFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;\n#else\n\tPFN_vkVoidFunction padding_920e405[1];\n#endif /* defined(VK_NV_external_memory_rdma) */\n#if defined(VK_NV_external_memory_win32)\n\tPFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;\n#else\n\tPFN_vkVoidFunction padding_c13d6f3a[1];\n#endif /* defined(VK_NV_external_memory_win32) */\n#if defined(VK_NV_fragment_shading_rate_enums)\n\tPFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;\n#else\n\tPFN_vkVoidFunction padding_4979ca14[1];\n#endif /* defined(VK_NV_fragment_shading_rate_enums) */\n#if defined(VK_NV_low_latency2)\n\tPFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;\n\tPFN_vkLatencySleepNV vkLatencySleepNV;\n\tPFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;\n\tPFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;\n\tPFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;\n#else\n\tPFN_vkVoidFunction padding_fabf8b19[5];\n#endif /* defined(VK_NV_low_latency2) */\n#if defined(VK_NV_memory_decompression)\n\tPFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;\n\tPFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;\n#else\n\tPFN_vkVoidFunction padding_706009[2];\n#endif /* defined(VK_NV_memory_decompression) */\n#if defined(VK_NV_mesh_shader)\n\tPFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;\n\tPFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV;\n#else\n\tPFN_vkVoidFunction padding_ac232758[2];\n#endif /* defined(VK_NV_mesh_shader) */\n#if defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\n\tPFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;\n#else\n\tPFN_vkVoidFunction padding_53495be7[1];\n#endif /* defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_NV_optical_flow)\n\tPFN_vkBindOpticalFlowSessionImageNV 
vkBindOpticalFlowSessionImageNV;\n\tPFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;\n\tPFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;\n\tPFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;\n#else\n\tPFN_vkVoidFunction padding_f67571eb[4];\n#endif /* defined(VK_NV_optical_flow) */\n#if defined(VK_NV_partitioned_acceleration_structure)\n\tPFN_vkCmdBuildPartitionedAccelerationStructuresNV vkCmdBuildPartitionedAccelerationStructuresNV;\n\tPFN_vkGetPartitionedAccelerationStructuresBuildSizesNV vkGetPartitionedAccelerationStructuresBuildSizesNV;\n#else\n\tPFN_vkVoidFunction padding_d27c8c6d[2];\n#endif /* defined(VK_NV_partitioned_acceleration_structure) */\n#if defined(VK_NV_ray_tracing)\n\tPFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;\n\tPFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;\n\tPFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;\n\tPFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;\n\tPFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;\n\tPFN_vkCompileDeferredNV vkCompileDeferredNV;\n\tPFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;\n\tPFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;\n\tPFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;\n\tPFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;\n\tPFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;\n\tPFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;\n#else\n\tPFN_vkVoidFunction padding_feefbeac[12];\n#endif /* defined(VK_NV_ray_tracing) */\n#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2\n\tPFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;\n#else\n\tPFN_vkVoidFunction padding_e3c24f80[1];\n#endif /* defined(VK_NV_scissor_exclusive) && 
VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_scissor_exclusive)\n\tPFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;\n#else\n\tPFN_vkVoidFunction padding_8e88d86c[1];\n#endif /* defined(VK_NV_scissor_exclusive) */\n#if defined(VK_NV_shading_rate_image)\n\tPFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;\n\tPFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;\n\tPFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;\n#else\n\tPFN_vkVoidFunction padding_92a0767f[3];\n#endif /* defined(VK_NV_shading_rate_image) */\n#if defined(VK_QCOM_tile_memory_heap)\n\tPFN_vkCmdBindTileMemoryQCOM vkCmdBindTileMemoryQCOM;\n#else\n\tPFN_vkVoidFunction padding_e2d55d04[1];\n#endif /* defined(VK_QCOM_tile_memory_heap) */\n#if defined(VK_QCOM_tile_properties)\n\tPFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;\n\tPFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;\n#else\n\tPFN_vkVoidFunction padding_be12e32[2];\n#endif /* defined(VK_QCOM_tile_properties) */\n#if defined(VK_QCOM_tile_shading)\n\tPFN_vkCmdBeginPerTileExecutionQCOM vkCmdBeginPerTileExecutionQCOM;\n\tPFN_vkCmdDispatchTileQCOM vkCmdDispatchTileQCOM;\n\tPFN_vkCmdEndPerTileExecutionQCOM vkCmdEndPerTileExecutionQCOM;\n#else\n\tPFN_vkVoidFunction padding_fcd9e1df[3];\n#endif /* defined(VK_QCOM_tile_shading) */\n#if defined(VK_QNX_external_memory_screen_buffer)\n\tPFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;\n#else\n\tPFN_vkVoidFunction padding_1c27735d[1];\n#endif /* defined(VK_QNX_external_memory_screen_buffer) */\n#if defined(VK_VALVE_descriptor_set_host_mapping)\n\tPFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;\n\tPFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;\n#else\n\tPFN_vkVoidFunction padding_fd71e4c6[2];\n#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */\n#if 
(defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control))\n\tPFN_vkCmdSetDepthClampRangeEXT vkCmdSetDepthClampRangeEXT;\n#else\n\tPFN_vkVoidFunction padding_faa18a61[1];\n#endif /* (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control)) */\n#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;\n\tPFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;\n\tPFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;\n\tPFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;\n\tPFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;\n\tPFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;\n\tPFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;\n\tPFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;\n\tPFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;\n\tPFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;\n\tPFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;\n\tPFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;\n#else\n\tPFN_vkVoidFunction padding_3e8c720f[12];\n#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;\n\tPFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;\n\tPFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;\n\tPFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;\n\tPFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_b93e02a6[5];\n#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetAlphaToCoverageEnableEXT 
vkCmdSetAlphaToCoverageEnableEXT;\n\tPFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;\n\tPFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;\n\tPFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;\n\tPFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;\n\tPFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;\n\tPFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;\n\tPFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;\n\tPFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;\n\tPFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;\n#else\n\tPFN_vkVoidFunction padding_ab566e7e[10];\n#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;\n#else\n\tPFN_vkVoidFunction padding_6730ed0c[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback))\n\tPFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;\n#else\n\tPFN_vkVoidFunction padding_d3ebb335[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization))\n\tPFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;\n\tPFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;\n#else\n\tPFN_vkVoidFunction 
padding_a21758f4[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable))\n\tPFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_a498a838[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations))\n\tPFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_67db38de[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced))\n\tPFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;\n#else\n\tPFN_vkVoidFunction padding_fbea7481[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex))\n\tPFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;\n#else\n\tPFN_vkVoidFunction padding_3a8ec90e[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */\n#if 
(defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization))\n\tPFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;\n\tPFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_29cdb756[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control))\n\tPFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;\n#else\n\tPFN_vkVoidFunction padding_815a7240[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))\n\tPFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;\n#else\n\tPFN_vkVoidFunction padding_d1f00511[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))\n\tPFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;\n#else\n\tPFN_vkVoidFunction padding_7a73d553[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_fragment_coverage_to_color))\n\tPFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV;\n\tPFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;\n#else\n\tPFN_vkVoidFunction padding_6045fb8c[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))\n\tPFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;\n\tPFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;\n\tPFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;\n#else\n\tPFN_vkVoidFunction padding_bdc35c80[3];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))\n\tPFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;\n#else\n\tPFN_vkVoidFunction padding_9a5cd6e8[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))\n\tPFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;\n#else\n\tPFN_vkVoidFunction padding_3ee17e96[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_representative_fragment_test)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))\n\tPFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV;\n#else\n\tPFN_vkVoidFunction padding_263d525a[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */\n#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))\n\tPFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;\n#else\n\tPFN_vkVoidFunction padding_ecddace1[1];\n#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */\n#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))\n\tPFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;\n#else\n\tPFN_vkVoidFunction padding_d83e1de1[1];\n#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */\n#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))\n\tPFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;\n#else\n\tPFN_vkVoidFunction padding_60f8358a[1];\n#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\n\tPFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;\n\tPFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;\n#else\n\tPFN_vkVoidFunction padding_460290c6[2];\n#endif /* 
(defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\n\tPFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;\n#else\n\tPFN_vkVoidFunction padding_cffc198[1];\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n\t/* VOLK_GENERATE_DEVICE_TABLE */\n};\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/* VOLK_GENERATE_PROTOTYPES_H */\n#if defined(VK_VERSION_1_0)\nextern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\nextern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\nextern PFN_vkAllocateMemory vkAllocateMemory;\nextern PFN_vkBeginCommandBuffer vkBeginCommandBuffer;\nextern PFN_vkBindBufferMemory vkBindBufferMemory;\nextern PFN_vkBindImageMemory vkBindImageMemory;\nextern PFN_vkCmdBeginQuery vkCmdBeginQuery;\nextern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\nextern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;\nextern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\nextern PFN_vkCmdBindPipeline vkCmdBindPipeline;\nextern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\nextern PFN_vkCmdBlitImage vkCmdBlitImage;\nextern PFN_vkCmdClearAttachments vkCmdClearAttachments;\nextern PFN_vkCmdClearColorImage vkCmdClearColorImage;\nextern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\nextern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;\nextern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\nextern PFN_vkCmdCopyImage vkCmdCopyImage;\nextern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\nextern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\nextern PFN_vkCmdDispatch vkCmdDispatch;\nextern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\nextern PFN_vkCmdDraw vkCmdDraw;\nextern PFN_vkCmdDrawIndexed vkCmdDrawIndexed;\nextern PFN_vkCmdDrawIndexedIndirect 
vkCmdDrawIndexedIndirect;\nextern PFN_vkCmdDrawIndirect vkCmdDrawIndirect;\nextern PFN_vkCmdEndQuery vkCmdEndQuery;\nextern PFN_vkCmdEndRenderPass vkCmdEndRenderPass;\nextern PFN_vkCmdExecuteCommands vkCmdExecuteCommands;\nextern PFN_vkCmdFillBuffer vkCmdFillBuffer;\nextern PFN_vkCmdNextSubpass vkCmdNextSubpass;\nextern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\nextern PFN_vkCmdPushConstants vkCmdPushConstants;\nextern PFN_vkCmdResetEvent vkCmdResetEvent;\nextern PFN_vkCmdResetQueryPool vkCmdResetQueryPool;\nextern PFN_vkCmdResolveImage vkCmdResolveImage;\nextern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\nextern PFN_vkCmdSetDepthBias vkCmdSetDepthBias;\nextern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\nextern PFN_vkCmdSetEvent vkCmdSetEvent;\nextern PFN_vkCmdSetLineWidth vkCmdSetLineWidth;\nextern PFN_vkCmdSetScissor vkCmdSetScissor;\nextern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\nextern PFN_vkCmdSetStencilReference vkCmdSetStencilReference;\nextern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\nextern PFN_vkCmdSetViewport vkCmdSetViewport;\nextern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\nextern PFN_vkCmdWaitEvents vkCmdWaitEvents;\nextern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\nextern PFN_vkCreateBuffer vkCreateBuffer;\nextern PFN_vkCreateBufferView vkCreateBufferView;\nextern PFN_vkCreateCommandPool vkCreateCommandPool;\nextern PFN_vkCreateComputePipelines vkCreateComputePipelines;\nextern PFN_vkCreateDescriptorPool vkCreateDescriptorPool;\nextern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\nextern PFN_vkCreateDevice vkCreateDevice;\nextern PFN_vkCreateEvent vkCreateEvent;\nextern PFN_vkCreateFence vkCreateFence;\nextern PFN_vkCreateFramebuffer vkCreateFramebuffer;\nextern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\nextern PFN_vkCreateImage vkCreateImage;\nextern PFN_vkCreateImageView vkCreateImageView;\nextern PFN_vkCreateInstance vkCreateInstance;\nextern 
PFN_vkCreatePipelineCache vkCreatePipelineCache;\nextern PFN_vkCreatePipelineLayout vkCreatePipelineLayout;\nextern PFN_vkCreateQueryPool vkCreateQueryPool;\nextern PFN_vkCreateRenderPass vkCreateRenderPass;\nextern PFN_vkCreateSampler vkCreateSampler;\nextern PFN_vkCreateSemaphore vkCreateSemaphore;\nextern PFN_vkCreateShaderModule vkCreateShaderModule;\nextern PFN_vkDestroyBuffer vkDestroyBuffer;\nextern PFN_vkDestroyBufferView vkDestroyBufferView;\nextern PFN_vkDestroyCommandPool vkDestroyCommandPool;\nextern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\nextern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\nextern PFN_vkDestroyDevice vkDestroyDevice;\nextern PFN_vkDestroyEvent vkDestroyEvent;\nextern PFN_vkDestroyFence vkDestroyFence;\nextern PFN_vkDestroyFramebuffer vkDestroyFramebuffer;\nextern PFN_vkDestroyImage vkDestroyImage;\nextern PFN_vkDestroyImageView vkDestroyImageView;\nextern PFN_vkDestroyInstance vkDestroyInstance;\nextern PFN_vkDestroyPipeline vkDestroyPipeline;\nextern PFN_vkDestroyPipelineCache vkDestroyPipelineCache;\nextern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\nextern PFN_vkDestroyQueryPool vkDestroyQueryPool;\nextern PFN_vkDestroyRenderPass vkDestroyRenderPass;\nextern PFN_vkDestroySampler vkDestroySampler;\nextern PFN_vkDestroySemaphore vkDestroySemaphore;\nextern PFN_vkDestroyShaderModule vkDestroyShaderModule;\nextern PFN_vkDeviceWaitIdle vkDeviceWaitIdle;\nextern PFN_vkEndCommandBuffer vkEndCommandBuffer;\nextern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;\nextern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties;\nextern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;\nextern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties;\nextern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;\nextern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\nextern 
PFN_vkFreeCommandBuffers vkFreeCommandBuffers;\nextern PFN_vkFreeDescriptorSets vkFreeDescriptorSets;\nextern PFN_vkFreeMemory vkFreeMemory;\nextern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\nextern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\nextern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;\nextern PFN_vkGetDeviceQueue vkGetDeviceQueue;\nextern PFN_vkGetEventStatus vkGetEventStatus;\nextern PFN_vkGetFenceStatus vkGetFenceStatus;\nextern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\nextern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\nextern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\nextern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;\nextern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;\nextern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties;\nextern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;\nextern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties;\nextern PFN_vkGetPipelineCacheData vkGetPipelineCacheData;\nextern PFN_vkGetQueryPoolResults vkGetQueryPoolResults;\nextern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\nextern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\nextern PFN_vkMapMemory vkMapMemory;\nextern PFN_vkMergePipelineCaches vkMergePipelineCaches;\nextern PFN_vkQueueBindSparse vkQueueBindSparse;\nextern PFN_vkQueueSubmit vkQueueSubmit;\nextern PFN_vkQueueWaitIdle vkQueueWaitIdle;\nextern PFN_vkResetCommandBuffer vkResetCommandBuffer;\nextern PFN_vkResetCommandPool vkResetCommandPool;\nextern PFN_vkResetDescriptorPool 
vkResetDescriptorPool;\nextern PFN_vkResetEvent vkResetEvent;\nextern PFN_vkResetFences vkResetFences;\nextern PFN_vkSetEvent vkSetEvent;\nextern PFN_vkUnmapMemory vkUnmapMemory;\nextern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\nextern PFN_vkWaitForFences vkWaitForFences;\n#endif /* defined(VK_VERSION_1_0) */\n#if defined(VK_VERSION_1_1)\nextern PFN_vkBindBufferMemory2 vkBindBufferMemory2;\nextern PFN_vkBindImageMemory2 vkBindImageMemory2;\nextern PFN_vkCmdDispatchBase vkCmdDispatchBase;\nextern PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;\nextern PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;\nextern PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;\nextern PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;\nextern PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;\nextern PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;\nextern PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups;\nextern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;\nextern PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;\nextern PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;\nextern PFN_vkGetDeviceQueue2 vkGetDeviceQueue2;\nextern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\nextern PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;\nextern PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties;\nextern PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties;\nextern PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties;\nextern PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2;\nextern PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties2 
vkGetPhysicalDeviceImageFormatProperties2;\nextern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;\nextern PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2;\nextern PFN_vkTrimCommandPool vkTrimCommandPool;\nextern PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;\n#endif /* defined(VK_VERSION_1_1) */\n#if defined(VK_VERSION_1_2)\nextern PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;\nextern PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;\nextern PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;\nextern PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;\nextern PFN_vkCmdNextSubpass2 vkCmdNextSubpass2;\nextern PFN_vkCreateRenderPass2 vkCreateRenderPass2;\nextern PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;\nextern PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;\nextern PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;\nextern PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;\nextern PFN_vkResetQueryPool vkResetQueryPool;\nextern PFN_vkSignalSemaphore vkSignalSemaphore;\nextern PFN_vkWaitSemaphores vkWaitSemaphores;\n#endif /* defined(VK_VERSION_1_2) */\n#if defined(VK_VERSION_1_3)\nextern PFN_vkCmdBeginRendering vkCmdBeginRendering;\nextern PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;\nextern PFN_vkCmdBlitImage2 vkCmdBlitImage2;\nextern PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;\nextern PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;\nextern PFN_vkCmdCopyImage2 vkCmdCopyImage2;\nextern PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;\nextern PFN_vkCmdEndRendering vkCmdEndRendering;\nextern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\nextern PFN_vkCmdResetEvent2 vkCmdResetEvent2;\nextern 
PFN_vkCmdResolveImage2 vkCmdResolveImage2;\nextern PFN_vkCmdSetCullMode vkCmdSetCullMode;\nextern PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;\nextern PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;\nextern PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;\nextern PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;\nextern PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;\nextern PFN_vkCmdSetEvent2 vkCmdSetEvent2;\nextern PFN_vkCmdSetFrontFace vkCmdSetFrontFace;\nextern PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;\nextern PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;\nextern PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;\nextern PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;\nextern PFN_vkCmdSetStencilOp vkCmdSetStencilOp;\nextern PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;\nextern PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;\nextern PFN_vkCmdWaitEvents2 vkCmdWaitEvents2;\nextern PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;\nextern PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;\nextern PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;\nextern PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;\nextern PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;\nextern PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;\nextern PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties;\nextern PFN_vkGetPrivateData vkGetPrivateData;\nextern PFN_vkQueueSubmit2 vkQueueSubmit2;\nextern PFN_vkSetPrivateData vkSetPrivateData;\n#endif /* defined(VK_VERSION_1_3) */\n#if defined(VK_VERSION_1_4)\nextern PFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2;\nextern PFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2;\nextern PFN_vkCmdPushConstants2 vkCmdPushConstants2;\nextern PFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet;\nextern PFN_vkCmdPushDescriptorSet2 
vkCmdPushDescriptorSet2;\nextern PFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate;\nextern PFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2;\nextern PFN_vkCmdSetLineStipple vkCmdSetLineStipple;\nextern PFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations;\nextern PFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices;\nextern PFN_vkCopyImageToImage vkCopyImageToImage;\nextern PFN_vkCopyImageToMemory vkCopyImageToMemory;\nextern PFN_vkCopyMemoryToImage vkCopyMemoryToImage;\nextern PFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout;\nextern PFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2;\nextern PFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity;\nextern PFN_vkMapMemory2 vkMapMemory2;\nextern PFN_vkTransitionImageLayout vkTransitionImageLayout;\nextern PFN_vkUnmapMemory2 vkUnmapMemory2;\n#endif /* defined(VK_VERSION_1_4) */\n#if defined(VK_AMDX_shader_enqueue)\nextern PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;\nextern PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;\nextern PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;\nextern PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;\nextern PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;\nextern PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;\nextern PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;\n#endif /* defined(VK_AMDX_shader_enqueue) */\n#if defined(VK_AMD_anti_lag)\nextern PFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD;\n#endif /* defined(VK_AMD_anti_lag) */\n#if defined(VK_AMD_buffer_marker)\nextern PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;\n#endif /* defined(VK_AMD_buffer_marker) */\n#if defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || 
defined(VK_KHR_synchronization2))\nextern PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;\n#endif /* defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_AMD_display_native_hdr)\nextern PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;\n#endif /* defined(VK_AMD_display_native_hdr) */\n#if defined(VK_AMD_draw_indirect_count)\nextern PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;\nextern PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;\n#endif /* defined(VK_AMD_draw_indirect_count) */\n#if defined(VK_AMD_shader_info)\nextern PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;\n#endif /* defined(VK_AMD_shader_info) */\n#if defined(VK_ANDROID_external_memory_android_hardware_buffer)\nextern PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;\nextern PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;\n#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */\n#if defined(VK_ARM_data_graph)\nextern PFN_vkBindDataGraphPipelineSessionMemoryARM vkBindDataGraphPipelineSessionMemoryARM;\nextern PFN_vkCmdDispatchDataGraphARM vkCmdDispatchDataGraphARM;\nextern PFN_vkCreateDataGraphPipelineSessionARM vkCreateDataGraphPipelineSessionARM;\nextern PFN_vkCreateDataGraphPipelinesARM vkCreateDataGraphPipelinesARM;\nextern PFN_vkDestroyDataGraphPipelineSessionARM vkDestroyDataGraphPipelineSessionARM;\nextern PFN_vkGetDataGraphPipelineAvailablePropertiesARM vkGetDataGraphPipelineAvailablePropertiesARM;\nextern PFN_vkGetDataGraphPipelinePropertiesARM vkGetDataGraphPipelinePropertiesARM;\nextern PFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM vkGetDataGraphPipelineSessionBindPointRequirementsARM;\nextern PFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM vkGetDataGraphPipelineSessionMemoryRequirementsARM;\nextern PFN_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM 
vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM;\nextern PFN_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM;\n#endif /* defined(VK_ARM_data_graph) */\n#if defined(VK_ARM_tensors)\nextern PFN_vkBindTensorMemoryARM vkBindTensorMemoryARM;\nextern PFN_vkCmdCopyTensorARM vkCmdCopyTensorARM;\nextern PFN_vkCreateTensorARM vkCreateTensorARM;\nextern PFN_vkCreateTensorViewARM vkCreateTensorViewARM;\nextern PFN_vkDestroyTensorARM vkDestroyTensorARM;\nextern PFN_vkDestroyTensorViewARM vkDestroyTensorViewARM;\nextern PFN_vkGetDeviceTensorMemoryRequirementsARM vkGetDeviceTensorMemoryRequirementsARM;\nextern PFN_vkGetPhysicalDeviceExternalTensorPropertiesARM vkGetPhysicalDeviceExternalTensorPropertiesARM;\nextern PFN_vkGetTensorMemoryRequirementsARM vkGetTensorMemoryRequirementsARM;\n#endif /* defined(VK_ARM_tensors) */\n#if defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer)\nextern PFN_vkGetTensorOpaqueCaptureDescriptorDataARM vkGetTensorOpaqueCaptureDescriptorDataARM;\nextern PFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM vkGetTensorViewOpaqueCaptureDescriptorDataARM;\n#endif /* defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_acquire_drm_display)\nextern PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT;\nextern PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT;\n#endif /* defined(VK_EXT_acquire_drm_display) */\n#if defined(VK_EXT_acquire_xlib_display)\nextern PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT;\nextern PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT;\n#endif /* defined(VK_EXT_acquire_xlib_display) */\n#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)\nextern PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;\n#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */\n#if defined(VK_EXT_buffer_device_address)\nextern PFN_vkGetBufferDeviceAddressEXT 
vkGetBufferDeviceAddressEXT;\n#endif /* defined(VK_EXT_buffer_device_address) */\n#if defined(VK_EXT_calibrated_timestamps)\nextern PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;\nextern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT;\n#endif /* defined(VK_EXT_calibrated_timestamps) */\n#if defined(VK_EXT_color_write_enable)\nextern PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;\n#endif /* defined(VK_EXT_color_write_enable) */\n#if defined(VK_EXT_conditional_rendering)\nextern PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;\nextern PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;\n#endif /* defined(VK_EXT_conditional_rendering) */\n#if defined(VK_EXT_debug_marker)\nextern PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;\nextern PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;\nextern PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;\nextern PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;\nextern PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;\n#endif /* defined(VK_EXT_debug_marker) */\n#if defined(VK_EXT_debug_report)\nextern PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT;\nextern PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT;\nextern PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT;\n#endif /* defined(VK_EXT_debug_report) */\n#if defined(VK_EXT_debug_utils)\nextern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;\nextern PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;\nextern PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT;\nextern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;\nextern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT;\nextern PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT;\nextern PFN_vkQueueEndDebugUtilsLabelEXT 
vkQueueEndDebugUtilsLabelEXT;\nextern PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT;\nextern PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT;\nextern PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT;\nextern PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT;\n#endif /* defined(VK_EXT_debug_utils) */\n#if defined(VK_EXT_depth_bias_control)\nextern PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;\n#endif /* defined(VK_EXT_depth_bias_control) */\n#if defined(VK_EXT_descriptor_buffer)\nextern PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;\nextern PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;\nextern PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;\nextern PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetDescriptorEXT vkGetDescriptorEXT;\nextern PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;\nextern PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;\nextern PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;\n#endif /* defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))\nextern PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;\n#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */\n#if defined(VK_EXT_device_fault)\nextern PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;\n#endif /* defined(VK_EXT_device_fault) */\n#if 
defined(VK_EXT_device_generated_commands)\nextern PFN_vkCmdExecuteGeneratedCommandsEXT vkCmdExecuteGeneratedCommandsEXT;\nextern PFN_vkCmdPreprocessGeneratedCommandsEXT vkCmdPreprocessGeneratedCommandsEXT;\nextern PFN_vkCreateIndirectCommandsLayoutEXT vkCreateIndirectCommandsLayoutEXT;\nextern PFN_vkCreateIndirectExecutionSetEXT vkCreateIndirectExecutionSetEXT;\nextern PFN_vkDestroyIndirectCommandsLayoutEXT vkDestroyIndirectCommandsLayoutEXT;\nextern PFN_vkDestroyIndirectExecutionSetEXT vkDestroyIndirectExecutionSetEXT;\nextern PFN_vkGetGeneratedCommandsMemoryRequirementsEXT vkGetGeneratedCommandsMemoryRequirementsEXT;\nextern PFN_vkUpdateIndirectExecutionSetPipelineEXT vkUpdateIndirectExecutionSetPipelineEXT;\nextern PFN_vkUpdateIndirectExecutionSetShaderEXT vkUpdateIndirectExecutionSetShaderEXT;\n#endif /* defined(VK_EXT_device_generated_commands) */\n#if defined(VK_EXT_direct_mode_display)\nextern PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT;\n#endif /* defined(VK_EXT_direct_mode_display) */\n#if defined(VK_EXT_directfb_surface)\nextern PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT;\nextern PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT;\n#endif /* defined(VK_EXT_directfb_surface) */\n#if defined(VK_EXT_discard_rectangles)\nextern PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;\n#endif /* defined(VK_EXT_discard_rectangles) */\n#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2\nextern PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;\nextern PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;\n#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */\n#if defined(VK_EXT_display_control)\nextern PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;\nextern PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT;\nextern PFN_vkRegisterDeviceEventEXT 
vkRegisterDeviceEventEXT;\nextern PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;\n#endif /* defined(VK_EXT_display_control) */\n#if defined(VK_EXT_display_surface_counter)\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT;\n#endif /* defined(VK_EXT_display_surface_counter) */\n#if defined(VK_EXT_external_memory_host)\nextern PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;\n#endif /* defined(VK_EXT_external_memory_host) */\n#if defined(VK_EXT_external_memory_metal)\nextern PFN_vkGetMemoryMetalHandleEXT vkGetMemoryMetalHandleEXT;\nextern PFN_vkGetMemoryMetalHandlePropertiesEXT vkGetMemoryMetalHandlePropertiesEXT;\n#endif /* defined(VK_EXT_external_memory_metal) */\n#if defined(VK_EXT_fragment_density_map_offset)\nextern PFN_vkCmdEndRendering2EXT vkCmdEndRendering2EXT;\n#endif /* defined(VK_EXT_fragment_density_map_offset) */\n#if defined(VK_EXT_full_screen_exclusive)\nextern PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT;\nextern PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;\n#endif /* defined(VK_EXT_full_screen_exclusive) */\n#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1))\nextern PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;\n#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */\n#if defined(VK_EXT_hdr_metadata)\nextern PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;\n#endif /* defined(VK_EXT_hdr_metadata) */\n#if defined(VK_EXT_headless_surface)\nextern PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT;\n#endif /* defined(VK_EXT_headless_surface) */\n#if defined(VK_EXT_host_image_copy)\nextern PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;\nextern 
PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;\nextern PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT;\nextern PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;\n#endif /* defined(VK_EXT_host_image_copy) */\n#if defined(VK_EXT_host_query_reset)\nextern PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;\n#endif /* defined(VK_EXT_host_query_reset) */\n#if defined(VK_EXT_image_drm_format_modifier)\nextern PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;\n#endif /* defined(VK_EXT_image_drm_format_modifier) */\n#if defined(VK_EXT_line_rasterization)\nextern PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;\n#endif /* defined(VK_EXT_line_rasterization) */\n#if defined(VK_EXT_mesh_shader)\nextern PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;\nextern PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;\n#endif /* defined(VK_EXT_mesh_shader) */\n#if defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\nextern PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;\n#endif /* defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_EXT_metal_objects)\nextern PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;\n#endif /* defined(VK_EXT_metal_objects) */\n#if defined(VK_EXT_metal_surface)\nextern PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT;\n#endif /* defined(VK_EXT_metal_surface) */\n#if defined(VK_EXT_multi_draw)\nextern PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;\nextern PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;\n#endif /* defined(VK_EXT_multi_draw) */\n#if defined(VK_EXT_opacity_micromap)\nextern PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;\nextern PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;\nextern PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT;\nextern PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;\nextern PFN_vkCmdCopyMicromapToMemoryEXT 
vkCmdCopyMicromapToMemoryEXT;\nextern PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;\nextern PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;\nextern PFN_vkCopyMicromapEXT vkCopyMicromapEXT;\nextern PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;\nextern PFN_vkCreateMicromapEXT vkCreateMicromapEXT;\nextern PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;\nextern PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;\nextern PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;\nextern PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;\n#endif /* defined(VK_EXT_opacity_micromap) */\n#if defined(VK_EXT_pageable_device_local_memory)\nextern PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;\n#endif /* defined(VK_EXT_pageable_device_local_memory) */\n#if defined(VK_EXT_pipeline_properties)\nextern PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;\n#endif /* defined(VK_EXT_pipeline_properties) */\n#if defined(VK_EXT_private_data)\nextern PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;\nextern PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;\nextern PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;\nextern PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;\n#endif /* defined(VK_EXT_private_data) */\n#if defined(VK_EXT_sample_locations)\nextern PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;\nextern PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT;\n#endif /* defined(VK_EXT_sample_locations) */\n#if defined(VK_EXT_shader_module_identifier)\nextern PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;\nextern PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;\n#endif /* defined(VK_EXT_shader_module_identifier) */\n#if defined(VK_EXT_shader_object)\nextern PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;\nextern PFN_vkCreateShadersEXT vkCreateShadersEXT;\nextern 
PFN_vkDestroyShaderEXT vkDestroyShaderEXT;\nextern PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;\n#endif /* defined(VK_EXT_shader_object) */\n#if defined(VK_EXT_swapchain_maintenance1)\nextern PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;\n#endif /* defined(VK_EXT_swapchain_maintenance1) */\n#if defined(VK_EXT_tooling_info)\nextern PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT;\n#endif /* defined(VK_EXT_tooling_info) */\n#if defined(VK_EXT_transform_feedback)\nextern PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;\nextern PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;\nextern PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;\nextern PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;\nextern PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;\nextern PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;\n#endif /* defined(VK_EXT_transform_feedback) */\n#if defined(VK_EXT_validation_cache)\nextern PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;\nextern PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;\nextern PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;\nextern PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;\n#endif /* defined(VK_EXT_validation_cache) */\n#if defined(VK_FUCHSIA_buffer_collection)\nextern PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;\nextern PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;\nextern PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;\nextern PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;\nextern PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA;\n#endif /* defined(VK_FUCHSIA_buffer_collection) */\n#if defined(VK_FUCHSIA_external_memory)\nextern PFN_vkGetMemoryZirconHandleFUCHSIA 
vkGetMemoryZirconHandleFUCHSIA;\nextern PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;\n#endif /* defined(VK_FUCHSIA_external_memory) */\n#if defined(VK_FUCHSIA_external_semaphore)\nextern PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;\nextern PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;\n#endif /* defined(VK_FUCHSIA_external_semaphore) */\n#if defined(VK_FUCHSIA_imagepipe_surface)\nextern PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA;\n#endif /* defined(VK_FUCHSIA_imagepipe_surface) */\n#if defined(VK_GGP_stream_descriptor_surface)\nextern PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP;\n#endif /* defined(VK_GGP_stream_descriptor_surface) */\n#if defined(VK_GOOGLE_display_timing)\nextern PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;\nextern PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;\n#endif /* defined(VK_GOOGLE_display_timing) */\n#if defined(VK_HUAWEI_cluster_culling_shader)\nextern PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;\nextern PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;\n#endif /* defined(VK_HUAWEI_cluster_culling_shader) */\n#if defined(VK_HUAWEI_invocation_mask)\nextern PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;\n#endif /* defined(VK_HUAWEI_invocation_mask) */\n#if defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2\nextern PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;\n#endif /* defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2 */\n#if defined(VK_HUAWEI_subpass_shading)\nextern PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;\n#endif /* defined(VK_HUAWEI_subpass_shading) */\n#if defined(VK_INTEL_performance_query)\nextern PFN_vkAcquirePerformanceConfigurationINTEL 
vkAcquirePerformanceConfigurationINTEL;\nextern PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;\nextern PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;\nextern PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;\nextern PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;\nextern PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;\nextern PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;\nextern PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;\nextern PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;\n#endif /* defined(VK_INTEL_performance_query) */\n#if defined(VK_KHR_acceleration_structure)\nextern PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;\nextern PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;\nextern PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;\nextern PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;\nextern PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;\nextern PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;\nextern PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;\nextern PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;\nextern PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;\nextern PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;\nextern PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;\nextern PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;\nextern PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;\nextern 
PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;\nextern PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;\nextern PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;\n#endif /* defined(VK_KHR_acceleration_structure) */\n#if defined(VK_KHR_android_surface)\nextern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;\n#endif /* defined(VK_KHR_android_surface) */\n#if defined(VK_KHR_bind_memory2)\nextern PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;\nextern PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;\n#endif /* defined(VK_KHR_bind_memory2) */\n#if defined(VK_KHR_buffer_device_address)\nextern PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;\nextern PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;\nextern PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;\n#endif /* defined(VK_KHR_buffer_device_address) */\n#if defined(VK_KHR_calibrated_timestamps)\nextern PFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR;\nextern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR vkGetPhysicalDeviceCalibrateableTimeDomainsKHR;\n#endif /* defined(VK_KHR_calibrated_timestamps) */\n#if defined(VK_KHR_cooperative_matrix)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR;\n#endif /* defined(VK_KHR_cooperative_matrix) */\n#if defined(VK_KHR_copy_commands2)\nextern PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;\nextern PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;\nextern PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;\nextern PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;\nextern PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;\nextern PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;\n#endif /* defined(VK_KHR_copy_commands2) */\n#if defined(VK_KHR_create_renderpass2)\nextern 
PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;\nextern PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;\nextern PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;\nextern PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;\n#endif /* defined(VK_KHR_create_renderpass2) */\n#if defined(VK_KHR_deferred_host_operations)\nextern PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;\nextern PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;\nextern PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;\nextern PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;\nextern PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;\n#endif /* defined(VK_KHR_deferred_host_operations) */\n#if defined(VK_KHR_descriptor_update_template)\nextern PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;\nextern PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;\nextern PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;\n#endif /* defined(VK_KHR_descriptor_update_template) */\n#if defined(VK_KHR_device_group)\nextern PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;\nextern PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;\nextern PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;\n#endif /* defined(VK_KHR_device_group) */\n#if defined(VK_KHR_device_group_creation)\nextern PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR;\n#endif /* defined(VK_KHR_device_group_creation) */\n#if defined(VK_KHR_display)\nextern PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR;\nextern PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR;\nextern PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR;\nextern PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR;\nextern PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR;\nextern 
PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR;\nextern PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR;\n#endif /* defined(VK_KHR_display) */\n#if defined(VK_KHR_display_swapchain)\nextern PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;\n#endif /* defined(VK_KHR_display_swapchain) */\n#if defined(VK_KHR_draw_indirect_count)\nextern PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;\nextern PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;\n#endif /* defined(VK_KHR_draw_indirect_count) */\n#if defined(VK_KHR_dynamic_rendering)\nextern PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;\nextern PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR;\n#endif /* defined(VK_KHR_dynamic_rendering) */\n#if defined(VK_KHR_dynamic_rendering_local_read)\nextern PFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR;\nextern PFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR;\n#endif /* defined(VK_KHR_dynamic_rendering_local_read) */\n#if defined(VK_KHR_external_fence_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR;\n#endif /* defined(VK_KHR_external_fence_capabilities) */\n#if defined(VK_KHR_external_fence_fd)\nextern PFN_vkGetFenceFdKHR vkGetFenceFdKHR;\nextern PFN_vkImportFenceFdKHR vkImportFenceFdKHR;\n#endif /* defined(VK_KHR_external_fence_fd) */\n#if defined(VK_KHR_external_fence_win32)\nextern PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;\nextern PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;\n#endif /* defined(VK_KHR_external_fence_win32) */\n#if defined(VK_KHR_external_memory_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_capabilities) */\n#if 
defined(VK_KHR_external_memory_fd)\nextern PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;\nextern PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_fd) */\n#if defined(VK_KHR_external_memory_win32)\nextern PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;\nextern PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_win32) */\n#if defined(VK_KHR_external_semaphore_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR;\n#endif /* defined(VK_KHR_external_semaphore_capabilities) */\n#if defined(VK_KHR_external_semaphore_fd)\nextern PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;\nextern PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;\n#endif /* defined(VK_KHR_external_semaphore_fd) */\n#if defined(VK_KHR_external_semaphore_win32)\nextern PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;\nextern PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;\n#endif /* defined(VK_KHR_external_semaphore_win32) */\n#if defined(VK_KHR_fragment_shading_rate)\nextern PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;\nextern PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR;\n#endif /* defined(VK_KHR_fragment_shading_rate) */\n#if defined(VK_KHR_get_display_properties2)\nextern PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR;\nextern PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR;\nextern PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR vkGetPhysicalDeviceDisplayPlaneProperties2KHR;\nextern PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR;\n#endif /* defined(VK_KHR_get_display_properties2) */\n#if defined(VK_KHR_get_memory_requirements2)\nextern PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;\nextern 
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;\nextern PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;\n#endif /* defined(VK_KHR_get_memory_requirements2) */\n#if defined(VK_KHR_get_physical_device_properties2)\nextern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;\nextern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR;\nextern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;\nextern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR;\n#endif /* defined(VK_KHR_get_physical_device_properties2) */\n#if defined(VK_KHR_get_surface_capabilities2)\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR;\n#endif /* defined(VK_KHR_get_surface_capabilities2) */\n#if defined(VK_KHR_line_rasterization)\nextern PFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR;\n#endif /* defined(VK_KHR_line_rasterization) */\n#if defined(VK_KHR_maintenance1)\nextern PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;\n#endif /* defined(VK_KHR_maintenance1) */\n#if defined(VK_KHR_maintenance3)\nextern PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;\n#endif /* defined(VK_KHR_maintenance3) */\n#if defined(VK_KHR_maintenance4)\nextern PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;\nextern PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;\nextern PFN_vkGetDeviceImageSparseMemoryRequirementsKHR 
vkGetDeviceImageSparseMemoryRequirementsKHR;\n#endif /* defined(VK_KHR_maintenance4) */\n#if defined(VK_KHR_maintenance5)\nextern PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;\nextern PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;\nextern PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;\nextern PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;\n#endif /* defined(VK_KHR_maintenance5) */\n#if defined(VK_KHR_maintenance6)\nextern PFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR;\nextern PFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR;\n#endif /* defined(VK_KHR_maintenance6) */\n#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor)\nextern PFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR;\nextern PFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR;\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer)\nextern PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT;\nextern PFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT;\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_KHR_map_memory2)\nextern PFN_vkMapMemory2KHR vkMapMemory2KHR;\nextern PFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;\n#endif /* defined(VK_KHR_map_memory2) */\n#if defined(VK_KHR_performance_query)\nextern PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;\nextern PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR;\nextern PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR;\nextern PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;\n#endif /* defined(VK_KHR_performance_query) */\n#if 
defined(VK_KHR_pipeline_binary)\nextern PFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR;\nextern PFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR;\nextern PFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR;\nextern PFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR;\nextern PFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR;\n#endif /* defined(VK_KHR_pipeline_binary) */\n#if defined(VK_KHR_pipeline_executable_properties)\nextern PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;\nextern PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;\nextern PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;\n#endif /* defined(VK_KHR_pipeline_executable_properties) */\n#if defined(VK_KHR_present_wait)\nextern PFN_vkWaitForPresentKHR vkWaitForPresentKHR;\n#endif /* defined(VK_KHR_present_wait) */\n#if defined(VK_KHR_present_wait2)\nextern PFN_vkWaitForPresent2KHR vkWaitForPresent2KHR;\n#endif /* defined(VK_KHR_present_wait2) */\n#if defined(VK_KHR_push_descriptor)\nextern PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;\n#endif /* defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)\nextern PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;\n#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_ray_tracing_pipeline)\nextern PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;\nextern PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;\nextern PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;\nextern PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;\nextern PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;\nextern PFN_vkGetRayTracingShaderGroupHandlesKHR 
vkGetRayTracingShaderGroupHandlesKHR;\nextern PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;\n#endif /* defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_sampler_ycbcr_conversion)\nextern PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;\nextern PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;\n#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */\n#if defined(VK_KHR_shared_presentable_image)\nextern PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;\n#endif /* defined(VK_KHR_shared_presentable_image) */\n#if defined(VK_KHR_surface)\nextern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;\n#endif /* defined(VK_KHR_surface) */\n#if defined(VK_KHR_swapchain)\nextern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\nextern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\nextern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\nextern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\nextern PFN_vkQueuePresentKHR vkQueuePresentKHR;\n#endif /* defined(VK_KHR_swapchain) */\n#if defined(VK_KHR_swapchain_maintenance1)\nextern PFN_vkReleaseSwapchainImagesKHR vkReleaseSwapchainImagesKHR;\n#endif /* defined(VK_KHR_swapchain_maintenance1) */\n#if defined(VK_KHR_synchronization2)\nextern PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;\nextern PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;\nextern PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;\nextern PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;\nextern PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;\nextern PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;\n#endif /* 
defined(VK_KHR_synchronization2) */\n#if defined(VK_KHR_timeline_semaphore)\nextern PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;\nextern PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;\nextern PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;\n#endif /* defined(VK_KHR_timeline_semaphore) */\n#if defined(VK_KHR_video_decode_queue)\nextern PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;\n#endif /* defined(VK_KHR_video_decode_queue) */\n#if defined(VK_KHR_video_encode_queue)\nextern PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;\nextern PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;\nextern PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR;\n#endif /* defined(VK_KHR_video_encode_queue) */\n#if defined(VK_KHR_video_queue)\nextern PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;\nextern PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;\nextern PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;\nextern PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;\nextern PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;\nextern PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;\nextern PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;\nextern PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;\nextern PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR;\nextern PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR;\nextern PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;\nextern PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;\n#endif /* defined(VK_KHR_video_queue) */\n#if defined(VK_KHR_wayland_surface)\nextern PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR 
vkGetPhysicalDeviceWaylandPresentationSupportKHR;\n#endif /* defined(VK_KHR_wayland_surface) */\n#if defined(VK_KHR_win32_surface)\nextern PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR;\nextern PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR;\n#endif /* defined(VK_KHR_win32_surface) */\n#if defined(VK_KHR_xcb_surface)\nextern PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR;\n#endif /* defined(VK_KHR_xcb_surface) */\n#if defined(VK_KHR_xlib_surface)\nextern PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR;\n#endif /* defined(VK_KHR_xlib_surface) */\n#if defined(VK_MVK_ios_surface)\nextern PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK;\n#endif /* defined(VK_MVK_ios_surface) */\n#if defined(VK_MVK_macos_surface)\nextern PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK;\n#endif /* defined(VK_MVK_macos_surface) */\n#if defined(VK_NN_vi_surface)\nextern PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN;\n#endif /* defined(VK_NN_vi_surface) */\n#if defined(VK_NVX_binary_import)\nextern PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;\nextern PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;\nextern PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;\nextern PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;\nextern PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;\n#endif /* defined(VK_NVX_binary_import) */\n#if defined(VK_NVX_image_view_handle)\nextern PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;\n#endif /* defined(VK_NVX_image_view_handle) */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3\nextern PFN_vkGetImageViewHandle64NVX vkGetImageViewHandle64NVX;\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3 */\n#if 
defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2\nextern PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_acquire_winrt_display)\nextern PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV;\nextern PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV;\n#endif /* defined(VK_NV_acquire_winrt_display) */\n#if defined(VK_NV_clip_space_w_scaling)\nextern PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;\n#endif /* defined(VK_NV_clip_space_w_scaling) */\n#if defined(VK_NV_cluster_acceleration_structure)\nextern PFN_vkCmdBuildClusterAccelerationStructureIndirectNV vkCmdBuildClusterAccelerationStructureIndirectNV;\nextern PFN_vkGetClusterAccelerationStructureBuildSizesNV vkGetClusterAccelerationStructureBuildSizesNV;\n#endif /* defined(VK_NV_cluster_acceleration_structure) */\n#if defined(VK_NV_cooperative_matrix)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV;\n#endif /* defined(VK_NV_cooperative_matrix) */\n#if defined(VK_NV_cooperative_matrix2)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV;\n#endif /* defined(VK_NV_cooperative_matrix2) */\n#if defined(VK_NV_cooperative_vector)\nextern PFN_vkCmdConvertCooperativeVectorMatrixNV vkCmdConvertCooperativeVectorMatrixNV;\nextern PFN_vkConvertCooperativeVectorMatrixNV vkConvertCooperativeVectorMatrixNV;\nextern PFN_vkGetPhysicalDeviceCooperativeVectorPropertiesNV vkGetPhysicalDeviceCooperativeVectorPropertiesNV;\n#endif /* defined(VK_NV_cooperative_vector) */\n#if defined(VK_NV_copy_memory_indirect)\nextern PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;\nextern PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;\n#endif /* defined(VK_NV_copy_memory_indirect) */\n#if 
defined(VK_NV_coverage_reduction_mode)\nextern PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV;\n#endif /* defined(VK_NV_coverage_reduction_mode) */\n#if defined(VK_NV_cuda_kernel_launch)\nextern PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;\nextern PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;\nextern PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;\nextern PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;\nextern PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;\nextern PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;\n#endif /* defined(VK_NV_cuda_kernel_launch) */\n#if defined(VK_NV_device_diagnostic_checkpoints)\nextern PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;\nextern PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) */\n#if defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\nextern PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_NV_device_generated_commands)\nextern PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;\nextern PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;\nextern PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;\nextern PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;\nextern PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;\nextern PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;\n#endif /* defined(VK_NV_device_generated_commands) */\n#if defined(VK_NV_device_generated_commands_compute)\nextern PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;\nextern PFN_vkGetPipelineIndirectDeviceAddressNV 
vkGetPipelineIndirectDeviceAddressNV;\nextern PFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;\n#endif /* defined(VK_NV_device_generated_commands_compute) */\n#if defined(VK_NV_external_compute_queue)\nextern PFN_vkCreateExternalComputeQueueNV vkCreateExternalComputeQueueNV;\nextern PFN_vkDestroyExternalComputeQueueNV vkDestroyExternalComputeQueueNV;\nextern PFN_vkGetExternalComputeQueueDataNV vkGetExternalComputeQueueDataNV;\n#endif /* defined(VK_NV_external_compute_queue) */\n#if defined(VK_NV_external_memory_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV;\n#endif /* defined(VK_NV_external_memory_capabilities) */\n#if defined(VK_NV_external_memory_rdma)\nextern PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;\n#endif /* defined(VK_NV_external_memory_rdma) */\n#if defined(VK_NV_external_memory_win32)\nextern PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;\n#endif /* defined(VK_NV_external_memory_win32) */\n#if defined(VK_NV_fragment_shading_rate_enums)\nextern PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;\n#endif /* defined(VK_NV_fragment_shading_rate_enums) */\n#if defined(VK_NV_low_latency2)\nextern PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;\nextern PFN_vkLatencySleepNV vkLatencySleepNV;\nextern PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;\nextern PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;\nextern PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;\n#endif /* defined(VK_NV_low_latency2) */\n#if defined(VK_NV_memory_decompression)\nextern PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;\nextern PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;\n#endif /* defined(VK_NV_memory_decompression) */\n#if defined(VK_NV_mesh_shader)\nextern PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;\nextern PFN_vkCmdDrawMeshTasksNV 
vkCmdDrawMeshTasksNV;\n#endif /* defined(VK_NV_mesh_shader) */\n#if defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\nextern PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;\n#endif /* defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_NV_optical_flow)\nextern PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV;\nextern PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;\nextern PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;\nextern PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;\nextern PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV vkGetPhysicalDeviceOpticalFlowImageFormatsNV;\n#endif /* defined(VK_NV_optical_flow) */\n#if defined(VK_NV_partitioned_acceleration_structure)\nextern PFN_vkCmdBuildPartitionedAccelerationStructuresNV vkCmdBuildPartitionedAccelerationStructuresNV;\nextern PFN_vkGetPartitionedAccelerationStructuresBuildSizesNV vkGetPartitionedAccelerationStructuresBuildSizesNV;\n#endif /* defined(VK_NV_partitioned_acceleration_structure) */\n#if defined(VK_NV_ray_tracing)\nextern PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;\nextern PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;\nextern PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;\nextern PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;\nextern PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;\nextern PFN_vkCompileDeferredNV vkCompileDeferredNV;\nextern PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;\nextern PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;\nextern PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;\nextern PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;\nextern 
PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;\nextern PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;\n#endif /* defined(VK_NV_ray_tracing) */\n#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2\nextern PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;\n#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_scissor_exclusive)\nextern PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;\n#endif /* defined(VK_NV_scissor_exclusive) */\n#if defined(VK_NV_shading_rate_image)\nextern PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;\nextern PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;\nextern PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;\n#endif /* defined(VK_NV_shading_rate_image) */\n#if defined(VK_OHOS_surface)\nextern PFN_vkCreateSurfaceOHOS vkCreateSurfaceOHOS;\n#endif /* defined(VK_OHOS_surface) */\n#if defined(VK_QCOM_tile_memory_heap)\nextern PFN_vkCmdBindTileMemoryQCOM vkCmdBindTileMemoryQCOM;\n#endif /* defined(VK_QCOM_tile_memory_heap) */\n#if defined(VK_QCOM_tile_properties)\nextern PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;\nextern PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;\n#endif /* defined(VK_QCOM_tile_properties) */\n#if defined(VK_QCOM_tile_shading)\nextern PFN_vkCmdBeginPerTileExecutionQCOM vkCmdBeginPerTileExecutionQCOM;\nextern PFN_vkCmdDispatchTileQCOM vkCmdDispatchTileQCOM;\nextern PFN_vkCmdEndPerTileExecutionQCOM vkCmdEndPerTileExecutionQCOM;\n#endif /* defined(VK_QCOM_tile_shading) */\n#if defined(VK_QNX_external_memory_screen_buffer)\nextern PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;\n#endif /* defined(VK_QNX_external_memory_screen_buffer) */\n#if defined(VK_QNX_screen_surface)\nextern 
PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX;\nextern PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX;\n#endif /* defined(VK_QNX_screen_surface) */\n#if defined(VK_VALVE_descriptor_set_host_mapping)\nextern PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;\nextern PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;\n#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */\n#if (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control))\nextern PFN_vkCmdSetDepthClampRangeEXT vkCmdSetDepthClampRangeEXT;\n#endif /* (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control)) */\n#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;\nextern PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;\nextern PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;\nextern PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;\nextern PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;\nextern PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;\nextern PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;\nextern PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;\nextern PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;\nextern PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;\nextern PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;\nextern PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;\nextern PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;\nextern 
PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;\nextern PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;\nextern PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT;\nextern PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;\nextern PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;\nextern PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;\nextern PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;\nextern PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;\nextern PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;\nextern PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;\nextern PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;\nextern PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback))\nextern PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */\n#if (defined(VK_EXT_extended_dynamic_state3) 
&& defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization))\nextern PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;\nextern PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable))\nextern PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations))\nextern PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced))\nextern PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex))\nextern PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && 
defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization))\nextern PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;\nextern PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control))\nextern PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))\nextern PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))\nextern PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color))\nextern PFN_vkCmdSetCoverageToColorEnableNV 
vkCmdSetCoverageToColorEnableNV;\nextern PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))\nextern PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;\nextern PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;\nextern PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))\nextern PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))\nextern PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))\nextern PFN_vkCmdSetCoverageReductionModeNV 
vkCmdSetCoverageReductionModeNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */\n#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))\nextern PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;\n#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */\n#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))\nextern PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;\n#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */\n#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))\nextern PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;\n#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\nextern PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;\nextern PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR;\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\nextern PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || 
(defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n/* VOLK_GENERATE_PROTOTYPES_H */\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n\n#ifdef VOLK_IMPLEMENTATION\n#undef VOLK_IMPLEMENTATION\n/* Prevent tools like dependency checkers from detecting a cyclic dependency */\n#define VOLK_SOURCE \"volk.c\"\n#include VOLK_SOURCE\n#endif\n\n/**\n * Copyright (c) 2018-2025 Arseny Kapoulkine\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n*/\n/* clang-format on */\n"], ["/lsfg-vk/framegen/src/core/pipeline.cpp", "#include \n#include \n\n#include \"core/pipeline.hpp\"\n#include \"core/device.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nPipeline::Pipeline(const Core::Device& device, const ShaderModule& shader) {\n // create pipeline layout\n VkDescriptorSetLayout shaderLayout = shader.getLayout();\n const VkPipelineLayoutCreateInfo layoutDesc{\n .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,\n .setLayoutCount = 1,\n .pSetLayouts = &shaderLayout,\n };\n VkPipelineLayout layoutHandle{};\n auto res = vkCreatePipelineLayout(device.handle(), &layoutDesc, nullptr, &layoutHandle);\n if (res != VK_SUCCESS || !layoutHandle)\n throw LSFG::vulkan_error(res, \"Failed to create pipeline layout\");\n\n // create pipeline\n const VkPipelineShaderStageCreateInfo shaderStageInfo{\n .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n .stage = VK_SHADER_STAGE_COMPUTE_BIT,\n .module = shader.handle(),\n .pName = \"main\",\n };\n const VkComputePipelineCreateInfo pipelineDesc{\n .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,\n .stage = shaderStageInfo,\n .layout = layoutHandle,\n };\n VkPipeline pipelineHandle{};\n res = vkCreateComputePipelines(device.handle(),\n VK_NULL_HANDLE, 1, &pipelineDesc, nullptr, &pipelineHandle);\n if (res != VK_SUCCESS || !pipelineHandle)\n throw LSFG::vulkan_error(res, \"Failed to create compute pipeline\");\n\n // store layout and pipeline in shared ptr\n this->layout = std::shared_ptr(\n new VkPipelineLayout(layoutHandle),\n [dev = device.handle()](VkPipelineLayout* layout) 
{\n vkDestroyPipelineLayout(dev, *layout, nullptr);\n }\n );\n this->pipeline = std::shared_ptr(\n new VkPipeline(pipelineHandle),\n [dev = device.handle()](VkPipeline* pipeline) {\n vkDestroyPipeline(dev, *pipeline, nullptr);\n }\n );\n}\n\nvoid Pipeline::bind(const CommandBuffer& commandBuffer) const {\n vkCmdBindPipeline(commandBuffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, *this->pipeline);\n}\n"], ["/lsfg-vk/framegen/src/pool/shaderpool.cpp", "#include \"pool/shaderpool.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/device.hpp\"\n#include \"core/pipeline.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Pool;\n\nCore::ShaderModule ShaderPool::getShader(\n const Core::Device& device, const std::string& name,\n const std::vector>& types) {\n auto it = shaders.find(name);\n if (it != shaders.end())\n return it->second;\n\n // grab the shader\n auto bytecode = this->source(name);\n if (bytecode.empty())\n throw std::runtime_error(\"Shader code is empty: \" + name);\n\n // create the shader module\n Core::ShaderModule shader(device, bytecode, types);\n shaders[name] = shader;\n return shader;\n}\n\nCore::Pipeline ShaderPool::getPipeline(\n const Core::Device& device, const std::string& name) {\n auto it = pipelines.find(name);\n if (it != pipelines.end())\n return it->second;\n\n // grab the shader module\n auto shader = this->getShader(device, name, {});\n\n // create the pipeline\n Core::Pipeline pipeline(device, shader);\n pipelines[name] = pipeline;\n return pipeline;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/log/log_debug.h", "#pragma once\n\n#include \n\n#include \"log/log.h\"\n\n#ifdef _MSC_VER\n#define METHOD_NAME __FUNCSIG__\n#else\n#define METHOD_NAME __PRETTY_FUNCTION__\n#endif\n\n#define TRACE_ENABLED\n\n#ifdef TRACE_ENABLED\n#define TRACE(...) \\\n do { dxvk::debug::trace(METHOD_NAME, ##__VA_ARGS__); } while (0)\n#else\n#define TRACE(...) 
\\\n do { } while (0)\n#endif\n\nnamespace dxvk::debug {\n \n std::string methodName(const std::string& prettyName);\n \n inline void traceArgs(std::stringstream& stream) { }\n \n template\n void traceArgs(std::stringstream& stream, const Arg1& arg1) {\n stream << arg1;\n }\n \n template\n void traceArgs(std::stringstream& stream, const Arg1& arg1, const Arg2& arg2, const Args&... args) {\n stream << arg1 << \",\";\n traceArgs(stream, arg2, args...);\n }\n \n template\n void trace(const std::string& funcName, const Args&... args) {\n std::stringstream stream;\n stream << methodName(funcName) << \"(\";\n traceArgs(stream, args...);\n stream << \")\";\n Logger::trace(stream.str());\n }\n \n}\n"], ["/lsfg-vk/src/mini/semaphore.cpp", "#include \"mini/semaphore.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n\nusing namespace Mini;\n\nSemaphore::Semaphore(VkDevice device) {\n // create semaphore\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO\n };\n VkSemaphore semaphoreHandle{};\n auto res = Layer::ovkCreateSemaphore(device, &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // store semaphore in shared ptr\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device](VkSemaphore* semaphoreHandle) {\n Layer::ovkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nSemaphore::Semaphore(VkDevice device, int* fd) {\n // create semaphore\n const VkExportSemaphoreCreateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = &exportInfo\n };\n VkSemaphore semaphoreHandle{};\n auto res = Layer::ovkCreateSemaphore(device, &desc, nullptr, &semaphoreHandle);\n 
if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // export semaphore to fd\n const VkSemaphoreGetFdInfoKHR fdInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,\n .semaphore = semaphoreHandle,\n .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n res = Layer::ovkGetSemaphoreFdKHR(device, &fdInfo, fd);\n if (res != VK_SUCCESS || *fd < 0)\n throw LSFG::vulkan_error(res, \"Unable to export semaphore to fd\");\n\n // store semaphore in shared ptr\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device](VkSemaphore* semaphoreHandle) {\n Layer::ovkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/mini/commandpool.cpp", "#include \"mini/commandpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n\nusing namespace Mini;\n\nCommandPool::CommandPool(VkDevice device, uint32_t graphicsFamilyIdx) {\n // create command pool\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = graphicsFamilyIdx\n };\n VkCommandPool commandPoolHandle{};\n auto res = Layer::ovkCreateCommandPool(device, &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n\n // store command pool in shared ptr\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device](VkCommandPool* commandPoolHandle) {\n Layer::ovkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/sampler.cpp", "#include \n#include \n\n#include \"core/sampler.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nSampler::Sampler(const Core::Device& device,\n VkSamplerAddressMode mode,\n VkCompareOp 
compare,\n bool isWhite) {\n // create sampler\n const VkSamplerCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,\n .magFilter = VK_FILTER_LINEAR,\n .minFilter = VK_FILTER_LINEAR,\n .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,\n .addressModeU = mode,\n .addressModeV = mode,\n .addressModeW = mode,\n .compareOp = compare,\n .maxLod = VK_LOD_CLAMP_NONE,\n .borderColor =\n isWhite ? VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE\n : VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK\n };\n VkSampler samplerHandle{};\n auto res = vkCreateSampler(device.handle(), &desc, nullptr, &samplerHandle);\n if (res != VK_SUCCESS || samplerHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create sampler\");\n\n // store sampler in shared ptr\n this->sampler = std::shared_ptr(\n new VkSampler(samplerHandle),\n [dev = device.handle()](VkSampler* samplerHandle) {\n vkDestroySampler(dev, *samplerHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/unicode_codecvt.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2019 Trail of Bits, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n#include \n\nnamespace peparse {\n// See\n// https://stackoverflow.com/questions/38688417/utf-conversion-functions-in-c11\nstd::string from_utf16(const UCharString &u) {\n std::wstring_convert, char16_t> convert;\n return convert.to_bytes(u);\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_limits.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n\n enum DxvkLimits : size_t {\n MaxNumRenderTargets = 8,\n MaxNumVertexAttributes = 32,\n MaxNumVertexBindings = 32,\n MaxNumXfbBuffers = 4,\n MaxNumXfbStreams = 4,\n MaxNumViewports = 16,\n MaxNumResourceSlots = 1216,\n MaxNumQueuedCommandBuffers = 32,\n MaxNumQueryCountPerPool = 128,\n MaxNumSpecConstants = 12,\n MaxUniformBufferSize = 65536,\n MaxVertexBindingStride = 2048,\n MaxPushConstantSize = 128,\n };\n\n}\n"], ["/lsfg-vk/framegen/src/common/exception.cpp", "#include \"common/exception.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\n\nvulkan_error::vulkan_error(VkResult result, const std::string& message)\n : std::runtime_error(std::format(\"{} (error {})\", message, static_cast(result))),\n result(result) {}\n\nvulkan_error::~vulkan_error() noexcept = default;\n\nrethrowable_error::rethrowable_error(const std::string& message, const std::exception& exe)\n : std::runtime_error(message) {\n this->message = std::format(\"{}\\n- {}\", message, exe.what());\n}\n\nrethrowable_error::~rethrowable_error() noexcept = default;\n"], ["/lsfg-vk/framegen/src/core/commandpool.cpp", "#include \n#include \n\n#include \"core/commandpool.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace 
LSFG::Core;\n\nCommandPool::CommandPool(const Core::Device& device) {\n // create command pool\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = device.getComputeFamilyIdx()\n };\n VkCommandPool commandPoolHandle{};\n auto res = vkCreateCommandPool(device.handle(), &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n\n // store command pool in shared ptr\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device.handle()](VkCommandPool* commandPoolHandle) {\n vkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/thirdparty/dxbc/src/util/util_log.cpp", "#include \"log/log_debug.h\"\n\nnamespace dxvk::debug {\n \n std::string methodName(const std::string& prettyName) {\n size_t end = prettyName.find(\"(\");\n size_t begin = prettyName.substr(0, end).rfind(\" \") + 1;\n return prettyName.substr(begin,end - begin);\n }\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_defs.h", "class DxbcOperandKind {\n};"], ["/lsfg-vk/thirdparty/toml11/src/skip.cpp", "#include \n#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\n\nnamespace detail\n{\ntemplate bool skip_whitespace (location& loc, const context&);\ntemplate bool skip_empty_lines (location& loc, const context&);\ntemplate void skip_comment_block (location& loc, const context&);\ntemplate void skip_empty_or_comment_lines(location& loc, const context&);\ntemplate void skip_string_like (location& loc, const context&);\ntemplate void skip_array_like (location& loc, const context&);\ntemplate void skip_inline_table_like (location& loc, const context&);\ntemplate void skip_value (location& loc, const context&);\ntemplate void skip_key_value_pair (location& loc, const context&);\ntemplate void skip_until_next_table (location& loc, const context&);\n\ntemplate bool skip_whitespace (location& loc, const context&);\ntemplate bool skip_empty_lines (location& loc, const context&);\ntemplate void skip_comment_block (location& loc, const context&);\ntemplate void skip_empty_or_comment_lines(location& loc, const context&);\ntemplate void skip_string_like (location& loc, const context&);\ntemplate void skip_array_like (location& loc, const context&);\ntemplate void skip_inline_table_like (location& loc, const context&);\ntemplate void skip_value (location& loc, const context&);\ntemplate void skip_key_value_pair (location& loc, const context&);\ntemplate void skip_until_next_table (location& loc, const context&);\n\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_enum.h", "#pragma once\n\n#define ENUM_NAME(name) \\\n case name: return os << #name\n\n#define ENUM_DEFAULT(name) \\\n default: return os << static_cast(e)\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/to_string.h", "#pragma once\n\n#include \n#include \n\n#if defined(_MSC_VER)\ntypedef std::basic_string UCharString;\n#else\ntypedef std::u16string UCharString;\n#endif\n\nnamespace peparse 
{\ntemplate \nstatic std::string to_string(T t, std::ios_base &(*f)(std::ios_base &) ) {\n std::ostringstream oss;\n oss << f << t;\n return oss.str();\n}\n\nstd::string from_utf16(const UCharString &u);\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/toml11/src/parser.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\n\ntemplate result, std::vector> try_parse(std::vector, std::string, spec);\ntemplate result, std::vector> try_parse(std::istream&, std::string, spec);\ntemplate result, std::vector> try_parse(std::string, spec);\ntemplate result, std::vector> try_parse(FILE*, std::string, spec);\ntemplate result, std::vector> try_parse_str(std::string, spec, cxx::source_location);\n\ntemplate basic_value parse(std::vector, std::string, spec);\ntemplate basic_value parse(std::istream&, std::string, spec);\ntemplate basic_value parse(std::string, spec);\ntemplate basic_value parse(FILE*, std::string, spec);\ntemplate basic_value parse_str(std::string, spec, cxx::source_location);\n\ntemplate result, std::vector> try_parse(std::vector, std::string, spec);\ntemplate result, std::vector> try_parse(std::istream&, std::string, spec);\ntemplate result, std::vector> try_parse(std::string, spec);\ntemplate result, std::vector> try_parse(FILE*, std::string, spec);\ntemplate result, std::vector> try_parse_str(std::string, spec, cxx::source_location);\n\ntemplate basic_value parse(std::vector, std::string, spec);\ntemplate basic_value parse(std::istream&, std::string, spec);\ntemplate basic_value parse(std::string, spec);\ntemplate basic_value parse(FILE*, std::string, spec);\ntemplate basic_value parse_str(std::string, spec, cxx::source_location);\n\n#if defined(TOML11_HAS_FILESYSTEM)\ntemplate cxx::enable_if_t::value, result, std::vector>> try_parse(const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, result, std::vector>> 
try_parse(const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, basic_value > parse (const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, basic_value > parse (const std::filesystem::path&, spec);\n#endif // filesystem\n\n} // toml\n"], ["/lsfg-vk/thirdparty/toml11/src/serializer.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\nstruct type_config;\nstruct ordered_type_config;\n\ntemplate typename basic_value::string_type\nformat(const basic_value&, const spec);\n\ntemplate typename basic_value::string_type\nformat(const typename basic_value::key_type& k,\n const basic_value& v, const spec);\n\ntemplate typename basic_value::string_type\nformat(const std::vector::key_type>& ks,\n const basic_value& v, const spec s);\n\ntemplate typename basic_value::string_type\nformat(const basic_value&, const spec);\n\ntemplate typename basic_value::string_type\nformat(const typename basic_value::key_type& k,\n const basic_value& v, const spec);\n\ntemplate typename basic_value::string_type\nformat(const std::vector::key_type>& ks,\n const basic_value& v, const spec s);\n\nnamespace detail\n{\ntemplate class serializer<::toml::type_config>;\ntemplate class serializer<::toml::ordered_type_config>;\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_names.h", "class dxbc_names {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_util.h", "class dxbc_util {\n};"], ["/lsfg-vk/thirdparty/toml11/src/context.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\nnamespace detail\n{\ntemplate class context<::toml::type_config>;\ntemplate class context<::toml::ordered_type_config>;\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/toml11/src/types.cpp", "#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\ntemplate class basic_value;\ntemplate class basic_value;\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_include.h", "#pragma once\n\n#include \n#include \n\n#include \"log/log.h\"\n#include \"log/log_debug.h\"\n\n#include \"util_error.h\"\n#include \"util_flags.h\"\n#include \"util_likely.h\"\n#include \"util_string.h\"\n\n#include \"rc/util_rc.h\"\n#include \"rc/util_rc_ptr.h\"\n"], ["/lsfg-vk/thirdparty/toml11/src/value_t.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_likely.h", "#pragma once\n\n#ifdef __GNUC__\n#define likely(x) __builtin_expect(bool(x),1)\n#define unlikely(x) __builtin_expect(bool(x),0)\n#define force_inline inline __attribute__((always_inline))\n#else\n#define likely(x) (x)\n#define unlikely(x) (x)\n#define force_inline inline\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/format.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/comments.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/color.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/location.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/syntax.cpp", "#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/error_info.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/scanner.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/source_location.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/region.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/datetime.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/literal.cpp", "#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_include.h", "#pragma once\n\n#include \n#include \n\n#include \"dxvk_limits.h\"\n#include \"dxvk_pipelayout.h\"\n\n#include \"log/log.h\"\n#include \"log/log_debug.h\"\n\n#include \"rc/util_rc.h\"\n#include \"rc/util_rc_ptr.h\"\n\n#include \"util_bit.h\"\n#include \"util_enum.h\"\n#include \"util_error.h\"\n#include \"util_string.h\"\n#include \"util_flags.h\"\n#include \"util_small_vector.h\"\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/parse.h", "class parse {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_string.h", "class util_string {\n};"]], "task_instance_info": {"created_time": "2025-08-20 20:01:24", "created_task_model": "DeepSeek-R1", "function_skeleton": "void decodeOperand(DxbcCodeSlice& code, const DxbcInstOperandFormat& format) {}", "code_description": "This code segment functions to: Process different types of instruction operands by routing them to specific decoding routines based on their classification and updating corresponding counters for destination, source, and immediate data elements."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```cpp\n[TASK_BEGIN]\nvoid decodeOperand(DxbcCodeSlice& code, const DxbcInstOperandFormat& format) {\n switch (format.operandType) {\n case DxbcOperandType::Dst:\n if (m_instruction.dstCount >= m_dstOperands.size())\n throw DxvkError(\"DxbcDecodeContext: Too many destination operands\");\n decodeRegister(code, m_instruction.dst[m_instruction.dstCount++], format.dataType);\n break;\n \n case DxbcOperandType::Src:\n if (m_instruction.srcCount >= m_srcOperands.size())\n throw DxvkError(\"DxbcDecodeContext: Too many source operands\");\n decodeRegister(code, m_instruction.src[m_instruction.srcCount++], format.dataType);\n break;\n \n case DxbcOperandType::Imm:\n if (m_instruction.immCount >= 
m_immOperands.size())\n throw DxvkError(\"DxbcDecodeContext: Too many immediate operands\");\n decodeImm32(code, m_instruction.imm[m_instruction.immCount++], format.dataType);\n break;\n \n default:\n throw DxvkError(\"DxbcDecodeContext: Invalid operand type\");\n }\n}\n[TASK_END]\n```", "inference_time": "2025-08-20 20-02-48"}, "editdistance_info": {"edit_distance": 36.5957, "calculate_time": "2025-08-20 20:02:48", "true_code_clean": "void decodeOperand(DxbcCodeSlice& code, const DxbcInstOperandFormat& format) {\n switch (format.kind) {\n case DxbcOperandKind::DstReg: {\n const uint32_t operandId = m_instruction.dstCount++;\n this->decodeRegister(code, m_dstOperands.at(operandId), format.type);\n } break;\n case DxbcOperandKind::SrcReg: {\n const uint32_t operandId = m_instruction.srcCount++;\n this->decodeRegister(code, m_srcOperands.at(operandId), format.type);\n } break;\n case DxbcOperandKind::Imm32: {\n const uint32_t operandId = m_instruction.immCount++;\n this->decodeImm32(code, m_immOperands.at(operandId), format.type);\n } break;\n default:\n throw DxvkError(\"DxbcDecodeContext: Invalid operand format\");\n }\n }", "predict_code_clean": "void decodeOperand(DxbcCodeSlice& code, const DxbcInstOperandFormat& format) {\n switch (format.operandType) {\n case DxbcOperandType::Dst:\n if (m_instruction.dstCount >= m_dstOperands.size())\n throw DxvkError(\"DxbcDecodeContext: Too many destination operands\");\n decodeRegister(code, m_instruction.dst[m_instruction.dstCount++], format.dataType);\n break;\n case DxbcOperandType::Src:\n if (m_instruction.srcCount >= m_srcOperands.size())\n throw DxvkError(\"DxbcDecodeContext: Too many source operands\");\n decodeRegister(code, m_instruction.src[m_instruction.srcCount++], format.dataType);\n break;\n case DxbcOperandType::Imm:\n if (m_instruction.immCount >= m_immOperands.size())\n throw DxvkError(\"DxbcDecodeContext: Too many immediate operands\");\n decodeImm32(code, m_instruction.imm[m_instruction.immCount++], 
format.dataType);\n break;\n default:\n throw DxvkError(\"DxbcDecodeContext: Invalid operand type\");\n }\n}"}} {"repo_name": "lsfg-vk", "file_name": "/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_chunk_isgn.h", "inference_info": {"prefix_code": "class DxbcIsgn {\n public:\n DxbcIsgn(DxbcReader reader, DxbcTag tag) {\n uint32_t elementCount = reader.readu32();\n reader.skip(sizeof(uint32_t));\n \n std::array componentTypes = {\n DxbcScalarType::Uint32, DxbcScalarType::Uint32,\n DxbcScalarType::Sint32, DxbcScalarType::Float32,\n };\n\n // https://github.com/DarkStarSword/3d-fixes/blob/master/dx11shaderanalyse.py#L101\n bool hasStream = (tag == \"ISG1\") || (tag == \"OSG1\") || (tag == \"PSG1\") || (tag == \"OSG5\");\n bool hasPrecision = (tag == \"ISG1\") || (tag == \"OSG1\") || (tag == \"PSG1\");\n \n for (uint32_t i = 0; i < elementCount; i++) {\n DxbcSgnEntry entry;\n entry.streamId = hasStream ? reader.readu32() : 0;\n entry.semanticName = reader.clone(reader.readu32()).readString();\n entry.semanticIndex = reader.readu32();\n entry.systemValue = static_cast(reader.readu32());\n entry.componentType = componentTypes.at(reader.readu32());\n entry.registerId = reader.readu32();\n\n uint32_t mask = reader.readu32();\n\n entry.componentMask = bit::extract(mask, 0, 3);\n entry.componentUsed = bit::extract(mask, 8, 11);\n\n if (hasPrecision)\n reader.readu32();\n\n m_entries.push_back(entry);\n }\n }\n ~DxbcIsgn() {\n \n }\n const DxbcSgnEntry* findByRegister(\n uint32_t registerId) const;\n const DxbcSgnEntry* find(\n const std::string& semanticName,\n uint32_t semanticIndex,\n uint32_t streamIndex) const;\n DxbcRegMask regMask(\n uint32_t registerId) const {\n DxbcRegMask mask;\n\n for (auto e = this->begin(); e != this->end(); e++) {\n if (e->registerId == registerId)\n mask |= e->componentMask;\n }\n\n return mask;\n }\n uint32_t maxRegisterCount() const {\n uint32_t result = 0;\n for (auto e = this->begin(); e != this->end(); e++)\n result = std::max(result, 
e->registerId + 1);\n return result;\n }\n ", "suffix_code": "\n private:\n std::vector m_entries;\n};", "middle_code": "static bool compareSemanticNames(\n const std::string& a,\n const std::string& b) {\n if (a.size() != b.size())\n return false;\n for (size_t i = 0; i < a.size(); i++) {\n char ac = a[i];\n char bc = b[i];\n if (ac != bc) {\n if (ac >= 'A' && ac <= 'Z') ac += 'a' - 'A';\n if (bc >= 'A' && bc <= 'Z') bc += 'a' - 'A';\n if (ac != bc)\n return false;\n }\n }\n return true;\n }", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "cpp", "sub_task_type": null}, "context_code": [["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_compiler.h", "class DxbcCompilerHsPhase {\n public:\n DxbcCompiler(\n const std::string& fileName,\n const DxbcModuleInfo& moduleInfo,\n const DxbcProgramInfo& programInfo,\n const Rc& isgn,\n const Rc& osgn,\n const Rc& psgn,\n const DxbcAnalysisInfo& analysis) {\n // Declare an entry point ID. We'll need it during the\n // initialization phase where the execution mode is set.\n m_entryPointId = m_module.allocateId();\n \n // Set the shader name so that we recognize it in renderdoc\n m_module.setDebugSource(\n spv::SourceLanguageUnknown, 0,\n m_module.addDebugString(fileName.c_str()),\n nullptr);\n\n // Set the memory model. 
This is the same for all shaders.\n m_module.enableCapability(\n spv::CapabilityVulkanMemoryModel);\n\n m_module.setMemoryModel(\n spv::AddressingModelLogical,\n spv::MemoryModelVulkan);\n \n // Make sure our interface registers are clear\n for (uint32_t i = 0; i < DxbcMaxInterfaceRegs; i++) {\n m_vRegs.at(i) = DxbcRegisterPointer { };\n m_oRegs.at(i) = DxbcRegisterPointer { };\n }\n \n this->emitInit();\n }\n ~DxbcCompiler() {\n \n }\n void processInstruction(\n const DxbcShaderInstruction& ins) {\n m_lastOp = m_currOp;\n m_currOp = ins.op;\n\n switch (ins.opClass) {\n case DxbcInstClass::Declaration:\n return this->emitDcl(ins);\n \n case DxbcInstClass::CustomData:\n return this->emitCustomData(ins);\n \n case DxbcInstClass::Atomic:\n return this->emitAtomic(ins);\n \n case DxbcInstClass::AtomicCounter:\n return this->emitAtomicCounter(ins);\n \n case DxbcInstClass::Barrier:\n return this->emitBarrier(ins);\n \n case DxbcInstClass::BitExtract:\n return this->emitBitExtract(ins);\n \n case DxbcInstClass::BitInsert:\n return this->emitBitInsert(ins);\n \n case DxbcInstClass::BitScan:\n return this->emitBitScan(ins);\n \n case DxbcInstClass::BufferQuery:\n return this->emitBufferQuery(ins);\n \n case DxbcInstClass::BufferLoad:\n return this->emitBufferLoad(ins);\n \n case DxbcInstClass::BufferStore:\n return this->emitBufferStore(ins);\n \n case DxbcInstClass::ConvertFloat16:\n return this->emitConvertFloat16(ins);\n \n case DxbcInstClass::ConvertFloat64:\n return this->emitConvertFloat64(ins);\n \n case DxbcInstClass::ControlFlow:\n return this->emitControlFlow(ins);\n \n case DxbcInstClass::GeometryEmit:\n return this->emitGeometryEmit(ins);\n \n case DxbcInstClass::HullShaderPhase:\n return this->emitHullShaderPhase(ins);\n \n case DxbcInstClass::HullShaderInstCnt:\n return this->emitHullShaderInstCnt(ins);\n \n case DxbcInstClass::Interpolate:\n return this->emitInterpolate(ins);\n \n case DxbcInstClass::NoOperation:\n return;\n\n case 
DxbcInstClass::SparseCheckAccess:\n return this->emitSparseCheckAccess(ins);\n\n case DxbcInstClass::TextureQuery:\n return this->emitTextureQuery(ins);\n \n case DxbcInstClass::TextureQueryLod:\n return this->emitTextureQueryLod(ins);\n \n case DxbcInstClass::TextureQueryMs:\n return this->emitTextureQueryMs(ins);\n \n case DxbcInstClass::TextureQueryMsPos:\n return this->emitTextureQueryMsPos(ins);\n \n case DxbcInstClass::TextureFetch:\n return this->emitTextureFetch(ins);\n \n case DxbcInstClass::TextureGather:\n return this->emitTextureGather(ins);\n \n case DxbcInstClass::TextureSample:\n return this->emitTextureSample(ins);\n \n case DxbcInstClass::TypedUavLoad:\n return this->emitTypedUavLoad(ins);\n \n case DxbcInstClass::TypedUavStore:\n return this->emitTypedUavStore(ins);\n \n case DxbcInstClass::VectorAlu:\n return this->emitVectorAlu(ins);\n \n case DxbcInstClass::VectorCmov:\n return this->emitVectorCmov(ins);\n \n case DxbcInstClass::VectorCmp:\n return this->emitVectorCmp(ins);\n \n case DxbcInstClass::VectorDeriv:\n return this->emitVectorDeriv(ins);\n \n case DxbcInstClass::VectorDot:\n return this->emitVectorDot(ins);\n \n case DxbcInstClass::VectorIdiv:\n return this->emitVectorIdiv(ins);\n \n case DxbcInstClass::VectorImul:\n return this->emitVectorImul(ins);\n \n case DxbcInstClass::VectorMsad:\n return this->emitVectorMsad(ins);\n \n case DxbcInstClass::VectorShift:\n return this->emitVectorShift(ins);\n \n case DxbcInstClass::VectorSinCos:\n return this->emitVectorSinCos(ins);\n \n default:\n Logger::warn(\n str::format(\"DxbcCompiler: Unhandled opcode class: \",\n ins.op));\n }\n }\n void processXfbPassthrough() {\n m_module.setExecutionMode (m_entryPointId, spv::ExecutionModeInputPoints);\n m_module.setExecutionMode (m_entryPointId, spv::ExecutionModeOutputPoints);\n m_module.setOutputVertices(m_entryPointId, 1);\n\n for (auto e = m_isgn->begin(); e != m_isgn->end(); e++) {\n emitDclInput(e->registerId, 1,\n e->componentMask, 
DxbcSystemValue::None,\n DxbcInterpolationMode::Undefined);\n }\n\n // Figure out which streams to enable\n uint32_t streamMask = 0;\n\n for (size_t i = 0; i < m_xfbVars.size(); i++)\n streamMask |= 1u << m_xfbVars[i].streamId;\n \n for (uint32_t streamId : bit::BitMask(streamMask)) {\n emitXfbOutputSetup(streamId, true);\n m_module.opEmitVertex(m_module.constu32(streamId));\n }\n\n // End the main function\n emitFunctionEnd();\n\n // For pass-through we always assume points\n m_inputTopology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;\n }\n SpirvCodeBuffer finalize() {\n // Depending on the shader type, this will prepare\n // input registers, call various shader functions\n // and write back the output registers.\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: this->emitVsFinalize(); break;\n case DxbcProgramType::HullShader: this->emitHsFinalize(); break;\n case DxbcProgramType::DomainShader: this->emitDsFinalize(); break;\n case DxbcProgramType::GeometryShader: this->emitGsFinalize(); break;\n case DxbcProgramType::PixelShader: this->emitPsFinalize(); break;\n case DxbcProgramType::ComputeShader: this->emitCsFinalize(); break;\n default: throw DxvkError(\"Invalid shader stage\");\n }\n\n // Emit float control mode if the extension is supported\n this->emitFloatControl();\n \n // Declare the entry point, we now have all the\n // information we need, including the interfaces\n m_module.addEntryPoint(m_entryPointId,\n m_programInfo.executionModel(), \"main\");\n m_module.setDebugName(m_entryPointId, \"main\");\n\n return m_module.compile();\n }\n private:\n DxbcModuleInfo m_moduleInfo;\n DxbcProgramInfo m_programInfo;\n SpirvModule m_module;\n Rc m_isgn;\n Rc m_osgn;\n Rc m_psgn;\n const DxbcAnalysisInfo* m_analysis;\n std::vector m_bindings;\n std::vector m_rRegs;\n std::vector m_xRegs;\n std::vector m_gRegs;\n std::array<\n DxbcRegisterPointer,\n DxbcMaxInterfaceRegs> m_vRegs;\n std::vector m_vMappings;\n std::array<\n DxbcRegisterPointer,\n 
DxbcMaxInterfaceRegs> m_oRegs;\n std::vector m_oMappings;\n std::vector m_xfbVars;\n std::vector m_indexRanges = { };\n std::array m_constantBuffers;\n std::array m_samplers;\n std::array m_textures;\n std::array m_uavs;\n bool m_hasGloballyCoherentUav = false;\n bool m_hasRasterizerOrderedUav = false;\n std::vector m_controlFlowBlocks;\n bool m_topLevelIsUniform = true;\n uint64_t m_uavRdMask = 0u;\n uint64_t m_uavWrMask = 0u;\n bool m_insideFunction = false;\n uint32_t m_vArrayLength = 0;\n uint32_t m_vArrayLengthId = 0;\n uint32_t m_vArray = 0;\n uint32_t m_positionIn = 0;\n uint32_t m_positionOut = 0;\n uint32_t m_clipDistances = 0;\n uint32_t m_cullDistances = 0;\n uint32_t m_primitiveIdIn = 0;\n uint32_t m_primitiveIdOut = 0;\n uint32_t m_icbArray = 0;\n std::vector m_icbData;\n uint32_t m_icbComponents = 0u;\n uint32_t m_icbSize = 0u;\n uint32_t m_samplePositions = 0;\n uint32_t m_uavCtrStructType = 0;\n uint32_t m_uavCtrPointerType = 0;\n std::unordered_map m_subroutines;\n uint32_t m_entryPointId = 0;\n bool m_hasRawAccessChains = false;\n uint32_t m_inputMask = 0u;\n uint32_t m_outputMask = 0u;\n DxbcCompilerVsPart m_vs;\n DxbcCompilerHsPart m_hs;\n DxbcCompilerDsPart m_ds;\n DxbcCompilerGsPart m_gs;\n DxbcCompilerPsPart m_ps;\n DxbcCompilerCsPart m_cs;\n bool m_precise = true;\n DxbcOpcode m_lastOp = DxbcOpcode::Nop;\n DxbcOpcode m_currOp = DxbcOpcode::Nop;\n VkPrimitiveTopology m_inputTopology = VK_PRIMITIVE_TOPOLOGY_MAX_ENUM;\n VkPrimitiveTopology m_outputTopology = VK_PRIMITIVE_TOPOLOGY_MAX_ENUM;\n void emitDcl(\n const DxbcShaderInstruction& ins) {\n switch (ins.op) {\n case DxbcOpcode::DclGlobalFlags:\n return this->emitDclGlobalFlags(ins);\n \n case DxbcOpcode::DclIndexRange:\n return this->emitDclIndexRange(ins);\n \n case DxbcOpcode::DclTemps:\n return this->emitDclTemps(ins);\n \n case DxbcOpcode::DclIndexableTemp:\n return this->emitDclIndexableTemp(ins);\n \n case DxbcOpcode::DclInput:\n case DxbcOpcode::DclInputSgv:\n case 
DxbcOpcode::DclInputSiv:\n case DxbcOpcode::DclInputPs:\n case DxbcOpcode::DclInputPsSgv:\n case DxbcOpcode::DclInputPsSiv:\n case DxbcOpcode::DclOutput:\n case DxbcOpcode::DclOutputSgv:\n case DxbcOpcode::DclOutputSiv:\n return this->emitDclInterfaceReg(ins);\n \n case DxbcOpcode::DclConstantBuffer:\n return this->emitDclConstantBuffer(ins);\n \n case DxbcOpcode::DclSampler:\n return this->emitDclSampler(ins);\n \n case DxbcOpcode::DclStream:\n return this->emitDclStream(ins);\n \n case DxbcOpcode::DclUavTyped:\n case DxbcOpcode::DclResource:\n return this->emitDclResourceTyped(ins);\n \n case DxbcOpcode::DclUavRaw:\n case DxbcOpcode::DclResourceRaw:\n case DxbcOpcode::DclUavStructured:\n case DxbcOpcode::DclResourceStructured:\n return this->emitDclResourceRawStructured(ins);\n \n case DxbcOpcode::DclThreadGroupSharedMemoryRaw:\n case DxbcOpcode::DclThreadGroupSharedMemoryStructured:\n return this->emitDclThreadGroupSharedMemory(ins);\n \n case DxbcOpcode::DclGsInputPrimitive:\n return this->emitDclGsInputPrimitive(ins);\n \n case DxbcOpcode::DclGsOutputPrimitiveTopology:\n return this->emitDclGsOutputTopology(ins);\n \n case DxbcOpcode::DclMaxOutputVertexCount:\n return this->emitDclMaxOutputVertexCount(ins);\n \n case DxbcOpcode::DclInputControlPointCount:\n return this->emitDclInputControlPointCount(ins);\n \n case DxbcOpcode::DclOutputControlPointCount:\n return this->emitDclOutputControlPointCount(ins);\n \n case DxbcOpcode::DclHsMaxTessFactor:\n return this->emitDclHsMaxTessFactor(ins);\n \n case DxbcOpcode::DclTessDomain:\n return this->emitDclTessDomain(ins);\n \n case DxbcOpcode::DclTessPartitioning:\n return this->emitDclTessPartitioning(ins);\n \n case DxbcOpcode::DclTessOutputPrimitive:\n return this->emitDclTessOutputPrimitive(ins);\n \n case DxbcOpcode::DclThreadGroup:\n return this->emitDclThreadGroup(ins);\n \n case DxbcOpcode::DclGsInstanceCount:\n return this->emitDclGsInstanceCount(ins);\n \n default:\n Logger::warn(\n 
str::format(\"DxbcCompiler: Unhandled opcode: \",\n ins.op));\n }\n }\n void emitDclGlobalFlags(\n const DxbcShaderInstruction& ins) {\n const DxbcGlobalFlags flags = ins.controls.globalFlags();\n \n if (flags.test(DxbcGlobalFlag::RefactoringAllowed))\n m_precise = false;\n\n if (flags.test(DxbcGlobalFlag::EarlyFragmentTests))\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeEarlyFragmentTests);\n }\n void emitDclIndexRange(\n const DxbcShaderInstruction& ins) {\n // dcl_index_range has one operand:\n // (0) Range start, either an input or output register\n // (1) Range end\n uint32_t index = ins.dst[0].idxDim - 1u;\n\n DxbcIndexRange range = { };\n range.type = ins.dst[0].type;\n range.start = ins.dst[0].idx[index].offset;\n range.length = ins.imm[0].u32;\n\n m_indexRanges.push_back(range);\n }\n void emitDclTemps(\n const DxbcShaderInstruction& ins) {\n // dcl_temps has one operand:\n // (imm0) Number of temp registers\n\n // Ignore this and declare temps on demand.\n }\n void emitDclIndexableTemp(\n const DxbcShaderInstruction& ins) {\n // dcl_indexable_temps has three operands:\n // (imm0) Array register index (x#)\n // (imm1) Number of vectors stored in the array\n // (imm2) Component count of each individual vector. 
This is\n // always 4 in fxc-generated binaries and therefore useless.\n const uint32_t regId = ins.imm[0].u32;\n\n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = m_analysis->xRegMasks.at(regId).minComponents();\n info.type.alength = ins.imm[1].u32;\n info.sclass = spv::StorageClassPrivate;\n\n if (regId >= m_xRegs.size())\n m_xRegs.resize(regId + 1);\n \n m_xRegs.at(regId).ccount = info.type.ccount;\n m_xRegs.at(regId).alength = info.type.alength;\n m_xRegs.at(regId).varId = emitNewVariable(info);\n \n m_module.setDebugName(m_xRegs.at(regId).varId,\n str::format(\"x\", regId).c_str());\n }\n void emitDclInterfaceReg(\n const DxbcShaderInstruction& ins) {\n switch (ins.dst[0].type) {\n case DxbcOperandType::InputControlPoint:\n if (m_programInfo.type() != DxbcProgramType::HullShader)\n break;\n [[fallthrough]];\n\n case DxbcOperandType::Input:\n case DxbcOperandType::Output: {\n // dcl_input and dcl_output instructions\n // have the following operands:\n // (dst0) The register to declare\n // (imm0) The system value (optional)\n uint32_t regDim = 0;\n uint32_t regIdx = 0;\n \n // In the vertex and fragment shader stage, the\n // operand indices will have the following format:\n // (0) Register index\n // \n // In other stages, the input and output registers\n // may be declared as arrays of a fixed size:\n // (0) Array length\n // (1) Register index\n if (ins.dst[0].idxDim == 2) {\n regDim = ins.dst[0].idx[0].offset;\n regIdx = ins.dst[0].idx[1].offset;\n } else if (ins.dst[0].idxDim == 1) {\n regIdx = ins.dst[0].idx[0].offset;\n } else {\n Logger::err(str::format(\n \"DxbcCompiler: \", ins.op,\n \": Invalid index dimension\"));\n return;\n }\n \n // This declaration may map an output register to a system\n // value. 
If that is the case, the system value type will\n // be stored in the second operand.\n const bool hasSv =\n ins.op == DxbcOpcode::DclInputSgv\n || ins.op == DxbcOpcode::DclInputSiv\n || ins.op == DxbcOpcode::DclInputPsSgv\n || ins.op == DxbcOpcode::DclInputPsSiv\n || ins.op == DxbcOpcode::DclOutputSgv\n || ins.op == DxbcOpcode::DclOutputSiv;\n \n DxbcSystemValue sv = DxbcSystemValue::None;\n \n if (hasSv)\n sv = static_cast(ins.imm[0].u32);\n \n // In the pixel shader, inputs are declared with an\n // interpolation mode that is part of the op token.\n const bool hasInterpolationMode =\n ins.op == DxbcOpcode::DclInputPs\n || ins.op == DxbcOpcode::DclInputPsSiv;\n \n DxbcInterpolationMode im = DxbcInterpolationMode::Undefined;\n \n if (hasInterpolationMode)\n im = ins.controls.interpolation();\n \n // Declare the actual input/output variable\n switch (ins.op) {\n case DxbcOpcode::DclInput:\n case DxbcOpcode::DclInputSgv:\n case DxbcOpcode::DclInputSiv:\n case DxbcOpcode::DclInputPs:\n case DxbcOpcode::DclInputPsSgv:\n case DxbcOpcode::DclInputPsSiv:\n this->emitDclInput(regIdx, regDim, ins.dst[0].mask, sv, im);\n break;\n \n case DxbcOpcode::DclOutput:\n case DxbcOpcode::DclOutputSgv:\n case DxbcOpcode::DclOutputSiv:\n this->emitDclOutput(regIdx, regDim, ins.dst[0].mask, sv, im);\n break;\n \n default:\n Logger::err(str::format(\n \"DxbcCompiler: Unexpected opcode: \",\n ins.op));\n }\n } break;\n \n case DxbcOperandType::InputThreadId: {\n m_cs.builtinGlobalInvocationId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInGlobalInvocationId,\n \"vThreadId\");\n } break;\n \n case DxbcOperandType::InputThreadGroupId: {\n m_cs.builtinWorkgroupId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInWorkgroupId,\n \"vThreadGroupId\");\n } break;\n \n case DxbcOperandType::InputThreadIdInGroup: {\n m_cs.builtinLocalInvocationId = emitNewBuiltinVariable({\n { 
DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationId,\n \"vThreadIdInGroup\");\n } break;\n \n case DxbcOperandType::InputThreadIndexInGroup: {\n m_cs.builtinLocalInvocationIndex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationIndex,\n \"vThreadIndexInGroup\");\n } break;\n \n case DxbcOperandType::InputCoverageMask: {\n m_ps.builtinSampleMaskIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 1 },\n spv::StorageClassInput },\n spv::BuiltInSampleMask,\n \"vCoverage\");\n } break;\n \n case DxbcOperandType::OutputCoverageMask: {\n m_ps.builtinSampleMaskOut = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 1 },\n spv::StorageClassOutput },\n spv::BuiltInSampleMask,\n \"oMask\");\n } break;\n \n case DxbcOperandType::OutputDepth: {\n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeDepthReplacing);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepth\");\n } break;\n \n case DxbcOperandType::OutputStencilRef: {\n m_module.enableExtension(\"SPV_EXT_shader_stencil_export\");\n m_module.enableCapability(spv::CapabilityStencilExportEXT);\n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeStencilRefReplacingEXT);\n m_ps.builtinStencilRef = emitNewBuiltinVariable({\n { DxbcScalarType::Sint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragStencilRefEXT,\n \"oStencilRef\");\n } break;\n\n case DxbcOperandType::OutputDepthGe: {\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthReplacing);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthGreater);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepthGe\");\n } break;\n \n case DxbcOperandType::OutputDepthLe: {\n 
m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthReplacing);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthLess);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepthLe\");\n } break;\n \n case DxbcOperandType::InputPrimitiveId: {\n m_primitiveIdIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInPrimitiveId,\n \"vPrim\");\n } break;\n \n case DxbcOperandType::InputDomainPoint: {\n m_ds.builtinTessCoord = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInTessCoord,\n \"vDomain\");\n } break;\n \n case DxbcOperandType::InputForkInstanceId:\n case DxbcOperandType::InputJoinInstanceId: {\n auto phase = this->getCurrentHsForkJoinPhase();\n \n phase->instanceIdPtr = m_module.newVar(\n m_module.defPointerType(\n m_module.defIntType(32, 0),\n spv::StorageClassFunction),\n spv::StorageClassFunction);\n \n m_module.opStore(phase->instanceIdPtr, phase->instanceId);\n m_module.setDebugName(phase->instanceIdPtr,\n ins.dst[0].type == DxbcOperandType::InputForkInstanceId\n ? 
\"vForkInstanceId\" : \"vJoinInstanceId\");\n } break;\n \n case DxbcOperandType::OutputControlPointId: {\n // This system value map to the invocation\n // ID, which has been declared already.\n } break;\n \n case DxbcOperandType::InputPatchConstant:\n case DxbcOperandType::OutputControlPoint: {\n // These have been declared as global input and\n // output arrays, so there's nothing left to do.\n } break;\n \n case DxbcOperandType::InputGsInstanceId: {\n m_gs.builtinInvocationId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInvocationId,\n \"vInstanceID\");\n } break;\n \n case DxbcOperandType::InputInnerCoverage: {\n m_module.enableExtension(\"SPV_EXT_fragment_fully_covered\");\n m_module.enableCapability(spv::CapabilityFragmentFullyCoveredEXT);\n\n // This is bool in SPIR-V but uint32 in DXBC. A bool value of\n // false must be 0, and bit 1 must be set to represent true.\n uint32_t builtinId = emitNewBuiltinVariable({\n { DxbcScalarType::Bool, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInFullyCoveredEXT,\n nullptr);\n\n m_ps.builtinInnerCoverageId = emitNewVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassPrivate });\n\n m_module.setDebugName(m_ps.builtinInnerCoverageId, \"vInnerCoverage\");\n\n uint32_t boolTypeId = m_module.defBoolType();\n uint32_t uintTypeId = m_module.defIntType(32, 0);\n\n m_module.opStore(m_ps.builtinInnerCoverageId,\n m_module.opSelect(uintTypeId,\n m_module.opLoad(boolTypeId, builtinId),\n m_module.constu32(1),\n m_module.constu32(0)));\n } break;\n\n default:\n Logger::err(str::format(\n \"DxbcCompiler: Unsupported operand type declaration: \",\n ins.dst[0].type));\n \n }\n }\n void emitDclInput(\n uint32_t regIdx,\n uint32_t regDim,\n DxbcRegMask regMask,\n DxbcSystemValue sv,\n DxbcInterpolationMode im) {\n // Avoid declaring the same variable multiple times.\n // This may happen when multiple system values are\n // mapped to different parts of the same 
register.\n if (m_vRegs.at(regIdx).id == 0 && sv == DxbcSystemValue::None) {\n const DxbcVectorType regType = getInputRegType(regIdx);\n \n DxbcRegisterInfo info;\n info.type.ctype = regType.ctype;\n info.type.ccount = regType.ccount;\n info.type.alength = regDim;\n info.sclass = spv::StorageClassInput;\n \n const uint32_t varId = emitNewVariable(info);\n \n m_module.decorateLocation(varId, regIdx);\n m_module.setDebugName(varId, str::format(\"v\", regIdx).c_str());\n \n m_vRegs.at(regIdx) = { regType, varId };\n \n // Interpolation mode, used in pixel shaders\n if (im == DxbcInterpolationMode::Constant)\n m_module.decorate(varId, spv::DecorationFlat);\n \n if (im == DxbcInterpolationMode::LinearCentroid\n || im == DxbcInterpolationMode::LinearNoPerspectiveCentroid)\n m_module.decorate(varId, spv::DecorationCentroid);\n \n if (im == DxbcInterpolationMode::LinearNoPerspective\n || im == DxbcInterpolationMode::LinearNoPerspectiveCentroid\n || im == DxbcInterpolationMode::LinearNoPerspectiveSample)\n m_module.decorate(varId, spv::DecorationNoPerspective);\n \n if (im == DxbcInterpolationMode::LinearSample\n || im == DxbcInterpolationMode::LinearNoPerspectiveSample) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n m_module.decorate(varId, spv::DecorationSample);\n }\n\n if (m_moduleInfo.options.forceSampleRateShading) {\n if (im == DxbcInterpolationMode::Linear\n || im == DxbcInterpolationMode::LinearNoPerspective) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n m_module.decorate(varId, spv::DecorationSample);\n }\n }\n\n // Declare the input slot as defined\n m_inputMask |= 1u << regIdx;\n m_vArrayLength = std::max(m_vArrayLength, regIdx + 1);\n } else if (sv != DxbcSystemValue::None) {\n // Add a new system value mapping if needed\n bool skipSv = sv == DxbcSystemValue::ClipDistance\n || sv == DxbcSystemValue::CullDistance;\n \n if (!skipSv)\n m_vMappings.push_back({ regIdx, regMask, sv });\n }\n }\n void emitDclOutput(\n uint32_t 
regIdx,\n uint32_t regDim,\n DxbcRegMask regMask,\n DxbcSystemValue sv,\n DxbcInterpolationMode im) {\n // Add a new system value mapping if needed. Clip\n // and cull distances are handled separately.\n if (sv != DxbcSystemValue::None\n && sv != DxbcSystemValue::ClipDistance\n && sv != DxbcSystemValue::CullDistance)\n m_oMappings.push_back({ regIdx, regMask, sv });\n \n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n // Hull shaders don't use standard outputs\n if (getCurrentHsForkJoinPhase() != nullptr)\n m_hs.outputPerPatchMask |= 1 << regIdx;\n } else if (m_oRegs.at(regIdx).id == 0) {\n // Avoid declaring the same variable multiple times.\n // This may happen when multiple system values are\n // mapped to different parts of the same register.\n const DxbcVectorType regType = getOutputRegType(regIdx);\n \n DxbcRegisterInfo info;\n info.type.ctype = regType.ctype;\n info.type.ccount = regType.ccount;\n info.type.alength = regDim;\n info.sclass = spv::StorageClassOutput;\n\n // In xfb mode, we set up the actual\n // output vars when emitting a vertex\n if (m_moduleInfo.xfb != nullptr)\n info.sclass = spv::StorageClassPrivate;\n \n // In geometry shaders, don't duplicate system value outputs\n // to stay within device limits. 
The pixel shader will read\n // all GS system value outputs as system value inputs.\n if (m_programInfo.type() == DxbcProgramType::GeometryShader && sv != DxbcSystemValue::None)\n info.sclass = spv::StorageClassPrivate;\n\n const uint32_t varId = this->emitNewVariable(info);\n m_module.setDebugName(varId, str::format(\"o\", regIdx).c_str());\n \n if (info.sclass == spv::StorageClassOutput) {\n m_module.decorateLocation(varId, regIdx);\n\n // Add index decoration for potential dual-source blending\n if (m_programInfo.type() == DxbcProgramType::PixelShader)\n m_module.decorateIndex(varId, 0);\n\n // Declare vertex positions in all stages as invariant, even if\n // this is not the last stage, to help with potential Z fighting.\n if (sv == DxbcSystemValue::Position && m_moduleInfo.options.invariantPosition)\n m_module.decorate(varId, spv::DecorationInvariant);\n }\n \n m_oRegs.at(regIdx) = { regType, varId };\n \n // Declare the output slot as defined\n m_outputMask |= 1u << regIdx;\n }\n }\n void emitDclConstantBuffer(\n const DxbcShaderInstruction& ins) {\n // dcl_constant_buffer has one operand with two indices:\n // (0) Constant buffer register ID (cb#)\n // (1) Number of constants in the buffer\n uint32_t bufferId = ins.dst[0].idx[0].offset;\n uint32_t elementCount = ins.dst[0].idx[1].offset;\n\n // With dynamic indexing, games will often index constant buffers\n // out of bounds. Declare an upper bound to stay within spec.\n if (ins.controls.accessType() == DxbcConstantBufferAccessType::DynamicallyIndexed)\n elementCount = 4096;\n\n this->emitDclConstantBufferVar(bufferId, elementCount, 4u,\n str::format(\"cb\", bufferId).c_str());\n }\n void emitDclConstantBufferVar(\n uint32_t regIdx,\n uint32_t numConstants,\n uint32_t numComponents,\n const char* name) {\n // Uniform buffer data is stored as a fixed-size array\n // of 4x32-bit vectors. 
SPIR-V requires explicit strides.\n const uint32_t arrayType = m_module.defArrayTypeUnique(\n getVectorTypeId({ DxbcScalarType::Float32, numComponents }),\n m_module.constu32(numConstants));\n m_module.decorateArrayStride(arrayType, sizeof(uint32_t) * numComponents);\n \n // SPIR-V requires us to put that array into a\n // struct and decorate that struct as a block.\n const uint32_t structType = m_module.defStructTypeUnique(1, &arrayType);\n \n m_module.decorate(structType, spv::DecorationBlock);\n m_module.memberDecorateOffset(structType, 0, 0);\n \n m_module.setDebugName (structType, str::format(name, \"_t\").c_str());\n m_module.setDebugMemberName (structType, 0, \"m\");\n \n // Variable that we'll use to access the buffer\n const uint32_t varId = m_module.newVar(\n m_module.defPointerType(structType, spv::StorageClassUniform),\n spv::StorageClassUniform);\n \n m_module.setDebugName(varId, name);\n \n // Compute the DXVK binding slot index for the buffer.\n // D3D11 needs to bind the actual buffers to this slot.\n uint32_t bindingId = computeConstantBufferBinding(\n m_programInfo.type(), regIdx);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n\n DxbcConstantBuffer buf;\n buf.varId = varId;\n buf.size = numConstants;\n m_constantBuffers.at(regIdx) = buf;\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER };\n binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.access = VK_ACCESS_UNIFORM_READ_BIT;\n binding.resourceBinding = bindingId;\n binding.uboSet = true;\n m_bindings.push_back(binding);\n }\n void emitDclSampler(\n const DxbcShaderInstruction& ins) {\n // dclSampler takes one operand:\n // (dst0) The sampler register to declare\n const uint32_t samplerId = ins.dst[0].idx[0].offset;\n \n // The sampler type is opaque, but we still have to\n // define a pointer and a variable in oder to use it\n const uint32_t samplerType = 
m_module.defSamplerType();\n const uint32_t samplerPtrType = m_module.defPointerType(\n samplerType, spv::StorageClassUniformConstant);\n \n // Define the sampler variable\n const uint32_t varId = m_module.newVar(samplerPtrType,\n spv::StorageClassUniformConstant);\n m_module.setDebugName(varId,\n str::format(\"s\", samplerId).c_str());\n \n m_samplers.at(samplerId).varId = varId;\n m_samplers.at(samplerId).typeId = samplerType;\n \n // Compute binding slot index for the sampler\n uint32_t bindingId = computeSamplerBinding(\n m_programInfo.type(), samplerId);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_SAMPLER };\n binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.resourceBinding = bindingId;\n m_bindings.push_back(binding);\n }\n void emitDclStream(\n const DxbcShaderInstruction& ins) {\n if (ins.dst[0].idx[0].offset != 0 && m_moduleInfo.xfb == nullptr)\n Logger::err(\"Dxbc: Multiple streams not supported\");\n }\n void emitDclResourceTyped(\n const DxbcShaderInstruction& ins) {\n // dclResource takes two operands:\n // (dst0) The resource register ID\n // (imm0) The resource return type\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n \n // We also handle unordered access views here\n const bool isUav = ins.op == DxbcOpcode::DclUavTyped;\n \n if (isUav) {\n if (m_moduleInfo.options.supportsTypedUavLoadR32)\n m_module.enableCapability(spv::CapabilityStorageImageReadWithoutFormat);\n m_module.enableCapability(spv::CapabilityStorageImageWriteWithoutFormat);\n }\n \n // Defines the type of the resource (texture2D, ...)\n const DxbcResourceDim resourceType = ins.controls.resourceDim();\n \n // Defines the type of a read operation. 
DXBC has the ability\n // to define four different types whereas SPIR-V only allows\n // one, but in practice this should not be much of a problem.\n auto xType = static_cast(\n bit::extract(ins.imm[0].u32, 0, 3));\n auto yType = static_cast(\n bit::extract(ins.imm[0].u32, 4, 7));\n auto zType = static_cast(\n bit::extract(ins.imm[0].u32, 8, 11));\n auto wType = static_cast(\n bit::extract(ins.imm[0].u32, 12, 15));\n \n if ((xType != yType) || (xType != zType) || (xType != wType))\n Logger::warn(\"DxbcCompiler: dcl_resource: Ignoring resource return types\");\n \n // Declare the actual sampled type\n const DxbcScalarType sampledType = [xType] {\n switch (xType) {\n // FIXME is this correct? There's no documentation about it\n case DxbcResourceReturnType::Mixed: return DxbcScalarType::Uint32;\n // FIXME do we have to manually clamp writes to SNORM/UNORM resources?\n case DxbcResourceReturnType::Snorm: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Unorm: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Float: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Sint: return DxbcScalarType::Sint32;\n case DxbcResourceReturnType::Uint: return DxbcScalarType::Uint32;\n default: throw DxvkError(str::format(\"DxbcCompiler: Invalid sampled type: \", xType));\n }\n }();\n \n // Declare the resource type\n const uint32_t sampledTypeId = getScalarTypeId(sampledType);\n const DxbcImageInfo typeInfo = getResourceType(resourceType, isUav); \n \n // Declare additional capabilities if necessary\n switch (resourceType) {\n case DxbcResourceDim::Buffer:\n m_module.enableCapability(isUav\n ? spv::CapabilityImageBuffer\n : spv::CapabilitySampledBuffer);\n break;\n \n case DxbcResourceDim::Texture1D:\n case DxbcResourceDim::Texture1DArr:\n m_module.enableCapability(isUav\n ? 
spv::CapabilityImage1D\n : spv::CapabilitySampled1D);\n break;\n \n case DxbcResourceDim::TextureCubeArr:\n m_module.enableCapability(\n spv::CapabilitySampledCubeArray);\n break;\n \n default:\n // No additional capabilities required\n break;\n }\n \n // If the read-without-format capability is not set and this\n // image is access via a typed load, or if atomic operations\n // are used,, we must define the image format explicitly.\n spv::ImageFormat imageFormat = spv::ImageFormatUnknown;\n \n if (isUav) {\n if ((m_analysis->uavInfos[registerId].accessAtomicOp)\n || (m_analysis->uavInfos[registerId].accessTypedLoad\n && !m_moduleInfo.options.supportsTypedUavLoadR32))\n imageFormat = getScalarImageFormat(sampledType);\n }\n \n // We do not know whether the image is going to be used as\n // a color image or a depth image yet, but we can pick the\n // correct type when creating a sampled image object.\n const uint32_t imageTypeId = m_module.defImageType(sampledTypeId,\n typeInfo.dim, 0, typeInfo.array, typeInfo.ms, typeInfo.sampled,\n imageFormat);\n \n // We'll declare the texture variable with the color type\n // and decide which one to use when the texture is sampled.\n const uint32_t resourcePtrType = m_module.defPointerType(\n imageTypeId, spv::StorageClassUniformConstant);\n \n const uint32_t varId = m_module.newVar(resourcePtrType,\n spv::StorageClassUniformConstant);\n \n m_module.setDebugName(varId,\n str::format(isUav ? \"u\" : \"t\", registerId).c_str());\n \n // Compute the DXVK binding slot index for the resource.\n // D3D11 needs to bind the actual resource to this slot.\n uint32_t bindingId = isUav\n ? 
computeUavBinding(m_programInfo.type(), registerId)\n : computeSrvBinding(m_programInfo.type(), registerId);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n // Declare a specialization constant which will\n // store whether or not the resource is bound.\n if (isUav) {\n DxbcUav uav;\n uav.type = DxbcResourceType::Typed;\n uav.imageInfo = typeInfo;\n uav.varId = varId;\n uav.ctrId = 0;\n uav.sampledType = sampledType;\n uav.sampledTypeId = sampledTypeId;\n uav.imageTypeId = imageTypeId;\n uav.structStride = 0;\n uav.coherence = getUavCoherence(registerId, ins.controls.uavFlags());\n uav.isRawSsbo = false;\n m_uavs.at(registerId) = uav;\n } else {\n DxbcShaderResource res;\n res.type = DxbcResourceType::Typed;\n res.imageInfo = typeInfo;\n res.varId = varId;\n res.sampledType = sampledType;\n res.sampledTypeId = sampledTypeId;\n res.imageTypeId = imageTypeId;\n res.colorTypeId = imageTypeId;\n res.depthTypeId = 0;\n res.structStride = 0;\n res.isRawSsbo = false;\n \n if ((sampledType == DxbcScalarType::Float32)\n && (resourceType == DxbcResourceDim::Texture1D\n || resourceType == DxbcResourceDim::Texture1DArr\n || resourceType == DxbcResourceDim::Texture2D\n || resourceType == DxbcResourceDim::Texture2DArr\n || resourceType == DxbcResourceDim::TextureCube\n || resourceType == DxbcResourceDim::TextureCubeArr)) {\n res.depthTypeId = m_module.defImageType(sampledTypeId,\n typeInfo.dim, 1, typeInfo.array, typeInfo.ms, typeInfo.sampled,\n spv::ImageFormatUnknown);\n }\n \n m_textures.at(registerId) = res;\n }\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { };\n binding.viewType = typeInfo.vtype;\n binding.resourceBinding = bindingId;\n binding.isMultisampled = typeInfo.ms;\n\n if (isUav) {\n binding.descriptorType = resourceType == DxbcResourceDim::Buffer\n ? 
VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER\n : VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;\n binding.access = m_analysis->uavInfos[registerId].accessFlags;\n\n if (!m_analysis->uavInfos[registerId].nonInvariantAccess)\n binding.accessOp = m_analysis->uavInfos[registerId].accessOp;\n\n if (!(binding.access & VK_ACCESS_SHADER_WRITE_BIT))\n m_module.decorate(varId, spv::DecorationNonWritable);\n if (!(binding.access & VK_ACCESS_SHADER_READ_BIT))\n m_module.decorate(varId, spv::DecorationNonReadable);\n } else {\n binding.descriptorType = resourceType == DxbcResourceDim::Buffer\n ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER\n : VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;\n binding.access = VK_ACCESS_SHADER_READ_BIT;\n }\n\n m_bindings.push_back(binding);\n }\n void emitDclResourceRawStructured(\n const DxbcShaderInstruction& ins) {\n // dcl_resource_raw and dcl_uav_raw take one argument:\n // (dst0) The resource register ID\n // dcl_resource_structured and dcl_uav_structured take two arguments:\n // (dst0) The resource register ID\n // (imm0) Structure stride, in bytes\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n \n const bool isUav = ins.op == DxbcOpcode::DclUavRaw\n || ins.op == DxbcOpcode::DclUavStructured;\n \n const bool isStructured = ins.op == DxbcOpcode::DclUavStructured\n || ins.op == DxbcOpcode::DclResourceStructured;\n \n const DxbcScalarType sampledType = DxbcScalarType::Uint32;\n const uint32_t sampledTypeId = getScalarTypeId(sampledType);\n \n const DxbcImageInfo typeInfo = { spv::DimBuffer, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_MAX_ENUM };\n \n // Declare the resource type\n uint32_t resTypeId = 0;\n uint32_t varId = 0;\n \n // Write back resource info\n DxbcResourceType resType = isStructured\n ? DxbcResourceType::Structured\n : DxbcResourceType::Raw;\n \n uint32_t resStride = isStructured\n ? ins.imm[0].u32\n : 0;\n \n uint32_t resAlign = isStructured\n ? 
(resStride & -resStride)\n : 16;\n \n // Compute the DXVK binding slot index for the resource.\n uint32_t bindingId = isUav\n ? computeUavBinding(m_programInfo.type(), registerId)\n : computeSrvBinding(m_programInfo.type(), registerId);\n \n // Test whether we should use a raw SSBO for this resource\n bool hasSparseFeedback = isUav\n ? m_analysis->uavInfos[registerId].sparseFeedback\n : m_analysis->srvInfos[registerId].sparseFeedback;\n\n bool useRawSsbo = m_moduleInfo.options.minSsboAlignment <= resAlign && !hasSparseFeedback;\n \n if (useRawSsbo) {\n uint32_t elemType = getScalarTypeId(DxbcScalarType::Uint32);\n uint32_t arrayType = m_module.defRuntimeArrayTypeUnique(elemType);\n uint32_t structType = m_module.defStructTypeUnique(1, &arrayType);\n uint32_t ptrType = m_module.defPointerType(structType, spv::StorageClassStorageBuffer);\n\n resTypeId = m_module.defPointerType(elemType, spv::StorageClassStorageBuffer);\n varId = m_module.newVar(ptrType, spv::StorageClassStorageBuffer);\n \n m_module.decorateArrayStride(arrayType, sizeof(uint32_t));\n m_module.decorate(structType, spv::DecorationBlock);\n m_module.memberDecorateOffset(structType, 0, 0);\n\n m_module.setDebugName(structType,\n str::format(isUav ? \"u\" : \"t\", registerId, \"_t\").c_str());\n m_module.setDebugMemberName(structType, 0, \"m\");\n } else {\n // Structured and raw buffers are represented as\n // texel buffers consisting of 32-bit integers.\n m_module.enableCapability(isUav\n ? spv::CapabilityImageBuffer\n : spv::CapabilitySampledBuffer);\n \n resTypeId = m_module.defImageType(sampledTypeId,\n typeInfo.dim, 0, typeInfo.array, typeInfo.ms, typeInfo.sampled,\n spv::ImageFormatR32ui);\n \n varId = m_module.newVar(\n m_module.defPointerType(resTypeId, spv::StorageClassUniformConstant),\n spv::StorageClassUniformConstant);\n }\n\n m_module.setDebugName(varId,\n str::format(isUav ? 
\"u\" : \"t\", registerId).c_str());\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n if (isUav) {\n DxbcUav uav;\n uav.type = resType;\n uav.imageInfo = typeInfo;\n uav.varId = varId;\n uav.ctrId = 0;\n uav.sampledType = sampledType;\n uav.sampledTypeId = sampledTypeId;\n uav.imageTypeId = resTypeId;\n uav.structStride = resStride;\n uav.coherence = getUavCoherence(registerId, ins.controls.uavFlags());\n uav.isRawSsbo = useRawSsbo;\n m_uavs.at(registerId) = uav;\n } else {\n DxbcShaderResource res;\n res.type = resType;\n res.imageInfo = typeInfo;\n res.varId = varId;\n res.sampledType = sampledType;\n res.sampledTypeId = sampledTypeId;\n res.imageTypeId = resTypeId;\n res.colorTypeId = resTypeId;\n res.depthTypeId = 0;\n res.structStride = resStride;\n res.isRawSsbo = useRawSsbo;\n m_textures.at(registerId) = res;\n }\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { };\n binding.descriptorType = useRawSsbo\n ? VK_DESCRIPTOR_TYPE_STORAGE_BUFFER\n : (isUav ? 
VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);\n binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.resourceBinding = bindingId;\n binding.access = VK_ACCESS_SHADER_READ_BIT;\n\n if (isUav) {\n binding.access = m_analysis->uavInfos[registerId].accessFlags;\n\n if (!m_analysis->uavInfos[registerId].nonInvariantAccess)\n binding.accessOp = m_analysis->uavInfos[registerId].accessOp;\n }\n\n if (useRawSsbo || isUav) {\n if (!(binding.access & VK_ACCESS_SHADER_WRITE_BIT))\n m_module.decorate(varId, spv::DecorationNonWritable);\n if (!(binding.access & VK_ACCESS_SHADER_READ_BIT))\n m_module.decorate(varId, spv::DecorationNonReadable);\n }\n\n m_bindings.push_back(binding);\n\n // If supported, we'll be using raw access chains to access this\n if (!m_hasRawAccessChains && m_moduleInfo.options.supportsRawAccessChains) {\n m_module.enableExtension(\"SPV_NV_raw_access_chains\");\n m_module.enableCapability(spv::CapabilityRawAccessChainsNV);\n\n m_hasRawAccessChains = true;\n }\n }\n void emitDclThreadGroupSharedMemory(\n const DxbcShaderInstruction& ins) {\n // dcl_tgsm_raw takes two arguments:\n // (dst0) The resource register ID\n // (imm0) Block size, in bytes\n // dcl_tgsm_structured takes three arguments:\n // (dst0) The resource register ID\n // (imm0) Structure stride, in bytes\n // (imm1) Structure count\n const bool isStructured = ins.op == DxbcOpcode::DclThreadGroupSharedMemoryStructured;\n \n const uint32_t regId = ins.dst[0].idx[0].offset;\n \n if (regId >= m_gRegs.size())\n m_gRegs.resize(regId + 1);\n \n const uint32_t elementStride = isStructured ? ins.imm[0].u32 : 0;\n const uint32_t elementCount = isStructured ? ins.imm[1].u32 : ins.imm[0].u32;\n \n DxbcRegisterInfo varInfo;\n varInfo.type.ctype = DxbcScalarType::Uint32;\n varInfo.type.ccount = 1;\n varInfo.type.alength = isStructured\n ? 
elementCount * elementStride / 4\n : elementCount / 4;\n varInfo.sclass = spv::StorageClassWorkgroup;\n \n m_gRegs[regId].type = isStructured\n ? DxbcResourceType::Structured\n : DxbcResourceType::Raw;\n m_gRegs[regId].elementStride = elementStride;\n m_gRegs[regId].elementCount = elementCount;\n m_gRegs[regId].varId = emitNewVariable(varInfo);\n \n m_module.setDebugName(m_gRegs[regId].varId,\n str::format(\"g\", regId).c_str());\n }\n void emitDclGsInputPrimitive(\n const DxbcShaderInstruction& ins) {\n // The input primitive type is stored within in the\n // control bits of the opcode token. In SPIR-V, we\n // have to define an execution mode.\n const auto mode = [&] {\n switch (ins.controls.primitive()) {\n case DxbcPrimitive::Point: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_POINT_LIST, spv::ExecutionModeInputPoints);\n case DxbcPrimitive::Line: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeInputLines);\n case DxbcPrimitive::Triangle: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeTriangles);\n case DxbcPrimitive::LineAdj: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY, spv::ExecutionModeInputLinesAdjacency);\n case DxbcPrimitive::TriangleAdj: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY, spv::ExecutionModeInputTrianglesAdjacency);\n default: throw DxvkError(\"DxbcCompiler: Unsupported primitive type\");\n }\n }();\n\n m_gs.inputPrimitive = ins.controls.primitive();\n m_module.setExecutionMode(m_entryPointId, mode.second);\n m_inputTopology = mode.first;\n \n emitDclInputArray(primitiveVertexCount(m_gs.inputPrimitive));\n }\n void emitDclGsOutputTopology(\n const DxbcShaderInstruction& ins) {\n // The input primitive topology is stored within in the\n // control bits of the opcode token. 
In SPIR-V, we have\n // to define an execution mode.\n auto mode = [&] {\n switch (ins.controls.primitiveTopology()) {\n case DxbcPrimitiveTopology::PointList: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_POINT_LIST, spv::ExecutionModeOutputPoints);\n case DxbcPrimitiveTopology::LineStrip: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeOutputLineStrip);\n case DxbcPrimitiveTopology::TriangleStrip: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeOutputTriangleStrip);\n default: throw DxvkError(\"DxbcCompiler: Unsupported primitive topology\");\n }\n }();\n \n m_outputTopology = mode.first;\n m_module.setExecutionMode(m_entryPointId, mode.second);\n }\n void emitDclMaxOutputVertexCount(\n const DxbcShaderInstruction& ins) {\n // dcl_max_output_vertex_count has one operand:\n // (imm0) The maximum number of vertices\n m_gs.outputVertexCount = ins.imm[0].u32;\n \n m_module.setOutputVertices(m_entryPointId, m_gs.outputVertexCount);\n }\n void emitDclInputControlPointCount(\n const DxbcShaderInstruction& ins) {\n // dcl_input_control_points has the control point\n // count embedded within the opcode token.\n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n m_hs.vertexCountIn = ins.controls.controlPointCount();\n \n emitDclInputArray(m_hs.vertexCountIn); \n } else {\n m_ds.vertexCountIn = ins.controls.controlPointCount();\n \n m_ds.inputPerPatch = emitTessInterfacePerPatch (spv::StorageClassInput);\n m_ds.inputPerVertex = emitTessInterfacePerVertex(spv::StorageClassInput, m_ds.vertexCountIn);\n }\n }\n void emitDclOutputControlPointCount(\n const DxbcShaderInstruction& ins) {\n // dcl_output_control_points has the control point\n // count embedded within the opcode token.\n m_hs.vertexCountOut = ins.controls.controlPointCount();\n \n m_hs.outputPerPatch = emitTessInterfacePerPatch(spv::StorageClassPrivate);\n m_hs.outputPerVertex = emitTessInterfacePerVertex(spv::StorageClassOutput, m_hs.vertexCountOut);\n 
\n m_module.setOutputVertices(m_entryPointId, m_hs.vertexCountOut);\n }\n void emitDclHsMaxTessFactor(\n const DxbcShaderInstruction& ins) {\n m_hs.maxTessFactor = ins.imm[0].f32;\n }\n void emitDclTessDomain(\n const DxbcShaderInstruction& ins) {\n auto mode = [&] {\n switch (ins.controls.tessDomain()) {\n case DxbcTessDomain::Isolines: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeIsolines);\n case DxbcTessDomain::Triangles: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeTriangles);\n case DxbcTessDomain::Quads: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeQuads);\n default: throw DxvkError(\"Dxbc: Invalid tess domain\");\n }\n }();\n \n m_outputTopology = mode.first;\n m_module.setExecutionMode(m_entryPointId, mode.second);\n }\n void emitDclTessPartitioning(\n const DxbcShaderInstruction& ins) {\n const spv::ExecutionMode executionMode = [&] {\n switch (ins.controls.tessPartitioning()) {\n case DxbcTessPartitioning::Pow2:\n case DxbcTessPartitioning::Integer: return spv::ExecutionModeSpacingEqual;\n case DxbcTessPartitioning::FractOdd: return spv::ExecutionModeSpacingFractionalOdd;\n case DxbcTessPartitioning::FractEven: return spv::ExecutionModeSpacingFractionalEven;\n default: throw DxvkError(\"Dxbc: Invalid tess partitioning\");\n }\n }();\n \n m_module.setExecutionMode(m_entryPointId, executionMode);\n }\n void emitDclTessOutputPrimitive(\n const DxbcShaderInstruction& ins) {\n switch (ins.controls.tessOutputPrimitive()) {\n case DxbcTessOutputPrimitive::Point:\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModePointMode);\n break;\n \n case DxbcTessOutputPrimitive::Line:\n break;\n \n case DxbcTessOutputPrimitive::TriangleCw:\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeVertexOrderCw);\n break;\n \n case DxbcTessOutputPrimitive::TriangleCcw:\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeVertexOrderCcw);\n break;\n \n 
default:\n throw DxvkError(\"Dxbc: Invalid tess output primitive\");\n }\n }\n void emitDclThreadGroup(\n const DxbcShaderInstruction& ins) {\n // dcl_thread_group has three operands:\n // (imm0) Number of threads in X dimension\n // (imm1) Number of threads in Y dimension\n // (imm2) Number of threads in Z dimension\n m_cs.workgroupSizeX = ins.imm[0].u32;\n m_cs.workgroupSizeY = ins.imm[1].u32;\n m_cs.workgroupSizeZ = ins.imm[2].u32;\n\n m_module.setLocalSize(m_entryPointId,\n ins.imm[0].u32, ins.imm[1].u32, ins.imm[2].u32);\n }\n void emitDclGsInstanceCount(\n const DxbcShaderInstruction& ins) {\n // dcl_gs_instance_count has one operand:\n // (imm0) Number of geometry shader invocations\n m_module.setInvocations(m_entryPointId, ins.imm[0].u32);\n m_gs.invocationCount = ins.imm[0].u32;\n }\n uint32_t emitDclUavCounter(\n uint32_t regId) {\n // Declare a structure type which holds the UAV counter\n if (m_uavCtrStructType == 0) {\n const uint32_t t_u32 = m_module.defIntType(32, 0);\n const uint32_t t_struct = m_module.defStructTypeUnique(1, &t_u32);\n \n m_module.decorate(t_struct, spv::DecorationBlock);\n m_module.memberDecorateOffset(t_struct, 0, 0);\n \n m_module.setDebugName (t_struct, \"uav_meta\");\n m_module.setDebugMemberName(t_struct, 0, \"ctr\");\n \n m_uavCtrStructType = t_struct;\n m_uavCtrPointerType = m_module.defPointerType(\n t_struct, spv::StorageClassStorageBuffer);\n }\n \n // Declare the buffer variable\n const uint32_t varId = m_module.newVar(\n m_uavCtrPointerType, spv::StorageClassStorageBuffer);\n \n m_module.setDebugName(varId,\n str::format(\"u\", regId, \"_meta\").c_str());\n \n uint32_t bindingId = computeUavCounterBinding(\n m_programInfo.type(), regId);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n // Declare the storage buffer binding\n DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER };\n binding.resourceBinding = bindingId;\n binding.viewType = 
VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n m_bindings.push_back(binding);\n\n return varId;\n }\n void emitDclImmediateConstantBuffer(\n const DxbcShaderInstruction& ins) {\n if (m_icbArray)\n throw DxvkError(\"DxbcCompiler: Immediate constant buffer already declared\");\n \n if ((ins.customDataSize & 0x3) != 0)\n throw DxvkError(\"DxbcCompiler: Immediate constant buffer size not a multiple of four DWORDs\");\n\n // A lot of the time we'll be dealing with a scalar or vec2\n // array here, there's no reason to emit all those zeroes.\n uint32_t componentCount = 1u;\n\n for (uint32_t i = 0; i < ins.customDataSize; i += 4u) {\n for (uint32_t c = componentCount; c < 4u; c++) {\n if (ins.customData[i + c])\n componentCount = c + 1u;\n }\n\n if (componentCount == 4u)\n break;\n }\n\n uint32_t vectorCount = (ins.customDataSize / 4u);\n uint32_t dwordCount = vectorCount * componentCount;\n\n if (dwordCount <= Icb_MaxBakedDwords) {\n this->emitDclImmediateConstantBufferBaked(\n ins.customDataSize, ins.customData, componentCount);\n } else {\n this->emitDclImmediateConstantBufferUbo(\n ins.customDataSize, ins.customData, componentCount);\n }\n }\n void emitDclImmediateConstantBufferBaked(\n uint32_t dwordCount,\n const uint32_t* dwordArray,\n uint32_t componentCount) {\n // Declare individual vector constants as 4x32-bit vectors\n small_vector vectorIds;\n \n DxbcVectorType vecType;\n vecType.ctype = DxbcScalarType::Uint32;\n vecType.ccount = componentCount;\n \n uint32_t vectorTypeId = getVectorTypeId(vecType);\n \n for (uint32_t i = 0; i < dwordCount; i += 4u) {\n std::array scalarIds = { };\n\n for (uint32_t c = 0; c < componentCount; c++)\n scalarIds[c] = m_module.constu32(dwordArray[i + c]);\n\n uint32_t id = scalarIds[0];\n\n if (componentCount > 1u)\n id = m_module.constComposite(vectorTypeId, componentCount, scalarIds.data());\n\n vectorIds.push_back(id);\n }\n\n // Pad array with one entry of zeroes so 
that we can\n // handle out-of-bounds accesses more conveniently.\n vectorIds.push_back(emitBuildZeroVector(vecType).id);\n\n // Declare the array that contains all the vectors\n DxbcArrayType arrInfo;\n arrInfo.ctype = DxbcScalarType::Uint32;\n arrInfo.ccount = componentCount;\n arrInfo.alength = vectorIds.size();\n\n uint32_t arrayTypeId = getArrayTypeId(arrInfo);\n uint32_t arrayId = m_module.constComposite(\n arrayTypeId, vectorIds.size(), vectorIds.data());\n\n // Declare the variable that will hold the constant\n // data and initialize it with the constant array.\n uint32_t pointerTypeId = m_module.defPointerType(\n arrayTypeId, spv::StorageClassPrivate);\n\n m_icbArray = m_module.newVarInit(\n pointerTypeId, spv::StorageClassPrivate,\n arrayId);\n\n m_module.setDebugName(m_icbArray, \"icb\");\n m_module.decorate(m_icbArray, spv::DecorationNonWritable);\n\n m_icbComponents = componentCount;\n m_icbSize = dwordCount / 4u;\n }\n void emitDclImmediateConstantBufferUbo(\n uint32_t dwordCount,\n const uint32_t* dwordArray,\n uint32_t componentCount) {\n uint32_t vectorCount = dwordCount / 4u;\n\n // Tightly pack vec2 or scalar arrays if possible. 
Don't bother with\n // vec3 since we'd rather have properly vectorized loads in that case.\n if (m_moduleInfo.options.supportsTightIcbPacking && componentCount <= 2u)\n m_icbComponents = componentCount;\n else\n m_icbComponents = 4u;\n\n // Immediate constant buffer can be read out of bounds, declare\n // it with the maximum possible size and rely on robustness.\n this->emitDclConstantBufferVar(Icb_BindingSlotId, 4096u, m_icbComponents, \"icb\");\n\n m_icbData.reserve(vectorCount * componentCount);\n\n for (uint32_t i = 0; i < dwordCount; i += 4u) {\n for (uint32_t c = 0; c < m_icbComponents; c++)\n m_icbData.push_back(dwordArray[i + c]);\n }\n\n m_icbSize = vectorCount;\n }\n void emitCustomData(\n const DxbcShaderInstruction& ins) {\n switch (ins.customDataType) {\n case DxbcCustomDataClass::ImmConstBuf:\n return emitDclImmediateConstantBuffer(ins);\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unsupported custom data block: \",\n ins.customDataType));\n }\n }\n void emitVectorAlu(\n const DxbcShaderInstruction& ins) {\n std::array src;\n \n for (uint32_t i = 0; i < ins.srcCount; i++)\n src.at(i) = emitRegisterLoad(ins.src[i], ins.dst[0].mask);\n \n DxbcRegisterValue dst;\n dst.type.ctype = ins.dst[0].dataType;\n dst.type.ccount = ins.dst[0].mask.popCount();\n\n if (isDoubleType(ins.dst[0].dataType))\n dst.type.ccount /= 2;\n \n const uint32_t typeId = getVectorTypeId(dst.type);\n \n switch (ins.op) {\n /////////////////////\n // Move instructions\n case DxbcOpcode::Mov:\n case DxbcOpcode::DMov:\n dst.id = src.at(0).id;\n break;\n \n /////////////////////////////////////\n // ALU operations on float32 numbers\n case DxbcOpcode::Add:\n case DxbcOpcode::DAdd:\n dst.id = m_module.opFAdd(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Div:\n case DxbcOpcode::DDiv:\n dst.id = m_module.opFDiv(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Exp:\n dst.id = m_module.opExp2(\n typeId, src.at(0).id);\n break;\n \n 
case DxbcOpcode::Frc:\n dst.id = m_module.opFract(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::Log:\n dst.id = m_module.opLog2(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::Mad:\n case DxbcOpcode::DFma:\n if (ins.controls.precise()) {\n // FXC only emits precise mad if the shader explicitly uses\n // the HLSL mad()/fma() intrinsics, let's preserve that.\n dst.id = m_module.opFFma(typeId,\n src.at(0).id, src.at(1).id, src.at(2).id);\n } else {\n dst.id = m_module.opFMul(typeId, src.at(0).id, src.at(1).id);\n dst.id = m_module.opFAdd(typeId, dst.id, src.at(2).id);\n }\n break;\n \n case DxbcOpcode::Max:\n case DxbcOpcode::DMax:\n dst.id = m_module.opNMax(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Min:\n case DxbcOpcode::DMin:\n dst.id = m_module.opNMin(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Mul:\n case DxbcOpcode::DMul:\n dst.id = m_module.opFMul(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Rcp:\n dst.id = m_module.opFDiv(typeId,\n emitBuildConstVecf32(\n 1.0f, 1.0f, 1.0f, 1.0f,\n ins.dst[0].mask).id,\n src.at(0).id);\n break;\n \n case DxbcOpcode::DRcp:\n dst.id = m_module.opFDiv(typeId,\n emitBuildConstVecf64(1.0, 1.0,\n ins.dst[0].mask).id,\n src.at(0).id);\n break;\n \n case DxbcOpcode::RoundNe:\n dst.id = m_module.opRoundEven(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::RoundNi:\n dst.id = m_module.opFloor(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::RoundPi:\n dst.id = m_module.opCeil(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::RoundZ:\n dst.id = m_module.opTrunc(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::Rsq:\n dst.id = m_module.opInverseSqrt(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::Sqrt:\n dst.id = m_module.opSqrt(\n typeId, src.at(0).id);\n break;\n \n /////////////////////////////////////\n // ALU operations on signed integers\n case DxbcOpcode::IAdd:\n dst.id = m_module.opIAdd(typeId,\n 
src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::IMad:\n case DxbcOpcode::UMad:\n dst.id = m_module.opIAdd(typeId,\n m_module.opIMul(typeId,\n src.at(0).id, src.at(1).id),\n src.at(2).id);\n break;\n \n case DxbcOpcode::IMax:\n dst.id = m_module.opSMax(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::IMin:\n dst.id = m_module.opSMin(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::INeg:\n dst.id = m_module.opSNegate(\n typeId, src.at(0).id);\n break;\n \n ///////////////////////////////////////\n // ALU operations on unsigned integers\n case DxbcOpcode::UMax:\n dst.id = m_module.opUMax(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::UMin:\n dst.id = m_module.opUMin(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n ///////////////////////////////////////\n // Bit operations on unsigned integers\n case DxbcOpcode::And:\n dst.id = m_module.opBitwiseAnd(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Not:\n dst.id = m_module.opNot(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::Or:\n dst.id = m_module.opBitwiseOr(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Xor:\n dst.id = m_module.opBitwiseXor(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::CountBits:\n dst.id = m_module.opBitCount(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::BfRev:\n dst.id = m_module.opBitReverse(\n typeId, src.at(0).id);\n break;\n \n ///////////////////////////\n // Conversion instructions\n case DxbcOpcode::ItoF:\n dst.id = m_module.opConvertStoF(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::UtoF:\n dst.id = m_module.opConvertUtoF(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::FtoI:\n dst.id = m_module.opConvertFtoS(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::FtoU:\n dst.id = m_module.opConvertFtoU(\n typeId, src.at(0).id);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: 
Unhandled instruction: \",\n ins.op));\n return;\n }\n \n if (ins.controls.precise() || m_precise)\n m_module.decorate(dst.id, spv::DecorationNoContraction);\n \n // Store computed value\n dst = emitDstOperandModifiers(dst, ins.modifiers);\n emitRegisterStore(ins.dst[0], dst);\n }\n void emitVectorCmov(\n const DxbcShaderInstruction& ins) {\n // movc and swapc have the following operands:\n // (dst0) The first destination register\n // (dst1) The second destination register (swapc only)\n // (src0) The condition vector\n // (src1) Vector to select from if the condition is not 0\n // (src2) Vector to select from if the condition is 0\n DxbcRegMask condMask = ins.dst[0].mask;\n\n if (ins.dst[0].dataType == DxbcScalarType::Float64) {\n condMask = DxbcRegMask(\n condMask[0] && condMask[1],\n condMask[2] && condMask[3],\n false, false);\n }\n \n const DxbcRegisterValue condition = emitRegisterLoad(ins.src[0], condMask);\n const DxbcRegisterValue selectTrue = emitRegisterLoad(ins.src[1], ins.dst[0].mask);\n const DxbcRegisterValue selectFalse = emitRegisterLoad(ins.src[2], ins.dst[0].mask);\n \n uint32_t componentCount = condMask.popCount();\n \n // We'll compare against a vector of zeroes to generate a\n // boolean vector, which in turn will be used by OpSelect\n uint32_t zeroType = m_module.defIntType(32, 0);\n uint32_t boolType = m_module.defBoolType();\n \n uint32_t zero = m_module.constu32(0);\n \n if (componentCount > 1) {\n zeroType = m_module.defVectorType(zeroType, componentCount);\n boolType = m_module.defVectorType(boolType, componentCount);\n \n const std::array zeroVec = { zero, zero, zero, zero };\n zero = m_module.constComposite(zeroType, componentCount, zeroVec.data());\n }\n \n // In case of swapc, the second destination operand receives\n // the output that a cmov instruction would normally get\n const uint32_t trueIndex = ins.op == DxbcOpcode::Swapc ? 
1 : 0;\n \n for (uint32_t i = 0; i < ins.dstCount; i++) {\n DxbcRegisterValue result;\n result.type.ctype = ins.dst[i].dataType;\n result.type.ccount = componentCount;\n result.id = m_module.opSelect(\n getVectorTypeId(result.type),\n m_module.opINotEqual(boolType, condition.id, zero),\n i == trueIndex ? selectTrue.id : selectFalse.id,\n i != trueIndex ? selectTrue.id : selectFalse.id);\n \n result = emitDstOperandModifiers(result, ins.modifiers);\n emitRegisterStore(ins.dst[i], result);\n }\n }\n void emitVectorCmp(\n const DxbcShaderInstruction& ins) {\n // Compare instructions have three operands:\n // (dst0) The destination register\n // (src0) The first vector to compare\n // (src1) The second vector to compare\n uint32_t componentCount = ins.dst[0].mask.popCount();\n\n // For 64-bit operations, we'll return a 32-bit\n // vector, so we have to adjust the read mask\n DxbcRegMask srcMask = ins.dst[0].mask;\n\n if (isDoubleType(ins.src[0].dataType)) {\n srcMask = DxbcRegMask(\n componentCount > 0, componentCount > 0,\n componentCount > 1, componentCount > 1);\n }\n\n const std::array src = {\n emitRegisterLoad(ins.src[0], srcMask),\n emitRegisterLoad(ins.src[1], srcMask),\n };\n \n // Condition, which is a boolean vector used\n // to select between the ~0u and 0u vectors.\n uint32_t condition = 0;\n uint32_t conditionType = m_module.defBoolType();\n \n if (componentCount > 1)\n conditionType = m_module.defVectorType(conditionType, componentCount);\n \n bool invert = false;\n\n switch (ins.op) {\n case DxbcOpcode::Ne:\n case DxbcOpcode::DNe:\n invert = true;\n [[fallthrough]];\n\n case DxbcOpcode::Eq:\n case DxbcOpcode::DEq:\n condition = m_module.opFOrdEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Ge:\n case DxbcOpcode::DGe:\n condition = m_module.opFOrdGreaterThanEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Lt:\n case DxbcOpcode::DLt:\n condition = m_module.opFOrdLessThan(\n 
conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::IEq:\n condition = m_module.opIEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::IGe:\n condition = m_module.opSGreaterThanEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::ILt:\n condition = m_module.opSLessThan(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::INe:\n condition = m_module.opINotEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::UGe:\n condition = m_module.opUGreaterThanEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::ULt:\n condition = m_module.opULessThan(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n \n // Generate constant vectors for selection\n uint32_t sFalse = m_module.constu32( 0u);\n uint32_t sTrue = m_module.constu32(~0u);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = componentCount;\n \n const uint32_t typeId = getVectorTypeId(result.type);\n \n if (componentCount > 1) {\n const std::array vFalse = { sFalse, sFalse, sFalse, sFalse };\n const std::array vTrue = { sTrue, sTrue, sTrue, sTrue };\n \n sFalse = m_module.constComposite(typeId, componentCount, vFalse.data());\n sTrue = m_module.constComposite(typeId, componentCount, vTrue .data());\n }\n \n if (invert)\n std::swap(sFalse, sTrue);\n\n // Perform component-wise mask selection\n // based on the condition evaluated above.\n result.id = m_module.opSelect(\n typeId, condition, sTrue, sFalse);\n \n emitRegisterStore(ins.dst[0], result);\n }\n void emitVectorDeriv(\n const DxbcShaderInstruction& ins) {\n // Derivative instructions have two operands:\n // (dst0) Destination register for the derivative\n // (src0) The operand to compute the derivative of\n DxbcRegisterValue 
value = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n const uint32_t typeId = getVectorTypeId(value.type);\n \n switch (ins.op) {\n case DxbcOpcode::DerivRtx:\n value.id = m_module.opDpdx(typeId, value.id);\n break;\n \n case DxbcOpcode::DerivRty:\n value.id = m_module.opDpdy(typeId, value.id);\n break;\n \n case DxbcOpcode::DerivRtxCoarse:\n value.id = m_module.opDpdxCoarse(typeId, value.id);\n break;\n \n case DxbcOpcode::DerivRtyCoarse:\n value.id = m_module.opDpdyCoarse(typeId, value.id);\n break;\n \n case DxbcOpcode::DerivRtxFine:\n value.id = m_module.opDpdxFine(typeId, value.id);\n break;\n \n case DxbcOpcode::DerivRtyFine:\n value.id = m_module.opDpdyFine(typeId, value.id);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n \n value = emitDstOperandModifiers(value, ins.modifiers);\n emitRegisterStore(ins.dst[0], value);\n }\n void emitVectorDot(\n const DxbcShaderInstruction& ins) {\n const DxbcRegMask srcMask(true,\n ins.op >= DxbcOpcode::Dp2,\n ins.op >= DxbcOpcode::Dp3,\n ins.op >= DxbcOpcode::Dp4);\n \n const std::array src = {\n emitRegisterLoad(ins.src[0], srcMask),\n emitRegisterLoad(ins.src[1], srcMask),\n };\n \n DxbcRegisterValue dst;\n dst.type.ctype = ins.dst[0].dataType;\n dst.type.ccount = 1;\n dst.id = 0;\n\n uint32_t componentType = getVectorTypeId(dst.type);\n uint32_t componentCount = srcMask.popCount();\n\n for (uint32_t i = 0; i < componentCount; i++) {\n if (dst.id) {\n dst.id = m_module.opFFma(componentType,\n m_module.opCompositeExtract(componentType, src.at(0).id, 1, &i),\n m_module.opCompositeExtract(componentType, src.at(1).id, 1, &i),\n dst.id);\n } else {\n dst.id = m_module.opFMul(componentType,\n m_module.opCompositeExtract(componentType, src.at(0).id, 1, &i),\n m_module.opCompositeExtract(componentType, src.at(1).id, 1, &i));\n }\n\n // Unconditionally mark as precise since the exact order of operation\n // matters for some games, even if the 
instruction itself is not marked\n // as precise.\n m_module.decorate(dst.id, spv::DecorationNoContraction);\n }\n\n dst = emitDstOperandModifiers(dst, ins.modifiers);\n emitRegisterStore(ins.dst[0], dst);\n }\n void emitVectorIdiv(\n const DxbcShaderInstruction& ins) {\n // udiv has four operands:\n // (dst0) Quotient destination register\n // (dst1) Remainder destination register\n // (src0) The first vector to compare\n // (src1) The second vector to compare\n if (ins.dst[0].type == DxbcOperandType::Null\n && ins.dst[1].type == DxbcOperandType::Null)\n return;\n \n // FIXME support this if applications require it\n if (ins.dst[0].type != DxbcOperandType::Null\n && ins.dst[1].type != DxbcOperandType::Null\n && ins.dst[0].mask != ins.dst[1].mask) {\n Logger::warn(\"DxbcCompiler: Idiv with different destination masks not supported\");\n return;\n }\n \n // Load source operands as integers with the\n // mask of one non-NULL destination operand\n const DxbcRegMask srcMask =\n ins.dst[0].type != DxbcOperandType::Null\n ? 
ins.dst[0].mask\n : ins.dst[1].mask;\n \n const std::array src = {\n emitRegisterLoad(ins.src[0], srcMask),\n emitRegisterLoad(ins.src[1], srcMask),\n };\n \n // Division by zero will return 0xffffffff for both results\n auto bvecId = getVectorTypeId({ DxbcScalarType::Bool, srcMask.popCount() });\n\n DxbcRegisterValue const0 = emitBuildConstVecu32( 0u, 0u, 0u, 0u, srcMask);\n DxbcRegisterValue constff = emitBuildConstVecu32(~0u, ~0u, ~0u, ~0u, srcMask);\n\n uint32_t cmpValue = m_module.opINotEqual(bvecId, src.at(1).id, const0.id);\n\n // Compute results only if the destination\n // operands are not NULL.\n if (ins.dst[0].type != DxbcOperandType::Null) {\n DxbcRegisterValue quotient;\n quotient.type.ctype = ins.dst[0].dataType;\n quotient.type.ccount = ins.dst[0].mask.popCount();\n \n quotient.id = m_module.opUDiv(\n getVectorTypeId(quotient.type),\n src.at(0).id, src.at(1).id);\n\n quotient.id = m_module.opSelect(\n getVectorTypeId(quotient.type),\n cmpValue, quotient.id, constff.id);\n \n quotient = emitDstOperandModifiers(quotient, ins.modifiers);\n emitRegisterStore(ins.dst[0], quotient);\n }\n \n if (ins.dst[1].type != DxbcOperandType::Null) {\n DxbcRegisterValue remainder;\n remainder.type.ctype = ins.dst[1].dataType;\n remainder.type.ccount = ins.dst[1].mask.popCount();\n \n remainder.id = m_module.opUMod(\n getVectorTypeId(remainder.type),\n src.at(0).id, src.at(1).id);\n\n remainder.id = m_module.opSelect(\n getVectorTypeId(remainder.type),\n cmpValue, remainder.id, constff.id);\n \n remainder = emitDstOperandModifiers(remainder, ins.modifiers);\n emitRegisterStore(ins.dst[1], remainder);\n }\n }\n void emitVectorImul(\n const DxbcShaderInstruction& ins) {\n // imul and umul have four operands:\n // (dst0) High destination register\n // (dst1) Low destination register\n // (src0) The first vector to compare\n // (src1) The second vector to compare\n if (ins.dst[0].type == DxbcOperandType::Null) {\n if (ins.dst[1].type == DxbcOperandType::Null)\n return;\n 
\n // If dst0 is NULL, this instruction behaves just\n // like any other three-operand ALU instruction\n const std::array src = {\n emitRegisterLoad(ins.src[0], ins.dst[1].mask),\n emitRegisterLoad(ins.src[1], ins.dst[1].mask),\n };\n \n DxbcRegisterValue result;\n result.type.ctype = ins.dst[1].dataType;\n result.type.ccount = ins.dst[1].mask.popCount();\n result.id = m_module.opIMul(\n getVectorTypeId(result.type),\n src.at(0).id, src.at(1).id);\n \n result = emitDstOperandModifiers(result, ins.modifiers);\n emitRegisterStore(ins.dst[1], result);\n } else {\n // TODO implement this\n Logger::warn(\"DxbcCompiler: Extended Imul not yet supported\");\n }\n }\n void emitVectorMsad(\n const DxbcShaderInstruction& ins) {\n // msad has four operands:\n // (dst0) Destination\n // (src0) Reference (packed uint8)\n // (src1) Source (packed uint8)\n // (src2) Accumulator\n DxbcRegisterValue refReg = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n DxbcRegisterValue srcReg = emitRegisterLoad(ins.src[1], ins.dst[0].mask);\n DxbcRegisterValue result = emitRegisterLoad(ins.src[2], ins.dst[0].mask);\n \n auto typeId = getVectorTypeId(result.type);\n auto bvecId = getVectorTypeId({ DxbcScalarType::Bool, result.type.ccount });\n\n for (uint32_t i = 0; i < 4; i++) {\n auto shift = m_module.constu32(8 * i);\n auto count = m_module.constu32(8);\n\n auto ref = m_module.opBitFieldUExtract(typeId, refReg.id, shift, count);\n auto src = m_module.opBitFieldUExtract(typeId, srcReg.id, shift, count);\n\n auto zero = emitBuildConstVecu32(0, 0, 0, 0, ins.dst[0].mask);\n auto mask = m_module.opINotEqual(bvecId, ref, zero.id);\n\n auto diff = m_module.opSAbs(typeId, m_module.opISub(typeId, ref, src));\n result.id = m_module.opSelect(typeId, mask, m_module.opIAdd(typeId, result.id, diff), result.id);\n }\n\n result = emitDstOperandModifiers(result, ins.modifiers);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitVectorShift(\n const DxbcShaderInstruction& ins) {\n // Shift operations have 
three operands:\n // (dst0) The destination register\n // (src0) The register to shift\n // (src1) The shift amount (scalar)\n DxbcRegisterValue shiftReg = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n DxbcRegisterValue countReg = emitRegisterLoad(ins.src[1], ins.dst[0].mask);\n\n if (ins.src[1].type != DxbcOperandType::Imm32)\n countReg = emitRegisterMaskBits(countReg, 0x1F);\n \n if (countReg.type.ccount == 1)\n countReg = emitRegisterExtend(countReg, shiftReg.type.ccount);\n \n DxbcRegisterValue result;\n result.type.ctype = ins.dst[0].dataType;\n result.type.ccount = ins.dst[0].mask.popCount();\n \n switch (ins.op) {\n case DxbcOpcode::IShl:\n result.id = m_module.opShiftLeftLogical(\n getVectorTypeId(result.type),\n shiftReg.id, countReg.id);\n break;\n \n case DxbcOpcode::IShr:\n result.id = m_module.opShiftRightArithmetic(\n getVectorTypeId(result.type),\n shiftReg.id, countReg.id);\n break;\n \n case DxbcOpcode::UShr:\n result.id = m_module.opShiftRightLogical(\n getVectorTypeId(result.type),\n shiftReg.id, countReg.id);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n \n result = emitDstOperandModifiers(result, ins.modifiers);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitVectorSinCos(\n const DxbcShaderInstruction& ins) {\n // sincos has three operands:\n // (dst0) Destination register for sin(x)\n // (dst1) Destination register for cos(x)\n // (src0) Source operand x\n \n // Load source operand as 32-bit float vector.\n const DxbcRegisterValue srcValue = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, true, true, true));\n\n uint32_t typeId = getScalarTypeId(srcValue.type.ctype);\n\n DxbcRegisterValue sinVector = { };\n sinVector.type.ctype = DxbcScalarType::Float32;\n\n DxbcRegisterValue cosVector = { };\n cosVector.type.ctype = DxbcScalarType::Float32;\n\n // Only compute sincos for enabled components\n std::array sinIds = { };\n std::array cosIds = { };\n\n for 
(uint32_t i = 0; i < 4; i++) {\n const uint32_t sinIndex = 0u;\n const uint32_t cosIndex = 1u;\n\n if (ins.dst[0].mask[i] || ins.dst[1].mask[i]) {\n uint32_t sincosId = m_module.opSinCos(m_module.opCompositeExtract(typeId, srcValue.id, 1u, &i), !m_moduleInfo.options.sincosEmulation);\n\n if (ins.dst[0].type != DxbcOperandType::Null && ins.dst[0].mask[i])\n sinIds[sinVector.type.ccount++] = m_module.opCompositeExtract(typeId, sincosId, 1u, &sinIndex);\n\n if (ins.dst[1].type != DxbcOperandType::Null && ins.dst[1].mask[i])\n cosIds[cosVector.type.ccount++] = m_module.opCompositeExtract(typeId, sincosId, 1u, &cosIndex);\n }\n }\n\n if (sinVector.type.ccount) {\n sinVector.id = sinVector.type.ccount > 1u\n ? m_module.opCompositeConstruct(getVectorTypeId(sinVector.type), sinVector.type.ccount, sinIds.data())\n : sinIds[0];\n\n emitRegisterStore(ins.dst[0], sinVector);\n }\n\n if (cosVector.type.ccount) {\n cosVector.id = cosVector.type.ccount > 1u\n ? m_module.opCompositeConstruct(getVectorTypeId(cosVector.type), cosVector.type.ccount, cosIds.data())\n : cosIds[0];\n\n emitRegisterStore(ins.dst[1], cosVector);\n }\n }\n void emitGeometryEmit(\n const DxbcShaderInstruction& ins) {\n // In xfb mode we might have multiple streams, so\n // we have to figure out which stream to write to\n uint32_t streamId = 0;\n uint32_t streamVar = 0;\n\n if (m_moduleInfo.xfb != nullptr) {\n streamId = ins.dstCount > 0 ? 
ins.dst[0].idx[0].offset : 0;\n streamVar = m_module.constu32(streamId);\n }\n\n // Checking the negation is easier for EmitThenCut/EmitThenCutStream\n bool doEmit = ins.op != DxbcOpcode::Cut && ins.op != DxbcOpcode::CutStream;\n bool doCut = ins.op != DxbcOpcode::Emit && ins.op != DxbcOpcode::EmitStream;\n\n if (doEmit) {\n if (m_gs.needsOutputSetup)\n emitOutputSetup();\n emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);\n emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);\n emitXfbOutputSetup(streamId, false);\n m_module.opEmitVertex(streamVar);\n }\n\n if (doCut)\n m_module.opEndPrimitive(streamVar);\n }\n void emitAtomic(\n const DxbcShaderInstruction& ins) {\n // atomic_* operations have the following operands:\n // (dst0) Destination u# or g# register\n // (src0) Index into the texture or buffer\n // (src1) The source value for the operation\n // (src2) Second source operand (optional)\n // imm_atomic_* operations have the following operands:\n // (dst0) Register that receives the result\n // (dst1) Destination u# or g# register\n // (srcX) As above\n const DxbcBufferInfo bufferInfo = getBufferInfo(ins.dst[ins.dstCount - 1]);\n \n bool isImm = ins.dstCount == 2;\n bool isUav = ins.dst[ins.dstCount - 1].type == DxbcOperandType::UnorderedAccessView;\n bool isSsbo = bufferInfo.isSsbo;\n\n // Retrieve destination pointer for the atomic operation>\n const DxbcRegisterPointer pointer = emitGetAtomicPointer(\n ins.dst[ins.dstCount - 1], ins.src[0]);\n \n // Load source values\n std::array src;\n \n for (uint32_t i = 1; i < ins.srcCount; i++) {\n src[i - 1] = emitRegisterBitcast(\n emitRegisterLoad(ins.src[i], DxbcRegMask(true, false, false, false)),\n pointer.type.ctype);\n }\n \n // Define memory scope and semantics based on the operands\n uint32_t scope = 0;\n uint32_t semantics = 0;\n \n if (isUav) {\n scope = spv::ScopeQueueFamily;\n semantics = spv::MemorySemanticsAcquireReleaseMask;\n\n semantics |= isSsbo\n ? 
spv::MemorySemanticsUniformMemoryMask\n : spv::MemorySemanticsImageMemoryMask;\n } else {\n scope = spv::ScopeWorkgroup;\n semantics = spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask;\n }\n\n const uint32_t scopeId = m_module.constu32(scope);\n const uint32_t semanticsId = m_module.constu32(semantics);\n \n // Perform the atomic operation on the given pointer\n DxbcRegisterValue value;\n value.type = pointer.type;\n value.id = 0;\n \n // The result type, which is a scalar integer\n const uint32_t typeId = getVectorTypeId(value.type);\n \n switch (ins.op) {\n case DxbcOpcode::AtomicCmpStore:\n case DxbcOpcode::ImmAtomicCmpExch:\n value.id = m_module.opAtomicCompareExchange(\n typeId, pointer.id, scopeId, semanticsId,\n m_module.constu32(spv::MemorySemanticsMaskNone),\n src[1].id, src[0].id);\n break;\n \n case DxbcOpcode::ImmAtomicExch:\n value.id = m_module.opAtomicExchange(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicIAdd:\n case DxbcOpcode::ImmAtomicIAdd:\n value.id = m_module.opAtomicIAdd(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicAnd:\n case DxbcOpcode::ImmAtomicAnd:\n value.id = m_module.opAtomicAnd(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicOr:\n case DxbcOpcode::ImmAtomicOr:\n value.id = m_module.opAtomicOr(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicXor:\n case DxbcOpcode::ImmAtomicXor:\n value.id = m_module.opAtomicXor(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicIMin:\n case DxbcOpcode::ImmAtomicIMin:\n value.id = m_module.opAtomicSMin(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n \n case DxbcOpcode::AtomicIMax:\n case DxbcOpcode::ImmAtomicIMax:\n value.id = m_module.opAtomicSMax(typeId,\n pointer.id, scopeId, semanticsId,\n src[0].id);\n break;\n 
    // Handles imm_atomic_alloc and imm_atomic_consume on a UAV's hidden
    // append/consume counter. Lazily declares the counter buffer on first
    // use, performs an atomic +1/-1 on it, and stores the D3D-defined
    // return value to dst0: the pre-increment value for alloc, the
    // post-decrement value for consume. Unknown opcodes only log a warning.
    void emitAtomicCounter(
      const DxbcShaderInstruction&  ins) {
      // imm_atomic_alloc and imm_atomic_consume have the following operands:
      // (dst0) The register that will hold the old counter value
      // (dst1) The UAV whose counter is going to be modified
      const uint32_t registerId = ins.dst[1].idx[0].offset;
      
      // Declare the counter buffer for this UAV on first use
      if (m_uavs.at(registerId).ctrId == 0)
        m_uavs.at(registerId).ctrId = emitDclUavCounter(registerId);
      
      // Get a pointer to the atomic counter in question
      DxbcRegisterInfo ptrType;
      ptrType.type.ctype   = DxbcScalarType::Uint32;
      ptrType.type.ccount  = 1;
      ptrType.type.alength = 0;
      ptrType.sclass = spv::StorageClassStorageBuffer;
      
      uint32_t zeroId = m_module.consti32(0);
      uint32_t ptrId  = m_module.opAccessChain(
        getPointerTypeId(ptrType),
        m_uavs.at(registerId).ctrId,
        1, &zeroId);
      
      // Define memory scope and semantics based on the operands
      uint32_t scope     = spv::ScopeQueueFamily;
      uint32_t semantics = spv::MemorySemanticsUniformMemoryMask
                         | spv::MemorySemanticsAcquireReleaseMask;
      
      uint32_t scopeId     = m_module.constu32(scope);
      uint32_t semanticsId = m_module.constu32(semantics);
      
      // Compute the result value
      DxbcRegisterValue value;
      value.type.ctype  = DxbcScalarType::Uint32;
      value.type.ccount = 1;
      
      uint32_t typeId = getVectorTypeId(value.type);
      
      switch (ins.op) {
        case DxbcOpcode::ImmAtomicAlloc:
          // Atomic add returns the old (pre-increment) value,
          // which is exactly what alloc is defined to return
          value.id = m_module.opAtomicIAdd(typeId, ptrId,
            scopeId, semanticsId, m_module.constu32(1));
          break;
          
        case DxbcOpcode::ImmAtomicConsume:
          // The atomic sub returns the old value; consume must return
          // the decremented value, so subtract one more non-atomically
          value.id = m_module.opAtomicISub(typeId, ptrId,
            scopeId, semanticsId, m_module.constu32(1));
          value.id = m_module.opISub(typeId, value.id,
            m_module.constu32(1));
          break;
        
        default:
          Logger::warn(str::format(
            "DxbcCompiler: Unhandled instruction: ",
            ins.op));
          return;
      }

      // Store the result
      emitRegisterStore(ins.dst[0], value);
    }
    // Translates the DXBC 'sync' instruction into an OpControlBarrier
    // (when thread-group execution sync is requested) or a plain
    // OpMemoryBarrier (memory-only sync). The sync scope is encoded in
    // the instruction's control bits rather than in operands. If no
    // flag maps to any barrier, only a warning is logged.
    void emitBarrier(
      const DxbcShaderInstruction&  ins) {
      // sync takes no operands. Instead, the synchronization
      // scope is defined by the operand control bits.
      const DxbcSyncFlags flags = ins.controls.syncFlags();
      
      uint32_t executionScope  = spv::ScopeInvocation;
      uint32_t memoryScope     = spv::ScopeInvocation;
      uint32_t memorySemantics = 0;
      
      if (flags.test(DxbcSyncFlag::ThreadsInGroup))
        executionScope = spv::ScopeWorkgroup;
      
      if (flags.test(DxbcSyncFlag::ThreadGroupSharedMemory)) {
        memoryScope      = spv::ScopeWorkgroup;
        memorySemantics |= spv::MemorySemanticsWorkgroupMemoryMask
                        |  spv::MemorySemanticsAcquireReleaseMask
                        |  spv::MemorySemanticsMakeAvailableMask
                        |  spv::MemorySemanticsMakeVisibleMask;
      }
      
      if (flags.test(DxbcSyncFlag::UavMemoryGroup)) {
        memoryScope      = spv::ScopeWorkgroup;
        memorySemantics |= spv::MemorySemanticsImageMemoryMask
                        |  spv::MemorySemanticsUniformMemoryMask
                        |  spv::MemorySemanticsAcquireReleaseMask
                        |  spv::MemorySemanticsMakeAvailableMask
                        |  spv::MemorySemanticsMakeVisibleMask;
      }
      
      if (flags.test(DxbcSyncFlag::UavMemoryGlobal)) {
        memoryScope = spv::ScopeQueueFamily;

        // NOTE(review): compute shaders without a globally coherent UAV get
        // demoted to workgroup scope here — presumably because cross-workgroup
        // visibility is only required with globallycoherent resources; confirm.
        if (m_programInfo.type() == DxbcProgramType::ComputeShader && !m_hasGloballyCoherentUav)
          memoryScope = spv::ScopeWorkgroup;

        memorySemantics |= spv::MemorySemanticsImageMemoryMask
                        |  spv::MemorySemanticsUniformMemoryMask
                        |  spv::MemorySemanticsAcquireReleaseMask
                        |  spv::MemorySemanticsMakeAvailableMask
                        |  spv::MemorySemanticsMakeVisibleMask;
      }
      
      // A control barrier implies the memory barrier, so emit at most one
      if (executionScope != spv::ScopeInvocation) {
        m_module.opControlBarrier(
          m_module.constu32(executionScope),
          m_module.constu32(memoryScope),
          m_module.constu32(memorySemantics));
      } else if (memoryScope != spv::ScopeInvocation) {
        m_module.opMemoryBarrier(
          m_module.constu32(memoryScope),
          m_module.constu32(memorySemantics));
      } else {
        Logger::warn("DxbcCompiler: sync instruction has no effect");
      }
    }
    // Translates ibfe/ubfe to per-component OpBitFieldSExtract /
    // OpBitFieldUExtract. Bit count and offset are masked to 0..31
    // unless they are immediates, matching D3D behaviour. Results are
    // recombined into a vector when more than one component is written.
    void emitBitExtract(
      const DxbcShaderInstruction&  ins) {
      // ibfe and ubfe take the following arguments:
      // (dst0) The destination register
      // (src0) Number of bits to extract
      // (src1) Offset of the bits to extract
      // (src2) Register to extract bits from
      const bool isSigned = ins.op == DxbcOpcode::IBfe;
      
      DxbcRegisterValue bitCnt = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
      DxbcRegisterValue bitOfs = emitRegisterLoad(ins.src[1], ins.dst[0].mask);

      // Only the low five bits of count/offset are significant
      if (ins.src[0].type != DxbcOperandType::Imm32)
        bitCnt = emitRegisterMaskBits(bitCnt, 0x1F);
      
      if (ins.src[1].type != DxbcOperandType::Imm32)
        bitOfs = emitRegisterMaskBits(bitOfs, 0x1F);
      
      const DxbcRegisterValue src = emitRegisterLoad(ins.src[2], ins.dst[0].mask);
      
      // SPIR-V's bitfield ops take scalar offset/count operands,
      // so process each vector component individually
      const uint32_t componentCount = src.type.ccount;
      std::array<uint32_t, 4> componentIds = {{ 0, 0, 0, 0 }};
      
      for (uint32_t i = 0; i < componentCount; i++) {
        const DxbcRegisterValue currBitCnt = emitRegisterExtract(bitCnt, DxbcRegMask::select(i));
        const DxbcRegisterValue currBitOfs = emitRegisterExtract(bitOfs, DxbcRegMask::select(i));
        const DxbcRegisterValue currSrc    = emitRegisterExtract(src,    DxbcRegMask::select(i));
        
        const uint32_t typeId = getVectorTypeId(currSrc.type);
        
        componentIds[i] = isSigned
          ? m_module.opBitFieldSExtract(typeId, currSrc.id, currBitOfs.id, currBitCnt.id)
          : m_module.opBitFieldUExtract(typeId, currSrc.id, currBitOfs.id, currBitCnt.id);
      }
      
      DxbcRegisterValue result;
      result.type = src.type;
      result.id   = componentCount > 1
        ? m_module.opCompositeConstruct(
            getVectorTypeId(result.type),
            componentCount, componentIds.data())
        : componentIds[0];
      emitRegisterStore(ins.dst[0], result);
    }
    // Translates bfi (bitfield insert) to per-component OpBitFieldInsert.
    // Count and offset are masked to 0..31 unless immediate, then each
    // component of src3 has 'count' bits from src2 inserted at 'offset'.
    void emitBitInsert(
      const DxbcShaderInstruction&  ins) {
      // bfi takes the following arguments:
      // (dst0) The destination register
      // (src0) Number of bits to insert
      // (src1) Offset at which to insert the bits
      // (src2) Register to take bits from
      // (src3) Register to replace bits in
      DxbcRegisterValue bitCnt = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
      DxbcRegisterValue bitOfs = emitRegisterLoad(ins.src[1], ins.dst[0].mask);
      
      // Only the low five bits of count/offset are significant
      if (ins.src[0].type != DxbcOperandType::Imm32)
        bitCnt = emitRegisterMaskBits(bitCnt, 0x1F);
      
      if (ins.src[1].type != DxbcOperandType::Imm32)
        bitOfs = emitRegisterMaskBits(bitOfs, 0x1F);
      
      const DxbcRegisterValue insert = emitRegisterLoad(ins.src[2], ins.dst[0].mask);
      const DxbcRegisterValue base   = emitRegisterLoad(ins.src[3], ins.dst[0].mask);
      
      // SPIR-V's OpBitFieldInsert takes scalar offset/count operands,
      // so process each vector component individually
      const uint32_t componentCount = base.type.ccount;
      std::array<uint32_t, 4> componentIds = {{ 0, 0, 0, 0 }};
      
      for (uint32_t i = 0; i < componentCount; i++) {
        const DxbcRegisterValue currBitCnt = emitRegisterExtract(bitCnt, DxbcRegMask::select(i));
        const DxbcRegisterValue currBitOfs = emitRegisterExtract(bitOfs, DxbcRegMask::select(i));
        const DxbcRegisterValue currInsert = emitRegisterExtract(insert, DxbcRegMask::select(i));
        const DxbcRegisterValue currBase   = emitRegisterExtract(base,   DxbcRegMask::select(i));
        
        componentIds[i] = m_module.opBitFieldInsert(
          getVectorTypeId(currBase.type),
          currBase.id, currInsert.id,
          currBitOfs.id, currBitCnt.id);
      }
      
      DxbcRegisterValue result;
      result.type = base.type;
      result.id   = componentCount > 1
        ? m_module.opCompositeConstruct(
            getVectorTypeId(result.type),
            componentCount, componentIds.data())
        : componentIds[0];
      emitRegisterStore(ins.dst[0], result);
    }
    // Translates firstbit_lo/hi/shi to OpFindILsb/OpFindUMsb/OpFindSMsb.
    // For the 'hi' variants, DXBC counts bit positions from the MSB while
    // SPIR-V counts from the LSB, so non-'not found' results are mapped
    // through (31 - n); the ~0u 'not found' sentinel is preserved as-is.
    void emitBitScan(
      const DxbcShaderInstruction&  ins) {
      // firstbit(lo|hi|shi) have two operands:
      // (dst0) The destination operand
      // (src0) Source operand to scan
      DxbcRegisterValue src = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
      
      DxbcRegisterValue dst;
      dst.type.ctype  = ins.dst[0].dataType;
      dst.type.ccount = ins.dst[0].mask.popCount();
      
      // Result type, should be an unsigned integer
      const uint32_t typeId = getVectorTypeId(dst.type);
      
      switch (ins.op) {
        case DxbcOpcode::FirstBitLo:  dst.id = m_module.opFindILsb(typeId, src.id); break;
        case DxbcOpcode::FirstBitHi:  dst.id = m_module.opFindUMsb(typeId, src.id); break;
        case DxbcOpcode::FirstBitShi: dst.id = m_module.opFindSMsb(typeId, src.id); break;
        default: Logger::warn(str::format("DxbcCompiler: Unhandled instruction: ", ins.op)); return;
      }
      
      // The 'Hi' variants are counted from the MSB in DXBC
      // rather than the LSB, so we have to invert the number
      if (ins.op == DxbcOpcode::FirstBitHi || ins.op == DxbcOpcode::FirstBitShi) {
        uint32_t boolTypeId = m_module.defBoolType();

        if (dst.type.ccount > 1)
          boolTypeId = m_module.defVectorType(boolTypeId, dst.type.ccount);

        DxbcRegisterValue const31 = emitBuildConstVecu32(31u, 31u, 31u, 31u, ins.dst[0].mask);
        DxbcRegisterValue constff = emitBuildConstVecu32(~0u, ~0u, ~0u, ~0u, ins.dst[0].mask);

        // result != ~0u ? 31 - result : ~0u
        dst.id = m_module.opSelect(typeId,
          m_module.opINotEqual(boolTypeId, dst.id, constff.id),
          m_module.opISub(typeId, const31.id, dst.id),
          constff.id);
      }
      
      // No modifiers are supported
      emitRegisterStore(ins.dst[0], dst);
    }
    // Translates bufinfo: queries the size of a raw, structured or typed
    // buffer resource. Texel-buffer queries return typed element counts,
    // so raw buffers scale by four (bytes per dword) and structured
    // buffers divide by their stride in dwords to yield structure counts.
    void emitBufferQuery(
      const DxbcShaderInstruction&  ins) {
      // bufinfo takes two arguments
      // (dst0) The destination register
      // (src0) The buffer register to query
      const DxbcBufferInfo bufferInfo = getBufferInfo(ins.src[0]);
      bool isSsbo = bufferInfo.isSsbo;
      
      // We'll store this as a scalar unsigned integer
      DxbcRegisterValue result = isSsbo
        ? emitQueryBufferSize(ins.src[0])
        : emitQueryTexelBufferSize(ins.src[0]);
      
      uint32_t typeId = getVectorTypeId(result.type);
      
      // Adjust returned size if this is a raw or structured
      // buffer, as emitQueryTexelBufferSize only returns the
      // number of typed elements in the buffer.
      if (bufferInfo.type == DxbcResourceType::Raw) {
        result.id = m_module.opIMul(typeId,
          result.id, m_module.constu32(4));
      } else if (bufferInfo.type == DxbcResourceType::Structured) {
        result.id = m_module.opUDiv(typeId, result.id,
          m_module.constu32(bufferInfo.stride / 4));
      }

      // Store the result. The scalar will be extended to a
      // vector if the write mask consists of more than one
      // component, which is the desired behaviour.
      emitRegisterStore(ins.dst[0], result);
    }
    // Translates ld_raw / ld_structured (including the sparse 'S'
    // variants) from raw/structured buffers, TGSM or typed buffer views.
    // Data is always read as a sequence of 32-bit integers; runs of
    // consecutive components are loaded via NV raw access chains when
    // available, otherwise component-by-component through access chains
    // (TGSM/SSBO) or image fetch/read (texel buffer views).
    void emitBufferLoad(
      const DxbcShaderInstruction&  ins) {
      // ld_raw takes three arguments:
      // (dst0) Destination register
      // (src0) Byte offset
      // (src1) Source register
      // ld_structured takes four arguments:
      // (dst0) Destination register
      // (src0) Structure index
      // (src1) Byte offset
      // (src2) Source register
      const bool isStructured = ins.op == DxbcOpcode::LdStructured
                             || ins.op == DxbcOpcode::LdStructuredS;
      
      // Source register. The exact way we access
      // the data depends on the register type.
      const DxbcRegister& dstReg = ins.dst[0];
      const DxbcRegister& srcReg = isStructured ? ins.src[2] : ins.src[1];

      // Mark this UAV as read for the UAV barrier tracking
      if (dstReg.type == DxbcOperandType::UnorderedAccessView)
        emitUavBarrier(uint64_t(1u) << srcReg.idx[0].offset, 0u);

      // Retrieve common info about the buffer
      const DxbcBufferInfo bufferInfo = getBufferInfo(srcReg);

      // Shared memory is the only type of buffer that
      // is not accessed through a texel buffer view
      bool isTgsm = srcReg.type == DxbcOperandType::ThreadGroupSharedMemory;
      bool isSsbo = bufferInfo.isSsbo;

      // Common types and IDs used while loading the data
      uint32_t bufferId = isTgsm || isSsbo ? 0 : m_module.opLoad(bufferInfo.typeId, bufferInfo.varId);

      uint32_t vectorTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 4 });
      uint32_t scalarTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 1 });

      // Since all data is represented as a sequence of 32-bit
      // integers, we have to load each component individually.
      std::array<uint32_t, 4> ccomps = { 0, 0, 0, 0 };
      std::array<uint32_t, 4> scomps = { 0, 0, 0, 0 };
      uint32_t                scount = 0;

      // The sparse feedback ID will be non-zero for sparse
      // instructions on input. We need to reset it to 0.
      SpirvMemoryOperands memoryOperands;
      SpirvImageOperands imageOperands;
      imageOperands.sparse = ins.dstCount == 2;

      uint32_t coherence = bufferInfo.coherence;

      if (isTgsm && m_moduleInfo.options.forceVolatileTgsmAccess) {
        memoryOperands.flags |= spv::MemoryAccessVolatileMask;
        coherence = spv::ScopeWorkgroup;
      }

      // Coherent resources need non-private accesses with explicit
      // visibility operands at the resource's coherence scope
      if (coherence) {
        memoryOperands.flags |= spv::MemoryAccessNonPrivatePointerMask;

        if (coherence != spv::ScopeInvocation) {
          memoryOperands.flags |= spv::MemoryAccessMakePointerVisibleMask;
          memoryOperands.makeVisible = m_module.constu32(coherence);

          imageOperands.flags = spv::ImageOperandsNonPrivateTexelMask
                              | spv::ImageOperandsMakeTexelVisibleMask;
          imageOperands.makeVisible = m_module.constu32(coherence);
        }
      }

      uint32_t sparseFeedbackId = 0;

      // Raw access chains cannot return sparse feedback
      bool useRawAccessChains = m_hasRawAccessChains && isSsbo && !imageOperands.sparse;

      DxbcRegisterValue index = emitRegisterLoad(ins.src[0], DxbcRegMask(true, false, false, false));
      DxbcRegisterValue offset = index;

      if (isStructured)
        offset = emitRegisterLoad(ins.src[1], DxbcRegMask(true, false, false, false));

      DxbcRegisterValue elementIndex = { };

      uint32_t baseAlignment = sizeof(uint32_t);

      if (useRawAccessChains) {
        memoryOperands.flags |= spv::MemoryAccessAlignedMask;

        // Derive a conservative power-of-two alignment from the stride
        // and the immediate byte offset, clamped to the SSBO minimum
        if (isStructured && ins.src[1].type == DxbcOperandType::Imm32) {
          baseAlignment = bufferInfo.stride | ins.src[1].imm.u32_1;
          baseAlignment = baseAlignment & -baseAlignment;
          baseAlignment = std::min(baseAlignment, uint32_t(m_moduleInfo.options.minSsboAlignment));
        }
      } else {
        elementIndex = isStructured
          ? emitCalcBufferIndexStructured(index, offset, bufferInfo.stride)
          : emitCalcBufferIndexRaw(offset);
      }

      // Set of source components that actually need to be fetched,
      // derived from the write mask through the source swizzle
      uint32_t readMask = 0u;

      for (uint32_t i = 0; i < 4; i++) {
        if (dstReg.mask[i])
          readMask |= 1u << srcReg.swizzle[i];
      }

      while (readMask) {
        // Start index and length of the current run of consecutive
        // components; this scount intentionally shadows the outer
        // accumulation counter.
        uint32_t sindex = bit::tzcnt(readMask);
        uint32_t scount = bit::tzcnt(~(readMask >> sindex));
        uint32_t zero = 0;

        if (useRawAccessChains) {
          uint32_t alignment = baseAlignment;
          uint32_t offsetId = offset.id;

          if (sindex) {
            offsetId = m_module.opIAdd(scalarTypeId,
              offsetId, m_module.constu32(sizeof(uint32_t) * sindex));
            alignment |= sizeof(uint32_t) * sindex;
          }

          DxbcRegisterInfo storeInfo;
          storeInfo.type.ctype = DxbcScalarType::Uint32;
          storeInfo.type.ccount = scount;
          storeInfo.type.alength = 0;
          storeInfo.sclass = spv::StorageClassStorageBuffer;

          uint32_t loadTypeId = getArrayTypeId(storeInfo.type);
          uint32_t ptrTypeId = getPointerTypeId(storeInfo);

          // Load the entire run with a single raw access chain;
          // structured buffers use per-element robustness
          uint32_t accessChain = isStructured
            ? m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
                m_module.constu32(bufferInfo.stride), index.id, offsetId,
                spv::RawAccessChainOperandsRobustnessPerElementNVMask)
            : m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
                m_module.constu32(0), m_module.constu32(0), offsetId,
                spv::RawAccessChainOperandsRobustnessPerComponentNVMask);

          memoryOperands.alignment = alignment & -alignment;

          uint32_t vectorId = m_module.opLoad(loadTypeId, accessChain, memoryOperands);

          // Scatter the loaded run back into per-component IDs
          for (uint32_t i = 0; i < scount; i++) {
            ccomps[sindex + i] = vectorId;

            if (scount > 1) {
              ccomps[sindex + i] = m_module.opCompositeExtract(
                scalarTypeId, vectorId, 1, &i);
            }
          }

          readMask &= ~(((1u << scount) - 1u) << sindex);
        } else {
          uint32_t elementIndexAdjusted = m_module.opIAdd(
            getVectorTypeId(elementIndex.type), elementIndex.id,
            m_module.consti32(sindex));

          if (isTgsm) {
            ccomps[sindex] = m_module.opLoad(scalarTypeId,
              m_module.opAccessChain(bufferInfo.typeId,
                bufferInfo.varId, 1, &elementIndexAdjusted),
              memoryOperands);
          } else if (isSsbo) {
            uint32_t indices[2] = { m_module.constu32(0), elementIndexAdjusted };
            ccomps[sindex] = m_module.opLoad(scalarTypeId,
              m_module.opAccessChain(bufferInfo.typeId,
                bufferInfo.varId, 2, indices),
              memoryOperands);
          } else {
            uint32_t resultTypeId = vectorTypeId;
            uint32_t resultId = 0;

            if (imageOperands.sparse)
              resultTypeId = getSparseResultTypeId(vectorTypeId);

            if (srcReg.type == DxbcOperandType::Resource) {
              resultId = m_module.opImageFetch(resultTypeId,
                bufferId, elementIndexAdjusted, imageOperands);
            } else if (srcReg.type == DxbcOperandType::UnorderedAccessView) {
              resultId = m_module.opImageRead(resultTypeId,
                bufferId, elementIndexAdjusted, imageOperands);
            } else {
              throw DxvkError("DxbcCompiler: Invalid operand type for strucured/raw load");
            }

            // Only read sparse feedback once. This may be somewhat inaccurate
            // for reads that straddle pages, but we can't easily emulate this.
            if (imageOperands.sparse) {
              imageOperands.sparse = false;
              sparseFeedbackId = resultId;

              resultId = emitExtractSparseTexel(vectorTypeId, resultId);
            }

            ccomps[sindex] = m_module.opCompositeExtract(scalarTypeId, resultId, 1, &zero);
          }

          readMask &= readMask - 1;
        }
      }

      // Apply the source swizzle and destination write mask to
      // gather the final components in destination order
      for (uint32_t i = 0; i < 4; i++) {
        uint32_t sindex = srcReg.swizzle[i];

        if (dstReg.mask[i])
          scomps[scount++] = ccomps[sindex];
      }

      DxbcRegisterValue result = { };
      result.type.ctype = DxbcScalarType::Uint32;
      result.type.ccount = scount;
      result.id = scomps[0];

      if (scount > 1) {
        result.id = m_module.opCompositeConstruct(
          getVectorTypeId(result.type),
          scount, scomps.data());
      }

      emitRegisterStore(dstReg, result);

      if (sparseFeedbackId)
        emitStoreSparseFeedback(ins.dst[1], sparseFeedbackId);
    }
    // Translates store_raw / store_structured to raw/structured buffers,
    // TGSM or typed buffer views. The source value is bitcast to uint32
    // components; runs of consecutive write-mask components are stored
    // via NV raw access chains when available, otherwise one component
    // at a time through access chains (TGSM/SSBO) or OpImageWrite.
    void emitBufferStore(
      const DxbcShaderInstruction&  ins) {
      // store_raw takes three arguments:
      // (dst0) Destination register
      // (src0) Byte offset
      // (src1) Source register
      // store_structured takes four arguments:
      // (dst0) Destination register
      // (src0) Structure index
      // (src1) Byte offset
      // (src2) Source register
      const bool isStructured = ins.op == DxbcOpcode::StoreStructured;
      
      // Source register. The exact way we access
      // the data depends on the register type.
      const DxbcRegister& dstReg = ins.dst[0];
      const DxbcRegister& srcReg = isStructured ? ins.src[2] : ins.src[1];

      // Mark this UAV as written for the UAV barrier tracking
      if (dstReg.type == DxbcOperandType::UnorderedAccessView)
        emitUavBarrier(0u, uint64_t(1u) << dstReg.idx[0].offset);

      // All buffer data is stored as 32-bit integer components
      DxbcRegisterValue value = emitRegisterLoad(srcReg, dstReg.mask);
      value = emitRegisterBitcast(value, DxbcScalarType::Uint32);

      // Retrieve common info about the buffer
      const DxbcBufferInfo bufferInfo = getBufferInfo(dstReg);

      // Thread Group Shared Memory is not accessed through a texel buffer view
      bool isTgsm = dstReg.type == DxbcOperandType::ThreadGroupSharedMemory;
      bool isSsbo = bufferInfo.isSsbo;

      uint32_t bufferId = isTgsm || isSsbo ? 0 : m_module.opLoad(bufferInfo.typeId, bufferInfo.varId);

      uint32_t scalarTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 1 });
      uint32_t vectorTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 4 });

      // Set memory operands according to resource properties
      SpirvMemoryOperands memoryOperands;
      SpirvImageOperands imageOperands;

      uint32_t coherence = bufferInfo.coherence;

      if (isTgsm && m_moduleInfo.options.forceVolatileTgsmAccess) {
        memoryOperands.flags |= spv::MemoryAccessVolatileMask;
        coherence = spv::ScopeWorkgroup;
      }

      // Coherent resources need non-private accesses with explicit
      // availability operands at the resource's coherence scope
      if (coherence) {
        memoryOperands.flags |= spv::MemoryAccessNonPrivatePointerMask;

        if (coherence != spv::ScopeInvocation) {
          memoryOperands.flags |= spv::MemoryAccessMakePointerAvailableMask;
          memoryOperands.makeAvailable = m_module.constu32(coherence);

          imageOperands.flags = spv::ImageOperandsNonPrivateTexelMask
                              | spv::ImageOperandsMakeTexelAvailableMask;
          imageOperands.makeAvailable = m_module.constu32(coherence);
        }
      }

      // Compute flat element index as necessary
      bool useRawAccessChains = isSsbo && m_hasRawAccessChains;

      DxbcRegisterValue index = emitRegisterLoad(ins.src[0], DxbcRegMask(true, false, false, false));
      DxbcRegisterValue offset = index;

      if (isStructured)
        offset = emitRegisterLoad(ins.src[1], DxbcRegMask(true, false, false, false));

      DxbcRegisterValue elementIndex = { };

      uint32_t baseAlignment = sizeof(uint32_t);

      if (useRawAccessChains) {
        memoryOperands.flags |= spv::MemoryAccessAlignedMask;

        // Derive a conservative power-of-two alignment from the stride
        // and the immediate byte offset, clamped to the SSBO minimum
        if (isStructured && ins.src[1].type == DxbcOperandType::Imm32) {
          baseAlignment = bufferInfo.stride | ins.src[1].imm.u32_1;
          baseAlignment = baseAlignment & -baseAlignment;
          baseAlignment = std::min(baseAlignment, uint32_t(m_moduleInfo.options.minSsboAlignment));
        }
      } else {
        elementIndex = isStructured
          ? emitCalcBufferIndexStructured(index, offset, bufferInfo.stride)
          : emitCalcBufferIndexRaw(offset);
      }

      uint32_t writeMask = dstReg.mask.raw();

      while (writeMask) {
        // Start index and length of the current run of
        // consecutive components in the write mask
        uint32_t sindex = bit::tzcnt(writeMask);
        uint32_t scount = bit::tzcnt(~(writeMask >> sindex));

        if (useRawAccessChains) {
          uint32_t alignment = baseAlignment;
          uint32_t offsetId = offset.id;

          if (sindex) {
            offsetId = m_module.opIAdd(scalarTypeId,
              offsetId, m_module.constu32(sizeof(uint32_t) * sindex));
            alignment = alignment | (sizeof(uint32_t) * sindex);
          }

          DxbcRegisterInfo storeInfo;
          storeInfo.type.ctype = DxbcScalarType::Uint32;
          storeInfo.type.ccount = scount;
          storeInfo.type.alength = 0;
          storeInfo.sclass = spv::StorageClassStorageBuffer;

          uint32_t storeTypeId = getArrayTypeId(storeInfo.type);
          uint32_t ptrTypeId = getPointerTypeId(storeInfo);

          // Store the entire run with a single raw access chain;
          // structured buffers use per-element robustness
          uint32_t accessChain = isStructured
            ? m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
                m_module.constu32(bufferInfo.stride), index.id, offsetId,
                spv::RawAccessChainOperandsRobustnessPerElementNVMask)
            : m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
                m_module.constu32(0), m_module.constu32(0), offsetId,
                spv::RawAccessChainOperandsRobustnessPerComponentNVMask);

          uint32_t valueId = value.id;

          // Narrow the source vector to the components of this run
          if (scount < value.type.ccount) {
            if (scount == 1) {
              valueId = m_module.opCompositeExtract(storeTypeId, value.id, 1, &sindex);
            } else {
              std::array<uint32_t, 4> indices = { sindex, sindex + 1u, sindex + 2u, sindex + 3u };
              valueId = m_module.opVectorShuffle(storeTypeId, value.id, value.id, scount, indices.data());
            }
          }

          memoryOperands.alignment = alignment & -alignment;
          m_module.opStore(accessChain, valueId, memoryOperands);

          writeMask &= ~(((1u << scount) - 1u) << sindex);
        } else {
          uint32_t srcComponentId = value.type.ccount > 1
            ? m_module.opCompositeExtract(scalarTypeId,
                value.id, 1, &sindex)
            : value.id;

          uint32_t elementIndexAdjusted = sindex != 0
            ? m_module.opIAdd(getVectorTypeId(elementIndex.type),
                elementIndex.id, m_module.consti32(sindex))
            : elementIndex.id;

          if (isTgsm) {
            m_module.opStore(
              m_module.opAccessChain(bufferInfo.typeId,
                bufferInfo.varId, 1, &elementIndexAdjusted),
              srcComponentId, memoryOperands);
          } else if (isSsbo) {
            uint32_t indices[2] = { m_module.constu32(0), elementIndexAdjusted };
            m_module.opStore(
              m_module.opAccessChain(bufferInfo.typeId,
                bufferInfo.varId, 2, indices),
              srcComponentId, memoryOperands);
          } else if (dstReg.type == DxbcOperandType::UnorderedAccessView) {
            // Image writes require a four-component vector,
            // so replicate the scalar into all lanes
            const std::array<uint32_t, 4> srcVectorIds = {
              srcComponentId, srcComponentId,
              srcComponentId, srcComponentId,
            };

            m_module.opImageWrite(
              bufferId, elementIndexAdjusted,
              m_module.opCompositeConstruct(vectorTypeId,
                4, srcVectorIds.data()),
              imageOperands);
          } else {
            throw DxvkError("DxbcCompiler: Invalid operand type for strucured/raw store");
          }

          writeMask &= writeMask - 1u;
        }
      }
    }
    // Translates f32tof16 / f16tof32 using the SPIR-V 2x16 pack/unpack
    // instructions, processing one component at a time (each DXBC
    // component maps to the low half of a 2D pack/unpack). For packing,
    // finite inputs that overflow to f16 infinity are clamped back to
    // the largest finite half-float while true input infinities pass
    // through, matching D3D requirements.
    void emitConvertFloat16(
      const DxbcShaderInstruction&  ins) {
      // f32tof16 takes two operands:
      // (dst0) Destination register as a uint32 vector
      // (src0) Source register as a float32 vector
      // f16tof32 takes two operands:
      // (dst0) Destination register as a float32 vector
      // (src0) Source register as a uint32 vector
      const DxbcRegisterValue src = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
      
      // We handle both packing and unpacking here
      const bool isPack = ins.op == DxbcOpcode::F32toF16;
      
      // The conversion instructions do not map very well to the
      // SPIR-V pack instructions, which operate on 2D vectors.
      std::array<uint32_t, 4> scalarIds = {{ 0, 0, 0, 0 }};
      
      const uint32_t componentCount = src.type.ccount;
      
      // These types are used in both pack and unpack operations
      const uint32_t t_u32   = getVectorTypeId({ DxbcScalarType::Uint32, 1 });
      const uint32_t t_f32   = getVectorTypeId({ DxbcScalarType::Float32, 1 });
      const uint32_t t_f32v2 = getVectorTypeId({ DxbcScalarType::Float32, 2 });
      
      // Constant zero-bit pattern, used for packing
      const uint32_t zerof32 = isPack ? m_module.constf32(0.0f) : 0;
      
      for (uint32_t i = 0; i < componentCount; i++) {
        const DxbcRegisterValue componentValue
          = emitRegisterExtract(src, DxbcRegMask::select(i));
        
        if (isPack) {  // f32tof16
          const std::array<uint32_t, 2> packIds =
            {{ componentValue.id, zerof32 }};
          
          scalarIds[i] = m_module.opPackHalf2x16(t_u32,
            m_module.opCompositeConstruct(t_f32v2, packIds.size(), packIds.data()));
        } else {  // f16tof32
          const uint32_t zeroIndex = 0;
          
          scalarIds[i] = m_module.opCompositeExtract(t_f32,
            m_module.opUnpackHalf2x16(t_f32v2, componentValue.id),
            1, &zeroIndex);
        }
      }

      DxbcRegisterValue result;
      result.type.ctype  = ins.dst[0].dataType;
      result.type.ccount = componentCount;

      uint32_t typeId = getVectorTypeId(result.type);
      result.id = componentCount > 1
        ? m_module.opCompositeConstruct(typeId,
            componentCount, scalarIds.data())
        : scalarIds[0];

      if (isPack) {
        // Some drivers return infinity if the input value is above a certain
        // threshold, but D3D wants us to return infinity only if the input is
        // actually infinite. Fix this up to return the maximum representable
        // 16-bit floating point number instead, but preserve input infinity.
        uint32_t t_bvec = getVectorTypeId({ DxbcScalarType::Bool, componentCount });
        uint32_t f16Infinity = m_module.constuReplicant(0x7C00, componentCount);
        uint32_t f16Unsigned = m_module.constuReplicant(0x7FFF, componentCount);

        uint32_t isInputInf = m_module.opIsInf(t_bvec, src.id);
        uint32_t isValueInf = m_module.opIEqual(t_bvec, f16Infinity,
          m_module.opBitwiseAnd(typeId, result.id, f16Unsigned));

        // If the packed value is infinite but the input was finite,
        // subtract one to obtain the largest finite half-float
        // (0x7C00 - 1 = 0x7BFF), preserving the sign bit
        result.id = m_module.opSelect(getVectorTypeId(result.type),
          m_module.opLogicalAnd(t_bvec, isValueInf, m_module.opLogicalNot(t_bvec, isInputInf)),
          m_module.opISub(typeId, result.id, m_module.constuReplicant(1, componentCount)),
          result.id);
      }

      // Store result in the destination register
      emitRegisterStore(ins.dst[0], result);
    }
    // Translates the double-precision conversion opcodes (ftod/dtof,
    // dtoi/dtou, itod/utod) to the corresponding SPIR-V conversions.
    // Doubles occupy two 32-bit components per value, so the source
    // read mask is derived from the destination mask accordingly.
    void emitConvertFloat64(
      const DxbcShaderInstruction&  ins) {
      // ftod and dtof take the following operands:
      // (dst0) Destination operand
      // (src0) Number to convert
      uint32_t dstBits = ins.dst[0].mask.popCount();

      // A 64-bit destination component consumes two mask bits,
      // so a 64-bit source provides half as many values
      DxbcRegMask srcMask = isDoubleType(ins.dst[0].dataType)
        ? DxbcRegMask(dstBits >= 2, dstBits >= 4, false, false)
        : DxbcRegMask(dstBits >= 1, dstBits >= 1, dstBits >= 2, dstBits >= 2);

      // Perform actual conversion, destination modifiers are not applied
      DxbcRegisterValue val = emitRegisterLoad(ins.src[0], srcMask);

      DxbcRegisterValue result;
      result.type.ctype  = ins.dst[0].dataType;
      result.type.ccount = val.type.ccount;

      switch (ins.op) {
        case DxbcOpcode::DtoF:
        case DxbcOpcode::FtoD:
          result.id = m_module.opFConvert(
            getVectorTypeId(result.type), val.id);
          break;

        case DxbcOpcode::DtoI:
          result.id = m_module.opConvertFtoS(
            getVectorTypeId(result.type), val.id);
          break;

        case DxbcOpcode::DtoU:
          result.id = m_module.opConvertFtoU(
            getVectorTypeId(result.type), val.id);
          break;

        case DxbcOpcode::ItoD:
          result.id = m_module.opConvertStoF(
            getVectorTypeId(result.type), val.id);
          break;
        
        case DxbcOpcode::UtoD:
          result.id = m_module.opConvertUtoF(
            getVectorTypeId(result.type), val.id);
          break;
        
        default:
          Logger::warn(str::format("DxbcCompiler: Unhandled instruction: ", ins.op));
          return;
      }
      
      emitRegisterStore(ins.dst[0], result);
    }
    // Handles hull shader phase markers (hs_decls, hs_control_point_phase,
    // hs_fork_phase, hs_join_phase). Updates the compiler's current-phase
    // state, creates the per-phase SPIR-V function where applicable, and
    // assigns debug names to the generated functions.
    void emitHullShaderPhase(
      const DxbcShaderInstruction&  ins) {
      switch (ins.op) {
        case DxbcOpcode::HsDecls: {
          if (m_hs.currPhaseType != DxbcCompilerHsPhase::None)
            Logger::err("DXBC: HsDecls not the first phase in hull shader");
          
          m_hs.currPhaseType = DxbcCompilerHsPhase::Decl;
        } break;
        
        case DxbcOpcode::HsControlPointPhase: {
          m_hs.cpPhase = this->emitNewHullShaderControlPointPhase();
          
          m_hs.currPhaseType = DxbcCompilerHsPhase::ControlPoint;
          m_hs.currPhaseId = 0;
          
          m_module.setDebugName(m_hs.cpPhase.functionId, "hs_control_point");
        } break;
        
        case DxbcOpcode::HsForkPhase: {
          // Fork phases are collected in order; the current phase ID
          // indexes into m_hs.forkPhases
          auto phase = this->emitNewHullShaderForkJoinPhase();
          m_hs.forkPhases.push_back(phase);
          
          m_hs.currPhaseType = DxbcCompilerHsPhase::Fork;
          m_hs.currPhaseId = m_hs.forkPhases.size() - 1;
          
          m_module.setDebugName(phase.functionId,
            str::format("hs_fork_", m_hs.currPhaseId).c_str());
        } break;
        
        case DxbcOpcode::HsJoinPhase: {
          // Same bookkeeping as fork phases, in m_hs.joinPhases
          auto phase = this->emitNewHullShaderForkJoinPhase();
          m_hs.joinPhases.push_back(phase);
          
          m_hs.currPhaseType = DxbcCompilerHsPhase::Join;
          m_hs.currPhaseId = m_hs.joinPhases.size() - 1;
          
          m_module.setDebugName(phase.functionId,
            str::format("hs_join_", m_hs.currPhaseId).c_str());
        } break;
        
        default:
          Logger::warn(str::format(
            "DxbcCompiler: Unhandled instruction: ",
            ins.op));
      }
    }
    // Stores the declared instance count on the current
    // hull shader fork/join phase.
    void emitHullShaderInstCnt(
      const DxbcShaderInstruction&  ins) {
      this->getCurrentHsForkJoinPhase()->instanceCount = ins.imm[0].u32;
    }
    
    
    // Translates eval_centroid / eval_sample_index / eval_snapped to the
    // SPIR-V InterpolateAt* instructions, which operate directly on the
    // pixel shader input variable pointers. The interpolated four-component
    // result is swizzled and masked before being stored to dst0.
    void emitInterpolate(
      const DxbcShaderInstruction&  ins) {
      m_module.enableCapability(spv::CapabilityInterpolationFunction);

      // The SPIR-V instructions operate on input variable pointers,
      // which are all declared as four-component float vectors.
      uint32_t registerId = ins.src[0].idx[0].offset;
      
      DxbcRegisterValue result;
      result.type = getInputRegType(registerId);
      
      switch (ins.op) {
        case DxbcOpcode::EvalCentroid: {
          result.id = m_module.opInterpolateAtCentroid(
            getVectorTypeId(result.type),
            m_vRegs.at(registerId).id);
        } break;
        
        case DxbcOpcode::EvalSampleIndex: {
          const DxbcRegisterValue sampleIndex = emitRegisterLoad(
            ins.src[1], DxbcRegMask(true, false, false, false));
          
          result.id = m_module.opInterpolateAtSample(
            getVectorTypeId(result.type),
            m_vRegs.at(registerId).id,
            sampleIndex.id);
        } break;
        
        case DxbcOpcode::EvalSnapped: {
          // The offset is encoded as a 4-bit fixed point value
          DxbcRegisterValue offset = emitRegisterLoad(
            ins.src[1], DxbcRegMask(true, true, false, false));
          offset.id = m_module.opBitFieldSExtract(
            getVectorTypeId(offset.type), offset.id,
            m_module.consti32(0), m_module.consti32(4));

          // Convert the signed 4-bit value to float and scale by
          // 1/16 to obtain the pixel-relative offset
          offset.type.ctype = DxbcScalarType::Float32;
          offset.id = m_module.opConvertStoF(
            getVectorTypeId(offset.type), offset.id);

          offset.id = m_module.opFMul(
            getVectorTypeId(offset.type), offset.id,
            m_module.constvec2f32(1.0f / 16.0f, 1.0f / 16.0f));

          result.id = m_module.opInterpolateAtOffset(
            getVectorTypeId(result.type),
            m_vRegs.at(registerId).id,
            offset.id);
        } break;
        
        default:
          Logger::warn(str::format(
            "DxbcCompiler: Unhandled instruction: ",
            ins.op));
          return;
      }
      
      // Apply source swizzle and destination write mask
      result = emitRegisterSwizzle(result,
        ins.src[0].swizzle, ins.dst[0].mask);
      emitRegisterStore(ins.dst[0], result);
    }
    // Translates check_access_mapped: converts a sparse residency code
    // into the D3D boolean convention (~0u if the accessed texels were
    // resident, 0 otherwise) using OpImageSparseTexelsResident.
    void emitSparseCheckAccess(
      const DxbcShaderInstruction&  ins) {
      // check_access_mapped has two operands:
      // (dst0) The destination register
      // (src0) The residency code
      m_module.enableCapability(spv::CapabilitySparseResidency);

      DxbcRegisterValue srcValue = emitRegisterLoad(ins.src[0], ins.dst[0].mask);

      uint32_t boolId = m_module.opImageSparseTexelsResident(
        m_module.defBoolType(), srcValue.id);

      // Map the boolean to ~0u / 0u as D3D expects
      DxbcRegisterValue dstValue;
      dstValue.type = { DxbcScalarType::Uint32, 1 };
      dstValue.id = m_module.opSelect(getScalarTypeId(DxbcScalarType::Uint32),
        boolId, m_module.constu32(~0u), m_module.constu32(0));

      emitRegisterStore(ins.dst[0], dstValue);
    }
We will have to combine the\n // result into a four-component vector later.\n DxbcRegisterValue imageSize = emitQueryTextureSize(ins.src[1], mipLod);\n DxbcRegisterValue imageLevels = emitQueryTextureLods(ins.src[1]);\n\n // If the mip level is out of bounds, D3D requires us to return\n // zero before applying modifiers, whereas SPIR-V is undefined,\n // so we need to fix it up manually here.\n imageSize.id = m_module.opSelect(getVectorTypeId(imageSize.type),\n m_module.opULessThan(m_module.defBoolType(), mipLod.id, imageLevels.id),\n imageSize.id, emitBuildZeroVector(imageSize.type).id);\n\n // Convert intermediates to the requested type\n if (returnType == DxbcScalarType::Float32) {\n imageSize.type.ctype = DxbcScalarType::Float32;\n imageSize.id = m_module.opConvertUtoF(\n getVectorTypeId(imageSize.type),\n imageSize.id);\n \n imageLevels.type.ctype = DxbcScalarType::Float32;\n imageLevels.id = m_module.opConvertUtoF(\n getVectorTypeId(imageLevels.type),\n imageLevels.id);\n }\n \n // If the selected return type is rcpFloat, we need\n // to compute the reciprocal of the image dimensions,\n // but not the array size, so we need to separate it.\n const uint32_t imageCoordDim = imageSize.type.ccount;\n \n DxbcRegisterValue imageLayers;\n imageLayers.type = imageSize.type;\n imageLayers.id = 0;\n \n if (resinfoType == DxbcResinfoType::RcpFloat && resourceInfo.image.array) {\n imageLayers = emitRegisterExtract(imageSize, DxbcRegMask::select(imageCoordDim - 1));\n imageSize = emitRegisterExtract(imageSize, DxbcRegMask::firstN(imageCoordDim - 1));\n }\n \n if (resinfoType == DxbcResinfoType::RcpFloat) {\n imageSize.id = m_module.opFDiv(\n getVectorTypeId(imageSize.type),\n emitBuildConstVecf32(1.0f, 1.0f, 1.0f, 1.0f,\n DxbcRegMask::firstN(imageSize.type.ccount)).id,\n imageSize.id);\n }\n \n // Concatenate result vectors and scalars to form a\n // 4D vector. 
Unused components will be set to zero.\n std::array vectorIds = { imageSize.id, 0, 0, 0 };\n uint32_t numVectorIds = 1;\n \n if (imageLayers.id != 0)\n vectorIds[numVectorIds++] = imageLayers.id;\n \n if (imageCoordDim < 3) {\n const uint32_t zero = returnType == DxbcScalarType::Uint32\n ? m_module.constu32(0)\n : m_module.constf32(0.0f);\n \n for (uint32_t i = imageCoordDim; i < 3; i++)\n vectorIds[numVectorIds++] = zero;\n }\n \n vectorIds[numVectorIds++] = imageLevels.id;\n \n // Create the actual result vector\n DxbcRegisterValue result;\n result.type.ctype = returnType;\n result.type.ccount = 4;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n numVectorIds, vectorIds.data());\n \n // Swizzle components using the resource swizzle\n // and the destination operand's write mask\n result = emitRegisterSwizzle(result,\n ins.src[1].swizzle, ins.dst[0].mask);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitTextureQueryLod(\n const DxbcShaderInstruction& ins) {\n // All sample instructions have at least these operands:\n // (dst0) The destination register\n // (src0) Texture coordinates\n // (src1) The texture itself\n // (src2) The sampler object\n const DxbcRegister& texCoordReg = ins.src[0];\n const DxbcRegister& textureReg = ins.src[1];\n const DxbcRegister& samplerReg = ins.src[2];\n \n // Texture and sampler register IDs\n const auto& texture = m_textures.at(textureReg.idx[0].offset);\n const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);\n \n // Load texture coordinates\n const DxbcRegisterValue coord = emitRegisterLoad(texCoordReg,\n DxbcRegMask::firstN(getTexLayerDim(texture.imageInfo)));\n \n // Query the LOD. 
The result is a two-dimensional float32\n // vector containing the mip level and virtual LOD numbers.\n const uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, false);\n const uint32_t queriedLodId = m_module.opImageQueryLod(\n getVectorTypeId({ DxbcScalarType::Float32, 2 }),\n sampledImageId, coord.id);\n \n // Build the result array vector by filling up\n // the remaining two components with zeroes.\n const uint32_t zero = m_module.constf32(0.0f);\n const std::array resultIds\n = {{ queriedLodId, zero, zero }};\n \n DxbcRegisterValue result;\n result.type = DxbcVectorType { DxbcScalarType::Float32, 4 };\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n resultIds.size(), resultIds.data());\n \n result = emitRegisterSwizzle(result, ins.src[1].swizzle, ins.dst[0].mask);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitTextureQueryMs(\n const DxbcShaderInstruction& ins) {\n // sampleinfo has two operands:\n // (dst0) The destination register\n // (src0) Resource to query\n DxbcRegisterValue sampleCount = emitQueryTextureSamples(ins.src[0]);\n \n if (ins.controls.returnType() != DxbcInstructionReturnType::Uint) {\n sampleCount.type = { DxbcScalarType::Float32, 1 };\n sampleCount.id = m_module.opConvertUtoF(\n getVectorTypeId(sampleCount.type),\n sampleCount.id);\n }\n \n emitRegisterStore(ins.dst[0], sampleCount);\n }\n void emitTextureQueryMsPos(\n const DxbcShaderInstruction& ins) {\n // samplepos has three operands:\n // (dst0) The destination register\n // (src0) Resource to query \n // (src1) Sample index\n if (m_samplePositions == 0)\n m_samplePositions = emitSamplePosArray();\n \n // The lookup index is qual to the sample count plus the\n // sample index, or 0 if the resource cannot be queried.\n DxbcRegisterValue sampleCount = emitQueryTextureSamples(ins.src[0]);\n DxbcRegisterValue sampleIndex = emitRegisterLoad(\n ins.src[1], DxbcRegMask(true, false, false, false));\n \n uint32_t lookupIndex = 
m_module.opIAdd(\n getVectorTypeId(sampleCount.type),\n sampleCount.id, sampleIndex.id);\n \n // Validate the parameters\n uint32_t sampleCountValid = m_module.opULessThanEqual(\n m_module.defBoolType(),\n sampleCount.id,\n m_module.constu32(16));\n \n uint32_t sampleIndexValid = m_module.opULessThan(\n m_module.defBoolType(),\n sampleIndex.id,\n sampleCount.id);\n \n // If the lookup cannot be performed, set the lookup\n // index to zero, which will return a zero vector.\n lookupIndex = m_module.opSelect(\n getVectorTypeId(sampleCount.type),\n m_module.opLogicalAnd(\n m_module.defBoolType(),\n sampleCountValid,\n sampleIndexValid),\n lookupIndex,\n m_module.constu32(0));\n \n // Load sample pos vector and write the masked\n // components to the destination register.\n DxbcRegisterPointer samplePos;\n samplePos.type.ctype = DxbcScalarType::Float32;\n samplePos.type.ccount = 2;\n samplePos.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(samplePos.type),\n spv::StorageClassPrivate),\n m_samplePositions, 1, &lookupIndex);\n \n // Expand to vec4 by appending zeroes\n DxbcRegisterValue result = emitValueLoad(samplePos);\n\n DxbcRegisterValue zero;\n zero.type.ctype = DxbcScalarType::Float32;\n zero.type.ccount = 2;\n zero.id = m_module.constvec2f32(0.0f, 0.0f);\n\n result = emitRegisterConcat(result, zero);\n \n emitRegisterStore(ins.dst[0],\n emitRegisterSwizzle(result,\n ins.src[0].swizzle,\n ins.dst[0].mask));\n }\n void emitTextureFetch(\n const DxbcShaderInstruction& ins) {\n // ld has three operands:\n // (dst0) The destination register\n // (src0) Source address\n // (src1) Source texture\n // ld2dms has four operands:\n // (dst0) The destination register\n // (src0) Source address\n // (src1) Source texture\n // (src2) Sample number\n const auto& texture = m_textures.at(ins.src[1].idx[0].offset);\n const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);\n \n bool isMultisampled = ins.op == DxbcOpcode::LdMs\n || ins.op == 
DxbcOpcode::LdMsS;\n\n // Load the texture coordinates. The last component\n // contains the LOD if the resource is an image.\n const DxbcRegisterValue address = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, true, true, true));\n \n // Additional image operands. This will store\n // the LOD and the address offset if present.\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n \n if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {\n const std::array offsetIds = {\n imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,\n imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,\n imageLayerDim >= 3 ? m_module.consti32(ins.sampleControls.w) : 0,\n };\n \n imageOperands.flags |= spv::ImageOperandsConstOffsetMask;\n imageOperands.sConstOffset = offsetIds[0];\n\n if (imageLayerDim > 1) {\n imageOperands.sConstOffset = m_module.constComposite(\n getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),\n imageLayerDim, offsetIds.data());\n }\n }\n \n // The LOD is not present when reading from\n // a buffer or from a multisample texture.\n if (texture.imageInfo.dim != spv::DimBuffer && texture.imageInfo.ms == 0) {\n DxbcRegisterValue imageLod;\n \n if (!isMultisampled) {\n imageLod = emitRegisterExtract(\n address, DxbcRegMask(false, false, false, true));\n } else {\n // If we force-disabled MSAA, fetch from LOD 0\n imageLod.type = { DxbcScalarType::Uint32, 1 };\n imageLod.id = m_module.constu32(0);\n }\n \n imageOperands.flags |= spv::ImageOperandsLodMask;\n imageOperands.sLod = imageLod.id;\n }\n \n // The ld2dms instruction has a sample index, but we\n // are only allowed to set it for multisample views\n if (isMultisampled && texture.imageInfo.ms == 1) {\n DxbcRegisterValue sampleId = emitRegisterLoad(\n ins.src[2], DxbcRegMask(true, false, false, false));\n \n imageOperands.flags |= spv::ImageOperandsSampleMask;\n imageOperands.sSampleId = sampleId.id;\n }\n \n // Extract 
coordinates from address\n const DxbcRegisterValue coord = emitCalcTexCoord(address, texture.imageInfo);\n \n // Reading a typed image or buffer view\n // always returns a four-component vector.\n const uint32_t imageId = m_module.opLoad(texture.imageTypeId, texture.varId);\n\n DxbcVectorType texelType;\n texelType.ctype = texture.sampledType;\n texelType.ccount = 4;\n\n uint32_t texelTypeId = getVectorTypeId(texelType);\n uint32_t resultTypeId = texelTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(texelTypeId);\n\n resultId = m_module.opImageFetch(resultTypeId,\n imageId, coord.id, imageOperands);\n\n DxbcRegisterValue result;\n result.type = texelType;\n result.id = imageOperands.sparse\n ? emitExtractSparseTexel(texelTypeId, resultId)\n : resultId;\n \n // Swizzle components using the texture swizzle\n // and the destination operand's write mask\n result = emitRegisterSwizzle(result,\n ins.src[1].swizzle, ins.dst[0].mask);\n \n emitRegisterStore(ins.dst[0], result);\n\n if (imageOperands.sparse)\n emitStoreSparseFeedback(ins.dst[1], resultId);\n }\n void emitTextureGather(\n const DxbcShaderInstruction& ins) {\n // Gather4 takes the following operands:\n // (dst0) The destination register\n // (dst1) The residency code for sparse ops\n // (src0) Texture coordinates\n // (src1) The texture itself\n // (src2) The sampler, with a component selector\n // Gather4C takes the following additional operand:\n // (src3) The depth reference value\n // The Gather4Po variants take an additional operand\n // which defines an extended constant offset.\n // TODO reduce code duplication by moving some common code\n // in both sample() and gather() into separate methods\n const bool isExtendedGather = ins.op == DxbcOpcode::Gather4Po\n || ins.op == DxbcOpcode::Gather4PoC\n || ins.op == DxbcOpcode::Gather4PoS\n || ins.op == DxbcOpcode::Gather4PoCS;\n \n const DxbcRegister& texCoordReg = ins.src[0];\n const DxbcRegister& 
textureReg = ins.src[1 + isExtendedGather];\n const DxbcRegister& samplerReg = ins.src[2 + isExtendedGather];\n \n // Texture and sampler register IDs\n const auto& texture = m_textures.at(textureReg.idx[0].offset);\n const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);\n \n // Image type, which stores the image dimensions etc.\n const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);\n \n // Load the texture coordinates. SPIR-V allows these\n // to be float4 even if not all components are used.\n DxbcRegisterValue coord = emitLoadTexCoord(texCoordReg, texture.imageInfo);\n \n // Load reference value for depth-compare operations\n const bool isDepthCompare = ins.op == DxbcOpcode::Gather4C\n || ins.op == DxbcOpcode::Gather4PoC\n || ins.op == DxbcOpcode::Gather4CS\n || ins.op == DxbcOpcode::Gather4PoCS;\n\n const DxbcRegisterValue referenceValue = isDepthCompare\n ? emitRegisterLoad(ins.src[3 + isExtendedGather],\n DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n \n // Accumulate additional image operands.\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n\n if (isExtendedGather) {\n m_module.enableCapability(spv::CapabilityImageGatherExtended);\n \n DxbcRegisterValue gatherOffset = emitRegisterLoad(\n ins.src[1], DxbcRegMask::firstN(imageLayerDim));\n \n imageOperands.flags |= spv::ImageOperandsOffsetMask;\n imageOperands.gOffset = gatherOffset.id;\n } else if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {\n const std::array offsetIds = {\n imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,\n imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,\n imageLayerDim >= 3 ? 
m_module.consti32(ins.sampleControls.w) : 0,\n };\n \n imageOperands.flags |= spv::ImageOperandsConstOffsetMask;\n imageOperands.sConstOffset = offsetIds[0];\n\n if (imageLayerDim > 1) {\n imageOperands.sConstOffset = m_module.constComposite(\n getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),\n imageLayerDim, offsetIds.data());\n }\n }\n\n // Gathering texels always returns a four-component\n // vector, even for the depth-compare variants.\n uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, isDepthCompare);\n\n DxbcVectorType texelType;\n texelType.ctype = texture.sampledType;\n texelType.ccount = 4;\n\n uint32_t texelTypeId = getVectorTypeId(texelType);\n uint32_t resultTypeId = texelTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(texelTypeId);\n\n if (sampledImageId) {\n switch (ins.op) {\n // Simple image gather operation\n case DxbcOpcode::Gather4:\n case DxbcOpcode::Gather4S:\n case DxbcOpcode::Gather4Po:\n case DxbcOpcode::Gather4PoS: {\n resultId = m_module.opImageGather(\n resultTypeId, sampledImageId, coord.id,\n m_module.consti32(samplerReg.swizzle[0]),\n imageOperands);\n } break;\n\n // Depth-compare operation\n case DxbcOpcode::Gather4C:\n case DxbcOpcode::Gather4CS:\n case DxbcOpcode::Gather4PoC:\n case DxbcOpcode::Gather4PoCS: {\n resultId = m_module.opImageDrefGather(\n resultTypeId, sampledImageId, coord.id,\n referenceValue.id, imageOperands);\n } break;\n\n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n } else {\n Logger::warn(str::format(\"DxbcCompiler: \", ins.op, \": Unsupported image type\"));\n resultId = m_module.constNull(resultTypeId);\n }\n\n // If necessary, deal with the sparse result\n DxbcRegisterValue result;\n result.type = texelType;\n result.id = imageOperands.sparse\n ? 
emitExtractSparseTexel(texelTypeId, resultId)\n : resultId;\n\n // Swizzle components using the texture swizzle\n // and the destination operand's write mask\n result = emitRegisterSwizzle(result,\n textureReg.swizzle, ins.dst[0].mask);\n \n emitRegisterStore(ins.dst[0], result);\n\n if (imageOperands.sparse)\n emitStoreSparseFeedback(ins.dst[1], resultId);\n }\n void emitTextureSample(\n const DxbcShaderInstruction& ins) {\n // All sample instructions have at least these operands:\n // (dst0) The destination register\n // (src0) Texture coordinates\n // (src1) The texture itself\n // (src2) The sampler object\n const DxbcRegister& texCoordReg = ins.src[0];\n const DxbcRegister& textureReg = ins.src[1];\n const DxbcRegister& samplerReg = ins.src[2];\n \n // Texture and sampler register IDs\n const auto& texture = m_textures.at(textureReg.idx[0].offset);\n const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);\n const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);\n \n // Load the texture coordinates. SPIR-V allows these\n // to be float4 even if not all components are used.\n DxbcRegisterValue coord = emitLoadTexCoord(texCoordReg, texture.imageInfo);\n \n // Load reference value for depth-compare operations\n const bool isDepthCompare = ins.op == DxbcOpcode::SampleC\n || ins.op == DxbcOpcode::SampleClz\n || ins.op == DxbcOpcode::SampleCClampS\n || ins.op == DxbcOpcode::SampleClzS;\n \n const DxbcRegisterValue referenceValue = isDepthCompare\n ? emitRegisterLoad(ins.src[3], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n \n // Load explicit gradients for sample operations that require them\n const bool hasExplicitGradients = ins.op == DxbcOpcode::SampleD\n || ins.op == DxbcOpcode::SampleDClampS;\n \n const DxbcRegisterValue explicitGradientX = hasExplicitGradients\n ? 
emitRegisterLoad(ins.src[3], DxbcRegMask::firstN(imageLayerDim))\n : DxbcRegisterValue();\n \n const DxbcRegisterValue explicitGradientY = hasExplicitGradients\n ? emitRegisterLoad(ins.src[4], DxbcRegMask::firstN(imageLayerDim))\n : DxbcRegisterValue();\n \n // LOD for certain sample operations\n const bool hasLod = ins.op == DxbcOpcode::SampleL\n || ins.op == DxbcOpcode::SampleLS\n || ins.op == DxbcOpcode::SampleB\n || ins.op == DxbcOpcode::SampleBClampS;\n \n const DxbcRegisterValue lod = hasLod\n ? emitRegisterLoad(ins.src[3], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n\n // Min LOD for certain sparse operations\n const bool hasMinLod = ins.op == DxbcOpcode::SampleClampS\n || ins.op == DxbcOpcode::SampleBClampS\n || ins.op == DxbcOpcode::SampleDClampS\n || ins.op == DxbcOpcode::SampleCClampS;\n\n const DxbcRegisterValue minLod = hasMinLod && ins.src[ins.srcCount - 1].type != DxbcOperandType::Null\n ? emitRegisterLoad(ins.src[ins.srcCount - 1], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n\n // Accumulate additional image operands. These are\n // not part of the actual operand token in SPIR-V.\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n\n if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {\n const std::array offsetIds = {\n imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,\n imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,\n imageLayerDim >= 3 ? 
          m_module.consti32(ins.sampleControls.w) : 0,
        };

        imageOperands.flags |= spv::ImageOperandsConstOffsetMask;
        imageOperands.sConstOffset = offsetIds[0];

        if (imageLayerDim > 1) {
          imageOperands.sConstOffset = m_module.constComposite(
            getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),
            imageLayerDim, offsetIds.data());
        }
      }

      if (hasMinLod) {
        m_module.enableCapability(spv::CapabilityMinLod);

        imageOperands.flags |= spv::ImageOperandsMinLodMask;
        imageOperands.sMinLod = minLod.id;
      }

      // Combine the texture and the sampler into a sampled image
      uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, isDepthCompare);

      // Sampling an image always returns a four-component
      // vector, whereas depth-compare ops return a scalar.
      DxbcVectorType texelType;
      texelType.ctype = texture.sampledType;
      texelType.ccount = isDepthCompare ? 1 : 4;

      uint32_t texelTypeId = getVectorTypeId(texelType);
      uint32_t resultTypeId = texelTypeId;
      uint32_t resultId = 0;

      if (imageOperands.sparse)
        resultTypeId = getSparseResultTypeId(texelTypeId);

      if (sampledImageId) {
        switch (ins.op) {
          // Simple image sample operation
          case DxbcOpcode::Sample:
          case DxbcOpcode::SampleClampS: {
            resultId = m_module.opImageSampleImplicitLod(
              resultTypeId, sampledImageId, coord.id,
              imageOperands);
          } break;

          // Depth-compare operation
          case DxbcOpcode::SampleC:
          case DxbcOpcode::SampleCClampS: {
            resultId = m_module.opImageSampleDrefImplicitLod(
              resultTypeId, sampledImageId, coord.id,
              referenceValue.id, imageOperands);
          } break;

          // Depth-compare operation on mip level zero
          case DxbcOpcode::SampleClz:
          case DxbcOpcode::SampleClzS: {
            imageOperands.flags |= spv::ImageOperandsLodMask;
            imageOperands.sLod = m_module.constf32(0.0f);

            resultId = m_module.opImageSampleDrefExplicitLod(
              resultTypeId, sampledImageId, coord.id,
              referenceValue.id, imageOperands);
          } break;

          // Sample operation with explicit gradients
          case DxbcOpcode::SampleD:
          case DxbcOpcode::SampleDClampS: {
            imageOperands.flags |= spv::ImageOperandsGradMask;
            imageOperands.sGradX = explicitGradientX.id;
            imageOperands.sGradY = explicitGradientY.id;

            resultId = m_module.opImageSampleExplicitLod(
              resultTypeId, sampledImageId, coord.id,
              imageOperands);
          } break;

          // Sample operation with explicit LOD
          case DxbcOpcode::SampleL:
          case DxbcOpcode::SampleLS: {
            imageOperands.flags |= spv::ImageOperandsLodMask;
            imageOperands.sLod = lod.id;

            resultId = m_module.opImageSampleExplicitLod(
              resultTypeId, sampledImageId, coord.id,
              imageOperands);
          } break;

          // Sample operation with LOD bias
          case DxbcOpcode::SampleB:
          case DxbcOpcode::SampleBClampS: {
            imageOperands.flags |= spv::ImageOperandsBiasMask;
            imageOperands.sLodBias = lod.id;

            resultId = m_module.opImageSampleImplicitLod(
              resultTypeId, sampledImageId, coord.id,
              imageOperands);
          } break;

          default:
            Logger::warn(str::format(
              "DxbcCompiler: Unhandled instruction: ",
              ins.op));
            return;
        }
      } else {
        Logger::warn(str::format("DxbcCompiler: ", ins.op, ": Unsupported image type"));
        resultId = m_module.constNull(resultTypeId);
      }

      DxbcRegisterValue result;
      result.type = texelType;
      result.id = imageOperands.sparse
        ?
      emitExtractSparseTexel(texelTypeId, resultId)
        : resultId;

      // Swizzle components using the texture swizzle
      // and the destination operand's write mask
      if (result.type.ccount != 1) {
        result = emitRegisterSwizzle(result,
          textureReg.swizzle, ins.dst[0].mask);
      }

      emitRegisterStore(ins.dst[0], result);

      if (imageOperands.sparse)
        emitStoreSparseFeedback(ins.dst[1], resultId);
    }

    // Implements the load_uav_typed instruction.
    void emitTypedUavLoad(
        const DxbcShaderInstruction& ins) {
      // load_uav_typed has three operands:
      // (dst0) The destination register
      // (src0) The texture or buffer coordinates
      // (src1) The UAV to load from
      const uint32_t registerId = ins.src[1].idx[0].offset;
      const DxbcUav uavInfo = m_uavs.at(registerId);

      // Mark this UAV as read for barrier tracking
      emitUavBarrier(uint64_t(1u) << registerId, 0u);

      // Load texture coordinates
      DxbcRegisterValue texCoord = emitLoadTexCoord(
        ins.src[0], uavInfo.imageInfo);

      SpirvImageOperands imageOperands;
      imageOperands.sparse = ins.dstCount == 2;

      if (uavInfo.coherence) {
        imageOperands.flags |= spv::ImageOperandsNonPrivateTexelMask
          | spv::ImageOperandsMakeTexelVisibleMask;
        imageOperands.makeVisible = m_module.constu32(uavInfo.coherence);
      }

      DxbcVectorType texelType;
      texelType.ctype = uavInfo.sampledType;
      texelType.ccount = 4;

      uint32_t texelTypeId = getVectorTypeId(texelType);
      uint32_t resultTypeId = texelTypeId;
      uint32_t resultId = 0;

      if (imageOperands.sparse)
        resultTypeId = getSparseResultTypeId(texelTypeId);

      // Load source value from the UAV
      resultId = m_module.opImageRead(resultTypeId,
        m_module.opLoad(uavInfo.imageTypeId, uavInfo.varId),
        texCoord.id, imageOperands);

      // Apply component swizzle and mask
      DxbcRegisterValue uavValue;
      uavValue.type = texelType;
      uavValue.id = imageOperands.sparse
        ? emitExtractSparseTexel(texelTypeId, resultId)
        : resultId;

      uavValue = emitRegisterSwizzle(uavValue,
        ins.src[1].swizzle, ins.dst[0].mask);

      emitRegisterStore(ins.dst[0], uavValue);

      if (imageOperands.sparse)
        emitStoreSparseFeedback(ins.dst[1], resultId);
    }

    // Implements the store_uav_typed instruction.
    void emitTypedUavStore(
        const DxbcShaderInstruction& ins) {
      // store_uav_typed has three operands:
      // (dst0) The destination UAV
      // (src0) The texture or buffer coordinates
      // (src1) The value to store
      const DxbcBufferInfo uavInfo = getBufferInfo(ins.dst[0]);
      // Mark this UAV as written for barrier tracking
      emitUavBarrier(0u, uint64_t(1u) << ins.dst[0].idx[0].offset);

      // Set image operands for coherent access if necessary
      SpirvImageOperands imageOperands;

      if (uavInfo.coherence) {
        imageOperands.flags |= spv::ImageOperandsNonPrivateTexelMask
          | spv::ImageOperandsMakeTexelAvailableMask;
        imageOperands.makeAvailable = m_module.constu32(uavInfo.coherence);
      }

      // Load texture coordinates
      DxbcRegisterValue texCoord = emitLoadTexCoord(ins.src[0], uavInfo.image);

      // Load the value that will be written to the image. We'll
      // have to cast it to the component type of the image.
      const DxbcRegisterValue texValue = emitRegisterBitcast(
        emitRegisterLoad(ins.src[1], DxbcRegMask(true, true, true, true)),
        uavInfo.stype);

      // Write the given value to the image
      m_module.opImageWrite(
        m_module.opLoad(uavInfo.typeId, uavInfo.varId),
        texCoord.id, texValue.id, imageOperands);
    }

    // Opens a structured 'if' block.
    void emitControlFlowIf(
        const DxbcShaderInstruction& ins) {
      // Load the first component of the condition
      // operand and perform a zero test on it.
      const DxbcRegisterValue condition = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));

      // Declare the 'if' block.
      // We do not know if there
      // will be an 'else' block or not, so we'll assume
      // that there is one and leave it empty otherwise.
      DxbcCfgBlock block;
      block.type = DxbcCfgBlockType::If;
      block.b_if.ztestId = emitRegisterZeroTest(condition, ins.controls.zeroTest()).id;
      block.b_if.labelIf = m_module.allocateId();
      block.b_if.labelElse = 0;
      block.b_if.labelEnd = m_module.allocateId();
      block.b_if.headerPtr = m_module.getInsertionPtr();
      m_controlFlowBlocks.push_back(block);

      // We'll insert the branch instruction when closing
      // the block, since we don't know whether or not an
      // else block is needed right now.
      m_module.opLabel(block.b_if.labelIf);
    }

    // Opens the 'else' branch of the innermost 'if' block.
    void emitControlFlowElse(
        const DxbcShaderInstruction& ins) {
      if (m_controlFlowBlocks.size() == 0
       || m_controlFlowBlocks.back().type != DxbcCfgBlockType::If
       || m_controlFlowBlocks.back().b_if.labelElse != 0)
        throw DxvkError("DxbcCompiler: 'Else' without 'If' found");

      // Set the 'Else' flag so that we do
      // not insert a dummy block on 'EndIf'
      DxbcCfgBlock& block = m_controlFlowBlocks.back();
      block.b_if.labelElse = m_module.allocateId();

      // Close the 'If' block by branching to
      // the merge block we declared earlier
      m_module.opBranch(block.b_if.labelEnd);
      m_module.opLabel (block.b_if.labelElse);
    }

    // Closes the innermost 'if' block and emits its header.
    void emitControlFlowEndIf(
        const DxbcShaderInstruction& ins) {
      if (m_controlFlowBlocks.size() == 0
       || m_controlFlowBlocks.back().type != DxbcCfgBlockType::If)
        throw DxvkError("DxbcCompiler: 'EndIf' without 'If' found");

      // Remove the block from the stack, it's closed
      DxbcCfgBlock block = m_controlFlowBlocks.back();
      m_controlFlowBlocks.pop_back();

      // Write out the 'if' header
      m_module.beginInsertion(block.b_if.headerPtr);

      m_module.opSelectionMerge(
        block.b_if.labelEnd,
        spv::SelectionControlMaskNone);

      m_module.opBranchConditional(
        block.b_if.ztestId,
        block.b_if.labelIf,
        block.b_if.labelElse != 0
          ? block.b_if.labelElse
          : block.b_if.labelEnd);

      m_module.endInsertion();

      // End the active 'if' or 'else' block
      m_module.opBranch(block.b_if.labelEnd);
      m_module.opLabel (block.b_if.labelEnd);
    }

    // Opens a 'switch' block; the actual OpSwitch is
    // inserted later once all case labels are known.
    void emitControlFlowSwitch(
        const DxbcShaderInstruction& ins) {
      // Load the selector as a scalar unsigned integer
      const DxbcRegisterValue selector = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));

      // Declare switch block. We cannot insert the switch
      // instruction itself yet because the number of case
      // statements and blocks is unknown at this point.
      DxbcCfgBlock block;
      block.type = DxbcCfgBlockType::Switch;
      block.b_switch.insertPtr = m_module.getInsertionPtr();
      block.b_switch.selectorId = selector.id;
      block.b_switch.labelBreak = m_module.allocateId();
      block.b_switch.labelCase = m_module.allocateId();
      block.b_switch.labelDefault = 0;
      block.b_switch.labelCases = nullptr;
      m_controlFlowBlocks.push_back(block);

      // Define the first 'case' label
      m_module.opLabel(block.b_switch.labelCase);
    }

    // Records a 'case' label in the innermost 'switch' block.
    void emitControlFlowCase(
        const DxbcShaderInstruction& ins) {
      if (m_controlFlowBlocks.size() == 0
       || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)
        throw DxvkError("DxbcCompiler: 'Case' without 'Switch' found");

      // The source operand must be a 32-bit immediate.
      if (ins.src[0].type != DxbcOperandType::Imm32)
        throw DxvkError("DxbcCompiler: Invalid operand type for 'Case'");

      // Use the last label allocated for 'case'.
      DxbcCfgBlockSwitch* block = &m_controlFlowBlocks.back().b_switch;

      if (caseBlockIsFallthrough()) {
        // Previous case falls through: start a fresh label
        block->labelCase = m_module.allocateId();

        m_module.opBranch(block->labelCase);
        m_module.opLabel (block->labelCase);
      }

      // Prepend the literal/label pair to the linked list;
      // the original order is restored in emitControlFlowEndSwitch.
      DxbcSwitchLabel label;
      label.desc.literal = ins.src[0].imm.u32_1;
      label.desc.labelId = block->labelCase;
      label.next = block->labelCases;
      block->labelCases = new DxbcSwitchLabel(label);
    }

    void
emitControlFlowDefault(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)\n throw DxvkError(\"DxbcCompiler: 'Default' without 'Switch' found\");\n \n DxbcCfgBlockSwitch* block = &m_controlFlowBlocks.back().b_switch;\n\n if (caseBlockIsFallthrough()) {\n block->labelCase = m_module.allocateId();\n\n m_module.opBranch(block->labelCase);\n m_module.opLabel (block->labelCase);\n }\n\n // Set the last label allocated for 'case' as the default label.\n block->labelDefault = block->labelCase;\n }\n void emitControlFlowEndSwitch(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)\n throw DxvkError(\"DxbcCompiler: 'EndSwitch' without 'Switch' found\");\n \n // Remove the block from the stack, it's closed\n DxbcCfgBlock block = m_controlFlowBlocks.back();\n m_controlFlowBlocks.pop_back();\n\n if (!block.b_switch.labelDefault) {\n block.b_switch.labelDefault = caseBlockIsFallthrough()\n ? block.b_switch.labelBreak\n : block.b_switch.labelCase;\n }\n \n // Close the current 'case' block\n m_module.opBranch(block.b_switch.labelBreak);\n \n // Insert the 'switch' statement. 
For that, we need to\n // gather all the literal-label pairs for the construct.\n m_module.beginInsertion(block.b_switch.insertPtr);\n m_module.opSelectionMerge(\n block.b_switch.labelBreak,\n spv::SelectionControlMaskNone);\n \n // We'll restore the original order of the case labels here\n std::vector jumpTargets;\n for (auto i = block.b_switch.labelCases; i != nullptr; i = i->next)\n jumpTargets.insert(jumpTargets.begin(), i->desc);\n \n m_module.opSwitch(\n block.b_switch.selectorId,\n block.b_switch.labelDefault,\n jumpTargets.size(),\n jumpTargets.data());\n m_module.endInsertion();\n \n // Destroy the list of case labels\n // FIXME we're leaking memory if compilation fails.\n DxbcSwitchLabel* caseLabel = block.b_switch.labelCases;\n \n while (caseLabel != nullptr)\n delete std::exchange(caseLabel, caseLabel->next);\n\n // Begin new block after switch blocks\n m_module.opLabel(block.b_switch.labelBreak);\n }\n void emitControlFlowLoop(\n const DxbcShaderInstruction& ins) {\n // Declare the 'loop' block\n DxbcCfgBlock block;\n block.type = DxbcCfgBlockType::Loop;\n block.b_loop.labelHeader = m_module.allocateId();\n block.b_loop.labelBegin = m_module.allocateId();\n block.b_loop.labelContinue = m_module.allocateId();\n block.b_loop.labelBreak = m_module.allocateId();\n m_controlFlowBlocks.push_back(block);\n \n m_module.opBranch(block.b_loop.labelHeader);\n m_module.opLabel (block.b_loop.labelHeader);\n \n m_module.opLoopMerge(\n block.b_loop.labelBreak,\n block.b_loop.labelContinue,\n spv::LoopControlMaskNone);\n \n m_module.opBranch(block.b_loop.labelBegin);\n m_module.opLabel (block.b_loop.labelBegin);\n }\n void emitControlFlowEndLoop(\n const DxbcShaderInstruction& ins) {\n if (m_controlFlowBlocks.size() == 0\n || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Loop)\n throw DxvkError(\"DxbcCompiler: 'EndLoop' without 'Loop' found\");\n \n // Remove the block from the stack, it's closed\n const DxbcCfgBlock block = m_controlFlowBlocks.back();\n 
m_controlFlowBlocks.pop_back();

  // Declare the continue block
  m_module.opBranch(block.b_loop.labelContinue);
  m_module.opLabel (block.b_loop.labelContinue);

  // Declare the merge block
  // (continue block takes the back-edge to the loop header,
  // then the break/merge label opens the code after the loop)
  m_module.opBranch(block.b_loop.labelHeader);
  m_module.opLabel (block.b_loop.labelBreak);
}


// Handles unconditional 'break'/'continue'. 'break' may target the
// innermost Loop OR Switch; 'continue' only targets a Loop.
void emitControlFlowBreak(
    const DxbcShaderInstruction& ins) {
  const bool isBreak = ins.op == DxbcOpcode::Break;

  DxbcCfgBlock* cfgBlock = isBreak
    ? cfgFindBlock({ DxbcCfgBlockType::Loop, DxbcCfgBlockType::Switch })
    : cfgFindBlock({ DxbcCfgBlockType::Loop });

  if (cfgBlock == nullptr)
    throw DxvkError("DxbcCompiler: 'Break' or 'Continue' outside 'Loop' or 'Switch' found");

  if (cfgBlock->type == DxbcCfgBlockType::Loop) {
    m_module.opBranch(isBreak
      ? cfgBlock->b_loop.labelBreak
      : cfgBlock->b_loop.labelContinue);
  } else /* if (cfgBlock->type == DxbcCfgBlockType::Switch) */ {
    m_module.opBranch(cfgBlock->b_switch.labelBreak);
  }

  // Subsequent instructions assume that there is an open block
  const uint32_t labelId = m_module.allocateId();
  m_module.opLabel(labelId);

  // If this is on the same level as a switch-case construct,
  // rather than being nested inside an 'if' statement, close
  // the current 'case' block.
  if (m_controlFlowBlocks.back().type == DxbcCfgBlockType::Switch)
    cfgBlock->b_switch.labelCase = labelId;
}


// Handles conditional 'breakc'/'continuec' by wrapping the branch
// into a small if-construct around the zero test of src0.x.
void emitControlFlowBreakc(
    const DxbcShaderInstruction& ins) {
  const bool isBreak = ins.op == DxbcOpcode::Breakc;

  DxbcCfgBlock* cfgBlock = isBreak
    ? cfgFindBlock({ DxbcCfgBlockType::Loop, DxbcCfgBlockType::Switch })
    : cfgFindBlock({ DxbcCfgBlockType::Loop });

  if (cfgBlock == nullptr)
    throw DxvkError("DxbcCompiler: 'Breakc' or 'Continuec' outside 'Loop' or 'Switch' found");

  // Perform zero test on the first component of the condition
  const DxbcRegisterValue condition = emitRegisterLoad(
    ins.src[0], DxbcRegMask(true, false, false, false));

  const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
    condition, ins.controls.zeroTest());

  // We basically have to wrap this into an 'if' block
  const uint32_t breakBlock = m_module.allocateId();
  const uint32_t mergeBlock = m_module.allocateId();

  m_module.opSelectionMerge(mergeBlock,
    spv::SelectionControlMaskNone);

  m_module.opBranchConditional(
    zeroTest.id, breakBlock, mergeBlock);

  m_module.opLabel(breakBlock);

  if (cfgBlock->type == DxbcCfgBlockType::Loop) {
    m_module.opBranch(isBreak
      ? cfgBlock->b_loop.labelBreak
      : cfgBlock->b_loop.labelContinue);
  } else /* if (cfgBlock->type == DxbcCfgBlockType::Switch) */ {
    m_module.opBranch(cfgBlock->b_switch.labelBreak);
  }

  m_module.opLabel(mergeBlock);
}


// Handles 'ret'. Inside open control flow this emits OpReturn and
// re-opens a block; at top level it ends the current function.
void emitControlFlowRet(
    const DxbcShaderInstruction& ins) {
  if (m_controlFlowBlocks.size() != 0) {
    uint32_t labelId = m_module.allocateId();

    m_module.opReturn();
    m_module.opLabel(labelId);

    // return can be used in place of break to terminate a case block
    if (m_controlFlowBlocks.back().type == DxbcCfgBlockType::Switch)
      m_controlFlowBlocks.back().b_switch.labelCase = labelId;

    m_topLevelIsUniform = false;
  } else {
    // Last instruction in the current function
    this->emitFunctionEnd();
  }
}


// Handles conditional 'retc': wraps OpReturn into an if-construct
// keyed on the zero test of src0.x.
void emitControlFlowRetc(
    const DxbcShaderInstruction& ins) {
  // Perform zero test on the first component of the condition
  const DxbcRegisterValue condition = emitRegisterLoad(
    ins.src[0], DxbcRegMask(true, false, false, false));

  const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
    condition, ins.controls.zeroTest());

  // We basically have to wrap this into an 'if' block
  const uint32_t returnLabel   = m_module.allocateId();
  const uint32_t continueLabel = m_module.allocateId();

  m_module.opSelectionMerge(continueLabel,
    spv::SelectionControlMaskNone);

  m_module.opBranchConditional(
    zeroTest.id, returnLabel, continueLabel);

  m_module.opLabel(returnLabel);
  m_module.opReturn();

  m_module.opLabel(continueLabel);

  // The return condition may be non-uniform
  m_topLevelIsUniform = false;
}


// Handles 'discard': demotes the invocation to a helper invocation
// when the zero test of src0.x passes.
void emitControlFlowDiscard(
    const DxbcShaderInstruction& ins) {
  // Discard actually has an operand that determines
  // whether or not the fragment should be discarded
  const DxbcRegisterValue condition = emitRegisterLoad(
    ins.src[0], DxbcRegMask(true, false, false, false));

  const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
    condition, ins.controls.zeroTest());

  DxbcConditional cond;
  cond.labelIf  = m_module.allocateId();
  cond.labelEnd = m_module.allocateId();

  m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);
  m_module.opBranchConditional(zeroTest.id, cond.labelIf, cond.labelEnd);

  m_module.opLabel(cond.labelIf);
  m_module.opDemoteToHelperInvocation();
  m_module.opBranch(cond.labelEnd);

  m_module.opLabel(cond.labelEnd);

  m_module.enableCapability(spv::CapabilityDemoteToHelperInvocation);

  // Discard is just retc in a trenchcoat
  m_topLevelIsUniform = false;
}


// Handles 'label': begins a new void() SPIR-V function for the
// given DXBC subroutine number.
void emitControlFlowLabel(
    const DxbcShaderInstruction& ins) {
  uint32_t functionNr = ins.dst[0].idx[0].offset;
  uint32_t functionId = getFunctionId(functionNr);

  this->emitFunctionBegin(
    functionId,
    m_module.defVoidType(),
    m_module.defFunctionType(
      m_module.defVoidType(), 0, nullptr));

  m_module.opLabel(m_module.allocateId());
  m_module.setDebugName(functionId, str::format("label", functionNr).c_str());

  m_insideFunction = true;

  // We have to assume that this function gets
  // called from non-uniform control flow
  m_topLevelIsUniform = false;
}


// Handles 'call': unconditional call of a subroutine function.
void emitControlFlowCall(
    const DxbcShaderInstruction& ins) {
  uint32_t functionNr = ins.src[0].idx[0].offset;
  uint32_t functionId = getFunctionId(functionNr);

  m_module.opFunctionCall(
    m_module.defVoidType(),
    functionId, 0, nullptr);
}


// Handles 'callc': conditional subroutine call wrapped in an
// if-construct keyed on the zero test of src0.x.
void emitControlFlowCallc(
    const DxbcShaderInstruction& ins) {
  uint32_t functionNr = ins.src[1].idx[0].offset;
  uint32_t functionId = getFunctionId(functionNr);

  // Perform zero test on the first component of the condition
  const DxbcRegisterValue condition = emitRegisterLoad(
    ins.src[0], DxbcRegMask(true, false, false, false));

  const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
    condition, ins.controls.zeroTest());

  // We basically have to wrap this into an 'if' block
  const uint32_t callLabel = m_module.allocateId();
  const uint32_t skipLabel = m_module.allocateId();

  m_module.opSelectionMerge(skipLabel,
    spv::SelectionControlMaskNone);

  m_module.opBranchConditional(
    zeroTest.id, callLabel, skipLabel);

  m_module.opLabel(callLabel);
  m_module.opFunctionCall(
    m_module.defVoidType(),
    functionId, 0, nullptr);

  m_module.opBranch(skipLabel);
  m_module.opLabel(skipLabel);
}


// Dispatcher for all control-flow opcodes. UAV barriers are
// emitted around constructs that may need them (continued past
// this chunk).
void emitControlFlow(
    const DxbcShaderInstruction& ins) {
  switch (ins.op) {
    case DxbcOpcode::If:
      this->emitUavBarrier(0, 0);
      this->emitControlFlowIf(ins);
      break;

    case DxbcOpcode::Else:
      this->emitControlFlowElse(ins);
      break;

    case DxbcOpcode::EndIf:
      this->emitControlFlowEndIf(ins);
      this->emitUavBarrier(0, 0);
      break;

    case DxbcOpcode::Switch:
      this->emitUavBarrier(0, 0);
      this->emitControlFlowSwitch(ins);
      break;

    case DxbcOpcode::Case:
      this->emitControlFlowCase(ins);
      break;

    case DxbcOpcode::Default:
      this->emitControlFlowDefault(ins);
      break;

    case DxbcOpcode::EndSwitch:
      this->emitControlFlowEndSwitch(ins);
this->emitUavBarrier(0, 0);\n break;\n\n case DxbcOpcode::Loop:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowLoop(ins);\n break;\n \n case DxbcOpcode::EndLoop:\n this->emitControlFlowEndLoop(ins);\n this->emitUavBarrier(0, 0);\n break;\n\n case DxbcOpcode::Break:\n case DxbcOpcode::Continue:\n this->emitControlFlowBreak(ins);\n break;\n \n case DxbcOpcode::Breakc:\n case DxbcOpcode::Continuec:\n this->emitControlFlowBreakc(ins);\n break;\n\n case DxbcOpcode::Ret:\n this->emitControlFlowRet(ins);\n break;\n\n case DxbcOpcode::Retc:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowRetc(ins);\n break;\n \n case DxbcOpcode::Discard:\n this->emitControlFlowDiscard(ins);\n break;\n \n case DxbcOpcode::Label:\n this->emitControlFlowLabel(ins);\n break;\n\n case DxbcOpcode::Call:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowCall(ins);\n this->emitUavBarrier(-1, -1);\n break;\n\n case DxbcOpcode::Callc:\n this->emitUavBarrier(0, 0);\n this->emitControlFlowCallc(ins);\n this->emitUavBarrier(-1, -1);\n break;\n\n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n }\n }\n DxbcRegisterValue emitBuildConstVecf32(\n float x,\n float y,\n float z,\n float w,\n const DxbcRegMask& writeMask) {\n // TODO refactor these functions into one single template\n std::array ids = { 0, 0, 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0]) ids[componentIndex++] = m_module.constf32(x);\n if (writeMask[1]) ids[componentIndex++] = m_module.constf32(y);\n if (writeMask[2]) ids[componentIndex++] = m_module.constf32(z);\n if (writeMask[3]) ids[componentIndex++] = m_module.constf32(w);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? 
m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildConstVecu32(\n uint32_t x,\n uint32_t y,\n uint32_t z,\n uint32_t w,\n const DxbcRegMask& writeMask) {\n std::array ids = { 0, 0, 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0]) ids[componentIndex++] = m_module.constu32(x);\n if (writeMask[1]) ids[componentIndex++] = m_module.constu32(y);\n if (writeMask[2]) ids[componentIndex++] = m_module.constu32(z);\n if (writeMask[3]) ids[componentIndex++] = m_module.constu32(w);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildConstVeci32(\n int32_t x,\n int32_t y,\n int32_t z,\n int32_t w,\n const DxbcRegMask& writeMask) {\n std::array ids = { 0, 0, 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0]) ids[componentIndex++] = m_module.consti32(x);\n if (writeMask[1]) ids[componentIndex++] = m_module.consti32(y);\n if (writeMask[2]) ids[componentIndex++] = m_module.consti32(z);\n if (writeMask[3]) ids[componentIndex++] = m_module.consti32(w);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? 
m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildConstVecf64(\n double xy,\n double zw,\n const DxbcRegMask& writeMask) {\n std::array ids = { 0, 0 };\n uint32_t componentIndex = 0;\n \n if (writeMask[0] && writeMask[1]) ids[componentIndex++] = m_module.constf64(xy);\n if (writeMask[2] && writeMask[3]) ids[componentIndex++] = m_module.constf64(zw);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Float64;\n result.type.ccount = componentIndex;\n result.id = componentIndex > 1\n ? m_module.constComposite(\n getVectorTypeId(result.type),\n componentIndex, ids.data())\n : ids[0];\n return result;\n }\n DxbcRegisterValue emitBuildVector(\n DxbcRegisterValue scalar,\n uint32_t count) {\n if (count == 1)\n return scalar;\n\n std::array scalarIds =\n { scalar.id, scalar.id, scalar.id, scalar.id };\n\n DxbcRegisterValue result;\n result.type.ctype = scalar.type.ctype;\n result.type.ccount = count;\n result.id = m_module.constComposite(\n getVectorTypeId(result.type),\n count, scalarIds.data());\n return result;\n }\n DxbcRegisterValue emitBuildZeroVector(\n DxbcVectorType type) {\n DxbcRegisterValue result;\n result.type.ctype = type.ctype;\n result.type.ccount = 1;\n\n switch (type.ctype) {\n case DxbcScalarType::Float32: result.id = m_module.constf32(0.0f); break;\n case DxbcScalarType::Uint32: result.id = m_module.constu32(0u); break;\n case DxbcScalarType::Sint32: result.id = m_module.consti32(0); break;\n default: throw DxvkError(\"DxbcCompiler: Invalid scalar type\");\n }\n\n return emitBuildVector(result, type.ccount);\n }\n DxbcRegisterValue emitRegisterBitcast(\n DxbcRegisterValue srcValue,\n DxbcScalarType dstType) {\n DxbcScalarType srcType = srcValue.type.ctype;\n\n if (srcType == dstType)\n return srcValue;\n \n DxbcRegisterValue result;\n result.type.ctype = dstType;\n result.type.ccount = srcValue.type.ccount;\n\n if (isDoubleType(srcType)) 
result.type.ccount *= 2;\n if (isDoubleType(dstType)) result.type.ccount /= 2;\n\n result.id = m_module.opBitcast(\n getVectorTypeId(result.type),\n srcValue.id);\n return result;\n }\n DxbcRegisterValue emitRegisterSwizzle(\n DxbcRegisterValue value,\n DxbcRegSwizzle swizzle,\n DxbcRegMask writeMask) {\n if (value.type.ccount == 1)\n return emitRegisterExtend(value, writeMask.popCount());\n \n std::array indices;\n \n uint32_t dstIndex = 0;\n \n for (uint32_t i = 0; i < 4; i++) {\n if (writeMask[i])\n indices[dstIndex++] = swizzle[i];\n }\n \n // If the swizzle combined with the mask can be reduced\n // to a no-op, we don't need to insert any instructions.\n bool isIdentitySwizzle = dstIndex == value.type.ccount;\n \n for (uint32_t i = 0; i < dstIndex && isIdentitySwizzle; i++)\n isIdentitySwizzle &= indices[i] == i;\n \n if (isIdentitySwizzle)\n return value;\n \n // Use OpCompositeExtract if the resulting vector contains\n // only one component, and OpVectorShuffle if it is a vector.\n DxbcRegisterValue result;\n result.type.ctype = value.type.ctype;\n result.type.ccount = dstIndex;\n \n const uint32_t typeId = getVectorTypeId(result.type);\n \n if (dstIndex == 1) {\n result.id = m_module.opCompositeExtract(\n typeId, value.id, 1, indices.data());\n } else {\n result.id = m_module.opVectorShuffle(\n typeId, value.id, value.id,\n dstIndex, indices.data());\n }\n \n return result;\n }\n DxbcRegisterValue emitRegisterExtract(\n DxbcRegisterValue value,\n DxbcRegMask mask) {\n return emitRegisterSwizzle(value,\n DxbcRegSwizzle(0, 1, 2, 3), mask);\n }\n DxbcRegisterValue emitRegisterInsert(\n DxbcRegisterValue dstValue,\n DxbcRegisterValue srcValue,\n DxbcRegMask srcMask) {\n DxbcRegisterValue result;\n result.type = dstValue.type;\n \n const uint32_t typeId = getVectorTypeId(result.type);\n \n if (srcMask.popCount() == 0) {\n // Nothing to do if the insertion mask is empty\n result.id = dstValue.id;\n } else if (dstValue.type.ccount == 1) {\n // Both values are 
scalar, so the first component\n // of the write mask decides which one to take.\n result.id = srcMask[0] ? srcValue.id : dstValue.id;\n } else if (srcValue.type.ccount == 1) {\n // The source value is scalar. Since OpVectorShuffle\n // requires both arguments to be vectors, we have to\n // use OpCompositeInsert to modify the vector instead.\n const uint32_t componentId = srcMask.firstSet();\n \n result.id = m_module.opCompositeInsert(typeId,\n srcValue.id, dstValue.id, 1, &componentId);\n } else {\n // Both arguments are vectors. We can determine which\n // components to take from which vector and use the\n // OpVectorShuffle instruction.\n std::array components;\n uint32_t srcComponentId = dstValue.type.ccount;\n \n for (uint32_t i = 0; i < dstValue.type.ccount; i++)\n components.at(i) = srcMask[i] ? srcComponentId++ : i;\n \n result.id = m_module.opVectorShuffle(\n typeId, dstValue.id, srcValue.id,\n dstValue.type.ccount, components.data());\n }\n \n return result;\n }\n DxbcRegisterValue emitRegisterConcat(\n DxbcRegisterValue value1,\n DxbcRegisterValue value2) {\n std::array ids =\n {{ value1.id, value2.id }};\n \n DxbcRegisterValue result;\n result.type.ctype = value1.type.ctype;\n result.type.ccount = value1.type.ccount + value2.type.ccount;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n ids.size(), ids.data());\n return result;\n }\n DxbcRegisterValue emitRegisterExtend(\n DxbcRegisterValue value,\n uint32_t size) {\n if (size == 1)\n return value;\n \n std::array ids = {{\n value.id, value.id,\n value.id, value.id, \n }};\n \n DxbcRegisterValue result;\n result.type.ctype = value.type.ctype;\n result.type.ccount = size;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n size, ids.data());\n return result;\n }\n DxbcRegisterValue emitRegisterAbsolute(\n DxbcRegisterValue value) {\n const uint32_t typeId = getVectorTypeId(value.type);\n \n switch (value.type.ctype) {\n case DxbcScalarType::Float32: 
value.id = m_module.opFAbs(typeId, value.id); break;\n case DxbcScalarType::Float64: value.id = m_module.opFAbs(typeId, value.id); break;\n case DxbcScalarType::Sint32: value.id = m_module.opSAbs(typeId, value.id); break;\n case DxbcScalarType::Sint64: value.id = m_module.opSAbs(typeId, value.id); break;\n default: Logger::warn(\"DxbcCompiler: Cannot get absolute value for given type\");\n }\n \n return value;\n }\n DxbcRegisterValue emitRegisterNegate(\n DxbcRegisterValue value) {\n const uint32_t typeId = getVectorTypeId(value.type);\n \n switch (value.type.ctype) {\n case DxbcScalarType::Float32: value.id = m_module.opFNegate(typeId, value.id); break;\n case DxbcScalarType::Float64: value.id = m_module.opFNegate(typeId, value.id); break;\n case DxbcScalarType::Sint32: value.id = m_module.opSNegate(typeId, value.id); break;\n case DxbcScalarType::Sint64: value.id = m_module.opSNegate(typeId, value.id); break;\n default: Logger::warn(\"DxbcCompiler: Cannot negate given type\");\n }\n \n return value;\n }\n DxbcRegisterValue emitRegisterZeroTest(\n DxbcRegisterValue value,\n DxbcZeroTest test) {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Bool;\n result.type.ccount = 1;\n \n const uint32_t zeroId = m_module.constu32(0u);\n const uint32_t typeId = getVectorTypeId(result.type);\n \n result.id = test == DxbcZeroTest::TestZ\n ? 
m_module.opIEqual (typeId, value.id, zeroId)\n : m_module.opINotEqual(typeId, value.id, zeroId);\n return result;\n }\n DxbcRegisterValue emitRegisterMaskBits(\n DxbcRegisterValue value,\n uint32_t mask) {\n DxbcRegisterValue maskVector = emitBuildConstVecu32(\n mask, mask, mask, mask, DxbcRegMask::firstN(value.type.ccount));\n \n DxbcRegisterValue result;\n result.type = value.type;\n result.id = m_module.opBitwiseAnd(\n getVectorTypeId(result.type),\n value.id, maskVector.id);\n return result;\n }\n DxbcRegisterValue emitSrcOperandModifiers(\n DxbcRegisterValue value,\n DxbcRegModifiers modifiers) {\n if (modifiers.test(DxbcRegModifier::Abs))\n value = emitRegisterAbsolute(value);\n \n if (modifiers.test(DxbcRegModifier::Neg))\n value = emitRegisterNegate(value);\n return value;\n }\n DxbcRegisterValue emitDstOperandModifiers(\n DxbcRegisterValue value,\n DxbcOpModifiers modifiers) {\n const uint32_t typeId = getVectorTypeId(value.type);\n \n if (modifiers.saturate) {\n DxbcRegMask mask;\n DxbcRegisterValue vec0, vec1;\n\n if (value.type.ctype == DxbcScalarType::Float32) {\n mask = DxbcRegMask::firstN(value.type.ccount);\n vec0 = emitBuildConstVecf32(0.0f, 0.0f, 0.0f, 0.0f, mask);\n vec1 = emitBuildConstVecf32(1.0f, 1.0f, 1.0f, 1.0f, mask);\n } else if (value.type.ctype == DxbcScalarType::Float64) {\n mask = DxbcRegMask::firstN(value.type.ccount * 2);\n vec0 = emitBuildConstVecf64(0.0, 0.0, mask);\n vec1 = emitBuildConstVecf64(1.0, 1.0, mask);\n }\n\n if (mask)\n value.id = m_module.opNClamp(typeId, value.id, vec0.id, vec1.id);\n }\n \n return value;\n }\n uint32_t emitExtractSparseTexel(\n uint32_t texelTypeId,\n uint32_t resultId) {\n uint32_t index = 1;\n\n return m_module.opCompositeExtract(\n texelTypeId, resultId, 1, &index);\n }\n void emitStoreSparseFeedback(\n const DxbcRegister& feedbackRegister,\n uint32_t resultId) {\n if (feedbackRegister.type != DxbcOperandType::Null) {\n uint32_t index = 0;\n\n DxbcRegisterValue result;\n result.type = { 
DxbcScalarType::Uint32, 1 };\n result.id = m_module.opCompositeExtract(\n getScalarTypeId(DxbcScalarType::Uint32),\n resultId, 1, &index);\n\n emitRegisterStore(feedbackRegister, result);\n }\n }\n DxbcRegisterPointer emitArrayAccess(\n DxbcRegisterPointer pointer,\n spv::StorageClass sclass,\n uint32_t index) {\n uint32_t ptrTypeId = m_module.defPointerType(\n getVectorTypeId(pointer.type), sclass);\n \n DxbcRegisterPointer result;\n result.type = pointer.type;\n result.id = m_module.opAccessChain(\n ptrTypeId, pointer.id, 1, &index);\n return result;\n }\n uint32_t emitLoadSampledImage(\n const DxbcShaderResource& textureResource,\n const DxbcSampler& samplerResource,\n bool isDepthCompare) {\n uint32_t baseId = isDepthCompare\n ? textureResource.depthTypeId\n : textureResource.colorTypeId;\n\n if (!baseId)\n return 0;\n\n uint32_t sampledImageType = m_module.defSampledImageType(baseId);\n\n return m_module.opSampledImage(sampledImageType,\n m_module.opLoad(textureResource.imageTypeId, textureResource.varId),\n m_module.opLoad(samplerResource.typeId, samplerResource.varId));\n }\n DxbcRegisterPointer emitGetTempPtr(\n const DxbcRegister& operand) {\n // r# regs are indexed as follows:\n // (0) register index (immediate)\n uint32_t regIdx = operand.idx[0].offset;\n\n if (regIdx >= m_rRegs.size())\n m_rRegs.resize(regIdx + 1, 0u);\n\n if (!m_rRegs.at(regIdx)) {\n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = 4;\n info.type.alength = 0;\n info.sclass = spv::StorageClassPrivate;\n\n uint32_t varId = emitNewVariable(info);\n m_rRegs.at(regIdx) = varId;\n\n m_module.setDebugName(varId,\n str::format(\"r\", regIdx).c_str());\n }\n\n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = 4;\n result.id = m_rRegs.at(regIdx);\n return result;\n }\n DxbcRegisterPointer emitGetIndexableTempPtr(\n const DxbcRegister& operand) {\n return getIndexableTempPtr(operand, 
emitIndexLoad(operand.idx[1]));\n }\n DxbcRegisterPointer emitGetInputPtr(\n const DxbcRegister& operand) {\n // In the vertex and pixel stages,\n // v# regs are indexed as follows:\n // (0) register index (relative)\n // \n // In the tessellation and geometry\n // stages, the index has two dimensions:\n // (0) vertex index (relative)\n // (1) register index (relative)\n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = 4;\n \n std::array indices = {{ 0, 0 }};\n \n for (uint32_t i = 0; i < operand.idxDim; i++)\n indices.at(i) = emitIndexLoad(operand.idx[i]).id;\n \n // Pick the input array depending on\n // the program type and operand type\n struct InputArray {\n uint32_t id;\n spv::StorageClass sclass;\n };\n \n const InputArray array = [&] () -> InputArray {\n switch (operand.type) {\n case DxbcOperandType::InputControlPoint:\n return m_programInfo.type() == DxbcProgramType::HullShader\n ? InputArray { m_vArray, spv::StorageClassPrivate }\n : InputArray { m_ds.inputPerVertex, spv::StorageClassInput };\n case DxbcOperandType::InputPatchConstant:\n return m_programInfo.type() == DxbcProgramType::HullShader\n ? 
InputArray { m_hs.outputPerPatch, spv::StorageClassPrivate }\n : InputArray { m_ds.inputPerPatch, spv::StorageClassInput };\n case DxbcOperandType::OutputControlPoint:\n return InputArray { m_hs.outputPerVertex, spv::StorageClassOutput };\n default:\n return { m_vArray, spv::StorageClassPrivate };\n }\n }();\n \n DxbcRegisterInfo info;\n info.type.ctype = result.type.ctype;\n info.type.ccount = result.type.ccount;\n info.type.alength = 0;\n info.sclass = array.sclass;\n \n result.id = m_module.opAccessChain(\n getPointerTypeId(info), array.id,\n operand.idxDim, indices.data());\n \n return result;\n }\n DxbcRegisterPointer emitGetOutputPtr(\n const DxbcRegister& operand) {\n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n // Hull shaders are special in that they have two sets of\n // output registers, one for per-patch values and one for\n // per-vertex values.\n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = 4;\n \n uint32_t registerId = emitIndexLoad(operand.idx[0]).id;\n\n if (m_hs.currPhaseType == DxbcCompilerHsPhase::ControlPoint) {\n std::array indices = {{\n m_module.opLoad(m_module.defIntType(32, 0), m_hs.builtinInvocationId),\n registerId,\n }};\n \n uint32_t ptrTypeId = m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassOutput);\n \n result.id = m_module.opAccessChain(\n ptrTypeId, m_hs.outputPerVertex,\n indices.size(), indices.data());\n } else {\n uint32_t ptrTypeId = m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassPrivate);\n \n result.id = m_module.opAccessChain(\n ptrTypeId, m_hs.outputPerPatch,\n 1, ®isterId);\n }\n\n return result;\n } else {\n // Regular shaders have their output\n // registers set up at declaration time\n return m_oRegs.at(operand.idx[0].offset);\n }\n }\n DxbcRegisterPointer emitGetConstBufPtr(\n const DxbcRegister& operand);\n DxbcRegisterPointer emitGetImmConstBufPtr(\n const DxbcRegister& operand) {\n 
DxbcRegisterValue constId = emitIndexLoad(operand.idx[0]);\n\n if (m_icbArray) {\n // We pad the icb array with an extra zero vector, so we can\n // clamp the index and get correct robustness behaviour.\n constId.id = m_module.opUMin(getVectorTypeId(constId.type),\n constId.id, m_module.constu32(m_icbSize));\n\n DxbcRegisterInfo ptrInfo;\n ptrInfo.type.ctype = DxbcScalarType::Uint32;\n ptrInfo.type.ccount = m_icbComponents;\n ptrInfo.type.alength = 0;\n ptrInfo.sclass = spv::StorageClassPrivate;\n\n DxbcRegisterPointer result;\n result.type.ctype = ptrInfo.type.ctype;\n result.type.ccount = ptrInfo.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(ptrInfo),\n m_icbArray, 1, &constId.id);\n return result;\n } else if (m_constantBuffers.at(Icb_BindingSlotId).varId != 0) {\n const std::array indices =\n {{ m_module.consti32(0), constId.id }};\n \n DxbcRegisterInfo ptrInfo;\n ptrInfo.type.ctype = DxbcScalarType::Float32;\n ptrInfo.type.ccount = m_icbComponents;\n ptrInfo.type.alength = 0;\n ptrInfo.sclass = spv::StorageClassUniform;\n\n DxbcRegisterPointer result;\n result.type.ctype = ptrInfo.type.ctype;\n result.type.ccount = ptrInfo.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(ptrInfo),\n m_constantBuffers.at(Icb_BindingSlotId).varId,\n indices.size(), indices.data());\n return result;\n } else {\n throw DxvkError(\"DxbcCompiler: Immediate constant buffer not defined\");\n }\n }\n DxbcRegisterPointer emitGetOperandPtr(\n const DxbcRegister& operand) {\n switch (operand.type) {\n case DxbcOperandType::Temp:\n return emitGetTempPtr(operand);\n \n case DxbcOperandType::IndexableTemp:\n return emitGetIndexableTempPtr(operand);\n \n case DxbcOperandType::Input:\n case DxbcOperandType::InputControlPoint:\n case DxbcOperandType::InputPatchConstant:\n case DxbcOperandType::OutputControlPoint:\n return emitGetInputPtr(operand);\n \n case DxbcOperandType::Output:\n return emitGetOutputPtr(operand);\n \n case 
DxbcOperandType::ImmediateConstantBuffer:\n return emitGetImmConstBufPtr(operand);\n\n case DxbcOperandType::InputThreadId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinGlobalInvocationId };\n \n case DxbcOperandType::InputThreadGroupId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinWorkgroupId };\n \n case DxbcOperandType::InputThreadIdInGroup:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 3 },\n m_cs.builtinLocalInvocationId };\n \n case DxbcOperandType::InputThreadIndexInGroup:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_cs.builtinLocalInvocationIndex };\n \n case DxbcOperandType::InputCoverageMask: {\n const std::array indices\n = {{ m_module.constu32(0) }};\n \n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassInput),\n m_ps.builtinSampleMaskIn,\n indices.size(), indices.data());\n return result;\n }\n \n case DxbcOperandType::OutputCoverageMask: {\n const std::array indices\n = {{ m_module.constu32(0) }};\n \n DxbcRegisterPointer result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(result.type),\n spv::StorageClassOutput),\n m_ps.builtinSampleMaskOut,\n indices.size(), indices.data());\n return result;\n }\n \n case DxbcOperandType::OutputDepth:\n case DxbcOperandType::OutputDepthGe:\n case DxbcOperandType::OutputDepthLe:\n return DxbcRegisterPointer {\n { DxbcScalarType::Float32, 1 },\n m_ps.builtinDepth };\n \n case DxbcOperandType::OutputStencilRef:\n return DxbcRegisterPointer {\n { DxbcScalarType::Sint32, 1 },\n m_ps.builtinStencilRef };\n\n case DxbcOperandType::InputPrimitiveId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_primitiveIdIn };\n \n case 
DxbcOperandType::InputDomainPoint:\n return DxbcRegisterPointer {\n { DxbcScalarType::Float32, 3 },\n m_ds.builtinTessCoord };\n \n case DxbcOperandType::OutputControlPointId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_hs.builtinInvocationId };\n \n case DxbcOperandType::InputForkInstanceId:\n case DxbcOperandType::InputJoinInstanceId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n getCurrentHsForkJoinPhase()->instanceIdPtr };\n \n case DxbcOperandType::InputGsInstanceId:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_gs.builtinInvocationId };\n \n case DxbcOperandType::InputInnerCoverage:\n return DxbcRegisterPointer {\n { DxbcScalarType::Uint32, 1 },\n m_ps.builtinInnerCoverageId };\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled operand type: \",\n operand.type));\n }\n }\n DxbcRegisterPointer emitGetAtomicPointer(\n const DxbcRegister& operand,\n const DxbcRegister& address) {\n // Query information about the resource itself\n const uint32_t registerId = operand.idx[0].offset;\n const DxbcBufferInfo resourceInfo = getBufferInfo(operand);\n \n // For UAVs and shared memory, different methods\n // of obtaining the final pointer are used.\n bool isTgsm = operand.type == DxbcOperandType::ThreadGroupSharedMemory;\n bool isSsbo = resourceInfo.isSsbo;\n \n // Compute the actual address into the resource\n const DxbcRegisterValue addressValue = [&] {\n switch (resourceInfo.type) {\n case DxbcResourceType::Raw:\n return emitCalcBufferIndexRaw(emitRegisterLoad(\n address, DxbcRegMask(true, false, false, false)));\n \n case DxbcResourceType::Structured: {\n const DxbcRegisterValue addressComponents = emitRegisterLoad(\n address, DxbcRegMask(true, true, false, false));\n \n return emitCalcBufferIndexStructured(\n emitRegisterExtract(addressComponents, DxbcRegMask(true, false, false, false)),\n emitRegisterExtract(addressComponents, DxbcRegMask(false, true, false, false)),\n 
              resourceInfo.stride);
          };

          case DxbcResourceType::Typed: {
            if (isTgsm)
              throw DxvkError("DxbcCompiler: TGSM cannot be typed");

            // Typed UAVs are addressed with texture coordinates
            return emitLoadTexCoord(address,
              m_uavs.at(registerId).imageInfo);
          }

          default:
            throw DxvkError("DxbcCompiler: Unhandled resource type");
        }
      }();

      // Compute the actual pointer
      DxbcRegisterPointer result;
      result.type.ctype  = resourceInfo.stype;
      result.type.ccount = 1;

      if (isTgsm) {
        // Shared memory: index straight into the workgroup array
        result.id = m_module.opAccessChain(resourceInfo.typeId,
          resourceInfo.varId, 1, &addressValue.id);
      } else if (isSsbo) {
        // Storage buffer: first index selects the runtime-array member
        uint32_t indices[2] = { m_module.constu32(0), addressValue.id };
        result.id = m_module.opAccessChain(resourceInfo.typeId,
          resourceInfo.varId, 2, indices);
      } else {
        // Storage image: use a texel pointer; the last operand is the sample ID
        result.id = m_module.opImageTexelPointer(
          m_module.defPointerType(getVectorTypeId(result.type), spv::StorageClassImage),
          resourceInfo.varId, addressValue.id, m_module.constu32(0));
      }

      return result;
    }


    /**
     * \brief Queries the length of a storage buffer
     * \returns Buffer length as a scalar Uint32
     */
    DxbcRegisterValue emitQueryBufferSize(
      const DxbcRegister&           resource) {
      const DxbcBufferInfo bufferInfo = getBufferInfo(resource);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = 1;
      result.id = m_module.opArrayLength(
        getVectorTypeId(result.type),
        bufferInfo.varId, 0);

      return result;
    }


    /**
     * \brief Queries the element count of a texel buffer
     * \returns Element count as a scalar Uint32
     */
    DxbcRegisterValue emitQueryTexelBufferSize(
      const DxbcRegister&           resource) {
      // Load the texel buffer object. This cannot be used with
      // constant buffers or any other type of resource.
      const DxbcBufferInfo bufferInfo = getBufferInfo(resource);

      const uint32_t bufferId = m_module.opLoad(
        bufferInfo.typeId, bufferInfo.varId);

      // We'll store this as a scalar unsigned integer
      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = 1;
      result.id = m_module.opImageQuerySize(
        getVectorTypeId(result.type), bufferId);

      return result;
    }


    /**
     * \brief Queries the mip level count of a resource
     *
     * UAVs and multisampled images have no mip maps,
     * so a constant 1 is reported for those.
     */
    DxbcRegisterValue emitQueryTextureLods(
      const DxbcRegister&           resource) {
      const DxbcBufferInfo info = getBufferInfo(resource);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = 1;

      if (info.image.ms == 0 && info.image.sampled == 1) {
        result.id = m_module.opImageQueryLevels(
          getVectorTypeId(result.type),
          m_module.opLoad(info.typeId, info.varId));
      } else {
        // Report one LOD in case of UAVs or multisampled images
        result.id = m_module.constu32(1);
      }

      return result;
    }


    /**
     * \brief Queries the sample count of a resource
     *
     * For the rasterizer operand the sample count is read
     * from a push constant, since SPIR-V has no built-in
     * equivalent of gl_NumSamples.
     */
    DxbcRegisterValue emitQueryTextureSamples(
      const DxbcRegister&           resource) {
      if (resource.type == DxbcOperandType::Rasterizer) {
        // SPIR-V has no gl_NumSamples equivalent, so we
        // have to work around it using a push constant
        if (!m_ps.pushConstantId)
          m_ps.pushConstantId = emitPushConstants();

        uint32_t uintTypeId = m_module.defIntType(32, 0);
        uint32_t ptrTypeId = m_module.defPointerType(uintTypeId, spv::StorageClassPushConstant);
        uint32_t index = m_module.constu32(0);

        DxbcRegisterValue result;
        result.type.ctype  = DxbcScalarType::Uint32;
        result.type.ccount = 1;
        result.id = m_module.opLoad(uintTypeId,
          m_module.opAccessChain(ptrTypeId, m_ps.pushConstantId, 1, &index));
        return result;
      } else {
        DxbcBufferInfo info = getBufferInfo(resource);

        DxbcRegisterValue result;
        result.type.ctype  = DxbcScalarType::Uint32;
        result.type.ccount = 1;

        if (info.image.ms) {
          result.id = m_module.opImageQuerySamples(
            getVectorTypeId(result.type),
            m_module.opLoad(info.typeId, info.varId));
        } else {
          // OpImageQuerySamples requires MSAA images
          result.id = m_module.constu32(1);
        }

        return result;
      }
    }


    /**
     * \brief Queries the dimensions of an image
     *
     * \param [in] resource Image resource to query
     * \param [in] lod Mip level to query
     * \returns Size vector as Uint32 components
     */
    DxbcRegisterValue emitQueryTextureSize(
      const DxbcRegister&           resource,
            DxbcRegisterValue       lod) {
      const DxbcBufferInfo info = getBufferInfo(resource);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = getTexSizeDim(info.image);

      if (info.image.ms == 0 && info.image.sampled == 1) {
        // Sampled, non-multisampled images take an explicit LOD
        result.id = m_module.opImageQuerySizeLod(
          getVectorTypeId(result.type),
          m_module.opLoad(info.typeId, info.varId),
          lod.id);
      } else {
        result.id = m_module.opImageQuerySize(
          getVectorTypeId(result.type),
          m_module.opLoad(info.typeId, info.varId));
      }

      return result;
    }


    /**
     * \brief Computes a dword index into a structured buffer
     *
     * \param [in] structId Structure index
     * \param [in] structOffset Byte offset within the structure
     * \param [in] structStride Structure stride, in bytes
     */
    DxbcRegisterValue emitCalcBufferIndexStructured(
            DxbcRegisterValue       structId,
            DxbcRegisterValue       structOffset,
            uint32_t                structStride) {
      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Sint32;
      result.type.ccount = 1;

      uint32_t typeId = getVectorTypeId(result.type);
      // Convert the byte offset to a dword offset
      uint32_t offset = m_module.opShiftRightLogical(typeId, structOffset.id, m_module.consti32(2));

      result.id = m_module.opIAdd(typeId,
        m_module.opIMul(typeId, structId.id, m_module.consti32(structStride / 4)),
        offset);
      return result;
    }


    /**
     * \brief Computes a dword index into a raw buffer
     * \param [in] byteOffset Byte offset into the buffer
     */
    DxbcRegisterValue emitCalcBufferIndexRaw(
            DxbcRegisterValue       byteOffset) {
      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Sint32;
      result.type.ccount = 1;

      uint32_t typeId = getVectorTypeId(result.type);
      result.id = m_module.opShiftRightLogical(typeId, byteOffset.id, m_module.consti32(2));
      return result;
    }


    /**
     * \brief Truncates a coordinate vector to the image's dimension count
     */
    DxbcRegisterValue emitCalcTexCoord(
            DxbcRegisterValue       coordVector,
      const DxbcImageInfo&          imageInfo) {
      const uint32_t dim = getTexCoordDim(imageInfo);

      if (dim != coordVector.type.ccount) {
        coordVector = emitRegisterExtract(
          coordVector, DxbcRegMask::firstN(dim));
      }

      return coordVector;
    }


    /**
     * \brief Loads a register as a texture coordinate
     */
    DxbcRegisterValue emitLoadTexCoord(
      const DxbcRegister&           coordReg,
      const DxbcImageInfo&          imageInfo) {
      return emitCalcTexCoord(emitRegisterLoad(coordReg,
        DxbcRegMask(true, true, true, true)), imageInfo);
    }


    /**
     * \brief Loads a register index
     *
     * Combines the immediate offset with the relative
     * index register, if one is present.
     */
    DxbcRegisterValue emitIndexLoad(
            DxbcRegIndex            index) {
      if (index.relReg != nullptr) {
        // Relative index: load the index register ...
        DxbcRegisterValue result = emitRegisterLoad(
          *index.relReg, DxbcRegMask(true, false, false, false));

        // ... and add the constant offset, if any
        if (index.offset != 0) {
          result.id = m_module.opIAdd(
            getVectorTypeId(result.type), result.id,
            m_module.consti32(index.offset));
        }

        return result;
      } else {
        // Immediate index only
        DxbcRegisterValue result;
        result.type.ctype  = DxbcScalarType::Sint32;
        result.type.ccount = 1;
        result.id = m_module.consti32(index.offset);
        return result;
      }
    }


    /**
     * \brief Loads a value through a register pointer
     */
    DxbcRegisterValue emitValueLoad(
            DxbcRegisterPointer     ptr) {
      DxbcRegisterValue result;
      result.type = ptr.type;
      result.id = m_module.opLoad(
        getVectorTypeId(result.type),
        ptr.id);
      return result;
    }


    /**
     * \brief Stores a value through a register pointer
     *
     * Bit-casts or broadcasts the source value as necessary
     * and honours the destination write mask.
     */
    void emitValueStore(
            DxbcRegisterPointer     ptr,
            DxbcRegisterValue       value,
            DxbcRegMask             writeMask) {
      // If the component types are not compatible,
      // we need to bit-cast the source variable.
      if (value.type.ctype != ptr.type.ctype)
        value = emitRegisterBitcast(value, ptr.type.ctype);

      // If the source value consists of only one component,
      // it is stored in all components of the destination.
      if (value.type.ccount == 1)
        value = emitRegisterExtend(value, writeMask.popCount());

      if (ptr.type.ccount == writeMask.popCount()) {
        // Simple case: We write to the entire register
        m_module.opStore(ptr.id, value.id);
      } else {
        // We only write to part of the destination
        // register, so we need to load and modify it
        DxbcRegisterValue tmp = emitValueLoad(ptr);
        tmp = emitRegisterInsert(tmp, value, writeMask);

        m_module.opStore(ptr.id, tmp.id);
      }
    }


    /**
     * \brief Loads the raw, unswizzled value of a register operand
     *
     * Performs bounds checking for indexable temps and declared
     * index ranges; out-of-bounds reads yield a zero vector.
     */
    DxbcRegisterValue emitRegisterLoadRaw(
      const DxbcRegister&           reg) {
      // Try to find index range for the given
      // register
      const DxbcIndexRange* indexRange = nullptr;

      if (reg.idxDim && reg.idx[reg.idxDim - 1u].relReg) {
        uint32_t offset = reg.idx[reg.idxDim - 1u].offset;

        for (const auto& range : m_indexRanges) {
          if (reg.type == range.type && offset >= range.start && offset < range.start + range.length)
            indexRange = ⦥  // NOTE(review): garbled token — presumably '&range;'; confirm against upstream
        }
      }

      if (reg.type == DxbcOperandType::IndexableTemp || indexRange) {
        bool doBoundsCheck = reg.idx[reg.idxDim - 1u].relReg != nullptr;

        if (doBoundsCheck) {
          DxbcRegisterValue indexId = emitIndexLoad(reg.idx[reg.idxDim - 1u]);
          uint32_t boundsCheck = 0u;

          if (reg.type == DxbcOperandType::IndexableTemp) {
            boundsCheck = m_module.opULessThan(
              m_module.defBoolType(), indexId.id,
              m_module.constu32(m_xRegs.at(reg.idx[0].offset).alength));
          } else {
            // Rebase the index to the start of the declared range
            uint32_t adjustedId = m_module.opISub(getVectorTypeId(indexId.type),
              indexId.id, m_module.consti32(indexRange->start));

            boundsCheck = m_module.opULessThan(
              m_module.defBoolType(), adjustedId,
              m_module.constu32(indexRange->length));
          }

          // Kind of ugly to have an empty else block here but there's no
          // way for us to know the current block ID for the phi below
          DxbcConditional cond;
          cond.labelIf   = m_module.allocateId();
          cond.labelElse = m_module.allocateId();
          cond.labelEnd  = m_module.allocateId();

          m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);
          m_module.opBranchConditional(boundsCheck, cond.labelIf, cond.labelElse);

          m_module.opLabel(cond.labelIf);

          DxbcRegisterValue returnValue = emitValueLoad(emitGetOperandPtr(reg));

          m_module.opBranch(cond.labelEnd);
          m_module.opLabel (cond.labelElse);

          // Out-of-bounds loads return zero
          DxbcRegisterValue zeroValue = emitBuildZeroVector(returnValue.type);

          m_module.opBranch(cond.labelEnd);
          m_module.opLabel (cond.labelEnd);

          // NOTE(review): 'std::array' appears to have lost its template
          // arguments during extraction; confirm against upstream.
          std::array phiLabels = {{
            { returnValue.id, cond.labelIf   },
            { zeroValue.id,   cond.labelElse },
          }};

          returnValue.id = m_module.opPhi(
            getVectorTypeId(returnValue.type),
            phiLabels.size(), phiLabels.data());
          return returnValue;
        }
      }

      DxbcRegisterValue value = emitValueLoad(emitGetOperandPtr(reg));

      // Pad icb values to a vec4 since the app may access components that are always 0
      if (reg.type == DxbcOperandType::ImmediateConstantBuffer && value.type.ccount < 4u) {
        DxbcVectorType zeroType;
        zeroType.ctype  = value.type.ctype;
        zeroType.ccount = 4u - value.type.ccount;

        uint32_t zeroVector = emitBuildZeroVector(zeroType).id;

        // NOTE(review): 'std::array' template arguments appear to be
        // missing here (extraction artifact); confirm against upstream.
        std::array constituents = { value.id, zeroVector };

        value.type.ccount = 4u;
        value.id = m_module.opCompositeConstruct(getVectorTypeId(value.type),
          constituents.size(), constituents.data());
      }

      return value;
    }


    /**
     * \brief Loads a value from a constant buffer
     *
     * \param [in] reg Constant buffer register
     * \param [in] writeMask Components to load
     */
    DxbcRegisterValue emitConstantBufferLoad(
      const DxbcRegister&           reg,
            DxbcRegMask             writeMask) {
      // Constant buffers take a two-dimensional index:
      // (0) register index (immediate)
      // (1) constant offset (relative)
      DxbcRegisterInfo info;
      info.type.ctype   = DxbcScalarType::Float32;
      info.type.ccount  = 4;
      info.type.alength = 0;
      info.sclass = spv::StorageClassUniform;

      uint32_t regId = reg.idx[0].offset;
      DxbcRegisterValue constId = emitIndexLoad(reg.idx[1]);

      uint32_t ptrTypeId = getPointerTypeId(info);

      // NOTE(review): 'std::array' template arguments appear to be
      // missing here (extraction artifact); confirm against upstream.
      const std::array indices =
        {{ m_module.consti32(0), constId.id }};

      DxbcRegisterPointer ptr;
      ptr.type.ctype  = info.type.ctype;
      ptr.type.ccount = info.type.ccount;
      ptr.id = m_module.opAccessChain(ptrTypeId,
        m_constantBuffers.at(regId).varId,
        indices.size(), indices.data());

      // Load individual components from buffer
      std::array ccomps = { 0, 0, 0, 0 };
      std::array scomps = { 0, 0, 0, 0 };
      uint32_t scount = 0;

      for (uint32_t i = 0; i < 4; i++) {
        uint32_t sindex = reg.swizzle[i];

        // Skip masked-out and already-loaded components
        if (!writeMask[i] || ccomps[sindex])
          continue;

        uint32_t componentId = m_module.constu32(sindex);
        uint32_t componentPtr = m_module.opAccessChain(
          m_module.defPointerType(
            getScalarTypeId(DxbcScalarType::Float32),
            spv::StorageClassUniform),
          ptr.id, 1, &componentId);

        ccomps[sindex] = m_module.opLoad(
          getScalarTypeId(DxbcScalarType::Float32),
          componentPtr);
      }

      // Gather the requested components in swizzle order
      for (uint32_t i = 0; i < 4; i++) {
        uint32_t sindex = reg.swizzle[i];

        if (writeMask[i])
          scomps[scount++] = ccomps[sindex];
      }

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Float32;
      result.type.ccount = scount;
      result.id = scomps[0];

      if (scount > 1) {
        result.id = m_module.opCompositeConstruct(
          getVectorTypeId(result.type),
          scount, scomps.data());
      }

      // Apply any post-processing that might be necessary
      result = emitRegisterBitcast(result, reg.dataType);
      result = emitSrcOperandModifiers(result, reg.modifiers);
      return result;
    }


    /**
     * \brief Loads a source operand
     *
     * Handles immediates, constant buffers and all other
     * register types, applying swizzle, bitcast and source
     * operand modifiers as required.
     *
     * \param [in] reg Source register
     * \param [in] writeMask Components to load
     */
    DxbcRegisterValue emitRegisterLoad(
      const DxbcRegister&           reg,
            DxbcRegMask             writeMask) {
      if (reg.type == DxbcOperandType::Imm32
       || reg.type == DxbcOperandType::Imm64) {
        DxbcRegisterValue result;

        if (reg.componentCount == DxbcComponentCount::Component1) {
          // Create one single u32 constant
          result.type.ctype  = DxbcScalarType::Uint32;
          result.type.ccount = 1;
          result.id = m_module.constu32(reg.imm.u32_1);

          result = emitRegisterExtend(result, writeMask.popCount());
        } else if (reg.componentCount == DxbcComponentCount::Component4) {
          // Create a u32 vector with as many components as needed
          // NOTE(review): 'std::array' template arguments appear to be
          // missing here (extraction artifact); confirm against upstream.
          std::array indices = { };
          uint32_t indexId = 0;

          for (uint32_t i = 0; i < indices.size(); i++) {
            if (writeMask[i]) {
              indices.at(indexId++) =
                m_module.constu32(reg.imm.u32_4[i]);
            }
          }

          result.type.ctype  = DxbcScalarType::Uint32;
          result.type.ccount = writeMask.popCount();
          result.id = indices.at(0);

          if (indexId > 1) {
            result.id = m_module.constComposite(
              getVectorTypeId(result.type),
              result.type.ccount, indices.data());
          }

        } else {
          // Something went horribly wrong in the decoder or the shader is broken
          throw DxvkError("DxbcCompiler: Invalid component count for immediate operand");
        }

        // Cast constants to the requested type
        return emitRegisterBitcast(result, reg.dataType);
      } else if (reg.type == DxbcOperandType::ConstantBuffer) {
        return emitConstantBufferLoad(reg, writeMask);
      } else {
        // Load operand from the operand pointer
        DxbcRegisterValue result = emitRegisterLoadRaw(reg);

        // Apply operand swizzle to the operand value
        result = emitRegisterSwizzle(result, reg.swizzle, writeMask);

        // Cast it to the requested type. We need to do
        // this after the swizzling for 64-bit types.
        result = emitRegisterBitcast(result, reg.dataType);

        // Apply operand modifiers
        result = emitSrcOperandModifiers(result, reg.modifiers);
        return result;
      }
    }


    /**
     * \brief Stores a value to a destination register
     *
     * Performs bounds checking for indexable temps so
     * that out-of-bounds writes are discarded.
     */
    void emitRegisterStore(
      const DxbcRegister&           reg,
            DxbcRegisterValue       value) {
      if (reg.type == DxbcOperandType::IndexableTemp) {
        bool doBoundsCheck = reg.idx[1].relReg != nullptr;
        DxbcRegisterValue vectorId = emitIndexLoad(reg.idx[1]);

        if (doBoundsCheck) {
          uint32_t boundsCheck = m_module.opULessThan(
            m_module.defBoolType(), vectorId.id,
            m_module.constu32(m_xRegs.at(reg.idx[0].offset).alength));

          DxbcConditional cond;
          cond.labelIf  = m_module.allocateId();
          cond.labelEnd = m_module.allocateId();

          m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);
          m_module.opBranchConditional(boundsCheck, cond.labelIf, cond.labelEnd);

          m_module.opLabel(cond.labelIf);

          emitValueStore(getIndexableTempPtr(reg, vectorId), value, reg.mask);

          m_module.opBranch(cond.labelEnd);
          m_module.opLabel (cond.labelEnd);
        } else {
          emitValueStore(getIndexableTempPtr(reg, vectorId), value, reg.mask);
        }
      } else {
        emitValueStore(emitGetOperandPtr(reg), value, reg.mask);
      }
    }


    /**
     * \brief Copies input registers into the private v# array
     *
     * NOTE(review): this zero-argument overload references
     * 'vertexCount', which is neither a parameter nor a visible
     * member here — most likely an extraction artifact duplicating
     * the overload below. Confirm against upstream before relying
     * on this code path.
     */
    void emitInputSetup() {
      m_module.setLateConst(m_vArrayLengthId, &m_vArrayLength);

      // Copy all defined v# registers into the input array. Note
      // that the outer index of the array is the vertex index.
      const uint32_t vecTypeId = m_module.defVectorType(m_module.defFloatType(32), 4);
      const uint32_t dstPtrTypeId = m_module.defPointerType(vecTypeId, spv::StorageClassPrivate);

      for (uint32_t i = 0; i < m_vRegs.size(); i++) {
        if (m_vRegs.at(i).id != 0) {
          const uint32_t registerId = m_module.consti32(i);

          for (uint32_t v = 0; v < vertexCount; v++) {
            // NOTE(review): 'std::array' template arguments appear to be
            // missing here (extraction artifact); confirm against upstream.
            std::array indices
              = {{ m_module.consti32(v), registerId }};

            DxbcRegisterPointer srcPtr;
            srcPtr.type = m_vRegs.at(i).type;
            srcPtr.id = m_module.opAccessChain(
              m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),
              m_vRegs.at(i).id, 1, indices.data());

            // Inputs are stored in the array as raw float data
            DxbcRegisterValue srcValue = emitRegisterBitcast(
              emitValueLoad(srcPtr), DxbcScalarType::Float32);

            DxbcRegisterPointer dstPtr;
            dstPtr.type = { DxbcScalarType::Float32, 4 };
            dstPtr.id = m_module.opAccessChain(
              dstPtrTypeId, m_vArray, 2, indices.data());

            emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));
          }
        }
      }

      // Copy all system value registers into the array,
      // preserving any previously written contents.
      for (const DxbcSvMapping& map : m_vMappings) {
        const uint32_t registerId = m_module.consti32(map.regId);

        for (uint32_t v = 0; v < vertexCount; v++) {
          const DxbcRegisterValue value = [&] {
            switch (m_programInfo.type()) {
              case DxbcProgramType::GeometryShader: return emitGsSystemValueLoad(map.sv, map.regMask, v);
              default: throw DxvkError(str::format("DxbcCompiler: Unexpected stage: ", m_programInfo.type()));
            }
          }();

          std::array indices = {
            m_module.consti32(v), registerId,
          };

          DxbcRegisterPointer inputReg;
          inputReg.type.ctype  = DxbcScalarType::Float32;
          inputReg.type.ccount = 4;
          inputReg.id = m_module.opAccessChain(dstPtrTypeId,
            m_vArray, indices.size(), indices.data());
          emitValueStore(inputReg, value, map.regMask);
        }
      }
    }


    /**
     * \brief Copies input registers into the private v# array
     * \param [in] vertexCount Number of input vertices to copy
     */
    void emitInputSetup(uint32_t
                        vertexCount) {
      m_module.setLateConst(m_vArrayLengthId, &m_vArrayLength);

      // Copy all defined v# registers into the input array. Note
      // that the outer index of the array is the vertex index.
      const uint32_t vecTypeId = m_module.defVectorType(m_module.defFloatType(32), 4);
      const uint32_t dstPtrTypeId = m_module.defPointerType(vecTypeId, spv::StorageClassPrivate);

      for (uint32_t i = 0; i < m_vRegs.size(); i++) {
        if (m_vRegs.at(i).id != 0) {
          const uint32_t registerId = m_module.consti32(i);

          for (uint32_t v = 0; v < vertexCount; v++) {
            // NOTE(review): 'std::array' template arguments appear to be
            // missing here (extraction artifact); confirm against upstream.
            std::array indices
              = {{ m_module.consti32(v), registerId }};

            DxbcRegisterPointer srcPtr;
            srcPtr.type = m_vRegs.at(i).type;
            srcPtr.id = m_module.opAccessChain(
              m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),
              m_vRegs.at(i).id, 1, indices.data());

            // Inputs are stored in the array as raw float data
            DxbcRegisterValue srcValue = emitRegisterBitcast(
              emitValueLoad(srcPtr), DxbcScalarType::Float32);

            DxbcRegisterPointer dstPtr;
            dstPtr.type = { DxbcScalarType::Float32, 4 };
            dstPtr.id = m_module.opAccessChain(
              dstPtrTypeId, m_vArray, 2, indices.data());

            emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));
          }
        }
      }

      // Copy all system value registers into the array,
      // preserving any previously written contents.
      for (const DxbcSvMapping& map : m_vMappings) {
        const uint32_t registerId = m_module.consti32(map.regId);

        for (uint32_t v = 0; v < vertexCount; v++) {
          const DxbcRegisterValue value = [&] {
            switch (m_programInfo.type()) {
              case DxbcProgramType::GeometryShader: return emitGsSystemValueLoad(map.sv, map.regMask, v);
              default: throw DxvkError(str::format("DxbcCompiler: Unexpected stage: ", m_programInfo.type()));
            }
          }();

          std::array indices = {
            m_module.consti32(v), registerId,
          };

          DxbcRegisterPointer inputReg;
          inputReg.type.ctype  = DxbcScalarType::Float32;
          inputReg.type.ccount = 4;
          inputReg.id = m_module.opAccessChain(dstPtrTypeId,
            m_vArray, indices.size(), indices.data());
          emitValueStore(inputReg, value, map.regMask);
        }
      }
    }


    /**
     * \brief Writes o# registers to system value outputs
     *
     * For hull shaders, per-patch outputs are read from the
     * private per-patch array instead of the o# registers.
     */
    void emitOutputSetup() {
      for (const DxbcSvMapping& svMapping : m_oMappings) {
        DxbcRegisterPointer outputReg = m_oRegs.at(svMapping.regId);

        if (m_programInfo.type() == DxbcProgramType::HullShader) {
          uint32_t registerIndex = m_module.constu32(svMapping.regId);

          outputReg.type = { DxbcScalarType::Float32, 4 };
          outputReg.id = m_module.opAccessChain(
            m_module.defPointerType(
              getVectorTypeId(outputReg.type),
              spv::StorageClassPrivate),
            m_hs.outputPerPatch,
            1, ®isterIndex);  // NOTE(review): garbled token — presumably '&registerIndex'; confirm against upstream
        }

        auto sv    = svMapping.sv;
        auto mask  = svMapping.regMask;
        auto value = emitValueLoad(outputReg);

        // Dispatch to the stage-specific store routine
        switch (m_programInfo.type()) {
          case DxbcProgramType::VertexShader:   emitVsSystemValueStore(sv, mask, value); break;
          case DxbcProgramType::GeometryShader: emitGsSystemValueStore(sv, mask, value); break;
          case DxbcProgramType::HullShader:     emitHsSystemValueStore(sv, mask, value); break;
          case DxbcProgramType::DomainShader:   emitDsSystemValueStore(sv, mask, value); break;
          case DxbcProgramType::PixelShader:    emitPsSystemValueStore(sv, mask, value); break;
          default: break;
        }
      }
    }


    /**
     * \brief Clamps the fragment depth output to [0..1]
     */
    void emitOutputDepthClamp() {
      // HACK: Some drivers do not clamp FragDepth to [minDepth..maxDepth]
      // before writing to the depth attachment, but we do not have access
      // to those. Clamp to [0..1] instead.
      if (m_ps.builtinDepth) {
        DxbcRegisterPointer ptr;
        ptr.type = { DxbcScalarType::Float32, 1 };
        ptr.id = m_ps.builtinDepth;

        DxbcRegisterValue value = emitValueLoad(ptr);

        value.id = m_module.opNClamp(
          getVectorTypeId(ptr.type),
          value.id,
          m_module.constf32(0.0f),
          m_module.constf32(1.0f));

        emitValueStore(ptr, value,
          DxbcRegMask::firstN(1));
      }
    }


    /**
     * \brief Zero-initializes thread-group shared memory
     *
     * Distributes the clearing work across all threads in the
     * workgroup and issues a control barrier afterwards so that
     * every thread observes the zero-initialized memory.
     */
    void emitInitWorkgroupMemory() {
      bool hasTgsm = false;

      SpirvMemoryOperands memoryOperands;
      memoryOperands.flags = spv::MemoryAccessNonPrivatePointerMask;

      for (uint32_t i = 0; i < m_gRegs.size(); i++) {
        if (!m_gRegs[i].varId)
          continue;

        // Lazily declare the local invocation index built-in
        if (!m_cs.builtinLocalInvocationIndex) {
          m_cs.builtinLocalInvocationIndex = emitNewBuiltinVariable({
            { DxbcScalarType::Uint32, 1, 0 },
            spv::StorageClassInput },
            spv::BuiltInLocalInvocationIndex,
            "vThreadIndexInGroup");
        }

        uint32_t intTypeId = getScalarTypeId(DxbcScalarType::Uint32);
        uint32_t ptrTypeId = m_module.defPointerType(
          intTypeId, spv::StorageClassWorkgroup);

        // Number of dwords to clear for this TGSM region
        uint32_t numElements = m_gRegs[i].type == DxbcResourceType::Structured
          ?
            m_gRegs[i].elementCount * m_gRegs[i].elementStride / 4
          : m_gRegs[i].elementCount / 4;

        uint32_t numThreads = m_cs.workgroupSizeX *
          m_cs.workgroupSizeY * m_cs.workgroupSizeZ;

        uint32_t numElementsPerThread = numElements / numThreads;
        uint32_t numElementsRemaining = numElements % numThreads;

        uint32_t threadId = m_module.opLoad(
          intTypeId, m_cs.builtinLocalInvocationIndex);
        uint32_t zeroId = m_module.constu32(0);

        // Each thread clears a strided subset of the elements
        for (uint32_t e = 0; e < numElementsPerThread; e++) {
          uint32_t ofsId = m_module.opIAdd(intTypeId, threadId,
            m_module.constu32(numThreads * e));

          uint32_t ptrId = m_module.opAccessChain(
            ptrTypeId, m_gRegs[i].varId, 1, &ofsId);

          m_module.opStore(ptrId, zeroId, memoryOperands);
        }

        // The first few threads each clear one extra element
        if (numElementsRemaining) {
          uint32_t condition = m_module.opULessThan(
            m_module.defBoolType(), threadId,
            m_module.constu32(numElementsRemaining));

          DxbcConditional cond;
          cond.labelIf  = m_module.allocateId();
          cond.labelEnd = m_module.allocateId();

          m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);
          m_module.opBranchConditional(condition, cond.labelIf, cond.labelEnd);

          m_module.opLabel(cond.labelIf);

          uint32_t ofsId = m_module.opIAdd(intTypeId, threadId,
            m_module.constu32(numThreads * numElementsPerThread));

          uint32_t ptrId = m_module.opAccessChain(
            ptrTypeId, m_gRegs[i].varId, 1, &ofsId);

          m_module.opStore(ptrId, zeroId, memoryOperands);

          m_module.opBranch(cond.labelEnd);
          m_module.opLabel (cond.labelEnd);
        }

        hasTgsm = true;
      }

      // Make the zero-initialized memory visible to the whole workgroup
      if (hasTgsm) {
        m_module.opControlBarrier(
          m_module.constu32(spv::ScopeWorkgroup),
          m_module.constu32(spv::ScopeWorkgroup),
          m_module.constu32(spv::MemorySemanticsWorkgroupMemoryMask
            | spv::MemorySemanticsAcquireReleaseMask
            | spv::MemorySemanticsMakeAvailableMask
            | spv::MemorySemanticsMakeVisibleMask));
      }
    }


    /**
     * \brief Loads a vertex shader built-in system value
     */
    DxbcRegisterValue emitVsSystemValueLoad(
            DxbcSystemValue         sv,
            DxbcRegMask             mask) {
      switch (sv) {
        case
             DxbcSystemValue::VertexId: {
          const uint32_t typeId = getScalarTypeId(DxbcScalarType::Uint32);

          // D3D vertex IDs are zero-based, so we subtract
          // BaseVertex from Vulkan's VertexIndex built-in
          if (m_vs.builtinVertexId == 0) {
            m_vs.builtinVertexId = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassInput },
              spv::BuiltInVertexIndex,
              "vs_vertex_index");
          }

          if (m_vs.builtinBaseVertex == 0) {
            m_vs.builtinBaseVertex = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassInput },
              spv::BuiltInBaseVertex,
              "vs_base_vertex");
          }

          DxbcRegisterValue result;
          result.type.ctype  = DxbcScalarType::Uint32;
          result.type.ccount = 1;
          result.id = m_module.opISub(typeId,
            m_module.opLoad(typeId, m_vs.builtinVertexId),
            m_module.opLoad(typeId, m_vs.builtinBaseVertex));
          return result;
        } break;

        case DxbcSystemValue::InstanceId: {
          const uint32_t typeId = getScalarTypeId(DxbcScalarType::Uint32);

          // Same as above: subtract BaseInstance from InstanceIndex
          if (m_vs.builtinInstanceId == 0) {
            m_vs.builtinInstanceId = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassInput },
              spv::BuiltInInstanceIndex,
              "vs_instance_index");
          }

          if (m_vs.builtinBaseInstance == 0) {
            m_vs.builtinBaseInstance = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassInput },
              spv::BuiltInBaseInstance,
              "vs_base_instance");
          }

          DxbcRegisterValue result;
          result.type.ctype  = DxbcScalarType::Uint32;
          result.type.ccount = 1;
          result.id = m_module.opISub(typeId,
            m_module.opLoad(typeId, m_vs.builtinInstanceId),
            m_module.opLoad(typeId, m_vs.builtinBaseInstance));
          return result;
        } break;

        default:
          throw DxvkError(str::format(
            "DxbcCompiler: Unhandled VS SV input: ", sv));
      }
    }


    /**
     * \brief Loads a geometry shader system value
     * \param [in] vertexId Input vertex to read from
     */
    DxbcRegisterValue emitGsSystemValueLoad(
            DxbcSystemValue         sv,
            DxbcRegMask             mask,
            uint32_t                vertexId) {
      switch (sv) {
        case DxbcSystemValue::Position: {
          uint32_t arrayIndex = m_module.consti32(vertexId);

          if (!m_positionIn) {
            // GS position input is arrayed over the input vertices
            m_positionIn = emitNewBuiltinVariable({
              { DxbcScalarType::Float32, 4, primitiveVertexCount(m_gs.inputPrimitive) },
              spv::StorageClassInput },
              spv::BuiltInPosition,
              "in_position");
          }

          DxbcRegisterPointer ptrIn;
          ptrIn.type.ctype  = DxbcScalarType::Float32;
          ptrIn.type.ccount = 4;
          ptrIn.id = m_module.opAccessChain(
            m_module.defPointerType(getVectorTypeId(ptrIn.type), spv::StorageClassInput),
            m_positionIn, 1, &arrayIndex);

          return emitRegisterExtract(emitValueLoad(ptrIn), mask);
        } break;

        default:
          throw DxvkError(str::format(
            "DxbcCompiler: Unhandled GS SV input: ", sv));
      }
    }


    /**
     * \brief Loads a pixel shader built-in system value
     */
    DxbcRegisterValue emitPsSystemValueLoad(
            DxbcSystemValue         sv,
            DxbcRegMask             mask) {
      switch (sv) {
        case DxbcSystemValue::Position: {
          if (m_ps.builtinFragCoord == 0) {
            m_ps.builtinFragCoord = emitNewBuiltinVariable({
              { DxbcScalarType::Float32, 4, 0 },
              spv::StorageClassInput },
              spv::BuiltInFragCoord,
              "ps_frag_coord");
          }

          DxbcRegisterPointer ptrIn;
          ptrIn.type = { DxbcScalarType::Float32, 4 };
          ptrIn.id = m_ps.builtinFragCoord;

          // The X, Y and Z components of the SV_POSITION semantic
          // are identical to Vulkan's FragCoord builtin, but we
          // need to compute the reciprocal of the W component.
          DxbcRegisterValue fragCoord = emitValueLoad(ptrIn);

          uint32_t componentIndex = 3;
          uint32_t t_f32   = m_module.defFloatType(32);
          uint32_t v_wComp = m_module.opCompositeExtract(t_f32, fragCoord.id, 1, &componentIndex);
          v_wComp = m_module.opFDiv(t_f32, m_module.constf32(1.0f), v_wComp);

          fragCoord.id = m_module.opCompositeInsert(
            getVectorTypeId(fragCoord.type),
            v_wComp, fragCoord.id,
            1, &componentIndex);

          return emitRegisterExtract(fragCoord, mask);
        } break;

        case DxbcSystemValue::IsFrontFace: {
          if (m_ps.builtinIsFrontFace == 0) {
            m_ps.builtinIsFrontFace = emitNewBuiltinVariable({
              { DxbcScalarType::Bool, 1, 0 },
              spv::StorageClassInput },
              spv::BuiltInFrontFacing,
              "ps_is_front_face");
          }

          // Convert the boolean to a D3D-style 0 / 0xFFFFFFFF mask
          DxbcRegisterValue result;
          result.type.ctype =
            DxbcScalarType::Uint32;
          result.type.ccount = 1;
          result.id = m_module.opSelect(
            getVectorTypeId(result.type),
            m_module.opLoad(
              m_module.defBoolType(),
              m_ps.builtinIsFrontFace),
            m_module.constu32(0xFFFFFFFF),
            m_module.constu32(0x00000000));
          return result;
        } break;

        case DxbcSystemValue::PrimitiveId: {
          if (m_primitiveIdIn == 0) {
            m_module.enableCapability(spv::CapabilityGeometry);

            m_primitiveIdIn = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassInput },
              spv::BuiltInPrimitiveId,
              "ps_primitive_id");
          }

          DxbcRegisterPointer ptrIn;
          ptrIn.type = { DxbcScalarType::Uint32, 1 };
          ptrIn.id   = m_primitiveIdIn;

          return emitValueLoad(ptrIn);
        } break;

        case DxbcSystemValue::SampleIndex: {
          if (m_ps.builtinSampleId == 0) {
            m_module.enableCapability(spv::CapabilitySampleRateShading);

            m_ps.builtinSampleId = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassInput },
              spv::BuiltInSampleId,
              "ps_sample_id");
          }

          DxbcRegisterPointer ptrIn;
          ptrIn.type.ctype  = DxbcScalarType::Uint32;
          ptrIn.type.ccount = 1;
          ptrIn.id = m_ps.builtinSampleId;

          return emitValueLoad(ptrIn);
        } break;

        case DxbcSystemValue::RenderTargetId: {
          if (m_ps.builtinLayer == 0) {
            m_module.enableCapability(spv::CapabilityGeometry);

            m_ps.builtinLayer = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassInput },
              spv::BuiltInLayer,
              "v_layer");
          }

          DxbcRegisterPointer ptr;
          ptr.type.ctype  = DxbcScalarType::Uint32;
          ptr.type.ccount = 1;
          ptr.id = m_ps.builtinLayer;

          return emitValueLoad(ptr);
        } break;

        case DxbcSystemValue::ViewportId: {
          if (m_ps.builtinViewportId == 0) {
            m_module.enableCapability(spv::CapabilityMultiViewport);

            m_ps.builtinViewportId = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassInput },
              spv::BuiltInViewportIndex,
              "v_viewport");
          }

          DxbcRegisterPointer ptr;
          ptr.type.ctype  = DxbcScalarType::Uint32;
          ptr.type.ccount = 1;
          ptr.id = m_ps.builtinViewportId;

          return emitValueLoad(ptr);
        } break;

        default:
          throw DxvkError(str::format(
            "DxbcCompiler: Unhandled PS SV input: ", sv));
      }
    }


    /**
     * \brief Stores a vertex shader system value output
     */
    void emitVsSystemValueStore(
            DxbcSystemValue         sv,
            DxbcRegMask             mask,
      const DxbcRegisterValue&      value) {
      switch (sv) {
        case DxbcSystemValue::Position: {
          if (!m_positionOut) {
            m_positionOut = emitNewBuiltinVariable({
              { DxbcScalarType::Float32, 4, 0 },
              spv::StorageClassOutput },
              spv::BuiltInPosition,
              "out_position");
          }

          DxbcRegisterPointer ptr;
          ptr.type.ctype  = DxbcScalarType::Float32;
          ptr.type.ccount = 4;
          ptr.id = m_positionOut;

          emitValueStore(ptr, value, mask);
        } break;

        case DxbcSystemValue::RenderTargetId: {
          // Writing gl_Layer from a non-GS pre-rasterization
          // stage requires the ShaderLayer capability
          if (m_programInfo.type() != DxbcProgramType::GeometryShader)
            m_module.enableCapability(spv::CapabilityShaderLayer);

          if (m_gs.builtinLayer == 0) {
            m_module.enableCapability(spv::CapabilityGeometry);

            m_gs.builtinLayer = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassOutput },
              spv::BuiltInLayer,
              "o_layer");
          }

          DxbcRegisterPointer ptr;
          ptr.type = { DxbcScalarType::Uint32, 1 };
          ptr.id   = m_gs.builtinLayer;

          emitValueStore(
            ptr, emitRegisterExtract(value, mask),
            DxbcRegMask(true, false, false, false));
        } break;

        case DxbcSystemValue::ViewportId: {
          if (m_programInfo.type() != DxbcProgramType::GeometryShader)
            m_module.enableCapability(spv::CapabilityShaderViewportIndex);

          if (m_gs.builtinViewportId == 0) {
            m_module.enableCapability(spv::CapabilityMultiViewport);

            m_gs.builtinViewportId = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassOutput },
              spv::BuiltInViewportIndex,
              "o_viewport");
          }

          DxbcRegisterPointer ptr;
          ptr.type = { DxbcScalarType::Uint32, 1};
          ptr.id   = m_gs.builtinViewportId;

          emitValueStore(
            ptr,
                 emitRegisterExtract(value, mask),
            DxbcRegMask(true, false, false, false));
        } break;

        default:
          Logger::warn(str::format(
            "DxbcCompiler: Unhandled VS SV output: ", sv));
      }
    }


    /**
     * \brief Stores a hull shader system value output
     *
     * Tess factors are routed to the TessLevelOuter/Inner
     * built-ins and clamped to the maximum tess factor.
     */
    void emitHsSystemValueStore(
            DxbcSystemValue         sv,
            DxbcRegMask             mask,
      const DxbcRegisterValue&      value) {
      if (sv >= DxbcSystemValue::FinalQuadUeq0EdgeTessFactor
       && sv <= DxbcSystemValue::FinalLineDensityTessFactor) {
        struct TessFactor {
          uint32_t array = 0;
          uint32_t index = 0;
        };

        // Maps each tess factor system value to the built-in
        // array (outer/inner) and the index within that array.
        // NOTE(review): 'std::array' template arguments appear to be
        // missing here (extraction artifact); confirm against upstream.
        static const std::array s_tessFactors = {{
          { m_hs.builtinTessLevelOuter, 0 },  // FinalQuadUeq0EdgeTessFactor
          { m_hs.builtinTessLevelOuter, 1 },  // FinalQuadVeq0EdgeTessFactor
          { m_hs.builtinTessLevelOuter, 2 },  // FinalQuadUeq1EdgeTessFactor
          { m_hs.builtinTessLevelOuter, 3 },  // FinalQuadVeq1EdgeTessFactor
          { m_hs.builtinTessLevelInner, 0 },  // FinalQuadUInsideTessFactor
          { m_hs.builtinTessLevelInner, 1 },  // FinalQuadVInsideTessFactor
          { m_hs.builtinTessLevelOuter, 0 },  // FinalTriUeq0EdgeTessFactor
          { m_hs.builtinTessLevelOuter, 1 },  // FinalTriVeq0EdgeTessFactor
          { m_hs.builtinTessLevelOuter, 2 },  // FinalTriWeq0EdgeTessFactor
          { m_hs.builtinTessLevelInner, 0 },  // FinalTriInsideTessFactor
          { m_hs.builtinTessLevelOuter, 0 },  // FinalLineDensityTessFactor
          { m_hs.builtinTessLevelOuter, 1 },  // FinalLineDetailTessFactor
        }};

        const TessFactor tessFactor = s_tessFactors.at(uint32_t(sv)
          - uint32_t(DxbcSystemValue::FinalQuadUeq0EdgeTessFactor));

        const uint32_t tessFactorArrayIndex
          = m_module.constu32(tessFactor.index);

        // Apply global tess factor limit
        float maxTessFactor = m_hs.maxTessFactor;

        if (m_moduleInfo.tess != nullptr) {
          if (m_moduleInfo.tess->maxTessFactor < maxTessFactor)
            maxTessFactor = m_moduleInfo.tess->maxTessFactor;
        }

        DxbcRegisterValue tessValue = emitRegisterExtract(value, mask);
        tessValue.id = m_module.opNClamp(getVectorTypeId(tessValue.type),
          tessValue.id, m_module.constf32(0.0f),
          m_module.constf32(maxTessFactor));

        DxbcRegisterPointer ptr;
        ptr.type.ctype  = DxbcScalarType::Float32;
        ptr.type.ccount = 1;
        ptr.id = m_module.opAccessChain(
          m_module.defPointerType(
            getVectorTypeId(ptr.type),
            spv::StorageClassOutput),
          tessFactor.array, 1,
          &tessFactorArrayIndex);

        emitValueStore(ptr, tessValue,
          DxbcRegMask(true, false, false, false));
      } else {
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled HS SV output: ", sv));
      }
    }


    /**
     * \brief Stores a domain shader system value output
     *
     * Most DS outputs behave like their VS counterparts.
     */
    void emitDsSystemValueStore(
            DxbcSystemValue         sv,
            DxbcRegMask             mask,
      const DxbcRegisterValue&      value) {
      switch (sv) {
        case DxbcSystemValue::Position:
        case DxbcSystemValue::CullDistance:
        case DxbcSystemValue::ClipDistance:
        case DxbcSystemValue::RenderTargetId:
        case DxbcSystemValue::ViewportId:
          emitVsSystemValueStore(sv, mask, value);
          break;

        default:
          Logger::warn(str::format(
            "DxbcCompiler: Unhandled DS SV output: ", sv));
      }
    }


    /**
     * \brief Stores a geometry shader system value output
     */
    void emitGsSystemValueStore(
            DxbcSystemValue         sv,
            DxbcRegMask             mask,
      const DxbcRegisterValue&      value) {
      switch (sv) {
        case DxbcSystemValue::Position:
        case DxbcSystemValue::CullDistance:
        case DxbcSystemValue::ClipDistance:
        case DxbcSystemValue::RenderTargetId:
        case DxbcSystemValue::ViewportId:
          emitVsSystemValueStore(sv, mask, value);
          break;

        case DxbcSystemValue::PrimitiveId: {
          if (m_primitiveIdOut == 0) {
            m_primitiveIdOut = emitNewBuiltinVariable({
              { DxbcScalarType::Uint32, 1, 0 },
              spv::StorageClassOutput },
              spv::BuiltInPrimitiveId,
              "gs_primitive_id");
          }

          DxbcRegisterPointer ptr;
          ptr.type = { DxbcScalarType::Uint32, 1};
          ptr.id   = m_primitiveIdOut;

          emitValueStore(
            ptr, emitRegisterExtract(value, mask),
            DxbcRegMask(true, false, false, false));
        } break;

        default:
          Logger::warn(str::format(
            "DxbcCompiler: Unhandled GS SV output: ", sv));
      }
    }


    /**
     * \brief Stores a pixel shader system value output
     *
     * No PS system value outputs are handled; always warns.
     */
    void emitPsSystemValueStore(
            DxbcSystemValue         sv,
            DxbcRegMask             mask,
      const DxbcRegisterValue&      value) {
Logger::warn(str::format(\n \"DxbcCompiler: Unhandled PS SV output: \", sv));\n }\n void emitClipCullStore(\n DxbcSystemValue sv,\n uint32_t dstArray) {\n uint32_t offset = 0;\n \n if (dstArray == 0)\n return;\n \n for (auto e = m_osgn->begin(); e != m_osgn->end(); e++) {\n if (e->systemValue == sv) {\n DxbcRegisterPointer srcPtr = m_oRegs.at(e->registerId);\n DxbcRegisterValue srcValue = emitValueLoad(srcPtr);\n \n for (uint32_t i = 0; i < 4; i++) {\n if (e->componentMask[i]) {\n uint32_t offsetId = m_module.consti32(offset++);\n \n DxbcRegisterValue component = emitRegisterExtract(\n srcValue, DxbcRegMask::select(i));\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 1 };\n dstPtr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(dstPtr.type),\n spv::StorageClassOutput),\n dstArray, 1, &offsetId);\n \n emitValueStore(dstPtr, component,\n DxbcRegMask(true, false, false, false));\n }\n }\n }\n }\n }\n void emitClipCullLoad(\n DxbcSystemValue sv,\n uint32_t srcArray) {\n uint32_t offset = 0;\n \n if (srcArray == 0)\n return;\n \n for (auto e = m_isgn->begin(); e != m_isgn->end(); e++) {\n if (e->systemValue == sv) {\n // Load individual components from the source array\n uint32_t componentIndex = 0;\n std::array componentIds = {{ 0, 0, 0, 0 }};\n \n for (uint32_t i = 0; i < 4; i++) {\n if (e->componentMask[i]) {\n uint32_t offsetId = m_module.consti32(offset++);\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = { DxbcScalarType::Float32, 1 };\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(srcPtr.type),\n spv::StorageClassInput),\n srcArray, 1, &offsetId);\n \n componentIds[componentIndex++]\n = emitValueLoad(srcPtr).id;\n }\n }\n \n // Put everything into one vector\n DxbcRegisterValue dstValue;\n dstValue.type = { DxbcScalarType::Float32, componentIndex };\n dstValue.id = componentIds[0];\n \n if (componentIndex > 1) {\n dstValue.id = m_module.opCompositeConstruct(\n 
getVectorTypeId(dstValue.type),\n componentIndex, componentIds.data());\n }\n \n // Store vector to the input array\n uint32_t registerId = m_module.consti32(e->registerId);\n \n DxbcRegisterPointer dstInput;\n dstInput.type = { DxbcScalarType::Float32, 4 };\n dstInput.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(dstInput.type),\n spv::StorageClassPrivate),\n m_vArray, 1, ®isterId);\n \n emitValueStore(dstInput, dstValue, e->componentMask);\n }\n }\n }\n void emitPointSizeStore() {\n if (m_moduleInfo.options.needsPointSizeExport) {\n uint32_t pointSizeId = emitNewBuiltinVariable(DxbcRegisterInfo {\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPointSize,\n \"point_size\");\n\n m_module.opStore(pointSizeId, m_module.constf32(1.0f));\n }\n }\n void emitInit() {\n // Set up common capabilities for all shaders\n m_module.enableCapability(spv::CapabilityShader);\n m_module.enableCapability(spv::CapabilityImageQuery);\n \n // Initialize the shader module with capabilities\n // etc. 
Each shader type has its own peculiarities.
      switch (m_programInfo.type()) {
        case DxbcProgramType::VertexShader:   emitVsInit(); break;
        case DxbcProgramType::HullShader:     emitHsInit(); break;
        case DxbcProgramType::DomainShader:   emitDsInit(); break;
        case DxbcProgramType::GeometryShader: emitGsInit(); break;
        case DxbcProgramType::PixelShader:    emitPsInit(); break;
        case DxbcProgramType::ComputeShader:  emitCsInit(); break;
        default: throw DxvkError("Invalid shader stage");
      }
    }
    // Opens a new SPIR-V function. Any function that is still being
    // recorded is closed first, so consecutive phases can be emitted
    // without an explicit end call at every call site.
    void emitFunctionBegin(
        uint32_t entryPoint,
        uint32_t returnType,
        uint32_t funcType) {
      this->emitFunctionEnd();
      
      m_module.functionBegin(
        returnType, entryPoint, funcType,
        spv::FunctionControlMaskNone);
      
      m_insideFunction = true;
    }
    // Closes the currently recorded function, if any, by emitting
    // OpReturn followed by OpFunctionEnd. No-op when nothing is open.
    void emitFunctionEnd() {
      if (m_insideFunction) {
        m_module.opReturn();
        m_module.functionEnd();
      }
      
      m_insideFunction = false;
    }
    // Starts a fresh basic block within the current function.
    void emitFunctionLabel() {
      m_module.opLabel(m_module.allocateId());
    }
    // Begins the shader's actual entry point function, i.e. the one
    // that wraps the per-stage main plus input/output setup code.
    void emitMainFunctionBegin() {
      this->emitFunctionBegin(
        m_entryPointId,
        m_module.defVoidType(),
        m_module.defFunctionType(
          m_module.defVoidType(), 0, nullptr));
      this->emitFunctionLabel();
    }
    // Vertex-shader-specific initialization: capabilities, the private
    // input register array, clip/cull distance outputs, and vs_main.
    void emitVsInit() {
      m_module.enableCapability(spv::CapabilityClipDistance);
      m_module.enableCapability(spv::CapabilityCullDistance);
      m_module.enableCapability(spv::CapabilityDrawParameters);
      
      // Standard input array
      emitDclInputArray(0);
      
      // Cull/clip distances as outputs
      m_clipDistances = emitDclClipCullDistanceArray(
        m_analysis->clipCullOut.numClipPlanes,
        spv::BuiltInClipDistance,
        spv::StorageClassOutput);
      
      m_cullDistances = emitDclClipCullDistanceArray(
        m_analysis->clipCullOut.numCullPlanes,
        spv::BuiltInCullDistance,
        spv::StorageClassOutput);
      
      // Main function of the vertex shader
      m_vs.functionId = m_module.allocateId();
      m_module.setDebugName(m_vs.functionId, "vs_main");
      
      this->emitFunctionBegin(
        m_vs.functionId,
        m_module.defVoidType(),
m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitHsInit() {\n m_module.enableCapability(spv::CapabilityTessellation);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n \n m_hs.builtinInvocationId = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInvocationId,\n \"vOutputControlPointId\");\n \n m_hs.builtinTessLevelOuter = emitBuiltinTessLevelOuter(spv::StorageClassOutput);\n m_hs.builtinTessLevelInner = emitBuiltinTessLevelInner(spv::StorageClassOutput);\n }\n void emitDsInit() {\n m_module.enableCapability(spv::CapabilityTessellation);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n \n m_ds.builtinTessLevelOuter = emitBuiltinTessLevelOuter(spv::StorageClassInput);\n m_ds.builtinTessLevelInner = emitBuiltinTessLevelInner(spv::StorageClassInput);\n \n // Cull/clip distances as outputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassOutput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassOutput);\n \n // Main function of the domain shader\n m_ds.functionId = m_module.allocateId();\n m_module.setDebugName(m_ds.functionId, \"ds_main\");\n \n this->emitFunctionBegin(\n m_ds.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitGsInit() {\n m_module.enableCapability(spv::CapabilityGeometry);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n\n // Enable capabilities for xfb mode if necessary\n if (m_moduleInfo.xfb) {\n 
m_module.enableCapability(spv::CapabilityGeometryStreams);\n m_module.enableCapability(spv::CapabilityTransformFeedback);\n \n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeXfb);\n }\n\n // We only need outputs if rasterization is enabled\n m_gs.needsOutputSetup = !m_moduleInfo.xfb\n || m_moduleInfo.xfb->rasterizedStream >= 0;\n \n // Cull/clip distances as outputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassOutput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassOutput);\n \n // Emit Xfb variables if necessary\n if (m_moduleInfo.xfb)\n emitXfbOutputDeclarations();\n\n // Main function of the vertex shader\n m_gs.functionId = m_module.allocateId();\n m_module.setDebugName(m_gs.functionId, \"gs_main\");\n \n this->emitFunctionBegin(\n m_gs.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitPsInit() {\n m_module.enableCapability(spv::CapabilityDerivativeControl);\n \n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeOriginUpperLeft);\n \n // Standard input array\n emitDclInputArray(0);\n \n // Cull/clip distances as inputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullIn.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassInput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullIn.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassInput);\n \n // Main function of the pixel shader\n m_ps.functionId = m_module.allocateId();\n m_module.setDebugName(m_ps.functionId, \"ps_main\");\n \n this->emitFunctionBegin(\n m_ps.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitCsInit() {\n // Main 
function of the compute shader
      m_cs.functionId = m_module.allocateId();
      m_module.setDebugName(m_cs.functionId, "cs_main");
      
      this->emitFunctionBegin(
        m_cs.functionId,
        m_module.defVoidType(),
        m_module.defFunctionType(
          m_module.defVoidType(), 0, nullptr));
      this->emitFunctionLabel();
    }
    // Assembles the vertex shader entry point: set up the private input
    // array, invoke vs_main, then write the outputs, clip/cull distance
    // arrays and (if the module options require it) the point size.
    void emitVsFinalize() {
      this->emitMainFunctionBegin();
      this->emitInputSetup();
      m_module.opFunctionCall(
        m_module.defVoidType(),
        m_vs.functionId, 0, nullptr);
      this->emitOutputSetup();
      this->emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);
      this->emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);
      this->emitPointSizeStore();
      this->emitFunctionEnd();
    }
    // Assembles the hull shader entry point. All invocations run the
    // control point phase; after a workgroup barrier, only invocation 0
    // (guarded via emitHsInvocationBlockBegin(1)) runs the fork/join
    // phases and performs the output setup.
    void emitHsFinalize() {
      if (m_hs.cpPhase.functionId == 0)
        m_hs.cpPhase = this->emitNewHullShaderPassthroughPhase();
      
      // Control point phase
      this->emitMainFunctionBegin();
      this->emitInputSetup(m_hs.vertexCountIn);
      this->emitHsControlPointPhase(m_hs.cpPhase);
      this->emitHsPhaseBarrier();
      
      // Fork-join phases and output setup
      this->emitHsInvocationBlockBegin(1);
      
      for (const auto& phase : m_hs.forkPhases)
        this->emitHsForkJoinPhase(phase);
      
      for (const auto& phase : m_hs.joinPhases)
        this->emitHsForkJoinPhase(phase);
      
      this->emitOutputSetup();
      this->emitHsOutputSetup();
      this->emitHsInvocationBlockEnd();
      this->emitFunctionEnd();
    }
    // Assembles the domain shader entry point. Note there is no
    // emitInputSetup call on this path; ds_main is invoked directly.
    void emitDsFinalize() {
      this->emitMainFunctionBegin();
      m_module.opFunctionCall(
        m_module.defVoidType(),
        m_ds.functionId, 0, nullptr);
      this->emitOutputSetup();
      this->emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);
      this->emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);
      this->emitFunctionEnd();
    }
    // Assembles the geometry shader entry point. Defaults the GS
    // invocation count to 1 if the shader did not declare one.
    void emitGsFinalize() {
      if (!m_gs.invocationCount)
        m_module.setInvocations(m_entryPointId, 1);

      this->emitMainFunctionBegin();
      this->emitInputSetup(
        primitiveVertexCount(m_gs.inputPrimitive));
      m_module.opFunctionCall(
m_module.defVoidType(),\n m_gs.functionId, 0, nullptr);\n // No output setup at this point as that was\n // already done during the EmitVertex step\n this->emitFunctionEnd();\n }\n void emitPsFinalize() {\n this->emitMainFunctionBegin();\n this->emitInputSetup();\n this->emitClipCullLoad(DxbcSystemValue::ClipDistance, m_clipDistances);\n this->emitClipCullLoad(DxbcSystemValue::CullDistance, m_cullDistances);\n\n if (m_hasRasterizerOrderedUav) {\n // For simplicity, just lock the entire fragment shader\n // if there are any rasterizer ordered views.\n m_module.enableExtension(\"SPV_EXT_fragment_shader_interlock\");\n\n if (m_module.hasCapability(spv::CapabilitySampleRateShading)\n && m_moduleInfo.options.enableSampleShadingInterlock) {\n m_module.enableCapability(spv::CapabilityFragmentShaderSampleInterlockEXT);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSampleInterlockOrderedEXT);\n } else {\n m_module.enableCapability(spv::CapabilityFragmentShaderPixelInterlockEXT);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModePixelInterlockOrderedEXT);\n }\n\n m_module.opBeginInvocationInterlock();\n }\n\n m_module.opFunctionCall(\n m_module.defVoidType(),\n m_ps.functionId, 0, nullptr);\n\n if (m_hasRasterizerOrderedUav)\n m_module.opEndInvocationInterlock();\n\n this->emitOutputSetup();\n\n if (m_moduleInfo.options.useDepthClipWorkaround)\n this->emitOutputDepthClamp();\n \n this->emitFunctionEnd();\n }\n void emitCsFinalize() {\n this->emitMainFunctionBegin();\n\n if (m_moduleInfo.options.zeroInitWorkgroupMemory)\n this->emitInitWorkgroupMemory();\n\n m_module.opFunctionCall(\n m_module.defVoidType(),\n m_cs.functionId, 0, nullptr);\n \n this->emitFunctionEnd();\n }\n void emitXfbOutputDeclarations() {\n for (uint32_t i = 0; i < m_moduleInfo.xfb->entryCount; i++) {\n const DxbcXfbEntry* xfbEntry = m_moduleInfo.xfb->entries + i;\n const DxbcSgnEntry* sigEntry = m_osgn->find(\n xfbEntry->semanticName,\n xfbEntry->semanticIndex,\n 
xfbEntry->streamId);\n\n if (sigEntry == nullptr)\n continue;\n \n DxbcRegisterInfo varInfo;\n varInfo.type.ctype = DxbcScalarType::Float32;\n varInfo.type.ccount = xfbEntry->componentCount;\n varInfo.type.alength = 0;\n varInfo.sclass = spv::StorageClassOutput;\n \n uint32_t dstComponentMask = (1 << xfbEntry->componentCount) - 1;\n uint32_t srcComponentMask = dstComponentMask\n << sigEntry->componentMask.firstSet()\n << xfbEntry->componentIndex;\n \n DxbcXfbVar xfbVar;\n xfbVar.varId = emitNewVariable(varInfo);\n xfbVar.streamId = xfbEntry->streamId;\n xfbVar.outputId = sigEntry->registerId;\n xfbVar.srcMask = DxbcRegMask(srcComponentMask);\n xfbVar.dstMask = DxbcRegMask(dstComponentMask);\n m_xfbVars.push_back(xfbVar);\n\n m_module.setDebugName(xfbVar.varId,\n str::format(\"xfb\", i).c_str());\n \n m_module.decorateXfb(xfbVar.varId,\n xfbEntry->streamId, xfbEntry->bufferId, xfbEntry->offset,\n m_moduleInfo.xfb->strides[xfbEntry->bufferId]);\n }\n\n // TODO Compact location/component assignment\n for (uint32_t i = 0; i < m_xfbVars.size(); i++) {\n m_xfbVars[i].location = i;\n m_xfbVars[i].component = 0;\n }\n\n for (uint32_t i = 0; i < m_xfbVars.size(); i++) {\n const DxbcXfbVar* var = &m_xfbVars[i];\n\n m_module.decorateLocation (var->varId, var->location);\n m_module.decorateComponent(var->varId, var->component);\n }\n }\n void emitXfbOutputSetup(\n uint32_t streamId,\n bool passthrough) {\n for (size_t i = 0; i < m_xfbVars.size(); i++) {\n if (m_xfbVars[i].streamId == streamId) {\n DxbcRegisterPointer srcPtr = passthrough\n ? 
m_vRegs[m_xfbVars[i].outputId]
            : m_oRegs[m_xfbVars[i].outputId];

          if (passthrough) {
            srcPtr = emitArrayAccess(srcPtr,
              spv::StorageClassInput,
              m_module.constu32(0));
          }
          
          DxbcRegisterPointer dstPtr;
          dstPtr.type.ctype  = DxbcScalarType::Float32;
          dstPtr.type.ccount = m_xfbVars[i].dstMask.popCount();
          dstPtr.id = m_xfbVars[i].varId;

          DxbcRegisterValue value = emitRegisterExtract(
            emitValueLoad(srcPtr), m_xfbVars[i].srcMask);
          emitValueStore(dstPtr, value, m_xfbVars[i].dstMask);
        }
      }
    }
    // Invokes the hull shader's control point phase function once.
    void emitHsControlPointPhase(
      const DxbcCompilerHsControlPointPhase&  phase) {
      m_module.opFunctionCall(
        m_module.defVoidType(),
        phase.functionId, 0, nullptr);
    }
    // Invokes a fork or join phase once per declared instance, passing
    // the instance index as a constant argument; the instance loop is
    // unrolled at shader compile time.
    void emitHsForkJoinPhase(
      const DxbcCompilerHsForkJoinPhase&      phase) {
      for (uint32_t i = 0; i < phase.instanceCount; i++) {
        uint32_t invocationId = m_module.constu32(i);
        
        m_module.opFunctionCall(
          m_module.defVoidType(),
          phase.functionId, 1,
          &invocationId);
      }
    }
    // Workgroup-scope control barrier between hull shader phases, with
    // acquire-release semantics on output memory so that control point
    // outputs are visible to the subsequent fork/join phases.
    void emitHsPhaseBarrier() {
      uint32_t exeScopeId = m_module.constu32(spv::ScopeWorkgroup);
      uint32_t memScopeId = m_module.constu32(spv::ScopeWorkgroup);
      uint32_t semanticId = m_module.constu32(
        spv::MemorySemanticsOutputMemoryMask |
        spv::MemorySemanticsAcquireReleaseMask |
        spv::MemorySemanticsMakeAvailableMask |
        spv::MemorySemanticsMakeVisibleMask);
      
      m_module.opControlBarrier(exeScopeId, memScopeId, semanticId);
    }
    // Opens a conditional block that is only executed by invocations
    // whose ID is less than 'count'. Must be paired with a matching
    // emitHsInvocationBlockEnd call.
    void emitHsInvocationBlockBegin(
            uint32_t          count) {
      uint32_t invocationId = m_module.opLoad(
        getScalarTypeId(DxbcScalarType::Uint32),
        m_hs.builtinInvocationId);
      
      uint32_t condition = m_module.opULessThan(
        m_module.defBoolType(), invocationId,
        m_module.constu32(count));
      
      m_hs.invocationBlockBegin = m_module.allocateId();
      m_hs.invocationBlockEnd = m_module.allocateId();
      
      m_module.opSelectionMerge(
        m_hs.invocationBlockEnd,
        spv::SelectionControlMaskNone);
      
      m_module.opBranchConditional(
        condition,
        m_hs.invocationBlockBegin,
        m_hs.invocationBlockEnd);
      
      m_module.opLabel(
        m_hs.invocationBlockBegin);
    }
    // Closes the block opened by emitHsInvocationBlockBegin and resets
    // the stored label IDs.
    void emitHsInvocationBlockEnd() {
      m_module.opBranch (m_hs.invocationBlockEnd);
      m_module.opLabel (m_hs.invocationBlockEnd);
      
      m_hs.invocationBlockBegin = 0;
      m_hs.invocationBlockEnd = 0;
    }
    // Copies per-patch outputs from the private per-patch array to the
    // actual output interface, one vec4 register at a time, for every
    // register flagged in outputPerPatchMask. No-op when the shader
    // declares no per-patch outputs.
    void emitHsOutputSetup() {
      uint32_t outputPerPatch = emitTessInterfacePerPatch(spv::StorageClassOutput);

      if (!outputPerPatch)
        return;

      uint32_t vecType = getVectorTypeId({ DxbcScalarType::Float32, 4 });

      uint32_t srcPtrType = m_module.defPointerType(vecType, spv::StorageClassPrivate);
      uint32_t dstPtrType = m_module.defPointerType(vecType, spv::StorageClassOutput);

      for (uint32_t i = 0; i < 32; i++) {
        if (m_hs.outputPerPatchMask & (1 << i)) {
          uint32_t index = m_module.constu32(i);

          uint32_t srcPtr = m_module.opAccessChain(srcPtrType, m_hs.outputPerPatch, 1, &index);
          uint32_t dstPtr = m_module.opAccessChain(dstPtrType, outputPerPatch, 1, &index);

          m_module.opStore(dstPtr, m_module.opLoad(vecType, srcPtr));
        }
      }
    }
    // Declares the per-patch tessellation interface array, sized by the
    // patch constant signature. The debug name reflects the storage
    // class: vPatch (input), rPatch (private), oPatch (output).
    uint32_t emitTessInterfacePerPatch(
            spv::StorageClass storageClass) {
      const char* name = "vPatch";

      if (storageClass == spv::StorageClassPrivate)
        name = "rPatch";
      if (storageClass == spv::StorageClassOutput)
        name = "oPatch";
      
      uint32_t arrLen = m_psgn != nullptr ?
m_psgn->maxRegisterCount() : 0;\n\n if (!arrLen)\n return 0;\n\n uint32_t vecType = m_module.defVectorType (m_module.defFloatType(32), 4);\n uint32_t arrType = m_module.defArrayType (vecType, m_module.constu32(arrLen));\n uint32_t ptrType = m_module.defPointerType(arrType, storageClass);\n uint32_t varId = m_module.newVar (ptrType, storageClass);\n \n m_module.setDebugName (varId, name);\n \n if (storageClass != spv::StorageClassPrivate) {\n m_module.decorate (varId, spv::DecorationPatch);\n m_module.decorateLocation (varId, 0);\n }\n\n return varId;\n }\n uint32_t emitTessInterfacePerVertex(\n spv::StorageClass storageClass,\n uint32_t vertexCount) {\n const bool isInput = storageClass == spv::StorageClassInput;\n \n uint32_t arrLen = isInput\n ? (m_isgn != nullptr ? m_isgn->maxRegisterCount() : 0)\n : (m_osgn != nullptr ? m_osgn->maxRegisterCount() : 0);\n \n if (!arrLen)\n return 0;\n \n uint32_t locIdx = m_psgn != nullptr\n ? m_psgn->maxRegisterCount()\n : 0;\n \n uint32_t vecType = m_module.defVectorType (m_module.defFloatType(32), 4);\n uint32_t arrTypeInner = m_module.defArrayType (vecType, m_module.constu32(arrLen));\n uint32_t arrTypeOuter = m_module.defArrayType (arrTypeInner, m_module.constu32(vertexCount));\n uint32_t ptrType = m_module.defPointerType(arrTypeOuter, storageClass);\n uint32_t varId = m_module.newVar (ptrType, storageClass);\n \n m_module.setDebugName (varId, isInput ? \"vVertex\" : \"oVertex\");\n m_module.decorateLocation (varId, locIdx);\n return varId;\n }\n void emitDclInputArray(\n uint32_t vertexCount) {\n DxbcVectorType info;\n info.ctype = DxbcScalarType::Float32;\n info.ccount = 4;\n\n // Define the array type. This will be two-dimensional\n // in some shaders, with the outer index representing\n // the vertex ID within an invocation.\n m_vArrayLength = m_isgn != nullptr ? 
std::max(1u, m_isgn->maxRegisterCount()) : 1;\n m_vArrayLengthId = m_module.lateConst32(getScalarTypeId(DxbcScalarType::Uint32));\n\n uint32_t vectorTypeId = getVectorTypeId(info);\n uint32_t arrayTypeId = m_module.defArrayType(vectorTypeId, m_vArrayLengthId);\n \n if (vertexCount != 0) {\n arrayTypeId = m_module.defArrayType(\n arrayTypeId, m_module.constu32(vertexCount));\n }\n \n // Define the actual variable. Note that this is private\n // because we will copy input registers and some system\n // variables to the array during the setup phase.\n const uint32_t ptrTypeId = m_module.defPointerType(\n arrayTypeId, spv::StorageClassPrivate);\n \n const uint32_t varId = m_module.newVar(\n ptrTypeId, spv::StorageClassPrivate);\n \n m_module.setDebugName(varId, \"shader_in\");\n m_vArray = varId;\n }\n uint32_t emitDclClipCullDistanceArray(\n uint32_t length,\n spv::BuiltIn builtIn,\n spv::StorageClass storageClass) {\n if (length == 0)\n return 0;\n \n uint32_t t_f32 = m_module.defFloatType(32);\n uint32_t t_arr = m_module.defArrayType(t_f32, m_module.constu32(length));\n uint32_t t_ptr = m_module.defPointerType(t_arr, storageClass);\n uint32_t varId = m_module.newVar(t_ptr, storageClass);\n \n m_module.decorateBuiltIn(varId, builtIn);\n m_module.setDebugName(varId,\n builtIn == spv::BuiltInClipDistance\n ? 
\"clip_distances\"\n : \"cull_distances\");\n \n return varId;\n }\n DxbcCompilerHsControlPointPhase emitNewHullShaderControlPointPhase() {\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr);\n \n uint32_t funId = m_module.allocateId();\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n this->emitFunctionLabel();\n \n DxbcCompilerHsControlPointPhase result;\n result.functionId = funId;\n return result;\n }\n DxbcCompilerHsControlPointPhase emitNewHullShaderPassthroughPhase() {\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr);\n \n // Begin passthrough function\n uint32_t funId = m_module.allocateId();\n m_module.setDebugName(funId, \"hs_passthrough\");\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n this->emitFunctionLabel();\n \n // We'll basically copy each input variable to the corresponding\n // output, using the shader's invocation ID as the array index.\n const uint32_t invocationId = m_module.opLoad(\n getScalarTypeId(DxbcScalarType::Uint32),\n m_hs.builtinInvocationId);\n \n for (auto i = m_isgn->begin(); i != m_isgn->end(); i++) {\n this->emitDclInput(\n i->registerId, m_hs.vertexCountIn,\n i->componentMask,\n DxbcSystemValue::None,\n DxbcInterpolationMode::Undefined);\n \n // Vector type index\n const std::array dstIndices\n = {{ invocationId, m_module.constu32(i->registerId) }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i->registerId).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i->registerId).id, 1, &invocationId);\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n\n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(dstPtr.type), 
spv::StorageClassOutput),\n m_hs.outputPerVertex, dstIndices.size(), dstIndices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n \n // End function\n this->emitFunctionEnd();\n \n DxbcCompilerHsControlPointPhase result;\n result.functionId = funId;\n return result;\n }\n DxbcCompilerHsForkJoinPhase emitNewHullShaderForkJoinPhase() {\n uint32_t argTypeId = m_module.defIntType(32, 0);\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 1, &argTypeId);\n \n uint32_t funId = m_module.allocateId();\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n \n uint32_t argId = m_module.functionParameter(argTypeId);\n this->emitFunctionLabel();\n \n DxbcCompilerHsForkJoinPhase result;\n result.functionId = funId;\n result.instanceId = argId;\n return result;\n }\n uint32_t emitSamplePosArray() {\n const std::array samplePosVectors = {{\n // Invalid sample count / unbound resource\n m_module.constvec2f32( 0.0f, 0.0f),\n // VK_SAMPLE_COUNT_1_BIT\n m_module.constvec2f32( 0.0f, 0.0f),\n // VK_SAMPLE_COUNT_2_BIT\n m_module.constvec2f32( 0.25f, 0.25f),\n m_module.constvec2f32(-0.25f,-0.25f),\n // VK_SAMPLE_COUNT_4_BIT\n m_module.constvec2f32(-0.125f,-0.375f),\n m_module.constvec2f32( 0.375f,-0.125f),\n m_module.constvec2f32(-0.375f, 0.125f),\n m_module.constvec2f32( 0.125f, 0.375f),\n // VK_SAMPLE_COUNT_8_BIT\n m_module.constvec2f32( 0.0625f,-0.1875f),\n m_module.constvec2f32(-0.0625f, 0.1875f),\n m_module.constvec2f32( 0.3125f, 0.0625f),\n m_module.constvec2f32(-0.1875f,-0.3125f),\n m_module.constvec2f32(-0.3125f, 0.3125f),\n m_module.constvec2f32(-0.4375f,-0.0625f),\n m_module.constvec2f32( 0.1875f, 0.4375f),\n m_module.constvec2f32( 0.4375f,-0.4375f),\n // VK_SAMPLE_COUNT_16_BIT\n m_module.constvec2f32( 0.0625f, 0.0625f),\n m_module.constvec2f32(-0.0625f,-0.1875f),\n m_module.constvec2f32(-0.1875f, 0.1250f),\n m_module.constvec2f32( 0.2500f,-0.0625f),\n 
m_module.constvec2f32(-0.3125f,-0.1250f),\n m_module.constvec2f32( 0.1250f, 0.3125f),\n m_module.constvec2f32( 0.3125f, 0.1875f),\n m_module.constvec2f32( 0.1875f,-0.3125f),\n m_module.constvec2f32(-0.1250f, 0.3750f),\n m_module.constvec2f32( 0.0000f,-0.4375f),\n m_module.constvec2f32(-0.2500f,-0.3750f),\n m_module.constvec2f32(-0.3750f, 0.2500f),\n m_module.constvec2f32(-0.5000f, 0.0000f),\n m_module.constvec2f32( 0.4375f,-0.2500f),\n m_module.constvec2f32( 0.3750f, 0.4375f),\n m_module.constvec2f32(-0.4375f,-0.5000f),\n }};\n \n uint32_t arrayTypeId = getArrayTypeId({\n DxbcScalarType::Float32, 2,\n static_cast(samplePosVectors.size()) });\n \n uint32_t samplePosArray = m_module.constComposite(\n arrayTypeId,\n samplePosVectors.size(),\n samplePosVectors.data());\n \n uint32_t varId = m_module.newVarInit(\n m_module.defPointerType(arrayTypeId, spv::StorageClassPrivate),\n spv::StorageClassPrivate, samplePosArray);\n \n m_module.setDebugName(varId, \"g_sample_pos\");\n m_module.decorate(varId, spv::DecorationNonWritable);\n return varId;\n }\n void emitFloatControl() {\n DxbcFloatControlFlags flags = m_moduleInfo.options.floatControl;\n\n if (flags.isClear())\n return;\n\n const uint32_t width32 = 32;\n const uint32_t width64 = 64;\n\n if (flags.test(DxbcFloatControlFlag::DenormFlushToZero32)) {\n m_module.enableCapability(spv::CapabilityDenormFlushToZero);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDenormFlushToZero, 1, &width32);\n }\n\n if (flags.test(DxbcFloatControlFlag::PreserveNan32)) {\n m_module.enableCapability(spv::CapabilitySignedZeroInfNanPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSignedZeroInfNanPreserve, 1, &width32);\n }\n\n if (m_module.hasCapability(spv::CapabilityFloat64)) {\n if (flags.test(DxbcFloatControlFlag::DenormPreserve64)) {\n m_module.enableCapability(spv::CapabilityDenormPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDenormPreserve, 1, &width64);\n }\n\n if 
(flags.test(DxbcFloatControlFlag::PreserveNan64)) {\n m_module.enableCapability(spv::CapabilitySignedZeroInfNanPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSignedZeroInfNanPreserve, 1, &width64);\n }\n }\n }\n uint32_t emitNewVariable(\n const DxbcRegisterInfo& info) {\n const uint32_t ptrTypeId = this->getPointerTypeId(info);\n return m_module.newVar(ptrTypeId, info.sclass);\n }\n uint32_t emitNewBuiltinVariable(\n const DxbcRegisterInfo& info,\n spv::BuiltIn builtIn,\n const char* name) {\n const uint32_t varId = emitNewVariable(info);\n \n if (name)\n m_module.setDebugName(varId, name);\n\n m_module.decorateBuiltIn(varId, builtIn);\n\n if (m_programInfo.type() == DxbcProgramType::PixelShader\n && info.type.ctype != DxbcScalarType::Float32\n && info.type.ctype != DxbcScalarType::Bool\n && info.sclass == spv::StorageClassInput)\n m_module.decorate(varId, spv::DecorationFlat);\n\n return varId;\n }\n uint32_t emitBuiltinTessLevelOuter(\n spv::StorageClass storageClass) {\n uint32_t id = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Float32, 0, 4 },\n storageClass },\n spv::BuiltInTessLevelOuter,\n \"bTessLevelOuter\");\n \n m_module.decorate(id, spv::DecorationPatch);\n return id;\n }\n uint32_t emitBuiltinTessLevelInner(\n spv::StorageClass storageClass) {\n uint32_t id = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Float32, 0, 2 },\n storageClass },\n spv::BuiltInTessLevelInner,\n \"bTessLevelInner\");\n \n m_module.decorate(id, spv::DecorationPatch);\n return id;\n }\n uint32_t emitPushConstants() {\n uint32_t uintTypeId = m_module.defIntType(32, 0);\n uint32_t structTypeId = m_module.defStructTypeUnique(1, &uintTypeId);\n\n m_module.setDebugName(structTypeId, \"pc_t\");\n m_module.setDebugMemberName(structTypeId, 0, \"RasterizerSampleCount\");\n m_module.memberDecorateOffset(structTypeId, 0, 0);\n\n uint32_t ptrTypeId = m_module.defPointerType(structTypeId, spv::StorageClassPushConstant);\n 
uint32_t varId = m_module.newVar(ptrTypeId, spv::StorageClassPushConstant);\n\n m_module.setDebugName(varId, \"pc\");\n return varId;\n }\n DxbcCfgBlock* cfgFindBlock(\n const std::initializer_list& types);\n DxbcBufferInfo getBufferInfo(\n const DxbcRegister& reg) {\n const uint32_t registerId = reg.idx[0].offset;\n \n switch (reg.type) {\n case DxbcOperandType::Resource: {\n const auto& texture = m_textures.at(registerId);\n\n DxbcBufferInfo result;\n result.image = texture.imageInfo;\n result.stype = texture.sampledType;\n result.type = texture.type;\n result.typeId = texture.imageTypeId;\n result.varId = texture.varId;\n result.stride = texture.structStride;\n result.coherence = 0;\n result.isSsbo = texture.isRawSsbo;\n return result;\n } break;\n \n case DxbcOperandType::UnorderedAccessView: {\n const auto& uav = m_uavs.at(registerId);\n\n DxbcBufferInfo result;\n result.image = uav.imageInfo;\n result.stype = uav.sampledType;\n result.type = uav.type;\n result.typeId = uav.imageTypeId;\n result.varId = uav.varId;\n result.stride = uav.structStride;\n result.coherence = uav.coherence;\n result.isSsbo = uav.isRawSsbo;\n return result;\n } break;\n \n case DxbcOperandType::ThreadGroupSharedMemory: {\n DxbcBufferInfo result;\n result.image = { spv::DimBuffer, 0, 0, 0 };\n result.stype = DxbcScalarType::Uint32;\n result.type = m_gRegs.at(registerId).type;\n result.typeId = m_module.defPointerType(\n getScalarTypeId(DxbcScalarType::Uint32),\n spv::StorageClassWorkgroup);\n result.varId = m_gRegs.at(registerId).varId;\n result.stride = m_gRegs.at(registerId).elementStride;\n result.coherence = spv::ScopeInvocation;\n result.isSsbo = false;\n return result;\n } break;\n \n default:\n throw DxvkError(str::format(\"DxbcCompiler: Invalid operand type for buffer: \", reg.type));\n }\n }\n uint32_t getTexSizeDim(\n const DxbcImageInfo& imageType) const {\n switch (imageType.dim) {\n case spv::DimBuffer: return 1 + imageType.array;\n case spv::Dim1D: return 1 + 
imageType.array;\n case spv::Dim2D: return 2 + imageType.array;\n case spv::Dim3D: return 3 + imageType.array;\n case spv::DimCube: return 2 + imageType.array;\n default: throw DxvkError(\"DxbcCompiler: getTexLayerDim: Unsupported image dimension\");\n }\n }\n uint32_t getTexLayerDim(\n const DxbcImageInfo& imageType) const {\n switch (imageType.dim) {\n case spv::DimBuffer: return 1;\n case spv::Dim1D: return 1;\n case spv::Dim2D: return 2;\n case spv::Dim3D: return 3;\n case spv::DimCube: return 3;\n default: throw DxvkError(\"DxbcCompiler: getTexLayerDim: Unsupported image dimension\");\n }\n }\n uint32_t getTexCoordDim(\n const DxbcImageInfo& imageType) const {\n return getTexLayerDim(imageType) + imageType.array;\n }\n DxbcRegMask getTexCoordMask(\n const DxbcImageInfo& imageType) const {\n return DxbcRegMask::firstN(getTexCoordDim(imageType));\n }\n DxbcVectorType getInputRegType(\n uint32_t regIdx) const {\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: {\n const DxbcSgnEntry* entry = m_isgn->findByRegister(regIdx);\n \n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n \n if (entry != nullptr) {\n result.ctype = entry->componentType;\n result.ccount = entry->componentMask.popCount();\n }\n \n return result;\n }\n\n case DxbcProgramType::DomainShader: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n return result;\n }\n\n default: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n\n if (m_isgn == nullptr || !m_isgn->findByRegister(regIdx))\n return result;\n\n DxbcRegMask mask(0u);\n DxbcRegMask used(0u);\n\n for (const auto& e : *m_isgn) {\n if (e.registerId == regIdx && !ignoreInputSystemValue(e.systemValue)) {\n mask |= e.componentMask;\n used |= e.componentUsed;\n }\n }\n\n if (m_programInfo.type() == DxbcProgramType::PixelShader) {\n if ((used.raw() & mask.raw()) == used.raw())\n mask = used;\n }\n\n 
result.ccount = mask.minComponents();\n return result;\n }\n }\n }\n DxbcVectorType getOutputRegType(\n uint32_t regIdx) const {\n switch (m_programInfo.type()) {\n case DxbcProgramType::PixelShader: {\n const DxbcSgnEntry* entry = m_osgn->findByRegister(regIdx);\n\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n \n if (entry != nullptr) {\n result.ctype = entry->componentType;\n result.ccount = entry->componentMask.popCount();\n }\n\n return result;\n }\n\n case DxbcProgramType::HullShader: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n return result;\n }\n\n default: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n\n if (m_osgn->findByRegister(regIdx))\n result.ccount = m_osgn->regMask(regIdx).minComponents();\n return result;\n }\n }\n }\n DxbcImageInfo getResourceType(\n DxbcResourceDim resourceType,\n bool isUav) const {\n uint32_t ms = m_moduleInfo.options.disableMsaa ? 0 : 1;\n\n switch (resourceType) {\n case DxbcResourceDim::Buffer: return { spv::DimBuffer, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_MAX_ENUM };\n case DxbcResourceDim::Texture1D: return { spv::Dim1D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_1D };\n case DxbcResourceDim::Texture1DArr: return { spv::Dim1D, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_1D_ARRAY };\n case DxbcResourceDim::Texture2D: return { spv::Dim2D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D };\n case DxbcResourceDim::Texture2DArr: return { spv::Dim2D, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D_ARRAY };\n case DxbcResourceDim::Texture2DMs: return { spv::Dim2D, 0, ms,isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D };\n case DxbcResourceDim::Texture2DMsArr: return { spv::Dim2D, 1, ms,isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D_ARRAY };\n case DxbcResourceDim::Texture3D: return { spv::Dim3D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_3D };\n case DxbcResourceDim::TextureCube: return { spv::DimCube, 0, 0, isUav ? 
2u : 1u, VK_IMAGE_VIEW_TYPE_CUBE };\n case DxbcResourceDim::TextureCubeArr: return { spv::DimCube, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_CUBE_ARRAY };\n default: throw DxvkError(str::format(\"DxbcCompiler: Unsupported resource type: \", resourceType));\n }\n }\n spv::ImageFormat getScalarImageFormat(\n DxbcScalarType type) const {\n switch (type) {\n case DxbcScalarType::Float32: return spv::ImageFormatR32f;\n case DxbcScalarType::Sint32: return spv::ImageFormatR32i;\n case DxbcScalarType::Uint32: return spv::ImageFormatR32ui;\n default: throw DxvkError(\"DxbcCompiler: Unhandled scalar resource type\");\n }\n }\n bool isDoubleType(\n DxbcScalarType type) const {\n return type == DxbcScalarType::Sint64\n || type == DxbcScalarType::Uint64\n || type == DxbcScalarType::Float64;\n }\n DxbcRegisterPointer getIndexableTempPtr(\n const DxbcRegister& operand,\n DxbcRegisterValue vectorId) {\n // x# regs are indexed as follows:\n // (0) register index (immediate)\n // (1) element index (relative)\n const uint32_t regId = operand.idx[0].offset;\n \n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = m_xRegs[regId].ccount;\n info.type.alength = 0;\n info.sclass = spv::StorageClassPrivate;\n \n DxbcRegisterPointer result;\n result.type.ctype = info.type.ctype;\n result.type.ccount = info.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(info),\n m_xRegs.at(regId).varId,\n 1, &vectorId.id);\n\n return result;\n }\n bool caseBlockIsFallthrough() const {\n return m_lastOp != DxbcOpcode::Case\n && m_lastOp != DxbcOpcode::Default\n && m_lastOp != DxbcOpcode::Break\n && m_lastOp != DxbcOpcode::Ret;\n }\n uint32_t getUavCoherence(\n uint32_t registerId,\n DxbcUavFlags flags) {\n // For any ROV with write access, we must ensure that\n // availability operations happen within the locked scope.\n if (flags.test(DxbcUavFlag::RasterizerOrdered)\n && (m_analysis->uavInfos[registerId].accessFlags & VK_ACCESS_SHADER_WRITE_BIT)) {\n 
m_hasGloballyCoherentUav = true;\n m_hasRasterizerOrderedUav = true;\n return spv::ScopeQueueFamily;\n }\n\n // Ignore any resources that can't both be read and written in\n // the current shader, explicit availability/visibility operands\n // are not useful in that case.\n if (m_analysis->uavInfos[registerId].accessFlags != (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT))\n return 0;\n\n // If the globally coherent flag is set, the resource must be\n // coherent across multiple workgroups of the same dispatch\n if (flags.test(DxbcUavFlag::GloballyCoherent)) {\n m_hasGloballyCoherentUav = true;\n return spv::ScopeQueueFamily;\n }\n\n // In compute shaders, UAVs are implicitly workgroup coherent,\n // but we can rely on memory barrier instructions to make any\n // access available and visible to the entire workgroup.\n if (m_programInfo.type() == DxbcProgramType::ComputeShader)\n return spv::ScopeInvocation;\n\n return 0;\n }\n bool ignoreInputSystemValue(\n DxbcSystemValue sv) const {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::IsFrontFace:\n case DxbcSystemValue::SampleIndex:\n case DxbcSystemValue::PrimitiveId:\n case DxbcSystemValue::Coverage:\n return m_programInfo.type() == DxbcProgramType::PixelShader;\n\n default:\n return false;\n }\n }\n void emitUavBarrier(\n uint64_t readMask,\n uint64_t writeMask) {\n if (!m_moduleInfo.options.forceComputeUavBarriers\n || m_programInfo.type() != DxbcProgramType::ComputeShader)\n return;\n\n // If both masks are 0, emit a barrier in case at least one read-write UAV\n // has a pending unsynchronized access. Only consider read-after-write and\n // write-after-read hazards, assume that back-to-back stores are safe and\n // do not overlap in memory. 
Atomics are also completely ignored here.\n uint64_t rdMask = m_uavRdMask;\n uint64_t wrMask = m_uavWrMask;\n\n bool insertBarrier = bool(rdMask & wrMask);\n\n if (readMask || writeMask) {\n rdMask &= m_uavWrMask;\n wrMask &= m_uavRdMask;\n }\n\n for (auto uav : bit::BitMask(rdMask | wrMask)) {\n constexpr VkAccessFlags rwAccess = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n insertBarrier |= (m_analysis->uavInfos[uav].accessFlags & rwAccess) == rwAccess;\n }\n\n // Need to be in uniform top-level control flow, or otherwise\n // it is not safe to insert control barriers.\n if (insertBarrier && m_controlFlowBlocks.empty() && m_topLevelIsUniform) {\n m_module.opControlBarrier(\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(m_hasGloballyCoherentUav ? spv::ScopeQueueFamily : spv::ScopeWorkgroup),\n m_module.constu32(spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsImageMemoryMask\n | spv::MemorySemanticsUniformMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask));\n\n m_uavWrMask = 0u;\n m_uavRdMask = 0u;\n }\n\n // Mark pending accesses\n m_uavWrMask |= writeMask;\n m_uavRdMask |= readMask;\n }\n uint32_t getScalarTypeId(\n DxbcScalarType type) {\n if (type == DxbcScalarType::Float64)\n m_module.enableCapability(spv::CapabilityFloat64);\n \n if (type == DxbcScalarType::Sint64 || type == DxbcScalarType::Uint64)\n m_module.enableCapability(spv::CapabilityInt64);\n \n switch (type) {\n case DxbcScalarType::Uint32: return m_module.defIntType(32, 0);\n case DxbcScalarType::Uint64: return m_module.defIntType(64, 0);\n case DxbcScalarType::Sint32: return m_module.defIntType(32, 1);\n case DxbcScalarType::Sint64: return m_module.defIntType(64, 1);\n case DxbcScalarType::Float32: return m_module.defFloatType(32);\n case DxbcScalarType::Float64: return m_module.defFloatType(64);\n case DxbcScalarType::Bool: return m_module.defBoolType();\n }\n\n throw 
DxvkError(\"DxbcCompiler: Invalid scalar type\");\n }\n uint32_t getVectorTypeId(\n const DxbcVectorType& type) {\n uint32_t typeId = this->getScalarTypeId(type.ctype);\n \n if (type.ccount > 1)\n typeId = m_module.defVectorType(typeId, type.ccount);\n \n return typeId;\n }\n uint32_t getArrayTypeId(\n const DxbcArrayType& type) {\n DxbcVectorType vtype;\n vtype.ctype = type.ctype;\n vtype.ccount = type.ccount;\n \n uint32_t typeId = this->getVectorTypeId(vtype);\n \n if (type.alength != 0) {\n typeId = m_module.defArrayType(typeId,\n m_module.constu32(type.alength));\n }\n \n return typeId;\n }\n uint32_t getPointerTypeId(\n const DxbcRegisterInfo& type) {\n return m_module.defPointerType(\n this->getArrayTypeId(type.type),\n type.sclass);\n }\n uint32_t getSparseResultTypeId(\n uint32_t baseType) {\n m_module.enableCapability(spv::CapabilitySparseResidency);\n\n uint32_t uintType = getScalarTypeId(DxbcScalarType::Uint32);\n std::array typeIds = { uintType, baseType };\n return m_module.defStructType(typeIds.size(), typeIds.data());\n }\n uint32_t getFunctionId(\n uint32_t functionNr) {\n auto entry = m_subroutines.find(functionNr);\n if (entry != m_subroutines.end())\n return entry->second;\n \n uint32_t functionId = m_module.allocateId();\n m_subroutines.insert({ functionNr, functionId });\n return functionId;\n }\n DxbcCompilerHsForkJoinPhase* getCurrentHsForkJoinPhase();\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_module.h", "class DxbcAnalyzer {\n public:\n DxbcModule(DxbcReader& reader) {\n for (uint32_t i = 0; i < m_header.numChunks(); i++) {\n \n // The chunk tag is stored at the beginning of each chunk\n auto chunkReader = reader.clone(m_header.chunkOffset(i));\n auto tag = chunkReader.readTag();\n \n // The chunk size follows right after the four-character\n // code. 
This does not include the eight bytes that are\n // consumed by the FourCC and chunk length entry.\n auto chunkLength = chunkReader.readu32();\n \n chunkReader = chunkReader.clone(8);\n chunkReader = chunkReader.resize(chunkLength);\n \n if ((tag == \"SHDR\") || (tag == \"SHEX\"))\n m_shexChunk = new DxbcShex(chunkReader);\n \n if ((tag == \"ISGN\") || (tag == \"ISG1\"))\n m_isgnChunk = new DxbcIsgn(chunkReader, tag);\n \n if ((tag == \"OSGN\") || (tag == \"OSG5\") || (tag == \"OSG1\"))\n m_osgnChunk = new DxbcIsgn(chunkReader, tag);\n \n if ((tag == \"PCSG\") || (tag == \"PSG1\"))\n m_psgnChunk = new DxbcIsgn(chunkReader, tag);\n }\n }\n ~DxbcModule() {\n \n }\n SpirvCodeBuffer compile(\n const DxbcModuleInfo& moduleInfo,\n const std::string& fileName) {\n if (m_shexChunk == nullptr)\n throw DxvkError(\"DxbcModule::compile: No SHDR/SHEX chunk\");\n \n DxbcAnalysisInfo analysisInfo;\n \n DxbcAnalyzer analyzer(moduleInfo,\n m_shexChunk->programInfo(),\n m_isgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n this->runAnalyzer(analyzer, m_shexChunk->slice());\n\n m_bindings = std::make_optional(analysisInfo.bindings);\n \n DxbcCompiler compiler(\n fileName, moduleInfo,\n m_shexChunk->programInfo(),\n m_isgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n this->runCompiler(compiler, m_shexChunk->slice());\n\n m_icb = compiler.getIcbData();\n\n return compiler.finalize();\n }\n SpirvCodeBuffer compilePassthroughShader(\n const DxbcModuleInfo& moduleInfo,\n const std::string& fileName) const {\n if (m_shexChunk == nullptr)\n throw DxvkError(\"DxbcModule::compile: No SHDR/SHEX chunk\");\n \n DxbcAnalysisInfo analysisInfo;\n\n DxbcCompiler compiler(\n fileName, moduleInfo,\n DxbcProgramType::GeometryShader,\n m_osgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n compiler.processXfbPassthrough();\n return compiler.finalize();\n }\n private:\n DxbcHeader m_header;\n Rc m_isgnChunk;\n Rc m_osgnChunk;\n Rc m_psgnChunk;\n Rc m_shexChunk;\n std::vector 
m_icb;\n std::optional m_bindings;\n void runAnalyzer(\n DxbcAnalyzer& analyzer,\n DxbcCodeSlice slice) const {\n DxbcDecodeContext decoder;\n \n while (!slice.atEnd()) {\n decoder.decodeInstruction(slice);\n \n analyzer.processInstruction(\n decoder.getInstruction());\n }\n }\n void runCompiler(\n DxbcCompiler& compiler,\n DxbcCodeSlice slice) const {\n DxbcDecodeContext decoder;\n \n while (!slice.atEnd()) {\n decoder.decodeInstruction(slice);\n \n compiler.processInstruction(\n decoder.getInstruction());\n }\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_analysis.h", "class DxbcAnalyzer {\n public:\n DxbcAnalyzer(\n const DxbcModuleInfo& moduleInfo,\n const DxbcProgramInfo& programInfo,\n const Rc& isgn,\n const Rc& osgn,\n const Rc& psgn,\n DxbcAnalysisInfo& analysis) {\n // Get number of clipping and culling planes from the\n // input and output signatures. We will need this to\n // declare the shader input and output interfaces.\n m_analysis->clipCullIn = getClipCullInfo(m_isgn);\n m_analysis->clipCullOut = getClipCullInfo(m_osgn);\n }\n ~DxbcAnalyzer() {\n \n }\n void processInstruction(\n const DxbcShaderInstruction& ins) {\n switch (ins.opClass) {\n case DxbcInstClass::Atomic: {\n const uint32_t operandId = ins.dstCount - 1;\n\n if (ins.dst[operandId].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.dst[operandId].idx[0].offset;\n m_analysis->uavInfos[registerId].accessAtomicOp = true;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n\n // Check whether the atomic operation is order-invariant\n DxvkAccessOp op = DxvkAccessOp::None;\n\n switch (ins.op) {\n case DxbcOpcode::AtomicAnd: op = DxvkAccessOp::And; break;\n case DxbcOpcode::AtomicOr: op = DxvkAccessOp::Or; break;\n case DxbcOpcode::AtomicXor: op = DxvkAccessOp::Xor; break;\n case DxbcOpcode::AtomicIAdd: op = DxvkAccessOp::Add; break;\n case DxbcOpcode::AtomicIMax: op = DxvkAccessOp::IMax; break;\n 
case DxbcOpcode::AtomicIMin: op = DxvkAccessOp::IMin; break;\n case DxbcOpcode::AtomicUMax: op = DxvkAccessOp::UMax; break;\n case DxbcOpcode::AtomicUMin: op = DxvkAccessOp::UMin; break;\n default: break;\n }\n\n setUavAccessOp(registerId, op);\n }\n } break;\n\n case DxbcInstClass::TextureSample:\n case DxbcInstClass::TextureGather:\n case DxbcInstClass::TextureQueryLod:\n case DxbcInstClass::VectorDeriv: {\n m_analysis->usesDerivatives = true;\n } break;\n\n case DxbcInstClass::ControlFlow: {\n if (ins.op == DxbcOpcode::Discard)\n m_analysis->usesKill = true;\n } break;\n\n case DxbcInstClass::BufferLoad: {\n uint32_t operandId = ins.op == DxbcOpcode::LdStructured ? 2 : 1;\n bool sparseFeedback = ins.dstCount == 2;\n\n if (ins.src[operandId].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.src[operandId].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT;\n m_analysis->uavInfos[registerId].sparseFeedback |= sparseFeedback;\n\n setUavAccessOp(registerId, DxvkAccessOp::None);\n } else if (ins.src[operandId].type == DxbcOperandType::Resource) {\n const uint32_t registerId = ins.src[operandId].idx[0].offset;\n m_analysis->srvInfos[registerId].sparseFeedback |= sparseFeedback;\n }\n } break;\n\n case DxbcInstClass::BufferStore: {\n if (ins.dst[0].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_WRITE_BIT;\n\n setUavAccessOp(registerId, getStoreAccessOp(ins.dst[0].mask, ins.src[ins.srcCount - 1u]));\n }\n } break;\n\n case DxbcInstClass::TypedUavLoad: {\n const uint32_t registerId = ins.src[1].idx[0].offset;\n m_analysis->uavInfos[registerId].accessTypedLoad = true;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT;\n\n setUavAccessOp(registerId, DxvkAccessOp::None);\n } break;\n\n case DxbcInstClass::TypedUavStore: {\n const uint32_t registerId = 
ins.dst[0].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_WRITE_BIT;\n\n // The UAV format may change between dispatches, so be conservative here\n // and only allow this optimization when the app is writing zeroes.\n DxvkAccessOp storeOp = getStoreAccessOp(DxbcRegMask(0xf), ins.src[1u]);\n\n if (storeOp != DxvkAccessOp(DxvkAccessOp::OpType::StoreUi, 0u))\n storeOp = DxvkAccessOp::None;\n\n setUavAccessOp(registerId, storeOp);\n } break;\n\n case DxbcInstClass::Declaration: {\n switch (ins.op) {\n case DxbcOpcode::DclConstantBuffer: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcConstBufBindingCount)\n m_analysis->bindings.cbvMask |= 1u << registerId;\n } break;\n\n case DxbcOpcode::DclSampler: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcSamplerBindingCount)\n m_analysis->bindings.samplerMask |= 1u << registerId;\n } break;\n\n case DxbcOpcode::DclResource:\n case DxbcOpcode::DclResourceRaw:\n case DxbcOpcode::DclResourceStructured: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n uint32_t idx = registerId / 64u;\n uint32_t bit = registerId % 64u;\n\n if (registerId < DxbcResourceBindingCount)\n m_analysis->bindings.srvMask[idx] |= uint64_t(1u) << bit;\n } break;\n\n case DxbcOpcode::DclUavTyped:\n case DxbcOpcode::DclUavRaw:\n case DxbcOpcode::DclUavStructured: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcUavBindingCount)\n m_analysis->bindings.uavMask |= uint64_t(1u) << registerId;\n } break;\n\n default: ;\n }\n } break;\n\n default:\n break;\n }\n\n for (uint32_t i = 0; i < ins.dstCount; i++) {\n if (ins.dst[i].type == DxbcOperandType::IndexableTemp) {\n uint32_t index = ins.dst[i].idx[0].offset;\n m_analysis->xRegMasks[index] |= ins.dst[i].mask;\n }\n }\n }\n private:\n Rc m_isgn;\n Rc m_osgn;\n Rc m_psgn;\n DxbcAnalysisInfo* m_analysis = nullptr;\n DxbcClipCullInfo getClipCullInfo(\n const Rc& sgn) const {\n DxbcClipCullInfo 
result;\n \n if (sgn != nullptr) {\n for (auto e = sgn->begin(); e != sgn->end(); e++) {\n const uint32_t componentCount = e->componentMask.popCount();\n \n if (e->systemValue == DxbcSystemValue::ClipDistance)\n result.numClipPlanes += componentCount;\n if (e->systemValue == DxbcSystemValue::CullDistance)\n result.numCullPlanes += componentCount;\n }\n }\n \n return result;\n }\n void setUavAccessOp(uint32_t uav, DxvkAccessOp op) {\n if (m_analysis->uavInfos[uav].accessOp == DxvkAccessOp::None)\n m_analysis->uavInfos[uav].accessOp = op;\n\n // Maintain ordering if the UAV is accessed via other operations as well\n if (op == DxvkAccessOp::None || m_analysis->uavInfos[uav].accessOp != op)\n m_analysis->uavInfos[uav].nonInvariantAccess = true;\n }\n static DxvkAccessOp getStoreAccessOp(DxbcRegMask writeMask, const DxbcRegister& src) {\n if (src.type != DxbcOperandType::Imm32)\n return DxvkAccessOp::None;\n\n // Trivial case, same value is written to all components\n if (src.componentCount == DxbcComponentCount::Component1)\n return getConstantStoreOp(src.imm.u32_1);\n\n if (src.componentCount != DxbcComponentCount::Component4)\n return DxvkAccessOp::None;\n\n // Otherwise, make sure that all written components are equal\n DxvkAccessOp op = DxvkAccessOp::None;\n\n for (uint32_t i = 0u; i < 4u; i++) {\n if (!writeMask[i])\n continue;\n\n // If the written value can't be represented, skip\n DxvkAccessOp scalarOp = getConstantStoreOp(src.imm.u32_4[i]);\n\n if (scalarOp == DxvkAccessOp::None)\n return DxvkAccessOp::None;\n\n // First component written\n if (op == DxvkAccessOp::None)\n op = scalarOp;\n\n // Conflicting store ops\n if (op != scalarOp)\n return DxvkAccessOp::None;\n }\n\n return op;\n }\n static DxvkAccessOp getConstantStoreOp(uint32_t value) {\n constexpr uint32_t mask = 0xfffu;\n\n uint32_t ubits = value & mask;\n uint32_t fbits = (value >> 20u);\n\n if (value == ubits)\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreUi, ubits);\n\n if (value == (ubits | 
~mask))\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreSi, ubits);\n\n if (value == (fbits << 20u))\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreF, fbits);\n\n return DxvkAccessOp::None;\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_decoder.h", "class DxbcRegModifier {\n public:\n private:\n uint8_t m_mask = 0;\n public:\n private:\n uint8_t m_mask = 0;\n public:\n private:\n uint32_t m_bits;\n public:\n const uint32_t* ptrAt(uint32_t id) const;\n uint32_t at(uint32_t id) const {\n if (m_ptr + id >= m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return m_ptr[id];\n }\n uint32_t read() {\n if (m_ptr >= m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return *(m_ptr++);\n }\n DxbcCodeSlice take(uint32_t n) const {\n if (m_ptr + n > m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return DxbcCodeSlice(m_ptr, m_ptr + n);\n }\n DxbcCodeSlice skip(uint32_t n) const {\n if (m_ptr + n > m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return DxbcCodeSlice(m_ptr + n, m_end);\n }\n private:\n const uint32_t* m_ptr = nullptr;\n const uint32_t* m_end = nullptr;\n public:\n void decodeInstruction(DxbcCodeSlice& code) {\n const uint32_t token0 = code.at(0);\n \n // Initialize the instruction structure. 
Some of these values\n // may not get written otherwise while decoding the instruction.\n m_instruction.op = static_cast(bit::extract(token0, 0, 10));\n m_instruction.opClass = DxbcInstClass::Undefined;\n m_instruction.sampleControls = { 0, 0, 0 };\n m_instruction.dstCount = 0;\n m_instruction.srcCount = 0;\n m_instruction.immCount = 0;\n m_instruction.dst = m_dstOperands.data();\n m_instruction.src = m_srcOperands.data();\n m_instruction.imm = m_immOperands.data();\n m_instruction.customDataType = DxbcCustomDataClass::Comment;\n m_instruction.customDataSize = 0;\n m_instruction.customData = nullptr;\n \n // Reset the index pointer, which may still contain\n // a non-zero value from the previous iteration\n m_indexId = 0;\n \n // Instruction length, in DWORDs. This includes the token\n // itself and any other prefix that an instruction may have.\n uint32_t length = 0;\n \n if (m_instruction.op == DxbcOpcode::CustomData) {\n length = code.at(1);\n this->decodeCustomData(code.take(length));\n } else {\n length = bit::extract(token0, 24, 30);\n this->decodeOperation(code.take(length));\n }\n \n // Advance the caller's slice to the next token so that\n // they can make consecutive calls to decodeInstruction()\n code = code.skip(length);\n }\n private:\n DxbcShaderInstruction m_instruction;\n std::array m_dstOperands;\n std::array m_srcOperands;\n std::array m_immOperands;\n std::array m_indices;\n uint32_t m_indexId = 0;\n void decodeCustomData(DxbcCodeSlice code) {\n const uint32_t blockLength = code.at(1);\n \n if (blockLength < 2) {\n Logger::err(\"DxbcDecodeContext: Invalid custom data block\");\n return;\n }\n \n // Custom data blocks have their own instruction class\n m_instruction.op = DxbcOpcode::CustomData;\n m_instruction.opClass = DxbcInstClass::CustomData;\n \n // We'll point into the code buffer rather than making a copy\n m_instruction.customDataType = static_cast(\n bit::extract(code.at(0), 11, 31));\n m_instruction.customDataSize = blockLength - 2;\n 
m_instruction.customData = code.ptrAt(2);\n }\n void decodeOperation(DxbcCodeSlice code) {\n uint32_t token = code.read();\n \n // Result modifiers, which are applied to common ALU ops\n m_instruction.modifiers.saturate = !!bit::extract(token, 13, 13);\n m_instruction.modifiers.precise = !!bit::extract(token, 19, 22);\n \n // Opcode controls. It will depend on the\n // opcode itself which ones are valid.\n m_instruction.controls = DxbcShaderOpcodeControls(token);\n \n // Process extended opcode tokens\n while (bit::extract(token, 31, 31)) {\n token = code.read();\n \n const DxbcExtOpcode extOpcode\n = static_cast(bit::extract(token, 0, 5));\n \n switch (extOpcode) {\n case DxbcExtOpcode::SampleControls: {\n struct {\n int u : 4;\n int v : 4;\n int w : 4;\n } aoffimmi;\n \n aoffimmi.u = bit::extract(token, 9, 12);\n aoffimmi.v = bit::extract(token, 13, 16);\n aoffimmi.w = bit::extract(token, 17, 20);\n \n // Four-bit signed numbers, sign-extend them\n m_instruction.sampleControls.u = aoffimmi.u;\n m_instruction.sampleControls.v = aoffimmi.v;\n m_instruction.sampleControls.w = aoffimmi.w;\n } break;\n \n case DxbcExtOpcode::ResourceDim:\n case DxbcExtOpcode::ResourceReturnType:\n break; // part of resource description\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled extended opcode: \",\n extOpcode));\n }\n }\n \n // Retrieve the instruction format in order to parse the\n // operands. Doing this mostly automatically means that\n // the compiler can rely on the operands being valid.\n const DxbcInstFormat format = dxbcInstructionFormat(m_instruction.op);\n m_instruction.opClass = format.instructionClass;\n \n for (uint32_t i = 0; i < format.operandCount; i++)\n this->decodeOperand(code, format.operands[i]);\n }\n void decodeComponentSelection(DxbcRegister& reg, uint32_t token) {\n // Pick the correct component selection mode based on the\n // component count. 
We'll simplify this here so that the\n // compiler can assume that everything is a 4D vector.\n reg.componentCount = static_cast(bit::extract(token, 0, 1));\n \n switch (reg.componentCount) {\n // No components - used for samplers etc.\n case DxbcComponentCount::Component0:\n reg.mask = DxbcRegMask(false, false, false, false);\n reg.swizzle = DxbcRegSwizzle(0, 0, 0, 0);\n break;\n \n // One component - used for immediates\n // and a few built-in registers.\n case DxbcComponentCount::Component1:\n reg.mask = DxbcRegMask(true, false, false, false);\n reg.swizzle = DxbcRegSwizzle(0, 0, 0, 0);\n break;\n \n // Four components - everything else. This requires us\n // to actually parse the component selection mode.\n case DxbcComponentCount::Component4: {\n const DxbcRegMode componentMode =\n static_cast(bit::extract(token, 2, 3));\n \n switch (componentMode) {\n // Write mask for destination operands\n case DxbcRegMode::Mask:\n reg.mask = bit::extract(token, 4, 7);\n reg.swizzle = DxbcRegSwizzle(0, 1, 2, 3);\n break;\n \n // Swizzle for source operands (including resources)\n case DxbcRegMode::Swizzle:\n reg.mask = DxbcRegMask(true, true, true, true);\n reg.swizzle = DxbcRegSwizzle(\n bit::extract(token, 4, 5),\n bit::extract(token, 6, 7),\n bit::extract(token, 8, 9),\n bit::extract(token, 10, 11));\n break;\n \n // Selection of one component. 
We can generate both a\n // mask and a swizzle for this so that the compiler\n // won't have to deal with this case specifically.\n case DxbcRegMode::Select1: {\n const uint32_t n = bit::extract(token, 4, 5);\n reg.mask = DxbcRegMask(n == 0, n == 1, n == 2, n == 3);\n reg.swizzle = DxbcRegSwizzle(n, n, n, n);\n } break;\n \n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component selection mode\");\n }\n } break;\n \n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component count\");\n }\n }\n void decodeOperandExtensions(DxbcCodeSlice& code, DxbcRegister& reg, uint32_t token) {\n while (bit::extract(token, 31, 31)) {\n token = code.read();\n \n // Type of the extended operand token\n const DxbcOperandExt extTokenType =\n static_cast(bit::extract(token, 0, 5));\n \n switch (extTokenType) {\n // Operand modifiers, which are used to manipulate the\n // value of a source operand during the load operation\n case DxbcOperandExt::OperandModifier:\n reg.modifiers = bit::extract(token, 6, 13);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled extended operand token: \",\n extTokenType));\n }\n }\n }\n void decodeOperandImmediates(DxbcCodeSlice& code, DxbcRegister& reg) {\n if (reg.type == DxbcOperandType::Imm32\n || reg.type == DxbcOperandType::Imm64) {\n switch (reg.componentCount) {\n // This is commonly used if only one vector\n // component is involved in an operation\n case DxbcComponentCount::Component1: {\n reg.imm.u32_1 = code.read();\n } break;\n \n // Typical four-component vector\n case DxbcComponentCount::Component4: {\n reg.imm.u32_4[0] = code.read();\n reg.imm.u32_4[1] = code.read();\n reg.imm.u32_4[2] = code.read();\n reg.imm.u32_4[3] = code.read();\n } break;\n\n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component count for immediate operand\");\n }\n }\n }\n void decodeOperandIndex(DxbcCodeSlice& code, DxbcRegister& reg, uint32_t token) {\n reg.idxDim = bit::extract(token, 20, 21);\n \n for 
(uint32_t i = 0; i < reg.idxDim; i++) {\n // An index can be encoded in various different ways\n const DxbcOperandIndexRepresentation repr =\n static_cast(\n bit::extract(token, 22 + 3 * i, 24 + 3 * i));\n \n switch (repr) {\n case DxbcOperandIndexRepresentation::Imm32:\n reg.idx[i].offset = static_cast(code.read());\n reg.idx[i].relReg = nullptr;\n break;\n \n case DxbcOperandIndexRepresentation::Relative:\n reg.idx[i].offset = 0;\n reg.idx[i].relReg = &m_indices.at(m_indexId);\n \n this->decodeRegister(code,\n m_indices.at(m_indexId++),\n DxbcScalarType::Sint32);\n break;\n \n case DxbcOperandIndexRepresentation::Imm32Relative:\n reg.idx[i].offset = static_cast(code.read());\n reg.idx[i].relReg = &m_indices.at(m_indexId);\n \n this->decodeRegister(code,\n m_indices.at(m_indexId++),\n DxbcScalarType::Sint32);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled index representation: \",\n repr));\n }\n }\n }\n void decodeRegister(DxbcCodeSlice& code, DxbcRegister& reg, DxbcScalarType type) {\n const uint32_t token = code.read();\n \n reg.type = static_cast(bit::extract(token, 12, 19));\n reg.dataType = type;\n reg.modifiers = 0;\n reg.idxDim = 0;\n \n for (uint32_t i = 0; i < DxbcMaxRegIndexDim; i++) {\n reg.idx[i].relReg = nullptr;\n reg.idx[i].offset = 0;\n }\n \n this->decodeComponentSelection(reg, token);\n this->decodeOperandExtensions(code, reg, token);\n this->decodeOperandImmediates(code, reg);\n this->decodeOperandIndex(code, reg, token);\n }\n void decodeImm32(DxbcCodeSlice& code, DxbcImmediate& imm, DxbcScalarType type) {\n imm.u32 = code.read();\n }\n void decodeOperand(DxbcCodeSlice& code, const DxbcInstOperandFormat& format) {\n switch (format.kind) {\n case DxbcOperandKind::DstReg: {\n const uint32_t operandId = m_instruction.dstCount++;\n this->decodeRegister(code, m_dstOperands.at(operandId), format.type);\n } break;\n \n case DxbcOperandKind::SrcReg: {\n const uint32_t operandId = m_instruction.srcCount++;\n 
this->decodeRegister(code, m_srcOperands.at(operandId), format.type);\n } break;\n \n case DxbcOperandKind::Imm32: {\n const uint32_t operandId = m_instruction.immCount++;\n this->decodeImm32(code, m_immOperands.at(operandId), format.type);\n } break;\n \n default:\n throw DxvkError(\"DxbcDecodeContext: Invalid operand format\");\n }\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_bit.h", "#pragma once\n\n#if (defined(__x86_64__) && !defined(__arm64ec__)) || (defined(_M_X64) && !defined(_M_ARM64EC)) \\\n || defined(__i386__) || defined(_M_IX86) || defined(__e2k__)\n #define DXVK_ARCH_X86\n #if defined(__x86_64__) || defined(_M_X64) || defined(__e2k__)\n #define DXVK_ARCH_X86_64\n #endif\n#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)\n #define DXVK_ARCH_ARM64\n#endif\n\n#ifdef DXVK_ARCH_X86\n #ifndef _MSC_VER\n #if defined(_WIN32) && (defined(__AVX__) || defined(__AVX2__))\n #error \"AVX-enabled builds not supported due to stack alignment issues.\"\n #endif\n #if defined(__WINE__) && defined(__clang__)\n #pragma push_macro(\"_WIN32\")\n #undef _WIN32\n #endif\n #include \n #if defined(__WINE__) && defined(__clang__)\n #pragma pop_macro(\"_WIN32\")\n #endif\n #else\n #include \n #endif\n#endif\n\n#include \"util_likely.h\"\n#include \"util_math.h\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace dxvk::bit {\n\n template\n T cast(const J& src) {\n static_assert(sizeof(T) == sizeof(J));\n static_assert(std::is_trivially_copyable::value && std::is_trivial::value);\n\n T dst;\n std::memcpy(&dst, &src, sizeof(T));\n return dst;\n }\n \n template\n T extract(T value, uint32_t fst, uint32_t lst) {\n return (value >> fst) & ~(~T(0) << (lst - fst + 1));\n }\n\n template\n T popcnt(T n) {\n n -= ((n >> 1u) & T(0x5555555555555555ull));\n n = (n & T(0x3333333333333333ull)) + ((n >> 2u) & T(0x3333333333333333ull));\n n = (n + (n >> 4u)) & T(0x0f0f0f0f0f0f0f0full);\n n *= T(0x0101010101010101ull);\n return n >> (8u 
* (sizeof(T) - 1u));\n }\n\n inline uint32_t tzcnt(uint32_t n) {\n #if defined(_MSC_VER) && !defined(__clang__)\n if(n == 0)\n return 32;\n return _tzcnt_u32(n);\n #elif defined(__BMI__)\n return __tzcnt_u32(n);\n #elif defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__))\n // tzcnt is encoded as rep bsf, so we can use it on all\n // processors, but the behaviour of zero inputs differs:\n // - bsf: zf = 1, cf = ?, result = ?\n // - tzcnt: zf = 0, cf = 1, result = 32\n // We'll have to handle this case manually.\n uint32_t res;\n uint32_t tmp;\n asm (\n \"tzcnt %2, %0;\"\n \"mov $32, %1;\"\n \"test %2, %2;\"\n \"cmovz %1, %0;\"\n : \"=&r\" (res), \"=&r\" (tmp)\n : \"r\" (n)\n : \"cc\");\n return res;\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? __builtin_ctz(n) : 32;\n #else\n uint32_t r = 31;\n n &= -n;\n r -= (n & 0x0000FFFF) ? 16 : 0;\n r -= (n & 0x00FF00FF) ? 8 : 0;\n r -= (n & 0x0F0F0F0F) ? 4 : 0;\n r -= (n & 0x33333333) ? 2 : 0;\n r -= (n & 0x55555555) ? 1 : 0;\n return n != 0 ? r : 32;\n #endif\n }\n\n inline uint32_t tzcnt(uint64_t n) {\n #if defined(DXVK_ARCH_X86_64) && defined(_MSC_VER) && !defined(__clang__)\n if(n == 0)\n return 64;\n return (uint32_t)_tzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && defined(__BMI__)\n return __tzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && (defined(__GNUC__) || defined(__clang__))\n uint64_t res;\n uint64_t tmp;\n asm (\n \"tzcnt %2, %0;\"\n \"mov $64, %1;\"\n \"test %2, %2;\"\n \"cmovz %1, %0;\"\n : \"=&r\" (res), \"=&r\" (tmp)\n : \"r\" (n)\n : \"cc\");\n return res;\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? 
__builtin_ctzll(n) : 64;\n #else\n uint32_t lo = uint32_t(n);\n if (lo) {\n return tzcnt(lo);\n } else {\n uint32_t hi = uint32_t(n >> 32);\n return tzcnt(hi) + 32;\n }\n #endif\n }\n\n inline uint32_t bsf(uint32_t n) {\n #if (defined(__GNUC__) || defined(__clang__)) && !defined(__BMI__) && defined(DXVK_ARCH_X86)\n uint32_t res;\n asm (\"tzcnt %1,%0\"\n : \"=r\" (res)\n : \"r\" (n)\n : \"cc\");\n return res;\n #else\n return tzcnt(n);\n #endif\n }\n\n inline uint32_t bsf(uint64_t n) {\n #if (defined(__GNUC__) || defined(__clang__)) && !defined(__BMI__) && defined(DXVK_ARCH_X86_64)\n uint64_t res;\n asm (\"tzcnt %1,%0\"\n : \"=r\" (res)\n : \"r\" (n)\n : \"cc\");\n return res;\n #else\n return tzcnt(n);\n #endif\n }\n\n inline uint32_t lzcnt(uint32_t n) {\n #if defined(_MSC_VER) && !defined(__clang__) && !defined(__LZCNT__)\n unsigned long bsr;\n if(n == 0)\n return 32;\n _BitScanReverse(&bsr, n);\n return 31-bsr;\n #elif (defined(_MSC_VER) && !defined(__clang__)) || defined(__LZCNT__)\n return _lzcnt_u32(n);\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? __builtin_clz(n) : 32;\n #else\n uint32_t r = 0;\n\n if (n == 0)\treturn 32;\n\n if (n <= 0x0000FFFF) { r += 16; n <<= 16; }\n if (n <= 0x00FFFFFF) { r += 8; n <<= 8; }\n if (n <= 0x0FFFFFFF) { r += 4; n <<= 4; }\n if (n <= 0x3FFFFFFF) { r += 2; n <<= 2; }\n if (n <= 0x7FFFFFFF) { r += 1; n <<= 1; }\n\n return r;\n #endif\n }\n\n inline uint32_t lzcnt(uint64_t n) {\n #if defined(_MSC_VER) && !defined(__clang__) && !defined(__LZCNT__) && defined(DXVK_ARCH_X86_64)\n unsigned long bsr;\n if(n == 0)\n return 64;\n _BitScanReverse64(&bsr, n);\n return 63-bsr;\n #elif defined(DXVK_ARCH_X86_64) && ((defined(_MSC_VER) && !defined(__clang__)) && defined(__LZCNT__))\n return _lzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && (defined(__GNUC__) || defined(__clang__))\n return n != 0 ? __builtin_clzll(n) : 64;\n #else\n uint32_t lo = uint32_t(n);\n uint32_t hi = uint32_t(n >> 32u);\n return hi ? 
lzcnt(hi) : lzcnt(lo) + 32u;\n #endif\n }\n\n template\n uint32_t pack(T& dst, uint32_t& shift, T src, uint32_t count) {\n constexpr uint32_t Bits = 8 * sizeof(T);\n if (likely(shift < Bits))\n dst |= src << shift;\n shift += count;\n return shift > Bits ? shift - Bits : 0;\n }\n\n template\n uint32_t unpack(T& dst, T src, uint32_t& shift, uint32_t count) {\n constexpr uint32_t Bits = 8 * sizeof(T);\n if (likely(shift < Bits))\n dst = (src >> shift) & ((T(1) << count) - 1);\n shift += count;\n return shift > Bits ? shift - Bits : 0;\n }\n\n\n /**\n * \\brief Clears cache lines of memory\n *\n * Uses non-temporal stores. The memory region offset\n * and size are assumed to be aligned to 64 bytes.\n * \\param [in] mem Memory region to clear\n * \\param [in] size Number of bytes to clear\n */\n inline void bclear(void* mem, size_t size) {\n #if defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))\n auto zero = _mm_setzero_si128();\n\n #if defined(__clang__)\n #pragma nounroll\n #elif defined(__GNUC__)\n #pragma GCC unroll 0\n #endif\n for (size_t i = 0; i < size; i += 64u) {\n auto* ptr = reinterpret_cast<__m128i*>(mem) + i / sizeof(zero);\n _mm_stream_si128(ptr + 0u, zero);\n _mm_stream_si128(ptr + 1u, zero);\n _mm_stream_si128(ptr + 2u, zero);\n _mm_stream_si128(ptr + 3u, zero);\n }\n #else\n std::memset(mem, 0, size);\n #endif\n }\n\n\n /**\n * \\brief Compares two aligned structs bit by bit\n *\n * \\param [in] a First struct\n * \\param [in] b Second struct\n * \\returns \\c true if the structs are equal\n */\n template\n bool bcmpeq(const T* a, const T* b) {\n static_assert(alignof(T) >= 16);\n #if defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))\n auto ai = reinterpret_cast(a);\n auto bi = reinterpret_cast(b);\n\n size_t i = 0;\n\n #if defined(__clang__)\n #pragma nounroll\n #elif defined(__GNUC__)\n #pragma GCC unroll 0\n #endif\n\n for ( ; i < 2 * (sizeof(T) / 32); i += 2) {\n __m128i 
eq0 = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i),\n _mm_load_si128(bi + i));\n __m128i eq1 = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i + 1),\n _mm_load_si128(bi + i + 1));\n __m128i eq = _mm_and_si128(eq0, eq1);\n\n int mask = _mm_movemask_epi8(eq);\n if (mask != 0xFFFF)\n return false;\n }\n\n for ( ; i < sizeof(T) / 16; i++) {\n __m128i eq = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i),\n _mm_load_si128(bi + i));\n\n int mask = _mm_movemask_epi8(eq);\n if (mask != 0xFFFF)\n return false;\n }\n\n return true;\n #else\n return !std::memcmp(a, b, sizeof(T));\n #endif\n }\n\n template \n class bitset {\n static constexpr size_t Dwords = align(Bits, 32) / 32;\n public:\n\n constexpr bitset()\n : m_dwords() {\n\n }\n\n constexpr bool get(uint32_t idx) const {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n return m_dwords[dword] & (1u << bit);\n }\n\n constexpr void set(uint32_t idx, bool value) {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n if (value)\n m_dwords[dword] |= 1u << bit;\n else\n m_dwords[dword] &= ~(1u << bit);\n }\n\n constexpr bool exchange(uint32_t idx, bool value) {\n bool oldValue = get(idx);\n set(idx, value);\n return oldValue;\n }\n\n constexpr void flip(uint32_t idx) {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n m_dwords[dword] ^= 1u << bit;\n }\n\n constexpr void setAll() {\n if constexpr (Bits % 32 == 0) {\n for (size_t i = 0; i < Dwords; i++)\n m_dwords[i] = std::numeric_limits::max();\n }\n else {\n for (size_t i = 0; i < Dwords - 1; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n m_dwords[Dwords - 1] = (1u << (Bits % 32)) - 1;\n }\n }\n\n constexpr void clearAll() {\n for (size_t i = 0; 
i < Dwords; i++)\n m_dwords[i] = 0;\n }\n\n constexpr bool any() const {\n for (size_t i = 0; i < Dwords; i++) {\n if (m_dwords[i] != 0)\n return true;\n }\n\n return false;\n }\n\n constexpr uint32_t& dword(uint32_t idx) {\n return m_dwords[idx];\n }\n\n constexpr size_t bitCount() {\n return Bits;\n }\n\n constexpr size_t dwordCount() {\n return Dwords;\n }\n\n constexpr bool operator [] (uint32_t idx) const {\n return get(idx);\n }\n\n constexpr void setN(uint32_t bits) {\n uint32_t fullDwords = bits / 32;\n uint32_t offset = bits % 32;\n\n for (size_t i = 0; i < fullDwords; i++)\n m_dwords[i] = std::numeric_limits::max();\n \n if (offset > 0)\n m_dwords[fullDwords] = (1u << offset) - 1;\n }\n\n private:\n\n uint32_t m_dwords[Dwords];\n\n };\n\n class bitvector {\n public:\n\n bool get(uint32_t idx) const {\n uint32_t dword = idx / 32;\n uint32_t bit = idx % 32;\n\n return m_dwords[dword] & (1u << bit);\n }\n\n void ensureSize(uint32_t bitCount) {\n uint32_t dword = bitCount / 32;\n if (unlikely(dword >= m_dwords.size())) {\n m_dwords.resize(dword + 1);\n }\n m_bitCount = std::max(m_bitCount, bitCount);\n }\n\n void set(uint32_t idx, bool value) {\n ensureSize(idx + 1);\n\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n if (value)\n m_dwords[dword] |= 1u << bit;\n else\n m_dwords[dword] &= ~(1u << bit);\n }\n\n bool exchange(uint32_t idx, bool value) {\n ensureSize(idx + 1);\n\n bool oldValue = get(idx);\n set(idx, value);\n return oldValue;\n }\n\n void flip(uint32_t idx) {\n ensureSize(idx + 1);\n\n uint32_t dword = idx / 32;\n uint32_t bit = idx % 32;\n\n m_dwords[dword] ^= 1u << bit;\n }\n\n void setAll() {\n if (m_bitCount % 32 == 0) {\n for (size_t i = 0; i < m_dwords.size(); i++)\n m_dwords[i] = std::numeric_limits::max();\n }\n else {\n for (size_t i = 0; i < m_dwords.size() - 1; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n m_dwords[m_dwords.size() - 1] = (1u << (m_bitCount % 32)) - 1;\n }\n }\n\n void clearAll() {\n for (size_t i = 0; i < 
m_dwords.size(); i++)\n m_dwords[i] = 0;\n }\n\n bool any() const {\n for (size_t i = 0; i < m_dwords.size(); i++) {\n if (m_dwords[i] != 0)\n return true;\n }\n\n return false;\n }\n\n uint32_t& dword(uint32_t idx) {\n return m_dwords[idx];\n }\n\n size_t bitCount() const {\n return m_bitCount;\n }\n\n size_t dwordCount() const {\n return m_dwords.size();\n }\n\n bool operator [] (uint32_t idx) const {\n return get(idx);\n }\n\n void setN(uint32_t bits) {\n ensureSize(bits);\n\n uint32_t fullDwords = bits / 32;\n uint32_t offset = bits % 32;\n\n for (size_t i = 0; i < fullDwords; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n if (offset > 0)\n m_dwords[fullDwords] = (1u << offset) - 1;\n }\n\n private:\n\n std::vector m_dwords;\n uint32_t m_bitCount = 0;\n\n };\n\n template\n class BitMask {\n\n public:\n\n class iterator {\n public:\n using iterator_category = std::input_iterator_tag;\n using value_type = T;\n using difference_type = T;\n using pointer = const T*;\n using reference = T;\n\n explicit iterator(T flags)\n : m_mask(flags) { }\n\n iterator& operator ++ () {\n m_mask &= m_mask - 1;\n return *this;\n }\n\n iterator operator ++ (int) {\n iterator retval = *this;\n m_mask &= m_mask - 1;\n return retval;\n }\n\n T operator * () const {\n return bsf(m_mask);\n }\n\n bool operator == (iterator other) const { return m_mask == other.m_mask; }\n bool operator != (iterator other) const { return m_mask != other.m_mask; }\n\n private:\n\n T m_mask;\n\n };\n\n BitMask()\n : m_mask(0) { }\n\n explicit BitMask(T n)\n : m_mask(n) { }\n\n iterator begin() {\n return iterator(m_mask);\n }\n\n iterator end() {\n return iterator(0);\n }\n\n private:\n\n T m_mask;\n\n };\n\n\n /**\n * \\brief Encodes float as fixed point\n *\n * Rounds away from zero. 
If this is not suitable for\n * certain use cases, implement round to nearest even.\n * \\tparam T Integer type, may be signed\n * \\tparam I Integer bits\n * \\tparam F Fractional bits\n * \\param n Float to encode\n * \\returns Encoded fixed-point value\n */\n template\n T encodeFixed(float n) {\n if (n != n)\n return 0u;\n\n n *= float(1u << F);\n\n if constexpr (std::is_signed_v) {\n n = std::max(n, -float(1u << (I + F - 1u)));\n n = std::min(n, float(1u << (I + F - 1u)) - 1.0f);\n n += n < 0.0f ? -0.5f : 0.5f;\n } else {\n n = std::max(n, 0.0f);\n n = std::min(n, float(1u << (I + F)) - 1.0f);\n n += 0.5f;\n }\n\n T result = T(n);\n\n if constexpr (std::is_signed_v)\n result &= ((T(1u) << (I + F)) - 1u);\n\n return result;\n }\n\n\n /**\n * \\brief Decodes fixed-point integer to float\n *\n * \\tparam T Integer type, may be signed\n * \\tparam I Integer bits\n * \\tparam F Fractional bits\n * \\param n Number to decode\n * \\returns Decoded number\n */\n template\n float decodeFixed(T n) {\n // Sign-extend as necessary\n if constexpr (std::is_signed_v)\n n -= (n & (T(1u) << (I + F - 1u))) << 1u;\n\n return float(n) / float(1u << F);\n }\n\n\n /**\n * \\brief Inserts one null bit after each bit\n */\n inline uint32_t split2(uint32_t c) {\n c = (c ^ (c << 8u)) & 0x00ff00ffu;\n c = (c ^ (c << 4u)) & 0x0f0f0f0fu;\n c = (c ^ (c << 2u)) & 0x33333333u;\n c = (c ^ (c << 1u)) & 0x55555555u;\n return c;\n }\n\n\n /**\n * \\brief Inserts two null bits after each bit\n */\n inline uint64_t split3(uint64_t c) {\n c = (c | c << 32u) & 0x001f00000000ffffull;\n c = (c | c << 16u) & 0x001f0000ff0000ffull;\n c = (c | c << 8u) & 0x100f00f00f00f00full;\n c = (c | c << 4u) & 0x10c30c30c30c30c3ull;\n c = (c | c << 2u) & 0x1249249249249249ull;\n return c;\n }\n\n\n /**\n * \\brief Interleaves bits from two integers\n *\n * Both numbers must fit into 16 bits.\n * \\param [in] x X coordinate\n * \\param [in] y Y coordinate\n * \\returns Morton code of x and y\n */\n inline uint32_t 
interleave(uint16_t x, uint16_t y) {\n return split2(x) | (split2(y) << 1u);\n }\n\n\n /**\n * \\brief Interleaves bits from three integers\n *\n * All three numbers must fit into 16 bits.\n */\n inline uint64_t interleave(uint16_t x, uint16_t y, uint16_t z) {\n return split3(x) | (split3(y) << 1u) | (split3(z) << 2u);\n }\n\n\n /**\n * \\brief 48-bit integer storage type\n */\n struct uint48_t {\n explicit uint48_t(uint64_t n)\n : a(uint16_t(n)), b(uint16_t(n >> 16)), c(uint16_t(n >> 32)) { }\n\n uint16_t a;\n uint16_t b;\n uint16_t c;\n\n explicit operator uint64_t () const {\n // GCC generates worse code if we promote to uint64 directly\n uint32_t lo = uint32_t(a) | (uint32_t(b) << 16);\n return uint64_t(lo) | (uint64_t(c) << 32);\n }\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_module.h", "class generates {\n public:\n explicit SpirvModule(uint32_t version) {\n this->instImportGlsl450();\n }\n ~SpirvModule() {\n \n }\n SpirvCodeBuffer compile() {\n SpirvCodeBuffer result;\n result.putHeader(m_version, m_id);\n result.append(m_capabilities);\n result.append(m_extensions);\n result.append(m_instExt);\n result.append(m_memoryModel);\n result.append(m_entryPoints);\n result.append(m_execModeInfo);\n result.append(m_debugNames);\n result.append(m_annotations);\n result.append(m_typeConstDefs);\n result.append(m_variables);\n\n // Perform some crude dead code elimination. 
In some cases, our compilers\n // may emit invalid code, such as an unreachable block branching to a loop's\n // continue block, but those cases cannot be reasonably detected up-front.\n std::unordered_set reachableBlocks;\n std::unordered_set mergeBlocks;\n\n classifyBlocks(reachableBlocks, mergeBlocks);\n\n bool reachable = true;\n\n for (auto ins : m_code) {\n if (ins.opCode() == spv::OpFunctionEnd) {\n reachable = true;\n result.append(ins);\n } else if (ins.opCode() == spv::OpLabel) {\n uint32_t labelId = ins.arg(1);\n\n if ((reachable = reachableBlocks.find(labelId) != reachableBlocks.end())) {\n result.append(ins);\n } else if (mergeBlocks.find(labelId) != mergeBlocks.end()) {\n result.append(ins);\n result.putIns(spv::OpUnreachable, 1);\n }\n } else if (reachable) {\n result.append(ins);\n }\n }\n\n return result;\n }\n uint32_t allocateId() {\n return m_id++;\n }\n bool hasCapability(\n spv::Capability capability) {\n for (auto ins : m_capabilities) {\n if (ins.opCode() == spv::OpCapability && ins.arg(1) == capability)\n return true;\n }\n\n return false;\n }\n void enableCapability(\n spv::Capability capability) {\n // Scan the generated instructions to check\n // whether we already enabled the capability.\n if (!hasCapability(capability)) {\n m_capabilities.putIns (spv::OpCapability, 2);\n m_capabilities.putWord(capability);\n }\n }\n void enableExtension(\n const char* extensionName) {\n m_extensions.putIns (spv::OpExtension, 1 + m_extensions.strLen(extensionName));\n m_extensions.putStr (extensionName);\n }\n void addEntryPoint(\n uint32_t entryPointId,\n spv::ExecutionModel executionModel,\n const char* name) {\n m_entryPoints.putIns (spv::OpEntryPoint, 3 + m_entryPoints.strLen(name) + m_interfaceVars.size());\n m_entryPoints.putWord (executionModel);\n m_entryPoints.putWord (entryPointId);\n m_entryPoints.putStr (name);\n \n for (uint32_t varId : m_interfaceVars)\n m_entryPoints.putWord(varId);\n }\n void setMemoryModel(\n spv::AddressingModel 
addressModel,\n spv::MemoryModel memoryModel) {\n m_memoryModel.putIns (spv::OpMemoryModel, 3);\n m_memoryModel.putWord (addressModel);\n m_memoryModel.putWord (memoryModel);\n }\n void setExecutionMode(\n uint32_t entryPointId,\n spv::ExecutionMode executionMode) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 3 + argCount);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(executionMode);\n\n for (uint32_t i = 0; i < argCount; i++)\n m_execModeInfo.putWord(args[i]);\n }\n void setExecutionMode(\n uint32_t entryPointId,\n spv::ExecutionMode executionMode,\n uint32_t argCount,\n const uint32_t* args) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 3 + argCount);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(executionMode);\n\n for (uint32_t i = 0; i < argCount; i++)\n m_execModeInfo.putWord(args[i]);\n }\n void setInvocations(\n uint32_t entryPointId,\n uint32_t invocations) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 4);\n m_execModeInfo.putWord (entryPointId);\n m_execModeInfo.putWord (spv::ExecutionModeInvocations);\n m_execModeInfo.putInt32(invocations);\n }\n void setLocalSize(\n uint32_t entryPointId,\n uint32_t x,\n uint32_t y,\n uint32_t z) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 6);\n m_execModeInfo.putWord (entryPointId);\n m_execModeInfo.putWord (spv::ExecutionModeLocalSize);\n m_execModeInfo.putInt32(x);\n m_execModeInfo.putInt32(y);\n m_execModeInfo.putInt32(z);\n }\n void setOutputVertices(\n uint32_t entryPointId,\n uint32_t vertexCount) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 4);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(spv::ExecutionModeOutputVertices);\n m_execModeInfo.putWord(vertexCount);\n }\n uint32_t addDebugString(\n const char* string) {\n uint32_t resultId = this->allocateId();\n \n m_debugNames.putIns (spv::OpString,\n 2 + m_debugNames.strLen(string));\n m_debugNames.putWord(resultId);\n m_debugNames.putStr (string);\n return resultId;\n }\n void 
setDebugSource(\n spv::SourceLanguage language,\n uint32_t version,\n uint32_t file,\n const char* source) {\n uint32_t strLen = source != nullptr\n ? m_debugNames.strLen(source) : 0;\n \n m_debugNames.putIns (spv::OpSource, 4 + strLen);\n m_debugNames.putWord(language);\n m_debugNames.putWord(version);\n m_debugNames.putWord(file);\n \n if (source != nullptr)\n m_debugNames.putStr(source);\n }\n void setDebugName(\n uint32_t expressionId,\n const char* debugName) {\n m_debugNames.putIns (spv::OpName, 2 + m_debugNames.strLen(debugName));\n m_debugNames.putWord(expressionId);\n m_debugNames.putStr (debugName);\n }\n void setDebugMemberName(\n uint32_t structId,\n uint32_t memberId,\n const char* debugName) {\n m_debugNames.putIns (spv::OpMemberName, 3 + m_debugNames.strLen(debugName));\n m_debugNames.putWord(structId);\n m_debugNames.putWord(memberId);\n m_debugNames.putStr (debugName);\n }\n uint32_t constBool(\n bool v) {\n return this->defConst(v\n ? spv::OpConstantTrue\n : spv::OpConstantFalse,\n this->defBoolType(),\n 0, nullptr);\n }\n uint32_t consti32(\n int32_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(32, 1),\n data.size(),\n data.data());\n }\n uint32_t consti64(\n int64_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(64, 1),\n data.size(),\n data.data());\n }\n uint32_t constu32(\n uint32_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(32, 0),\n data.size(),\n data.data());\n }\n uint32_t constu64(\n uint64_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(64, 0),\n data.size(),\n data.data());\n }\n uint32_t constf32(\n float v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return 
this->defConst(\n spv::OpConstant,\n this->defFloatType(32),\n data.size(),\n data.data());\n }\n uint32_t constf64(\n double v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defFloatType(64),\n data.size(),\n data.data());\n }\n uint32_t constvec4i32(\n int32_t x,\n int32_t y,\n int32_t z,\n int32_t w) {\n std::array args = {{\n this->consti32(x), this->consti32(y),\n this->consti32(z), this->consti32(w),\n }};\n \n uint32_t scalarTypeId = this->defIntType(32, 1);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4b32(\n bool x,\n bool y,\n bool z,\n bool w) {\n std::array args = {{\n this->constBool(x), this->constBool(y),\n this->constBool(z), this->constBool(w),\n }};\n \n uint32_t scalarTypeId = this->defBoolType();\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4u32(\n uint32_t x,\n uint32_t y,\n uint32_t z,\n uint32_t w) {\n std::array args = {{\n this->constu32(x), this->constu32(y),\n this->constu32(z), this->constu32(w),\n }};\n \n uint32_t scalarTypeId = this->defIntType(32, 0);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec2f32(\n float x,\n float y) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 2);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec3f32(\n float x,\n float y,\n float z) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n this->constf32(z),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = 
this->defVectorType(scalarTypeId, 3);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4f32(\n float x,\n float y,\n float z,\n float w) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n this->constf32(z), this->constf32(w),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constfReplicant(\n float replicant,\n uint32_t count) {\n uint32_t value = this->constf32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constbReplicant(\n bool replicant,\n uint32_t count) {\n uint32_t value = this->constBool(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defBoolType();\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constiReplicant(\n int32_t replicant,\n uint32_t count) {\n uint32_t value = this->consti32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defIntType(32, 1);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constuReplicant(\n int32_t replicant,\n uint32_t count) {\n uint32_t value = this->constu32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar 
composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defIntType(32, 0);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constComposite(\n uint32_t typeId,\n uint32_t constCount,\n const uint32_t* constIds) {\n return this->defConst(\n spv::OpConstantComposite,\n typeId, constCount, constIds);\n }\n uint32_t constUndef(\n uint32_t typeId) {\n return this->defConst(spv::OpUndef,\n typeId, 0, nullptr);\n }\n uint32_t constNull(\n uint32_t typeId) {\n return this->defConst(spv::OpConstantNull,\n typeId, 0, nullptr);\n }\n uint32_t lateConst32(\n uint32_t typeId) {\n uint32_t resultId = this->allocateId();\n m_lateConsts.insert(resultId);\n\n m_typeConstDefs.putIns (spv::OpConstant, 4);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(0);\n return resultId;\n }\n void setLateConst(\n uint32_t constId,\n const uint32_t* argIds) {\n for (auto ins : m_typeConstDefs) {\n if (ins.opCode() != spv::OpConstant\n && ins.opCode() != spv::OpConstantComposite)\n continue;\n \n if (ins.arg(2) != constId)\n continue;\n\n for (uint32_t i = 3; i < ins.length(); i++)\n ins.setArg(i, argIds[i - 3]);\n\n return;\n }\n }\n uint32_t specConstBool(\n bool v) {\n uint32_t typeId = this->defBoolType();\n uint32_t resultId = this->allocateId();\n \n const spv::Op op = v\n ? 
spv::OpSpecConstantTrue\n : spv::OpSpecConstantFalse;\n \n m_typeConstDefs.putIns (op, 3);\n m_typeConstDefs.putWord (typeId);\n m_typeConstDefs.putWord (resultId);\n return resultId;\n }\n uint32_t specConst32(\n uint32_t typeId,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpSpecConstant, 4);\n m_typeConstDefs.putWord (typeId);\n m_typeConstDefs.putWord (resultId);\n m_typeConstDefs.putWord (value);\n return resultId;\n }\n void decorate(\n uint32_t object,\n spv::Decoration decoration) {\n m_annotations.putIns (spv::OpDecorate, 3);\n m_annotations.putWord (object);\n m_annotations.putWord (decoration);\n }\n void decorateArrayStride(\n uint32_t object,\n uint32_t stride) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationArrayStride);\n m_annotations.putInt32(stride);\n }\n void decorateBinding(\n uint32_t object,\n uint32_t binding) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBinding);\n m_annotations.putInt32(binding);\n }\n void decorateBlock(\n uint32_t object) {\n m_annotations.putIns (spv::OpDecorate, 3);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBlock);\n }\n void decorateBuiltIn(\n uint32_t object,\n spv::BuiltIn builtIn) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBuiltIn);\n m_annotations.putWord (builtIn);\n }\n void decorateComponent(\n uint32_t object,\n uint32_t location) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationComponent);\n m_annotations.putInt32(location);\n }\n void decorateDescriptorSet(\n uint32_t object,\n uint32_t set) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationDescriptorSet);\n 
m_annotations.putInt32(set);\n }\n void decorateIndex(\n uint32_t object,\n uint32_t index) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationIndex);\n m_annotations.putInt32(index);\n }\n void decorateLocation(\n uint32_t object,\n uint32_t location) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationLocation);\n m_annotations.putInt32(location);\n }\n void decorateSpecId(\n uint32_t object,\n uint32_t specId) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationSpecId);\n m_annotations.putInt32(specId);\n }\n void decorateXfb(\n uint32_t object,\n uint32_t streamId,\n uint32_t bufferId,\n uint32_t offset,\n uint32_t stride) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationStream);\n m_annotations.putInt32(streamId);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationXfbBuffer);\n m_annotations.putInt32(bufferId);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationXfbStride);\n m_annotations.putInt32(stride);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationOffset);\n m_annotations.putInt32(offset);\n }\n void memberDecorateBuiltIn(\n uint32_t structId,\n uint32_t memberId,\n spv::BuiltIn builtIn) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationBuiltIn);\n m_annotations.putWord (builtIn);\n }\n void memberDecorate(\n uint32_t structId,\n uint32_t memberId,\n spv::Decoration decoration) {\n m_annotations.putIns (spv::OpMemberDecorate, 4);\n m_annotations.putWord 
(structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (decoration);\n }\n void memberDecorateMatrixStride(\n uint32_t structId,\n uint32_t memberId,\n uint32_t stride) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationMatrixStride);\n m_annotations.putWord (stride);\n }\n void memberDecorateOffset(\n uint32_t structId,\n uint32_t memberId,\n uint32_t offset) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationOffset);\n m_annotations.putWord (offset);\n }\n uint32_t defVoidType() {\n return this->defType(spv::OpTypeVoid, 0, nullptr);\n }\n uint32_t defBoolType() {\n return this->defType(spv::OpTypeBool, 0, nullptr);\n }\n uint32_t defIntType(\n uint32_t width,\n uint32_t isSigned) {\n std::array args = {{ width, isSigned }};\n return this->defType(spv::OpTypeInt,\n args.size(), args.data());\n }\n uint32_t defFloatType(\n uint32_t width) {\n std::array args = {{ width }};\n return this->defType(spv::OpTypeFloat,\n args.size(), args.data());\n }\n uint32_t defVectorType(\n uint32_t elementType,\n uint32_t elementCount) {\n std::array args =\n {{ elementType, elementCount }};\n \n return this->defType(spv::OpTypeVector,\n args.size(), args.data());\n }\n uint32_t defMatrixType(\n uint32_t columnType,\n uint32_t columnCount) {\n std::array args =\n {{ columnType, columnCount }};\n \n return this->defType(spv::OpTypeMatrix,\n args.size(), args.data());\n }\n uint32_t defArrayType(\n uint32_t typeId,\n uint32_t length) {\n std::array args = {{ typeId, length }};\n \n return this->defType(spv::OpTypeArray,\n args.size(), args.data());\n }\n uint32_t defArrayTypeUnique(\n uint32_t typeId,\n uint32_t length) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeArray, 4);\n 
m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(length);\n return resultId;\n }\n uint32_t defRuntimeArrayType(\n uint32_t typeId) {\n std::array args = { typeId };\n \n return this->defType(spv::OpTypeRuntimeArray,\n args.size(), args.data());\n }\n uint32_t defRuntimeArrayTypeUnique(\n uint32_t typeId) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeRuntimeArray, 3);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(typeId);\n return resultId;\n }\n uint32_t defFunctionType(\n uint32_t returnType,\n uint32_t argCount,\n const uint32_t* argTypes) {\n std::vector args;\n args.push_back(returnType);\n \n for (uint32_t i = 0; i < argCount; i++)\n args.push_back(argTypes[i]);\n \n return this->defType(spv::OpTypeFunction,\n args.size(), args.data());\n }\n uint32_t defStructType(\n uint32_t memberCount,\n const uint32_t* memberTypes) {\n return this->defType(spv::OpTypeStruct,\n memberCount, memberTypes);\n }\n uint32_t defStructTypeUnique(\n uint32_t memberCount,\n const uint32_t* memberTypes) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeStruct, 2 + memberCount);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < memberCount; i++)\n m_typeConstDefs.putWord(memberTypes[i]);\n return resultId;\n }\n uint32_t defPointerType(\n uint32_t variableType,\n spv::StorageClass storageClass) {\n std::array args = {{\n static_cast(storageClass),\n variableType,\n }};\n \n return this->defType(spv::OpTypePointer,\n args.size(), args.data());\n }\n uint32_t defSamplerType() {\n return this->defType(spv::OpTypeSampler, 0, nullptr);\n }\n uint32_t defImageType(\n uint32_t sampledType,\n spv::Dim dimensionality,\n uint32_t depth,\n uint32_t arrayed,\n uint32_t multisample,\n uint32_t sampled,\n spv::ImageFormat format) {\n std::array args = {{\n sampledType,\n static_cast(dimensionality),\n depth, arrayed,\n multisample,\n sampled,\n 
static_cast(format)\n }};\n \n return this->defType(spv::OpTypeImage,\n args.size(), args.data());\n }\n uint32_t defSampledImageType(\n uint32_t imageType) {\n return this->defType(spv::OpTypeSampledImage, 1, &imageType);\n }\n uint32_t newVar(\n uint32_t pointerType,\n spv::StorageClass storageClass) {\n uint32_t resultId = this->allocateId();\n \n if (isInterfaceVar(storageClass))\n m_interfaceVars.push_back(resultId);\n\n auto& code = storageClass != spv::StorageClassFunction\n ? m_variables : m_code;\n\n code.putIns (spv::OpVariable, 4);\n code.putWord (pointerType);\n code.putWord (resultId);\n code.putWord (storageClass);\n return resultId;\n }\n uint32_t newVarInit(\n uint32_t pointerType,\n spv::StorageClass storageClass,\n uint32_t initialValue) {\n uint32_t resultId = this->allocateId();\n \n if (isInterfaceVar(storageClass))\n m_interfaceVars.push_back(resultId);\n\n auto& code = storageClass != spv::StorageClassFunction\n ? m_variables : m_code;\n \n code.putIns (spv::OpVariable, 5);\n code.putWord (pointerType);\n code.putWord (resultId);\n code.putWord (storageClass);\n code.putWord (initialValue);\n return resultId;\n }\n void functionBegin(\n uint32_t returnType,\n uint32_t functionId,\n uint32_t functionType,\n spv::FunctionControlMask functionControl) {\n m_code.putIns (spv::OpFunction, 5);\n m_code.putWord(returnType);\n m_code.putWord(functionId);\n m_code.putWord(functionControl);\n m_code.putWord(functionType);\n }\n uint32_t functionParameter(\n uint32_t parameterType) {\n uint32_t parameterId = this->allocateId();\n \n m_code.putIns (spv::OpFunctionParameter, 3);\n m_code.putWord(parameterType);\n m_code.putWord(parameterId);\n return parameterId;\n }\n void functionEnd() {\n m_code.putIns (spv::OpFunctionEnd, 1);\n }\n uint32_t opAccessChain(\n uint32_t resultType,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAccessChain, 4 + indexCount);\n 
  // NOTE(review): chunk starts mid-method — tail of the OpAccessChain helper.
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(composite);

    for (uint32_t i = 0; i < indexCount; i++)
      m_code.putInt32(indexArray[i]);
    return resultId;
  }

  // Queries the length of the runtime array at the given struct member index.
  uint32_t opArrayLength(
          uint32_t resultType,
          uint32_t structure,
          uint32_t memberId) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpArrayLength, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(structure);
    m_code.putWord(memberId);
    return resultId;
  }

  // Boolean reduction: true if any vector component is true.
  uint32_t opAny(
          uint32_t resultType,
          uint32_t vector) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAny, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector);
    return resultId;
  }

  // Boolean reduction: true if all vector components are true.
  uint32_t opAll(
          uint32_t resultType,
          uint32_t vector) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAll, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector);
    return resultId;
  }

  // --- Atomic operations ---------------------------------------------------
  // 'scope' and 'semantics' are result IDs of integer constants holding the
  // memory scope and memory-semantics masks, as required by the encoding.

  uint32_t opAtomicLoad(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicLoad, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    return resultId;
  }

  void opAtomicStore(
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    m_code.putIns (spv::OpAtomicStore, 5);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
  }

  uint32_t opAtomicExchange(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicExchange, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
    return resultId;
  }

  // Compare-and-swap; 'equal'/'unequal' are the memory-semantics operands
  // for the success and failure cases respectively.
  uint32_t opAtomicCompareExchange(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t equal,
          uint32_t unequal,
          uint32_t value,
          uint32_t comparator) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicCompareExchange, 9);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(equal);
    m_code.putWord(unequal);
    m_code.putWord(value);
    m_code.putWord(comparator);
    return resultId;
  }

  uint32_t opAtomicIIncrement(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicIIncrement, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    return resultId;
  }

  uint32_t opAtomicIDecrement(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicIDecrement, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    return resultId;
  }

  uint32_t opAtomicIAdd(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicIAdd, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
    return resultId;
  }

  uint32_t opAtomicISub(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicISub, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
    return resultId;
  }

  uint32_t opAtomicSMin(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicSMin, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
    return resultId;
  }

  uint32_t opAtomicSMax(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicSMax, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
    return resultId;
  }

  uint32_t opAtomicUMin(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicUMin, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
    return resultId;
  }

  uint32_t opAtomicUMax(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicUMax, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
    return resultId;
  }

  uint32_t opAtomicAnd(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicAnd, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
    return resultId;
  }

  uint32_t opAtomicOr(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicOr, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
    return resultId;
  }

  uint32_t opAtomicXor(
          uint32_t resultType,
          uint32_t pointer,
          uint32_t scope,
          uint32_t semantics,
          uint32_t value) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpAtomicXor, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(pointer);
    m_code.putWord(scope);
    m_code.putWord(semantics);
    m_code.putWord(value);
    return resultId;
  }

  // --- Bit manipulation ----------------------------------------------------

  // Reinterprets the operand's bits as the result type.
  uint32_t opBitcast(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpBitcast, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  uint32_t opBitCount(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpBitCount, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  uint32_t opBitReverse(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpBitReverse, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  // Least-significant set bit, via the GLSL.std.450 extended instruction set
  // (m_instExtGlsl450 holds the OpExtInstImport result ID).
  uint32_t opFindILsb(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450FindILsb);
    m_code.putWord(operand);
    return resultId;
  }

  uint32_t
  // NOTE(review): chunk starts mid-signature — 'uint32_t' of this return
  // type is on the previous line.
           opFindUMsb(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450FindUMsb);
    m_code.putWord(operand);
    return resultId;
  }

  // Most-significant set bit of a signed integer (GLSL.std.450).
  uint32_t opFindSMsb(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450FindSMsb);
    m_code.putWord(operand);
    return resultId;
  }

  // Inserts 'count' bits of 'insert' into 'base' starting at 'offset'.
  uint32_t opBitFieldInsert(
          uint32_t resultType,
          uint32_t base,
          uint32_t insert,
          uint32_t offset,
          uint32_t count) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpBitFieldInsert, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(base);
    m_code.putWord(insert);
    m_code.putWord(offset);
    m_code.putWord(count);
    return resultId;
  }

  // Sign-extending bit-field extraction.
  uint32_t opBitFieldSExtract(
          uint32_t resultType,
          uint32_t base,
          uint32_t offset,
          uint32_t count) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpBitFieldSExtract, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(base);
    m_code.putWord(offset);
    m_code.putWord(count);
    return resultId;
  }

  // Zero-extending bit-field extraction.
  uint32_t opBitFieldUExtract(
          uint32_t resultType,
          uint32_t base,
          uint32_t offset,
          uint32_t count) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpBitFieldUExtract, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(base);
    m_code.putWord(offset);
    m_code.putWord(count);
    return resultId;
  }

  uint32_t opBitwiseAnd(
          uint32_t resultType,
          uint32_t operand1,
          uint32_t operand2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpBitwiseAnd, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand1);
    m_code.putWord(operand2);
    return resultId;
  }

  uint32_t opBitwiseOr(
          uint32_t resultType,
          uint32_t operand1,
          uint32_t operand2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpBitwiseOr, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand1);
    m_code.putWord(operand2);
    return resultId;
  }

  uint32_t opBitwiseXor(
          uint32_t resultType,
          uint32_t operand1,
          uint32_t operand2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpBitwiseXor, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand1);
    m_code.putWord(operand2);
    return resultId;
  }

  // Bitwise complement.
  uint32_t opNot(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpNot, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  uint32_t opShiftLeftLogical(
          uint32_t resultType,
          uint32_t base,
          uint32_t shift) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpShiftLeftLogical, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(base);
    m_code.putWord(shift);
    return resultId;
  }

  uint32_t opShiftRightArithmetic(
          uint32_t resultType,
          uint32_t base,
          uint32_t shift) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpShiftRightArithmetic, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(base);
    m_code.putWord(shift);
    return resultId;
  }

  uint32_t opShiftRightLogical(
          uint32_t resultType,
          uint32_t base,
          uint32_t shift) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpShiftRightLogical, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(base);
    m_code.putWord(shift);
    return resultId;
  }

  // Float-to-signed-integer conversion.
  uint32_t opConvertFtoS(
          uint32_t resultType,
          uint32_t operand) {
  // NOTE(review): chunk starts mid-method — body of opConvertFtoS, whose
  // signature is on the previous line.
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpConvertFToS, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  // Float-to-unsigned-integer conversion.
  uint32_t opConvertFtoU(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpConvertFToU, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  // Signed-integer-to-float conversion.
  uint32_t opConvertStoF(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpConvertSToF, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  // Unsigned-integer-to-float conversion.
  uint32_t opConvertUtoF(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpConvertUToF, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  // --- Composite operations ------------------------------------------------

  // Builds a composite value from its constituents.
  uint32_t opCompositeConstruct(
          uint32_t resultType,
          uint32_t valueCount,
    const uint32_t* valueArray) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpCompositeConstruct, 3 + valueCount);
    m_code.putWord(resultType);
    m_code.putWord(resultId);

    for (uint32_t i = 0; i < valueCount; i++)
      m_code.putWord(valueArray[i]);
    return resultId;
  }

  // Extracts a member/component via a chain of literal indices.
  uint32_t opCompositeExtract(
          uint32_t resultType,
          uint32_t composite,
          uint32_t indexCount,
    const uint32_t* indexArray) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpCompositeExtract, 4 + indexCount);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(composite);

    for (uint32_t i = 0; i < indexCount; i++)
      m_code.putInt32(indexArray[i]);
    return resultId;
  }

  // Returns a copy of 'composite' with 'object' inserted at the given
  // literal index chain.
  uint32_t opCompositeInsert(
          uint32_t resultType,
          uint32_t object,
          uint32_t composite,
          uint32_t indexCount,
    const uint32_t* indexArray) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpCompositeInsert, 5 + indexCount);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(object);
    m_code.putWord(composite);

    for (uint32_t i = 0; i < indexCount; i++)
      m_code.putInt32(indexArray[i]);
    return resultId;
  }

  // --- Derivative instructions ---------------------------------------------

  uint32_t opDpdx(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpDPdx, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  uint32_t opDpdy(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpDPdy, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  uint32_t opDpdxCoarse(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpDPdxCoarse, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  uint32_t opDpdyCoarse(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpDPdyCoarse, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  uint32_t opDpdxFine(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpDPdxFine, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  uint32_t opDpdyFine(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpDPdyFine, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  // Extracts a vector component by a dynamic (non-literal) index.
  uint32_t opVectorExtractDynamic(
          uint32_t resultType,
          uint32_t vector,
          uint32_t index) {
    uint32_t resultId = this->allocateId();

    m_code.putIns
  // NOTE(review): chunk starts mid-statement — '(spv::OpVectorExtractDynamic, 5)'
  // completes the m_code.putIns call started on the previous line.
                  (spv::OpVectorExtractDynamic, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector);
    m_code.putWord(index);
    return resultId;
  }

  // Builds a vector by selecting components from two source vectors
  // via literal indices.
  uint32_t opVectorShuffle(
          uint32_t resultType,
          uint32_t vectorLeft,
          uint32_t vectorRight,
          uint32_t indexCount,
    const uint32_t* indexArray) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpVectorShuffle, 5 + indexCount);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vectorLeft);
    m_code.putWord(vectorRight);

    for (uint32_t i = 0; i < indexCount; i++)
      m_code.putInt32(indexArray[i]);
    return resultId;
  }

  // Signed integer negation.
  uint32_t opSNegate(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpSNegate, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  // Floating-point negation.
  uint32_t opFNegate(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpFNegate, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(operand);
    return resultId;
  }

  // Signed integer absolute value (GLSL.std.450).
  uint32_t opSAbs(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450SAbs);
    m_code.putWord(operand);
    return resultId;
  }

  // Floating-point absolute value (GLSL.std.450).
  uint32_t opFAbs(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450FAbs);
    m_code.putWord(operand);
    return resultId;
  }

  // Floating-point sign function (GLSL.std.450).
  uint32_t opFSign(
          uint32_t resultType,
          uint32_t operand) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450FSign);
    m_code.putWord(operand);
    return resultId;
  }

  // Linear blend of x and y by factor a (GLSL.std.450 FMix).
  uint32_t opFMix(
          uint32_t resultType,
          uint32_t x,
          uint32_t y,
          uint32_t a) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 8);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450FMix);
    m_code.putWord(x);
    m_code.putWord(y);
    m_code.putWord(a);
    return resultId;
  }

  // 3-component cross product (GLSL.std.450).
  uint32_t opCross(
          uint32_t resultType,
          uint32_t x,
          uint32_t y) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450Cross);
    m_code.putWord(x);
    m_code.putWord(y);
    return resultId;
  }

  // --- Arithmetic ----------------------------------------------------------

  uint32_t opIAdd(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpIAdd, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opISub(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpISub, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opFAdd(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpFAdd, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opFSub(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpFSub, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opSDiv(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpSDiv, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opUDiv(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpUDiv, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  // Signed remainder (sign follows operand a, per OpSRem semantics).
  uint32_t opSRem(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpSRem, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opUMod(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpUMod, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opFDiv(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpFDiv, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opIMul(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpIMul, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opFMul(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpFMul, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  // Scales a floating-point vector by a scalar.
  uint32_t opVectorTimesScalar(
          uint32_t resultType,
          uint32_t vector,
          uint32_t scalar) {
  // NOTE(review): chunk starts mid-method — body of opVectorTimesScalar,
  // whose signature is on the previous line.
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpVectorTimesScalar, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector);
    m_code.putWord(scalar);
    return resultId;
  }

  // --- Matrix operations ---------------------------------------------------

  uint32_t opMatrixTimesMatrix(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpMatrixTimesMatrix, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opMatrixTimesVector(
          uint32_t resultType,
          uint32_t matrix,
          uint32_t vector) {
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpMatrixTimesVector, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(matrix);
    m_code.putWord(vector);
    return resultId;
  }

  uint32_t opVectorTimesMatrix(
          uint32_t resultType,
          uint32_t vector,
          uint32_t matrix) {
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpVectorTimesMatrix, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector);
    m_code.putWord(matrix);
    return resultId;
  }

  uint32_t opTranspose(
          uint32_t resultType,
          uint32_t matrix) {
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpTranspose, 4);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(matrix);
    return resultId;
  }

  // Matrix inverse (GLSL.std.450 MatrixInverse).
  uint32_t opInverse(
          uint32_t resultType,
          uint32_t matrix) {
    uint32_t resultId = this->allocateId();

    m_code.putIns(spv::OpExtInst, 6);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450MatrixInverse);
    m_code.putWord(matrix);
    return resultId;
  }

  // Fused multiply-add: a * b + c (GLSL.std.450 Fma).
  uint32_t opFFma(
          uint32_t resultType,
          uint32_t a,
          uint32_t b,
          uint32_t c) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 8);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450Fma);
    m_code.putWord(a);
    m_code.putWord(b);
    m_code.putWord(c);
    return resultId;
  }

  // --- Min/max helpers (GLSL.std.450) --------------------------------------
  // F* = floating-point, N* = NaN-aware, S*/U* = signed/unsigned integer.

  uint32_t opFMax(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450FMax);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opFMin(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450FMin);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opNMax(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450NMax);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opNMin(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450NMin);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opSMax(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450SMax);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opSMin(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId =
  // NOTE(review): chunk starts mid-statement — 'this->allocateId()' completes
  // the 'uint32_t resultId =' initializer started on the previous line.
                         this->allocateId();

    m_code.putIns (spv::OpExtInst, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450SMin);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opUMax(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450UMax);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  uint32_t opUMin(
          uint32_t resultType,
          uint32_t a,
          uint32_t b) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 7);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450UMin);
    m_code.putWord(a);
    m_code.putWord(b);
    return resultId;
  }

  // Clamps x to [minVal, maxVal] (GLSL.std.450 FClamp).
  uint32_t opFClamp(
          uint32_t resultType,
          uint32_t x,
          uint32_t minVal,
          uint32_t maxVal) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 8);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450FClamp);
    m_code.putWord(x);
    m_code.putWord(minVal);
    m_code.putWord(maxVal);
    return resultId;
  }

  // NaN-aware clamp (GLSL.std.450 NClamp).
  uint32_t opNClamp(
          uint32_t resultType,
          uint32_t x,
          uint32_t minVal,
          uint32_t maxVal) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpExtInst, 8);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(m_instExtGlsl450);
    m_code.putWord(GLSLstd450NClamp);
    m_code.putWord(x);
    m_code.putWord(minVal);
    m_code.putWord(maxVal);
    return resultId;
  }

  // --- Component-wise comparisons (result is a boolean scalar/vector) ------

  uint32_t opIEqual(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpIEqual, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opINotEqual(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpINotEqual, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opSLessThan(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpSLessThan, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opSLessThanEqual(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpSLessThanEqual, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opSGreaterThan(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpSGreaterThan, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opSGreaterThanEqual(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpSGreaterThanEqual, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opULessThan(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpULessThan, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opULessThanEqual(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpULessThanEqual, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opUGreaterThan(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpUGreaterThan, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opUGreaterThanEqual(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpUGreaterThanEqual, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opFOrdEqual(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpFOrdEqual, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  // Unordered inequality: true if operands compare unequal or either is NaN.
  uint32_t opFUnordNotEqual(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpFUnordNotEqual, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  uint32_t opFOrdLessThan(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

    m_code.putIns (spv::OpFOrdLessThan, 5);
    m_code.putWord(resultType);
    m_code.putWord(resultId);
    m_code.putWord(vector1);
    m_code.putWord(vector2);
    return resultId;
  }

  // NOTE(review): chunk ends mid-method — the body of this helper continues
  // past the end of this chunk.
  uint32_t opFOrdLessThanEqual(
          uint32_t resultType,
          uint32_t vector1,
          uint32_t vector2) {
    uint32_t resultId = this->allocateId();

m_code.putIns (spv::OpFOrdLessThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdGreaterThan(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFOrdGreaterThan, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opFOrdGreaterThanEqual(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFOrdGreaterThanEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opLogicalEqual(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalNotEqual(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalNotEqual, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalAnd(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalAnd, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalOr(\n uint32_t resultType,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalOr, 5);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opLogicalNot(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLogicalNot, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opDot(\n uint32_t resultType,\n uint32_t vector1,\n uint32_t vector2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpDot, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector1);\n m_code.putWord(vector2);\n return resultId;\n }\n uint32_t opSin(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Sin);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opCos(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Cos);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opSqrt(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Sqrt);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opInverseSqrt(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InverseSqrt);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opNormalize(\n 
uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Normalize);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRawAccessChain(\n uint32_t resultType,\n uint32_t base,\n uint32_t stride,\n uint32_t index,\n uint32_t offset,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpRawAccessChainNV, operand ? 8 : 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(base);\n m_code.putWord(stride);\n m_code.putWord(index);\n m_code.putWord(offset);\n\n if (operand)\n m_code.putWord(operand);\n\n return resultId;\n }\n uint32_t opReflect(\n uint32_t resultType,\n uint32_t incident,\n uint32_t normal) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Reflect);\n m_code.putWord(incident);\n m_code.putWord(normal);\n return resultId;\n }\n uint32_t opLength(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Length);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opExp2(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Exp2);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opExp(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Exp);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opLog2(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Log2);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opPow(\n uint32_t resultType,\n uint32_t base,\n uint32_t exponent) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Pow);\n m_code.putWord(base);\n m_code.putWord(exponent);\n return resultId;\n }\n uint32_t opFract(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Fract);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opCeil(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Ceil);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFloor(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Floor);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRound(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Round);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRoundEven(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450RoundEven);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opTrunc(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Trunc);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFConvert(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpFConvert, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opPackHalf2x16(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450PackHalf2x16);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opUnpackHalf2x16(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UnpackHalf2x16);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opSelect(\n uint32_t resultType,\n uint32_t condition,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSelect, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n 
m_code.putWord(condition);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opIsNan(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIsNan, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opIsInf(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIsInf, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFunctionCall(\n uint32_t resultType,\n uint32_t functionId,\n uint32_t argCount,\n const uint32_t* argIds) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFunctionCall, 4 + argCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(functionId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_code.putWord(argIds[i]);\n return resultId;\n }\n void opLabel(\n uint32_t labelId) {\n m_code.putIns (spv::OpLabel, 2);\n m_code.putWord(labelId);\n\n m_blockId = labelId;\n }\n uint32_t opLoad(\n uint32_t typeId,\n uint32_t pointerId) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLoad, 4 + getMemoryOperandWordCount(operands));\n m_code.putWord(typeId);\n m_code.putWord(resultId);\n m_code.putWord(pointerId);\n\n putMemoryOperands(operands);\n return resultId;\n }\n uint32_t opLoad(\n uint32_t typeId,\n uint32_t pointerId,\n const SpirvMemoryOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLoad, 4 + getMemoryOperandWordCount(operands));\n m_code.putWord(typeId);\n m_code.putWord(resultId);\n m_code.putWord(pointerId);\n\n putMemoryOperands(operands);\n return resultId;\n }\n void opStore(\n uint32_t pointerId,\n uint32_t valueId) {\n m_code.putIns (spv::OpStore, 3 + getMemoryOperandWordCount(operands));\n m_code.putWord(pointerId);\n 
m_code.putWord(valueId);\n\n putMemoryOperands(operands);\n }\n void opStore(\n uint32_t pointerId,\n uint32_t valueId,\n const SpirvMemoryOperands& operands) {\n m_code.putIns (spv::OpStore, 3 + getMemoryOperandWordCount(operands));\n m_code.putWord(pointerId);\n m_code.putWord(valueId);\n\n putMemoryOperands(operands);\n }\n uint32_t opInterpolateAtCentroid(\n uint32_t resultType,\n uint32_t interpolant) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtCentroid);\n m_code.putWord(interpolant);\n return resultId;\n }\n uint32_t opInterpolateAtSample(\n uint32_t resultType,\n uint32_t interpolant,\n uint32_t sample) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtSample);\n m_code.putWord(interpolant);\n m_code.putWord(sample);\n return resultId;\n }\n uint32_t opInterpolateAtOffset(\n uint32_t resultType,\n uint32_t interpolant,\n uint32_t offset) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtOffset);\n m_code.putWord(interpolant);\n m_code.putWord(offset);\n return resultId;\n }\n uint32_t opImage(\n uint32_t resultType,\n uint32_t sampledImage) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpImage, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n return resultId;\n }\n uint32_t opImageRead(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseRead\n : spv::OpImageRead;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n void opImageWrite(\n uint32_t image,\n uint32_t coordinates,\n uint32_t texel,\n const SpirvImageOperands& operands) {\n m_code.putIns (spv::OpImageWrite,\n 4 + getImageOperandWordCount(operands));\n m_code.putWord(image);\n m_code.putWord(coordinates);\n m_code.putWord(texel);\n \n putImageOperands(operands);\n }\n uint32_t opImageSparseTexelsResident(\n uint32_t resultType,\n uint32_t residentCode) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpImageSparseTexelsResident, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(residentCode);\n\n return resultId;\n }\n uint32_t opImageTexelPointer(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n uint32_t sample) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageTexelPointer, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n m_code.putWord(sample);\n return resultId;\n }\n uint32_t opSampledImage(\n uint32_t resultType,\n uint32_t image,\n uint32_t sampler) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSampledImage, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(sampler);\n return resultId;\n }\n uint32_t opImageQuerySizeLod(\n uint32_t resultType,\n uint32_t image,\n uint32_t lod) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySizeLod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(lod);\n return resultId;\n }\n uint32_t opImageQuerySize(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = 
this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySize, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageQueryLevels(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQueryLevels, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageQueryLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQueryLod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n return resultId;\n }\n uint32_t opImageQuerySamples(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySamples, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageFetch(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n\n spv::Op op = operands.sparse\n ? spv::OpImageSparseFetch\n : spv::OpImageFetch;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageGather(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t component,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseGather\n : spv::OpImageGather;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(component);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageDrefGather(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseDrefGather\n : spv::OpImageDrefGather;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleImplicitLod\n : spv::OpImageSampleImplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleExplicitLod\n : spv::OpImageSampleExplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjImplicitLod\n : spv::OpImageSampleProjImplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjExplicitLod\n : spv::OpImageSampleProjExplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleDrefImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleDrefImplicitLod\n : spv::OpImageSampleDrefImplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleDrefExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleDrefExplicitLod\n : spv::OpImageSampleDrefExplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjDrefImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjDrefImplicitLod\n : spv::OpImageSampleProjDrefImplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjDrefExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleProjDrefExplicitLod\n : spv::OpImageSampleProjDrefExplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opGroupNonUniformBallot(\n uint32_t resultType,\n uint32_t execution,\n uint32_t predicate) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBallot, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(predicate);\n return resultId;\n }\n uint32_t opGroupNonUniformBallotBitCount(\n uint32_t resultType,\n uint32_t execution,\n uint32_t operation,\n uint32_t ballot) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBallotBitCount, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(operation);\n m_code.putWord(ballot);\n return resultId;\n }\n uint32_t opGroupNonUniformElect(\n uint32_t resultType,\n uint32_t execution) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformElect, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n return resultId;\n }\n uint32_t opGroupNonUniformBroadcastFirst(\n uint32_t resultType,\n uint32_t execution,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBroadcastFirst, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(value);\n return resultId;\n }\n void opControlBarrier(\n uint32_t execution,\n uint32_t memory,\n uint32_t semantics) {\n m_code.putIns (spv::OpControlBarrier, 4);\n m_code.putWord(execution);\n m_code.putWord(memory);\n m_code.putWord(semantics);\n }\n void opMemoryBarrier(\n uint32_t memory,\n uint32_t 
            semantics) {
      m_code.putIns (spv::OpMemoryBarrier, 3);
      m_code.putWord(memory);
      m_code.putWord(semantics);
    }

    // Emits OpLoopMerge, declaring the merge block and continue target
    // of a structured loop. Must immediately precede the branch that
    // enters the loop body.
    void opLoopMerge(
            uint32_t mergeBlock,
            uint32_t continueTarget,
            uint32_t loopControl) {
      m_code.putIns (spv::OpLoopMerge, 4);
      m_code.putWord(mergeBlock);
      m_code.putWord(continueTarget);
      m_code.putWord(loopControl);
    }

    // Emits OpSelectionMerge; must immediately precede the
    // OpBranchConditional or OpSwitch of a structured selection.
    void opSelectionMerge(
            uint32_t mergeBlock,
            uint32_t selectionControl) {
      m_code.putIns (spv::OpSelectionMerge, 3);
      m_code.putWord(mergeBlock);
      m_code.putWord(selectionControl);
    }

    // Unconditional branch. This terminates the current basic block,
    // so the current-block tracker is cleared until the next opLabel.
    void opBranch(
            uint32_t label) {
      m_code.putIns (spv::OpBranch, 2);
      m_code.putWord(label);

      m_blockId = 0;
    }

    // Two-way conditional branch; also a block terminator.
    void opBranchConditional(
            uint32_t condition,
            uint32_t trueLabel,
            uint32_t falseLabel) {
      m_code.putIns (spv::OpBranchConditional, 4);
      m_code.putWord(condition);
      m_code.putWord(trueLabel);
      m_code.putWord(falseLabel);

      m_blockId = 0;
    }

    // Multi-way branch on an integer selector. Each case contributes a
    // (literal, label) word pair, hence the 3 + 2 * caseCount length.
    // Block terminator.
    void opSwitch(
            uint32_t selector,
            uint32_t jumpDefault,
            uint32_t caseCount,
      const SpirvSwitchCaseLabel* caseLabels) {
      m_code.putIns (spv::OpSwitch, 3 + 2 * caseCount);
      m_code.putWord(selector);
      m_code.putWord(jumpDefault);

      for (uint32_t i = 0; i < caseCount; i++) {
        m_code.putWord(caseLabels[i].literal);
        m_code.putWord(caseLabels[i].labelId);
      }

      m_blockId = 0;
    }

    // Emits OpPhi with one (value, predecessor-label) pair per source.
    uint32_t opPhi(
            uint32_t resultType,
            uint32_t sourceCount,
      const SpirvPhiLabel* sourceLabels) {
      uint32_t resultId = this->allocateId();

      m_code.putIns (spv::OpPhi, 3 + 2 * sourceCount);
      m_code.putWord(resultType);
      m_code.putWord(resultId);

      for (uint32_t i = 0; i < sourceCount; i++) {
        m_code.putWord(sourceLabels[i].varId);
        m_code.putWord(sourceLabels[i].labelId);
      }

      return resultId;
    }

    // Returns from a void function; block terminator.
    void opReturn() {
      m_code.putIns (spv::OpReturn, 1);
      m_blockId = 0;
    }

    // Demotes the invocation to a helper invocation (fragment shaders).
    void opDemoteToHelperInvocation() {
      m_code.putIns (spv::OpDemoteToHelperInvocation, 1);
    }

    // Emits a vertex in a geometry shader. Stream 0 uses the plain
    // opcode; other streams use the multi-stream variant, which takes
    // the stream ID as an extra operand.
    void opEmitVertex(
            uint32_t streamId) {
      if (streamId == 0) {
        m_code.putIns
        (spv::OpEmitVertex, 1);
      } else {
        m_code.putIns (spv::OpEmitStreamVertex, 2);
        m_code.putWord(streamId);
      }
    }

    // Ends the current primitive in a geometry shader. Stream 0 uses
    // the plain opcode; other streams use the multi-stream variant.
    void opEndPrimitive(
            uint32_t streamId) {
      if (streamId == 0) {
        m_code.putIns (spv::OpEndPrimitive, 1);
      } else {
        m_code.putIns (spv::OpEndStreamPrimitive, 2);
        m_code.putWord(streamId);
      }
    }

    // Begins a fragment shader interlock critical section (EXT).
    void opBeginInvocationInterlock() {
      m_code.putIns(spv::OpBeginInvocationInterlockEXT, 1);
    }

    // Ends a fragment shader interlock critical section (EXT).
    void opEndInvocationInterlock() {
      m_code.putIns(spv::OpEndInvocationInterlockEXT, 1);
    }

    // Emits code computing a two-component vector of (sin x, cos x).
    // If useBuiltIn is set, this simply emits the GLSL.std.450 Sin and
    // Cos instructions; otherwise it emits a manual polynomial
    // approximation with explicit quadrant and sign handling.
    uint32_t opSinCos(
            uint32_t x,
            bool useBuiltIn) {
      // We only operate on 32-bit floats here
      uint32_t floatType = defFloatType(32);
      uint32_t resultType = defVectorType(floatType, 2u);

      if (useBuiltIn) {
        std::array members = { opSin(floatType, x), opCos(floatType, x) };
        return opCompositeConstruct(resultType, members.size(), members.data());
      } else {
        uint32_t uintType = defIntType(32, false);
        uint32_t sintType = defIntType(32, true);
        uint32_t boolType = defBoolType();

        // Normalize input to multiple of pi/4. xInt then encodes the
        // octant and xFract the position within it.
        uint32_t xNorm = opFMul(floatType, opFAbs(floatType, x), constf32(4.0 / pi));

        uint32_t xTrunc = opTrunc(floatType, xNorm);
        uint32_t xFract = opFSub(floatType, xNorm, xTrunc);

        uint32_t xInt = opConvertFtoU(uintType, xTrunc);

        // Mirror input along x axis as necessary (odd octants)
        uint32_t mirror = opINotEqual(boolType, opBitwiseAnd(uintType, xInt, constu32(1u)), constu32(0u));
        xFract = opSelect(floatType, mirror, opFSub(floatType, constf32(1.0f), xFract), xFract);

        // Compute taylor series for fractional part. The NoContraction
        // decorations pin the evaluation order so drivers cannot
        // re-associate the polynomial and change the result.
        uint32_t xFract_2 = opFMul(floatType, xFract, xFract);
        uint32_t xFract_4 = opFMul(floatType, xFract_2, xFract_2);
        uint32_t xFract_6 = opFMul(floatType, xFract_4, xFract_2);

        uint32_t taylor = opFMul(floatType, xFract_6, constf32(-sincosTaylorFactor(7)));
        decorate(taylor, spv::DecorationNoContraction);

        taylor = opFFma(floatType, xFract_4, constf32(sincosTaylorFactor(5)), taylor);
        decorate(taylor, spv::DecorationNoContraction);

        taylor = opFFma(floatType, xFract_2, constf32(-sincosTaylorFactor(3)), taylor);
        decorate(taylor, spv::DecorationNoContraction);

        taylor = opFAdd(floatType, constf32(sincosTaylorFactor(1)), taylor);
        decorate(taylor, spv::DecorationNoContraction);

        taylor = opFMul(floatType, taylor, xFract);
        decorate(taylor, spv::DecorationNoContraction);

        // Compute co-function based on sin^2 + cos^2 = 1
        uint32_t coFunc = opSqrt(floatType, opFSub(floatType, constf32(1.0f), opFMul(floatType, taylor, taylor)));

        // Determine whether the taylor series was used for sine or cosine and assign the correct result
        uint32_t funcIsSin = opIEqual(boolType, opBitwiseAnd(uintType, opIAdd(uintType, xInt, constu32(1u)), constu32(2u)), constu32(0u));

        uint32_t sin = opSelect(floatType, funcIsSin, taylor, coFunc);
        uint32_t cos = opSelect(floatType, funcIsSin, coFunc, taylor);

        // Determine whether sine is negative. Interpret the input as a
        // signed integer in order to propagate signed zeroes properly.
        uint32_t inputNeg = opSLessThan(boolType, opBitcast(sintType, x), consti32(0));

        uint32_t sinNeg = opINotEqual(boolType, opBitwiseAnd(uintType, xInt, constu32(4u)), constu32(0u));
        sinNeg = opLogicalNotEqual(boolType, sinNeg, inputNeg);

        // Determine whether cosine is negative
        uint32_t cosNeg = opINotEqual(boolType, opBitwiseAnd(uintType, opIAdd(uintType, xInt, constu32(2u)), constu32(4u)), constu32(0u));

        sin = opSelect(floatType, sinNeg, opFNegate(floatType, sin), sin);
        cos = opSelect(floatType, cosNeg, opFNegate(floatType, cos), cos);

        std::array members = { sin, cos };
        return opCompositeConstruct(resultType, members.size(), members.data());
      }
    }
  private:
    // SPIR-V version word of the module being built
    uint32_t m_version;
    // Next result ID to hand out; ID 0 is reserved/invalid in SPIR-V
    uint32_t m_id = 1;
    // Result ID of the imported GLSL.std.450 instruction set (0 = not imported)
    uint32_t m_instExtGlsl450 = 0;
    // Label ID of the basic block currently being emitted (0 = none)
    uint32_t m_blockId = 0;
    // Separate code buffers for the individual module sections
    SpirvCodeBuffer m_capabilities;
    SpirvCodeBuffer m_extensions;
    SpirvCodeBuffer m_instExt;
    SpirvCodeBuffer m_memoryModel;
    SpirvCodeBuffer
m_entryPoints;\n SpirvCodeBuffer m_execModeInfo;\n SpirvCodeBuffer m_debugNames;\n SpirvCodeBuffer m_annotations;\n SpirvCodeBuffer m_typeConstDefs;\n SpirvCodeBuffer m_variables;\n SpirvCodeBuffer m_code;\n std::unordered_set m_lateConsts;\n std::vector m_interfaceVars;\n uint32_t defType(\n spv::Op op, \n uint32_t argCount,\n const uint32_t* argIds) {\n // Since the type info is stored in the code buffer,\n // we can use the code buffer to look up type IDs as\n // well. Result IDs are always stored as argument 1.\n for (auto ins : m_typeConstDefs) {\n bool match = ins.opCode() == op\n && ins.length() == 2 + argCount;\n \n for (uint32_t i = 0; i < argCount && match; i++)\n match &= ins.arg(2 + i) == argIds[i];\n \n if (match)\n return ins.arg(1);\n }\n \n // Type not yet declared, create a new one.\n uint32_t resultId = this->allocateId();\n m_typeConstDefs.putIns (op, 2 + argCount);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_typeConstDefs.putWord(argIds[i]);\n return resultId;\n }\n uint32_t defConst(\n spv::Op op,\n uint32_t typeId,\n uint32_t argCount,\n const uint32_t* argIds) {\n // Avoid declaring constants multiple times\n for (auto ins : m_typeConstDefs) {\n bool match = ins.opCode() == op\n && ins.length() == 3 + argCount\n && ins.arg(1) == typeId;\n \n for (uint32_t i = 0; i < argCount && match; i++)\n match &= ins.arg(3 + i) == argIds[i];\n \n if (!match)\n continue;\n \n uint32_t id = ins.arg(2);\n\n if (m_lateConsts.find(id) == m_lateConsts.end())\n return id;\n }\n \n // Constant not yet declared, make a new one\n uint32_t resultId = this->allocateId();\n m_typeConstDefs.putIns (op, 3 + argCount);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_typeConstDefs.putWord(argIds[i]);\n return resultId;\n }\n void instImportGlsl450() {\n m_instExtGlsl450 = this->allocateId();\n const char* name = \"GLSL.std.450\";\n \n m_instExt.putIns 
(spv::OpExtInstImport, 2 + m_instExt.strLen(name));\n m_instExt.putWord(m_instExtGlsl450);\n m_instExt.putStr (name);\n }\n uint32_t getMemoryOperandWordCount(\n const SpirvMemoryOperands& op) const {\n const uint32_t result\n = ((op.flags & spv::MemoryAccessAlignedMask) ? 1 : 0)\n + ((op.flags & spv::MemoryAccessMakePointerAvailableMask) ? 1 : 0)\n + ((op.flags & spv::MemoryAccessMakePointerVisibleMask) ? 1 : 0);\n\n return op.flags ? result + 1 : 0;\n }\n void putMemoryOperands(\n const SpirvMemoryOperands& op) {\n if (op.flags) {\n m_code.putWord(op.flags);\n\n if (op.flags & spv::MemoryAccessAlignedMask)\n m_code.putWord(op.alignment);\n\n if (op.flags & spv::MemoryAccessMakePointerAvailableMask)\n m_code.putWord(op.makeAvailable);\n\n if (op.flags & spv::MemoryAccessMakePointerVisibleMask)\n m_code.putWord(op.makeVisible);\n }\n }\n uint32_t getImageOperandWordCount(\n const SpirvImageOperands& op) const {\n // Each flag may add one or more operands\n const uint32_t result\n = ((op.flags & spv::ImageOperandsBiasMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsLodMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsConstOffsetMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsGradMask) ? 2 : 0)\n + ((op.flags & spv::ImageOperandsOffsetMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsConstOffsetsMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsSampleMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMinLodMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMakeTexelAvailableMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMakeTexelVisibleMask) ? 1 : 0);\n \n // Add a DWORD for the operand mask if it is non-zero\n return op.flags ? 
result + 1 : 0;\n }\n void putImageOperands(\n const SpirvImageOperands& op) {\n if (op.flags) {\n m_code.putWord(op.flags);\n \n if (op.flags & spv::ImageOperandsBiasMask)\n m_code.putWord(op.sLodBias);\n \n if (op.flags & spv::ImageOperandsLodMask)\n m_code.putWord(op.sLod);\n\n if (op.flags & spv::ImageOperandsGradMask) {\n m_code.putWord(op.sGradX);\n m_code.putWord(op.sGradY);\n }\n\n if (op.flags & spv::ImageOperandsConstOffsetMask)\n m_code.putWord(op.sConstOffset);\n\n if (op.flags & spv::ImageOperandsOffsetMask)\n m_code.putWord(op.gOffset);\n \n if (op.flags & spv::ImageOperandsConstOffsetsMask)\n m_code.putWord(op.gConstOffsets);\n \n if (op.flags & spv::ImageOperandsSampleMask)\n m_code.putWord(op.sSampleId);\n \n if (op.flags & spv::ImageOperandsMinLodMask)\n m_code.putWord(op.sMinLod);\n\n if (op.flags & spv::ImageOperandsMakeTexelAvailableMask)\n m_code.putWord(op.makeAvailable);\n\n if (op.flags & spv::ImageOperandsMakeTexelVisibleMask)\n m_code.putWord(op.makeVisible);\n }\n }\n bool isInterfaceVar(\n spv::StorageClass sclass) const {\n if (m_version < spvVersion(1, 4)) {\n return sclass == spv::StorageClassInput\n || sclass == spv::StorageClassOutput;\n } else {\n // All global variables need to be declared\n return sclass != spv::StorageClassFunction;\n }\n }\n void classifyBlocks(\n std::unordered_set& reachableBlocks,\n std::unordered_set& mergeBlocks) {\n std::unordered_multimap branches;\n std::queue blockQueue;\n\n uint32_t blockId = 0;\n\n for (auto ins : m_code) {\n switch (ins.opCode()) {\n case spv::OpLabel: {\n uint32_t id = ins.arg(1);\n\n if (!blockId)\n branches.insert({ 0u, id });\n\n blockId = id;\n } break;\n\n case spv::OpFunction: {\n blockId = 0u;\n } break;\n\n case spv::OpBranch: {\n branches.insert({ blockId, ins.arg(1) });\n } break;\n\n case spv::OpBranchConditional: {\n branches.insert({ blockId, ins.arg(2) });\n branches.insert({ blockId, ins.arg(3) });\n } break;\n\n case spv::OpSwitch: {\n branches.insert({ blockId, 
ins.arg(2) });\n\n for (uint32_t i = 4; i < ins.length(); i += 2)\n branches.insert({ blockId, ins.arg(i) });\n } break;\n\n case spv::OpSelectionMerge: {\n mergeBlocks.insert(ins.arg(1));\n } break;\n\n case spv::OpLoopMerge: {\n mergeBlocks.insert(ins.arg(1));\n\n // It is possible for the continue block to be unreachable in\n // practice, but we still need to emit it if we are not going\n // to eliminate this loop. Since the current block dominates\n // the loop, use it to keep the continue block intact.\n branches.insert({ blockId, ins.arg(2) });\n } break;\n\n default:;\n }\n }\n\n blockQueue.push(0);\n\n while (!blockQueue.empty()) {\n uint32_t id = blockQueue.front();\n\n auto range = branches.equal_range(id);\n\n for (auto i = range.first; i != range.second; i++) {\n if (reachableBlocks.insert(i->second).second)\n blockQueue.push(i->second);\n }\n\n blockQueue.pop();\n }\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_header.h", "class DxbcHeader {\n public:\n DxbcHeader(DxbcReader& reader) {\n // FourCC at the start of the file, must be 'DXBC'\n DxbcTag fourcc = reader.readTag();\n \n if (fourcc != \"DXBC\")\n throw DxvkError(\"DxbcHeader::DxbcHeader: Invalid fourcc, expected 'DXBC'\");\n \n // Stuff we don't actually need to store\n reader.skip(4 * sizeof(uint32_t)); // Check sum\n reader.skip(1 * sizeof(uint32_t)); // Constant 1\n reader.skip(1 * sizeof(uint32_t)); // Bytecode length\n \n // Number of chunks in the file\n uint32_t chunkCount = reader.readu32();\n \n // Chunk offsets are stored immediately after\n for (uint32_t i = 0; i < chunkCount; i++)\n m_chunkOffsets.push_back(reader.readu32());\n }\n ~DxbcHeader() {\n \n }\n private:\n std::vector m_chunkOffsets;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_code_buffer.h", "class for {\n public:\n SpirvCodeBuffer() {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector 
buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n explicit SpirvCodeBuffer(uint32_t size) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n SpirvCodeBuffer(uint32_t size, const uint32_t* data) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n SpirvCodeBuffer(std::istream& stream) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n template\n SpirvCodeBuffer(const uint32_t (&data)[N])\n : SpirvCodeBuffer(N, data) { }\n ~SpirvCodeBuffer() { }\n uint32_t allocId() {\n constexpr size_t BoundIdsOffset = 3;\n\n if (m_code.size() <= BoundIdsOffset)\n return 0;\n\n return m_code[BoundIdsOffset]++;\n }\n 
void append(const SpirvInstruction& ins) {\n if (other.size() != 0) {\n const size_t size = m_code.size();\n m_code.resize(size + other.m_code.size());\n \n uint32_t* dst = this->m_code.data();\n const uint32_t* src = other.m_code.data();\n \n std::memcpy(dst + size, src, other.size());\n m_ptr += other.m_code.size();\n }\n }\n void append(const SpirvCodeBuffer& other) {\n if (other.size() != 0) {\n const size_t size = m_code.size();\n m_code.resize(size + other.m_code.size());\n \n uint32_t* dst = this->m_code.data();\n const uint32_t* src = other.m_code.data();\n \n std::memcpy(dst + size, src, other.size());\n m_ptr += other.m_code.size();\n }\n }\n void putWord(uint32_t word) {\n m_code.insert(m_code.begin() + m_ptr, word);\n m_ptr += 1;\n }\n void putIns(spv::Op opCode, uint16_t wordCount) {\n this->putWord(\n (static_cast(opCode) << 0)\n | (static_cast(wordCount) << 16));\n }\n void putInt32(uint32_t word) {\n this->putWord(word);\n }\n void putInt64(uint64_t value) {\n this->putWord(value >> 0);\n this->putWord(value >> 32);\n }\n void putFloat32(float value) {\n uint32_t tmp;\n static_assert(sizeof(tmp) == sizeof(value));\n std::memcpy(&tmp, &value, sizeof(value));\n this->putInt32(tmp);\n }\n void putFloat64(double value) {\n uint64_t tmp;\n static_assert(sizeof(tmp) == sizeof(value));\n std::memcpy(&tmp, &value, sizeof(value));\n this->putInt64(tmp);\n }\n void putStr(const char* str) {\n uint32_t word = 0;\n uint32_t nbit = 0;\n \n for (uint32_t i = 0; str[i] != '\\0'; str++) {\n word |= (static_cast(str[i]) & 0xFF) << nbit;\n \n if ((nbit += 8) == 32) {\n this->putWord(word);\n word = 0;\n nbit = 0;\n }\n }\n \n // Commit current word\n this->putWord(word);\n }\n void putHeader(uint32_t version, uint32_t boundIds) {\n this->putWord(spv::MagicNumber);\n this->putWord(version);\n this->putWord(0); // Generator\n this->putWord(boundIds);\n this->putWord(0); // Schema\n }\n void erase(size_t size) {\n m_code.erase(\n m_code.begin() + m_ptr,\n m_code.begin() 
+ m_ptr + size);\n }\n uint32_t strLen(const char* str) {\n // Null-termination plus padding\n return (std::strlen(str) + 4) / 4;\n }\n void store(std::ostream& stream) const {\n stream.write(\n reinterpret_cast(m_code.data()),\n sizeof(uint32_t) * m_code.size());\n }\n private:\n std::vector m_code;\n size_t m_ptr = 0;\n};"], ["/lsfg-vk/src/context.cpp", "#include \"context.hpp\"\n#include \"config/config.hpp\"\n#include \"common/exception.hpp\"\n#include \"extract/extract.hpp\"\n#include \"extract/trans.hpp\"\n#include \"utils/utils.hpp\"\n#include \"hooks.hpp\"\n#include \"layer.hpp\"\n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nLsContext::LsContext(const Hooks::DeviceInfo& info, VkSwapchainKHR swapchain,\n VkExtent2D extent, const std::vector& swapchainImages)\n : swapchain(swapchain), swapchainImages(swapchainImages),\n extent(extent) {\n // get updated configuration\n auto& conf = Config::activeConf;\n if (!conf.config_file.empty()\n && (\n !std::filesystem::exists(conf.config_file)\n || conf.timestamp != std::filesystem::last_write_time(conf.config_file)\n )) {\n std::cerr << \"lsfg-vk: Rereading configuration, as it is no longer valid.\\n\";\n std::this_thread::sleep_for(std::chrono::milliseconds(100));\n\n // reread configuration\n const std::string file = Utils::getConfigFile();\n const auto name = Utils::getProcessName();\n try {\n Config::updateConfig(file);\n conf = Config::getConfig(name);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: Failed to update configuration, continuing using old:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n }\n\n LSFG_3_1P::finalize();\n LSFG_3_1::finalize();\n\n // print config\n std::cerr << \"lsfg-vk: Reloaded configuration for \" << name.second << \":\\n\";\n if (!conf.dll.empty()) std::cerr << \" Using DLL from: \" << conf.dll << '\\n';\n std::cerr << \" Multiplier: \" << 
conf.multiplier << '\\n';\n std::cerr << \" Flow Scale: \" << conf.flowScale << '\\n';\n std::cerr << \" Performance Mode: \" << (conf.performance ? \"Enabled\" : \"Disabled\") << '\\n';\n std::cerr << \" HDR Mode: \" << (conf.hdr ? \"Enabled\" : \"Disabled\") << '\\n';\n if (conf.e_present != 2) std::cerr << \" ! Present Mode: \" << conf.e_present << '\\n';\n\n if (conf.multiplier <= 1) return;\n }\n // we could take the format from the swapchain,\n // but honestly this is safer.\n const VkFormat format = conf.hdr\n ? VK_FORMAT_R8G8B8A8_UNORM\n : VK_FORMAT_R16G16B16A16_SFLOAT;\n\n // prepare textures for lsfg\n std::array fds{};\n this->frame_0 = Mini::Image(info.device, info.physicalDevice,\n extent, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &fds.at(0));\n this->frame_1 = Mini::Image(info.device, info.physicalDevice,\n extent, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &fds.at(1));\n\n std::vector outFds(conf.multiplier - 1);\n for (size_t i = 0; i < (conf.multiplier - 1); ++i)\n this->out_n.emplace_back(info.device, info.physicalDevice,\n extent, format,\n VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &outFds.at(i));\n\n // initialize lsfg\n auto* lsfgInitialize = LSFG_3_1::initialize;\n auto* lsfgCreateContext = LSFG_3_1::createContext;\n auto* lsfgDeleteContext = LSFG_3_1::deleteContext;\n if (conf.performance) {\n lsfgInitialize = LSFG_3_1P::initialize;\n lsfgCreateContext = LSFG_3_1P::createContext;\n lsfgDeleteContext = LSFG_3_1P::deleteContext;\n }\n\n setenv(\"DISABLE_LSFG\", \"1\", 1); // NOLINT\n\n lsfgInitialize(\n Utils::getDeviceUUID(info.physicalDevice),\n conf.hdr, 1.0F / conf.flowScale, conf.multiplier - 1,\n [](const std::string& name) {\n auto dxbc = Extract::getShader(name);\n auto spirv = Extract::translateShader(dxbc);\n return spirv;\n }\n );\n\n this->lsfgCtxId = std::shared_ptr(\n new int32_t(lsfgCreateContext(fds.at(0), fds.at(1), outFds, extent, format)),\n 
[lsfgDeleteContext = lsfgDeleteContext](const int32_t* id) {\n lsfgDeleteContext(*id);\n }\n );\n\n unsetenv(\"DISABLE_LSFG\"); // NOLINT\n\n // prepare render passes\n this->cmdPool = Mini::CommandPool(info.device, info.queue.first);\n for (size_t i = 0; i < 8; i++) {\n auto& pass = this->passInfos.at(i);\n pass.renderSemaphores.resize(conf.multiplier - 1);\n pass.acquireSemaphores.resize(conf.multiplier - 1);\n pass.postCopyBufs.resize(conf.multiplier - 1);\n pass.postCopySemaphores.resize(conf.multiplier - 1);\n pass.prevPostCopySemaphores.resize(conf.multiplier - 1);\n }\n}\n\nVkResult LsContext::present(const Hooks::DeviceInfo& info, const void* pNext, VkQueue queue,\n const std::vector& gameRenderSemaphores, uint32_t presentIdx) {\n const auto& conf = Config::activeConf;\n auto& pass = this->passInfos.at(this->frameIdx % 8);\n\n // 1. copy swapchain image to frame_0/frame_1\n int preCopySemaphoreFd{};\n pass.preCopySemaphores.at(0) = Mini::Semaphore(info.device, &preCopySemaphoreFd);\n pass.preCopySemaphores.at(1) = Mini::Semaphore(info.device);\n pass.preCopyBuf = Mini::CommandBuffer(info.device, this->cmdPool);\n pass.preCopyBuf.begin();\n\n Utils::copyImage(pass.preCopyBuf.handle(),\n this->swapchainImages.at(presentIdx),\n this->frameIdx % 2 == 0 ? this->frame_0.handle() : this->frame_1.handle(),\n this->extent.width, this->extent.height,\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n true, false);\n\n pass.preCopyBuf.end();\n\n std::vector gameRenderSemaphores2 = gameRenderSemaphores;\n if (this->frameIdx > 0)\n gameRenderSemaphores2.emplace_back(this->passInfos.at((this->frameIdx - 1) % 8)\n .preCopySemaphores.at(1).handle());\n pass.preCopyBuf.submit(info.queue.second,\n gameRenderSemaphores2,\n { pass.preCopySemaphores.at(0).handle(),\n pass.preCopySemaphores.at(1).handle() });\n\n // 2. 
render intermediary frames\n std::vector renderSemaphoreFds(conf.multiplier - 1);\n for (size_t i = 0; i < (conf.multiplier - 1); ++i)\n pass.renderSemaphores.at(i) = Mini::Semaphore(info.device, &renderSemaphoreFds.at(i));\n\n if (conf.performance)\n LSFG_3_1P::presentContext(*this->lsfgCtxId,\n preCopySemaphoreFd,\n renderSemaphoreFds);\n else\n LSFG_3_1::presentContext(*this->lsfgCtxId,\n preCopySemaphoreFd,\n renderSemaphoreFds);\n\n for (size_t i = 0; i < (conf.multiplier - 1); i++) {\n // 3. acquire next swapchain image\n pass.acquireSemaphores.at(i) = Mini::Semaphore(info.device);\n uint32_t imageIdx{};\n auto res = Layer::ovkAcquireNextImageKHR(info.device, this->swapchain, UINT64_MAX,\n pass.acquireSemaphores.at(i).handle(), VK_NULL_HANDLE, &imageIdx);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to acquire next swapchain image\");\n\n // 4. copy output image to swapchain image\n pass.postCopySemaphores.at(i) = Mini::Semaphore(info.device);\n pass.prevPostCopySemaphores.at(i) = Mini::Semaphore(info.device);\n pass.postCopyBufs.at(i) = Mini::CommandBuffer(info.device, this->cmdPool);\n pass.postCopyBufs.at(i).begin();\n\n Utils::copyImage(pass.postCopyBufs.at(i).handle(),\n this->out_n.at(i).handle(),\n this->swapchainImages.at(imageIdx),\n this->extent.width, this->extent.height,\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n false, true);\n\n pass.postCopyBufs.at(i).end();\n pass.postCopyBufs.at(i).submit(info.queue.second,\n { pass.acquireSemaphores.at(i).handle(),\n pass.renderSemaphores.at(i).handle() },\n { pass.postCopySemaphores.at(i).handle(),\n pass.prevPostCopySemaphores.at(i).handle() });\n\n // 5. 
present swapchain image\n std::vector waitSemaphores{ pass.postCopySemaphores.at(i).handle() };\n if (i != 0) waitSemaphores.emplace_back(pass.prevPostCopySemaphores.at(i - 1).handle());\n\n const VkPresentInfoKHR presentInfo{\n .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,\n .pNext = i == 0 ? pNext : nullptr, // only set on first present\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphores.data(),\n .swapchainCount = 1,\n .pSwapchains = &this->swapchain,\n .pImageIndices = &imageIdx,\n };\n res = Layer::ovkQueuePresentKHR(queue, &presentInfo);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to present swapchain image\");\n }\n\n // 6. present actual next frame\n VkSemaphore lastPrevPostCopySemaphore =\n pass.prevPostCopySemaphores.at(conf.multiplier - 1 - 1).handle();\n const VkPresentInfoKHR presentInfo{\n .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,\n .waitSemaphoreCount = 1,\n .pWaitSemaphores = &lastPrevPostCopySemaphore,\n .swapchainCount = 1,\n .pSwapchains = &this->swapchain,\n .pImageIndices = &presentIdx,\n };\n auto res = Layer::ovkQueuePresentKHR(queue, &presentInfo);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to present swapchain image\");\n\n this->frameIdx++;\n return res;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_enums.h", "#pragma once\n\n#include \"dxbc_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Instruction code listing\n */\n enum class DxbcOpcode : uint32_t {\n Add = 0,\n And = 1,\n Break = 2,\n Breakc = 3,\n Call = 4,\n Callc = 5,\n Case = 6,\n Continue = 7,\n Continuec = 8,\n Cut = 9,\n Default = 10,\n DerivRtx = 11,\n DerivRty = 12,\n Discard = 13,\n Div = 14,\n Dp2 = 15,\n Dp3 = 16,\n Dp4 = 17,\n Else = 18,\n Emit = 19,\n EmitThenCut = 20,\n EndIf = 21,\n EndLoop = 22,\n EndSwitch = 23,\n Eq = 24,\n Exp = 25,\n Frc = 26,\n FtoI = 27,\n FtoU = 28,\n Ge = 29,\n IAdd = 30,\n If = 31,\n 
IEq = 32,\n IGe = 33,\n ILt = 34,\n IMad = 35,\n IMax = 36,\n IMin = 37,\n IMul = 38,\n INe = 39,\n INeg = 40,\n IShl = 41,\n IShr = 42,\n ItoF = 43,\n Label = 44,\n Ld = 45,\n LdMs = 46,\n Log = 47,\n Loop = 48,\n Lt = 49,\n Mad = 50,\n Min = 51,\n Max = 52,\n CustomData = 53,\n Mov = 54,\n Movc = 55,\n Mul = 56,\n Ne = 57,\n Nop = 58,\n Not = 59,\n Or = 60,\n ResInfo = 61,\n Ret = 62,\n Retc = 63,\n RoundNe = 64,\n RoundNi = 65,\n RoundPi = 66,\n RoundZ = 67,\n Rsq = 68,\n Sample = 69,\n SampleC = 70,\n SampleClz = 71,\n SampleL = 72,\n SampleD = 73,\n SampleB = 74,\n Sqrt = 75,\n Switch = 76,\n SinCos = 77,\n UDiv = 78,\n ULt = 79,\n UGe = 80,\n UMul = 81,\n UMad = 82,\n UMax = 83,\n UMin = 84,\n UShr = 85,\n UtoF = 86,\n Xor = 87,\n DclResource = 88,\n DclConstantBuffer = 89,\n DclSampler = 90,\n DclIndexRange = 91,\n DclGsOutputPrimitiveTopology = 92,\n DclGsInputPrimitive = 93,\n DclMaxOutputVertexCount = 94,\n DclInput = 95,\n DclInputSgv = 96,\n DclInputSiv = 97,\n DclInputPs = 98,\n DclInputPsSgv = 99,\n DclInputPsSiv = 100,\n DclOutput = 101,\n DclOutputSgv = 102,\n DclOutputSiv = 103,\n DclTemps = 104,\n DclIndexableTemp = 105,\n DclGlobalFlags = 106,\n Reserved0 = 107,\n Lod = 108,\n Gather4 = 109,\n SamplePos = 110,\n SampleInfo = 111,\n Reserved1 = 112,\n HsDecls = 113,\n HsControlPointPhase = 114,\n HsForkPhase = 115,\n HsJoinPhase = 116,\n EmitStream = 117,\n CutStream = 118,\n EmitThenCutStream = 119,\n InterfaceCall = 120,\n BufInfo = 121,\n DerivRtxCoarse = 122,\n DerivRtxFine = 123,\n DerivRtyCoarse = 124,\n DerivRtyFine = 125,\n Gather4C = 126,\n Gather4Po = 127,\n Gather4PoC = 128,\n Rcp = 129,\n F32toF16 = 130,\n F16toF32 = 131,\n UAddc = 132,\n USubb = 133,\n CountBits = 134,\n FirstBitHi = 135,\n FirstBitLo = 136,\n FirstBitShi = 137,\n UBfe = 138,\n IBfe = 139,\n Bfi = 140,\n BfRev = 141,\n Swapc = 142,\n DclStream = 143,\n DclFunctionBody = 144,\n DclFunctionTable = 145,\n DclInterface = 146,\n DclInputControlPointCount = 147,\n 
DclOutputControlPointCount = 148,\n DclTessDomain = 149,\n DclTessPartitioning = 150,\n DclTessOutputPrimitive = 151,\n DclHsMaxTessFactor = 152,\n DclHsForkPhaseInstanceCount = 153,\n DclHsJoinPhaseInstanceCount = 154,\n DclThreadGroup = 155,\n DclUavTyped = 156,\n DclUavRaw = 157,\n DclUavStructured = 158,\n DclThreadGroupSharedMemoryRaw = 159,\n DclThreadGroupSharedMemoryStructured = 160,\n DclResourceRaw = 161,\n DclResourceStructured = 162,\n LdUavTyped = 163,\n StoreUavTyped = 164,\n LdRaw = 165,\n StoreRaw = 166,\n LdStructured = 167,\n StoreStructured = 168,\n AtomicAnd = 169,\n AtomicOr = 170,\n AtomicXor = 171,\n AtomicCmpStore = 172,\n AtomicIAdd = 173,\n AtomicIMax = 174,\n AtomicIMin = 175,\n AtomicUMax = 176,\n AtomicUMin = 177,\n ImmAtomicAlloc = 178,\n ImmAtomicConsume = 179,\n ImmAtomicIAdd = 180,\n ImmAtomicAnd = 181,\n ImmAtomicOr = 182,\n ImmAtomicXor = 183,\n ImmAtomicExch = 184,\n ImmAtomicCmpExch = 185,\n ImmAtomicIMax = 186,\n ImmAtomicIMin = 187,\n ImmAtomicUMax = 188,\n ImmAtomicUMin = 189,\n Sync = 190,\n DAdd = 191,\n DMax = 192,\n DMin = 193,\n DMul = 194,\n DEq = 195,\n DGe = 196,\n DLt = 197,\n DNe = 198,\n DMov = 199,\n DMovc = 200,\n DtoF = 201,\n FtoD = 202,\n EvalSnapped = 203,\n EvalSampleIndex = 204,\n EvalCentroid = 205,\n DclGsInstanceCount = 206,\n Abort = 207,\n DebugBreak = 208,\n ReservedBegin11_1 = 209,\n DDiv = 210,\n DFma = 211,\n DRcp = 212,\n Msad = 213,\n DtoI = 214,\n DtoU = 215,\n ItoD = 216,\n UtoD = 217,\n ReservedBegin11_2 = 218,\n Gather4S = 219,\n Gather4CS = 220,\n Gather4PoS = 221,\n Gather4PoCS = 222,\n LdS = 223,\n LdMsS = 224,\n LdUavTypedS = 225,\n LdRawS = 226,\n LdStructuredS = 227,\n SampleLS = 228,\n SampleClzS = 229,\n SampleClampS = 230,\n SampleBClampS = 231,\n SampleDClampS = 232,\n SampleCClampS = 233,\n CheckAccessFullyMapped = 234,\n };\n \n \n /**\n * \\brief Extended opcode\n */\n enum class DxbcExtOpcode : uint32_t {\n Empty = 0,\n SampleControls = 1,\n ResourceDim = 2,\n ResourceReturnType 
= 3,\n };\n \n \n /**\n * \\brief Operand type\n * \n * Selects the 'register file' from which\n * to retrieve an operand's value.\n */\n enum class DxbcOperandType : uint32_t {\n Temp = 0,\n Input = 1,\n Output = 2,\n IndexableTemp = 3,\n Imm32 = 4,\n Imm64 = 5,\n Sampler = 6,\n Resource = 7,\n ConstantBuffer = 8,\n ImmediateConstantBuffer = 9,\n Label = 10,\n InputPrimitiveId = 11,\n OutputDepth = 12,\n Null = 13,\n Rasterizer = 14,\n OutputCoverageMask = 15,\n Stream = 16,\n FunctionBody = 17,\n FunctionTable = 18,\n Interface = 19,\n FunctionInput = 20,\n FunctionOutput = 21,\n OutputControlPointId = 22,\n InputForkInstanceId = 23,\n InputJoinInstanceId = 24,\n InputControlPoint = 25,\n OutputControlPoint = 26,\n InputPatchConstant = 27,\n InputDomainPoint = 28,\n ThisPointer = 29,\n UnorderedAccessView = 30,\n ThreadGroupSharedMemory = 31,\n InputThreadId = 32,\n InputThreadGroupId = 33,\n InputThreadIdInGroup = 34,\n InputCoverageMask = 35,\n InputThreadIndexInGroup = 36,\n InputGsInstanceId = 37,\n OutputDepthGe = 38,\n OutputDepthLe = 39,\n CycleCounter = 40,\n OutputStencilRef = 41,\n InputInnerCoverage = 42,\n };\n \n \n /**\n * \\brief Number of components\n * \n * Used by operands to determine whether the\n * operand has one, four or zero components.\n */\n enum class DxbcComponentCount : uint32_t {\n Component0 = 0,\n Component1 = 1,\n Component4 = 2,\n };\n \n \n /**\n * \\brief Component selection mode\n * \n * When an operand has four components, the\n * component selection mode deterines which\n * components are used for the operation.\n */\n enum class DxbcRegMode : uint32_t {\n Mask = 0,\n Swizzle = 1,\n Select1 = 2,\n };\n \n \n /**\n * \\brief Index representation\n * \n * Determines how an operand\n * register index is stored.\n */\n enum class DxbcOperandIndexRepresentation : uint32_t {\n Imm32 = 0,\n Imm64 = 1,\n Relative = 2,\n Imm32Relative = 3,\n Imm64Relative = 4,\n };\n \n \n /**\n * \\brief Extended operand type\n */\n enum class 
DxbcOperandExt : uint32_t {\n OperandModifier = 1,\n };\n \n \n /**\n * \\brief Resource dimension\n * The type of a resource.\n */\n enum class DxbcResourceDim : uint32_t {\n Unknown = 0,\n Buffer = 1,\n Texture1D = 2,\n Texture2D = 3,\n Texture2DMs = 4,\n Texture3D = 5,\n TextureCube = 6,\n Texture1DArr = 7,\n Texture2DArr = 8,\n Texture2DMsArr = 9,\n TextureCubeArr = 10,\n RawBuffer = 11,\n StructuredBuffer = 12,\n };\n \n \n /**\n * \\brief Resource return type\n * Data type for resource read ops.\n */\n enum class DxbcResourceReturnType : uint32_t {\n Unorm = 1,\n Snorm = 2,\n Sint = 3,\n Uint = 4,\n Float = 5,\n Mixed = 6, /// ?\n Double = 7,\n Continued = 8, /// ?\n Unused = 9, /// ?\n };\n \n \n /**\n * \\brief Register component type\n * Data type of a register component.\n */\n enum class DxbcRegisterComponentType : uint32_t {\n Unknown = 0,\n Uint32 = 1,\n Sint32 = 2,\n Float32 = 3,\n };\n \n \n /**\n * \\brief Instruction return type\n */\n enum class DxbcInstructionReturnType : uint32_t {\n Float = 0,\n Uint = 1,\n };\n \n \n enum class DxbcSystemValue : uint32_t {\n None = 0,\n Position = 1,\n ClipDistance = 2,\n CullDistance = 3,\n RenderTargetId = 4,\n ViewportId = 5,\n VertexId = 6,\n PrimitiveId = 7,\n InstanceId = 8,\n IsFrontFace = 9,\n SampleIndex = 10,\n FinalQuadUeq0EdgeTessFactor = 11,\n FinalQuadVeq0EdgeTessFactor = 12,\n FinalQuadUeq1EdgeTessFactor = 13,\n FinalQuadVeq1EdgeTessFactor = 14,\n FinalQuadUInsideTessFactor = 15,\n FinalQuadVInsideTessFactor = 16,\n FinalTriUeq0EdgeTessFactor = 17,\n FinalTriVeq0EdgeTessFactor = 18,\n FinalTriWeq0EdgeTessFactor = 19,\n FinalTriInsideTessFactor = 20,\n FinalLineDetailTessFactor = 21,\n FinalLineDensityTessFactor = 22,\n Target = 64,\n Depth = 65,\n Coverage = 66,\n DepthGe = 67,\n DepthLe = 68\n };\n \n \n enum class DxbcInterpolationMode : uint32_t {\n Undefined = 0,\n Constant = 1,\n Linear = 2,\n LinearCentroid = 3,\n LinearNoPerspective = 4,\n LinearNoPerspectiveCentroid = 5,\n LinearSample = 
6,\n LinearNoPerspectiveSample = 7,\n };\n \n \n enum class DxbcGlobalFlag : uint32_t {\n RefactoringAllowed = 0,\n DoublePrecision = 1,\n EarlyFragmentTests = 2,\n RawStructuredBuffers = 3,\n };\n \n using DxbcGlobalFlags = Flags;\n \n enum class DxbcZeroTest : uint32_t {\n TestZ = 0,\n TestNz = 1,\n };\n \n enum class DxbcResinfoType : uint32_t {\n Float = 0,\n RcpFloat = 1,\n Uint = 2,\n };\n \n enum class DxbcSyncFlag : uint32_t {\n ThreadsInGroup = 0,\n ThreadGroupSharedMemory = 1,\n UavMemoryGroup = 2,\n UavMemoryGlobal = 3,\n };\n \n using DxbcSyncFlags = Flags;\n \n \n /**\n * \\brief Geometry shader input primitive\n */\n enum class DxbcPrimitive : uint32_t {\n Undefined = 0,\n Point = 1,\n Line = 2,\n Triangle = 3,\n LineAdj = 6,\n TriangleAdj = 7,\n Patch1 = 8,\n Patch2 = 9,\n Patch3 = 10,\n Patch4 = 11,\n Patch5 = 12,\n Patch6 = 13,\n Patch7 = 14,\n Patch8 = 15,\n Patch9 = 16,\n Patch10 = 17,\n Patch11 = 18,\n Patch12 = 19,\n Patch13 = 20,\n Patch14 = 21,\n Patch15 = 22,\n Patch16 = 23,\n Patch17 = 24,\n Patch18 = 25,\n Patch19 = 26,\n Patch20 = 27,\n Patch21 = 28,\n Patch22 = 29,\n Patch23 = 30,\n Patch24 = 31,\n Patch25 = 32,\n Patch26 = 33,\n Patch27 = 34,\n Patch28 = 35,\n Patch29 = 36,\n Patch30 = 37,\n Patch31 = 38,\n Patch32 = 39,\n };\n \n \n /**\n * \\brief Geometry shader output topology\n */\n enum class DxbcPrimitiveTopology : uint32_t {\n Undefined = 0,\n PointList = 1,\n LineList = 2,\n LineStrip = 3,\n TriangleList = 4,\n TriangleStrip = 5,\n LineListAdj = 10,\n LineStripAdj = 11,\n TriangleListAdj = 12,\n TriangleStripAdj = 13,\n };\n \n \n /**\n * \\brief Sampler operation mode\n */\n enum class DxbcSamplerMode : uint32_t {\n Default = 0,\n Comparison = 1,\n Mono = 2,\n };\n \n \n /**\n * \\brief Scalar value type\n * \n * Enumerates possible register component\n * types. 
Scalar types are represented as\n * a one-component vector type.\n */\n enum class DxbcScalarType : uint32_t {\n Uint32 = 0,\n Uint64 = 1,\n Sint32 = 2,\n Sint64 = 3,\n Float32 = 4,\n Float64 = 5,\n Bool = 6,\n };\n \n \n /**\n * \\brief Tessellator domain\n */\n enum class DxbcTessDomain : uint32_t {\n Undefined = 0,\n Isolines = 1,\n Triangles = 2,\n Quads = 3,\n };\n \n /**\n * \\brief Tessellator partitioning\n */\n enum class DxbcTessPartitioning : uint32_t {\n Undefined = 0,\n Integer = 1,\n Pow2 = 2,\n FractOdd = 3,\n FractEven = 4,\n };\n \n /**\n * \\brief UAV definition flags\n */\n enum class DxbcUavFlag : uint32_t {\n GloballyCoherent = 0,\n RasterizerOrdered = 1,\n };\n \n using DxbcUavFlags = Flags;\n \n /**\n * \\brief Tessellator output primitive\n */\n enum class DxbcTessOutputPrimitive : uint32_t {\n Undefined = 0,\n Point = 1,\n Line = 2,\n TriangleCw = 3,\n TriangleCcw = 4,\n };\n \n /**\n * \\brief Custom data class\n * \n * Stores which type of custom data is\n * referenced by the instruction.\n */\n enum class DxbcCustomDataClass : uint32_t {\n Comment = 0,\n DebugInfo = 1,\n Opaque = 2,\n ImmConstBuf = 3,\n };\n \n \n enum class DxbcResourceType : uint32_t {\n Typed = 0,\n Raw = 1,\n Structured = 2,\n };\n\n\n enum class DxbcConstantBufferAccessType : uint32_t {\n StaticallyIndexed = 0,\n DynamicallyIndexed = 1,\n };\n \n}"], ["/lsfg-vk/thirdparty/pe-parse/dump-pe/main.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be 
included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n\n#include \"vendor/argh.h\"\n\nusing namespace peparse;\n\nint printExps(void *N,\n const VA &funcAddr,\n std::uint16_t ordinal,\n const std::string &mod,\n const std::string &func,\n const std::string &fwd) {\n static_cast(N);\n\n auto address = static_cast(funcAddr);\n\n // save default formatting\n std::ios initial(nullptr);\n initial.copyfmt(std::cout);\n\n std::cout << \"EXP #\";\n std::cout << ordinal;\n std::cout << \": \";\n std::cout << mod;\n std::cout << \"!\";\n std::cout << func;\n std::cout << \": \";\n if (!fwd.empty()) {\n std::cout << fwd;\n } else {\n std::cout << std::showbase << std::hex << address;\n }\n std::cout << \"\\n\";\n\n // restore default formatting\n std::cout.copyfmt(initial);\n return 0;\n}\n\nint printImports(void *N,\n const VA &impAddr,\n const std::string &modName,\n const std::string &symName) {\n static_cast(N);\n\n auto address = static_cast(impAddr);\n\n std::cout << \"0x\" << std::hex << address << \" \" << modName << \"!\" << symName;\n std::cout << \"\\n\";\n return 0;\n}\n\nint printRelocs(void *N, const VA &relocAddr, const reloc_type &type) {\n static_cast(N);\n\n std::cout << \"TYPE: \";\n switch (type) {\n case RELOC_ABSOLUTE:\n std::cout << \"ABSOLUTE\";\n break;\n case RELOC_HIGH:\n std::cout << \"HIGH\";\n break;\n case RELOC_LOW:\n std::cout << \"LOW\";\n break;\n case RELOC_HIGHLOW:\n std::cout << 
\"HIGHLOW\";\n break;\n case RELOC_HIGHADJ:\n std::cout << \"HIGHADJ\";\n break;\n case RELOC_MIPS_JMPADDR:\n std::cout << \"MIPS_JMPADDR\";\n break;\n case RELOC_MIPS_JMPADDR16:\n std::cout << \"MIPS_JMPADD16\";\n break;\n case RELOC_DIR64:\n std::cout << \"DIR64\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n\n std::cout << \" VA: 0x\" << std::hex << relocAddr << \"\\n\";\n\n return 0;\n}\n\nint printDebugs(void *N,\n const std::uint32_t &type,\n const bounded_buffer *data) {\n static_cast(N);\n\n std::cout << \"Debug Directory Type: \";\n switch (type) {\n case 0:\n std::cout << \"IMAGE_DEBUG_TYPE_UNKNOWN\";\n break;\n case 1:\n std::cout << \"IMAGE_DEBUG_TYPE_COFF\";\n break;\n case 2:\n std::cout << \"IMAGE_DEBUG_TYPE_CODEVIEW\";\n break;\n case 3:\n std::cout << \"IMAGE_DEBUG_TYPE_FPO\";\n break;\n case 4:\n std::cout << \"IMAGE_DEBUG_TYPE_MISC\";\n break;\n case 5:\n std::cout << \"IMAGE_DEBUG_TYPE_EXCEPTION\";\n break;\n case 6:\n std::cout << \"IMAGE_DEBUG_TYPE_FIXUP\";\n break;\n case 7:\n std::cout << \"IMAGE_DEBUG_TYPE_OMAP_TO_SRC\";\n break;\n case 8:\n std::cout << \"IMAGE_DEBUG_TYPE_OMAP_FROM_SRC\";\n break;\n case 9:\n std::cout << \"IMAGE_DEBUG_TYPE_BORLAND\";\n break;\n case 10:\n std::cout << \"IMAGE_DEBUG_TYPE_RESERVED10\";\n break;\n case 11:\n std::cout << \"IMAGE_DEBUG_TYPE_CLSID\";\n break;\n case 12:\n std::cout << \"IMAGE_DEBUG_TYPE_VC_FEATURE\";\n break;\n case 13:\n std::cout << \"IMAGE_DEBUG_TYPE_POGO\";\n break;\n case 14:\n std::cout << \"IMAGE_DEBUG_TYPE_ILTCG\";\n break;\n case 15:\n std::cout << \"IMAGE_DEBUG_TYPE_MPX\";\n break;\n case 16:\n std::cout << \"IMAGE_DEBUG_TYPE_REPRO\";\n break;\n case 20:\n std::cout << \"IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS\";\n break;\n default:\n std::cout << \"INVALID\";\n break;\n }\n std::cout << \"\\n\";\n std::cout << \"Debug Directory Data: \";\n for (uint32_t i = 0; i < data->bufLen; i++) {\n std::cout << \" 0x\" << std::hex << static_cast(data->buf[i]);\n }\n std::cout 
<< \"\\n\";\n\n return 0;\n}\n\nint printSymbols(void *N,\n const std::string &strName,\n const uint32_t &value,\n const int16_t §ionNumber,\n const uint16_t &type,\n const uint8_t &storageClass,\n const uint8_t &numberOfAuxSymbols) {\n static_cast(N);\n\n std::cout << \"Symbol Name: \" << strName << \"\\n\";\n std::cout << \"Symbol Value: 0x\" << std::hex << value << \"\\n\";\n\n std::cout << \"Symbol Section Number: \";\n switch (sectionNumber) {\n case IMAGE_SYM_UNDEFINED:\n std::cout << \"UNDEFINED\";\n break;\n case IMAGE_SYM_ABSOLUTE:\n std::cout << \"ABSOLUTE\";\n break;\n case IMAGE_SYM_DEBUG:\n std::cout << \"DEBUG\";\n break;\n default:\n std::cout << sectionNumber;\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Type: \";\n switch (type) {\n case IMAGE_SYM_TYPE_NULL:\n std::cout << \"NULL\";\n break;\n case IMAGE_SYM_TYPE_VOID:\n std::cout << \"VOID\";\n break;\n case IMAGE_SYM_TYPE_CHAR:\n std::cout << \"CHAR\";\n break;\n case IMAGE_SYM_TYPE_SHORT:\n std::cout << \"SHORT\";\n break;\n case IMAGE_SYM_TYPE_INT:\n std::cout << \"INT\";\n break;\n case IMAGE_SYM_TYPE_LONG:\n std::cout << \"LONG\";\n break;\n case IMAGE_SYM_TYPE_FLOAT:\n std::cout << \"FLOAT\";\n break;\n case IMAGE_SYM_TYPE_DOUBLE:\n std::cout << \"DOUBLE\";\n break;\n case IMAGE_SYM_TYPE_STRUCT:\n std::cout << \"STRUCT\";\n break;\n case IMAGE_SYM_TYPE_UNION:\n std::cout << \"UNION\";\n break;\n case IMAGE_SYM_TYPE_ENUM:\n std::cout << \"ENUM\";\n break;\n case IMAGE_SYM_TYPE_MOE:\n std::cout << \"IMAGE_SYM_TYPE_MOE\";\n break;\n case IMAGE_SYM_TYPE_BYTE:\n std::cout << \"BYTE\";\n break;\n case IMAGE_SYM_TYPE_WORD:\n std::cout << \"WORD\";\n break;\n case IMAGE_SYM_TYPE_UINT:\n std::cout << \"UINT\";\n break;\n case IMAGE_SYM_TYPE_DWORD:\n std::cout << \"DWORD\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Storage Class: \";\n switch (storageClass) {\n case IMAGE_SYM_CLASS_END_OF_FUNCTION:\n std::cout << 
\"FUNCTION\";\n break;\n case IMAGE_SYM_CLASS_NULL:\n std::cout << \"NULL\";\n break;\n case IMAGE_SYM_CLASS_AUTOMATIC:\n std::cout << \"AUTOMATIC\";\n break;\n case IMAGE_SYM_CLASS_EXTERNAL:\n std::cout << \"EXTERNAL\";\n break;\n case IMAGE_SYM_CLASS_STATIC:\n std::cout << \"STATIC\";\n break;\n case IMAGE_SYM_CLASS_REGISTER:\n std::cout << \"REGISTER\";\n break;\n case IMAGE_SYM_CLASS_EXTERNAL_DEF:\n std::cout << \"EXTERNAL DEF\";\n break;\n case IMAGE_SYM_CLASS_LABEL:\n std::cout << \"LABEL\";\n break;\n case IMAGE_SYM_CLASS_UNDEFINED_LABEL:\n std::cout << \"UNDEFINED LABEL\";\n break;\n case IMAGE_SYM_CLASS_MEMBER_OF_STRUCT:\n std::cout << \"MEMBER OF STRUCT\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Number of Aux Symbols: \"\n << static_cast(numberOfAuxSymbols) << \"\\n\";\n\n return 0;\n}\n\nint printRich(void *N, const rich_entry &r) {\n static_cast(N);\n std::cout << std::dec;\n std::cout << std::setw(10) << \"ProdId:\" << std::setw(7) << r.ProductId;\n std::cout << std::setw(10) << \"Build:\" << std::setw(7) << r.BuildNumber;\n std::cout << std::setw(10) << \"Name:\" << std::setw(40)\n << GetRichProductName(r.BuildNumber) << \" \"\n << GetRichObjectType(r.ProductId);\n std::cout << std::setw(10) << \"Count:\" << std::setw(7) << r.Count << \"\\n\";\n return 0;\n}\n\nint printRsrc(void *N, const resource &r) {\n static_cast(N);\n\n if (r.type_str.length())\n std::cout << \"Type (string): \" << r.type_str << \"\\n\";\n else\n std::cout << \"Type: 0x\" << std::hex << r.type << \"\\n\";\n\n if (r.name_str.length())\n std::cout << \"Name (string): \" << r.name_str << \"\\n\";\n else\n std::cout << \"Name: 0x\" << std::hex << r.name << \"\\n\";\n\n if (r.lang_str.length())\n std::cout << \"Lang (string): \" << r.lang_str << \"\\n\";\n else\n std::cout << \"Lang: 0x\" << std::hex << r.lang << \"\\n\";\n\n std::cout << \"Codepage: 0x\" << std::hex << r.codepage << \"\\n\";\n std::cout << \"RVA: 
\" << std::dec << r.RVA << \"\\n\";\n std::cout << \"Size: \" << std::dec << r.size << \"\\n\";\n return 0;\n}\n\nint printSecs(void *N,\n const VA &secBase,\n const std::string &secName,\n const image_section_header &s,\n const bounded_buffer *data) {\n static_cast(N);\n static_cast(s);\n\n std::cout << \"Sec Name: \" << secName << \"\\n\";\n std::cout << \"Sec Base: 0x\" << std::hex << secBase << \"\\n\";\n if (data)\n std::cout << \"Sec Size: \" << std::dec << data->bufLen << \"\\n\";\n else\n std::cout << \"Sec Size: 0\"\n << \"\\n\";\n return 0;\n}\n\n#define DUMP_FIELD(x) \\\n std::cout << \"\" #x << \": 0x\"; \\\n std::cout << std::hex << static_cast(p->peHeader.x) << \"\\n\";\n#define DUMP_DEC_FIELD(x) \\\n std::cout << \"\" #x << \": \"; \\\n std::cout << std::dec << static_cast(p->peHeader.x) << \"\\n\";\n#define DUMP_BOOL_FIELD(x) \\\n std::cout << \"\" #x << \": \"; \\\n std::cout << std::boolalpha << static_cast(p->peHeader.x) << \"\\n\";\n\nint main(int argc, char *argv[]) {\n\n argh::parser cmdl(argv);\n\n if (cmdl[{\"-h\", \"--help\"}] || argc <= 1) {\n std::cout << \"dump-pe utility from Trail of Bits\\n\";\n std::cout << \"Repository: https://github.com/trailofbits/pe-parse\\n\\n\";\n std::cout << \"Usage:\\n\\tdump-pe /path/to/executable.exe\\n\";\n return 0;\n } else if (cmdl[{\"-v\", \"--version\"}]) {\n std::cout << \"dump-pe (pe-parse) version \" << PEPARSE_VERSION << \"\\n\";\n return 0;\n }\n\n parsed_pe *p = ParsePEFromFile(cmdl[1].c_str());\n\n if (p == nullptr) {\n std::cout << \"Error: \" << GetPEErr() << \" (\" << GetPEErrString() << \")\"\n << \"\\n\";\n std::cout << \"Location: \" << GetPEErrLoc() << \"\\n\";\n return 1;\n }\n\n if (p != NULL) {\n // Print DOS header\n DUMP_FIELD(dos.e_magic);\n DUMP_FIELD(dos.e_cp);\n DUMP_FIELD(dos.e_crlc);\n DUMP_FIELD(dos.e_cparhdr);\n DUMP_FIELD(dos.e_minalloc);\n DUMP_FIELD(dos.e_maxalloc);\n DUMP_FIELD(dos.e_ss);\n DUMP_FIELD(dos.e_sp);\n DUMP_FIELD(dos.e_csum);\n DUMP_FIELD(dos.e_ip);\n 
DUMP_FIELD(dos.e_cs);\n DUMP_FIELD(dos.e_lfarlc);\n DUMP_FIELD(dos.e_ovno);\n DUMP_FIELD(dos.e_res[0]);\n DUMP_FIELD(dos.e_res[1]);\n DUMP_FIELD(dos.e_res[2]);\n DUMP_FIELD(dos.e_res[3]);\n DUMP_FIELD(dos.e_oemid);\n DUMP_FIELD(dos.e_oeminfo);\n DUMP_FIELD(dos.e_res2[0]);\n DUMP_FIELD(dos.e_res2[1]);\n DUMP_FIELD(dos.e_res2[2]);\n DUMP_FIELD(dos.e_res2[3]);\n DUMP_FIELD(dos.e_res2[4]);\n DUMP_FIELD(dos.e_res2[5]);\n DUMP_FIELD(dos.e_res2[6]);\n DUMP_FIELD(dos.e_res2[7]);\n DUMP_FIELD(dos.e_res2[8]);\n DUMP_FIELD(dos.e_res2[9]);\n DUMP_FIELD(dos.e_lfanew);\n // Print Rich header info\n DUMP_BOOL_FIELD(rich.isPresent);\n if (p->peHeader.rich.isPresent) {\n DUMP_FIELD(rich.DecryptionKey);\n DUMP_FIELD(rich.Checksum);\n DUMP_BOOL_FIELD(rich.isValid);\n IterRich(p, printRich, NULL);\n }\n // print out some things\n DUMP_FIELD(nt.Signature);\n DUMP_FIELD(nt.FileHeader.Machine);\n DUMP_FIELD(nt.FileHeader.NumberOfSections);\n DUMP_DEC_FIELD(nt.FileHeader.TimeDateStamp);\n DUMP_FIELD(nt.FileHeader.PointerToSymbolTable);\n DUMP_DEC_FIELD(nt.FileHeader.NumberOfSymbols);\n DUMP_FIELD(nt.FileHeader.SizeOfOptionalHeader);\n DUMP_FIELD(nt.FileHeader.Characteristics);\n if (p->peHeader.nt.OptionalMagic == NT_OPTIONAL_32_MAGIC) {\n DUMP_FIELD(nt.OptionalHeader.Magic);\n DUMP_DEC_FIELD(nt.OptionalHeader.MajorLinkerVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.MinorLinkerVersion);\n DUMP_FIELD(nt.OptionalHeader.SizeOfCode);\n DUMP_FIELD(nt.OptionalHeader.SizeOfInitializedData);\n DUMP_FIELD(nt.OptionalHeader.SizeOfUninitializedData);\n DUMP_FIELD(nt.OptionalHeader.AddressOfEntryPoint);\n DUMP_FIELD(nt.OptionalHeader.BaseOfCode);\n DUMP_FIELD(nt.OptionalHeader.BaseOfData);\n DUMP_FIELD(nt.OptionalHeader.ImageBase);\n DUMP_FIELD(nt.OptionalHeader.SectionAlignment);\n DUMP_FIELD(nt.OptionalHeader.FileAlignment);\n DUMP_DEC_FIELD(nt.OptionalHeader.MajorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.MinorOperatingSystemVersion);\n 
DUMP_DEC_FIELD(nt.OptionalHeader.Win32VersionValue);\n DUMP_FIELD(nt.OptionalHeader.SizeOfImage);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeaders);\n DUMP_FIELD(nt.OptionalHeader.CheckSum);\n DUMP_FIELD(nt.OptionalHeader.Subsystem);\n DUMP_FIELD(nt.OptionalHeader.DllCharacteristics);\n DUMP_FIELD(nt.OptionalHeader.SizeOfStackReserve);\n DUMP_FIELD(nt.OptionalHeader.SizeOfStackCommit);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeapReserve);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeapCommit);\n DUMP_FIELD(nt.OptionalHeader.LoaderFlags);\n DUMP_DEC_FIELD(nt.OptionalHeader.NumberOfRvaAndSizes);\n } else {\n DUMP_FIELD(nt.OptionalHeader64.Magic);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MajorLinkerVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MinorLinkerVersion);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfCode);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfInitializedData);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfUninitializedData);\n DUMP_FIELD(nt.OptionalHeader64.AddressOfEntryPoint);\n DUMP_FIELD(nt.OptionalHeader64.BaseOfCode);\n DUMP_FIELD(nt.OptionalHeader64.ImageBase);\n DUMP_FIELD(nt.OptionalHeader64.SectionAlignment);\n DUMP_FIELD(nt.OptionalHeader64.FileAlignment);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MajorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MinorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.Win32VersionValue);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfImage);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeaders);\n DUMP_FIELD(nt.OptionalHeader64.CheckSum);\n DUMP_FIELD(nt.OptionalHeader64.Subsystem);\n DUMP_FIELD(nt.OptionalHeader64.DllCharacteristics);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfStackReserve);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfStackCommit);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeapReserve);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeapCommit);\n DUMP_FIELD(nt.OptionalHeader64.LoaderFlags);\n DUMP_DEC_FIELD(nt.OptionalHeader64.NumberOfRvaAndSizes);\n }\n\n#undef DUMP_FIELD\n#undef DUMP_DEC_FIELD\n\n std::cout << \"Imports: \"\n << 
\"\\n\";\n IterImpVAString(p, printImports, NULL);\n std::cout << \"Relocations: \"\n << \"\\n\";\n IterRelocs(p, printRelocs, NULL);\n std::cout << \"Debug Directories: \"\n << \"\\n\";\n IterDebugs(p, printDebugs, NULL);\n std::cout << \"Symbols (symbol table): \"\n << \"\\n\";\n IterSymbols(p, printSymbols, NULL);\n std::cout << \"Sections: \"\n << \"\\n\";\n IterSec(p, printSecs, NULL);\n std::cout << \"Exports: \"\n << \"\\n\";\n IterExpFull(p, printExps, NULL);\n\n // read the first 8 bytes from the entry point and print them\n VA entryPoint;\n if (GetEntryPoint(p, entryPoint)) {\n std::cout << \"First 8 bytes from entry point (0x\";\n std::cout << std::hex << entryPoint << \"):\"\n << \"\\n\";\n for (std::size_t i = 0; i < 8; i++) {\n std::uint8_t b;\n if (!ReadByteAtVA(p, i + entryPoint, b)) {\n std::cout << \" ERR\";\n } else {\n std::cout << \" 0x\" << std::hex << static_cast(b);\n }\n }\n\n std::cout << \"\\n\";\n }\n\n std::cout << \"Resources: \"\n << \"\\n\";\n IterRsrc(p, printRsrc, NULL);\n\n DestructParsedPE(p);\n\n return 0;\n }\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_reader.h", "class DxbcReader {\n public:\n template\n auto readEnum() {\n using Tx = std::underlying_type_t;\n return static_cast(this->readNum());\n }\n DxbcTag readTag() {\n DxbcTag tag;\n this->read(&tag, 4);\n return tag;\n }\n std::string readString() {\n std::string result;\n \n while (m_data[m_pos] != '\\0')\n result.push_back(m_data[m_pos++]);\n \n m_pos++;\n return result;\n }\n void read(void* dst, size_t n) {\n if (m_pos + n > m_size)\n throw DxvkError(\"DxbcReader::read: Unexpected end of file\");\n std::memcpy(dst, m_data + m_pos, n);\n m_pos += n;\n }\n void skip(size_t n) {\n if (m_pos + n > m_size)\n throw DxvkError(\"DxbcReader::skip: Unexpected end of file\");\n m_pos += n;\n }\n DxbcReader clone(size_t pos) const {\n if (pos > m_size)\n throw DxvkError(\"DxbcReader::clone: Invalid offset\");\n return DxbcReader(m_data + pos, m_size - pos);\n }\n 
DxbcReader resize(size_t size) const {\n if (size > m_size)\n throw DxvkError(\"DxbcReader::resize: Invalid size\");\n return DxbcReader(m_data, size, m_pos);\n }\n void store(std::ostream&& stream) const {\n stream.write(m_data, m_size);\n }\n private:\n const char* m_data = nullptr;\n size_t m_size = 0;\n size_t m_pos = 0;\n template\n T readNum() {\n T result;\n this->read(&result, sizeof(result));\n return result;\n }\n};"], ["/lsfg-vk/src/extract/trans.cpp", "#include \"extract/trans.hpp\"\n\n#include \n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing namespace Extract;\n\nstruct BindingOffsets {\n uint32_t bindingIndex{};\n uint32_t bindingOffset{};\n uint32_t setIndex{};\n uint32_t setOffset{};\n};\n\nstd::vector Extract::translateShader(std::vector bytecode) {\n // compile the shader\n dxvk::DxbcReader reader(reinterpret_cast(bytecode.data()), bytecode.size());\n dxvk::DxbcModule module(reader);\n const dxvk::DxbcModuleInfo info{};\n auto code = module.compile(info, \"CS\");\n\n // find all bindings\n std::vector bindingOffsets;\n std::vector varIds;\n for (auto ins : code) {\n if (ins.opCode() == spv::OpDecorate) {\n if (ins.arg(2) == spv::DecorationBinding) {\n const uint32_t varId = ins.arg(1);\n bindingOffsets.resize(std::max(bindingOffsets.size(), size_t(varId + 1)));\n bindingOffsets[varId].bindingIndex = ins.arg(3);\n bindingOffsets[varId].bindingOffset = ins.offset() + 3;\n varIds.push_back(varId);\n }\n\n if (ins.arg(2) == spv::DecorationDescriptorSet) {\n const uint32_t varId = ins.arg(1);\n bindingOffsets.resize(std::max(bindingOffsets.size(), size_t(varId + 1)));\n bindingOffsets[varId].setIndex = ins.arg(3);\n bindingOffsets[varId].setOffset = ins.offset() + 3;\n }\n }\n\n if (ins.opCode() == spv::OpFunction)\n break;\n }\n\n std::vector validBindings;\n for (const auto varId : varIds) {\n auto info = bindingOffsets[varId];\n\n if (info.bindingOffset)\n validBindings.push_back(info);\n }\n\n // patch 
binding offset\n #pragma clang diagnostic push\n #pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n for (size_t i = 0; i < validBindings.size(); i++)\n code.data()[validBindings.at(i).bindingOffset] // NOLINT\n = static_cast(i);\n #pragma clang diagnostic pop\n\n // return the new bytecode\n std::vector spirvBytecode(code.size());\n std::copy_n(reinterpret_cast(code.data()),\n code.size(), spirvBytecode.data());\n return spirvBytecode;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_instruction.h", "#pragma once\n\n#include \"spirv_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief SPIR-V instruction\n * \n * Helps parsing a single instruction, providing\n * access to the op code, instruction length and\n * instruction arguments.\n */\n class SpirvInstruction {\n \n public:\n \n SpirvInstruction() { }\n SpirvInstruction(uint32_t* code, uint32_t offset, uint32_t length)\n : m_code(code), m_offset(offset), m_length(length) { }\n \n /**\n * \\brief SPIR-V Op code\n * \\returns The op code\n */\n spv::Op opCode() const {\n return static_cast(\n this->arg(0) & spv::OpCodeMask);\n }\n \n /**\n * \\brief Instruction length\n * \\returns Number of DWORDs\n */\n uint32_t length() const {\n return this->arg(0) >> spv::WordCountShift;\n }\n \n /**\n * \\brief Instruction offset\n * \\returns Offset in DWORDs\n */\n uint32_t offset() const {\n return m_offset;\n }\n \n /**\n * \\brief Argument value\n * \n * Retrieves an argument DWORD. Note that some instructions\n * take 64-bit arguments which require more than one DWORD.\n * Arguments start at index 1. Calling this method with an\n * argument ID of 0 will return the opcode token.\n * \\param [in] idx Argument index, starting at 1\n * \\returns The argument value\n */\n uint32_t arg(uint32_t idx) const {\n const uint32_t index = m_offset + idx;\n return index < m_length ? 
m_code[index] : 0;\n }\n\n /**\n * \\brief Argument string\n *\n * Retrieves a pointer to a UTF-8-encoded string.\n * \\param [in] idx Argument index, starting at 1\n * \\returns Pointer to the literal string\n */\n const char* chr(uint32_t idx) const {\n const uint32_t index = m_offset + idx;\n return index < m_length ? reinterpret_cast(&m_code[index]) : nullptr;\n }\n \n /**\n * \\brief Changes the value of an argument\n * \n * \\param [in] idx Argument index, starting at 1\n * \\param [in] word New argument word\n */\n void setArg(uint32_t idx, uint32_t word) const {\n if (m_offset + idx < m_length)\n m_code[m_offset + idx] = word;\n }\n \n private:\n \n uint32_t* m_code = nullptr;\n uint32_t m_offset = 0;\n uint32_t m_length = 0;\n \n };\n \n \n /**\n * \\brief SPIR-V instruction iterator\n * \n * Convenient iterator that can be used\n * to process raw SPIR-V shader code.\n */\n class SpirvInstructionIterator {\n \n public:\n \n SpirvInstructionIterator() { }\n SpirvInstructionIterator(uint32_t* code, uint32_t offset, uint32_t length)\n : m_code (length != 0 ? code : nullptr),\n m_offset(length != 0 ? 
offset : 0),\n m_length(length) {\n if ((length >= 5) && (offset == 0) && (m_code[0] == spv::MagicNumber))\n this->advance(5);\n }\n \n SpirvInstructionIterator& operator ++ () {\n this->advance(SpirvInstruction(m_code, m_offset, m_length).length());\n return *this;\n }\n \n SpirvInstructionIterator operator ++ (int) {\n SpirvInstructionIterator result = *this;\n this->advance(SpirvInstruction(m_code, m_offset, m_length).length());\n return result;\n }\n \n SpirvInstruction operator * () const {\n return SpirvInstruction(m_code, m_offset, m_length);\n }\n \n bool operator == (const SpirvInstructionIterator& other) const {\n return this->m_code == other.m_code\n && this->m_offset == other.m_offset\n && this->m_length == other.m_length;\n }\n \n bool operator != (const SpirvInstructionIterator& other) const {\n return this->m_code != other.m_code\n || this->m_offset != other.m_offset\n || this->m_length != other.m_length;\n }\n \n private:\n \n uint32_t* m_code = nullptr;\n uint32_t m_offset = 0;\n uint32_t m_length = 0;\n \n void advance(uint32_t n) {\n if (m_offset + n < m_length) {\n m_offset += n;\n } else {\n m_code = nullptr;\n m_offset = 0;\n m_length = 0;\n }\n }\n \n };\n \n}"], ["/lsfg-vk/src/utils/utils.cpp", "#include \"utils/utils.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n#include \n#include // NOLINT\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Utils;\n\nstd::pair Utils::findQueue(VkDevice device, VkPhysicalDevice physicalDevice,\n VkDeviceCreateInfo* desc, VkQueueFlags flags) {\n std::vector enabledQueues(desc->queueCreateInfoCount);\n std::copy_n(desc->pQueueCreateInfos, enabledQueues.size(), enabledQueues.data());\n\n uint32_t familyCount{};\n Layer::ovkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount, nullptr);\n std::vector families(familyCount);\n 
Layer::ovkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount,\n families.data());\n\n std::optional idx;\n for (const auto& queueInfo : enabledQueues) {\n if ((queueInfo.queueFamilyIndex < families.size()) &&\n (families[queueInfo.queueFamilyIndex].queueFlags & flags)) {\n idx = queueInfo.queueFamilyIndex;\n break;\n }\n }\n if (!idx.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"No suitable queue found\");\n\n VkQueue queue{};\n Layer::ovkGetDeviceQueue(device, *idx, 0, &queue);\n\n auto res = Layer::ovkSetDeviceLoaderData(device, queue);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to set device loader data for queue\");\n\n return { *idx, queue };\n}\n\nuint64_t Utils::getDeviceUUID(VkPhysicalDevice physicalDevice) {\n VkPhysicalDeviceProperties properties{};\n Layer::ovkGetPhysicalDeviceProperties(physicalDevice, &properties);\n\n return static_cast(properties.vendorID) << 32 | properties.deviceID;\n}\n\nuint32_t Utils::getMaxImageCount(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface) {\n VkSurfaceCapabilitiesKHR capabilities{};\n auto res = Layer::ovkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice,\n surface, &capabilities);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get surface capabilities\");\n if (capabilities.maxImageCount == 0)\n return 999; // :3\n return capabilities.maxImageCount;\n}\n\nstd::vector Utils::addExtensions(const char* const* extensions, size_t count,\n const std::vector& requiredExtensions) {\n std::vector ext(count);\n std::copy_n(extensions, count, ext.data());\n\n for (const auto& e : requiredExtensions) {\n auto it = std::ranges::find_if(ext,\n [e](const char* extName) {\n return std::string(extName) == std::string(e);\n });\n if (it == ext.end())\n ext.push_back(e);\n }\n\n return ext;\n}\n\nvoid Utils::copyImage(VkCommandBuffer buf,\n VkImage src, VkImage dst,\n uint32_t width, uint32_t height,\n VkPipelineStageFlags pre, 
VkPipelineStageFlags post,\n bool makeSrcPresentable, bool makeDstPresentable) {\n const VkImageMemoryBarrier srcBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,\n .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n .image = src,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const VkImageMemoryBarrier dstBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = dst,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const std::vector barriers = { srcBarrier, dstBarrier };\n Layer::ovkCmdPipelineBarrier(buf,\n pre, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,\n 0, nullptr, 0, nullptr,\n static_cast(barriers.size()), barriers.data());\n\n const VkImageBlit imageBlit{\n .srcSubresource = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .layerCount = 1\n },\n .srcOffsets = {\n { 0, 0, 0 },\n { static_cast(width), static_cast(height), 1 }\n },\n .dstSubresource = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .layerCount = 1\n },\n .dstOffsets = {\n { 0, 0, 0 },\n { static_cast(width), static_cast(height), 1 }\n }\n };\n Layer::ovkCmdBlitImage(\n buf,\n src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n 1, &imageBlit,\n VK_FILTER_NEAREST\n );\n\n if (makeSrcPresentable) {\n const VkImageMemoryBarrier presentBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .image = src,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n Layer::ovkCmdPipelineBarrier(buf,\n VK_PIPELINE_STAGE_TRANSFER_BIT, post, 0,\n 0, nullptr, 0, 
nullptr,\n 1, &presentBarrier);\n }\n\n if (makeDstPresentable) {\n const VkImageMemoryBarrier presentBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT,\n .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .image = dst,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n Layer::ovkCmdPipelineBarrier(buf,\n VK_PIPELINE_STAGE_TRANSFER_BIT, post, 0,\n 0, nullptr, 0, nullptr,\n 1, &presentBarrier);\n }\n}\n\nnamespace {\n auto& logCounts() {\n static std::unordered_map map;\n return map;\n }\n}\n\nvoid Utils::logLimitN(const std::string& id, size_t n, const std::string& message) {\n auto& count = logCounts()[id];\n if (count <= n)\n std::cerr << \"lsfg-vk: \" << message << '\\n';\n if (count == n)\n std::cerr << \"(above message has been repeated \" << n << \" times, suppressing further)\\n\";\n count++;\n}\n\nvoid Utils::resetLimitN(const std::string& id) noexcept {\n logCounts().erase(id);\n}\n\nstd::pair Utils::getProcessName() {\n const char* process_name = std::getenv(\"LSFG_PROCESS\");\n if (process_name && *process_name != '\\0')\n return { process_name, process_name };\n\n const char* benchmark_flag = std::getenv(\"LSFG_BENCHMARK\");\n if (benchmark_flag)\n return { \"benchmark\", \"benchmark\" };\n std::array exe{};\n\n const ssize_t exe_len = readlink(\"/proc/self/exe\", exe.data(), exe.size() - 1);\n if (exe_len <= 0)\n return { \"Unknown Process\", \"unknown\" };\n exe.at(static_cast(exe_len)) = '\\0';\n\n std::ifstream comm_file(\"/proc/self/comm\");\n if (!comm_file.is_open())\n return { std::string(exe.data()), \"unknown\" };\n std::array comm{};\n comm_file.read(comm.data(), 256);\n comm.at(static_cast(comm_file.gcount())) = '\\0';\n std::string comm_str(comm.data());\n if (comm_str.back() == '\\n')\n comm_str.pop_back();\n\n return{ 
std::string(exe.data()), comm_str };\n}\n\nstd::string Utils::getConfigFile() {\n const char* configFile = std::getenv(\"LSFG_CONFIG\");\n if (configFile && *configFile != '\\0')\n return{configFile};\n const char* xdgPath = std::getenv(\"XDG_CONFIG_HOME\");\n if (xdgPath && *xdgPath != '\\0')\n return std::string(xdgPath) + \"/lsfg-vk/conf.toml\";\n const char* homePath = std::getenv(\"HOME\");\n if (homePath && *homePath != '\\0')\n return std::string(homePath) + \"/.config/lsfg-vk/conf.toml\";\n return \"/etc/lsfg-vk/conf.toml\";\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_pipelayout.h", "#pragma once\n\n#include \n\n#include \n\n#include \"dxvk_hash.h\"\n\n#include \"util_math.h\"\n#include \"util_bit.h\"\n#include \"util_flags.h\"\n\nnamespace dxvk {\n\n class DxvkDevice;\n class DxvkPipelineManager;\n\n /**\n * \\brief Order-invariant atomic access operation\n *\n * Information used to optimize barriers when a resource\n * is accessed exlusively via order-invariant stores.\n */\n struct DxvkAccessOp {\n enum OpType : uint16_t {\n None = 0x0u,\n Or = 0x1u,\n And = 0x2u,\n Xor = 0x3u,\n Add = 0x4u,\n IMin = 0x5u,\n IMax = 0x6u,\n UMin = 0x7u,\n UMax = 0x8u,\n\n StoreF = 0xdu,\n StoreUi = 0xeu,\n StoreSi = 0xfu,\n };\n\n DxvkAccessOp() = default;\n DxvkAccessOp(OpType t)\n : op(uint16_t(t)) { }\n\n DxvkAccessOp(OpType t, uint16_t constant)\n : op(uint16_t(t) | (constant << 4u)) { }\n\n uint16_t op = 0u;\n\n bool operator == (const DxvkAccessOp& t) const { return op == t.op; }\n bool operator != (const DxvkAccessOp& t) const { return op != t.op; }\n\n template, bool> = true>\n explicit operator T() const { return op; }\n };\n\n static_assert(sizeof(DxvkAccessOp) == sizeof(uint16_t));\n\n /**\n * \\brief Binding info\n *\n * Stores metadata for a single binding in\n * a given shader, or for the whole pipeline.\n */\n struct DxvkBindingInfo {\n VkDescriptorType descriptorType = VK_DESCRIPTOR_TYPE_MAX_ENUM; ///< Vulkan descriptor type\n uint32_t 
resourceBinding = 0u; ///< API binding slot for the resource\n VkImageViewType viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM; ///< Image view type\n VkShaderStageFlagBits stage = VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM; ///< Shader stage\n VkAccessFlags access = 0u; ///< Access mask for the resource\n DxvkAccessOp accessOp = DxvkAccessOp::None; ///< Order-invariant store type, if any\n bool uboSet = false; ///< Whether to include this in the UBO set\n bool isMultisampled = false; ///< Multisampled binding\n\n /**\n * \\brief Computes descriptor set index for the given binding\n *\n * This is determines based on the shader stages that use the binding.\n * \\returns Descriptor set index\n */\n uint32_t computeSetIndex() const;\n\n /**\n * \\brief Numeric value of the binding\n *\n * Used when sorting bindings.\n * \\returns Numeric value\n */\n uint32_t value() const;\n\n /**\n * \\brief Checks for equality\n *\n * \\param [in] other Binding to compare to\n * \\returns \\c true if both bindings are equal\n */\n bool eq(const DxvkBindingInfo& other) const;\n\n /**\n * \\brief Hashes binding info\n * \\returns Binding hash\n */\n size_t hash() const;\n\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_modinfo.h", "#pragma once\n\n#include \"dxbc_options.h\"\n\nnamespace dxvk {\n\n /**\n * \\brief Tessellation info\n * \n * Stores the maximum tessellation factor\n * to export from tessellation shaders.\n */\n struct DxbcTessInfo {\n float maxTessFactor;\n };\n\n /**\n * \\brief Xfb capture entry\n * \n * Stores an output variable to capture,\n * as well as the buffer to write it to.\n */\n struct DxbcXfbEntry {\n const char* semanticName;\n uint32_t semanticIndex;\n uint32_t componentIndex;\n uint32_t componentCount;\n uint32_t streamId;\n uint32_t bufferId;\n uint32_t offset;\n };\n\n /**\n * \\brief Xfb info\n * \n * Stores capture entries and output buffer\n * strides. 
This structure must only be\n * defined if \\c entryCount is non-zero.\n */\n struct DxbcXfbInfo {\n uint32_t entryCount;\n DxbcXfbEntry entries[128];\n uint32_t strides[4];\n int32_t rasterizedStream;\n };\n\n /**\n * \\brief Shader module info\n * \n * Stores information which may affect shader compilation.\n * This data can be supplied by the client API implementation.\n */\n struct DxbcModuleInfo {\n DxbcOptions options;\n DxbcTessInfo* tess;\n DxbcXfbInfo* xfb;\n };\n\n}"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_tag.h", "#pragma once\n\n#include \"dxbc_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Four-character tag\n * \n * Used to identify chunks in the\n * compiled DXBC file by name.\n */\n class DxbcTag {\n \n public:\n \n DxbcTag() {\n for (size_t i = 0; i < 4; i++)\n m_chars[i] = '\\0';\n }\n \n DxbcTag(const char* tag) {\n for (size_t i = 0; i < 4; i++)\n m_chars[i] = tag[i];\n }\n \n bool operator == (const DxbcTag& other) const {\n bool result = true;\n for (size_t i = 0; i < 4; i++)\n result &= m_chars[i] == other.m_chars[i];\n return result;\n }\n \n bool operator != (const DxbcTag& other) const {\n return !this->operator == (other);\n }\n \n const char* operator & () const { return m_chars; }\n char* operator & () { return m_chars; }\n \n private:\n \n char m_chars[4];\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_chunk_shex.h", "class DxbcShex {\n public:\n DxbcShex(DxbcReader reader) {\n // The shader version and type are stored in a 32-bit unit,\n // where the first byte contains the major and minor version\n // numbers, and the high word contains the program type.\n reader.skip(2);\n auto pType = reader.readEnum();\n m_programInfo = DxbcProgramInfo(pType);\n \n // Read the actual shader code as an array of DWORDs.\n auto codeLength = reader.readu32() - 2;\n m_code.resize(codeLength);\n reader.read(m_code.data(), codeLength * sizeof(uint32_t));\n }\n ~DxbcShex() {\n \n }\n private:\n DxbcProgramInfo m_programInfo;\n 
std::vector m_code;\n};"], ["/lsfg-vk/thirdparty/pe-parse/pepy/pepy.cpp", "/*\n * Copyright (c) 2013, Wesley Shields . All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n#include \n#include \n#include \n\nusing namespace peparse;\n\n/* NOTE(ww): These don't necessarily have to be the same, but currently are.\n */\n#define PEPY_VERSION PEPARSE_VERSION\n\n/* These are used to across multiple objects. 
*/\n#define PEPY_OBJECT_GET(OBJ, ATTR) \\\n static PyObject *pepy_##OBJ##_get_##ATTR(PyObject *self, void *closure) { \\\n Py_INCREF(((pepy_##OBJ *) self)->ATTR); \\\n return ((pepy_##OBJ *) self)->ATTR; \\\n }\n\n#define OBJECTGETTER(OBJ, ATTR, DOC) \\\n { \\\n (char *) #ATTR, (getter) pepy_##OBJ##_get_##ATTR, \\\n (setter) pepy_attr_not_writable, (char *) #DOC, NULL \\\n }\n\n/* 'OPTIONAL' references the fact that these are from the Optional Header */\n#define OBJECTGETTER_OPTIONAL(ATTR, DOC) \\\n { \\\n (char *) #ATTR, (getter) pepy_parsed_get_optional_##ATTR, \\\n (setter) pepy_attr_not_writable, (char *) #DOC, NULL \\\n }\n\nstatic PyObject *pepy_error;\n\nstruct pepy {\n PyObject_HEAD\n};\n\nstruct pepy_parsed {\n PyObject_HEAD parsed_pe *pe;\n};\n\nstruct pepy_section {\n PyObject_HEAD PyObject *name;\n PyObject *base;\n PyObject *length;\n PyObject *virtaddr;\n PyObject *virtsize;\n PyObject *numrelocs;\n PyObject *numlinenums;\n PyObject *characteristics;\n PyObject *data;\n};\n\nstruct pepy_resource {\n PyObject_HEAD PyObject *type_str;\n PyObject *name_str;\n PyObject *lang_str;\n PyObject *type;\n PyObject *name;\n PyObject *lang;\n PyObject *codepage;\n PyObject *RVA;\n PyObject *size;\n PyObject *data;\n};\n\nstruct pepy_import {\n PyObject_HEAD PyObject *name;\n PyObject *sym;\n PyObject *addr;\n};\n\nstruct pepy_export {\n PyObject_HEAD PyObject *mod;\n PyObject *func;\n PyObject *addr;\n};\n\nstruct pepy_relocation {\n PyObject_HEAD PyObject *type;\n PyObject *addr;\n};\n\n/* None of the attributes in these objects are writable. 
*/\nstatic int\npepy_attr_not_writable(PyObject *self, PyObject *value, void *closure) {\n PyErr_SetString(PyExc_TypeError, \"Attribute not writable.\");\n return -1;\n}\n\nstatic PyObject *\npepy_import_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_import *self;\n\n self = (pepy_import *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_import_init(pepy_import *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OOO:pepy_import_init\", &self->name, &self->sym, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_import_dealloc(pepy_import *self) {\n Py_XDECREF(self->name);\n Py_XDECREF(self->sym);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(import, name);\nPEPY_OBJECT_GET(import, sym);\nPEPY_OBJECT_GET(import, addr);\n\nstatic PyGetSetDef pepy_import_getseters[] = {\n OBJECTGETTER(import, name, \"Name\"),\n OBJECTGETTER(import, sym, \"Symbol\"),\n OBJECTGETTER(import, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_import_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.import\", /* tp_name */\n sizeof(pepy_import), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_import_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy import object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_import_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset 
*/\n (initproc) pepy_import_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_import_new /* tp_new */\n};\n\nstatic PyObject *\npepy_export_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_export *self;\n\n self = (pepy_export *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_export_init(pepy_export *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OOO:pepy_export_init\", &self->mod, &self->func, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_export_dealloc(pepy_export *self) {\n Py_XDECREF(self->mod);\n Py_XDECREF(self->func);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(export, mod);\nPEPY_OBJECT_GET(export, func);\nPEPY_OBJECT_GET(export, addr);\n\nstatic PyGetSetDef pepy_export_getseters[] = {\n OBJECTGETTER(export, mod, \"Module\"),\n OBJECTGETTER(export, func, \"Function\"),\n OBJECTGETTER(export, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_export_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.export\", /* tp_name */\n sizeof(pepy_export), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_export_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy export object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_export_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_export_init, /* tp_init */\n 0, /* tp_alloc 
*/\n pepy_export_new /* tp_new */\n};\n\nstatic PyObject *\npepy_relocation_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_relocation *self;\n\n self = (pepy_relocation *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_relocation_init(pepy_relocation *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OO:pepy_relocation_init\", &self->type, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_relocation_dealloc(pepy_relocation *self) {\n Py_XDECREF(self->type);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(relocation, type);\nPEPY_OBJECT_GET(relocation, addr);\n\nstatic PyGetSetDef pepy_relocation_getseters[] = {\n OBJECTGETTER(relocation, type, \"Type\"),\n OBJECTGETTER(relocation, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_relocation_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.relocation\", /* tp_name */\n sizeof(pepy_relocation), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_relocation_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy relocation object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_relocation_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_relocation_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_relocation_new /* tp_new */\n};\n\nstatic PyObject *\npepy_section_new(PyTypeObject 
*type, PyObject *args, PyObject *kwds) {\n pepy_section *self;\n\n self = (pepy_section *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_section_init(pepy_section *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(args,\n \"OOOOOOOOO:pepy_section_init\",\n &self->name,\n &self->base,\n &self->length,\n &self->virtaddr,\n &self->virtsize,\n &self->numrelocs,\n &self->numlinenums,\n &self->characteristics,\n &self->data))\n return -1;\n return 0;\n}\n\nstatic void pepy_section_dealloc(pepy_section *self) {\n Py_XDECREF(self->name);\n Py_XDECREF(self->base);\n Py_XDECREF(self->length);\n Py_XDECREF(self->virtaddr);\n Py_XDECREF(self->virtsize);\n Py_XDECREF(self->numrelocs);\n Py_XDECREF(self->numlinenums);\n Py_XDECREF(self->characteristics);\n Py_XDECREF(self->data);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(section, name);\nPEPY_OBJECT_GET(section, base);\nPEPY_OBJECT_GET(section, length);\nPEPY_OBJECT_GET(section, virtaddr);\nPEPY_OBJECT_GET(section, virtsize);\nPEPY_OBJECT_GET(section, numrelocs);\nPEPY_OBJECT_GET(section, numlinenums);\nPEPY_OBJECT_GET(section, characteristics);\nPEPY_OBJECT_GET(section, data);\n\nstatic PyGetSetDef pepy_section_getseters[] = {\n OBJECTGETTER(section, name, \"Name\"),\n OBJECTGETTER(section, base, \"Base address\"),\n OBJECTGETTER(section, length, \"Length\"),\n OBJECTGETTER(section, virtaddr, \"Virtual address\"),\n OBJECTGETTER(section, virtsize, \"Virtual size\"),\n OBJECTGETTER(section, numrelocs, \"Number of relocations\"),\n OBJECTGETTER(section, numlinenums, \"Number of line numbers\"),\n OBJECTGETTER(section, characteristics, \"Characteristics\"),\n OBJECTGETTER(section, data, \"Section data\"),\n {NULL}};\n\nstatic PyTypeObject pepy_section_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.section\", /* tp_name */\n sizeof(pepy_section), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_section_dealloc, /* tp_dealloc */\n 
0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy section object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_section_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_section_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_section_new /* tp_new */\n};\n\nstatic PyObject *\npepy_resource_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_resource *self;\n\n self = (pepy_resource *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_resource_init(pepy_resource *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(args,\n \"OOOOOOOOOO:pepy_resource_init\",\n &self->type_str,\n &self->name_str,\n &self->lang_str,\n &self->type,\n &self->name,\n &self->lang,\n &self->codepage,\n &self->RVA,\n &self->size,\n &self->data))\n return -1;\n\n return 0;\n}\n\nstatic void pepy_resource_dealloc(pepy_resource *self) {\n Py_XDECREF(self->type_str);\n Py_XDECREF(self->name_str);\n Py_XDECREF(self->lang_str);\n Py_XDECREF(self->type);\n Py_XDECREF(self->name);\n Py_XDECREF(self->lang);\n Py_XDECREF(self->codepage);\n Py_XDECREF(self->RVA);\n Py_XDECREF(self->size);\n Py_XDECREF(self->data);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(resource, type_str);\nPEPY_OBJECT_GET(resource, name_str);\nPEPY_OBJECT_GET(resource, lang_str);\nPEPY_OBJECT_GET(resource, type);\nPEPY_OBJECT_GET(resource, name);\nPEPY_OBJECT_GET(resource, 
lang);\nPEPY_OBJECT_GET(resource, codepage);\nPEPY_OBJECT_GET(resource, RVA);\nPEPY_OBJECT_GET(resource, size);\nPEPY_OBJECT_GET(resource, data);\n\nstatic PyObject *pepy_resource_type_as_str(PyObject *self, PyObject *args) {\n PyObject *ret;\n char *str;\n long type;\n\n type = PyLong_AsUnsignedLong(((pepy_resource *) self)->type);\n if (type == -1) {\n if (PyErr_Occurred()) {\n PyErr_PrintEx(0);\n return NULL;\n }\n }\n switch ((resource_type) type) {\n case (RT_CURSOR):\n str = (char *) \"CURSOR\";\n break;\n case (RT_BITMAP):\n str = (char *) \"BITMAP\";\n break;\n case (RT_ICON):\n str = (char *) \"ICON\";\n break;\n case (RT_MENU):\n str = (char *) \"MENU\";\n break;\n case (RT_DIALOG):\n str = (char *) \"DIALOG\";\n break;\n case (RT_STRING):\n str = (char *) \"STRING\";\n break;\n case (RT_FONTDIR):\n str = (char *) \"FONTDIR\";\n break;\n case (RT_FONT):\n str = (char *) \"FONT\";\n break;\n case (RT_ACCELERATOR):\n str = (char *) \"ACCELERATOR\";\n break;\n case (RT_RCDATA):\n str = (char *) \"RCDATA\";\n break;\n case (RT_MESSAGETABLE):\n str = (char *) \"MESSAGETABLE\";\n break;\n case (RT_GROUP_CURSOR):\n str = (char *) \"GROUP_CURSOR\";\n break;\n case (RT_GROUP_ICON):\n str = (char *) \"GROUP_ICON\";\n break;\n case (RT_VERSION):\n str = (char *) \"VERSION\";\n break;\n case (RT_DLGINCLUDE):\n str = (char *) \"DLGINCLUDE\";\n break;\n case (RT_PLUGPLAY):\n str = (char *) \"PLUGPLAY\";\n break;\n case (RT_VXD):\n str = (char *) \"VXD\";\n break;\n case (RT_ANICURSOR):\n str = (char *) \"ANICURSOR\";\n break;\n case (RT_ANIICON):\n str = (char *) \"ANIICON\";\n break;\n case (RT_HTML):\n str = (char *) \"HTML\";\n break;\n case (RT_MANIFEST):\n str = (char *) \"MANIFEST\";\n break;\n default:\n str = (char *) \"UNKNOWN\";\n break;\n }\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyMethodDef pepy_resource_methods[] = {\n 
{\"type_as_str\",\n pepy_resource_type_as_str,\n METH_NOARGS,\n \"Return the resource type as a string.\"},\n {NULL}};\n\nstatic PyGetSetDef pepy_resource_getseters[] = {\n OBJECTGETTER(resource, type_str, \"Type string\"),\n OBJECTGETTER(resource, name_str, \"Name string\"),\n OBJECTGETTER(resource, lang_str, \"Lang string\"),\n OBJECTGETTER(resource, type, \"Type\"),\n OBJECTGETTER(resource, name, \"Name\"),\n OBJECTGETTER(resource, lang, \"Language\"),\n OBJECTGETTER(resource, codepage, \"Codepage\"),\n OBJECTGETTER(resource, RVA, \"RVA\"),\n OBJECTGETTER(resource, size, \"Size (specified in RDAT)\"),\n OBJECTGETTER(resource, data, \"Resource data\"),\n {NULL}};\n\nstatic PyTypeObject pepy_resource_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.resource\", /* tp_name */\n sizeof(pepy_resource), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_resource_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy resource object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n pepy_resource_methods, /* tp_methods */\n 0, /* tp_members */\n pepy_resource_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_resource_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_resource_new /* tp_new */\n};\n\nstatic PyObject *\npepy_parsed_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_parsed *self;\n\n self = (pepy_parsed *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int 
pepy_parsed_init(pepy_parsed *self, PyObject *args, PyObject *kwds) {\n char *pe_path;\n\n if (!PyArg_ParseTuple(args, \"s:pepy_parse\", &pe_path))\n return -1;\n\n if (!pe_path)\n return -1;\n\n self->pe = ParsePEFromFile(pe_path);\n if (!self->pe) {\n return -2;\n }\n\n return 0;\n}\n\nstatic void pepy_parsed_dealloc(pepy_parsed *self) {\n DestructParsedPE(self->pe);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nstatic PyObject *pepy_parsed_get_entry_point(PyObject *self, PyObject *args) {\n VA entrypoint;\n PyObject *ret;\n\n if (!GetEntryPoint(((pepy_parsed *) self)->pe, entrypoint))\n Py_RETURN_NONE;\n\n ret = PyLong_FromUnsignedLongLong(entrypoint);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return object.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_machine_as_str(PyObject *self,\n PyObject *args) {\n PyObject *ret;\n const char *str;\n\n str = GetMachineAsString(((pepy_parsed *) self)->pe);\n if (!str)\n Py_RETURN_NONE;\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_subsystem_as_str(PyObject *self,\n PyObject *args) {\n PyObject *ret;\n const char *str;\n\n str = GetSubsystemAsString(((pepy_parsed *) self)->pe);\n if (!str)\n Py_RETURN_NONE;\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_bytes(PyObject *self, PyObject *args) {\n uint64_t start;\n Py_ssize_t len, idx;\n PyObject *ret;\n\n if (!PyArg_ParseTuple(args, \"KK:pepy_parsed_get_bytes\", &start, &len))\n return NULL;\n\n /*\n * XXX: a new implementation read all bytes in char* and use\n * PybyteArray_FromStringAndSize\n */\n\n uint8_t *buf = new (std::nothrow) uint8_t[len];\n if (!buf) {\n /* in case allocation failed */\n PyErr_SetString(pepy_error,\n 
\"Unable to create initial buffer (allocation failure).\");\n return NULL;\n }\n\n for (idx = 0; idx < len; idx++) {\n if (!ReadByteAtVA(((pepy_parsed *) self)->pe, start + idx, buf[idx]))\n break;\n }\n\n /* use idx as content length, if we get less than asked for */\n ret = PyByteArray_FromStringAndSize(reinterpret_cast(buf), idx);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new byte array.\");\n return NULL;\n }\n\n delete[] buf;\n return ret;\n}\n\n/*\n * This is used to convert bounded buffers into python byte array objects.\n * In case the buffer is NULL, return an empty bytearray.\n */\nstatic PyObject *pepy_data_converter(bounded_buffer *data) {\n PyObject *ret;\n const char *str;\n Py_ssize_t len;\n\n if (!data || !data->buf) {\n str = \"\";\n len = 0;\n } else {\n str = (const char *) data->buf;\n len = data->bufLen;\n }\n\n ret = PyByteArray_FromStringAndSize(str, len);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to convert data to byte array.\");\n return NULL;\n }\n\n return ret;\n}\n\nint section_callback(void *cbd,\n const VA &base,\n const std::string &name,\n const image_section_header &s,\n const bounded_buffer *data) {\n uint32_t buflen;\n PyObject *sect;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * I've seen some interesting binaries with a section where the\n * PointerToRawData and SizeOfRawData are invalid. The parser library\n * handles this by setting sectionData to NULL as returned by splitBuffer().\n * The sectionData (passed in to us as *data) is converted using\n * pepy_data_converter() which will return an empty string object.\n * However, we need to address the fact that we pass an invalid length\n * via data->bufLen.\n */\n if (!data) {\n buflen = 0;\n } else {\n buflen = data->bufLen;\n }\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * section type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"sKKIIHHIO&\",\n name.c_str(),\n base,\n buflen,\n s.VirtualAddress,\n s.Misc.VirtualSize,\n s.NumberOfRelocations,\n s.NumberOfLinenumbers,\n s.Characteristics,\n pepy_data_converter,\n data);\n if (!tuple)\n return 1;\n\n sect = pepy_section_new(&pepy_section_type, NULL, NULL);\n if (!sect) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_section_init((pepy_section *) sect, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, sect) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(sect);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_sections(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterSec(((pepy_parsed *) self)->pe, section_callback, ret);\n\n return ret;\n}\n\nint resource_callback(void *cbd, const resource &r) {\n PyObject *rsrc;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * section type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"s#s#s#IIIIIIO&\",\n r.type_str.c_str(),\n r.type_str.length(),\n r.name_str.c_str(),\n r.name_str.length(),\n r.lang_str.c_str(),\n r.lang_str.length(),\n r.type,\n r.name,\n r.lang,\n r.codepage,\n r.RVA,\n r.size,\n pepy_data_converter,\n r.buf);\n if (!tuple)\n return 1;\n\n rsrc = pepy_resource_new(&pepy_resource_type, NULL, NULL);\n if (!rsrc) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_resource_init((pepy_resource *) rsrc, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new resource.\");\n return 1;\n }\n\n if (PyList_Append(list, rsrc) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(rsrc);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_resources(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterRsrc(((pepy_parsed *) self)->pe, resource_callback, ret);\n\n return ret;\n}\n\nint import_callback(void *cbd,\n const VA &addr,\n const std::string &name,\n const std::string &sym) {\n PyObject *imp;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * import type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"ssI\", name.c_str(), sym.c_str(), addr);\n if (!tuple)\n return 1;\n\n imp = pepy_import_new(&pepy_import_type, NULL, NULL);\n if (!imp) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_import_init((pepy_import *) imp, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, imp) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(imp);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_imports(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterImpVAString(((pepy_parsed *) self)->pe, import_callback, ret);\n\n return ret;\n}\n\nint export_callback(void *cbd,\n const VA &addr,\n const std::string &mod,\n const std::string &func) {\n PyObject *exp;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * export type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"ssI\", mod.c_str(), func.c_str(), addr);\n if (!tuple)\n return 1;\n\n exp = pepy_export_new(&pepy_export_type, NULL, NULL);\n if (!exp) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_export_init((pepy_export *) exp, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, exp) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(exp);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_exports(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n /*\n * This could use the same callback and object as imports but the names\n * of the attributes would be slightly off.\n */\n IterExpVA(((pepy_parsed *) self)->pe, export_callback, ret);\n\n return ret;\n}\n\nint reloc_callback(void *cbd, const VA &addr, const reloc_type &type) {\n PyObject *reloc;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * relocation type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"II\", type, addr);\n if (!tuple)\n return 1;\n\n reloc = pepy_relocation_new(&pepy_relocation_type, NULL, NULL);\n if (!reloc) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_relocation_init((pepy_relocation *) reloc, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, reloc) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(reloc);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_relocations(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterRelocs(((pepy_parsed *) self)->pe, reloc_callback, ret);\n\n return ret;\n}\n\n#define PEPY_PARSED_GET(ATTR, VAL) \\\n static PyObject *pepy_parsed_get_##ATTR(PyObject *self, void *closure) { \\\n PyObject *ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n return ret; \\\n }\n\nPEPY_PARSED_GET(signature, Signature);\nPEPY_PARSED_GET(machine, FileHeader.Machine);\nPEPY_PARSED_GET(numberofsections, FileHeader.NumberOfSections);\nPEPY_PARSED_GET(timedatestamp, FileHeader.TimeDateStamp);\nPEPY_PARSED_GET(numberofsymbols, FileHeader.NumberOfSymbols);\nPEPY_PARSED_GET(characteristics, FileHeader.Characteristics);\nPEPY_PARSED_GET(magic, OptionalMagic);\n\n/*\n * This is used to get things from the optional header, which can be either\n * the PE32 or PE32+ version, depending upon the magic value. 
Technically\n * the magic is stored in the OptionalHeader, but to make life easier pe-parse\n * stores the value in nt_header_32 along with the appropriate optional header.\n * This is why \"magic\" is handled above, and not here.\n */\n#define PEPY_PARSED_GET_OPTIONAL(ATTR, VAL) \\\n static PyObject *pepy_parsed_get_optional_##ATTR(PyObject *self, \\\n void *closure) { \\\n PyObject *ret = NULL; \\\n if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic == \\\n NT_OPTIONAL_32_MAGIC) { \\\n ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n } else if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic == \\\n NT_OPTIONAL_64_MAGIC) { \\\n ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader64.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n } else { \\\n PyErr_SetString(pepy_error, \"Bad magic value.\"); \\\n } \\\n return ret; \\\n }\n\nPEPY_PARSED_GET_OPTIONAL(majorlinkerver, MajorLinkerVersion);\nPEPY_PARSED_GET_OPTIONAL(minorlinkerver, MinorLinkerVersion);\nPEPY_PARSED_GET_OPTIONAL(codesize, SizeOfCode);\nPEPY_PARSED_GET_OPTIONAL(initdatasize, SizeOfInitializedData);\nPEPY_PARSED_GET_OPTIONAL(uninitdatasize, SizeOfUninitializedData);\nPEPY_PARSED_GET_OPTIONAL(entrypointaddr, AddressOfEntryPoint);\nPEPY_PARSED_GET_OPTIONAL(baseofcode, BaseOfCode);\nPEPY_PARSED_GET_OPTIONAL(imagebase, ImageBase);\nPEPY_PARSED_GET_OPTIONAL(sectionalignement, SectionAlignment);\nPEPY_PARSED_GET_OPTIONAL(filealignment, FileAlignment);\nPEPY_PARSED_GET_OPTIONAL(majorosver, MajorOperatingSystemVersion);\nPEPY_PARSED_GET_OPTIONAL(minorosver, MinorOperatingSystemVersion);\nPEPY_PARSED_GET_OPTIONAL(win32ver, Win32VersionValue);\nPEPY_PARSED_GET_OPTIONAL(imagesize, SizeOfImage);\nPEPY_PARSED_GET_OPTIONAL(headersize, 
SizeOfHeaders);\nPEPY_PARSED_GET_OPTIONAL(checksum, CheckSum);\nPEPY_PARSED_GET_OPTIONAL(subsystem, Subsystem);\nPEPY_PARSED_GET_OPTIONAL(dllcharacteristics, DllCharacteristics);\nPEPY_PARSED_GET_OPTIONAL(stackreservesize, SizeOfStackReserve);\nPEPY_PARSED_GET_OPTIONAL(stackcommitsize, SizeOfStackCommit);\nPEPY_PARSED_GET_OPTIONAL(heapreservesize, SizeOfHeapReserve);\nPEPY_PARSED_GET_OPTIONAL(heapcommitsize, SizeOfHeapCommit);\nPEPY_PARSED_GET_OPTIONAL(loaderflags, LoaderFlags);\nPEPY_PARSED_GET_OPTIONAL(rvasandsize, NumberOfRvaAndSizes);\n\n/*\n * BaseOfData is only in PE32, not PE32+. Thus, it uses a non-standard\n * getter function compared to the other shared fields.\n */\nstatic PyObject *pepy_parsed_get_optional_baseofdata(PyObject *self,\n void *closure) {\n PyObject *ret = NULL;\n if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic ==\n NT_OPTIONAL_32_MAGIC) {\n ret = PyLong_FromUnsignedLong(\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader.BaseOfData);\n if (!ret)\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\");\n } else if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic ==\n NT_OPTIONAL_64_MAGIC) {\n PyErr_SetString(PyExc_AttributeError, \"Not available on PE32+.\");\n } else {\n PyErr_SetString(pepy_error, \"Bad magic value.\");\n }\n return ret;\n}\n\nstatic PyGetSetDef pepy_parsed_getseters[] = {\n OBJECTGETTER(parsed, signature, \"PE Signature\"),\n OBJECTGETTER(parsed, machine, \"Machine\"),\n OBJECTGETTER(parsed, numberofsections, \"Number of sections\"),\n OBJECTGETTER(parsed, timedatestamp, \"Timedate stamp\"),\n OBJECTGETTER(parsed, numberofsymbols, \"Number of symbols\"),\n OBJECTGETTER(parsed, characteristics, \"Characteristics\"),\n OBJECTGETTER(parsed, magic, \"Magic\"),\n OBJECTGETTER_OPTIONAL(majorlinkerver, \"Major linker version\"),\n OBJECTGETTER_OPTIONAL(minorlinkerver, \"Minor linker version\"),\n OBJECTGETTER_OPTIONAL(codesize, \"Size of code\"),\n OBJECTGETTER_OPTIONAL(initdatasize, \"Size 
of initialized data\"),\n OBJECTGETTER_OPTIONAL(uninitdatasize, \"Size of uninitialized data\"),\n OBJECTGETTER_OPTIONAL(entrypointaddr, \"Address of entry point\"),\n OBJECTGETTER_OPTIONAL(baseofcode, \"Base address of code\"),\n OBJECTGETTER_OPTIONAL(imagebase, \"Image base address\"),\n OBJECTGETTER_OPTIONAL(sectionalignement, \"Section alignment\"),\n OBJECTGETTER_OPTIONAL(filealignment, \"File alignment\"),\n OBJECTGETTER_OPTIONAL(majorosver, \"Major OS version\"),\n OBJECTGETTER_OPTIONAL(minorosver, \"Minor OS version\"),\n OBJECTGETTER_OPTIONAL(win32ver, \"Win32 version\"),\n OBJECTGETTER_OPTIONAL(imagesize, \"Size of image\"),\n OBJECTGETTER_OPTIONAL(headersize, \"Size of headers\"),\n OBJECTGETTER_OPTIONAL(checksum, \"Checksum\"),\n OBJECTGETTER_OPTIONAL(subsystem, \"Subsystem\"),\n OBJECTGETTER_OPTIONAL(dllcharacteristics, \"DLL characteristics\"),\n OBJECTGETTER_OPTIONAL(stackreservesize, \"Size of stack reserve\"),\n OBJECTGETTER_OPTIONAL(stackcommitsize, \"Size of stack commit\"),\n OBJECTGETTER_OPTIONAL(heapreservesize, \"Size of heap reserve\"),\n OBJECTGETTER_OPTIONAL(heapcommitsize, \"Size of heap commit\"),\n OBJECTGETTER_OPTIONAL(loaderflags, \"Loader flags\"),\n OBJECTGETTER_OPTIONAL(rvasandsize, \"Number of RVA and sizes\"),\n /* Base of data is only available in PE32, not PE32+. 
*/\n {(char *) \"baseofdata\",\n (getter) pepy_parsed_get_optional_baseofdata,\n (setter) pepy_attr_not_writable,\n (char *) \"Base address of data\",\n NULL},\n {NULL}};\n\nstatic PyMethodDef pepy_parsed_methods[] = {\n {\"get_entry_point\",\n pepy_parsed_get_entry_point,\n METH_NOARGS,\n \"Return the entry point address.\"},\n {\"get_machine_as_str\",\n pepy_parsed_get_machine_as_str,\n METH_NOARGS,\n \"Return the machine as a human readable string.\"},\n {\"get_subsystem_as_str\",\n pepy_parsed_get_subsystem_as_str,\n METH_NOARGS,\n \"Return the subsystem as a human readable string.\"},\n {\"get_bytes\",\n pepy_parsed_get_bytes,\n METH_VARARGS,\n \"Return the first N bytes at a given address.\"},\n {\"get_sections\",\n pepy_parsed_get_sections,\n METH_NOARGS,\n \"Return a list of section objects.\"},\n {\"get_imports\",\n pepy_parsed_get_imports,\n METH_NOARGS,\n \"Return a list of import objects.\"},\n {\"get_exports\",\n pepy_parsed_get_exports,\n METH_NOARGS,\n \"Return a list of export objects.\"},\n {\"get_relocations\",\n pepy_parsed_get_relocations,\n METH_NOARGS,\n \"Return a list of relocation objects.\"},\n {\"get_resources\",\n pepy_parsed_get_resources,\n METH_NOARGS,\n \"Return a list of resource objects.\"},\n {NULL}};\n\nstatic PyTypeObject pepy_parsed_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.parsed\", /* tp_name */\n sizeof(pepy_parsed), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_parsed_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */\n \"pepy parsed object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset 
*/\n 0, /* tp_iter */\n 0, /* tp_iternext */\n pepy_parsed_methods, /* tp_methods */\n 0, /* tp_members */\n pepy_parsed_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_parsed_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_parsed_new /* tp_new */\n};\n\nstatic PyObject *pepy_parse(PyObject *self, PyObject *args) {\n PyObject *parsed;\n int ret;\n char *err_str = NULL;\n\n parsed = pepy_parsed_new(&pepy_parsed_type, NULL, NULL);\n if (!parsed) {\n PyErr_SetString(pepy_error, \"Unable to make new parsed object.\");\n return NULL;\n }\n\n ret = pepy_parsed_init((pepy_parsed *) parsed, args, NULL);\n if (ret < 0) {\n if (ret == -2) {\n // error (loc)\n size_t len = GetPEErrString().length() + GetPEErrLoc().length() + 4;\n err_str = (char *) malloc(len);\n if (!err_str)\n return PyErr_NoMemory();\n snprintf(err_str,\n len,\n \"%s (%s)\",\n GetPEErrString().c_str(),\n GetPEErrLoc().c_str());\n PyErr_SetString(pepy_error, err_str);\n } else\n PyErr_SetString(pepy_error, \"Unable to init new parsed object.\");\n return NULL;\n }\n\n return parsed;\n}\n\nstatic PyMethodDef pepy_methods[] = {\n {\"parse\", pepy_parse, METH_VARARGS, \"Parse PE from file.\"}, {NULL}};\n\nPyMODINIT_FUNC PyInit_pepy(void) {\n PyObject *m;\n\n if (PyType_Ready(&pepy_parsed_type) < 0 ||\n PyType_Ready(&pepy_section_type) < 0 ||\n PyType_Ready(&pepy_import_type) < 0 ||\n PyType_Ready(&pepy_export_type) < 0 ||\n PyType_Ready(&pepy_relocation_type) < 0 ||\n PyType_Ready(&pepy_resource_type) < 0)\n return NULL;\n\n static struct PyModuleDef moduledef = {\n PyModuleDef_HEAD_INIT,\n \"pepy\",\n \"Python interface to pe-parse.\",\n -1,\n pepy_methods,\n NULL,\n NULL,\n NULL,\n NULL,\n };\n\n m = PyModule_Create(&moduledef);\n if (!m)\n return NULL;\n\n pepy_error = PyErr_NewException((char *) \"pepy.error\", NULL, NULL);\n Py_INCREF(pepy_error);\n PyModule_AddObject(m, \"error\", pepy_error);\n\n 
Py_INCREF(&pepy_parsed_type);\n PyModule_AddObject(m, \"pepy_parsed\", (PyObject *) &pepy_parsed_type);\n\n Py_INCREF(&pepy_section_type);\n PyModule_AddObject(m, \"pepy_section\", (PyObject *) &pepy_section_type);\n\n Py_INCREF(&pepy_import_type);\n PyModule_AddObject(m, \"pepy_import\", (PyObject *) &pepy_import_type);\n\n Py_INCREF(&pepy_export_type);\n PyModule_AddObject(m, \"pepy_export\", (PyObject *) &pepy_export_type);\n\n Py_INCREF(&pepy_relocation_type);\n PyModule_AddObject(m, \"pepy_relocation\", (PyObject *) &pepy_relocation_type);\n\n Py_INCREF(&pepy_resource_type);\n PyModule_AddObject(m, \"pepy_resource\", (PyObject *) &pepy_resource_type);\n\n PyModule_AddStringMacro(m, PEPY_VERSION);\n PyModule_AddStringMacro(m, PEPARSE_VERSION);\n PyModule_AddStringConstant(m, \"__version__\", PEPY_VERSION);\n\n PyModule_AddIntMacro(m, MZ_MAGIC);\n PyModule_AddIntMacro(m, NT_MAGIC);\n PyModule_AddIntMacro(m, NUM_DIR_ENTRIES);\n PyModule_AddIntMacro(m, NT_OPTIONAL_32_MAGIC);\n PyModule_AddIntMacro(m, NT_SHORT_NAME_LEN);\n PyModule_AddIntMacro(m, DIR_EXPORT);\n PyModule_AddIntMacro(m, DIR_IMPORT);\n PyModule_AddIntMacro(m, DIR_RESOURCE);\n PyModule_AddIntMacro(m, DIR_EXCEPTION);\n PyModule_AddIntMacro(m, DIR_SECURITY);\n PyModule_AddIntMacro(m, DIR_BASERELOC);\n PyModule_AddIntMacro(m, DIR_DEBUG);\n PyModule_AddIntMacro(m, DIR_ARCHITECTURE);\n PyModule_AddIntMacro(m, DIR_GLOBALPTR);\n PyModule_AddIntMacro(m, DIR_TLS);\n PyModule_AddIntMacro(m, DIR_LOAD_CONFIG);\n PyModule_AddIntMacro(m, DIR_BOUND_IMPORT);\n PyModule_AddIntMacro(m, DIR_IAT);\n PyModule_AddIntMacro(m, DIR_DELAY_IMPORT);\n PyModule_AddIntMacro(m, DIR_COM_DESCRIPTOR);\n\n PyModule_AddIntMacro(m, IMAGE_SCN_TYPE_NO_PAD);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_CODE);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_INITIALIZED_DATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_UNINITIALIZED_DATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_OTHER);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_INFO);\n PyModule_AddIntMacro(m, 
IMAGE_SCN_LNK_REMOVE);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_COMDAT);\n PyModule_AddIntMacro(m, IMAGE_SCN_NO_DEFER_SPEC_EXC);\n PyModule_AddIntMacro(m, IMAGE_SCN_GPREL);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_FARDATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_PURGEABLE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_16BIT);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_LOCKED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_PRELOAD);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_1BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_2BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_4BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_8BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_16BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_32BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_64BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_128BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_256BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_512BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_1024BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_2048BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_4096BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_8192BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_MASK);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_NRELOC_OVFL);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_DISCARDABLE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_NOT_CACHED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_NOT_PAGED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_SHARED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_EXECUTE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_READ);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_WRITE);\n\n return m;\n}\n"], ["/lsfg-vk/thirdparty/toml11/tools/expand/main.cpp", "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nstd::optional\nis_include(const std::string& line, const std::filesystem::path& filepath)\n{\n // [ws] # [ws] include [ws] \\\".+\\\"\n auto iter = line.begin();\n while(iter < line.end())\n {\n 
if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != '#') {return std::nullopt;}\n\n assert(*iter == '#');\n ++iter;\n\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != 'i') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'n') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'c') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'l') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'u') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'd') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'e') {return std::nullopt;} else {++iter;}\n\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != '\"') {return std::nullopt;} else {++iter;}\n\n std::string filename;\n while(iter < line.end())\n {\n if(*iter == '\"') {break;}\n filename += *iter;\n ++iter;\n }\n if(iter == line.end() || *iter != '\"') {return std::nullopt;} else {++iter;}\n\n return std::filesystem::canonical(filepath.parent_path() / std::filesystem::path(filename));\n}\n\nstruct File\n{\n File() = default;\n\n explicit File(std::filesystem::path f)\n : filename(std::move(f))\n {\n std::ifstream ifs(filename);\n if( ! 
ifs.good())\n {\n throw std::runtime_error(\"file open error: \" + filename.string());\n }\n\n std::string line;\n while(std::getline(ifs, line))\n {\n if(const auto incl = is_include(line, filename))\n {\n includes.push_back(incl.value());\n }\n else\n {\n content.push_back(line);\n }\n }\n }\n\n File(std::filesystem::path f, std::vector c,\n std::vector i)\n : filename(std::move(f)), content(std::move(c)), includes(std::move(i))\n {}\n\n std::filesystem::path filename;\n std::vector content; // w/o include\n std::vector includes;\n};\n\nstruct Graph\n{\n struct Node\n {\n std::vector included;\n std::vector includes;\n };\n\n std::map nodes;\n};\n\nint main(int argc, char** argv)\n{\n using namespace std::literals::string_literals;\n if(argc != 2)\n {\n std::cerr << \"Usage: ./a.out path/to/toml.hpp > single_include/toml.hpp\" << std::endl;\n return 1;\n }\n\n const auto input_file = std::filesystem::path(std::string(argv[1]));\n assert(input_file.filename() == \"toml.hpp\");\n\n const auto include_path = input_file.parent_path();\n\n // -------------------------------------------------------------------------\n // load files and detect `include \"xxx.hpp\"`.\n // If the file has `_fwd` and `_impl`, expand those files first.\n\n std::set fwd_impl_files;\n for(const auto& entities : std::filesystem::directory_iterator(include_path/\"toml11\"/\"fwd\"))\n {\n if( ! entities.is_regular_file()) {continue;}\n std::string fname = entities.path().filename().string();\n if(fname.ends_with(\"_fwd.hpp\"))\n {\n for(const auto c : \"_fwd.hpp\"s) {fname.pop_back(); (void)c;}\n fwd_impl_files.insert(std::move(fname));\n }\n }\n for(const auto& entities : std::filesystem::directory_iterator(include_path/\"toml11\"/\"impl\"))\n {\n if( ! 
entities.is_regular_file()) {continue;}\n std::string fname = entities.path().filename().string();\n if(fname.ends_with(\"_impl.hpp\"))\n {\n for(const auto c : \"_impl.hpp\"s) {fname.pop_back(); (void)c;}\n // all impl files has fwd file\n assert(fwd_impl_files.contains(fname));\n }\n }\n\n const auto input = File(input_file);\n\n std::map files;\n files[input_file] = input;\n\n for(const auto& fname : input.includes)\n {\n if(fwd_impl_files.contains(fname.stem().string()))\n {\n std::cerr << \"expanding fwd/impl file of \" << fname.string() << std::endl;\n\n // expand the first include\n std::ifstream ifs(fname);\n\n std::vector content;\n std::vector includes;\n\n std::string line;\n while(std::getline(ifs, line))\n {\n // expand _fwd and _impl files first.\n const auto incl = is_include(line, fname);\n if(incl.has_value())\n {\n // if a file has _fwd/_impl files, it only includes fwd/impl files.\n assert(incl.value().string().ends_with(\"_impl.hpp\") ||\n incl.value().string().ends_with(\"_fwd.hpp\") );\n\n const File included(incl.value());\n for(const auto& l : included.content)\n {\n content.push_back(l);\n }\n for(const auto& i : included.includes)\n {\n includes.push_back(i);\n }\n }\n else\n {\n content.push_back(line);\n }\n }\n files[fname] = File(fname, std::move(content), std::move(includes));\n }\n else\n {\n files[fname] = File(fname);\n }\n std::cerr << \"file \" << fname << \" has \" << files.at(fname).content.size() << \" lines.\" << std::endl;\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"files have been read. 
next: constructing dependency graph...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // construct dependency graph\n\n Graph g;\n for(const auto& [k, v] : files)\n {\n g.nodes[k] = Graph::Node{};\n }\n\n for(const auto& [fname, file] : files)\n {\n for(auto incl : file.includes)\n {\n auto incl_stem = incl.stem().string();\n if(incl_stem.ends_with(\"_fwd\"))\n {\n for(const char c : \"_fwd\"s) {incl_stem.pop_back(); (void)c;}\n\n // include original file instaed\n incl = incl.parent_path() / \"..\" / std::filesystem::path(incl_stem + \".hpp\");\n }\n else if(incl_stem.ends_with(\"_impl\"))\n {\n for(const char c : \"_impl\"s) {incl_stem.pop_back(); (void)c;}\n\n // include original file instaed\n incl = incl.parent_path() / \"..\" / std::filesystem::path(incl_stem + \".hpp\");\n }\n incl = std::filesystem::canonical(incl);\n\n // avoid self include loop\n if(fname != incl)\n {\n std::cerr << fname << \" includes \" << incl << std::endl;\n\n g.nodes.at(fname).includes.push_back(incl);\n g.nodes.at(incl) .included.push_back(fname);\n }\n }\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"graph has been constructed. flattening...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // flatten graph by topological sort\n\n // collect files that does not include\n std::vector sources;\n for(const auto& [fname, node] : g.nodes)\n {\n if(node.includes.empty())\n {\n sources.push_back(fname);\n }\n }\n assert( ! sources.empty());\n\n std::vector sorted;\n while( ! 
sources.empty())\n {\n const auto file = sources.back();\n sorted.push_back(sources.back());\n sources.pop_back();\n\n for(const auto& included : g.nodes.at(file).included)\n {\n auto found = std::find(g.nodes.at(included).includes.begin(),\n g.nodes.at(included).includes.end(), file);\n g.nodes.at(included).includes.erase(found);\n\n if(g.nodes.at(included).includes.empty())\n {\n sources.push_back(included);\n }\n }\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"flattened. outputting...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // output all the file in the sorted order\n\n for(const auto& fname : sorted)\n {\n std::cerr << \"expanding: \" << fname << std::endl;\n for(const auto& line : files.at(fname).content)\n {\n std::cout << line << '\\n';\n }\n }\n\n return 0;\n}\n"], ["/lsfg-vk/src/main.cpp", "#include \"config/config.hpp\"\n#include \"extract/extract.hpp\"\n#include \"utils/benchmark.hpp\"\n#include \"utils/utils.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace {\n __attribute__((constructor)) void lsfgvk_init() {\n std::cerr << std::unitbuf;\n\n // read configuration\n const std::string file = Utils::getConfigFile();\n try {\n Config::updateConfig(file);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occured while trying to parse the configuration, IGNORING:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return; // default configuration will unload\n }\n\n const auto name = Utils::getProcessName();\n try {\n Config::activeConf = Config::getConfig(name);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: The configuration for \" << name.second << \" is invalid, IGNORING:\\n\";\n std::cerr << e.what() << '\\n';\n return; // default configuration will 
unload\n }\n\n // exit silently if not enabled\n auto& conf = Config::activeConf;\n if (!conf.enable && name.second != \"benchmark\")\n return; // default configuration will unload\n\n // print config\n std::cerr << \"lsfg-vk: Loaded configuration for \" << name.second << \":\\n\";\n if (!conf.dll.empty()) std::cerr << \" Using DLL from: \" << conf.dll << '\\n';\n std::cerr << \" Multiplier: \" << conf.multiplier << '\\n';\n std::cerr << \" Flow Scale: \" << conf.flowScale << '\\n';\n std::cerr << \" Performance Mode: \" << (conf.performance ? \"Enabled\" : \"Disabled\") << '\\n';\n std::cerr << \" HDR Mode: \" << (conf.hdr ? \"Enabled\" : \"Disabled\") << '\\n';\n if (conf.e_present != 2) std::cerr << \" ! Present Mode: \" << conf.e_present << '\\n';\n\n // remove mesa var in favor of config\n unsetenv(\"MESA_VK_WSI_PRESENT_MODE\"); // NOLINT\n\n // write latest file\n try {\n std::ofstream latest(\"/tmp/lsfg-vk_last\", std::ios::trunc);\n if (!latest.is_open())\n throw std::runtime_error(\"Failed to open /tmp/lsfg-vk_last for writing\");\n latest << \"exe: \" << name.first << '\\n';\n latest << \"comm: \" << name.second << '\\n';\n latest << \"pid: \" << getpid() << '\\n';\n if (!latest.good())\n throw std::runtime_error(\"Failed to write to /tmp/lsfg-vk_last\");\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to write the latest file, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n\n // load shaders\n try {\n Extract::extractShaders();\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to extract the shaders, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n std::cerr << \"lsfg-vk: Shaders extracted successfully.\\n\";\n\n // run benchmark if requested\n const char* benchmark_flag = std::getenv(\"LSFG_BENCHMARK\");\n if (!benchmark_flag)\n return;\n\n const std::string resolution(benchmark_flag);\n 
uint32_t width{};\n uint32_t height{};\n try {\n const size_t x = resolution.find('x');\n if (x == std::string::npos)\n throw std::runtime_error(\"Unable to find 'x' in benchmark string\");\n\n const std::string width_str = resolution.substr(0, x);\n const std::string height_str = resolution.substr(x + 1);\n if (width_str.empty() || height_str.empty())\n throw std::runtime_error(\"Invalid resolution\");\n\n const int32_t w = std::stoi(width_str);\n const int32_t h = std::stoi(height_str);\n if (w < 0 || h < 0)\n throw std::runtime_error(\"Resolution cannot be negative\");\n\n width = static_cast(w);\n height = static_cast(h);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to parse the resolution, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n\n std::thread benchmark([width, height]() {\n try {\n Benchmark::run(width, height);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred during the benchmark:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n });\n benchmark.detach();\n conf.enable = false;\n }\n}\n"], ["/lsfg-vk/src/utils/benchmark.cpp", "#include \"utils/benchmark.hpp\"\n#include \"config/config.hpp\"\n#include \"extract/extract.hpp\"\n#include \"extract/trans.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Benchmark;\n\nvoid Benchmark::run(uint32_t width, uint32_t height) {\n const auto& conf = Config::activeConf;\n\n auto* lsfgInitialize = LSFG_3_1::initialize;\n auto* lsfgCreateContext = LSFG_3_1::createContext;\n auto* lsfgPresentContext = LSFG_3_1::presentContext;\n if (conf.performance) {\n lsfgInitialize = LSFG_3_1P::initialize;\n lsfgCreateContext = LSFG_3_1P::createContext;\n lsfgPresentContext = LSFG_3_1P::presentContext;\n }\n\n // create the benchmark context\n const char* lsfgDeviceUUID = 
std::getenv(\"LSFG_DEVICE_UUID\");\n const uint64_t deviceUUID = lsfgDeviceUUID\n ? std::stoull(std::string(lsfgDeviceUUID), nullptr, 16) : 0x1463ABAC;\n\n setenv(\"DISABLE_LSFG\", \"1\", 1); // NOLINT\n\n Extract::extractShaders();\n lsfgInitialize(\n deviceUUID, // some magic number if not given\n conf.hdr, 1.0F / conf.flowScale, conf.multiplier - 1,\n [](const std::string& name) -> std::vector {\n auto dxbc = Extract::getShader(name);\n auto spirv = Extract::translateShader(dxbc);\n return spirv;\n }\n );\n const int32_t ctx = lsfgCreateContext(-1, -1, {},\n { .width = width, .height = height },\n conf.hdr ? VK_FORMAT_R16G16B16A16_SFLOAT : VK_FORMAT_R8G8B8A8_UNORM\n );\n\n unsetenv(\"DISABLE_LSFG\"); // NOLINT\n\n // run the benchmark (run 8*n + 1 so the fences are waited on)\n const auto now = std::chrono::high_resolution_clock::now();\n const uint64_t iterations = 8 * 500UL;\n\n std::cerr << \"lsfg-vk: Benchmark started, running \" << iterations << \" iterations...\\n\";\n for (uint64_t count = 0; count < iterations + 1; count++) {\n lsfgPresentContext(ctx, -1, {});\n\n if (count % 50 == 0 && count > 0)\n std::cerr << \"lsfg-vk: \"\n << std::setprecision(2) << std::fixed\n << static_cast(count) / static_cast(iterations) * 100.0F\n << \"% done (\" << count + 1 << \"/\" << iterations << \")\\r\";\n }\n const auto then = std::chrono::high_resolution_clock::now();\n\n // print results\n const auto ms = std::chrono::duration_cast(then - now).count();\n\n const auto perIteration = static_cast(ms) / static_cast(iterations);\n\n const uint64_t totalGen = (conf.multiplier - 1) * iterations;\n const auto genFps = static_cast(totalGen) / (static_cast(ms) / 1000.0F);\n\n const uint64_t totalFrames = iterations * conf.multiplier;\n const auto totalFps = static_cast(totalFrames) / (static_cast(ms) / 1000.0F);\n\n std::cerr << \"lsfg-vk: Benchmark completed in \" << ms << \" ms\\n\";\n std::cerr << \" Time taken per real frame: \"\n << std::setprecision(2) << std::fixed << 
perIteration << \" ms\\n\";\n std::cerr << \" Generated \" << totalGen << \" frames in total at \"\n << std::setprecision(2) << std::fixed << genFps << \" FPS\\n\";\n std::cerr << \" Total of \" << totalFrames << \" frames presented at \"\n << std::setprecision(2) << std::fixed << totalFps << \" FPS\\n\";\n\n // sleep for a second, then exit\n std::this_thread::sleep_for(std::chrono::seconds(1));\n _exit(0);\n}\n"], ["/lsfg-vk/src/layer.cpp", "#include \"layer.hpp\"\n#include \"common/exception.hpp\"\n#include \"config/config.hpp\"\n#include \"hooks.hpp\"\n\n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace {\n PFN_vkCreateInstance next_vkCreateInstance{};\n PFN_vkDestroyInstance next_vkDestroyInstance{};\n\n PFN_vkCreateDevice next_vkCreateDevice{};\n PFN_vkDestroyDevice next_vkDestroyDevice{};\n\n PFN_vkSetDeviceLoaderData next_vSetDeviceLoaderData{};\n\n PFN_vkGetInstanceProcAddr next_vkGetInstanceProcAddr{};\n PFN_vkGetDeviceProcAddr next_vkGetDeviceProcAddr{};\n\n PFN_vkGetPhysicalDeviceQueueFamilyProperties next_vkGetPhysicalDeviceQueueFamilyProperties{};\n PFN_vkGetPhysicalDeviceMemoryProperties next_vkGetPhysicalDeviceMemoryProperties{};\n PFN_vkGetPhysicalDeviceProperties next_vkGetPhysicalDeviceProperties{};\n PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR{};\n\n PFN_vkCreateSwapchainKHR next_vkCreateSwapchainKHR{};\n PFN_vkQueuePresentKHR next_vkQueuePresentKHR{};\n PFN_vkDestroySwapchainKHR next_vkDestroySwapchainKHR{};\n PFN_vkGetSwapchainImagesKHR next_vkGetSwapchainImagesKHR{};\n PFN_vkAllocateCommandBuffers next_vkAllocateCommandBuffers{};\n PFN_vkFreeCommandBuffers next_vkFreeCommandBuffers{};\n PFN_vkBeginCommandBuffer next_vkBeginCommandBuffer{};\n PFN_vkEndCommandBuffer next_vkEndCommandBuffer{};\n PFN_vkCreateCommandPool next_vkCreateCommandPool{};\n PFN_vkDestroyCommandPool next_vkDestroyCommandPool{};\n PFN_vkCreateImage next_vkCreateImage{};\n 
PFN_vkDestroyImage next_vkDestroyImage{};\n PFN_vkGetImageMemoryRequirements next_vkGetImageMemoryRequirements{};\n PFN_vkBindImageMemory next_vkBindImageMemory{};\n PFN_vkAllocateMemory next_vkAllocateMemory{};\n PFN_vkFreeMemory next_vkFreeMemory{};\n PFN_vkCreateSemaphore next_vkCreateSemaphore{};\n PFN_vkDestroySemaphore next_vkDestroySemaphore{};\n PFN_vkGetMemoryFdKHR next_vkGetMemoryFdKHR{};\n PFN_vkGetSemaphoreFdKHR next_vkGetSemaphoreFdKHR{};\n PFN_vkGetDeviceQueue next_vkGetDeviceQueue{};\n PFN_vkQueueSubmit next_vkQueueSubmit{};\n PFN_vkCmdPipelineBarrier next_vkCmdPipelineBarrier{};\n PFN_vkCmdBlitImage next_vkCmdBlitImage{};\n PFN_vkAcquireNextImageKHR next_vkAcquireNextImageKHR{};\n\n template\n bool initInstanceFunc(VkInstance instance, const char* name, T* func) {\n *func = reinterpret_cast(next_vkGetInstanceProcAddr(instance, name));\n if (!*func) {\n std::cerr << \"(no function pointer for \" << name << \")\\n\";\n return false;\n }\n return true;\n }\n\n template\n bool initDeviceFunc(VkDevice device, const char* name, T* func) {\n *func = reinterpret_cast(next_vkGetDeviceProcAddr(device, name));\n if (!*func) {\n std::cerr << \"(no function pointer for \" << name << \")\\n\";\n return false;\n }\n return true;\n }\n}\n\nnamespace {\n VkResult layer_vkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n try {\n // prepare layer | NOLINTBEGIN\n auto* layerDesc = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc && (layerDesc->sType != VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO\n || layerDesc->function != VK_LAYER_LINK_INFO)) {\n layerDesc = const_cast(\n reinterpret_cast(layerDesc->pNext));\n }\n if (!layerDesc)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer creation info found in pNext chain\");\n\n next_vkGetInstanceProcAddr = layerDesc->u.pLayerInfo->pfnNextGetInstanceProcAddr;\n layerDesc->u.pLayerInfo = 
layerDesc->u.pLayerInfo->pNext;\n\n bool success = initInstanceFunc(nullptr, \"vkCreateInstance\", &next_vkCreateInstance);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get instance function pointer for vkCreateInstance\");\n\n // NOLINTEND | skip initialization if the layer is disabled\n if (!Config::activeConf.enable) {\n auto res = next_vkCreateInstance(pCreateInfo, pAllocator, pInstance);\n initInstanceFunc(*pInstance, \"vkCreateDevice\", &next_vkCreateDevice);\n return res;\n }\n\n // create instance\n try {\n auto* createInstanceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateInstance\"]);\n auto res = createInstanceHook(pCreateInfo, pAllocator, pInstance);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Failed to create Vulkan instance\", e);\n }\n\n // get relevant function pointers from the next layer\n success = true;\n success &= initInstanceFunc(*pInstance,\n \"vkDestroyInstance\", &next_vkDestroyInstance);\n success &= initInstanceFunc(*pInstance,\n \"vkCreateDevice\", &next_vkCreateDevice); // workaround mesa bug\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceQueueFamilyProperties\", &next_vkGetPhysicalDeviceQueueFamilyProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceMemoryProperties\", &next_vkGetPhysicalDeviceMemoryProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceProperties\", &next_vkGetPhysicalDeviceProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceSurfaceCapabilitiesKHR\", &next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get instance function pointers\");\n\n std::cerr << \"lsfg-vk: Vulkan instance layer initialized successfully.\\n\";\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error 
occurred while initializing the Vulkan instance layer:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return VK_SUCCESS;\n }\n\n VkResult layer_vkCreateDevice( // NOLINTBEGIN\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n try {\n // prepare layer | NOLINTBEGIN\n auto* layerDesc = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc && (layerDesc->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO\n || layerDesc->function != VK_LAYER_LINK_INFO)) {\n layerDesc = const_cast(\n reinterpret_cast(layerDesc->pNext));\n }\n if (!layerDesc)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer creation info found in pNext chain\");\n\n next_vkGetDeviceProcAddr = layerDesc->u.pLayerInfo->pfnNextGetDeviceProcAddr;\n layerDesc->u.pLayerInfo = layerDesc->u.pLayerInfo->pNext;\n\n auto* layerDesc2 = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc2 && (layerDesc2->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO\n || layerDesc2->function != VK_LOADER_DATA_CALLBACK)) {\n layerDesc2 = const_cast(\n reinterpret_cast(layerDesc2->pNext));\n }\n if (!layerDesc2)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer device loader data found in pNext chain\");\n\n next_vSetDeviceLoaderData = layerDesc2->u.pfnSetDeviceLoaderData;\n\n // NOLINTEND | skip initialization if the layer is disabled\n if (!Config::activeConf.enable)\n return next_vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);\n\n // create device\n try {\n auto* createDeviceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateDevicePre\"]);\n auto res = createDeviceHook(physicalDevice, pCreateInfo, pAllocator, pDevice);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Failed to create 
Vulkan device\", e);\n }\n\n // get relevant function pointers from the next layer\n bool success = true;\n success &= initDeviceFunc(*pDevice, \"vkDestroyDevice\", &next_vkDestroyDevice);\n success &= initDeviceFunc(*pDevice, \"vkCreateSwapchainKHR\", &next_vkCreateSwapchainKHR);\n success &= initDeviceFunc(*pDevice, \"vkQueuePresentKHR\", &next_vkQueuePresentKHR);\n success &= initDeviceFunc(*pDevice, \"vkDestroySwapchainKHR\", &next_vkDestroySwapchainKHR);\n success &= initDeviceFunc(*pDevice, \"vkGetSwapchainImagesKHR\", &next_vkGetSwapchainImagesKHR);\n success &= initDeviceFunc(*pDevice, \"vkAllocateCommandBuffers\", &next_vkAllocateCommandBuffers);\n success &= initDeviceFunc(*pDevice, \"vkFreeCommandBuffers\", &next_vkFreeCommandBuffers);\n success &= initDeviceFunc(*pDevice, \"vkBeginCommandBuffer\", &next_vkBeginCommandBuffer);\n success &= initDeviceFunc(*pDevice, \"vkEndCommandBuffer\", &next_vkEndCommandBuffer);\n success &= initDeviceFunc(*pDevice, \"vkCreateCommandPool\", &next_vkCreateCommandPool);\n success &= initDeviceFunc(*pDevice, \"vkDestroyCommandPool\", &next_vkDestroyCommandPool);\n success &= initDeviceFunc(*pDevice, \"vkCreateImage\", &next_vkCreateImage);\n success &= initDeviceFunc(*pDevice, \"vkDestroyImage\", &next_vkDestroyImage);\n success &= initDeviceFunc(*pDevice, \"vkGetImageMemoryRequirements\", &next_vkGetImageMemoryRequirements);\n success &= initDeviceFunc(*pDevice, \"vkBindImageMemory\", &next_vkBindImageMemory);\n success &= initDeviceFunc(*pDevice, \"vkGetMemoryFdKHR\", &next_vkGetMemoryFdKHR);\n success &= initDeviceFunc(*pDevice, \"vkAllocateMemory\", &next_vkAllocateMemory);\n success &= initDeviceFunc(*pDevice, \"vkFreeMemory\", &next_vkFreeMemory);\n success &= initDeviceFunc(*pDevice, \"vkCreateSemaphore\", &next_vkCreateSemaphore);\n success &= initDeviceFunc(*pDevice, \"vkDestroySemaphore\", &next_vkDestroySemaphore);\n success &= initDeviceFunc(*pDevice, \"vkGetSemaphoreFdKHR\", &next_vkGetSemaphoreFdKHR);\n 
success &= initDeviceFunc(*pDevice, \"vkGetDeviceQueue\", &next_vkGetDeviceQueue);\n success &= initDeviceFunc(*pDevice, \"vkQueueSubmit\", &next_vkQueueSubmit);\n success &= initDeviceFunc(*pDevice, \"vkCmdPipelineBarrier\", &next_vkCmdPipelineBarrier);\n success &= initDeviceFunc(*pDevice, \"vkCmdBlitImage\", &next_vkCmdBlitImage);\n success &= initDeviceFunc(*pDevice, \"vkAcquireNextImageKHR\", &next_vkAcquireNextImageKHR);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get device function pointers\");\n\n auto postCreateDeviceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateDevicePost\"]);\n auto res = postCreateDeviceHook(physicalDevice, pCreateInfo, pAllocator, pDevice);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n\n std::cerr << \"lsfg-vk: Vulkan device layer initialized successfully.\\n\";\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while initializing the Vulkan device layer:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return VK_SUCCESS;\n } // NOLINTEND\n}\n\nconst std::unordered_map layerFunctions = {\n { \"vkCreateInstance\",\n reinterpret_cast(&layer_vkCreateInstance) },\n { \"vkCreateDevice\",\n reinterpret_cast(&layer_vkCreateDevice) },\n { \"vkGetInstanceProcAddr\",\n reinterpret_cast(&layer_vkGetInstanceProcAddr) },\n { \"vkGetDeviceProcAddr\",\n reinterpret_cast(&layer_vkGetDeviceProcAddr) },\n};\n\nPFN_vkVoidFunction layer_vkGetInstanceProcAddr(VkInstance instance, const char* pName) {\n const std::string name(pName);\n auto it = layerFunctions.find(name);\n if (it != layerFunctions.end())\n return it->second;\n\n it = Hooks::hooks.find(name);\n if (it != Hooks::hooks.end() && Config::activeConf.enable)\n return it->second;\n\n return next_vkGetInstanceProcAddr(instance, pName);\n}\n\nPFN_vkVoidFunction layer_vkGetDeviceProcAddr(VkDevice device, const char* pName) {\n const 
std::string name(pName);\n auto it = layerFunctions.find(name);\n if (it != layerFunctions.end())\n return it->second;\n\n it = Hooks::hooks.find(name);\n if (it != Hooks::hooks.end() && Config::activeConf.enable)\n return it->second;\n\n return next_vkGetDeviceProcAddr(device, pName);\n}\n\n// original functions\nnamespace Layer {\n VkResult ovkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n return next_vkCreateInstance(pCreateInfo, pAllocator, pInstance);\n }\n void ovkDestroyInstance(\n VkInstance instance,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyInstance(instance, pAllocator);\n }\n\n VkResult ovkCreateDevice(\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n return next_vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);\n }\n void ovkDestroyDevice(\n VkDevice device,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyDevice(device, pAllocator);\n }\n\n VkResult ovkSetDeviceLoaderData(VkDevice device, void* object) {\n return next_vSetDeviceLoaderData(device, object);\n }\n\n PFN_vkVoidFunction ovkGetInstanceProcAddr(\n VkInstance instance,\n const char* pName) {\n return next_vkGetInstanceProcAddr(instance, pName);\n }\n PFN_vkVoidFunction ovkGetDeviceProcAddr(\n VkDevice device,\n const char* pName) {\n return next_vkGetDeviceProcAddr(device, pName);\n }\n\n void ovkGetPhysicalDeviceQueueFamilyProperties(\n VkPhysicalDevice physicalDevice,\n uint32_t* pQueueFamilyPropertyCount,\n VkQueueFamilyProperties* pQueueFamilyProperties) {\n next_vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);\n }\n void ovkGetPhysicalDeviceMemoryProperties(\n VkPhysicalDevice physicalDevice,\n VkPhysicalDeviceMemoryProperties* pMemoryProperties) {\n next_vkGetPhysicalDeviceMemoryProperties(physicalDevice, 
pMemoryProperties);\n }\n void ovkGetPhysicalDeviceProperties(\n VkPhysicalDevice physicalDevice,\n VkPhysicalDeviceProperties* pProperties) {\n next_vkGetPhysicalDeviceProperties(physicalDevice, pProperties);\n }\n VkResult ovkGetPhysicalDeviceSurfaceCapabilitiesKHR(\n VkPhysicalDevice physicalDevice,\n VkSurfaceKHR surface,\n VkSurfaceCapabilitiesKHR* pSurfaceCapabilities) {\n return next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);\n }\n\n VkResult ovkCreateSwapchainKHR(\n VkDevice device,\n const VkSwapchainCreateInfoKHR* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSwapchainKHR* pSwapchain) {\n return next_vkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);\n }\n VkResult ovkQueuePresentKHR(\n VkQueue queue,\n const VkPresentInfoKHR* pPresentInfo) {\n return next_vkQueuePresentKHR(queue, pPresentInfo);\n }\n void ovkDestroySwapchainKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroySwapchainKHR(device, swapchain, pAllocator);\n }\n\n VkResult ovkGetSwapchainImagesKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n uint32_t* pSwapchainImageCount,\n VkImage* pSwapchainImages) {\n return next_vkGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);\n }\n\n VkResult ovkAllocateCommandBuffers(\n VkDevice device,\n const VkCommandBufferAllocateInfo* pAllocateInfo,\n VkCommandBuffer* pCommandBuffers) {\n return next_vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);\n }\n void ovkFreeCommandBuffers(\n VkDevice device,\n VkCommandPool commandPool,\n uint32_t commandBufferCount,\n const VkCommandBuffer* pCommandBuffers) {\n next_vkFreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);\n }\n\n VkResult ovkBeginCommandBuffer(\n VkCommandBuffer commandBuffer,\n const VkCommandBufferBeginInfo* pBeginInfo) {\n return next_vkBeginCommandBuffer(commandBuffer, pBeginInfo);\n 
}\n VkResult ovkEndCommandBuffer(\n VkCommandBuffer commandBuffer) {\n return next_vkEndCommandBuffer(commandBuffer);\n }\n\n VkResult ovkCreateCommandPool(\n VkDevice device,\n const VkCommandPoolCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkCommandPool* pCommandPool) {\n return next_vkCreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);\n }\n void ovkDestroyCommandPool(\n VkDevice device,\n VkCommandPool commandPool,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyCommandPool(device, commandPool, pAllocator);\n }\n\n VkResult ovkCreateImage(\n VkDevice device,\n const VkImageCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkImage* pImage) {\n return next_vkCreateImage(device, pCreateInfo, pAllocator, pImage);\n }\n void ovkDestroyImage(\n VkDevice device,\n VkImage image,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyImage(device, image, pAllocator);\n }\n\n void ovkGetImageMemoryRequirements(\n VkDevice device,\n VkImage image,\n VkMemoryRequirements* pMemoryRequirements) {\n next_vkGetImageMemoryRequirements(device, image, pMemoryRequirements);\n }\n VkResult ovkBindImageMemory(\n VkDevice device,\n VkImage image,\n VkDeviceMemory memory,\n VkDeviceSize memoryOffset) {\n return next_vkBindImageMemory(device, image, memory, memoryOffset);\n }\n\n VkResult ovkAllocateMemory(\n VkDevice device,\n const VkMemoryAllocateInfo* pAllocateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDeviceMemory* pMemory) {\n return next_vkAllocateMemory(device, pAllocateInfo, pAllocator, pMemory);\n }\n void ovkFreeMemory(\n VkDevice device,\n VkDeviceMemory memory,\n const VkAllocationCallbacks* pAllocator) {\n next_vkFreeMemory(device, memory, pAllocator);\n }\n\n VkResult ovkCreateSemaphore(\n VkDevice device,\n const VkSemaphoreCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSemaphore* pSemaphore) {\n return next_vkCreateSemaphore(device, pCreateInfo, pAllocator, 
pSemaphore);\n }\n void ovkDestroySemaphore(\n VkDevice device,\n VkSemaphore semaphore,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroySemaphore(device, semaphore, pAllocator);\n }\n\n VkResult ovkGetMemoryFdKHR(\n VkDevice device,\n const VkMemoryGetFdInfoKHR* pGetFdInfo,\n int* pFd) {\n return next_vkGetMemoryFdKHR(device, pGetFdInfo, pFd);\n }\n VkResult ovkGetSemaphoreFdKHR(\n VkDevice device,\n const VkSemaphoreGetFdInfoKHR* pGetFdInfo,\n int* pFd) {\n return next_vkGetSemaphoreFdKHR(device, pGetFdInfo, pFd);\n }\n\n void ovkGetDeviceQueue(\n VkDevice device,\n uint32_t queueFamilyIndex,\n uint32_t queueIndex,\n VkQueue* pQueue) {\n next_vkGetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);\n }\n VkResult ovkQueueSubmit(\n VkQueue queue,\n uint32_t submitCount,\n const VkSubmitInfo* pSubmits,\n VkFence fence) {\n return next_vkQueueSubmit(queue, submitCount, pSubmits, fence);\n }\n\n void ovkCmdPipelineBarrier(\n VkCommandBuffer commandBuffer,\n VkPipelineStageFlags srcStageMask,\n VkPipelineStageFlags dstStageMask,\n VkDependencyFlags dependencyFlags,\n uint32_t memoryBarrierCount,\n const VkMemoryBarrier* pMemoryBarriers,\n uint32_t bufferMemoryBarrierCount,\n const VkBufferMemoryBarrier* pBufferMemoryBarriers,\n uint32_t imageMemoryBarrierCount,\n const VkImageMemoryBarrier* pImageMemoryBarriers) {\n next_vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,\n memoryBarrierCount, pMemoryBarriers,\n bufferMemoryBarrierCount, pBufferMemoryBarriers,\n imageMemoryBarrierCount, pImageMemoryBarriers);\n }\n void ovkCmdBlitImage(\n VkCommandBuffer commandBuffer,\n VkImage srcImage,\n VkImageLayout srcImageLayout,\n VkImage dstImage,\n VkImageLayout dstImageLayout,\n uint32_t regionCount,\n const VkImageBlit* pRegions,\n VkFilter filter) {\n next_vkCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);\n }\n\n VkResult ovkAcquireNextImageKHR(\n VkDevice 
device,\n VkSwapchainKHR swapchain,\n uint64_t timeout,\n VkSemaphore semaphore,\n VkFence fence,\n uint32_t* pImageIndex) {\n return next_vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);\n }\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/nt-headers.h", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#pragma once\n\n#include \n#include \n#include \n\n// need to pack these structure definitions\n\n// some constant definitions\n// clang-format off\nnamespace peparse {\nconstexpr std::uint32_t RICH_MAGIC_END = 0x68636952;\nconstexpr std::uint32_t RICH_MAGIC_START = 0x536e6144;\nconstexpr std::uint32_t RICH_OFFSET = 0x80;\nconstexpr std::uint16_t MZ_MAGIC = 0x5A4D;\nconstexpr std::uint32_t NT_MAGIC = 0x00004550;\nconstexpr std::uint16_t NUM_DIR_ENTRIES = 16;\nconstexpr std::uint16_t NT_OPTIONAL_32_MAGIC = 0x10B;\nconstexpr std::uint16_t NT_OPTIONAL_64_MAGIC = 0x20B;\nconstexpr std::uint16_t NT_SHORT_NAME_LEN = 8;\nconstexpr std::uint16_t SYMTAB_RECORD_LEN = 18;\n\n#ifndef _PEPARSE_WINDOWS_CONFLICTS\n// Machine Types\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_UNKNOWN = 0x0;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ALPHA = 0x184; // Alpha_AXP\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ALPHA64 = 0x284; // ALPHA64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AM33 = 0x1d3; // Matsushita AM33\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AMD64 = 0x8664; // x64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARM = 0x1c0; // ARM little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARM64 = 0xaa64; // ARM64 little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARMNT = 0x1c4; // ARM Thumb-2 little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AXP64 = 0x284; // ALPHA64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_CEE = 0xc0ee;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_CEF = 0xcef;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_EBC = 0xebc; // EFI byte code\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_I386 = 0x14c; // Intel 386 or later processors and compatible processors\nconstexpr std::uint16_t 
IMAGE_FILE_MACHINE_IA64 = 0x200; // Intel Itanium processor family\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_LOONGARCH32 = 0x6232; // LoongArch 32-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_LOONGARCH64 = 0x6264; // LoongArch 64-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_M32R = 0x9041; // Mitsubishi M32R little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPS16 = 0x266; // MIPS16\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPSFPU = 0x366; // MIPS with FPU\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466; // MIPS16 with FPU\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPC = 0x1f0; // Power PC little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPCFP = 0x1f1; // Power PC with floating point support\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPCBE = 0x1f2; // Power PC big endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R3000 = 0x162; // MIPS little endian, 0x160 big-endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R4000 = 0x166; // MIPS little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R10000 = 0x168; // MIPS little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV32 = 0x5032; // RISC-V 32-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV64 = 0x5064; // RISC-V 64-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV128 = 0x5128; // RISC-V 128-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3 = 0x1a2; // Hitachi SH3\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3DSP = 0x1a3; // Hitachi SH3 DSP\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3E = 0x1a4; // Hitachi SH3E\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH4 = 0x1a6; // Hitachi SH4\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH5 = 0x1a8; // Hitachi SH5\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_THUMB = 0x1c2; // Thumb\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_TRICORE = 0x520; // Infineon\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169; // MIPS 
little-endian WCE v2\n\nconstexpr std::uint16_t IMAGE_FILE_RELOCS_STRIPPED = 0x0001;\nconstexpr std::uint16_t IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002;\nconstexpr std::uint16_t IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004;\nconstexpr std::uint16_t IMAGE_FILE_LOCAL_SYMS_STRIPPED = 0x0008;\nconstexpr std::uint16_t IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x0010;\nconstexpr std::uint16_t IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020;\nconstexpr std::uint16_t IMAGE_FILE_BYTES_REVERSED_LO = 0x0080;\nconstexpr std::uint16_t IMAGE_FILE_32BIT_MACHINE = 0x0100;\nconstexpr std::uint16_t IMAGE_FILE_DEBUG_STRIPPED = 0x0200;\nconstexpr std::uint16_t IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400;\nconstexpr std::uint16_t IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800;\nconstexpr std::uint16_t IMAGE_FILE_SYSTEM = 0x1000;\nconstexpr std::uint16_t IMAGE_FILE_DLL = 0x2000;\nconstexpr std::uint16_t IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000;\nconstexpr std::uint16_t IMAGE_FILE_BYTES_REVERSED_HI = 0x8000;\n\nconstexpr std::uint32_t IMAGE_SCN_TYPE_NO_PAD = 0x00000008;\nconstexpr std::uint32_t IMAGE_SCN_CNT_CODE = 0x00000020;\nconstexpr std::uint32_t IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040;\nconstexpr std::uint32_t IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080;\nconstexpr std::uint32_t IMAGE_SCN_LNK_OTHER = 0x00000100;\nconstexpr std::uint32_t IMAGE_SCN_LNK_INFO = 0x00000200;\nconstexpr std::uint32_t IMAGE_SCN_LNK_REMOVE = 0x00000800;\nconstexpr std::uint32_t IMAGE_SCN_LNK_COMDAT = 0x00001000;\nconstexpr std::uint32_t IMAGE_SCN_NO_DEFER_SPEC_EXC = 0x00004000;\nconstexpr std::uint32_t IMAGE_SCN_GPREL = 0x00008000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_FARDATA = 0x00008000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_PURGEABLE = 0x00020000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_16BIT = 0x00020000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_LOCKED = 0x00040000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_PRELOAD = 0x00080000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_1BYTES = 0x00100000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_2BYTES = 
0x00200000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_4BYTES = 0x00300000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_8BYTES = 0x00400000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_16BYTES = 0x00500000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_32BYTES = 0x00600000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_64BYTES = 0x00700000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_128BYTES = 0x00800000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_256BYTES = 0x00900000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_512BYTES = 0x00A00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_MASK = 0x00F00000;\nconstexpr std::uint32_t IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_DISCARDABLE = 0x02000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_NOT_CACHED = 0x04000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_NOT_PAGED = 0x08000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_SHARED = 0x10000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_EXECUTE = 0x20000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_READ = 0x40000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_WRITE = 0x80000000;\n\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_UNKNOWN = 0;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_NATIVE = 1;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_GUI = 2;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_CUI = 3;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_OS2_CUI = 5;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_POSIX_CUI = 7;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_NATIVE_WINDOWS = 8;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_APPLICATION = 10;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12;\nconstexpr 
std::uint16_t IMAGE_SUBSYSTEM_EFI_ROM = 13;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_XBOX = 14;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_XBOX_CODE_CATALOG = 17;\n\n// Symbol section number values\nconstexpr std::int16_t IMAGE_SYM_UNDEFINED = 0;\nconstexpr std::int16_t IMAGE_SYM_ABSOLUTE = -1;\nconstexpr std::int16_t IMAGE_SYM_DEBUG = -2;\n\n// Symbol table types\nconstexpr std::uint16_t IMAGE_SYM_TYPE_NULL = 0;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_VOID = 1;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_CHAR = 2;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_SHORT = 3;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_INT = 4;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_LONG = 5;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_FLOAT = 6;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_DOUBLE = 7;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_STRUCT = 8;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_UNION = 9;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_ENUM = 10;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_MOE = 11;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_BYTE = 12;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_WORD = 13;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_UINT = 14;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_DWORD = 15;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_NULL = 0;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_POINTER = 1;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_FUNCTION = 2;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_ARRAY = 3;\n\n// Symbol table storage classes\nconstexpr std::uint8_t IMAGE_SYM_CLASS_END_OF_FUNCTION = static_cast(-1);\nconstexpr std::uint8_t IMAGE_SYM_CLASS_NULL = 0;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_AUTOMATIC = 1;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_EXTERNAL = 2;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_STATIC = 3;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_REGISTER = 4;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_EXTERNAL_DEF = 5;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_LABEL = 6;\nconstexpr std::uint8_t 
IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_ARGUMENT = 9;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_STRUCT_TAG = 10;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNION_TAG = 12;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_TYPE_DEFINITION = 13;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_ENUM_TAG = 15;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_REGISTER_PARAM = 17;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_BIT_FIELD = 18;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_BLOCK = 100;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_FUNCTION = 101;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_END_OF_STRUCT = 102;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_FILE = 103;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_SECTION = 104;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_CLR_TOKEN = 107;\n\n// Optional header DLL characteristics\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY = 0x0080;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_ISOLATION = 0x0200;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_SEH = 0x0400;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_BIND = 0x0800;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_APPCONTAINER = 0x1000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 0x2000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_GUARD_CF = 0x4000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000;\n\n// Extended DLL characteristics\nconstexpr std::uint16_t 
IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT = 0x0001;\n#endif\n// clang-format on\n\nstruct dos_header {\n std::uint16_t e_magic;\n std::uint16_t e_cblp;\n std::uint16_t e_cp;\n std::uint16_t e_crlc;\n std::uint16_t e_cparhdr;\n std::uint16_t e_minalloc;\n std::uint16_t e_maxalloc;\n std::uint16_t e_ss;\n std::uint16_t e_sp;\n std::uint16_t e_csum;\n std::uint16_t e_ip;\n std::uint16_t e_cs;\n std::uint16_t e_lfarlc;\n std::uint16_t e_ovno;\n std::uint16_t e_res[4];\n std::uint16_t e_oemid;\n std::uint16_t e_oeminfo;\n std::uint16_t e_res2[10];\n std::uint32_t e_lfanew;\n};\n\nstruct file_header {\n std::uint16_t Machine;\n std::uint16_t NumberOfSections;\n std::uint32_t TimeDateStamp;\n std::uint32_t PointerToSymbolTable;\n std::uint32_t NumberOfSymbols;\n std::uint16_t SizeOfOptionalHeader;\n std::uint16_t Characteristics;\n};\n\nstruct data_directory {\n std::uint32_t VirtualAddress;\n std::uint32_t Size;\n};\n\nenum data_directory_kind {\n DIR_EXPORT = 0,\n DIR_IMPORT = 1,\n DIR_RESOURCE = 2,\n DIR_EXCEPTION = 3,\n DIR_SECURITY = 4,\n DIR_BASERELOC = 5,\n DIR_DEBUG = 6,\n DIR_ARCHITECTURE = 7,\n DIR_GLOBALPTR = 8,\n DIR_TLS = 9,\n DIR_LOAD_CONFIG = 10,\n DIR_BOUND_IMPORT = 11,\n DIR_IAT = 12,\n DIR_DELAY_IMPORT = 13,\n DIR_COM_DESCRIPTOR = 14,\n DIR_RESERVED = 15,\n};\n\nstruct optional_header_32 {\n std::uint16_t Magic;\n std::uint8_t MajorLinkerVersion;\n std::uint8_t MinorLinkerVersion;\n std::uint32_t SizeOfCode;\n std::uint32_t SizeOfInitializedData;\n std::uint32_t SizeOfUninitializedData;\n std::uint32_t AddressOfEntryPoint;\n std::uint32_t BaseOfCode;\n std::uint32_t BaseOfData;\n std::uint32_t ImageBase;\n std::uint32_t SectionAlignment;\n std::uint32_t FileAlignment;\n std::uint16_t MajorOperatingSystemVersion;\n std::uint16_t MinorOperatingSystemVersion;\n std::uint16_t MajorImageVersion;\n std::uint16_t MinorImageVersion;\n std::uint16_t MajorSubsystemVersion;\n std::uint16_t MinorSubsystemVersion;\n std::uint32_t Win32VersionValue;\n std::uint32_t 
SizeOfImage;\n std::uint32_t SizeOfHeaders;\n std::uint32_t CheckSum;\n std::uint16_t Subsystem;\n std::uint16_t DllCharacteristics;\n std::uint32_t SizeOfStackReserve;\n std::uint32_t SizeOfStackCommit;\n std::uint32_t SizeOfHeapReserve;\n std::uint32_t SizeOfHeapCommit;\n std::uint32_t LoaderFlags;\n std::uint32_t NumberOfRvaAndSizes;\n data_directory DataDirectory[NUM_DIR_ENTRIES];\n};\n\n/*\n * This is used for PE32+ binaries. It is similar to optional_header_32\n * except some fields don't exist here (BaseOfData), and others are bigger.\n */\nstruct optional_header_64 {\n std::uint16_t Magic;\n std::uint8_t MajorLinkerVersion;\n std::uint8_t MinorLinkerVersion;\n std::uint32_t SizeOfCode;\n std::uint32_t SizeOfInitializedData;\n std::uint32_t SizeOfUninitializedData;\n std::uint32_t AddressOfEntryPoint;\n std::uint32_t BaseOfCode;\n std::uint64_t ImageBase;\n std::uint32_t SectionAlignment;\n std::uint32_t FileAlignment;\n std::uint16_t MajorOperatingSystemVersion;\n std::uint16_t MinorOperatingSystemVersion;\n std::uint16_t MajorImageVersion;\n std::uint16_t MinorImageVersion;\n std::uint16_t MajorSubsystemVersion;\n std::uint16_t MinorSubsystemVersion;\n std::uint32_t Win32VersionValue;\n std::uint32_t SizeOfImage;\n std::uint32_t SizeOfHeaders;\n std::uint32_t CheckSum;\n std::uint16_t Subsystem;\n std::uint16_t DllCharacteristics;\n std::uint64_t SizeOfStackReserve;\n std::uint64_t SizeOfStackCommit;\n std::uint64_t SizeOfHeapReserve;\n std::uint64_t SizeOfHeapCommit;\n std::uint32_t LoaderFlags;\n std::uint32_t NumberOfRvaAndSizes;\n data_directory DataDirectory[NUM_DIR_ENTRIES];\n};\n\nstruct nt_header_32 {\n std::uint32_t Signature;\n file_header FileHeader;\n optional_header_32 OptionalHeader;\n optional_header_64 OptionalHeader64;\n std::uint16_t OptionalMagic;\n};\n\nstruct rich_entry {\n std::uint16_t ProductId;\n std::uint16_t BuildNumber;\n std::uint32_t Count;\n};\n\nstruct rich_header {\n std::uint32_t StartSignature;\n std::vector Entries;\n 
std::uint32_t EndSignature;\n std::uint32_t DecryptionKey;\n std::uint32_t Checksum;\n bool isPresent;\n bool isValid;\n};\n\n/*\n * This structure is only used to know how far to move the offset\n * when parsing resources. The data is stored in a resource_dir_entry\n * struct but that also has extra information used in the parsing which\n * causes the size to be inaccurate.\n */\nstruct resource_dir_entry_sz {\n std::uint32_t ID;\n std::uint32_t RVA;\n};\n\nstruct resource_dir_entry {\n inline resource_dir_entry(void) : ID(0), RVA(0), type(0), name(0), lang(0) {\n }\n\n std::uint32_t ID;\n std::uint32_t RVA;\n std::uint32_t type;\n std::uint32_t name;\n std::uint32_t lang;\n std::string type_str;\n std::string name_str;\n std::string lang_str;\n};\n\nstruct resource_dir_table {\n std::uint32_t Characteristics;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint16_t NameEntries;\n std::uint16_t IDEntries;\n};\n\nstruct resource_dat_entry {\n std::uint32_t RVA;\n std::uint32_t size;\n std::uint32_t codepage;\n std::uint32_t reserved;\n};\n\nstruct image_section_header {\n std::uint8_t Name[NT_SHORT_NAME_LEN];\n union {\n std::uint32_t PhysicalAddress;\n std::uint32_t VirtualSize;\n } Misc;\n std::uint32_t VirtualAddress;\n std::uint32_t SizeOfRawData;\n std::uint32_t PointerToRawData;\n std::uint32_t PointerToRelocations;\n std::uint32_t PointerToLinenumbers;\n std::uint16_t NumberOfRelocations;\n std::uint16_t NumberOfLinenumbers;\n std::uint32_t Characteristics;\n};\n\nstruct import_dir_entry {\n std::uint32_t LookupTableRVA;\n std::uint32_t TimeStamp;\n std::uint32_t ForwarderChain;\n std::uint32_t NameRVA;\n std::uint32_t AddressRVA;\n};\n\nstruct export_dir_table {\n std::uint32_t ExportFlags;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t NameRVA;\n std::uint32_t OrdinalBase;\n std::uint32_t AddressTableEntries;\n std::uint32_t NumberOfNamePointers;\n 
std::uint32_t ExportAddressTableRVA;\n std::uint32_t NamePointerRVA;\n std::uint32_t OrdinalTableRVA;\n};\n\nstruct debug_dir_entry {\n std::uint32_t Characteristics;\n std::uint32_t TimeStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t Type;\n std::uint32_t SizeOfData;\n std::uint32_t AddressOfRawData;\n std::uint32_t PointerToRawData;\n};\n\nenum reloc_type {\n RELOC_ABSOLUTE = 0,\n RELOC_HIGH = 1,\n RELOC_LOW = 2,\n RELOC_HIGHLOW = 3,\n RELOC_HIGHADJ = 4,\n RELOC_MIPS_JMPADDR = 5, // only valid on MIPS\n RELOC_ARM_MOV32 = 5, // only valid on ARM/Thumb\n RELOC_RISCV_HIGH20 = 5, // only valid on RISC-V\n RELOC_RESERVED = 6,\n RELOC_THUMB_MOV32 = 7, // only valid on Thumb\n RELOC_RISCV_LOW32I = 7, // only valid on RISC-V\n RELOC_RISCV_LOW12S = 8, // only valid on RISC-V\n RELOC_LOONGARCH32_MARK_LA = 8, // only valid on LoongArch 32\n RELOC_LOONGARCH64_MARK_LA = 8, // only valid on LoongArch 64\n RELOC_MIPS_JMPADDR16 = 9, // only valid on MIPS\n RELOC_IA64_IMM64 = 9,\n RELOC_DIR64 = 10\n};\n\nstruct reloc_block {\n std::uint32_t PageRVA;\n std::uint32_t BlockSize;\n};\n\nstruct image_load_config_code_integrity {\n std::uint16_t Flags;\n std::uint16_t Catalog;\n std::uint32_t CatalogOffset;\n std::uint32_t Reserved;\n};\n\nstruct image_load_config_32 {\n std::uint32_t Size;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t GlobalFlagsClear;\n std::uint32_t GlobalFlagsSet;\n std::uint32_t CriticalSectionDefaultTimeout;\n std::uint32_t DeCommitFreeBlockThreshold;\n std::uint32_t DeCommitTotalFreeThreshold;\n std::uint32_t LockPrefixTable;\n std::uint32_t MaximumAllocationSize;\n std::uint32_t VirtualMemoryThreshold;\n std::uint32_t ProcessHeapFlags;\n std::uint32_t ProcessAffinityMask;\n std::uint16_t CSDVersion;\n std::uint16_t DependentLoadFlags;\n std::uint32_t EditList;\n std::uint32_t SecurityCookie;\n std::uint32_t SEHandlerTable;\n std::uint32_t SEHandlerCount;\n 
std::uint32_t GuardCFCheckFunctionPointer;\n std::uint32_t GuardCFDispatchFunctionPointer;\n std::uint32_t GuardCFFunctionTable;\n std::uint32_t GuardCFFunctionCount;\n std::uint32_t GuardFlags;\n image_load_config_code_integrity CodeIntegrity;\n std::uint32_t GuardAddressTakenIatEntryTable;\n std::uint32_t GuardAddressTakenIatEntryCount;\n std::uint32_t GuardLongJumpTargetTable;\n std::uint32_t GuardLongJumpTargetCount;\n std::uint32_t DynamicValueRelocTable;\n std::uint32_t CHPEMetadataPointer;\n std::uint32_t GuardRFFailureRoutine;\n std::uint32_t GuardRFFailureRoutineFunctionPointer;\n std::uint32_t DynamicValueRelocTableOffset;\n std::uint16_t DynamicValueRelocTableSection;\n std::uint16_t Reserved2;\n std::uint32_t GuardRFVerifyStackPointerFunctionPointer;\n std::uint32_t HotPatchTableOffset;\n std::uint32_t Reserved3;\n std::uint32_t EnclaveConfigurationPointer;\n std::uint32_t VolatileMetadataPointer;\n};\n\nstruct image_load_config_64 {\n std::uint32_t Size;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t GlobalFlagsClear;\n std::uint32_t GlobalFlagsSet;\n std::uint32_t CriticalSectionDefaultTimeout;\n std::uint64_t DeCommitFreeBlockThreshold;\n std::uint64_t DeCommitTotalFreeThreshold;\n std::uint64_t LockPrefixTable;\n std::uint64_t MaximumAllocationSize;\n std::uint64_t VirtualMemoryThreshold;\n std::uint64_t ProcessAffinityMask;\n std::uint32_t ProcessHeapFlags;\n std::uint16_t CSDVersion;\n std::uint16_t DependentLoadFlags;\n std::uint64_t EditList;\n std::uint64_t SecurityCookie;\n std::uint64_t SEHandlerTable;\n std::uint64_t SEHandlerCount;\n std::uint64_t GuardCFCheckFunctionPointer;\n std::uint64_t GuardCFDispatchFunctionPointer;\n std::uint64_t GuardCFFunctionTable;\n std::uint64_t GuardCFFunctionCount;\n std::uint32_t GuardFlags;\n image_load_config_code_integrity CodeIntegrity;\n std::uint64_t GuardAddressTakenIatEntryTable;\n std::uint64_t GuardAddressTakenIatEntryCount;\n 
std::uint64_t GuardLongJumpTargetTable;\n std::uint64_t GuardLongJumpTargetCount;\n std::uint64_t DynamicValueRelocTable;\n std::uint64_t CHPEMetadataPointer;\n std::uint64_t GuardRFFailureRoutine;\n std::uint64_t GuardRFFailureRoutineFunctionPointer;\n std::uint32_t DynamicValueRelocTableOffset;\n std::uint16_t DynamicValueRelocTableSection;\n std::uint16_t Reserved2;\n std::uint64_t GuardRFVerifyStackPointerFunctionPointer;\n std::uint32_t HotPatchTableOffset;\n std::uint32_t Reserved3;\n std::uint64_t EnclaveConfigurationPointer;\n std::uint64_t VolatileMetadataPointer;\n};\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_small_vector.h", "#pragma once\n\n#include \n#include \n#include \n#include \n\n#include \"util_bit.h\"\n#include \"util_likely.h\"\n\nnamespace dxvk {\n\n template\n class small_vector {\n using storage = std::aligned_storage_t;\n public:\n\n constexpr static size_t MinCapacity = N;\n\n small_vector() { }\n\n small_vector(size_t size) {\n resize(size);\n }\n\n small_vector(const small_vector& other) {\n reserve(other.m_size);\n\n for (size_t i = 0; i < other.m_size; i++)\n *ptr(i) = *other.ptr(i);\n\n m_size = other.m_size;\n }\n\n small_vector& operator = (const small_vector& other) {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n reserve(other.m_size);\n\n for (size_t i = 0; i < other.m_size; i++)\n *ptr(i) = *other.ptr(i);\n\n m_size = other.m_size;\n return *this;\n }\n\n small_vector(small_vector&& other) {\n if (other.m_size <= N) {\n for (size_t i = 0; i < other.m_size; i++)\n new (&u.m_data[i]) T(std::move(*other.ptr(i)));\n } else {\n u.m_ptr = other.u.m_ptr;\n m_capacity = other.m_capacity;\n\n other.u.m_ptr = nullptr;\n other.m_capacity = N;\n }\n\n m_size = other.m_size;\n other.m_size = 0;\n }\n\n small_vector& operator = (small_vector&& other) {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n\n if (other.m_size <= N) {\n m_capacity = N;\n\n 
for (size_t i = 0; i < other.m_size; i++)\n new (&u.m_data[i]) T(std::move(*other.ptr(i)));\n } else {\n u.m_ptr = other.u.m_ptr;\n m_capacity = other.m_capacity;\n\n other.u.m_ptr = nullptr;\n other.m_capacity = N;\n }\n\n m_size = other.m_size;\n other.m_size = 0;\n return *this;\n }\n\n ~small_vector() {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n }\n\n size_t size() const {\n return m_size;\n }\n\n void reserve(size_t n) {\n if (likely(n <= m_capacity))\n return;\n\n n = pick_capacity(n);\n\n storage* data = new storage[n];\n\n for (size_t i = 0; i < m_size; i++) {\n new (&data[i]) T(std::move(*ptr(i)));\n ptr(i)->~T();\n }\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n\n m_capacity = n;\n u.m_ptr = data;\n }\n\n const T* data() const { return ptr(0); }\n T* data() { return ptr(0); }\n\n void resize(size_t n) {\n reserve(n);\n\n for (size_t i = n; i < m_size; i++)\n ptr(i)->~T();\n\n for (size_t i = m_size; i < n; i++)\n new (ptr(i)) T();\n\n m_size = n;\n }\n\n void push_back(const T& object) {\n reserve(m_size + 1);\n new (ptr(m_size++)) T(object);\n }\n\n void push_back(T&& object) {\n reserve(m_size + 1);\n new (ptr(m_size++)) T(std::move(object));\n }\n\n template\n T& emplace_back(Args... 
args) {\n reserve(m_size + 1);\n return *(new (ptr(m_size++)) T(std::forward(args)...));\n }\n\n void erase(size_t idx) {\n ptr(idx)->~T();\n\n for (size_t i = idx; i < m_size - 1; i++) {\n new (ptr(i)) T(std::move(*ptr(i + 1)));\n ptr(i + 1)->~T();\n }\n }\n\n void pop_back() {\n ptr(--m_size)->~T();\n }\n\n void clear() {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n m_size = 0;\n }\n\n bool empty() const {\n return m_size == 0;\n }\n\n T& operator [] (size_t idx) { return *ptr(idx); }\n const T& operator [] (size_t idx) const { return *ptr(idx); }\n\n T& front() { return *ptr(0); }\n const T& front() const { return *ptr(0); }\n\n T& back() { return *ptr(m_size - 1); }\n const T& back() const { return *ptr(m_size - 1); }\n\n private:\n\n size_t m_capacity = N;\n size_t m_size = 0;\n\n union {\n storage* m_ptr;\n storage m_data[N];\n } u;\n\n size_t pick_capacity(size_t n) {\n // Pick next largest power of two for the new capacity\n return size_t(1u) << ((sizeof(n) * 8u) - bit::lzcnt(n - 1));\n }\n\n T* ptr(size_t idx) {\n return m_capacity == N\n ? reinterpret_cast(&u.m_data[idx])\n : reinterpret_cast(&u.m_ptr[idx]);\n }\n\n const T* ptr(size_t idx) const {\n return m_capacity == N\n ? 
reinterpret_cast(&u.m_data[idx])\n : reinterpret_cast(&u.m_ptr[idx]);\n }\n\n };\n\n}\n"], ["/lsfg-vk/src/config/config.cpp", "#include \"config/config.hpp\"\n#include \"common/exception.hpp\"\n\n#include \"config/default_conf.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Config;\n\nnamespace {\n Configuration globalConf{};\n std::optional> gameConfs;\n}\n\nConfiguration Config::activeConf{};\n\nnamespace {\n /// Turn a string into a VkPresentModeKHR enum value.\n VkPresentModeKHR into_present(const std::string& mode) {\n if (mode == \"fifo\" || mode == \"vsync\")\n return VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR;\n if (mode == \"mailbox\")\n return VkPresentModeKHR::VK_PRESENT_MODE_MAILBOX_KHR;\n if (mode == \"immediate\")\n return VkPresentModeKHR::VK_PRESENT_MODE_IMMEDIATE_KHR;\n return VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR;\n }\n}\n\nvoid Config::updateConfig(const std::string& file) {\n if (!std::filesystem::exists(file)) {\n std::cerr << \"lsfg-vk: Placing default configuration file at \" << file << '\\n';\n const auto parent = std::filesystem::path(file).parent_path();\n if (!std::filesystem::exists(parent))\n if (!std::filesystem::create_directories(parent))\n throw std::runtime_error(\"Unable to create configuration directory at \" + parent.string());\n\n std::ofstream out(file);\n if (!out.is_open())\n throw std::runtime_error(\"Unable to create configuration file at \" + file);\n out << DEFAULT_CONFIG;\n out.close();\n }\n\n // parse config file\n std::optional parsed;\n try {\n parsed.emplace(toml::parse(file));\n if (!parsed->contains(\"version\"))\n throw std::runtime_error(\"Configuration file is missing 'version' field\");\n if (parsed->at(\"version\").as_integer() != 1)\n throw std::runtime_error(\"Configuration file version is not supported, expected 1\");\n } catch (const std::exception& e) {\n 
throw LSFG::rethrowable_error(\"Unable to parse configuration file\", e);\n }\n auto& toml = *parsed;\n\n // parse global configuration\n const toml::value globalTable = toml::find_or_default(toml, \"global\");\n const Configuration global{\n .dll = toml::find_or(globalTable, \"dll\", std::string()),\n .config_file = file,\n .timestamp = std::filesystem::last_write_time(file)\n };\n\n // validate global configuration\n if (global.multiplier < 2)\n throw std::runtime_error(\"Global Multiplier cannot be less than 2\");\n if (global.flowScale < 0.25F || global.flowScale > 1.0F)\n throw std::runtime_error(\"Flow scale must be between 0.25 and 1.0\");\n\n // parse game-specific configuration\n std::unordered_map games;\n const toml::value gamesList = toml::find_or_default(toml, \"game\");\n for (const auto& gameTable : gamesList.as_array()) {\n if (!gameTable.is_table())\n throw std::runtime_error(\"Invalid game configuration entry\");\n if (!gameTable.contains(\"exe\"))\n throw std::runtime_error(\"Game override missing 'exe' field\");\n\n const std::string exe = toml::find(gameTable, \"exe\");\n Configuration game{\n .enable = true,\n .dll = global.dll,\n .multiplier = toml::find_or(gameTable, \"multiplier\", 2U),\n .flowScale = toml::find_or(gameTable, \"flow_scale\", 1.0F),\n .performance = toml::find_or(gameTable, \"performance_mode\", false),\n .hdr = toml::find_or(gameTable, \"hdr_mode\", false),\n .e_present = into_present(toml::find_or(gameTable, \"experimental_present_mode\", \"\")),\n .config_file = file,\n .timestamp = global.timestamp\n };\n\n // validate the configuration\n if (game.multiplier < 1)\n throw std::runtime_error(\"Multiplier cannot be less than 1\");\n if (game.flowScale < 0.25F || game.flowScale > 1.0F)\n throw std::runtime_error(\"Flow scale must be between 0.25 and 1.0\");\n games[exe] = std::move(game);\n }\n\n // store configurations\n globalConf = global;\n gameConfs = std::move(games);\n}\n\nConfiguration Config::getConfig(const 
std::pair& name) {\n // process legacy environment variables\n if (std::getenv(\"LSFG_LEGACY\")) {\n Configuration conf{\n .enable = true,\n .multiplier = 2,\n .flowScale = 1.0F,\n .e_present = VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR\n };\n\n const char* dll = std::getenv(\"LSFG_DLL_PATH\");\n if (dll) conf.dll = std::string(dll);\n const char* multiplier = std::getenv(\"LSFG_MULTIPLIER\");\n if (multiplier) conf.multiplier = std::stoul(multiplier);\n const char* flow_scale = std::getenv(\"LSFG_FLOW_SCALE\");\n if (flow_scale) conf.flowScale = std::stof(flow_scale);\n const char* performance = std::getenv(\"LSFG_PERFORMANCE_MODE\");\n if (performance) conf.performance = std::string(performance) == \"1\";\n const char* hdr = std::getenv(\"LSFG_HDR_MODE\");\n if (hdr) conf.hdr = std::string(hdr) == \"1\";\n const char* e_present = std::getenv(\"LSFG_EXPERIMENTAL_PRESENT_MODE\");\n if (e_present) conf.e_present = into_present(std::string(e_present));\n\n return conf;\n }\n\n // process new configuration system\n if (!gameConfs.has_value())\n return globalConf;\n\n const auto& games = *gameConfs;\n auto it = std::ranges::find_if(games, [&name](const auto& pair) {\n return name.first.ends_with(pair.first) || (name.second == pair.first);\n });\n if (it != games.end())\n return it->second;\n\n return globalConf;\n}\n"], ["/lsfg-vk/src/hooks.cpp", "#include \"hooks.hpp\"\n#include \"common/exception.hpp\"\n#include \"config/config.hpp\"\n#include \"utils/utils.hpp\"\n#include \"context.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Hooks;\n\nnamespace {\n\n ///\n /// Add extensions to the instance create info.\n ///\n VkResult myvkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n auto extensions = Utils::addExtensions(\n pCreateInfo->ppEnabledExtensionNames,\n 
pCreateInfo->enabledExtensionCount,\n {\n \"VK_KHR_get_physical_device_properties2\",\n \"VK_KHR_external_memory_capabilities\",\n \"VK_KHR_external_semaphore_capabilities\"\n }\n );\n VkInstanceCreateInfo createInfo = *pCreateInfo;\n createInfo.enabledExtensionCount = static_cast(extensions.size());\n createInfo.ppEnabledExtensionNames = extensions.data();\n auto res = Layer::ovkCreateInstance(&createInfo, pAllocator, pInstance);\n if (res == VK_ERROR_EXTENSION_NOT_PRESENT)\n throw std::runtime_error(\n \"Required Vulkan instance extensions are not present.\"\n \"Your GPU driver is not supported.\");\n return res;\n }\n\n /// Map of devices to related information.\n std::unordered_map deviceToInfo;\n\n ///\n /// Add extensions to the device create info.\n /// (function pointers are not initialized yet)\n ///\n VkResult myvkCreateDevicePre(\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n // add extensions\n auto extensions = Utils::addExtensions(\n pCreateInfo->ppEnabledExtensionNames,\n pCreateInfo->enabledExtensionCount,\n {\n \"VK_KHR_external_memory\",\n \"VK_KHR_external_memory_fd\",\n \"VK_KHR_external_semaphore\",\n \"VK_KHR_external_semaphore_fd\"\n }\n );\n VkDeviceCreateInfo createInfo = *pCreateInfo;\n createInfo.enabledExtensionCount = static_cast(extensions.size());\n createInfo.ppEnabledExtensionNames = extensions.data();\n auto res = Layer::ovkCreateDevice(physicalDevice, &createInfo, pAllocator, pDevice);\n if (res == VK_ERROR_EXTENSION_NOT_PRESENT)\n throw std::runtime_error(\n \"Required Vulkan device extensions are not present.\"\n \"Your GPU driver is not supported.\");\n return res;\n }\n\n ///\n /// Add related device information after the device is created.\n ///\n VkResult myvkCreateDevicePost(\n VkPhysicalDevice physicalDevice,\n VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks*,\n VkDevice* pDevice) {\n 
deviceToInfo.emplace(*pDevice, DeviceInfo {\n .device = *pDevice,\n .physicalDevice = physicalDevice,\n .queue = Utils::findQueue(*pDevice, physicalDevice, pCreateInfo, VK_QUEUE_GRAPHICS_BIT)\n });\n return VK_SUCCESS;\n }\n\n /// Erase the device information when the device is destroyed.\n void myvkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator) noexcept {\n deviceToInfo.erase(device);\n Layer::ovkDestroyDevice(device, pAllocator);\n }\n\n std::unordered_map swapchains;\n std::unordered_map swapchainToDeviceTable;\n std::unordered_map swapchainToPresent;\n\n ///\n /// Adjust swapchain creation parameters and create a swapchain context.\n ///\n VkResult myvkCreateSwapchainKHR(\n VkDevice device,\n const VkSwapchainCreateInfoKHR* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSwapchainKHR* pSwapchain) noexcept {\n // find device\n auto it = deviceToInfo.find(device);\n if (it == deviceToInfo.end()) {\n Utils::logLimitN(\"swapMap\", 5, \"Device not found in map\");\n return Layer::ovkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);\n }\n Utils::resetLimitN(\"swapMap\");\n auto& deviceInfo = it->second;\n\n // increase amount of images in swapchain\n VkSwapchainCreateInfoKHR createInfo = *pCreateInfo;\n const auto maxImages = Utils::getMaxImageCount(\n deviceInfo.physicalDevice, pCreateInfo->surface);\n createInfo.minImageCount = createInfo.minImageCount + 1\n + static_cast(deviceInfo.queue.first);\n if (createInfo.minImageCount > maxImages) {\n createInfo.minImageCount = maxImages;\n Utils::logLimitN(\"swapCount\", 10,\n \"Requested image count (\" +\n std::to_string(pCreateInfo->minImageCount) + \") \"\n \"exceeds maximum allowed (\" +\n std::to_string(maxImages) + \"). \"\n \"Continuing with maximum allowed image count. 
\"\n \"This might lead to performance degradation.\");\n } else {\n Utils::resetLimitN(\"swapCount\");\n }\n\n // allow copy operations on swapchain images\n createInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;\n createInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;\n\n // enforce present mode\n createInfo.presentMode = Config::activeConf.e_present;\n\n // retire potential old swapchain\n if (pCreateInfo->oldSwapchain) {\n swapchains.erase(pCreateInfo->oldSwapchain);\n swapchainToDeviceTable.erase(pCreateInfo->oldSwapchain);\n }\n\n // create swapchain\n auto res = Layer::ovkCreateSwapchainKHR(device, &createInfo, pAllocator, pSwapchain);\n if (res != VK_SUCCESS)\n return res; // can't be caused by lsfg-vk (yet)\n\n try {\n swapchainToPresent.emplace(*pSwapchain, createInfo.presentMode);\n\n // get all swapchain images\n uint32_t imageCount{};\n res = Layer::ovkGetSwapchainImagesKHR(device, *pSwapchain, &imageCount, nullptr);\n if (res != VK_SUCCESS || imageCount == 0)\n throw LSFG::vulkan_error(res, \"Failed to get swapchain image count\");\n\n std::vector swapchainImages(imageCount);\n res = Layer::ovkGetSwapchainImagesKHR(device, *pSwapchain,\n &imageCount, swapchainImages.data());\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get swapchain images\");\n\n // create swapchain context\n swapchainToDeviceTable.emplace(*pSwapchain, device);\n swapchains.emplace(*pSwapchain, LsContext(\n deviceInfo, *pSwapchain, pCreateInfo->imageExtent,\n swapchainImages\n ));\n\n std::cerr << \"lsfg-vk: Swapchain context \" <<\n (createInfo.oldSwapchain ? 
\"recreated\" : \"created\")\n << \" (using \" << imageCount << \" images).\\n\";\n\n Utils::resetLimitN(\"swapCtxCreate\");\n } catch (const std::exception& e) {\n Utils::logLimitN(\"swapCtxCreate\", 5,\n \"An error occurred while creating the swapchain wrapper:\\n\"\n \"- \" + std::string(e.what()));\n return VK_SUCCESS; // swapchain is still valid\n }\n return VK_SUCCESS;\n }\n\n ///\n /// Update presentation parameters and present the next frame(s).\n ///\n VkResult myvkQueuePresentKHR(\n VkQueue queue,\n const VkPresentInfoKHR* pPresentInfo) noexcept {\n // find swapchain device\n auto it = swapchainToDeviceTable.find(*pPresentInfo->pSwapchains);\n if (it == swapchainToDeviceTable.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n\n // find device info\n auto it2 = deviceToInfo.find(it->second);\n if (it2 == deviceToInfo.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Device not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& deviceInfo = it2->second;\n\n // find swapchain context\n auto it3 = swapchains.find(*pPresentInfo->pSwapchains);\n if (it3 == swapchains.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain context not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& swapchain = it3->second;\n\n // find present mode\n auto it4 = swapchainToPresent.find(*pPresentInfo->pSwapchains);\n if (it4 == swapchainToPresent.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain present mode not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& present = it4->second;\n\n // enforce present mode | NOLINTBEGIN\n #pragma clang diagnostic push\n #pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n const VkSwapchainPresentModeInfoEXT* presentModeInfo =\n reinterpret_cast(pPresentInfo->pNext);\n while (presentModeInfo) {\n if (presentModeInfo->sType == 
VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODE_INFO_EXT) {\n for (size_t i = 0; i < presentModeInfo->swapchainCount; i++)\n const_cast(presentModeInfo->pPresentModes)[i] =\n present;\n }\n presentModeInfo =\n reinterpret_cast(presentModeInfo->pNext);\n }\n #pragma clang diagnostic pop\n\n // NOLINTEND | present the next frame\n VkResult res{}; // might return VK_SUBOPTIMAL_KHR\n try {\n // ensure config is valid\n auto& conf = Config::activeConf;\n if (!conf.config_file.empty()\n && (\n !std::filesystem::exists(conf.config_file)\n || conf.timestamp != std::filesystem::last_write_time(conf.config_file)\n )) {\n Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n return VK_ERROR_OUT_OF_DATE_KHR;\n }\n\n // ensure present mode is still valid\n if (present != conf.e_present) {\n Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n return VK_ERROR_OUT_OF_DATE_KHR;\n }\n\n // skip if disabled\n if (conf.multiplier <= 1)\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n\n // present the swapchain\n std::vector semaphores(pPresentInfo->waitSemaphoreCount);\n std::copy_n(pPresentInfo->pWaitSemaphores, semaphores.size(), semaphores.data());\n\n res = swapchain.present(deviceInfo, pPresentInfo->pNext,\n queue, semaphores, *pPresentInfo->pImageIndices);\n\n Utils::resetLimitN(\"swapPresent\");\n } catch (const std::exception& e) {\n Utils::logLimitN(\"swapPresent\", 5,\n \"An error occurred while presenting the swapchain:\\n\"\n \"- \" + std::string(e.what()));\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return res;\n }\n\n /// Erase the swapchain context and mapping when the swapchain is destroyed.\n void myvkDestroySwapchainKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n const VkAllocationCallbacks* pAllocator) noexcept {\n swapchains.erase(swapchain);\n swapchainToDeviceTable.erase(swapchain);\n swapchainToPresent.erase(swapchain);\n Layer::ovkDestroySwapchainKHR(device, swapchain, pAllocator);\n }\n}\n\nstd::unordered_map Hooks::hooks = {\n // instance hooks\n 
{\"vkCreateInstance\", reinterpret_cast(myvkCreateInstance)},\n\n // device hooks\n {\"vkCreateDevicePre\", reinterpret_cast(myvkCreateDevicePre)},\n {\"vkCreateDevicePost\", reinterpret_cast(myvkCreateDevicePost)},\n {\"vkDestroyDevice\", reinterpret_cast(myvkDestroyDevice)},\n\n // swapchain hooks\n {\"vkCreateSwapchainKHR\", reinterpret_cast(myvkCreateSwapchainKHR)},\n {\"vkQueuePresentKHR\", reinterpret_cast(myvkQueuePresentKHR)},\n {\"vkDestroySwapchainKHR\", reinterpret_cast(myvkDestroySwapchainKHR)}\n};\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/rc/util_rc_ptr.h", "#pragma once\n\n#include \n#include \n#include \n#include \n#include \n\nnamespace dxvk {\n\n /**\n * \\brief Pointer for reference-counted objects\n * \n * This only requires the given type to implement \\c incRef\n * and \\c decRef methods that adjust the reference count.\n * \\tparam T Object type\n */\n template\n class Rc {\n template\n friend class Rc;\n public:\n\n Rc() = default;\n Rc(std::nullptr_t) { }\n\n Rc(T* object)\n : m_object(object) {\n this->incRef();\n }\n\n Rc(const Rc& other)\n : m_object(other.m_object) {\n this->incRef();\n }\n\n template\n Rc(const Rc& other)\n : m_object(other.m_object) {\n this->incRef();\n }\n\n Rc(Rc&& other)\n : m_object(other.m_object) {\n other.m_object = nullptr;\n }\n\n template\n Rc(Rc&& other)\n : m_object(other.m_object) {\n other.m_object = nullptr;\n }\n\n Rc& operator = (std::nullptr_t) {\n this->decRef();\n m_object = nullptr;\n return *this;\n }\n\n Rc& operator = (const Rc& other) {\n other.incRef();\n this->decRef();\n m_object = other.m_object;\n return *this;\n }\n\n template\n Rc& operator = (const Rc& other) {\n other.incRef();\n this->decRef();\n m_object = other.m_object;\n return *this;\n }\n\n Rc& operator = (Rc&& other) {\n this->decRef();\n this->m_object = other.m_object;\n other.m_object = nullptr;\n return *this;\n }\n\n template\n Rc& operator = (Rc&& other) {\n this->decRef();\n this->m_object = other.m_object;\n 
other.m_object = nullptr;\n return *this;\n }\n\n ~Rc() {\n this->decRef();\n }\n\n T& operator * () const { return *m_object; }\n T* operator -> () const { return m_object; }\n T* ptr() const { return m_object; }\n\n template bool operator == (const Rc& other) const { return m_object == other.m_object; }\n template bool operator != (const Rc& other) const { return m_object != other.m_object; }\n\n template bool operator == (Tx* other) const { return m_object == other; }\n template bool operator != (Tx* other) const { return m_object != other; }\n\n bool operator == (std::nullptr_t) const { return m_object == nullptr; }\n bool operator != (std::nullptr_t) const { return m_object != nullptr; }\n \n explicit operator bool () const {\n return m_object != nullptr;\n }\n\n /**\n * \\brief Sets pointer without acquiring a reference\n *\n * Must only be use when a reference has been taken via\n * other means.\n * \\param [in] object Object pointer\n */\n void unsafeInsert(T* object) {\n this->decRef();\n m_object = object;\n }\n\n /**\n * \\brief Extracts raw pointer\n *\n * Sets the smart pointer to null without decrementing the\n * reference count. 
Must only be used when the reference\n * count is decremented in some other way.\n * \\returns Pointer to owned object\n */\n T* unsafeExtract() {\n return std::exchange(m_object, nullptr);\n }\n\n /**\n * \\brief Creates smart pointer without taking reference\n *\n * Must only be used when a refernece has been obtained via other means.\n * \\param [in] object Pointer to object to take ownership of\n */\n static Rc unsafeCreate(T* object) {\n return Rc(object, false);\n }\n\n private:\n\n T* m_object = nullptr;\n\n explicit Rc(T* object, bool)\n : m_object(object) { }\n\n force_inline void incRef() const {\n if (m_object != nullptr)\n m_object->incRef();\n }\n\n force_inline void decRef() const {\n if (m_object != nullptr) {\n if constexpr (std::is_void_vdecRef())>) {\n m_object->decRef();\n } else {\n // Deprecated, objects should manage themselves now.\n if (!m_object->decRef())\n delete m_object;\n }\n }\n }\n\n };\n\n template\n bool operator == (Tx* a, const Rc& b) { return b == a; }\n\n template\n bool operator != (Tx* a, const Rc& b) { return b != a; }\n\n struct RcHash {\n template\n size_t operator () (const Rc& rc) const {\n return reinterpret_cast(rc.ptr()) / sizeof(T);\n }\n };\n\n}\n\ntemplate\nstd::ostream& operator << (std::ostream& os, const dxvk::Rc& rc) {\n return os << rc.ptr();\n}\n"], ["/lsfg-vk/framegen/src/core/image.cpp", "#include \n#include \n\n#include \"core/image.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nImage::Image(const Core::Device& device, VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n 
.mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = vkCreateImage(device.handle(), &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetImageMemoryRequirements(device.handle(), imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = vkBindImageMemory(device.handle(), imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // create image view\n const VkImageViewCreateInfo viewDesc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n .image = imageHandle,\n .viewType = VK_IMAGE_VIEW_TYPE_2D,\n .format = format,\n .components = {\n .r = 
VK_COMPONENT_SWIZZLE_IDENTITY,\n .g = VK_COMPONENT_SWIZZLE_IDENTITY,\n .b = VK_COMPONENT_SWIZZLE_IDENTITY,\n .a = VK_COMPONENT_SWIZZLE_IDENTITY\n },\n .subresourceRange = {\n .aspectMask = aspectFlags,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n\n VkImageView viewHandle{};\n res = vkCreateImageView(device.handle(), &viewDesc, nullptr, &viewHandle);\n if (res != VK_SUCCESS || viewHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create image view\");\n\n // store objects in shared ptr\n this->layout = std::make_shared(VK_IMAGE_LAYOUT_UNDEFINED);\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device.handle()](VkImage* img) {\n vkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n this->view = std::shared_ptr(\n new VkImageView(viewHandle),\n [dev = device.handle()](VkImageView* imgView) {\n vkDestroyImageView(dev, *imgView, nullptr);\n }\n );\n}\n\n// shared memory constructor\n\nImage::Image(const Core::Device& device, VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags, int fd)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkExternalMemoryImageCreateInfo externalInfo{\n .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .pNext = &externalInfo,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = vkCreateImage(device.handle(), &desc, nullptr, &imageHandle);\n if 
(res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetImageMemoryRequirements(device.handle(), imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // ~~allocate~~ and bind memory\n const VkMemoryDedicatedAllocateInfoKHR dedicatedInfo2{\n .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,\n .image = imageHandle,\n };\n const VkImportMemoryFdInfoKHR importInfo{\n .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,\n .pNext = &dedicatedInfo2,\n .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,\n .fd = fd // closes the fd\n };\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .pNext = fd == -1 ? 
nullptr : &importInfo,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = vkBindImageMemory(device.handle(), imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // create image view\n const VkImageViewCreateInfo viewDesc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n .image = imageHandle,\n .viewType = VK_IMAGE_VIEW_TYPE_2D,\n .format = format,\n .components = {\n .r = VK_COMPONENT_SWIZZLE_IDENTITY,\n .g = VK_COMPONENT_SWIZZLE_IDENTITY,\n .b = VK_COMPONENT_SWIZZLE_IDENTITY,\n .a = VK_COMPONENT_SWIZZLE_IDENTITY\n },\n .subresourceRange = {\n .aspectMask = aspectFlags,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n\n VkImageView viewHandle{};\n res = vkCreateImageView(device.handle(), &viewDesc, nullptr, &viewHandle);\n if (res != VK_SUCCESS || viewHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create image view\");\n\n // store objects in shared ptr\n this->layout = std::make_shared(VK_IMAGE_LAYOUT_UNDEFINED);\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device.handle()](VkImage* img) {\n vkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n this->view = std::shared_ptr(\n new VkImageView(viewHandle),\n [dev = device.handle()](VkImageView* imgView) {\n vkDestroyImageView(dev, *imgView, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/device.cpp", "#include \n#include \n\n#include \"core/device.hpp\"\n#include \"core/instance.hpp\"\n#include \"common/exception.hpp\"\n\n#include 
\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nconst std::vector requiredExtensions = {\n \"VK_KHR_external_memory_fd\",\n \"VK_KHR_external_semaphore_fd\",\n \"VK_EXT_robustness2\",\n};\n\nDevice::Device(const Instance& instance, uint64_t deviceUUID) {\n // get all physical devices\n uint32_t deviceCount{};\n auto res = vkEnumeratePhysicalDevices(instance.handle(), &deviceCount, nullptr);\n if (res != VK_SUCCESS || deviceCount == 0)\n throw LSFG::vulkan_error(res, \"Failed to enumerate physical devices\");\n\n std::vector devices(deviceCount);\n res = vkEnumeratePhysicalDevices(instance.handle(), &deviceCount, devices.data());\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get physical devices\");\n\n // get device by uuid\n std::optional physicalDevice;\n for (const auto& device : devices) {\n VkPhysicalDeviceProperties properties;\n vkGetPhysicalDeviceProperties(device, &properties);\n\n const uint64_t uuid =\n static_cast(properties.vendorID) << 32 | properties.deviceID;\n if (deviceUUID == uuid || deviceUUID == 0x1463ABAC) {\n physicalDevice = device;\n break;\n }\n }\n if (!physicalDevice)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Could not find physical device with UUID\");\n\n // find queue family indices\n uint32_t familyCount{};\n vkGetPhysicalDeviceQueueFamilyProperties(*physicalDevice, &familyCount, nullptr);\n\n std::vector queueFamilies(familyCount);\n vkGetPhysicalDeviceQueueFamilyProperties(*physicalDevice, &familyCount, queueFamilies.data());\n\n std::optional computeFamilyIdx;\n for (uint32_t i = 0; i < familyCount; ++i) {\n if (queueFamilies[i].queueFlags & VK_QUEUE_COMPUTE_BIT)\n computeFamilyIdx = i;\n }\n if (!computeFamilyIdx)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"No compute queue family found\");\n\n // create logical device\n const float queuePriority{1.0F}; // highest priority\n VkPhysicalDeviceRobustness2FeaturesEXT robustness2{\n .sType = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT,\n .nullDescriptor = VK_TRUE,\n };\n VkPhysicalDeviceVulkan13Features features13{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES,\n .pNext = &robustness2,\n .synchronization2 = VK_TRUE\n };\n const VkPhysicalDeviceVulkan12Features features12{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,\n .pNext = &features13,\n .timelineSemaphore = VK_TRUE,\n .vulkanMemoryModel = VK_TRUE\n };\n const VkDeviceQueueCreateInfo computeQueueDesc{\n .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,\n .queueFamilyIndex = *computeFamilyIdx,\n .queueCount = 1,\n .pQueuePriorities = &queuePriority\n };\n const VkDeviceCreateInfo deviceCreateInfo{\n .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,\n .pNext = &features12,\n .queueCreateInfoCount = 1,\n .pQueueCreateInfos = &computeQueueDesc,\n .enabledExtensionCount = static_cast(requiredExtensions.size()),\n .ppEnabledExtensionNames = requiredExtensions.data()\n };\n VkDevice deviceHandle{};\n res = vkCreateDevice(*physicalDevice, &deviceCreateInfo, nullptr, &deviceHandle);\n if (res != VK_SUCCESS | deviceHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create logical device\");\n\n volkLoadDevice(deviceHandle);\n\n // get compute queue\n VkQueue queueHandle{};\n vkGetDeviceQueue(deviceHandle, *computeFamilyIdx, 0, &queueHandle);\n\n // store in shared ptr\n this->computeQueue = queueHandle;\n this->computeFamilyIdx = *computeFamilyIdx;\n this->physicalDevice = *physicalDevice;\n this->device = std::shared_ptr(\n new VkDevice(deviceHandle),\n [](VkDevice* device) {\n vkDestroyDevice(*device, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/buffer.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software 
without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n\n// keep this header above \"windows.h\" because it contains many types\n#include \n\n#ifdef _WIN32\n\n#define WIN32_LEAN_AND_MEAN\n#define VC_EXTRALEAN\n\n#include \n#include \n#else\n#include \n#include \n#include \n#include \n#endif\n\nnamespace {\n\ninline std::uint16_t byteSwapUint16(std::uint16_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_ushort(val);\n#else\n return __builtin_bswap16(val);\n#endif\n}\n\ninline std::uint32_t byteSwapUint32(std::uint32_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_ulong(val);\n#else\n return __builtin_bswap32(val);\n#endif\n}\n\ninline uint64_t byteSwapUint64(std::uint64_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_uint64(val);\n#else\n return __builtin_bswap64(val);\n#endif\n}\n\n} // anonymous namespace\n\nnamespace peparse {\n\nextern std::uint32_t err;\nextern std::string err_loc;\n\nstruct buffer_detail {\n#ifdef _WIN32\n HANDLE file;\n HANDLE sec;\n#else\n int fd;\n#endif\n};\n\nbool readByte(bounded_buffer *b, std::uint32_t offset, std::uint8_t 
&out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (offset >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint8_t *tmp = (b->buf + offset);\n out = *tmp;\n\n return true;\n}\n\nbool readWord(bounded_buffer *b, std::uint32_t offset, std::uint16_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 1 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint16_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint16_t));\n if (b->swapBytes) {\n out = byteSwapUint16(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readDword(bounded_buffer *b, std::uint32_t offset, std::uint32_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 3 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint32_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint32_t));\n if (b->swapBytes) {\n out = byteSwapUint32(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readQword(bounded_buffer *b, std::uint32_t offset, std::uint64_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 7 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint64_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint64_t));\n if (b->swapBytes) {\n out = byteSwapUint64(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readChar16(bounded_buffer *b, std::uint32_t offset, char16_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 1 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n char16_t tmp;\n if (b->swapBytes) {\n std::uint8_t tmpBuf[2];\n tmpBuf[0] = *(b->buf + offset + 1);\n tmpBuf[1] = *(b->buf + offset);\n memcpy(&tmp, tmpBuf, sizeof(std::uint16_t));\n } else {\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint16_t));\n }\n out = tmp;\n\n 
return true;\n}\n\nbounded_buffer *readFileToFileBuffer(const char *filePath) {\n#ifdef _WIN32\n HANDLE h = CreateFileA(filePath,\n GENERIC_READ,\n FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,\n nullptr,\n OPEN_EXISTING,\n FILE_ATTRIBUTE_NORMAL,\n nullptr);\n if (h == INVALID_HANDLE_VALUE) {\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n\n DWORD fileSize = GetFileSize(h, nullptr);\n\n if (fileSize == INVALID_FILE_SIZE) {\n CloseHandle(h);\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n\n#else\n // only where we have mmap / open / etc\n int fd = open(filePath, O_RDONLY);\n\n if (fd == -1) {\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n#endif\n\n // make a buffer object\n bounded_buffer *p = new (std::nothrow) bounded_buffer();\n if (p == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n memset(p, 0, sizeof(bounded_buffer));\n buffer_detail *d = new (std::nothrow) buffer_detail();\n\n if (d == nullptr) {\n delete p;\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n memset(d, 0, sizeof(buffer_detail));\n p->detail = d;\n\n// only where we have mmap / open / etc\n#ifdef _WIN32\n p->detail->file = h;\n\n HANDLE hMap = CreateFileMapping(h, nullptr, PAGE_READONLY, 0, 0, nullptr);\n\n if (hMap == nullptr) {\n CloseHandle(h);\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->detail->sec = hMap;\n\n LPVOID ptr = MapViewOfFile(hMap, FILE_MAP_READ, 0, 0, 0);\n\n if (ptr == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->buf = reinterpret_cast(ptr);\n p->bufLen = fileSize;\n#else\n p->detail->fd = fd;\n\n struct stat s;\n memset(&s, 0, sizeof(struct stat));\n\n if (fstat(fd, &s) != 0) {\n close(fd);\n delete d;\n delete p;\n PE_ERR(PEERR_STAT);\n return nullptr;\n }\n\n void *maddr = mmap(nullptr,\n static_cast(s.st_size),\n PROT_READ,\n MAP_SHARED,\n fd,\n 0);\n\n if (maddr == MAP_FAILED) {\n close(fd);\n delete d;\n delete p;\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->buf = reinterpret_cast(maddr);\n p->bufLen = static_cast(s.st_size);\n#endif\n 
p->copy = false;\n p->swapBytes = false;\n\n return p;\n}\n\nbounded_buffer *makeBufferFromPointer(std::uint8_t *data, std::uint32_t sz) {\n if (data == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n bounded_buffer *p = new (std::nothrow) bounded_buffer();\n\n if (p == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->copy = true;\n p->detail = nullptr;\n p->buf = data;\n p->bufLen = sz;\n p->swapBytes = false;\n\n return p;\n}\n\n// split buffer inclusively from from to to by offset\nbounded_buffer *\nsplitBuffer(bounded_buffer *b, std::uint32_t from, std::uint32_t to) {\n if (b == nullptr) {\n return nullptr;\n }\n\n // safety checks\n if (to < from || to > b->bufLen) {\n return nullptr;\n }\n\n // make a new buffer\n auto newBuff = new (std::nothrow) bounded_buffer();\n if (newBuff == nullptr) {\n return nullptr;\n }\n\n newBuff->copy = true;\n newBuff->buf = b->buf + from;\n newBuff->bufLen = (to - from);\n\n return newBuff;\n}\n\nvoid deleteBuffer(bounded_buffer *b) {\n if (b == nullptr) {\n return;\n }\n\n if (!b->copy) {\n#ifdef _WIN32\n UnmapViewOfFile(b->buf);\n CloseHandle(b->detail->sec);\n CloseHandle(b->detail->file);\n#else\n munmap(b->buf, b->bufLen);\n close(b->detail->fd);\n#endif\n }\n\n delete b->detail;\n delete b;\n}\n\nstd::uint64_t bufLen(bounded_buffer *b) {\n return b->bufLen;\n}\n} // namespace peparse\n"], ["/lsfg-vk/src/extract/extract.cpp", "#include \"extract/extract.hpp\"\n#include \"config/config.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Extract;\n\nconst std::unordered_map nameIdxTable = {{\n { \"mipmaps\", 255 },\n { \"alpha[0]\", 267 },\n { \"alpha[1]\", 268 },\n { \"alpha[2]\", 269 },\n { \"alpha[3]\", 270 },\n { \"beta[0]\", 275 },\n { \"beta[1]\", 276 },\n { \"beta[2]\", 277 },\n { \"beta[3]\", 278 },\n { \"beta[4]\", 279 },\n { \"gamma[0]\", 257 },\n { \"gamma[1]\", 259 },\n { \"gamma[2]\", 260 },\n { \"gamma[3]\", 261 
},\n { \"gamma[4]\", 262 },\n { \"delta[0]\", 257 },\n { \"delta[1]\", 263 },\n { \"delta[2]\", 264 },\n { \"delta[3]\", 265 },\n { \"delta[4]\", 266 },\n { \"delta[5]\", 258 },\n { \"delta[6]\", 271 },\n { \"delta[7]\", 272 },\n { \"delta[8]\", 273 },\n { \"delta[9]\", 274 },\n { \"generate\", 256 },\n { \"p_mipmaps\", 255 },\n { \"p_alpha[0]\", 290 },\n { \"p_alpha[1]\", 291 },\n { \"p_alpha[2]\", 292 },\n { \"p_alpha[3]\", 293 },\n { \"p_beta[0]\", 298 },\n { \"p_beta[1]\", 299 },\n { \"p_beta[2]\", 300 },\n { \"p_beta[3]\", 301 },\n { \"p_beta[4]\", 302 },\n { \"p_gamma[0]\", 280 },\n { \"p_gamma[1]\", 282 },\n { \"p_gamma[2]\", 283 },\n { \"p_gamma[3]\", 284 },\n { \"p_gamma[4]\", 285 },\n { \"p_delta[0]\", 280 },\n { \"p_delta[1]\", 286 },\n { \"p_delta[2]\", 287 },\n { \"p_delta[3]\", 288 },\n { \"p_delta[4]\", 289 },\n { \"p_delta[5]\", 281 },\n { \"p_delta[6]\", 294 },\n { \"p_delta[7]\", 295 },\n { \"p_delta[8]\", 296 },\n { \"p_delta[9]\", 297 },\n { \"p_generate\", 256 },\n}};\n\nnamespace {\n auto& shaders() {\n static std::unordered_map> shaderData;\n return shaderData;\n }\n\n int on_resource(void*, const peparse::resource& res) {\n if (res.type != peparse::RT_RCDATA || res.buf == nullptr || res.buf->bufLen <= 0)\n return 0;\n std::vector resource_data(res.buf->bufLen);\n std::copy_n(res.buf->buf, res.buf->bufLen, resource_data.data());\n shaders()[res.name] = resource_data;\n return 0;\n }\n\n const std::vector PATHS{{\n \".local/share/Steam/steamapps/common\",\n \".steam/steam/steamapps/common\",\n \".steam/debian-installation/steamapps/common\",\n \".var/app/com.valvesoftware.Steam/.local/share/Steam/steamapps/common\",\n \"snap/steam/common/.local/share/Steam/steamapps/common\"\n }};\n\n std::string getDllPath() {\n // overriden path\n std::string dllPath = Config::activeConf.dll;\n if (!dllPath.empty())\n return dllPath;\n // home based paths\n const char* home = getenv(\"HOME\");\n const std::string homeStr = home ? 
home : \"\";\n for (const auto& base : PATHS) {\n const std::filesystem::path path =\n std::filesystem::path(homeStr) / base / \"Lossless Scaling\" / \"Lossless.dll\";\n if (std::filesystem::exists(path))\n return path.string();\n }\n // xdg home\n const char* dataDir = getenv(\"XDG_DATA_HOME\");\n if (dataDir && *dataDir != '\\0')\n return std::string(dataDir) + \"/Steam/steamapps/common/Lossless Scaling/Lossless.dll\";\n // final fallback\n return \"Lossless.dll\";\n }\n}\n\nvoid Extract::extractShaders() {\n if (!shaders().empty())\n return;\n\n // parse the dll\n peparse::parsed_pe* dll = peparse::ParsePEFromFile(getDllPath().c_str());\n if (!dll)\n throw std::runtime_error(\"Unable to read Lossless.dll, is it installed?\");\n peparse::IterRsrc(dll, on_resource, nullptr);\n peparse::DestructParsedPE(dll);\n\n // ensure all shaders are present\n for (const auto& [name, idx] : nameIdxTable)\n if (shaders().find(idx) == shaders().end())\n throw std::runtime_error(\"Shader not found: \" + name + \".\\n- Is Lossless Scaling up to date?\");\n}\n\nstd::vector Extract::getShader(const std::string& name) {\n if (shaders().empty())\n throw std::runtime_error(\"Shaders are not loaded.\");\n\n auto hit = nameIdxTable.find(name);\n if (hit == nameIdxTable.end())\n throw std::runtime_error(\"Shader hash not found: \" + name);\n\n auto sit = shaders().find(hit->second);\n if (sit == shaders().end())\n throw std::runtime_error(\"Shader not found: \" + name);\n\n return sit->second;\n}\n"], ["/lsfg-vk/framegen/src/core/commandbuffer.cpp", "#include \n#include \n\n#include \"core/commandbuffer.hpp\"\n#include \"core/device.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/fence.hpp\"\n#include \"core/semaphore.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nCommandBuffer::CommandBuffer(const Core::Device& device, const CommandPool& pool) {\n // create command buffer\n const 
VkCommandBufferAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n .commandPool = pool.handle(),\n .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n .commandBufferCount = 1\n };\n VkCommandBuffer commandBufferHandle{};\n auto res = vkAllocateCommandBuffers(device.handle(), &desc, &commandBufferHandle);\n if (res != VK_SUCCESS || commandBufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate command buffer\");\n\n // store command buffer in shared ptr\n this->state = std::make_shared(CommandBufferState::Empty);\n this->commandBuffer = std::shared_ptr(\n new VkCommandBuffer(commandBufferHandle),\n [dev = device.handle(), pool = pool.handle()](VkCommandBuffer* cmdBuffer) {\n vkFreeCommandBuffers(dev, pool, 1, cmdBuffer);\n }\n );\n}\n\nvoid CommandBuffer::begin() {\n if (*this->state != CommandBufferState::Empty)\n throw std::logic_error(\"Command buffer is not in Empty state\");\n\n const VkCommandBufferBeginInfo beginInfo = {\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT\n };\n auto res = vkBeginCommandBuffer(*this->commandBuffer, &beginInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to begin command buffer\");\n\n *this->state = CommandBufferState::Recording;\n}\n\nvoid CommandBuffer::dispatch(uint32_t x, uint32_t y, uint32_t z) const {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n vkCmdDispatch(*this->commandBuffer, x, y, z);\n}\n\nvoid CommandBuffer::end() {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n auto res = vkEndCommandBuffer(*this->commandBuffer);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to end command buffer\");\n\n *this->state = CommandBufferState::Full;\n}\n\nvoid CommandBuffer::submit(VkQueue queue, std::optional fence,\n 
const std::vector& waitSemaphores,\n std::optional> waitSemaphoreValues,\n const std::vector& signalSemaphores,\n std::optional> signalSemaphoreValues) {\n if (*this->state != CommandBufferState::Full)\n throw std::logic_error(\"Command buffer is not in Full state\");\n\n const std::vector waitStages(waitSemaphores.size(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);\n VkTimelineSemaphoreSubmitInfo timelineInfo{\n .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,\n };\n if (waitSemaphoreValues.has_value()) {\n timelineInfo.waitSemaphoreValueCount =\n static_cast(waitSemaphoreValues->size());\n timelineInfo.pWaitSemaphoreValues = waitSemaphoreValues->data();\n }\n if (signalSemaphoreValues.has_value()) {\n timelineInfo.signalSemaphoreValueCount =\n static_cast(signalSemaphoreValues->size());\n timelineInfo.pSignalSemaphoreValues = signalSemaphoreValues->data();\n }\n\n std::vector waitSemaphoresHandles;\n waitSemaphoresHandles.reserve(waitSemaphores.size());\n for (const auto& semaphore : waitSemaphores)\n waitSemaphoresHandles.push_back(semaphore.handle());\n std::vector signalSemaphoresHandles;\n signalSemaphoresHandles.reserve(signalSemaphores.size());\n for (const auto& semaphore : signalSemaphores)\n signalSemaphoresHandles.push_back(semaphore.handle());\n\n const VkSubmitInfo submitInfo{\n .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,\n .pNext = (waitSemaphoreValues.has_value() || signalSemaphoreValues.has_value())\n ? &timelineInfo : nullptr,\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphoresHandles.data(),\n .pWaitDstStageMask = waitStages.data(),\n .commandBufferCount = 1,\n .pCommandBuffers = &(*this->commandBuffer),\n .signalSemaphoreCount = static_cast(signalSemaphores.size()),\n .pSignalSemaphores = signalSemaphoresHandles.data()\n };\n auto res = vkQueueSubmit(queue, 1, &submitInfo, fence ? 
fence->handle() : VK_NULL_HANDLE);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to submit command buffer\");\n\n *this->state = CommandBufferState::Submitted;\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/context.cpp", "#include \n#include \n\n#include \"v3_1p/context.hpp\"\n#include \"common/utils.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1P;\n\nContext::Context(Vulkan& vk,\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n // import input images\n this->inImg_0 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in0);\n this->inImg_1 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in1);\n\n // prepare render data\n for (size_t i = 0; i < 8; i++) {\n auto& data = this->data.at(i);\n data.internalSemaphores.resize(vk.generationCount);\n data.outSemaphores.resize(vk.generationCount);\n data.completionFences.resize(vk.generationCount);\n data.cmdBuffers2.resize(vk.generationCount);\n }\n\n // create shader chains\n this->mipmaps = Shaders::Mipmaps(vk, this->inImg_0, this->inImg_1);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(i) = Shaders::Alpha(vk, this->mipmaps.getOutImages().at(i));\n this->beta = Shaders::Beta(vk, this->alpha.at(0).getOutImages());\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i) = Shaders::Gamma(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(std::min(6 - i, 5)),\n (i == 0) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()));\n if (i < 4) continue;\n\n this->delta.at(i - 4) = Shaders::Delta(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(6 - i),\n (i == 4) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()),\n (i == 4) ? 
std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage1()));\n }\n this->generate = Shaders::Generate(vk,\n this->inImg_0, this->inImg_1,\n this->gamma.at(6).getOutImage(),\n this->delta.at(2).getOutImage1(),\n this->delta.at(2).getOutImage2(),\n outN, format);\n}\n\nvoid Context::present(Vulkan& vk,\n int inSem, const std::vector& outSem) {\n auto& data = this->data.at(this->frameIdx % 8);\n\n // 3. wait for completion of previous frame in this slot\n if (data.shouldWait)\n for (auto& fence : data.completionFences)\n if (!fence.wait(vk.device, UINT64_MAX))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Fence wait timed out\");\n data.shouldWait = true;\n\n // 1. create mipmaps and process input image\n if (inSem >= 0) data.inSemaphore = Core::Semaphore(vk.device, inSem);\n for (size_t i = 0; i < vk.generationCount; i++)\n data.internalSemaphores.at(i) = Core::Semaphore(vk.device);\n\n data.cmdBuffer1 = Core::CommandBuffer(vk.device, vk.commandPool);\n data.cmdBuffer1.begin();\n\n this->mipmaps.Dispatch(data.cmdBuffer1, this->frameIdx);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(6 - i).Dispatch(data.cmdBuffer1, this->frameIdx);\n this->beta.Dispatch(data.cmdBuffer1, this->frameIdx);\n\n data.cmdBuffer1.end();\n std::vector waits = { data.inSemaphore };\n if (inSem < 0) waits.clear();\n data.cmdBuffer1.submit(vk.device.getComputeQueue(), std::nullopt,\n waits, std::nullopt,\n data.internalSemaphores, std::nullopt);\n\n // 2. generate intermediary frames\n for (size_t pass = 0; pass < vk.generationCount; pass++) {\n auto& internalSemaphore = data.internalSemaphores.at(pass);\n auto& outSemaphore = data.outSemaphores.at(pass);\n if (inSem >= 0) outSemaphore = Core::Semaphore(vk.device, outSem.empty() ? 
-1 : outSem.at(pass));\n auto& completionFence = data.completionFences.at(pass);\n completionFence = Core::Fence(vk.device);\n\n auto& buf2 = data.cmdBuffers2.at(pass);\n buf2 = Core::CommandBuffer(vk.device, vk.commandPool);\n buf2.begin();\n\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i).Dispatch(buf2, this->frameIdx, pass);\n if (i >= 4)\n this->delta.at(i - 4).Dispatch(buf2, this->frameIdx, pass, i == 6);\n }\n this->generate.Dispatch(buf2, this->frameIdx, pass);\n\n buf2.end();\n std::vector signals = { outSemaphore };\n if (inSem < 0) signals.clear();\n buf2.submit(vk.device.getComputeQueue(), completionFence,\n { internalSemaphore }, std::nullopt,\n signals, std::nullopt);\n }\n\n this->frameIdx++;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_common.h", "class DxbcProgramType {\n public:\n VkShaderStageFlagBits shaderStage() const {\n switch (m_type) {\n case DxbcProgramType::PixelShader : return VK_SHADER_STAGE_FRAGMENT_BIT;\n case DxbcProgramType::VertexShader : return VK_SHADER_STAGE_VERTEX_BIT;\n case DxbcProgramType::GeometryShader : return VK_SHADER_STAGE_GEOMETRY_BIT;\n case DxbcProgramType::HullShader : return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;\n case DxbcProgramType::DomainShader : return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;\n case DxbcProgramType::ComputeShader : return VK_SHADER_STAGE_COMPUTE_BIT;\n default: throw DxvkError(\"DxbcProgramInfo::shaderStage: Unsupported program type\");\n }\n }\n spv::ExecutionModel executionModel() const {\n switch (m_type) {\n case DxbcProgramType::PixelShader : return spv::ExecutionModelFragment;\n case DxbcProgramType::VertexShader : return spv::ExecutionModelVertex;\n case DxbcProgramType::GeometryShader : return spv::ExecutionModelGeometry;\n case DxbcProgramType::HullShader : return spv::ExecutionModelTessellationControl;\n case DxbcProgramType::DomainShader : return spv::ExecutionModelTessellationEvaluation;\n case DxbcProgramType::ComputeShader : return 
spv::ExecutionModelGLCompute;\n default: throw DxvkError(\"DxbcProgramInfo::executionModel: Unsupported program type\");\n }\n }\n private:\n DxbcProgramType m_type = DxbcProgramType::PixelShader;\n};"], ["/lsfg-vk/framegen/src/common/utils.cpp", "#include \n#include \n\n#include \"common/utils.hpp\"\n#include \"core/buffer.hpp\"\n#include \"core/image.hpp\"\n#include \"core/device.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/fence.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Utils;\n\nBarrierBuilder& BarrierBuilder::addR2W(Core::Image& image) {\n this->barriers.emplace_back(VkImageMemoryBarrier2 {\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .srcStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .srcAccessMask = VK_ACCESS_2_SHADER_READ_BIT,\n .dstStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .dstAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_GENERAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n });\n image.setLayout(VK_IMAGE_LAYOUT_GENERAL);\n\n return *this;\n}\n\nBarrierBuilder& BarrierBuilder::addW2R(Core::Image& image) {\n this->barriers.emplace_back(VkImageMemoryBarrier2 {\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .srcStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .srcAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT,\n .dstStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .dstAccessMask = VK_ACCESS_2_SHADER_READ_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_GENERAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n });\n image.setLayout(VK_IMAGE_LAYOUT_GENERAL);\n\n return *this;\n}\n\nvoid BarrierBuilder::build() const {\n 
const VkDependencyInfo dependencyInfo = {\n .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n .imageMemoryBarrierCount = static_cast(this->barriers.size()),\n .pImageMemoryBarriers = this->barriers.data()\n };\n vkCmdPipelineBarrier2(this->commandBuffer->handle(), &dependencyInfo);\n}\n\nvoid Utils::uploadImage(const Core::Device& device, const Core::CommandPool& commandPool,\n Core::Image& image, const std::string& path) {\n // read image bytecode\n std::ifstream file(path.data(), std::ios::binary | std::ios::ate);\n if (!file.is_open())\n throw std::system_error(errno, std::generic_category(), \"Failed to open image: \" + path);\n\n std::streamsize size = file.tellg();\n size -= 124 + 4; // dds header and magic bytes\n std::vector code(static_cast(size));\n\n file.seekg(124 + 4, std::ios::beg);\n if (!file.read(code.data(), size))\n throw std::system_error(errno, std::generic_category(), \"Failed to read image: \" + path);\n\n file.close();\n\n // copy data to buffer\n const Core::Buffer stagingBuffer(\n device, code.data(), static_cast(code.size()),\n VK_BUFFER_USAGE_TRANSFER_SRC_BIT\n );\n\n // perform the upload\n Core::CommandBuffer commandBuffer(device, commandPool);\n commandBuffer.begin();\n\n const VkImageMemoryBarrier barrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .srcAccessMask = VK_ACCESS_NONE,\n .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n };\n image.setLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n vkCmdPipelineBarrier(\n commandBuffer.handle(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,\n 0, 0, nullptr, 0, nullptr, 1, &barrier\n );\n\n auto extent = image.getExtent();\n const VkBufferImageCopy region{\n .bufferImageHeight = 0,\n .imageSubresource = {\n .aspectMask = image.getAspectFlags(),\n 
.layerCount = 1\n },\n .imageExtent = { extent.width, extent.height, 1 }\n };\n vkCmdCopyBufferToImage(\n commandBuffer.handle(),\n stagingBuffer.handle(), image.handle(),\n VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion\n );\n\n commandBuffer.end();\n\n Core::Fence fence(device);\n commandBuffer.submit(device.getComputeQueue(), fence);\n\n // wait for the upload to complete\n if (!fence.wait(device))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Upload operation timed out\");\n}\n\nvoid Utils::clearImage(const Core::Device& device, Core::Image& image, bool white) {\n Core::Fence fence(device);\n const Core::CommandPool cmdPool(device);\n Core::CommandBuffer cmdBuf(device, cmdPool);\n cmdBuf.begin();\n\n const VkImageMemoryBarrier2 barrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,\n .dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const VkDependencyInfo dependencyInfo = {\n .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n .imageMemoryBarrierCount = 1,\n .pImageMemoryBarriers = &barrier\n };\n image.setLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n vkCmdPipelineBarrier2(cmdBuf.handle(), &dependencyInfo);\n\n const float clearValue = white ? 
1.0F : 0.0F;\n const VkClearColorValue clearColor = {{ clearValue, clearValue, clearValue, clearValue }};\n const VkImageSubresourceRange subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n };\n vkCmdClearColorImage(cmdBuf.handle(),\n image.handle(), image.getLayout(),\n &clearColor,\n 1, &subresourceRange);\n\n cmdBuf.end();\n\n cmdBuf.submit(device.getComputeQueue(), fence);\n if (!fence.wait(device))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Failed to wait for clearing fence.\");\n}\n"], ["/lsfg-vk/framegen/v3.1_src/context.cpp", "#include \n#include \n\n#include \"v3_1/context.hpp\"\n#include \"common/utils.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1;\n\nContext::Context(Vulkan& vk,\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n // import input images\n this->inImg_0 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in0);\n this->inImg_1 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in1);\n\n // prepare render data\n for (size_t i = 0; i < 8; i++) {\n auto& data = this->data.at(i);\n data.internalSemaphores.resize(vk.generationCount);\n data.outSemaphores.resize(vk.generationCount);\n data.completionFences.resize(vk.generationCount);\n data.cmdBuffers2.resize(vk.generationCount);\n }\n\n // create shader chains\n this->mipmaps = Shaders::Mipmaps(vk, this->inImg_0, this->inImg_1);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(i) = Shaders::Alpha(vk, this->mipmaps.getOutImages().at(i));\n this->beta = Shaders::Beta(vk, this->alpha.at(0).getOutImages());\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i) = Shaders::Gamma(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(std::min(6 - i, 5)),\n (i == 0) ? 
std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()));\n if (i < 4) continue;\n\n this->delta.at(i - 4) = Shaders::Delta(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(6 - i),\n (i == 4) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage1()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage2()));\n }\n this->generate = Shaders::Generate(vk,\n this->inImg_0, this->inImg_1,\n this->gamma.at(6).getOutImage(),\n this->delta.at(2).getOutImage1(),\n this->delta.at(2).getOutImage2(),\n outN, format);\n}\n\nvoid Context::present(Vulkan& vk,\n int inSem, const std::vector& outSem) {\n auto& data = this->data.at(this->frameIdx % 8);\n\n // 3. wait for completion of previous frame in this slot\n if (data.shouldWait)\n for (auto& fence : data.completionFences)\n if (!fence.wait(vk.device, UINT64_MAX))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Fence wait timed out\");\n data.shouldWait = true;\n\n // 1. create mipmaps and process input image\n if (inSem >= 0) data.inSemaphore = Core::Semaphore(vk.device, inSem);\n for (size_t i = 0; i < vk.generationCount; i++)\n data.internalSemaphores.at(i) = Core::Semaphore(vk.device);\n\n data.cmdBuffer1 = Core::CommandBuffer(vk.device, vk.commandPool);\n data.cmdBuffer1.begin();\n\n this->mipmaps.Dispatch(data.cmdBuffer1, this->frameIdx);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(6 - i).Dispatch(data.cmdBuffer1, this->frameIdx);\n this->beta.Dispatch(data.cmdBuffer1, this->frameIdx);\n\n data.cmdBuffer1.end();\n std::vector waits = { data.inSemaphore };\n if (inSem < 0) waits.clear();\n data.cmdBuffer1.submit(vk.device.getComputeQueue(), std::nullopt,\n waits, std::nullopt,\n data.internalSemaphores, std::nullopt);\n\n // 2. 
generate intermediary frames\n for (size_t pass = 0; pass < vk.generationCount; pass++) {\n auto& internalSemaphore = data.internalSemaphores.at(pass);\n auto& outSemaphore = data.outSemaphores.at(pass);\n if (inSem >= 0) outSemaphore = Core::Semaphore(vk.device, outSem.empty() ? -1 : outSem.at(pass));\n auto& completionFence = data.completionFences.at(pass);\n completionFence = Core::Fence(vk.device);\n\n auto& buf2 = data.cmdBuffers2.at(pass);\n buf2 = Core::CommandBuffer(vk.device, vk.commandPool);\n buf2.begin();\n\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i).Dispatch(buf2, this->frameIdx, pass);\n if (i >= 4)\n this->delta.at(i - 4).Dispatch(buf2, this->frameIdx, pass);\n }\n this->generate.Dispatch(buf2, this->frameIdx, pass);\n\n buf2.end();\n std::vector signals = { outSemaphore };\n if (inSem < 0) signals.clear();\n buf2.submit(vk.device.getComputeQueue(), completionFence,\n { internalSemaphore }, std::nullopt,\n signals, std::nullopt);\n }\n\n this->frameIdx++;\n}\n"], ["/lsfg-vk/framegen/src/core/buffer.cpp", "#include \n#include \n\n#include \"core/buffer.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nvoid Buffer::construct(const Core::Device& device, const void* data, VkBufferUsageFlags usage) {\n // create buffer\n const VkBufferCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n .size = this->size,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkBuffer bufferHandle{};\n auto res = vkCreateBuffer(device.handle(), &desc, nullptr, &bufferHandle);\n if (res != VK_SUCCESS || bufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan buffer\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetBufferMemoryRequirements(device.handle(), 
bufferHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags &\n (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for buffer\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan buffer\");\n\n res = vkBindBufferMemory(device.handle(), bufferHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan buffer\");\n\n // upload data to buffer\n uint8_t* buf{};\n res = vkMapMemory(device.handle(), memoryHandle, 0, this->size, 0, reinterpret_cast(&buf));\n if (res != VK_SUCCESS || buf == nullptr)\n throw LSFG::vulkan_error(res, \"Failed to map memory for Vulkan buffer\");\n std::copy_n(reinterpret_cast(data), this->size, buf);\n vkUnmapMemory(device.handle(), memoryHandle);\n\n // store buffer and memory in shared ptr\n this->buffer = std::shared_ptr(\n new VkBuffer(bufferHandle),\n [dev = device.handle()](VkBuffer* img) {\n vkDestroyBuffer(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n}\n"], 
["/lsfg-vk/framegen/src/core/descriptorset.cpp", "#include \n#include \n\n#include \"core/descriptorset.hpp\"\n#include \"core/device.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/pipeline.hpp\"\n#include \"core/image.hpp\"\n#include \"core/sampler.hpp\"\n#include \"core/buffer.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n\nusing namespace LSFG::Core;\n\nDescriptorSet::DescriptorSet(const Core::Device& device,\n const DescriptorPool& pool, const ShaderModule& shaderModule) {\n // create descriptor set\n VkDescriptorSetLayout layout = shaderModule.getLayout();\n const VkDescriptorSetAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,\n .descriptorPool = pool.handle(),\n .descriptorSetCount = 1,\n .pSetLayouts = &layout\n };\n VkDescriptorSet descriptorSetHandle{};\n auto res = vkAllocateDescriptorSets(device.handle(), &desc, &descriptorSetHandle);\n if (res != VK_SUCCESS || descriptorSetHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate descriptor set\");\n\n /// store set in shared ptr\n this->descriptorSet = std::shared_ptr(\n new VkDescriptorSet(descriptorSetHandle),\n [dev = device.handle(), pool = pool](VkDescriptorSet* setHandle) {\n vkFreeDescriptorSets(dev, pool.handle(), 1, setHandle);\n }\n );\n}\n\nDescriptorSetUpdateBuilder DescriptorSet::update(const Core::Device& device) const {\n return { *this, device };\n}\n\nvoid DescriptorSet::bind(const CommandBuffer& commandBuffer, const Pipeline& pipeline) const {\n VkDescriptorSet descriptorSetHandle = this->handle();\n vkCmdBindDescriptorSets(commandBuffer.handle(),\n VK_PIPELINE_BIND_POINT_COMPUTE, pipeline.getLayout(),\n 0, 1, &descriptorSetHandle, 0, nullptr);\n}\n\n// updater class\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Image& image) {\n this->entries.push_back({\n .sType = 
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n .imageView = image.getView(),\n .imageLayout = VK_IMAGE_LAYOUT_GENERAL\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Sampler& sampler) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n .sampler = sampler.handle(),\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Buffer& buffer) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = nullptr,\n .pBufferInfo = new VkDescriptorBufferInfo {\n .buffer = buffer.handle(),\n .range = buffer.getSize()\n }\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nvoid DescriptorSetUpdateBuilder::build() {\n vkUpdateDescriptorSets(this->device->handle(),\n static_cast(this->entries.size()),\n this->entries.data(), 0, nullptr);\n\n // NOLINTBEGIN\n for (const auto& entry : this->entries) {\n delete entry.pImageInfo;\n delete 
entry.pBufferInfo;\n }\n // NOLINTEND\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_flags.h", "#pragma once\n\n#include \n\n#include \"util_bit.h\"\n\nnamespace dxvk {\n \n template\n class Flags {\n \n public:\n \n using IntType = std::underlying_type_t;\n \n Flags() { }\n \n Flags(IntType t)\n : m_bits(t) { }\n \n template\n Flags(T f, Tx... fx) {\n this->set(f, fx...);\n }\n \n template\n void set(Tx... fx) {\n m_bits |= bits(fx...);\n }\n \n void set(Flags flags) {\n m_bits |= flags.m_bits;\n }\n \n template\n void clr(Tx... fx) {\n m_bits &= ~bits(fx...);\n }\n \n void clr(Flags flags) {\n m_bits &= ~flags.m_bits;\n }\n \n template\n bool any(Tx... fx) const {\n return (m_bits & bits(fx...)) != 0;\n }\n \n template\n bool all(Tx... fx) const {\n const IntType mask = bits(fx...);\n return (m_bits & mask) == mask;\n }\n \n bool test(T f) const {\n return this->any(f);\n }\n \n bool isClear() const {\n return m_bits == 0;\n }\n \n void clrAll() {\n m_bits = 0;\n }\n \n IntType raw() const {\n return m_bits;\n }\n \n Flags operator & (const Flags& other) const {\n return Flags(m_bits & other.m_bits);\n }\n \n Flags operator | (const Flags& other) const {\n return Flags(m_bits | other.m_bits);\n }\n \n Flags operator ^ (const Flags& other) const {\n return Flags(m_bits ^ other.m_bits);\n }\n\n bool operator == (const Flags& other) const {\n return m_bits == other.m_bits;\n }\n \n bool operator != (const Flags& other) const {\n return m_bits != other.m_bits;\n }\n \n private:\n \n IntType m_bits = 0;\n \n static IntType bit(T f) {\n return IntType(1) << static_cast(f);\n }\n \n template\n static IntType bits(T f, Tx... 
fx) {\n return bit(f) | bits(fx...);\n }\n \n static IntType bits() {\n return 0;\n }\n \n };\n \n}"], ["/lsfg-vk/framegen/v3.1p_src/shaders/delta.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/delta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nDelta::Delta(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg1,\n std::optional optImg2)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg1(std::move(optImg1)), optImg2(std::move(optImg2)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_delta[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[5]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[6]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, 
VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[7]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[8]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[9]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_delta[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[4]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[5]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[6]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[7]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[8]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[9]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 3; i++)\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n for (size_t i = 0; i < 2; i++)\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n\n this->outImg1 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n this->outImg2 = Core::Image(vk.device,\n { 
extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n false, !this->optImg1.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg1)\n .build();\n for (size_t i = 0; i < 3; i++) {\n pass.sixthDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(5));\n pass.sixthDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .build();\n }\n pass.descriptorSets.at(4) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(6));\n pass.descriptorSets.at(4).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .build();\n pass.descriptorSets.at(5) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(7));\n pass.descriptorSets.at(5).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .build();\n pass.descriptorSets.at(6) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(8));\n pass.descriptorSets.at(6).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .build();\n pass.descriptorSets.at(7) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(9));\n pass.descriptorSets.at(7).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg2)\n .build();\n }\n}\n\nvoid Delta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx,\n bool last) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n 
.addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg1)\n .addW2R(this->inImg2)\n .addR2W(this->outImg1)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n\n // sixth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addW2R(this->optImg2)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(5).bind(buf);\n pass.sixthDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(5));\n buf.dispatch(threadsX, threadsY, 1);\n\n if (!last)\n return;\n\n // seventh shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(6).bind(buf);\n pass.descriptorSets.at(4).bind(buf, this->pipelines.at(6));\n buf.dispatch(threadsX, threadsY, 1);\n\n // eighth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2)\n .build();\n this->pipelines.at(7).bind(buf);\n pass.descriptorSets.at(5).bind(buf, this->pipelines.at(7));\n buf.dispatch(threadsX, threadsY, 1);\n\n // ninth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(8).bind(buf);\n pass.descriptorSets.at(6).bind(buf, 
this->pipelines.at(8));\n buf.dispatch(threadsX, threadsY, 1);\n\n // tenth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->outImg2)\n .build();\n\n this->pipelines.at(9).bind(buf);\n pass.descriptorSets.at(7).bind(buf, this->pipelines.at(9));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/src/pool/resourcepool.cpp", "#include \"pool/resourcepool.hpp\"\n#include \"core/buffer.hpp\"\n#include \"core/device.hpp\"\n#include \"core/sampler.hpp\"\n\n#include \n\n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Pool;\n\nstruct ConstantBuffer {\n std::array inputOffset;\n uint32_t firstIter;\n uint32_t firstIterS;\n uint32_t advancedColorKind;\n uint32_t hdrSupport;\n float resolutionInvScale;\n float timestamp;\n float uiThreshold;\n std::array pad;\n};\n\nCore::Buffer ResourcePool::getBuffer(\n const Core::Device& device,\n float timestamp, bool firstIter, bool firstIterS) {\n uint64_t hash = 0;\n const union { float f; uint32_t i; } u{\n .f = timestamp };\n hash |= u.i;\n hash |= static_cast(firstIter) << 32;\n hash |= static_cast(firstIterS) << 33;\n\n auto it = buffers.find(hash);\n if (it != buffers.end())\n return it->second;\n\n // create the buffer\n const ConstantBuffer data{\n .inputOffset = { 0, 0 },\n .advancedColorKind = this->isHdr ? 
2U : 0U,\n .hdrSupport = this->isHdr,\n .resolutionInvScale = this->flowScale,\n .timestamp = timestamp,\n .uiThreshold = 0.5F,\n };\n Core::Buffer buffer(device, data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);\n buffers[hash] = buffer;\n return buffer;\n}\n\nCore::Sampler ResourcePool::getSampler(\n const Core::Device& device,\n VkSamplerAddressMode type,\n VkCompareOp compare,\n bool isWhite) {\n uint64_t hash = 0;\n hash |= static_cast(type) << 0;\n hash |= static_cast(compare) << 8;\n hash |= static_cast(isWhite) << 16;\n\n auto it = samplers.find(hash);\n if (it != samplers.end())\n return it->second;\n\n // create the sampler\n Core::Sampler sampler(device, type, compare, isWhite);\n samplers[hash] = sampler;\n return sampler;\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/gamma.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/gamma.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nGamma::Gamma(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg(std::move(optImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_gamma[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE 
},\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_gamma[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 3; i++)\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n for (size_t i = 0; i < 2; i++)\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n\n this->outImg = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n !this->optImg.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg)\n .build();\n }\n}\n\nvoid Gamma::Dispatch(const 
Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg)\n .addW2R(this->inImg2)\n .addR2W(this->outImg)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/generate.cpp", "#include \n#include \n\n#include \"v3_1/shaders/generate.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include 
\"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nGenerate::Generate(Vulkan& vk,\n Core::Image inImg1, Core::Image inImg2,\n Core::Image inImg3, Core::Image inImg4, Core::Image inImg5,\n const std::vector& fds, VkFormat format)\n : inImg1(std::move(inImg1)), inImg2(std::move(inImg2)),\n inImg3(std::move(inImg3)), inImg4(std::move(inImg4)),\n inImg5(std::move(inImg5)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"generate\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"generate\");\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg1.getExtent();\n for (size_t i = 0; i < vk.generationCount; i++)\n this->outImgs.emplace_back(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, fds.empty() ? -1 : fds.at(i));\n\n // hook up shaders\n for (size_t i = 0; i < vk.generationCount; i++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(i + 1) / static_cast(vk.generationCount + 1));\n for (size_t j = 0; j < 2; j++) {\n pass.descriptorSet.at(j) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModule);\n pass.descriptorSet.at(j).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg2 : this->inImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? 
this->inImg1 : this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg3)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg4)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg5)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n }\n }\n}\n\nvoid Generate::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first pass\n const auto extent = this->inImg1.getExtent();\n const uint32_t threadsX = (extent.width + 15) >> 4;\n const uint32_t threadsY = (extent.height + 15) >> 4;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg1)\n .addW2R(this->inImg2)\n .addW2R(this->inImg3)\n .addW2R(this->inImg4)\n .addW2R(this->inImg5)\n .addR2W(this->outImgs.at(pass_idx))\n .build();\n\n this->pipeline.bind(buf);\n pass.descriptorSet.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/generate.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/generate.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nGenerate::Generate(Vulkan& vk,\n Core::Image inImg1, Core::Image inImg2,\n Core::Image inImg3, Core::Image inImg4, Core::Image inImg5,\n const std::vector& fds, VkFormat format)\n : inImg1(std::move(inImg1)), inImg2(std::move(inImg2)),\n inImg3(std::move(inImg3)), inImg4(std::move(inImg4)),\n inImg5(std::move(inImg5)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"p_generate\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"p_generate\");\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = 
vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg1.getExtent();\n for (size_t i = 0; i < vk.generationCount; i++)\n this->outImgs.emplace_back(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, fds.empty() ? -1 : fds.at(i));\n\n // hook up shaders\n for (size_t i = 0; i < vk.generationCount; i++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(i + 1) / static_cast(vk.generationCount + 1));\n for (size_t j = 0; j < 2; j++) {\n pass.descriptorSet.at(j) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModule);\n pass.descriptorSet.at(j).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg2 : this->inImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? 
this->inImg1 : this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg3)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg4)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg5)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n }\n }\n}\n\nvoid Generate::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first pass\n const auto extent = this->inImg1.getExtent();\n const uint32_t threadsX = (extent.width + 15) >> 4;\n const uint32_t threadsY = (extent.height + 15) >> 4;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg1)\n .addW2R(this->inImg2)\n .addW2R(this->inImg3)\n .addW2R(this->inImg4)\n .addW2R(this->inImg5)\n .addR2W(this->outImgs.at(pass_idx))\n .build();\n\n this->pipeline.bind(buf);\n pass.descriptorSet.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/src/core/shadermodule.cpp", "#include \n#include \n\n#include \"core/shadermodule.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nShaderModule::ShaderModule(const Core::Device& device, const std::vector& code,\n const std::vector>& descriptorTypes) {\n // create shader module\n const uint8_t* data_ptr = code.data();\n const VkShaderModuleCreateInfo createInfo{\n .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,\n .codeSize = code.size(),\n .pCode = reinterpret_cast(data_ptr)\n };\n VkShaderModule shaderModuleHandle{};\n auto res = vkCreateShaderModule(device.handle(), &createInfo, nullptr, &shaderModuleHandle);\n if (res != VK_SUCCESS || !shaderModuleHandle)\n throw LSFG::vulkan_error(res, \"Failed to create shader module\");\n\n // create descriptor set layout\n std::vector layoutBindings;\n size_t bindIdx = 0;\n for (const auto &[count, type] : descriptorTypes)\n for (size_t i = 0; i < count; i++, 
bindIdx++)\n layoutBindings.emplace_back(VkDescriptorSetLayoutBinding {\n .binding = static_cast(bindIdx),\n .descriptorType = type,\n .descriptorCount = 1,\n .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT\n });\n\n const VkDescriptorSetLayoutCreateInfo layoutDesc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,\n .bindingCount = static_cast(layoutBindings.size()),\n .pBindings = layoutBindings.data()\n };\n VkDescriptorSetLayout descriptorSetLayout{};\n res = vkCreateDescriptorSetLayout(device.handle(), &layoutDesc, nullptr, &descriptorSetLayout);\n if (res != VK_SUCCESS || !descriptorSetLayout)\n throw LSFG::vulkan_error(res, \"Failed to create descriptor set layout\");\n\n // store module and layout in shared ptr\n this->shaderModule = std::shared_ptr(\n new VkShaderModule(shaderModuleHandle),\n [dev = device.handle()](VkShaderModule* shaderModuleHandle) {\n vkDestroyShaderModule(dev, *shaderModuleHandle, nullptr);\n }\n );\n this->descriptorSetLayout = std::shared_ptr(\n new VkDescriptorSetLayout(descriptorSetLayout),\n [dev = device.handle()](VkDescriptorSetLayout* layout) {\n vkDestroyDescriptorSetLayout(dev, *layout, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/gamma.cpp", "#include \n#include \n\n#include \"v3_1/shaders/gamma.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nGamma::Gamma(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg(std::move(optImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"gamma[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 9, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[1]\",\n { { 1, 
VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"gamma[0]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[1]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[2]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[3]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n this->outImg = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n !this->optImg.has_value());\n for (size_t i = 0; i < 3; i++) {\n 
pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(2))\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(2))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n 
this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg)\n .build();\n }\n}\n\nvoid Gamma::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .addR2W(this->tempImgs1.at(2))\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->tempImgs1.at(2))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, 
this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg)\n .addW2R(this->inImg2)\n .addR2W(this->outImg)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/src/mini/image.cpp", "#include \"mini/image.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n\nusing namespace Mini;\n\nImage::Image(VkDevice device, VkPhysicalDevice physicalDevice,\n VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags, int* fd)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkExternalMemoryImageCreateInfo externalInfo{\n .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .pNext = &externalInfo,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = Layer::ovkCreateImage(device, &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n Layer::ovkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);\n\n VkMemoryRequirements memReqs;\n Layer::ovkGetImageMemoryRequirements(device, imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 
0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryDedicatedAllocateInfoKHR dedicatedInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,\n .image = imageHandle,\n };\n const VkExportMemoryAllocateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,\n .pNext = &dedicatedInfo,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .pNext = &exportInfo,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = Layer::ovkAllocateMemory(device, &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = Layer::ovkBindImageMemory(device, imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // obtain the sharing fd\n const VkMemoryGetFdInfoKHR fdInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,\n .memory = memoryHandle,\n .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,\n };\n res = Layer::ovkGetMemoryFdKHR(device, &fdInfo, fd);\n if (res != VK_SUCCESS || *fd < 0)\n throw LSFG::vulkan_error(res, \"Failed to obtain sharing fd for Vulkan image\");\n\n // store objects in shared ptr\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device](VkImage* img) {\n Layer::ovkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new 
VkDeviceMemory(memoryHandle),\n [dev = device](VkDeviceMemory* mem) {\n Layer::ovkFreeMemory(dev, *mem, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/delta.cpp", "#include \n#include \n\n#include \"v3_1/shaders/delta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nDelta::Delta(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg1,\n std::optional optImg2,\n std::optional optImg3)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg1(std::move(optImg1)), optImg2(std::move(optImg2)),\n optImg3(std::move(optImg3)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"delta[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 9, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[5]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 10, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n 
vk.shaders.getShader(vk.device, \"delta[6]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[7]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[8]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[9]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"delta[0]\"),\n vk.shaders.getPipeline(vk.device, \"delta[1]\"),\n vk.shaders.getPipeline(vk.device, \"delta[2]\"),\n vk.shaders.getPipeline(vk.device, \"delta[3]\"),\n vk.shaders.getPipeline(vk.device, \"delta[4]\"),\n vk.shaders.getPipeline(vk.device, \"delta[5]\"),\n vk.shaders.getPipeline(vk.device, \"delta[6]\"),\n vk.shaders.getPipeline(vk.device, \"delta[7]\"),\n vk.shaders.getPipeline(vk.device, \"delta[8]\"),\n vk.shaders.getPipeline(vk.device, \"delta[9]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n this->outImg1 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n this->outImg2 
= Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n false, !this->optImg1.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(2))\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(2))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n 
pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg1)\n .build();\n for (size_t i = 0; i < 3; i++) {\n pass.sixthDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(5));\n pass.sixthDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(1))\n .build();\n }\n pass.descriptorSets.at(4) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(6));\n pass.descriptorSets.at(4).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
this->tempImgs2.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(5) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(7));\n pass.descriptorSets.at(5).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(1))\n .build();\n pass.descriptorSets.at(6) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(8));\n pass.descriptorSets.at(6).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(7) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(9));\n pass.descriptorSets.at(7).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg2)\n .build();\n }\n}\n\nvoid Delta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t 
threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .addR2W(this->tempImgs1.at(2))\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->tempImgs1.at(2))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg1)\n .addW2R(this->inImg2)\n .addR2W(this->outImg1)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n\n // sixth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addW2R(this->optImg2)\n .addR2W(this->tempImgs2.at(0))\n .addR2W(this->tempImgs2.at(1))\n .build();\n\n this->pipelines.at(5).bind(buf);\n pass.sixthDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(5));\n buf.dispatch(threadsX, 
threadsY, 1);\n\n // seventh shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2.at(0))\n .addW2R(this->tempImgs2.at(1))\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(6).bind(buf);\n pass.descriptorSets.at(4).bind(buf, this->pipelines.at(6));\n buf.dispatch(threadsX, threadsY, 1);\n\n // eighth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2.at(0))\n .addR2W(this->tempImgs2.at(1))\n .build();\n this->pipelines.at(7).bind(buf);\n pass.descriptorSets.at(5).bind(buf, this->pipelines.at(7));\n buf.dispatch(threadsX, threadsY, 1);\n\n // ninth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2.at(0))\n .addW2R(this->tempImgs2.at(1))\n .addW2R(this->optImg3)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(8).bind(buf);\n pass.descriptorSets.at(6).bind(buf, this->pipelines.at(8));\n buf.dispatch(threadsX, threadsY, 1);\n\n // tenth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->optImg3)\n .addR2W(this->outImg2)\n .build();\n\n this->pipelines.at(9).bind(buf);\n pass.descriptorSets.at(7).bind(buf, this->pipelines.at(9));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_options.h", "#pragma once\n\n#include \n\n#include \"util_flags.h\"\n\nnamespace dxvk {\n\n struct D3D11Options;\n\n enum class DxbcFloatControlFlag : uint32_t {\n DenormFlushToZero32,\n DenormPreserve64,\n PreserveNan32,\n PreserveNan64,\n };\n\n using DxbcFloatControlFlags = Flags;\n\n struct DxbcOptions {\n DxbcOptions() {}\n\n // Clamp oDepth in fragment shaders if the depth\n // clip device feature is not supported\n bool useDepthClipWorkaround = false;\n\n /// Determines whether format qualifiers\n /// on typed UAV loads are required\n bool supportsTypedUavLoadR32 = false;\n\n /// 
Determines whether raw access chains are supported\n bool supportsRawAccessChains = false;\n\n /// Clear thread-group shared memory to zero\n bool zeroInitWorkgroupMemory = false;\n\n /// Declare vertex positions as invariant\n bool invariantPosition = false;\n\n /// Insert memory barriers after TGSM stoes\n bool forceVolatileTgsmAccess = false;\n\n /// Try to detect hazards in UAV access and insert\n /// barriers when we know control flow is uniform.\n bool forceComputeUavBarriers = false;\n\n /// Replace ld_ms with ld\n bool disableMsaa = false;\n\n /// Force sample rate shading by using sample\n /// interpolation for fragment shader inputs\n bool forceSampleRateShading = false;\n\n // Enable per-sample interlock if supported\n bool enableSampleShadingInterlock = false;\n\n /// Use tightly packed arrays for immediate\n /// constant buffers if possible\n bool supportsTightIcbPacking = false;\n\n /// Whether exporting point size is required\n bool needsPointSizeExport = true;\n\n /// Whether to enable sincos emulation\n bool sincosEmulation = false;\n\n /// Float control flags\n DxbcFloatControlFlags floatControl;\n\n /// Minimum storage buffer alignment\n VkDeviceSize minSsboAlignment = 0;\n };\n \n}\n"], ["/lsfg-vk/src/mini/commandbuffer.cpp", "#include \"mini/commandbuffer.hpp\"\n#include \"mini/commandpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n\nusing namespace Mini;\n\nCommandBuffer::CommandBuffer(VkDevice device, const CommandPool& pool) {\n // create command buffer\n const VkCommandBufferAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n .commandPool = pool.handle(),\n .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n .commandBufferCount = 1\n };\n VkCommandBuffer commandBufferHandle{};\n auto res = Layer::ovkAllocateCommandBuffers(device, &desc, &commandBufferHandle);\n if (res != VK_SUCCESS || commandBufferHandle == VK_NULL_HANDLE)\n throw 
LSFG::vulkan_error(res, \"Unable to allocate command buffer\");\n res = Layer::ovkSetDeviceLoaderData(device, commandBufferHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to set device loader data for command buffer\");\n\n // store command buffer in shared ptr\n this->state = std::make_shared(CommandBufferState::Empty);\n this->commandBuffer = std::shared_ptr(\n new VkCommandBuffer(commandBufferHandle),\n [dev = device, pool = pool.handle()](VkCommandBuffer* cmdBuffer) {\n Layer::ovkFreeCommandBuffers(dev, pool, 1, cmdBuffer);\n }\n );\n}\n\nvoid CommandBuffer::begin() {\n if (*this->state != CommandBufferState::Empty)\n throw std::logic_error(\"Command buffer is not in Empty state\");\n\n const VkCommandBufferBeginInfo beginInfo = {\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT\n };\n auto res = Layer::ovkBeginCommandBuffer(*this->commandBuffer, &beginInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to begin command buffer\");\n\n *this->state = CommandBufferState::Recording;\n}\n\nvoid CommandBuffer::end() {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n auto res = Layer::ovkEndCommandBuffer(*this->commandBuffer);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to end command buffer\");\n\n *this->state = CommandBufferState::Full;\n}\n\nvoid CommandBuffer::submit(VkQueue queue,\n const std::vector& waitSemaphores,\n const std::vector& signalSemaphores) {\n if (*this->state != CommandBufferState::Full)\n throw std::logic_error(\"Command buffer is not in Full state\");\n\n const std::vector waitStages(waitSemaphores.size(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);\n\n const VkSubmitInfo submitInfo{\n .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphores.data(),\n 
.pWaitDstStageMask = waitStages.data(),\n .commandBufferCount = 1,\n .pCommandBuffers = &(*this->commandBuffer),\n .signalSemaphoreCount = static_cast(signalSemaphores.size()),\n .pSignalSemaphores = signalSemaphores.data()\n };\n auto res = Layer::ovkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to submit command buffer\");\n\n *this->state = CommandBufferState::Submitted;\n}\n"], ["/lsfg-vk/framegen/src/core/semaphore.cpp", "#include \n#include \n\n#include \"core/semaphore.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nSemaphore::Semaphore(const Core::Device& device, std::optional initial) {\n // create semaphore\n const VkSemaphoreTypeCreateInfo typeInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,\n .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,\n .initialValue = initial.value_or(0)\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = initial.has_value() ? 
&typeInfo : nullptr,\n };\n VkSemaphore semaphoreHandle{};\n auto res = vkCreateSemaphore(device.handle(), &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // store semaphore in shared ptr\n this->isTimeline = initial.has_value();\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device.handle()](VkSemaphore* semaphoreHandle) {\n vkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nSemaphore::Semaphore(const Core::Device& device, int fd) {\n // create semaphore\n const VkExportSemaphoreCreateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = &exportInfo\n };\n VkSemaphore semaphoreHandle{};\n auto res = vkCreateSemaphore(device.handle(), &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // import semaphore from fd\n auto vkImportSemaphoreFdKHR = reinterpret_cast(\n vkGetDeviceProcAddr(device.handle(), \"vkImportSemaphoreFdKHR\"));\n\n const VkImportSemaphoreFdInfoKHR importInfo{\n .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,\n .semaphore = semaphoreHandle,\n .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,\n .fd = fd // closes the fd\n };\n res = vkImportSemaphoreFdKHR(device.handle(), &importInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to import semaphore from fd\");\n\n // store semaphore in shared ptr\n this->isTimeline = false;\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device.handle()](VkSemaphore* semaphoreHandle) {\n vkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nvoid 
Semaphore::signal(const Core::Device& device, uint64_t value) const {\n if (!this->isTimeline)\n throw std::logic_error(\"Invalid timeline semaphore\");\n\n const VkSemaphoreSignalInfo signalInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,\n .semaphore = this->handle(),\n .value = value\n };\n auto res = vkSignalSemaphore(device.handle(), &signalInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to signal semaphore\");\n}\n\nbool Semaphore::wait(const Core::Device& device, uint64_t value, uint64_t timeout) const {\n if (!this->isTimeline)\n throw std::logic_error(\"Invalid timeline semaphore\");\n\n VkSemaphore semaphore = this->handle();\n const VkSemaphoreWaitInfo waitInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,\n .semaphoreCount = 1,\n .pSemaphores = &semaphore,\n .pValues = &value\n };\n auto res = vkWaitSemaphores(device.handle(), &waitInfo, timeout);\n if (res != VK_SUCCESS && res != VK_TIMEOUT)\n throw LSFG::vulkan_error(res, \"Unable to wait for semaphore\");\n\n return res == VK_SUCCESS;\n}\n"], ["/lsfg-vk/framegen/v3.1_src/lsfg.cpp", "#include \n#include \n\n#include \"lsfg_3_1.hpp\"\n#include \"v3_1/context.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/instance.hpp\"\n#include \"pool/shaderpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"common/utils.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1;\n\nnamespace {\n std::optional instance;\n std::optional device;\n std::unordered_map contexts;\n}\n\nvoid LSFG_3_1::initialize(uint64_t deviceUUID,\n bool isHdr, float flowScale, uint64_t generationCount,\n const std::function(const std::string&)>& loader) {\n if (instance.has_value() || device.has_value())\n return;\n\n instance.emplace();\n device.emplace(Vulkan {\n .device{*instance, deviceUUID},\n .generationCount = generationCount,\n .flowScale = 
flowScale,\n .isHdr = isHdr\n });\n contexts = std::unordered_map();\n\n device->commandPool = Core::CommandPool(device->device);\n device->descriptorPool = Core::DescriptorPool(device->device);\n\n device->resources = Pool::ResourcePool(device->isHdr, device->flowScale);\n device->shaders = Pool::ShaderPool(loader);\n\n std::srand(static_cast(std::time(nullptr)));\n}\n\nint32_t LSFG_3_1::createContext(\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n const int32_t id = std::rand();\n contexts.emplace(id, Context(*device, in0, in1, outN, extent, format));\n return id;\n}\n\nvoid LSFG_3_1::presentContext(int32_t id, int inSem, const std::vector& outSem) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Context not found\");\n\n it->second.present(*device, inSem, outSem);\n}\n\nvoid LSFG_3_1::deleteContext(int32_t id) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_DEVICE_LOST, \"No such context\");\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.erase(it);\n}\n\nvoid LSFG_3_1::finalize() {\n if (!instance.has_value() || !device.has_value())\n return;\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.clear();\n device.reset();\n instance.reset();\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/lsfg.cpp", "#include \n#include \n\n#include \"lsfg_3_1p.hpp\"\n#include \"v3_1p/context.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/instance.hpp\"\n#include 
\"pool/shaderpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"common/utils.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1P;\n\nnamespace {\n std::optional instance;\n std::optional device;\n std::unordered_map contexts;\n}\n\nvoid LSFG_3_1P::initialize(uint64_t deviceUUID,\n bool isHdr, float flowScale, uint64_t generationCount,\n const std::function(const std::string&)>& loader) {\n if (instance.has_value() || device.has_value())\n return;\n\n instance.emplace();\n device.emplace(Vulkan {\n .device{*instance, deviceUUID},\n .generationCount = generationCount,\n .flowScale = flowScale,\n .isHdr = isHdr\n });\n contexts = std::unordered_map();\n\n device->commandPool = Core::CommandPool(device->device);\n device->descriptorPool = Core::DescriptorPool(device->device);\n\n device->resources = Pool::ResourcePool(device->isHdr, device->flowScale);\n device->shaders = Pool::ShaderPool(loader);\n\n std::srand(static_cast(std::time(nullptr)));\n}\n\nint32_t LSFG_3_1P::createContext(\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n const int32_t id = std::rand();\n contexts.emplace(id, Context(*device, in0, in1, outN, extent, format));\n return id;\n}\n\nvoid LSFG_3_1P::presentContext(int32_t id, int inSem, const std::vector& outSem) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Context not found\");\n\n it->second.present(*device, inSem, outSem);\n}\n\nvoid LSFG_3_1P::deleteContext(int32_t id) {\n if (!instance.has_value() || !device.has_value())\n throw 
LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_DEVICE_LOST, \"No such context\");\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.erase(it);\n}\n\nvoid LSFG_3_1P::finalize() {\n if (!instance.has_value() || !device.has_value())\n return;\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.clear();\n device.reset();\n instance.reset();\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/mipmaps.cpp", "#include \n#include \n\n#include \"v3_1/shaders/mipmaps.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/image.hpp\"\n#include \"core/commandbuffer.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nMipmaps::Mipmaps(Vulkan& vk,\n Core::Image inImg_0, Core::Image inImg_1)\n : inImg_0(std::move(inImg_0)), inImg_1(std::move(inImg_1)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"mipmaps\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 7, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"mipmaps\");\n this->buffer = vk.resources.getBuffer(vk.device);\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 2; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModule);\n\n // create outputs\n const VkExtent2D flowExtent{\n .width = static_cast(\n static_cast(this->inImg_0.getExtent().width) / vk.flowScale),\n .height = static_cast(\n static_cast(this->inImg_0.getExtent().height) / vk.flowScale)\n };\n for (size_t i = 0; i < 7; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { flowExtent.width >> i, flowExtent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t fc = 0; fc < 2; fc++)\n this->descriptorSets.at(fc).update(vk.device)\n 
.add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, (fc % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Mipmaps::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto flowExtent = this->outImgs.at(0).getExtent();\n const uint32_t threadsX = (flowExtent.width + 63) >> 6;\n const uint32_t threadsY = (flowExtent.height + 63) >> 6;\n\n Utils::BarrierBuilder(buf)\n .addW2R((frameCount % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .addR2W(this->outImgs)\n .build();\n\n this->pipeline.bind(buf);\n this->descriptorSets.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/mipmaps.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/mipmaps.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/image.hpp\"\n#include \"core/commandbuffer.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nMipmaps::Mipmaps(Vulkan& vk,\n Core::Image inImg_0, Core::Image inImg_1)\n : inImg_0(std::move(inImg_0)), inImg_1(std::move(inImg_1)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"p_mipmaps\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 7, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"p_mipmaps\");\n this->buffer = vk.resources.getBuffer(vk.device);\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 2; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModule);\n\n // create outputs\n const VkExtent2D flowExtent{\n .width = static_cast(\n static_cast(this->inImg_0.getExtent().width) / vk.flowScale),\n .height = static_cast(\n 
static_cast(this->inImg_0.getExtent().height) / vk.flowScale)\n };\n for (size_t i = 0; i < 7; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { flowExtent.width >> i, flowExtent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t fc = 0; fc < 2; fc++)\n this->descriptorSets.at(fc).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, (fc % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Mipmaps::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto flowExtent = this->outImgs.at(0).getExtent();\n const uint32_t threadsX = (flowExtent.width + 63) >> 6;\n const uint32_t threadsY = (flowExtent.height + 63) >> 6;\n\n Utils::BarrierBuilder(buf)\n .addW2R((frameCount % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .addR2W(this->outImgs)\n .build();\n\n this->pipeline.bind(buf);\n this->descriptorSets.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/beta.cpp", "#include \n#include \n\n#include \"v3_1/shaders/beta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nBeta::Beta(Vulkan& vk, std::array, 3> inImgs)\n : inImgs(std::move(inImgs)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"beta[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 12, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER 
},\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 6, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"beta[0]\"),\n vk.shaders.getPipeline(vk.device, \"beta[1]\"),\n vk.shaders.getPipeline(vk.device, \"beta[2]\"),\n vk.shaders.getPipeline(vk.device, \"beta[3]\"),\n vk.shaders.getPipeline(vk.device, \"beta[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n for (size_t i = 0; i < 3; i++)\n this->firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(0));\n for (size_t i = 0; i < 4; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i + 1));\n this->buffer = vk.resources.getBuffer(vk.device, 0.5F);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs.at(0).at(0).getExtent();\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n for (size_t i = 0; i < 6; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { extent.width >> i, extent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t i = 0; i < 3; i++) {\n this->firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 1) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
this->inImgs.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Beta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto extent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (extent.width + 7) >> 3;\n uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs.at(0))\n .addW2R(this->inImgs.at(1))\n .addW2R(this->inImgs.at(2))\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n 
Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth pass\n threadsX = (extent.width + 31) >> 5;\n threadsY = (extent.height + 31) >> 5;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->outImgs)\n .build();\n\n this->pipelines.at(4).bind(buf);\n this->descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/beta.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/beta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nBeta::Beta(Vulkan& vk, std::array, 3> inImgs)\n : inImgs(std::move(inImgs)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_beta[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, 
\"p_beta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 6, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_beta[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n for (size_t i = 0; i < 3; i++)\n this->firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(0));\n for (size_t i = 0; i < 4; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i + 1));\n this->buffer = vk.resources.getBuffer(vk.device, 0.5F);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs.at(0).at(0).getExtent();\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n for (size_t i = 0; i < 6; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { extent.width >> i, extent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t i = 0; i < 3; i++) {\n this->firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 1) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Beta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto extent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (extent.width + 7) >> 3;\n uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs.at(0))\n .addW2R(this->inImgs.at(1))\n .addW2R(this->inImgs.at(2))\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n 
.addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth pass\n threadsX = (extent.width + 31) >> 5;\n threadsY = (extent.height + 31) >> 5;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->outImgs)\n .build();\n\n this->pipelines.at(4).bind(buf);\n this->descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/rc/util_rc.h", "#pragma once\n\n#include \n\n#include \"../util_likely.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Reference-counted object\n */\n class RcObject {\n \n public:\n \n /**\n * \\brief Increments reference count\n * \\returns New reference count\n */\n force_inline uint32_t incRef() {\n return ++m_refCount;\n }\n \n /**\n * \\brief Decrements reference count\n * \\returns New reference count\n */\n force_inline uint32_t decRef() {\n return --m_refCount;\n }\n \n private:\n \n std::atomic m_refCount = { 0u };\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/util/log/log.h", "#pragma once\n\n#include \n#include \n\nnamespace dxvk {\n \n enum class LogLevel : uint32_t {\n Trace = 0,\n Debug = 1,\n Info = 2,\n Warn = 3,\n Error = 4,\n None = 5,\n };\n\n /**\n * \\brief Logger\n * \n * Logger for one DLL. 
Creates a text file and\n * writes all log messages to that file.\n */\n class Logger {\n \n public:\n \n Logger() {}\n Logger(const std::string& file_name) {}\n ~Logger() {}\n \n static void trace(const std::string& message) {}\n static void debug(const std::string& message) {}\n static void info (const std::string& message) {}\n static void warn (const std::string& message) {}\n static void err (const std::string& message) {}\n static void log (LogLevel level, const std::string& message) {}\n \n static LogLevel logLevel() {\n return LogLevel::Warn;\n }\n\n };\n \n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/alpha.cpp", "#include \n#include \n\n#include \"v3_1/shaders/alpha.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nAlpha::Alpha(Vulkan& vk, Core::Image inImg) : inImg(std::move(inImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"alpha[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"alpha[0]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[1]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[2]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[3]\")\n }};\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; 
i < 3; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i));\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(3));\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg.getExtent();\n const VkExtent2D halfExtent = {\n .width = (extent.width + 1) >> 1,\n .height = (extent.height + 1) >> 1\n };\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, halfExtent);\n this->tempImgs2.at(i) = Core::Image(vk.device, halfExtent);\n }\n\n const VkExtent2D quarterExtent = {\n .width = (halfExtent.width + 1) >> 1,\n .height = (halfExtent.height + 1) >> 1\n };\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs3.at(i) = Core::Image(vk.device, quarterExtent);\n for (size_t j = 0; j < 3; j++)\n this->outImgs.at(j).at(i) = Core::Image(vk.device, quarterExtent);\n }\n\n // hook up shaders\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs3)\n .build();\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n}\n\nvoid Alpha::Dispatch(const Core::CommandBuffer& buf, uint64_t 
frameCount) {\n // first pass\n const auto halfExtent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (halfExtent.width + 7) >> 3;\n uint32_t threadsY = (halfExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n const auto quarterExtent = this->tempImgs3.at(0).getExtent();\n threadsX = (quarterExtent.width + 7) >> 3;\n threadsY = (quarterExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs3)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs3)\n .addR2W(this->outImgs.at(frameCount % 3))\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->lastDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/alpha.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/alpha.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nAlpha::Alpha(Vulkan& vk, Core::Image inImg) : inImg(std::move(inImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_alpha[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n 
vk.shaders.getShader(vk.device, \"p_alpha[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_alpha[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[3]\")\n }};\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 3; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i));\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(3));\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg.getExtent();\n const VkExtent2D halfExtent = {\n .width = (extent.width + 1) >> 1,\n .height = (extent.height + 1) >> 1\n };\n this->tempImg1 = Core::Image(vk.device, halfExtent);\n this->tempImg2 = Core::Image(vk.device, halfExtent);\n\n const VkExtent2D quarterExtent = {\n .width = (halfExtent.width + 1) >> 1,\n .height = (halfExtent.height + 1) >> 1\n };\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs3.at(i) = Core::Image(vk.device, quarterExtent);\n for (size_t j = 0; j < 3; j++)\n this->outImgs.at(j).at(i) = Core::Image(vk.device, quarterExtent);\n }\n\n // hook up shaders\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImg1)\n 
.build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImg1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImg2)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs3)\n .build();\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n}\n\nvoid Alpha::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto halfExtent = this->tempImg1.getExtent();\n uint32_t threadsX = (halfExtent.width + 7) >> 3;\n uint32_t threadsY = (halfExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg)\n .addR2W(this->tempImg1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImg1)\n .addR2W(this->tempImg2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n const auto quarterExtent = this->tempImgs3.at(0).getExtent();\n threadsX = (quarterExtent.width + 7) >> 3;\n threadsY = (quarterExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImg2)\n .addR2W(this->tempImgs3)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs3)\n .addR2W(this->outImgs.at(frameCount % 3))\n 
.build();\n\n this->pipelines.at(3).bind(buf);\n this->lastDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/thirdparty/GLSL.std.450.h", "/*\n** Copyright (c) 2014-2024 The Khronos Group Inc.\n**\n** Permission is hereby granted, free of charge, to any person obtaining a copy\n** of this software and/or associated documentation files (the \"Materials\"),\n** to deal in the Materials without restriction, including without limitation\n** the rights to use, copy, modify, merge, publish, distribute, sublicense,\n** and/or sell copies of the Materials, and to permit persons to whom the\n** Materials are furnished to do so, subject to the following conditions:\n**\n** The above copyright notice and this permission notice shall be included in\n** all copies or substantial portions of the Materials.\n**\n** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS\n** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND\n** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ \n**\n** THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS\n** IN THE MATERIALS.\n*/\n\n#ifndef GLSLstd450_H\n#define GLSLstd450_H\n\nstatic const int GLSLstd450Version = 100;\nstatic const int GLSLstd450Revision = 3;\n\nenum GLSLstd450 {\n GLSLstd450Bad = 0, // Don't use\n\n GLSLstd450Round = 1,\n GLSLstd450RoundEven = 2,\n GLSLstd450Trunc = 3,\n GLSLstd450FAbs = 4,\n GLSLstd450SAbs = 5,\n GLSLstd450FSign = 6,\n GLSLstd450SSign = 7,\n GLSLstd450Floor = 8,\n GLSLstd450Ceil = 9,\n GLSLstd450Fract = 10,\n\n GLSLstd450Radians = 11,\n GLSLstd450Degrees = 12,\n GLSLstd450Sin = 13,\n GLSLstd450Cos = 14,\n GLSLstd450Tan = 15,\n GLSLstd450Asin = 16,\n GLSLstd450Acos = 17,\n GLSLstd450Atan = 18,\n GLSLstd450Sinh = 19,\n GLSLstd450Cosh = 20,\n GLSLstd450Tanh = 21,\n GLSLstd450Asinh = 22,\n GLSLstd450Acosh = 23,\n GLSLstd450Atanh = 24,\n GLSLstd450Atan2 = 25,\n\n GLSLstd450Pow = 26,\n GLSLstd450Exp = 27,\n GLSLstd450Log = 28,\n GLSLstd450Exp2 = 29,\n GLSLstd450Log2 = 30,\n GLSLstd450Sqrt = 31,\n GLSLstd450InverseSqrt = 32,\n\n GLSLstd450Determinant = 33,\n GLSLstd450MatrixInverse = 34,\n\n GLSLstd450Modf = 35, // second operand needs an OpVariable to write to\n GLSLstd450ModfStruct = 36, // no OpVariable operand\n GLSLstd450FMin = 37,\n GLSLstd450UMin = 38,\n GLSLstd450SMin = 39,\n GLSLstd450FMax = 40,\n GLSLstd450UMax = 41,\n GLSLstd450SMax = 42,\n GLSLstd450FClamp = 43,\n GLSLstd450UClamp = 44,\n GLSLstd450SClamp = 45,\n GLSLstd450FMix = 46,\n GLSLstd450IMix = 47, // Reserved\n GLSLstd450Step = 48,\n GLSLstd450SmoothStep = 49,\n\n GLSLstd450Fma = 50,\n GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to\n GLSLstd450FrexpStruct = 52, // no OpVariable operand\n GLSLstd450Ldexp = 53,\n\n GLSLstd450PackSnorm4x8 = 54,\n GLSLstd450PackUnorm4x8 = 55,\n 
GLSLstd450PackSnorm2x16 = 56,\n GLSLstd450PackUnorm2x16 = 57,\n GLSLstd450PackHalf2x16 = 58,\n GLSLstd450PackDouble2x32 = 59,\n GLSLstd450UnpackSnorm2x16 = 60,\n GLSLstd450UnpackUnorm2x16 = 61,\n GLSLstd450UnpackHalf2x16 = 62,\n GLSLstd450UnpackSnorm4x8 = 63,\n GLSLstd450UnpackUnorm4x8 = 64,\n GLSLstd450UnpackDouble2x32 = 65,\n\n GLSLstd450Length = 66,\n GLSLstd450Distance = 67,\n GLSLstd450Cross = 68,\n GLSLstd450Normalize = 69,\n GLSLstd450FaceForward = 70,\n GLSLstd450Reflect = 71,\n GLSLstd450Refract = 72,\n\n GLSLstd450FindILsb = 73,\n GLSLstd450FindSMsb = 74,\n GLSLstd450FindUMsb = 75,\n\n GLSLstd450InterpolateAtCentroid = 76,\n GLSLstd450InterpolateAtSample = 77,\n GLSLstd450InterpolateAtOffset = 78,\n\n GLSLstd450NMin = 79,\n GLSLstd450NMax = 80,\n GLSLstd450NClamp = 81,\n\n GLSLstd450Count\n};\n\n#endif // #ifndef GLSLstd450_H\n"], ["/lsfg-vk/framegen/src/core/descriptorpool.cpp", "#include \n#include \n\n#include \"core/descriptorpool.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nDescriptorPool::DescriptorPool(const Core::Device& device) {\n // create descriptor pool\n const std::array pools{{ // arbitrary limits\n { .type = VK_DESCRIPTOR_TYPE_SAMPLER, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 4096 }\n }};\n const VkDescriptorPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,\n .flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,\n .maxSets = 16384,\n .poolSizeCount = static_cast(pools.size()),\n .pPoolSizes = pools.data()\n };\n VkDescriptorPool poolHandle{};\n auto res = vkCreateDescriptorPool(device.handle(), &desc, nullptr, &poolHandle);\n if (res != VK_SUCCESS || poolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, 
\"Unable to create descriptor pool\");\n\n // store pool in shared ptr\n this->descriptorPool = std::shared_ptr(\n new VkDescriptorPool(poolHandle),\n [dev = device.handle()](VkDescriptorPool* poolHandle) {\n vkDestroyDescriptorPool(dev, *poolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_hash.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n\n struct DxvkEq {\n template\n size_t operator () (const T& a, const T& b) const {\n return a.eq(b);\n }\n };\n\n struct DxvkHash {\n template\n size_t operator () (const T& object) const {\n return object.hash();\n }\n };\n\n class DxvkHashState {\n\n public:\n\n void add(size_t hash) {\n m_value ^= hash + 0x9e3779b9\n + (m_value << 6)\n + (m_value >> 2);\n }\n\n operator size_t () const {\n return m_value;\n }\n\n private:\n\n size_t m_value = 0;\n\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_math.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n \n constexpr size_t CACHE_LINE_SIZE = 64;\n constexpr double pi = 3.14159265359;\n\n template\n constexpr T clamp(T n, T lo, T hi) {\n if (n < lo) return lo;\n if (n > hi) return hi;\n return n;\n }\n \n template\n constexpr T align(T what, U to) {\n return (what + to - 1) & ~(to - 1);\n }\n\n template\n constexpr T alignDown(T what, U to) {\n return (what / to) * to;\n }\n\n // Equivalent of std::clamp for use with floating point numbers\n // Handles (-){INFINITY,NAN} cases.\n // Will return min in cases of NAN, etc.\n inline float fclamp(float value, float min, float max) {\n return std::fmin(\n std::fmax(value, min), max);\n }\n\n template\n inline T divCeil(T dividend, T divisor) {\n return (dividend + divisor - 1) / divisor;\n }\n \n}\n"], ["/lsfg-vk/thirdparty/volk/volk.h", "/**\n * volk\n *\n * Copyright (C) 2018-2025, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)\n * Report bugs and download new versions at https://github.com/zeux/volk\n *\n * This library is distributed under the MIT License. 
See notice at the end of this file.\n */\n/* clang-format off */\n#ifndef VOLK_H_\n#define VOLK_H_\n\n#if defined(VULKAN_H_) && !defined(VK_NO_PROTOTYPES)\n#\terror To use volk, you need to define VK_NO_PROTOTYPES before including vulkan.h\n#endif\n\n/* VOLK_GENERATE_VERSION_DEFINE */\n#define VOLK_HEADER_VERSION 323\n/* VOLK_GENERATE_VERSION_DEFINE */\n\n#ifndef VK_NO_PROTOTYPES\n#\tdefine VK_NO_PROTOTYPES\n#endif\n\n#ifndef VULKAN_H_\n#\tifdef VOLK_VULKAN_H_PATH\n#\t\tinclude VOLK_VULKAN_H_PATH\n#\telse /* Platform headers included below */\n#\t\tinclude \n#\t\tinclude \n#\tendif\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nstruct VolkDeviceTable;\n\n/**\n * Initialize library by loading Vulkan loader; call this function before creating the Vulkan instance.\n *\n * Returns VK_SUCCESS on success and VK_ERROR_INITIALIZATION_FAILED otherwise.\n */\nVkResult volkInitialize(void);\n\n/**\n * Initialize library by providing a custom handler to load global symbols.\n *\n * This function can be used instead of volkInitialize.\n * The handler function pointer will be asked to load global Vulkan symbols which require no instance\n * (such as vkCreateInstance, vkEnumerateInstance* and vkEnumerateInstanceVersion if available).\n */\nvoid volkInitializeCustom(PFN_vkGetInstanceProcAddr handler);\n\n/**\n * Finalize library by unloading Vulkan loader and resetting global symbols to NULL.\n *\n * This function does not need to be called on process exit (as loader will be unloaded automatically) or if volkInitialize failed.\n * In general this function is optional to call but may be useful in rare cases eg if volk needs to be reinitialized multiple times.\n */\nvoid volkFinalize(void);\n\n/**\n * Get Vulkan instance version supported by the Vulkan loader, or 0 if Vulkan isn't supported\n *\n * Returns 0 if volkInitialize wasn't called or failed.\n */\nuint32_t volkGetInstanceVersion(void);\n\n/**\n * Load global function pointers using application-created VkInstance; 
call this function after creating the Vulkan instance.\n */\nvoid volkLoadInstance(VkInstance instance);\n\n/**\n * Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.\n * Skips loading device-based function pointers, requires usage of volkLoadDevice afterwards.\n */\nvoid volkLoadInstanceOnly(VkInstance instance);\n\n/**\n * Load global function pointers using application-created VkDevice; call this function after creating the Vulkan device.\n *\n * Note: this is not suitable for applications that want to use multiple VkDevice objects concurrently.\n */\nvoid volkLoadDevice(VkDevice device);\n\n/**\n * Return last VkInstance for which global function pointers have been loaded via volkLoadInstance(),\n * or VK_NULL_HANDLE if volkLoadInstance() has not been called.\n */\nVkInstance volkGetLoadedInstance(void);\n\n/**\n * Return last VkDevice for which global function pointers have been loaded via volkLoadDevice(),\n * or VK_NULL_HANDLE if volkLoadDevice() has not been called.\n */\nVkDevice volkGetLoadedDevice(void);\n\n/**\n * Load function pointers using application-created VkDevice into a table.\n * Application should use function pointers from that table instead of using global function pointers.\n */\nvoid volkLoadDeviceTable(struct VolkDeviceTable* table, VkDevice device);\n\n#ifdef __cplusplus\n}\n#endif\n\n/* Instead of directly including vulkan.h, we include platform-specific parts of the SDK manually\n * This is necessary to avoid including platform headers in some cases (which vulkan.h does unconditionally)\n * and replace them with forward declarations, which makes build times faster and avoids macro conflicts.\n *\n * Note that we only replace platform-specific headers when the headers are known to be problematic: very large\n * or slow to compile (Windows), or introducing unprefixed macros which can cause conflicts (Windows, Xlib).\n */\n#if !defined(VULKAN_H_) && 
!defined(VOLK_VULKAN_H_PATH)\n\n#ifdef VK_USE_PLATFORM_ANDROID_KHR\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_FUCHSIA\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_IOS_MVK\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_MACOS_MVK\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_METAL_EXT\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_VI_NN\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_WAYLAND_KHR\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_WIN32_KHR\ntypedef unsigned long DWORD;\ntypedef const wchar_t* LPCWSTR;\ntypedef void* HANDLE;\ntypedef struct HINSTANCE__* HINSTANCE;\ntypedef struct HWND__* HWND;\ntypedef struct HMONITOR__* HMONITOR;\ntypedef struct _SECURITY_ATTRIBUTES SECURITY_ATTRIBUTES;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XCB_KHR\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XLIB_KHR\ntypedef struct _XDisplay Display;\ntypedef unsigned long Window;\ntypedef unsigned long VisualID;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_DIRECTFB_EXT\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT\ntypedef struct _XDisplay Display;\ntypedef unsigned long RROutput;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_GGP\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_SCREEN_QNX\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_SCI\n#include \n#include \n#include \n#endif\n\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n#include \n#endif\n\n#endif\n\n/**\n * Device-specific function pointer table\n */\nstruct VolkDeviceTable\n{\n\t/* VOLK_GENERATE_DEVICE_TABLE */\n#if defined(VK_VERSION_1_0)\n\tPFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\n\tPFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\n\tPFN_vkAllocateMemory vkAllocateMemory;\n\tPFN_vkBeginCommandBuffer vkBeginCommandBuffer;\n\tPFN_vkBindBufferMemory vkBindBufferMemory;\n\tPFN_vkBindImageMemory vkBindImageMemory;\n\tPFN_vkCmdBeginQuery vkCmdBeginQuery;\n\tPFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\n\tPFN_vkCmdBindDescriptorSets 
vkCmdBindDescriptorSets;\n\tPFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\n\tPFN_vkCmdBindPipeline vkCmdBindPipeline;\n\tPFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\n\tPFN_vkCmdBlitImage vkCmdBlitImage;\n\tPFN_vkCmdClearAttachments vkCmdClearAttachments;\n\tPFN_vkCmdClearColorImage vkCmdClearColorImage;\n\tPFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\n\tPFN_vkCmdCopyBuffer vkCmdCopyBuffer;\n\tPFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\n\tPFN_vkCmdCopyImage vkCmdCopyImage;\n\tPFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\n\tPFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\n\tPFN_vkCmdDispatch vkCmdDispatch;\n\tPFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\n\tPFN_vkCmdDraw vkCmdDraw;\n\tPFN_vkCmdDrawIndexed vkCmdDrawIndexed;\n\tPFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;\n\tPFN_vkCmdDrawIndirect vkCmdDrawIndirect;\n\tPFN_vkCmdEndQuery vkCmdEndQuery;\n\tPFN_vkCmdEndRenderPass vkCmdEndRenderPass;\n\tPFN_vkCmdExecuteCommands vkCmdExecuteCommands;\n\tPFN_vkCmdFillBuffer vkCmdFillBuffer;\n\tPFN_vkCmdNextSubpass vkCmdNextSubpass;\n\tPFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\n\tPFN_vkCmdPushConstants vkCmdPushConstants;\n\tPFN_vkCmdResetEvent vkCmdResetEvent;\n\tPFN_vkCmdResetQueryPool vkCmdResetQueryPool;\n\tPFN_vkCmdResolveImage vkCmdResolveImage;\n\tPFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\n\tPFN_vkCmdSetDepthBias vkCmdSetDepthBias;\n\tPFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\n\tPFN_vkCmdSetEvent vkCmdSetEvent;\n\tPFN_vkCmdSetLineWidth vkCmdSetLineWidth;\n\tPFN_vkCmdSetScissor vkCmdSetScissor;\n\tPFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\n\tPFN_vkCmdSetStencilReference vkCmdSetStencilReference;\n\tPFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\n\tPFN_vkCmdSetViewport vkCmdSetViewport;\n\tPFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\n\tPFN_vkCmdWaitEvents vkCmdWaitEvents;\n\tPFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\n\tPFN_vkCreateBuffer 
vkCreateBuffer;\n\tPFN_vkCreateBufferView vkCreateBufferView;\n\tPFN_vkCreateCommandPool vkCreateCommandPool;\n\tPFN_vkCreateComputePipelines vkCreateComputePipelines;\n\tPFN_vkCreateDescriptorPool vkCreateDescriptorPool;\n\tPFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\n\tPFN_vkCreateEvent vkCreateEvent;\n\tPFN_vkCreateFence vkCreateFence;\n\tPFN_vkCreateFramebuffer vkCreateFramebuffer;\n\tPFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\n\tPFN_vkCreateImage vkCreateImage;\n\tPFN_vkCreateImageView vkCreateImageView;\n\tPFN_vkCreatePipelineCache vkCreatePipelineCache;\n\tPFN_vkCreatePipelineLayout vkCreatePipelineLayout;\n\tPFN_vkCreateQueryPool vkCreateQueryPool;\n\tPFN_vkCreateRenderPass vkCreateRenderPass;\n\tPFN_vkCreateSampler vkCreateSampler;\n\tPFN_vkCreateSemaphore vkCreateSemaphore;\n\tPFN_vkCreateShaderModule vkCreateShaderModule;\n\tPFN_vkDestroyBuffer vkDestroyBuffer;\n\tPFN_vkDestroyBufferView vkDestroyBufferView;\n\tPFN_vkDestroyCommandPool vkDestroyCommandPool;\n\tPFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\n\tPFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\n\tPFN_vkDestroyDevice vkDestroyDevice;\n\tPFN_vkDestroyEvent vkDestroyEvent;\n\tPFN_vkDestroyFence vkDestroyFence;\n\tPFN_vkDestroyFramebuffer vkDestroyFramebuffer;\n\tPFN_vkDestroyImage vkDestroyImage;\n\tPFN_vkDestroyImageView vkDestroyImageView;\n\tPFN_vkDestroyPipeline vkDestroyPipeline;\n\tPFN_vkDestroyPipelineCache vkDestroyPipelineCache;\n\tPFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\n\tPFN_vkDestroyQueryPool vkDestroyQueryPool;\n\tPFN_vkDestroyRenderPass vkDestroyRenderPass;\n\tPFN_vkDestroySampler vkDestroySampler;\n\tPFN_vkDestroySemaphore vkDestroySemaphore;\n\tPFN_vkDestroyShaderModule vkDestroyShaderModule;\n\tPFN_vkDeviceWaitIdle vkDeviceWaitIdle;\n\tPFN_vkEndCommandBuffer vkEndCommandBuffer;\n\tPFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\n\tPFN_vkFreeCommandBuffers vkFreeCommandBuffers;\n\tPFN_vkFreeDescriptorSets 
vkFreeDescriptorSets;\n\tPFN_vkFreeMemory vkFreeMemory;\n\tPFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\n\tPFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\n\tPFN_vkGetDeviceQueue vkGetDeviceQueue;\n\tPFN_vkGetEventStatus vkGetEventStatus;\n\tPFN_vkGetFenceStatus vkGetFenceStatus;\n\tPFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\n\tPFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\n\tPFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\n\tPFN_vkGetPipelineCacheData vkGetPipelineCacheData;\n\tPFN_vkGetQueryPoolResults vkGetQueryPoolResults;\n\tPFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\n\tPFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\n\tPFN_vkMapMemory vkMapMemory;\n\tPFN_vkMergePipelineCaches vkMergePipelineCaches;\n\tPFN_vkQueueBindSparse vkQueueBindSparse;\n\tPFN_vkQueueSubmit vkQueueSubmit;\n\tPFN_vkQueueWaitIdle vkQueueWaitIdle;\n\tPFN_vkResetCommandBuffer vkResetCommandBuffer;\n\tPFN_vkResetCommandPool vkResetCommandPool;\n\tPFN_vkResetDescriptorPool vkResetDescriptorPool;\n\tPFN_vkResetEvent vkResetEvent;\n\tPFN_vkResetFences vkResetFences;\n\tPFN_vkSetEvent vkSetEvent;\n\tPFN_vkUnmapMemory vkUnmapMemory;\n\tPFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\n\tPFN_vkWaitForFences vkWaitForFences;\n#else\n\tPFN_vkVoidFunction padding_6ce80d51[120];\n#endif /* defined(VK_VERSION_1_0) */\n#if defined(VK_VERSION_1_1)\n\tPFN_vkBindBufferMemory2 vkBindBufferMemory2;\n\tPFN_vkBindImageMemory2 vkBindImageMemory2;\n\tPFN_vkCmdDispatchBase vkCmdDispatchBase;\n\tPFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;\n\tPFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;\n\tPFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;\n\tPFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;\n\tPFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;\n\tPFN_vkGetBufferMemoryRequirements2 
vkGetBufferMemoryRequirements2;\n\tPFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;\n\tPFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;\n\tPFN_vkGetDeviceQueue2 vkGetDeviceQueue2;\n\tPFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\n\tPFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;\n\tPFN_vkTrimCommandPool vkTrimCommandPool;\n\tPFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;\n#else\n\tPFN_vkVoidFunction padding_1ec56847[16];\n#endif /* defined(VK_VERSION_1_1) */\n#if defined(VK_VERSION_1_2)\n\tPFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;\n\tPFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;\n\tPFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;\n\tPFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;\n\tPFN_vkCmdNextSubpass2 vkCmdNextSubpass2;\n\tPFN_vkCreateRenderPass2 vkCreateRenderPass2;\n\tPFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;\n\tPFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;\n\tPFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;\n\tPFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;\n\tPFN_vkResetQueryPool vkResetQueryPool;\n\tPFN_vkSignalSemaphore vkSignalSemaphore;\n\tPFN_vkWaitSemaphores vkWaitSemaphores;\n#else\n\tPFN_vkVoidFunction padding_a3e00662[13];\n#endif /* defined(VK_VERSION_1_2) */\n#if defined(VK_VERSION_1_3)\n\tPFN_vkCmdBeginRendering vkCmdBeginRendering;\n\tPFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;\n\tPFN_vkCmdBlitImage2 vkCmdBlitImage2;\n\tPFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;\n\tPFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;\n\tPFN_vkCmdCopyImage2 vkCmdCopyImage2;\n\tPFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;\n\tPFN_vkCmdEndRendering vkCmdEndRendering;\n\tPFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\n\tPFN_vkCmdResetEvent2 vkCmdResetEvent2;\n\tPFN_vkCmdResolveImage2 vkCmdResolveImage2;\n\tPFN_vkCmdSetCullMode 
vkCmdSetCullMode;\n\tPFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;\n\tPFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;\n\tPFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;\n\tPFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;\n\tPFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;\n\tPFN_vkCmdSetEvent2 vkCmdSetEvent2;\n\tPFN_vkCmdSetFrontFace vkCmdSetFrontFace;\n\tPFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;\n\tPFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;\n\tPFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;\n\tPFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;\n\tPFN_vkCmdSetStencilOp vkCmdSetStencilOp;\n\tPFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;\n\tPFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;\n\tPFN_vkCmdWaitEvents2 vkCmdWaitEvents2;\n\tPFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;\n\tPFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;\n\tPFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;\n\tPFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;\n\tPFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;\n\tPFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;\n\tPFN_vkGetPrivateData vkGetPrivateData;\n\tPFN_vkQueueSubmit2 vkQueueSubmit2;\n\tPFN_vkSetPrivateData vkSetPrivateData;\n#else\n\tPFN_vkVoidFunction padding_ee798a88[36];\n#endif /* defined(VK_VERSION_1_3) */\n#if defined(VK_VERSION_1_4)\n\tPFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2;\n\tPFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2;\n\tPFN_vkCmdPushConstants2 vkCmdPushConstants2;\n\tPFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet;\n\tPFN_vkCmdPushDescriptorSet2 vkCmdPushDescriptorSet2;\n\tPFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate;\n\tPFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2;\n\tPFN_vkCmdSetLineStipple 
vkCmdSetLineStipple;\n\tPFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations;\n\tPFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices;\n\tPFN_vkCopyImageToImage vkCopyImageToImage;\n\tPFN_vkCopyImageToMemory vkCopyImageToMemory;\n\tPFN_vkCopyMemoryToImage vkCopyMemoryToImage;\n\tPFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout;\n\tPFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2;\n\tPFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity;\n\tPFN_vkMapMemory2 vkMapMemory2;\n\tPFN_vkTransitionImageLayout vkTransitionImageLayout;\n\tPFN_vkUnmapMemory2 vkUnmapMemory2;\n#else\n\tPFN_vkVoidFunction padding_82585fa3[19];\n#endif /* defined(VK_VERSION_1_4) */\n#if defined(VK_AMDX_shader_enqueue)\n\tPFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;\n\tPFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;\n\tPFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;\n\tPFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;\n\tPFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;\n\tPFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;\n\tPFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;\n#else\n\tPFN_vkVoidFunction padding_9d3e2bba[7];\n#endif /* defined(VK_AMDX_shader_enqueue) */\n#if defined(VK_AMD_anti_lag)\n\tPFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD;\n#else\n\tPFN_vkVoidFunction padding_cf792fb4[1];\n#endif /* defined(VK_AMD_anti_lag) */\n#if defined(VK_AMD_buffer_marker)\n\tPFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;\n#else\n\tPFN_vkVoidFunction padding_7836e92f[1];\n#endif /* defined(VK_AMD_buffer_marker) */\n#if defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\n\tPFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;\n#else\n\tPFN_vkVoidFunction 
padding_bbf9b7bb[1];\n#endif /* defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_AMD_display_native_hdr)\n\tPFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;\n#else\n\tPFN_vkVoidFunction padding_6b81b2fb[1];\n#endif /* defined(VK_AMD_display_native_hdr) */\n#if defined(VK_AMD_draw_indirect_count)\n\tPFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;\n\tPFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;\n#else\n\tPFN_vkVoidFunction padding_fbfa9964[2];\n#endif /* defined(VK_AMD_draw_indirect_count) */\n#if defined(VK_AMD_shader_info)\n\tPFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;\n#else\n\tPFN_vkVoidFunction padding_bfb754b[1];\n#endif /* defined(VK_AMD_shader_info) */\n#if defined(VK_ANDROID_external_memory_android_hardware_buffer)\n\tPFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;\n\tPFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;\n#else\n\tPFN_vkVoidFunction padding_c67b1beb[2];\n#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */\n#if defined(VK_ARM_data_graph)\n\tPFN_vkBindDataGraphPipelineSessionMemoryARM vkBindDataGraphPipelineSessionMemoryARM;\n\tPFN_vkCmdDispatchDataGraphARM vkCmdDispatchDataGraphARM;\n\tPFN_vkCreateDataGraphPipelineSessionARM vkCreateDataGraphPipelineSessionARM;\n\tPFN_vkCreateDataGraphPipelinesARM vkCreateDataGraphPipelinesARM;\n\tPFN_vkDestroyDataGraphPipelineSessionARM vkDestroyDataGraphPipelineSessionARM;\n\tPFN_vkGetDataGraphPipelineAvailablePropertiesARM vkGetDataGraphPipelineAvailablePropertiesARM;\n\tPFN_vkGetDataGraphPipelinePropertiesARM vkGetDataGraphPipelinePropertiesARM;\n\tPFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM vkGetDataGraphPipelineSessionBindPointRequirementsARM;\n\tPFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM vkGetDataGraphPipelineSessionMemoryRequirementsARM;\n#else\n\tPFN_vkVoidFunction 
padding_894d85d8[9];\n#endif /* defined(VK_ARM_data_graph) */\n#if defined(VK_ARM_tensors)\n\tPFN_vkBindTensorMemoryARM vkBindTensorMemoryARM;\n\tPFN_vkCmdCopyTensorARM vkCmdCopyTensorARM;\n\tPFN_vkCreateTensorARM vkCreateTensorARM;\n\tPFN_vkCreateTensorViewARM vkCreateTensorViewARM;\n\tPFN_vkDestroyTensorARM vkDestroyTensorARM;\n\tPFN_vkDestroyTensorViewARM vkDestroyTensorViewARM;\n\tPFN_vkGetDeviceTensorMemoryRequirementsARM vkGetDeviceTensorMemoryRequirementsARM;\n\tPFN_vkGetTensorMemoryRequirementsARM vkGetTensorMemoryRequirementsARM;\n#else\n\tPFN_vkVoidFunction padding_df67a729[8];\n#endif /* defined(VK_ARM_tensors) */\n#if defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer)\n\tPFN_vkGetTensorOpaqueCaptureDescriptorDataARM vkGetTensorOpaqueCaptureDescriptorDataARM;\n\tPFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM vkGetTensorViewOpaqueCaptureDescriptorDataARM;\n#else\n\tPFN_vkVoidFunction padding_9483bf7e[2];\n#endif /* defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)\n\tPFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_760a41f5[1];\n#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */\n#if defined(VK_EXT_buffer_device_address)\n\tPFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;\n#else\n\tPFN_vkVoidFunction padding_3b69d885[1];\n#endif /* defined(VK_EXT_buffer_device_address) */\n#if defined(VK_EXT_calibrated_timestamps)\n\tPFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;\n#else\n\tPFN_vkVoidFunction padding_d0981c89[1];\n#endif /* defined(VK_EXT_calibrated_timestamps) */\n#if defined(VK_EXT_color_write_enable)\n\tPFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_d301ecc3[1];\n#endif /* defined(VK_EXT_color_write_enable) */\n#if defined(VK_EXT_conditional_rendering)\n\tPFN_vkCmdBeginConditionalRenderingEXT 
vkCmdBeginConditionalRenderingEXT;\n\tPFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;\n#else\n\tPFN_vkVoidFunction padding_ab532c18[2];\n#endif /* defined(VK_EXT_conditional_rendering) */\n#if defined(VK_EXT_debug_marker)\n\tPFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;\n\tPFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;\n\tPFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;\n\tPFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;\n\tPFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;\n#else\n\tPFN_vkVoidFunction padding_89986968[5];\n#endif /* defined(VK_EXT_debug_marker) */\n#if defined(VK_EXT_depth_bias_control)\n\tPFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;\n#else\n\tPFN_vkVoidFunction padding_bcddab4d[1];\n#endif /* defined(VK_EXT_depth_bias_control) */\n#if defined(VK_EXT_descriptor_buffer)\n\tPFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;\n\tPFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;\n\tPFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;\n\tPFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetDescriptorEXT vkGetDescriptorEXT;\n\tPFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;\n\tPFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;\n\tPFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;\n#else\n\tPFN_vkVoidFunction padding_80aa973c[10];\n#endif /* defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))\n\tPFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT 
vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;\n#else\n\tPFN_vkVoidFunction padding_98d0fb33[1];\n#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */\n#if defined(VK_EXT_device_fault)\n\tPFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;\n#else\n\tPFN_vkVoidFunction padding_55095419[1];\n#endif /* defined(VK_EXT_device_fault) */\n#if defined(VK_EXT_device_generated_commands)\n\tPFN_vkCmdExecuteGeneratedCommandsEXT vkCmdExecuteGeneratedCommandsEXT;\n\tPFN_vkCmdPreprocessGeneratedCommandsEXT vkCmdPreprocessGeneratedCommandsEXT;\n\tPFN_vkCreateIndirectCommandsLayoutEXT vkCreateIndirectCommandsLayoutEXT;\n\tPFN_vkCreateIndirectExecutionSetEXT vkCreateIndirectExecutionSetEXT;\n\tPFN_vkDestroyIndirectCommandsLayoutEXT vkDestroyIndirectCommandsLayoutEXT;\n\tPFN_vkDestroyIndirectExecutionSetEXT vkDestroyIndirectExecutionSetEXT;\n\tPFN_vkGetGeneratedCommandsMemoryRequirementsEXT vkGetGeneratedCommandsMemoryRequirementsEXT;\n\tPFN_vkUpdateIndirectExecutionSetPipelineEXT vkUpdateIndirectExecutionSetPipelineEXT;\n\tPFN_vkUpdateIndirectExecutionSetShaderEXT vkUpdateIndirectExecutionSetShaderEXT;\n#else\n\tPFN_vkVoidFunction padding_7ba7ebaa[9];\n#endif /* defined(VK_EXT_device_generated_commands) */\n#if defined(VK_EXT_discard_rectangles)\n\tPFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;\n#else\n\tPFN_vkVoidFunction padding_d6355c2[1];\n#endif /* defined(VK_EXT_discard_rectangles) */\n#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2\n\tPFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;\n\tPFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;\n#else\n\tPFN_vkVoidFunction padding_7bb44f77[2];\n#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */\n#if defined(VK_EXT_display_control)\n\tPFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;\n\tPFN_vkGetSwapchainCounterEXT 
vkGetSwapchainCounterEXT;\n\tPFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT;\n\tPFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;\n#else\n\tPFN_vkVoidFunction padding_d30dfaaf[4];\n#endif /* defined(VK_EXT_display_control) */\n#if defined(VK_EXT_external_memory_host)\n\tPFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_357656e9[1];\n#endif /* defined(VK_EXT_external_memory_host) */\n#if defined(VK_EXT_external_memory_metal)\n\tPFN_vkGetMemoryMetalHandleEXT vkGetMemoryMetalHandleEXT;\n\tPFN_vkGetMemoryMetalHandlePropertiesEXT vkGetMemoryMetalHandlePropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_37d43fb[2];\n#endif /* defined(VK_EXT_external_memory_metal) */\n#if defined(VK_EXT_fragment_density_map_offset)\n\tPFN_vkCmdEndRendering2EXT vkCmdEndRendering2EXT;\n#else\n\tPFN_vkVoidFunction padding_9c90cf11[1];\n#endif /* defined(VK_EXT_fragment_density_map_offset) */\n#if defined(VK_EXT_full_screen_exclusive)\n\tPFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;\n\tPFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;\n#else\n\tPFN_vkVoidFunction padding_3859df46[2];\n#endif /* defined(VK_EXT_full_screen_exclusive) */\n#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1))\n\tPFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;\n#else\n\tPFN_vkVoidFunction padding_e5b48b5b[1];\n#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */\n#if defined(VK_EXT_hdr_metadata)\n\tPFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;\n#else\n\tPFN_vkVoidFunction padding_ca6d733c[1];\n#endif /* defined(VK_EXT_hdr_metadata) */\n#if defined(VK_EXT_host_image_copy)\n\tPFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;\n\tPFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;\n\tPFN_vkCopyMemoryToImageEXT 
vkCopyMemoryToImageEXT;\n\tPFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;\n#else\n\tPFN_vkVoidFunction padding_dd6d9b61[4];\n#endif /* defined(VK_EXT_host_image_copy) */\n#if defined(VK_EXT_host_query_reset)\n\tPFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;\n#else\n\tPFN_vkVoidFunction padding_34e58bd3[1];\n#endif /* defined(VK_EXT_host_query_reset) */\n#if defined(VK_EXT_image_drm_format_modifier)\n\tPFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_eb50dc14[1];\n#endif /* defined(VK_EXT_image_drm_format_modifier) */\n#if defined(VK_EXT_line_rasterization)\n\tPFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;\n#else\n\tPFN_vkVoidFunction padding_8a212c37[1];\n#endif /* defined(VK_EXT_line_rasterization) */\n#if defined(VK_EXT_mesh_shader)\n\tPFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;\n\tPFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;\n#else\n\tPFN_vkVoidFunction padding_f65e838[2];\n#endif /* defined(VK_EXT_mesh_shader) */\n#if defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\n\tPFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;\n#else\n\tPFN_vkVoidFunction padding_dcbaac2f[1];\n#endif /* defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_EXT_metal_objects)\n\tPFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;\n#else\n\tPFN_vkVoidFunction padding_df21f735[1];\n#endif /* defined(VK_EXT_metal_objects) */\n#if defined(VK_EXT_multi_draw)\n\tPFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;\n\tPFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;\n#else\n\tPFN_vkVoidFunction padding_ce8b93b6[2];\n#endif /* defined(VK_EXT_multi_draw) */\n#if defined(VK_EXT_opacity_micromap)\n\tPFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;\n\tPFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;\n\tPFN_vkCmdCopyMemoryToMicromapEXT 
vkCmdCopyMemoryToMicromapEXT;\n\tPFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;\n\tPFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT;\n\tPFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;\n\tPFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;\n\tPFN_vkCopyMicromapEXT vkCopyMicromapEXT;\n\tPFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;\n\tPFN_vkCreateMicromapEXT vkCreateMicromapEXT;\n\tPFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;\n\tPFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;\n\tPFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;\n\tPFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_fa41e53c[14];\n#endif /* defined(VK_EXT_opacity_micromap) */\n#if defined(VK_EXT_pageable_device_local_memory)\n\tPFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;\n#else\n\tPFN_vkVoidFunction padding_b2d2c2d7[1];\n#endif /* defined(VK_EXT_pageable_device_local_memory) */\n#if defined(VK_EXT_pipeline_properties)\n\tPFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_11313020[1];\n#endif /* defined(VK_EXT_pipeline_properties) */\n#if defined(VK_EXT_private_data)\n\tPFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;\n\tPFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;\n\tPFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;\n\tPFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;\n#else\n\tPFN_vkVoidFunction padding_108010f[4];\n#endif /* defined(VK_EXT_private_data) */\n#if defined(VK_EXT_sample_locations)\n\tPFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;\n#else\n\tPFN_vkVoidFunction padding_26f9079f[1];\n#endif /* defined(VK_EXT_sample_locations) */\n#if defined(VK_EXT_shader_module_identifier)\n\tPFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;\n\tPFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;\n#else\n\tPFN_vkVoidFunction 
padding_e10c8f86[2];\n#endif /* defined(VK_EXT_shader_module_identifier) */\n#if defined(VK_EXT_shader_object)\n\tPFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;\n\tPFN_vkCreateShadersEXT vkCreateShadersEXT;\n\tPFN_vkDestroyShaderEXT vkDestroyShaderEXT;\n\tPFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;\n#else\n\tPFN_vkVoidFunction padding_374f3e18[4];\n#endif /* defined(VK_EXT_shader_object) */\n#if defined(VK_EXT_swapchain_maintenance1)\n\tPFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;\n#else\n\tPFN_vkVoidFunction padding_ea55bf74[1];\n#endif /* defined(VK_EXT_swapchain_maintenance1) */\n#if defined(VK_EXT_transform_feedback)\n\tPFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;\n\tPFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;\n\tPFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;\n\tPFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;\n\tPFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;\n\tPFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;\n#else\n\tPFN_vkVoidFunction padding_36980658[6];\n#endif /* defined(VK_EXT_transform_feedback) */\n#if defined(VK_EXT_validation_cache)\n\tPFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;\n\tPFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;\n\tPFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;\n\tPFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;\n#else\n\tPFN_vkVoidFunction padding_b4f2df29[4];\n#endif /* defined(VK_EXT_validation_cache) */\n#if defined(VK_FUCHSIA_buffer_collection)\n\tPFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;\n\tPFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;\n\tPFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;\n\tPFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;\n\tPFN_vkSetBufferCollectionImageConstraintsFUCHSIA 
vkSetBufferCollectionImageConstraintsFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_8eaa27bc[5];\n#endif /* defined(VK_FUCHSIA_buffer_collection) */\n#if defined(VK_FUCHSIA_external_memory)\n\tPFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;\n\tPFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_e3cb8a67[2];\n#endif /* defined(VK_FUCHSIA_external_memory) */\n#if defined(VK_FUCHSIA_external_semaphore)\n\tPFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;\n\tPFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_3df6f656[2];\n#endif /* defined(VK_FUCHSIA_external_semaphore) */\n#if defined(VK_GOOGLE_display_timing)\n\tPFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;\n\tPFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;\n#else\n\tPFN_vkVoidFunction padding_2a6f50cd[2];\n#endif /* defined(VK_GOOGLE_display_timing) */\n#if defined(VK_HUAWEI_cluster_culling_shader)\n\tPFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;\n\tPFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_75b97be6[2];\n#endif /* defined(VK_HUAWEI_cluster_culling_shader) */\n#if defined(VK_HUAWEI_invocation_mask)\n\tPFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_c3a4569f[1];\n#endif /* defined(VK_HUAWEI_invocation_mask) */\n#if defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2\n\tPFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_2e923f32[1];\n#endif /* defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2 */\n#if defined(VK_HUAWEI_subpass_shading)\n\tPFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;\n#else\n\tPFN_vkVoidFunction 
padding_f766fdf5[1];\n#endif /* defined(VK_HUAWEI_subpass_shading) */\n#if defined(VK_INTEL_performance_query)\n\tPFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;\n\tPFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;\n\tPFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;\n\tPFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;\n\tPFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;\n\tPFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;\n\tPFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;\n\tPFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;\n\tPFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;\n#else\n\tPFN_vkVoidFunction padding_495a0a0b[9];\n#endif /* defined(VK_INTEL_performance_query) */\n#if defined(VK_KHR_acceleration_structure)\n\tPFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;\n\tPFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;\n\tPFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;\n\tPFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;\n\tPFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;\n\tPFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;\n\tPFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;\n\tPFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;\n\tPFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;\n\tPFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;\n\tPFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;\n\tPFN_vkDestroyAccelerationStructureKHR 
vkDestroyAccelerationStructureKHR;\n\tPFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;\n\tPFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;\n\tPFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;\n\tPFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_5a999b78[16];\n#endif /* defined(VK_KHR_acceleration_structure) */\n#if defined(VK_KHR_bind_memory2)\n\tPFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;\n\tPFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;\n#else\n\tPFN_vkVoidFunction padding_ed8481f5[2];\n#endif /* defined(VK_KHR_bind_memory2) */\n#if defined(VK_KHR_buffer_device_address)\n\tPFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;\n\tPFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;\n\tPFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;\n#else\n\tPFN_vkVoidFunction padding_178fdf81[3];\n#endif /* defined(VK_KHR_buffer_device_address) */\n#if defined(VK_KHR_calibrated_timestamps)\n\tPFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR;\n#else\n\tPFN_vkVoidFunction padding_8fd6f40d[1];\n#endif /* defined(VK_KHR_calibrated_timestamps) */\n#if defined(VK_KHR_copy_commands2)\n\tPFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;\n\tPFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;\n\tPFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;\n\tPFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;\n\tPFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;\n\tPFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;\n#else\n\tPFN_vkVoidFunction padding_4c841ff2[6];\n#endif /* defined(VK_KHR_copy_commands2) */\n#if defined(VK_KHR_create_renderpass2)\n\tPFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;\n\tPFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;\n\tPFN_vkCmdNextSubpass2KHR 
vkCmdNextSubpass2KHR;\n\tPFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;\n#else\n\tPFN_vkVoidFunction padding_2a0a8727[4];\n#endif /* defined(VK_KHR_create_renderpass2) */\n#if defined(VK_KHR_deferred_host_operations)\n\tPFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;\n\tPFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;\n\tPFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;\n\tPFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;\n\tPFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;\n#else\n\tPFN_vkVoidFunction padding_346287bb[5];\n#endif /* defined(VK_KHR_deferred_host_operations) */\n#if defined(VK_KHR_descriptor_update_template)\n\tPFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;\n\tPFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;\n\tPFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;\n#else\n\tPFN_vkVoidFunction padding_3d63aec0[3];\n#endif /* defined(VK_KHR_descriptor_update_template) */\n#if defined(VK_KHR_device_group)\n\tPFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;\n\tPFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;\n\tPFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;\n#else\n\tPFN_vkVoidFunction padding_5ebe16bd[3];\n#endif /* defined(VK_KHR_device_group) */\n#if defined(VK_KHR_display_swapchain)\n\tPFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;\n#else\n\tPFN_vkVoidFunction padding_12099367[1];\n#endif /* defined(VK_KHR_display_swapchain) */\n#if defined(VK_KHR_draw_indirect_count)\n\tPFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;\n\tPFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;\n#else\n\tPFN_vkVoidFunction padding_7b5bc4c1[2];\n#endif /* defined(VK_KHR_draw_indirect_count) */\n#if defined(VK_KHR_dynamic_rendering)\n\tPFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;\n\tPFN_vkCmdEndRenderingKHR 
vkCmdEndRenderingKHR;\n#else\n\tPFN_vkVoidFunction padding_b80f75a5[2];\n#endif /* defined(VK_KHR_dynamic_rendering) */\n#if defined(VK_KHR_dynamic_rendering_local_read)\n\tPFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR;\n\tPFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR;\n#else\n\tPFN_vkVoidFunction padding_b1510532[2];\n#endif /* defined(VK_KHR_dynamic_rendering_local_read) */\n#if defined(VK_KHR_external_fence_fd)\n\tPFN_vkGetFenceFdKHR vkGetFenceFdKHR;\n\tPFN_vkImportFenceFdKHR vkImportFenceFdKHR;\n#else\n\tPFN_vkVoidFunction padding_a2c787d5[2];\n#endif /* defined(VK_KHR_external_fence_fd) */\n#if defined(VK_KHR_external_fence_win32)\n\tPFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;\n\tPFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;\n#else\n\tPFN_vkVoidFunction padding_55d8e6a9[2];\n#endif /* defined(VK_KHR_external_fence_win32) */\n#if defined(VK_KHR_external_memory_fd)\n\tPFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;\n\tPFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_982d9e19[2];\n#endif /* defined(VK_KHR_external_memory_fd) */\n#if defined(VK_KHR_external_memory_win32)\n\tPFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;\n\tPFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_4af9e25a[2];\n#endif /* defined(VK_KHR_external_memory_win32) */\n#if defined(VK_KHR_external_semaphore_fd)\n\tPFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;\n\tPFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;\n#else\n\tPFN_vkVoidFunction padding_2237b7cf[2];\n#endif /* defined(VK_KHR_external_semaphore_fd) */\n#if defined(VK_KHR_external_semaphore_win32)\n\tPFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;\n\tPFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;\n#else\n\tPFN_vkVoidFunction padding_c18dea52[2];\n#endif /* 
defined(VK_KHR_external_semaphore_win32) */\n#if defined(VK_KHR_fragment_shading_rate)\n\tPFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;\n#else\n\tPFN_vkVoidFunction padding_f91b0a90[1];\n#endif /* defined(VK_KHR_fragment_shading_rate) */\n#if defined(VK_KHR_get_memory_requirements2)\n\tPFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;\n\tPFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;\n\tPFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;\n#else\n\tPFN_vkVoidFunction padding_79d9c5c4[3];\n#endif /* defined(VK_KHR_get_memory_requirements2) */\n#if defined(VK_KHR_line_rasterization)\n\tPFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR;\n#else\n\tPFN_vkVoidFunction padding_83c2939[1];\n#endif /* defined(VK_KHR_line_rasterization) */\n#if defined(VK_KHR_maintenance1)\n\tPFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;\n#else\n\tPFN_vkVoidFunction padding_4b372c56[1];\n#endif /* defined(VK_KHR_maintenance1) */\n#if defined(VK_KHR_maintenance3)\n\tPFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;\n#else\n\tPFN_vkVoidFunction padding_5ea7858d[1];\n#endif /* defined(VK_KHR_maintenance3) */\n#if defined(VK_KHR_maintenance4)\n\tPFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;\n\tPFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;\n\tPFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR;\n#else\n\tPFN_vkVoidFunction padding_8e2d4198[3];\n#endif /* defined(VK_KHR_maintenance4) */\n#if defined(VK_KHR_maintenance5)\n\tPFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;\n\tPFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;\n\tPFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;\n\tPFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;\n#else\n\tPFN_vkVoidFunction padding_37040339[4];\n#endif /* 
defined(VK_KHR_maintenance5) */\n#if defined(VK_KHR_maintenance6)\n\tPFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR;\n\tPFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR;\n#else\n\tPFN_vkVoidFunction padding_442955d8[2];\n#endif /* defined(VK_KHR_maintenance6) */\n#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor)\n\tPFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR;\n\tPFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR;\n#else\n\tPFN_vkVoidFunction padding_80e8513f[2];\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer)\n\tPFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT;\n\tPFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT;\n#else\n\tPFN_vkVoidFunction padding_2816b9cd[2];\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_KHR_map_memory2)\n\tPFN_vkMapMemory2KHR vkMapMemory2KHR;\n\tPFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;\n#else\n\tPFN_vkVoidFunction padding_5a6d8986[2];\n#endif /* defined(VK_KHR_map_memory2) */\n#if defined(VK_KHR_performance_query)\n\tPFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;\n\tPFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;\n#else\n\tPFN_vkVoidFunction padding_76f2673b[2];\n#endif /* defined(VK_KHR_performance_query) */\n#if defined(VK_KHR_pipeline_binary)\n\tPFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR;\n\tPFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR;\n\tPFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR;\n\tPFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR;\n\tPFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR;\n#else\n\tPFN_vkVoidFunction padding_65232810[5];\n#endif /* defined(VK_KHR_pipeline_binary) */\n#if 
defined(VK_KHR_pipeline_executable_properties)\n\tPFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;\n\tPFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;\n\tPFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;\n#else\n\tPFN_vkVoidFunction padding_f7629b1e[3];\n#endif /* defined(VK_KHR_pipeline_executable_properties) */\n#if defined(VK_KHR_present_wait)\n\tPFN_vkWaitForPresentKHR vkWaitForPresentKHR;\n#else\n\tPFN_vkVoidFunction padding_b16cbe03[1];\n#endif /* defined(VK_KHR_present_wait) */\n#if defined(VK_KHR_present_wait2)\n\tPFN_vkWaitForPresent2KHR vkWaitForPresent2KHR;\n#else\n\tPFN_vkVoidFunction padding_7401483a[1];\n#endif /* defined(VK_KHR_present_wait2) */\n#if defined(VK_KHR_push_descriptor)\n\tPFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;\n#else\n\tPFN_vkVoidFunction padding_8f7712ad[1];\n#endif /* defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)\n\tPFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;\n#else\n\tPFN_vkVoidFunction padding_dd5f9b4a[1];\n#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_ray_tracing_pipeline)\n\tPFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;\n\tPFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;\n\tPFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;\n\tPFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;\n\tPFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;\n\tPFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;\n\tPFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;\n#else\n\tPFN_vkVoidFunction padding_af99aedc[7];\n#endif /* defined(VK_KHR_ray_tracing_pipeline) */\n#if 
defined(VK_KHR_sampler_ycbcr_conversion)\n\tPFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;\n\tPFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;\n#else\n\tPFN_vkVoidFunction padding_88e61b30[2];\n#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */\n#if defined(VK_KHR_shared_presentable_image)\n\tPFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;\n#else\n\tPFN_vkVoidFunction padding_1ff3379[1];\n#endif /* defined(VK_KHR_shared_presentable_image) */\n#if defined(VK_KHR_swapchain)\n\tPFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\n\tPFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\n\tPFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\n\tPFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\n\tPFN_vkQueuePresentKHR vkQueuePresentKHR;\n#else\n\tPFN_vkVoidFunction padding_a1de893b[5];\n#endif /* defined(VK_KHR_swapchain) */\n#if defined(VK_KHR_swapchain_maintenance1)\n\tPFN_vkReleaseSwapchainImagesKHR vkReleaseSwapchainImagesKHR;\n#else\n\tPFN_vkVoidFunction padding_e032d5c4[1];\n#endif /* defined(VK_KHR_swapchain_maintenance1) */\n#if defined(VK_KHR_synchronization2)\n\tPFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;\n\tPFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;\n\tPFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;\n\tPFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;\n\tPFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;\n\tPFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;\n#else\n\tPFN_vkVoidFunction padding_e85bf128[6];\n#endif /* defined(VK_KHR_synchronization2) */\n#if defined(VK_KHR_timeline_semaphore)\n\tPFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;\n\tPFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;\n\tPFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;\n#else\n\tPFN_vkVoidFunction padding_c799d931[3];\n#endif /* defined(VK_KHR_timeline_semaphore) */\n#if defined(VK_KHR_video_decode_queue)\n\tPFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;\n#else\n\tPFN_vkVoidFunction padding_7a7cc7ad[1];\n#endif /* 
defined(VK_KHR_video_decode_queue) */\n#if defined(VK_KHR_video_encode_queue)\n\tPFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;\n\tPFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;\n#else\n\tPFN_vkVoidFunction padding_f2997fb4[2];\n#endif /* defined(VK_KHR_video_encode_queue) */\n#if defined(VK_KHR_video_queue)\n\tPFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;\n\tPFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;\n\tPFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;\n\tPFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;\n\tPFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;\n\tPFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;\n\tPFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;\n\tPFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;\n\tPFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;\n\tPFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;\n#else\n\tPFN_vkVoidFunction padding_98fb7016[10];\n#endif /* defined(VK_KHR_video_queue) */\n#if defined(VK_NVX_binary_import)\n\tPFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;\n\tPFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;\n\tPFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;\n\tPFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;\n\tPFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;\n#else\n\tPFN_vkVoidFunction padding_eb54309b[5];\n#endif /* defined(VK_NVX_binary_import) */\n#if defined(VK_NVX_image_view_handle)\n\tPFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;\n#else\n\tPFN_vkVoidFunction padding_887f6736[1];\n#endif /* defined(VK_NVX_image_view_handle) */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3\n\tPFN_vkGetImageViewHandle64NVX vkGetImageViewHandle64NVX;\n#else\n\tPFN_vkVoidFunction padding_64ad40e2[1];\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3 */\n#if 
defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2\n\tPFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;\n#else\n\tPFN_vkVoidFunction padding_d290479a[1];\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_clip_space_w_scaling)\n\tPFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;\n#else\n\tPFN_vkVoidFunction padding_88d7eb2e[1];\n#endif /* defined(VK_NV_clip_space_w_scaling) */\n#if defined(VK_NV_cluster_acceleration_structure)\n\tPFN_vkCmdBuildClusterAccelerationStructureIndirectNV vkCmdBuildClusterAccelerationStructureIndirectNV;\n\tPFN_vkGetClusterAccelerationStructureBuildSizesNV vkGetClusterAccelerationStructureBuildSizesNV;\n#else\n\tPFN_vkVoidFunction padding_60e35395[2];\n#endif /* defined(VK_NV_cluster_acceleration_structure) */\n#if defined(VK_NV_cooperative_vector)\n\tPFN_vkCmdConvertCooperativeVectorMatrixNV vkCmdConvertCooperativeVectorMatrixNV;\n\tPFN_vkConvertCooperativeVectorMatrixNV vkConvertCooperativeVectorMatrixNV;\n#else\n\tPFN_vkVoidFunction padding_f4a887d0[2];\n#endif /* defined(VK_NV_cooperative_vector) */\n#if defined(VK_NV_copy_memory_indirect)\n\tPFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;\n\tPFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;\n#else\n\tPFN_vkVoidFunction padding_9536230e[2];\n#endif /* defined(VK_NV_copy_memory_indirect) */\n#if defined(VK_NV_cuda_kernel_launch)\n\tPFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;\n\tPFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;\n\tPFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;\n\tPFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;\n\tPFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;\n\tPFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;\n#else\n\tPFN_vkVoidFunction padding_2eabdf3b[6];\n#endif /* defined(VK_NV_cuda_kernel_launch) */\n#if defined(VK_NV_device_diagnostic_checkpoints)\n\tPFN_vkCmdSetCheckpointNV 
vkCmdSetCheckpointNV;\n\tPFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;\n#else\n\tPFN_vkVoidFunction padding_adaa5a21[2];\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) */\n#if defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\n\tPFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;\n#else\n\tPFN_vkVoidFunction padding_c776633d[1];\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_NV_device_generated_commands)\n\tPFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;\n\tPFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;\n\tPFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;\n\tPFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;\n\tPFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;\n\tPFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;\n#else\n\tPFN_vkVoidFunction padding_4c7e4395[6];\n#endif /* defined(VK_NV_device_generated_commands) */\n#if defined(VK_NV_device_generated_commands_compute)\n\tPFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;\n\tPFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV;\n\tPFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;\n#else\n\tPFN_vkVoidFunction padding_5195094c[3];\n#endif /* defined(VK_NV_device_generated_commands_compute) */\n#if defined(VK_NV_external_compute_queue)\n\tPFN_vkCreateExternalComputeQueueNV vkCreateExternalComputeQueueNV;\n\tPFN_vkDestroyExternalComputeQueueNV vkDestroyExternalComputeQueueNV;\n\tPFN_vkGetExternalComputeQueueDataNV vkGetExternalComputeQueueDataNV;\n#else\n\tPFN_vkVoidFunction padding_4f947e0b[3];\n#endif /* defined(VK_NV_external_compute_queue) */\n#if 
defined(VK_NV_external_memory_rdma)\n\tPFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;\n#else\n\tPFN_vkVoidFunction padding_920e405[1];\n#endif /* defined(VK_NV_external_memory_rdma) */\n#if defined(VK_NV_external_memory_win32)\n\tPFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;\n#else\n\tPFN_vkVoidFunction padding_c13d6f3a[1];\n#endif /* defined(VK_NV_external_memory_win32) */\n#if defined(VK_NV_fragment_shading_rate_enums)\n\tPFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;\n#else\n\tPFN_vkVoidFunction padding_4979ca14[1];\n#endif /* defined(VK_NV_fragment_shading_rate_enums) */\n#if defined(VK_NV_low_latency2)\n\tPFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;\n\tPFN_vkLatencySleepNV vkLatencySleepNV;\n\tPFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;\n\tPFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;\n\tPFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;\n#else\n\tPFN_vkVoidFunction padding_fabf8b19[5];\n#endif /* defined(VK_NV_low_latency2) */\n#if defined(VK_NV_memory_decompression)\n\tPFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;\n\tPFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;\n#else\n\tPFN_vkVoidFunction padding_706009[2];\n#endif /* defined(VK_NV_memory_decompression) */\n#if defined(VK_NV_mesh_shader)\n\tPFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;\n\tPFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV;\n#else\n\tPFN_vkVoidFunction padding_ac232758[2];\n#endif /* defined(VK_NV_mesh_shader) */\n#if defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\n\tPFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;\n#else\n\tPFN_vkVoidFunction padding_53495be7[1];\n#endif /* defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_NV_optical_flow)\n\tPFN_vkBindOpticalFlowSessionImageNV 
vkBindOpticalFlowSessionImageNV;\n\tPFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;\n\tPFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;\n\tPFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;\n#else\n\tPFN_vkVoidFunction padding_f67571eb[4];\n#endif /* defined(VK_NV_optical_flow) */\n#if defined(VK_NV_partitioned_acceleration_structure)\n\tPFN_vkCmdBuildPartitionedAccelerationStructuresNV vkCmdBuildPartitionedAccelerationStructuresNV;\n\tPFN_vkGetPartitionedAccelerationStructuresBuildSizesNV vkGetPartitionedAccelerationStructuresBuildSizesNV;\n#else\n\tPFN_vkVoidFunction padding_d27c8c6d[2];\n#endif /* defined(VK_NV_partitioned_acceleration_structure) */\n#if defined(VK_NV_ray_tracing)\n\tPFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;\n\tPFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;\n\tPFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;\n\tPFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;\n\tPFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;\n\tPFN_vkCompileDeferredNV vkCompileDeferredNV;\n\tPFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;\n\tPFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;\n\tPFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;\n\tPFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;\n\tPFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;\n\tPFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;\n#else\n\tPFN_vkVoidFunction padding_feefbeac[12];\n#endif /* defined(VK_NV_ray_tracing) */\n#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2\n\tPFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;\n#else\n\tPFN_vkVoidFunction padding_e3c24f80[1];\n#endif /* defined(VK_NV_scissor_exclusive) && 
VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_scissor_exclusive)\n\tPFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;\n#else\n\tPFN_vkVoidFunction padding_8e88d86c[1];\n#endif /* defined(VK_NV_scissor_exclusive) */\n#if defined(VK_NV_shading_rate_image)\n\tPFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;\n\tPFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;\n\tPFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;\n#else\n\tPFN_vkVoidFunction padding_92a0767f[3];\n#endif /* defined(VK_NV_shading_rate_image) */\n#if defined(VK_QCOM_tile_memory_heap)\n\tPFN_vkCmdBindTileMemoryQCOM vkCmdBindTileMemoryQCOM;\n#else\n\tPFN_vkVoidFunction padding_e2d55d04[1];\n#endif /* defined(VK_QCOM_tile_memory_heap) */\n#if defined(VK_QCOM_tile_properties)\n\tPFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;\n\tPFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;\n#else\n\tPFN_vkVoidFunction padding_be12e32[2];\n#endif /* defined(VK_QCOM_tile_properties) */\n#if defined(VK_QCOM_tile_shading)\n\tPFN_vkCmdBeginPerTileExecutionQCOM vkCmdBeginPerTileExecutionQCOM;\n\tPFN_vkCmdDispatchTileQCOM vkCmdDispatchTileQCOM;\n\tPFN_vkCmdEndPerTileExecutionQCOM vkCmdEndPerTileExecutionQCOM;\n#else\n\tPFN_vkVoidFunction padding_fcd9e1df[3];\n#endif /* defined(VK_QCOM_tile_shading) */\n#if defined(VK_QNX_external_memory_screen_buffer)\n\tPFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;\n#else\n\tPFN_vkVoidFunction padding_1c27735d[1];\n#endif /* defined(VK_QNX_external_memory_screen_buffer) */\n#if defined(VK_VALVE_descriptor_set_host_mapping)\n\tPFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;\n\tPFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;\n#else\n\tPFN_vkVoidFunction padding_fd71e4c6[2];\n#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */\n#if 
(defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control))\n\tPFN_vkCmdSetDepthClampRangeEXT vkCmdSetDepthClampRangeEXT;\n#else\n\tPFN_vkVoidFunction padding_faa18a61[1];\n#endif /* (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control)) */\n#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;\n\tPFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;\n\tPFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;\n\tPFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;\n\tPFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;\n\tPFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;\n\tPFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;\n\tPFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;\n\tPFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;\n\tPFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;\n\tPFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;\n\tPFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;\n#else\n\tPFN_vkVoidFunction padding_3e8c720f[12];\n#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;\n\tPFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;\n\tPFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;\n\tPFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;\n\tPFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_b93e02a6[5];\n#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetAlphaToCoverageEnableEXT 
vkCmdSetAlphaToCoverageEnableEXT;\n\tPFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;\n\tPFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;\n\tPFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;\n\tPFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;\n\tPFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;\n\tPFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;\n\tPFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;\n\tPFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;\n\tPFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;\n#else\n\tPFN_vkVoidFunction padding_ab566e7e[10];\n#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;\n#else\n\tPFN_vkVoidFunction padding_6730ed0c[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback))\n\tPFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;\n#else\n\tPFN_vkVoidFunction padding_d3ebb335[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization))\n\tPFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;\n\tPFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;\n#else\n\tPFN_vkVoidFunction 
padding_a21758f4[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable))\n\tPFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_a498a838[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations))\n\tPFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_67db38de[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced))\n\tPFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;\n#else\n\tPFN_vkVoidFunction padding_fbea7481[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex))\n\tPFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;\n#else\n\tPFN_vkVoidFunction padding_3a8ec90e[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */\n#if 
(defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization))\n\tPFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;\n\tPFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_29cdb756[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control))\n\tPFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;\n#else\n\tPFN_vkVoidFunction padding_815a7240[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))\n\tPFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;\n#else\n\tPFN_vkVoidFunction padding_d1f00511[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))\n\tPFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;\n#else\n\tPFN_vkVoidFunction padding_7a73d553[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_fragment_coverage_to_color))\n\tPFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV;\n\tPFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;\n#else\n\tPFN_vkVoidFunction padding_6045fb8c[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))\n\tPFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;\n\tPFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;\n\tPFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;\n#else\n\tPFN_vkVoidFunction padding_bdc35c80[3];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))\n\tPFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;\n#else\n\tPFN_vkVoidFunction padding_9a5cd6e8[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))\n\tPFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;\n#else\n\tPFN_vkVoidFunction padding_3ee17e96[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_representative_fragment_test)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))\n\tPFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV;\n#else\n\tPFN_vkVoidFunction padding_263d525a[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */\n#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))\n\tPFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;\n#else\n\tPFN_vkVoidFunction padding_ecddace1[1];\n#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */\n#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))\n\tPFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;\n#else\n\tPFN_vkVoidFunction padding_d83e1de1[1];\n#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */\n#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))\n\tPFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;\n#else\n\tPFN_vkVoidFunction padding_60f8358a[1];\n#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\n\tPFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;\n\tPFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;\n#else\n\tPFN_vkVoidFunction padding_460290c6[2];\n#endif /* 
(defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\n\tPFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;\n#else\n\tPFN_vkVoidFunction padding_cffc198[1];\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n\t/* VOLK_GENERATE_DEVICE_TABLE */\n};\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/* VOLK_GENERATE_PROTOTYPES_H */\n#if defined(VK_VERSION_1_0)\nextern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\nextern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\nextern PFN_vkAllocateMemory vkAllocateMemory;\nextern PFN_vkBeginCommandBuffer vkBeginCommandBuffer;\nextern PFN_vkBindBufferMemory vkBindBufferMemory;\nextern PFN_vkBindImageMemory vkBindImageMemory;\nextern PFN_vkCmdBeginQuery vkCmdBeginQuery;\nextern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\nextern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;\nextern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\nextern PFN_vkCmdBindPipeline vkCmdBindPipeline;\nextern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\nextern PFN_vkCmdBlitImage vkCmdBlitImage;\nextern PFN_vkCmdClearAttachments vkCmdClearAttachments;\nextern PFN_vkCmdClearColorImage vkCmdClearColorImage;\nextern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\nextern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;\nextern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\nextern PFN_vkCmdCopyImage vkCmdCopyImage;\nextern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\nextern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\nextern PFN_vkCmdDispatch vkCmdDispatch;\nextern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\nextern PFN_vkCmdDraw vkCmdDraw;\nextern PFN_vkCmdDrawIndexed vkCmdDrawIndexed;\nextern PFN_vkCmdDrawIndexedIndirect 
vkCmdDrawIndexedIndirect;\nextern PFN_vkCmdDrawIndirect vkCmdDrawIndirect;\nextern PFN_vkCmdEndQuery vkCmdEndQuery;\nextern PFN_vkCmdEndRenderPass vkCmdEndRenderPass;\nextern PFN_vkCmdExecuteCommands vkCmdExecuteCommands;\nextern PFN_vkCmdFillBuffer vkCmdFillBuffer;\nextern PFN_vkCmdNextSubpass vkCmdNextSubpass;\nextern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\nextern PFN_vkCmdPushConstants vkCmdPushConstants;\nextern PFN_vkCmdResetEvent vkCmdResetEvent;\nextern PFN_vkCmdResetQueryPool vkCmdResetQueryPool;\nextern PFN_vkCmdResolveImage vkCmdResolveImage;\nextern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\nextern PFN_vkCmdSetDepthBias vkCmdSetDepthBias;\nextern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\nextern PFN_vkCmdSetEvent vkCmdSetEvent;\nextern PFN_vkCmdSetLineWidth vkCmdSetLineWidth;\nextern PFN_vkCmdSetScissor vkCmdSetScissor;\nextern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\nextern PFN_vkCmdSetStencilReference vkCmdSetStencilReference;\nextern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\nextern PFN_vkCmdSetViewport vkCmdSetViewport;\nextern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\nextern PFN_vkCmdWaitEvents vkCmdWaitEvents;\nextern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\nextern PFN_vkCreateBuffer vkCreateBuffer;\nextern PFN_vkCreateBufferView vkCreateBufferView;\nextern PFN_vkCreateCommandPool vkCreateCommandPool;\nextern PFN_vkCreateComputePipelines vkCreateComputePipelines;\nextern PFN_vkCreateDescriptorPool vkCreateDescriptorPool;\nextern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\nextern PFN_vkCreateDevice vkCreateDevice;\nextern PFN_vkCreateEvent vkCreateEvent;\nextern PFN_vkCreateFence vkCreateFence;\nextern PFN_vkCreateFramebuffer vkCreateFramebuffer;\nextern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\nextern PFN_vkCreateImage vkCreateImage;\nextern PFN_vkCreateImageView vkCreateImageView;\nextern PFN_vkCreateInstance vkCreateInstance;\nextern 
PFN_vkCreatePipelineCache vkCreatePipelineCache;\nextern PFN_vkCreatePipelineLayout vkCreatePipelineLayout;\nextern PFN_vkCreateQueryPool vkCreateQueryPool;\nextern PFN_vkCreateRenderPass vkCreateRenderPass;\nextern PFN_vkCreateSampler vkCreateSampler;\nextern PFN_vkCreateSemaphore vkCreateSemaphore;\nextern PFN_vkCreateShaderModule vkCreateShaderModule;\nextern PFN_vkDestroyBuffer vkDestroyBuffer;\nextern PFN_vkDestroyBufferView vkDestroyBufferView;\nextern PFN_vkDestroyCommandPool vkDestroyCommandPool;\nextern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\nextern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\nextern PFN_vkDestroyDevice vkDestroyDevice;\nextern PFN_vkDestroyEvent vkDestroyEvent;\nextern PFN_vkDestroyFence vkDestroyFence;\nextern PFN_vkDestroyFramebuffer vkDestroyFramebuffer;\nextern PFN_vkDestroyImage vkDestroyImage;\nextern PFN_vkDestroyImageView vkDestroyImageView;\nextern PFN_vkDestroyInstance vkDestroyInstance;\nextern PFN_vkDestroyPipeline vkDestroyPipeline;\nextern PFN_vkDestroyPipelineCache vkDestroyPipelineCache;\nextern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\nextern PFN_vkDestroyQueryPool vkDestroyQueryPool;\nextern PFN_vkDestroyRenderPass vkDestroyRenderPass;\nextern PFN_vkDestroySampler vkDestroySampler;\nextern PFN_vkDestroySemaphore vkDestroySemaphore;\nextern PFN_vkDestroyShaderModule vkDestroyShaderModule;\nextern PFN_vkDeviceWaitIdle vkDeviceWaitIdle;\nextern PFN_vkEndCommandBuffer vkEndCommandBuffer;\nextern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;\nextern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties;\nextern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;\nextern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties;\nextern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;\nextern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\nextern 
PFN_vkFreeCommandBuffers vkFreeCommandBuffers;\nextern PFN_vkFreeDescriptorSets vkFreeDescriptorSets;\nextern PFN_vkFreeMemory vkFreeMemory;\nextern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\nextern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\nextern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;\nextern PFN_vkGetDeviceQueue vkGetDeviceQueue;\nextern PFN_vkGetEventStatus vkGetEventStatus;\nextern PFN_vkGetFenceStatus vkGetFenceStatus;\nextern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\nextern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\nextern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\nextern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;\nextern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;\nextern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties;\nextern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;\nextern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties;\nextern PFN_vkGetPipelineCacheData vkGetPipelineCacheData;\nextern PFN_vkGetQueryPoolResults vkGetQueryPoolResults;\nextern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\nextern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\nextern PFN_vkMapMemory vkMapMemory;\nextern PFN_vkMergePipelineCaches vkMergePipelineCaches;\nextern PFN_vkQueueBindSparse vkQueueBindSparse;\nextern PFN_vkQueueSubmit vkQueueSubmit;\nextern PFN_vkQueueWaitIdle vkQueueWaitIdle;\nextern PFN_vkResetCommandBuffer vkResetCommandBuffer;\nextern PFN_vkResetCommandPool vkResetCommandPool;\nextern PFN_vkResetDescriptorPool 
vkResetDescriptorPool;\nextern PFN_vkResetEvent vkResetEvent;\nextern PFN_vkResetFences vkResetFences;\nextern PFN_vkSetEvent vkSetEvent;\nextern PFN_vkUnmapMemory vkUnmapMemory;\nextern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\nextern PFN_vkWaitForFences vkWaitForFences;\n#endif /* defined(VK_VERSION_1_0) */\n#if defined(VK_VERSION_1_1)\nextern PFN_vkBindBufferMemory2 vkBindBufferMemory2;\nextern PFN_vkBindImageMemory2 vkBindImageMemory2;\nextern PFN_vkCmdDispatchBase vkCmdDispatchBase;\nextern PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;\nextern PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;\nextern PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;\nextern PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;\nextern PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;\nextern PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;\nextern PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups;\nextern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;\nextern PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;\nextern PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;\nextern PFN_vkGetDeviceQueue2 vkGetDeviceQueue2;\nextern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\nextern PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;\nextern PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties;\nextern PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties;\nextern PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties;\nextern PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2;\nextern PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties2 
vkGetPhysicalDeviceImageFormatProperties2;\nextern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;\nextern PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2;\nextern PFN_vkTrimCommandPool vkTrimCommandPool;\nextern PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;\n#endif /* defined(VK_VERSION_1_1) */\n#if defined(VK_VERSION_1_2)\nextern PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;\nextern PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;\nextern PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;\nextern PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;\nextern PFN_vkCmdNextSubpass2 vkCmdNextSubpass2;\nextern PFN_vkCreateRenderPass2 vkCreateRenderPass2;\nextern PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;\nextern PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;\nextern PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;\nextern PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;\nextern PFN_vkResetQueryPool vkResetQueryPool;\nextern PFN_vkSignalSemaphore vkSignalSemaphore;\nextern PFN_vkWaitSemaphores vkWaitSemaphores;\n#endif /* defined(VK_VERSION_1_2) */\n#if defined(VK_VERSION_1_3)\nextern PFN_vkCmdBeginRendering vkCmdBeginRendering;\nextern PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;\nextern PFN_vkCmdBlitImage2 vkCmdBlitImage2;\nextern PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;\nextern PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;\nextern PFN_vkCmdCopyImage2 vkCmdCopyImage2;\nextern PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;\nextern PFN_vkCmdEndRendering vkCmdEndRendering;\nextern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\nextern PFN_vkCmdResetEvent2 vkCmdResetEvent2;\nextern 
PFN_vkCmdResolveImage2 vkCmdResolveImage2;\nextern PFN_vkCmdSetCullMode vkCmdSetCullMode;\nextern PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;\nextern PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;\nextern PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;\nextern PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;\nextern PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;\nextern PFN_vkCmdSetEvent2 vkCmdSetEvent2;\nextern PFN_vkCmdSetFrontFace vkCmdSetFrontFace;\nextern PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;\nextern PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;\nextern PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;\nextern PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;\nextern PFN_vkCmdSetStencilOp vkCmdSetStencilOp;\nextern PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;\nextern PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;\nextern PFN_vkCmdWaitEvents2 vkCmdWaitEvents2;\nextern PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;\nextern PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;\nextern PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;\nextern PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;\nextern PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;\nextern PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;\nextern PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties;\nextern PFN_vkGetPrivateData vkGetPrivateData;\nextern PFN_vkQueueSubmit2 vkQueueSubmit2;\nextern PFN_vkSetPrivateData vkSetPrivateData;\n#endif /* defined(VK_VERSION_1_3) */\n#if defined(VK_VERSION_1_4)\nextern PFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2;\nextern PFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2;\nextern PFN_vkCmdPushConstants2 vkCmdPushConstants2;\nextern PFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet;\nextern PFN_vkCmdPushDescriptorSet2 
vkCmdPushDescriptorSet2;\nextern PFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate;\nextern PFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2;\nextern PFN_vkCmdSetLineStipple vkCmdSetLineStipple;\nextern PFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations;\nextern PFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices;\nextern PFN_vkCopyImageToImage vkCopyImageToImage;\nextern PFN_vkCopyImageToMemory vkCopyImageToMemory;\nextern PFN_vkCopyMemoryToImage vkCopyMemoryToImage;\nextern PFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout;\nextern PFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2;\nextern PFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity;\nextern PFN_vkMapMemory2 vkMapMemory2;\nextern PFN_vkTransitionImageLayout vkTransitionImageLayout;\nextern PFN_vkUnmapMemory2 vkUnmapMemory2;\n#endif /* defined(VK_VERSION_1_4) */\n#if defined(VK_AMDX_shader_enqueue)\nextern PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;\nextern PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;\nextern PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;\nextern PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;\nextern PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;\nextern PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;\nextern PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;\n#endif /* defined(VK_AMDX_shader_enqueue) */\n#if defined(VK_AMD_anti_lag)\nextern PFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD;\n#endif /* defined(VK_AMD_anti_lag) */\n#if defined(VK_AMD_buffer_marker)\nextern PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;\n#endif /* defined(VK_AMD_buffer_marker) */\n#if defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || 
defined(VK_KHR_synchronization2))\nextern PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;\n#endif /* defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_AMD_display_native_hdr)\nextern PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;\n#endif /* defined(VK_AMD_display_native_hdr) */\n#if defined(VK_AMD_draw_indirect_count)\nextern PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;\nextern PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;\n#endif /* defined(VK_AMD_draw_indirect_count) */\n#if defined(VK_AMD_shader_info)\nextern PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;\n#endif /* defined(VK_AMD_shader_info) */\n#if defined(VK_ANDROID_external_memory_android_hardware_buffer)\nextern PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;\nextern PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;\n#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */\n#if defined(VK_ARM_data_graph)\nextern PFN_vkBindDataGraphPipelineSessionMemoryARM vkBindDataGraphPipelineSessionMemoryARM;\nextern PFN_vkCmdDispatchDataGraphARM vkCmdDispatchDataGraphARM;\nextern PFN_vkCreateDataGraphPipelineSessionARM vkCreateDataGraphPipelineSessionARM;\nextern PFN_vkCreateDataGraphPipelinesARM vkCreateDataGraphPipelinesARM;\nextern PFN_vkDestroyDataGraphPipelineSessionARM vkDestroyDataGraphPipelineSessionARM;\nextern PFN_vkGetDataGraphPipelineAvailablePropertiesARM vkGetDataGraphPipelineAvailablePropertiesARM;\nextern PFN_vkGetDataGraphPipelinePropertiesARM vkGetDataGraphPipelinePropertiesARM;\nextern PFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM vkGetDataGraphPipelineSessionBindPointRequirementsARM;\nextern PFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM vkGetDataGraphPipelineSessionMemoryRequirementsARM;\nextern PFN_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM 
vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM;\nextern PFN_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM;\n#endif /* defined(VK_ARM_data_graph) */\n#if defined(VK_ARM_tensors)\nextern PFN_vkBindTensorMemoryARM vkBindTensorMemoryARM;\nextern PFN_vkCmdCopyTensorARM vkCmdCopyTensorARM;\nextern PFN_vkCreateTensorARM vkCreateTensorARM;\nextern PFN_vkCreateTensorViewARM vkCreateTensorViewARM;\nextern PFN_vkDestroyTensorARM vkDestroyTensorARM;\nextern PFN_vkDestroyTensorViewARM vkDestroyTensorViewARM;\nextern PFN_vkGetDeviceTensorMemoryRequirementsARM vkGetDeviceTensorMemoryRequirementsARM;\nextern PFN_vkGetPhysicalDeviceExternalTensorPropertiesARM vkGetPhysicalDeviceExternalTensorPropertiesARM;\nextern PFN_vkGetTensorMemoryRequirementsARM vkGetTensorMemoryRequirementsARM;\n#endif /* defined(VK_ARM_tensors) */\n#if defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer)\nextern PFN_vkGetTensorOpaqueCaptureDescriptorDataARM vkGetTensorOpaqueCaptureDescriptorDataARM;\nextern PFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM vkGetTensorViewOpaqueCaptureDescriptorDataARM;\n#endif /* defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_acquire_drm_display)\nextern PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT;\nextern PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT;\n#endif /* defined(VK_EXT_acquire_drm_display) */\n#if defined(VK_EXT_acquire_xlib_display)\nextern PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT;\nextern PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT;\n#endif /* defined(VK_EXT_acquire_xlib_display) */\n#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)\nextern PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;\n#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */\n#if defined(VK_EXT_buffer_device_address)\nextern PFN_vkGetBufferDeviceAddressEXT 
vkGetBufferDeviceAddressEXT;\n#endif /* defined(VK_EXT_buffer_device_address) */\n#if defined(VK_EXT_calibrated_timestamps)\nextern PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;\nextern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT;\n#endif /* defined(VK_EXT_calibrated_timestamps) */\n#if defined(VK_EXT_color_write_enable)\nextern PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;\n#endif /* defined(VK_EXT_color_write_enable) */\n#if defined(VK_EXT_conditional_rendering)\nextern PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;\nextern PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;\n#endif /* defined(VK_EXT_conditional_rendering) */\n#if defined(VK_EXT_debug_marker)\nextern PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;\nextern PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;\nextern PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;\nextern PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;\nextern PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;\n#endif /* defined(VK_EXT_debug_marker) */\n#if defined(VK_EXT_debug_report)\nextern PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT;\nextern PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT;\nextern PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT;\n#endif /* defined(VK_EXT_debug_report) */\n#if defined(VK_EXT_debug_utils)\nextern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;\nextern PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;\nextern PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT;\nextern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;\nextern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT;\nextern PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT;\nextern PFN_vkQueueEndDebugUtilsLabelEXT 
vkQueueEndDebugUtilsLabelEXT;\nextern PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT;\nextern PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT;\nextern PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT;\nextern PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT;\n#endif /* defined(VK_EXT_debug_utils) */\n#if defined(VK_EXT_depth_bias_control)\nextern PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;\n#endif /* defined(VK_EXT_depth_bias_control) */\n#if defined(VK_EXT_descriptor_buffer)\nextern PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;\nextern PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;\nextern PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;\nextern PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetDescriptorEXT vkGetDescriptorEXT;\nextern PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;\nextern PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;\nextern PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;\n#endif /* defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))\nextern PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;\n#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */\n#if defined(VK_EXT_device_fault)\nextern PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;\n#endif /* defined(VK_EXT_device_fault) */\n#if 
defined(VK_EXT_device_generated_commands)\nextern PFN_vkCmdExecuteGeneratedCommandsEXT vkCmdExecuteGeneratedCommandsEXT;\nextern PFN_vkCmdPreprocessGeneratedCommandsEXT vkCmdPreprocessGeneratedCommandsEXT;\nextern PFN_vkCreateIndirectCommandsLayoutEXT vkCreateIndirectCommandsLayoutEXT;\nextern PFN_vkCreateIndirectExecutionSetEXT vkCreateIndirectExecutionSetEXT;\nextern PFN_vkDestroyIndirectCommandsLayoutEXT vkDestroyIndirectCommandsLayoutEXT;\nextern PFN_vkDestroyIndirectExecutionSetEXT vkDestroyIndirectExecutionSetEXT;\nextern PFN_vkGetGeneratedCommandsMemoryRequirementsEXT vkGetGeneratedCommandsMemoryRequirementsEXT;\nextern PFN_vkUpdateIndirectExecutionSetPipelineEXT vkUpdateIndirectExecutionSetPipelineEXT;\nextern PFN_vkUpdateIndirectExecutionSetShaderEXT vkUpdateIndirectExecutionSetShaderEXT;\n#endif /* defined(VK_EXT_device_generated_commands) */\n#if defined(VK_EXT_direct_mode_display)\nextern PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT;\n#endif /* defined(VK_EXT_direct_mode_display) */\n#if defined(VK_EXT_directfb_surface)\nextern PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT;\nextern PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT;\n#endif /* defined(VK_EXT_directfb_surface) */\n#if defined(VK_EXT_discard_rectangles)\nextern PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;\n#endif /* defined(VK_EXT_discard_rectangles) */\n#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2\nextern PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;\nextern PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;\n#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */\n#if defined(VK_EXT_display_control)\nextern PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;\nextern PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT;\nextern PFN_vkRegisterDeviceEventEXT 
vkRegisterDeviceEventEXT;\nextern PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;\n#endif /* defined(VK_EXT_display_control) */\n#if defined(VK_EXT_display_surface_counter)\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT;\n#endif /* defined(VK_EXT_display_surface_counter) */\n#if defined(VK_EXT_external_memory_host)\nextern PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;\n#endif /* defined(VK_EXT_external_memory_host) */\n#if defined(VK_EXT_external_memory_metal)\nextern PFN_vkGetMemoryMetalHandleEXT vkGetMemoryMetalHandleEXT;\nextern PFN_vkGetMemoryMetalHandlePropertiesEXT vkGetMemoryMetalHandlePropertiesEXT;\n#endif /* defined(VK_EXT_external_memory_metal) */\n#if defined(VK_EXT_fragment_density_map_offset)\nextern PFN_vkCmdEndRendering2EXT vkCmdEndRendering2EXT;\n#endif /* defined(VK_EXT_fragment_density_map_offset) */\n#if defined(VK_EXT_full_screen_exclusive)\nextern PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT;\nextern PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;\n#endif /* defined(VK_EXT_full_screen_exclusive) */\n#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1))\nextern PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;\n#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */\n#if defined(VK_EXT_hdr_metadata)\nextern PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;\n#endif /* defined(VK_EXT_hdr_metadata) */\n#if defined(VK_EXT_headless_surface)\nextern PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT;\n#endif /* defined(VK_EXT_headless_surface) */\n#if defined(VK_EXT_host_image_copy)\nextern PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;\nextern 
PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;\nextern PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT;\nextern PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;\n#endif /* defined(VK_EXT_host_image_copy) */\n#if defined(VK_EXT_host_query_reset)\nextern PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;\n#endif /* defined(VK_EXT_host_query_reset) */\n#if defined(VK_EXT_image_drm_format_modifier)\nextern PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;\n#endif /* defined(VK_EXT_image_drm_format_modifier) */\n#if defined(VK_EXT_line_rasterization)\nextern PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;\n#endif /* defined(VK_EXT_line_rasterization) */\n#if defined(VK_EXT_mesh_shader)\nextern PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;\nextern PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;\n#endif /* defined(VK_EXT_mesh_shader) */\n#if defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\nextern PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;\n#endif /* defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_EXT_metal_objects)\nextern PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;\n#endif /* defined(VK_EXT_metal_objects) */\n#if defined(VK_EXT_metal_surface)\nextern PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT;\n#endif /* defined(VK_EXT_metal_surface) */\n#if defined(VK_EXT_multi_draw)\nextern PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;\nextern PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;\n#endif /* defined(VK_EXT_multi_draw) */\n#if defined(VK_EXT_opacity_micromap)\nextern PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;\nextern PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;\nextern PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT;\nextern PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;\nextern PFN_vkCmdCopyMicromapToMemoryEXT 
vkCmdCopyMicromapToMemoryEXT;\nextern PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;\nextern PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;\nextern PFN_vkCopyMicromapEXT vkCopyMicromapEXT;\nextern PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;\nextern PFN_vkCreateMicromapEXT vkCreateMicromapEXT;\nextern PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;\nextern PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;\nextern PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;\nextern PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;\n#endif /* defined(VK_EXT_opacity_micromap) */\n#if defined(VK_EXT_pageable_device_local_memory)\nextern PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;\n#endif /* defined(VK_EXT_pageable_device_local_memory) */\n#if defined(VK_EXT_pipeline_properties)\nextern PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;\n#endif /* defined(VK_EXT_pipeline_properties) */\n#if defined(VK_EXT_private_data)\nextern PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;\nextern PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;\nextern PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;\nextern PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;\n#endif /* defined(VK_EXT_private_data) */\n#if defined(VK_EXT_sample_locations)\nextern PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;\nextern PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT;\n#endif /* defined(VK_EXT_sample_locations) */\n#if defined(VK_EXT_shader_module_identifier)\nextern PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;\nextern PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;\n#endif /* defined(VK_EXT_shader_module_identifier) */\n#if defined(VK_EXT_shader_object)\nextern PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;\nextern PFN_vkCreateShadersEXT vkCreateShadersEXT;\nextern 
PFN_vkDestroyShaderEXT vkDestroyShaderEXT;\nextern PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;\n#endif /* defined(VK_EXT_shader_object) */\n#if defined(VK_EXT_swapchain_maintenance1)\nextern PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;\n#endif /* defined(VK_EXT_swapchain_maintenance1) */\n#if defined(VK_EXT_tooling_info)\nextern PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT;\n#endif /* defined(VK_EXT_tooling_info) */\n#if defined(VK_EXT_transform_feedback)\nextern PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;\nextern PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;\nextern PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;\nextern PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;\nextern PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;\nextern PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;\n#endif /* defined(VK_EXT_transform_feedback) */\n#if defined(VK_EXT_validation_cache)\nextern PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;\nextern PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;\nextern PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;\nextern PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;\n#endif /* defined(VK_EXT_validation_cache) */\n#if defined(VK_FUCHSIA_buffer_collection)\nextern PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;\nextern PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;\nextern PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;\nextern PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;\nextern PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA;\n#endif /* defined(VK_FUCHSIA_buffer_collection) */\n#if defined(VK_FUCHSIA_external_memory)\nextern PFN_vkGetMemoryZirconHandleFUCHSIA 
vkGetMemoryZirconHandleFUCHSIA;\nextern PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;\n#endif /* defined(VK_FUCHSIA_external_memory) */\n#if defined(VK_FUCHSIA_external_semaphore)\nextern PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;\nextern PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;\n#endif /* defined(VK_FUCHSIA_external_semaphore) */\n#if defined(VK_FUCHSIA_imagepipe_surface)\nextern PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA;\n#endif /* defined(VK_FUCHSIA_imagepipe_surface) */\n#if defined(VK_GGP_stream_descriptor_surface)\nextern PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP;\n#endif /* defined(VK_GGP_stream_descriptor_surface) */\n#if defined(VK_GOOGLE_display_timing)\nextern PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;\nextern PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;\n#endif /* defined(VK_GOOGLE_display_timing) */\n#if defined(VK_HUAWEI_cluster_culling_shader)\nextern PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;\nextern PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;\n#endif /* defined(VK_HUAWEI_cluster_culling_shader) */\n#if defined(VK_HUAWEI_invocation_mask)\nextern PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;\n#endif /* defined(VK_HUAWEI_invocation_mask) */\n#if defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2\nextern PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;\n#endif /* defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2 */\n#if defined(VK_HUAWEI_subpass_shading)\nextern PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;\n#endif /* defined(VK_HUAWEI_subpass_shading) */\n#if defined(VK_INTEL_performance_query)\nextern PFN_vkAcquirePerformanceConfigurationINTEL 
vkAcquirePerformanceConfigurationINTEL;\nextern PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;\nextern PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;\nextern PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;\nextern PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;\nextern PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;\nextern PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;\nextern PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;\nextern PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;\n#endif /* defined(VK_INTEL_performance_query) */\n#if defined(VK_KHR_acceleration_structure)\nextern PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;\nextern PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;\nextern PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;\nextern PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;\nextern PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;\nextern PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;\nextern PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;\nextern PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;\nextern PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;\nextern PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;\nextern PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;\nextern PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;\nextern PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;\nextern 
PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;\nextern PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;\nextern PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;\n#endif /* defined(VK_KHR_acceleration_structure) */\n#if defined(VK_KHR_android_surface)\nextern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;\n#endif /* defined(VK_KHR_android_surface) */\n#if defined(VK_KHR_bind_memory2)\nextern PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;\nextern PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;\n#endif /* defined(VK_KHR_bind_memory2) */\n#if defined(VK_KHR_buffer_device_address)\nextern PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;\nextern PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;\nextern PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;\n#endif /* defined(VK_KHR_buffer_device_address) */\n#if defined(VK_KHR_calibrated_timestamps)\nextern PFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR;\nextern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR vkGetPhysicalDeviceCalibrateableTimeDomainsKHR;\n#endif /* defined(VK_KHR_calibrated_timestamps) */\n#if defined(VK_KHR_cooperative_matrix)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR;\n#endif /* defined(VK_KHR_cooperative_matrix) */\n#if defined(VK_KHR_copy_commands2)\nextern PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;\nextern PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;\nextern PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;\nextern PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;\nextern PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;\nextern PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;\n#endif /* defined(VK_KHR_copy_commands2) */\n#if defined(VK_KHR_create_renderpass2)\nextern 
PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;\nextern PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;\nextern PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;\nextern PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;\n#endif /* defined(VK_KHR_create_renderpass2) */\n#if defined(VK_KHR_deferred_host_operations)\nextern PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;\nextern PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;\nextern PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;\nextern PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;\nextern PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;\n#endif /* defined(VK_KHR_deferred_host_operations) */\n#if defined(VK_KHR_descriptor_update_template)\nextern PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;\nextern PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;\nextern PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;\n#endif /* defined(VK_KHR_descriptor_update_template) */\n#if defined(VK_KHR_device_group)\nextern PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;\nextern PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;\nextern PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;\n#endif /* defined(VK_KHR_device_group) */\n#if defined(VK_KHR_device_group_creation)\nextern PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR;\n#endif /* defined(VK_KHR_device_group_creation) */\n#if defined(VK_KHR_display)\nextern PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR;\nextern PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR;\nextern PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR;\nextern PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR;\nextern PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR;\nextern 
PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR;\nextern PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR;\n#endif /* defined(VK_KHR_display) */\n#if defined(VK_KHR_display_swapchain)\nextern PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;\n#endif /* defined(VK_KHR_display_swapchain) */\n#if defined(VK_KHR_draw_indirect_count)\nextern PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;\nextern PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;\n#endif /* defined(VK_KHR_draw_indirect_count) */\n#if defined(VK_KHR_dynamic_rendering)\nextern PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;\nextern PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR;\n#endif /* defined(VK_KHR_dynamic_rendering) */\n#if defined(VK_KHR_dynamic_rendering_local_read)\nextern PFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR;\nextern PFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR;\n#endif /* defined(VK_KHR_dynamic_rendering_local_read) */\n#if defined(VK_KHR_external_fence_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR;\n#endif /* defined(VK_KHR_external_fence_capabilities) */\n#if defined(VK_KHR_external_fence_fd)\nextern PFN_vkGetFenceFdKHR vkGetFenceFdKHR;\nextern PFN_vkImportFenceFdKHR vkImportFenceFdKHR;\n#endif /* defined(VK_KHR_external_fence_fd) */\n#if defined(VK_KHR_external_fence_win32)\nextern PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;\nextern PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;\n#endif /* defined(VK_KHR_external_fence_win32) */\n#if defined(VK_KHR_external_memory_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_capabilities) */\n#if 
defined(VK_KHR_external_memory_fd)\nextern PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;\nextern PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_fd) */\n#if defined(VK_KHR_external_memory_win32)\nextern PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;\nextern PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_win32) */\n#if defined(VK_KHR_external_semaphore_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR;\n#endif /* defined(VK_KHR_external_semaphore_capabilities) */\n#if defined(VK_KHR_external_semaphore_fd)\nextern PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;\nextern PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;\n#endif /* defined(VK_KHR_external_semaphore_fd) */\n#if defined(VK_KHR_external_semaphore_win32)\nextern PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;\nextern PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;\n#endif /* defined(VK_KHR_external_semaphore_win32) */\n#if defined(VK_KHR_fragment_shading_rate)\nextern PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;\nextern PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR;\n#endif /* defined(VK_KHR_fragment_shading_rate) */\n#if defined(VK_KHR_get_display_properties2)\nextern PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR;\nextern PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR;\nextern PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR vkGetPhysicalDeviceDisplayPlaneProperties2KHR;\nextern PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR;\n#endif /* defined(VK_KHR_get_display_properties2) */\n#if defined(VK_KHR_get_memory_requirements2)\nextern PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;\nextern 
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;\nextern PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;\n#endif /* defined(VK_KHR_get_memory_requirements2) */\n#if defined(VK_KHR_get_physical_device_properties2)\nextern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;\nextern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR;\nextern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;\nextern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR;\n#endif /* defined(VK_KHR_get_physical_device_properties2) */\n#if defined(VK_KHR_get_surface_capabilities2)\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR;\n#endif /* defined(VK_KHR_get_surface_capabilities2) */\n#if defined(VK_KHR_line_rasterization)\nextern PFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR;\n#endif /* defined(VK_KHR_line_rasterization) */\n#if defined(VK_KHR_maintenance1)\nextern PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;\n#endif /* defined(VK_KHR_maintenance1) */\n#if defined(VK_KHR_maintenance3)\nextern PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;\n#endif /* defined(VK_KHR_maintenance3) */\n#if defined(VK_KHR_maintenance4)\nextern PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;\nextern PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;\nextern PFN_vkGetDeviceImageSparseMemoryRequirementsKHR 
vkGetDeviceImageSparseMemoryRequirementsKHR;\n#endif /* defined(VK_KHR_maintenance4) */\n#if defined(VK_KHR_maintenance5)\nextern PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;\nextern PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;\nextern PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;\nextern PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;\n#endif /* defined(VK_KHR_maintenance5) */\n#if defined(VK_KHR_maintenance6)\nextern PFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR;\nextern PFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR;\n#endif /* defined(VK_KHR_maintenance6) */\n#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor)\nextern PFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR;\nextern PFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR;\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer)\nextern PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT;\nextern PFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT;\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_KHR_map_memory2)\nextern PFN_vkMapMemory2KHR vkMapMemory2KHR;\nextern PFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;\n#endif /* defined(VK_KHR_map_memory2) */\n#if defined(VK_KHR_performance_query)\nextern PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;\nextern PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR;\nextern PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR;\nextern PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;\n#endif /* defined(VK_KHR_performance_query) */\n#if 
defined(VK_KHR_pipeline_binary)\nextern PFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR;\nextern PFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR;\nextern PFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR;\nextern PFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR;\nextern PFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR;\n#endif /* defined(VK_KHR_pipeline_binary) */\n#if defined(VK_KHR_pipeline_executable_properties)\nextern PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;\nextern PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;\nextern PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;\n#endif /* defined(VK_KHR_pipeline_executable_properties) */\n#if defined(VK_KHR_present_wait)\nextern PFN_vkWaitForPresentKHR vkWaitForPresentKHR;\n#endif /* defined(VK_KHR_present_wait) */\n#if defined(VK_KHR_present_wait2)\nextern PFN_vkWaitForPresent2KHR vkWaitForPresent2KHR;\n#endif /* defined(VK_KHR_present_wait2) */\n#if defined(VK_KHR_push_descriptor)\nextern PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;\n#endif /* defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)\nextern PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;\n#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_ray_tracing_pipeline)\nextern PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;\nextern PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;\nextern PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;\nextern PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;\nextern PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;\nextern PFN_vkGetRayTracingShaderGroupHandlesKHR 
vkGetRayTracingShaderGroupHandlesKHR;\nextern PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;\n#endif /* defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_sampler_ycbcr_conversion)\nextern PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;\nextern PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;\n#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */\n#if defined(VK_KHR_shared_presentable_image)\nextern PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;\n#endif /* defined(VK_KHR_shared_presentable_image) */\n#if defined(VK_KHR_surface)\nextern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;\n#endif /* defined(VK_KHR_surface) */\n#if defined(VK_KHR_swapchain)\nextern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\nextern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\nextern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\nextern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\nextern PFN_vkQueuePresentKHR vkQueuePresentKHR;\n#endif /* defined(VK_KHR_swapchain) */\n#if defined(VK_KHR_swapchain_maintenance1)\nextern PFN_vkReleaseSwapchainImagesKHR vkReleaseSwapchainImagesKHR;\n#endif /* defined(VK_KHR_swapchain_maintenance1) */\n#if defined(VK_KHR_synchronization2)\nextern PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;\nextern PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;\nextern PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;\nextern PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;\nextern PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;\nextern PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;\n#endif /* 
defined(VK_KHR_synchronization2) */\n#if defined(VK_KHR_timeline_semaphore)\nextern PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;\nextern PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;\nextern PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;\n#endif /* defined(VK_KHR_timeline_semaphore) */\n#if defined(VK_KHR_video_decode_queue)\nextern PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;\n#endif /* defined(VK_KHR_video_decode_queue) */\n#if defined(VK_KHR_video_encode_queue)\nextern PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;\nextern PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;\nextern PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR;\n#endif /* defined(VK_KHR_video_encode_queue) */\n#if defined(VK_KHR_video_queue)\nextern PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;\nextern PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;\nextern PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;\nextern PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;\nextern PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;\nextern PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;\nextern PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;\nextern PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;\nextern PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR;\nextern PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR;\nextern PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;\nextern PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;\n#endif /* defined(VK_KHR_video_queue) */\n#if defined(VK_KHR_wayland_surface)\nextern PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR 
vkGetPhysicalDeviceWaylandPresentationSupportKHR;\n#endif /* defined(VK_KHR_wayland_surface) */\n#if defined(VK_KHR_win32_surface)\nextern PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR;\nextern PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR;\n#endif /* defined(VK_KHR_win32_surface) */\n#if defined(VK_KHR_xcb_surface)\nextern PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR;\n#endif /* defined(VK_KHR_xcb_surface) */\n#if defined(VK_KHR_xlib_surface)\nextern PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR;\n#endif /* defined(VK_KHR_xlib_surface) */\n#if defined(VK_MVK_ios_surface)\nextern PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK;\n#endif /* defined(VK_MVK_ios_surface) */\n#if defined(VK_MVK_macos_surface)\nextern PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK;\n#endif /* defined(VK_MVK_macos_surface) */\n#if defined(VK_NN_vi_surface)\nextern PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN;\n#endif /* defined(VK_NN_vi_surface) */\n#if defined(VK_NVX_binary_import)\nextern PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;\nextern PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;\nextern PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;\nextern PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;\nextern PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;\n#endif /* defined(VK_NVX_binary_import) */\n#if defined(VK_NVX_image_view_handle)\nextern PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;\n#endif /* defined(VK_NVX_image_view_handle) */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3\nextern PFN_vkGetImageViewHandle64NVX vkGetImageViewHandle64NVX;\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3 */\n#if 
defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2\nextern PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_acquire_winrt_display)\nextern PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV;\nextern PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV;\n#endif /* defined(VK_NV_acquire_winrt_display) */\n#if defined(VK_NV_clip_space_w_scaling)\nextern PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;\n#endif /* defined(VK_NV_clip_space_w_scaling) */\n#if defined(VK_NV_cluster_acceleration_structure)\nextern PFN_vkCmdBuildClusterAccelerationStructureIndirectNV vkCmdBuildClusterAccelerationStructureIndirectNV;\nextern PFN_vkGetClusterAccelerationStructureBuildSizesNV vkGetClusterAccelerationStructureBuildSizesNV;\n#endif /* defined(VK_NV_cluster_acceleration_structure) */\n#if defined(VK_NV_cooperative_matrix)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV;\n#endif /* defined(VK_NV_cooperative_matrix) */\n#if defined(VK_NV_cooperative_matrix2)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV;\n#endif /* defined(VK_NV_cooperative_matrix2) */\n#if defined(VK_NV_cooperative_vector)\nextern PFN_vkCmdConvertCooperativeVectorMatrixNV vkCmdConvertCooperativeVectorMatrixNV;\nextern PFN_vkConvertCooperativeVectorMatrixNV vkConvertCooperativeVectorMatrixNV;\nextern PFN_vkGetPhysicalDeviceCooperativeVectorPropertiesNV vkGetPhysicalDeviceCooperativeVectorPropertiesNV;\n#endif /* defined(VK_NV_cooperative_vector) */\n#if defined(VK_NV_copy_memory_indirect)\nextern PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;\nextern PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;\n#endif /* defined(VK_NV_copy_memory_indirect) */\n#if 
defined(VK_NV_coverage_reduction_mode)\nextern PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV;\n#endif /* defined(VK_NV_coverage_reduction_mode) */\n#if defined(VK_NV_cuda_kernel_launch)\nextern PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;\nextern PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;\nextern PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;\nextern PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;\nextern PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;\nextern PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;\n#endif /* defined(VK_NV_cuda_kernel_launch) */\n#if defined(VK_NV_device_diagnostic_checkpoints)\nextern PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;\nextern PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) */\n#if defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\nextern PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_NV_device_generated_commands)\nextern PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;\nextern PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;\nextern PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;\nextern PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;\nextern PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;\nextern PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;\n#endif /* defined(VK_NV_device_generated_commands) */\n#if defined(VK_NV_device_generated_commands_compute)\nextern PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;\nextern PFN_vkGetPipelineIndirectDeviceAddressNV 
vkGetPipelineIndirectDeviceAddressNV;\nextern PFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;\n#endif /* defined(VK_NV_device_generated_commands_compute) */\n#if defined(VK_NV_external_compute_queue)\nextern PFN_vkCreateExternalComputeQueueNV vkCreateExternalComputeQueueNV;\nextern PFN_vkDestroyExternalComputeQueueNV vkDestroyExternalComputeQueueNV;\nextern PFN_vkGetExternalComputeQueueDataNV vkGetExternalComputeQueueDataNV;\n#endif /* defined(VK_NV_external_compute_queue) */\n#if defined(VK_NV_external_memory_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV;\n#endif /* defined(VK_NV_external_memory_capabilities) */\n#if defined(VK_NV_external_memory_rdma)\nextern PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;\n#endif /* defined(VK_NV_external_memory_rdma) */\n#if defined(VK_NV_external_memory_win32)\nextern PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;\n#endif /* defined(VK_NV_external_memory_win32) */\n#if defined(VK_NV_fragment_shading_rate_enums)\nextern PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;\n#endif /* defined(VK_NV_fragment_shading_rate_enums) */\n#if defined(VK_NV_low_latency2)\nextern PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;\nextern PFN_vkLatencySleepNV vkLatencySleepNV;\nextern PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;\nextern PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;\nextern PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;\n#endif /* defined(VK_NV_low_latency2) */\n#if defined(VK_NV_memory_decompression)\nextern PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;\nextern PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;\n#endif /* defined(VK_NV_memory_decompression) */\n#if defined(VK_NV_mesh_shader)\nextern PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;\nextern PFN_vkCmdDrawMeshTasksNV 
vkCmdDrawMeshTasksNV;\n#endif /* defined(VK_NV_mesh_shader) */\n#if defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\nextern PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;\n#endif /* defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_NV_optical_flow)\nextern PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV;\nextern PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;\nextern PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;\nextern PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;\nextern PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV vkGetPhysicalDeviceOpticalFlowImageFormatsNV;\n#endif /* defined(VK_NV_optical_flow) */\n#if defined(VK_NV_partitioned_acceleration_structure)\nextern PFN_vkCmdBuildPartitionedAccelerationStructuresNV vkCmdBuildPartitionedAccelerationStructuresNV;\nextern PFN_vkGetPartitionedAccelerationStructuresBuildSizesNV vkGetPartitionedAccelerationStructuresBuildSizesNV;\n#endif /* defined(VK_NV_partitioned_acceleration_structure) */\n#if defined(VK_NV_ray_tracing)\nextern PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;\nextern PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;\nextern PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;\nextern PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;\nextern PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;\nextern PFN_vkCompileDeferredNV vkCompileDeferredNV;\nextern PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;\nextern PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;\nextern PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;\nextern PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;\nextern 
PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;\nextern PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;\n#endif /* defined(VK_NV_ray_tracing) */\n#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2\nextern PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;\n#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_scissor_exclusive)\nextern PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;\n#endif /* defined(VK_NV_scissor_exclusive) */\n#if defined(VK_NV_shading_rate_image)\nextern PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;\nextern PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;\nextern PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;\n#endif /* defined(VK_NV_shading_rate_image) */\n#if defined(VK_OHOS_surface)\nextern PFN_vkCreateSurfaceOHOS vkCreateSurfaceOHOS;\n#endif /* defined(VK_OHOS_surface) */\n#if defined(VK_QCOM_tile_memory_heap)\nextern PFN_vkCmdBindTileMemoryQCOM vkCmdBindTileMemoryQCOM;\n#endif /* defined(VK_QCOM_tile_memory_heap) */\n#if defined(VK_QCOM_tile_properties)\nextern PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;\nextern PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;\n#endif /* defined(VK_QCOM_tile_properties) */\n#if defined(VK_QCOM_tile_shading)\nextern PFN_vkCmdBeginPerTileExecutionQCOM vkCmdBeginPerTileExecutionQCOM;\nextern PFN_vkCmdDispatchTileQCOM vkCmdDispatchTileQCOM;\nextern PFN_vkCmdEndPerTileExecutionQCOM vkCmdEndPerTileExecutionQCOM;\n#endif /* defined(VK_QCOM_tile_shading) */\n#if defined(VK_QNX_external_memory_screen_buffer)\nextern PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;\n#endif /* defined(VK_QNX_external_memory_screen_buffer) */\n#if defined(VK_QNX_screen_surface)\nextern 
PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX;\nextern PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX;\n#endif /* defined(VK_QNX_screen_surface) */\n#if defined(VK_VALVE_descriptor_set_host_mapping)\nextern PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;\nextern PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;\n#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */\n#if (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control))\nextern PFN_vkCmdSetDepthClampRangeEXT vkCmdSetDepthClampRangeEXT;\n#endif /* (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control)) */\n#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;\nextern PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;\nextern PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;\nextern PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;\nextern PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;\nextern PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;\nextern PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;\nextern PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;\nextern PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;\nextern PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;\nextern PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;\nextern PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;\nextern PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;\nextern 
PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;\nextern PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;\nextern PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT;\nextern PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;\nextern PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;\nextern PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;\nextern PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;\nextern PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;\nextern PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;\nextern PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;\nextern PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;\nextern PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback))\nextern PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */\n#if (defined(VK_EXT_extended_dynamic_state3) 
&& defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization))\nextern PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;\nextern PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable))\nextern PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations))\nextern PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced))\nextern PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex))\nextern PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && 
defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization))\nextern PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;\nextern PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control))\nextern PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))\nextern PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))\nextern PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color))\nextern PFN_vkCmdSetCoverageToColorEnableNV 
vkCmdSetCoverageToColorEnableNV;\nextern PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))\nextern PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;\nextern PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;\nextern PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))\nextern PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))\nextern PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))\nextern PFN_vkCmdSetCoverageReductionModeNV 
vkCmdSetCoverageReductionModeNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */\n#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))\nextern PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;\n#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */\n#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))\nextern PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;\n#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */\n#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))\nextern PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;\n#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\nextern PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;\nextern PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR;\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\nextern PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || 
(defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n/* VOLK_GENERATE_PROTOTYPES_H */\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n\n#ifdef VOLK_IMPLEMENTATION\n#undef VOLK_IMPLEMENTATION\n/* Prevent tools like dependency checkers from detecting a cyclic dependency */\n#define VOLK_SOURCE \"volk.c\"\n#include VOLK_SOURCE\n#endif\n\n/**\n * Copyright (c) 2018-2025 Arseny Kapoulkine\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n*/\n/* clang-format on */\n"], ["/lsfg-vk/framegen/src/core/instance.cpp", "#include \n#include \n\n#include \"core/instance.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nconst std::vector requiredExtensions = {\n\n};\n\nInstance::Instance() {\n volkInitialize();\n\n // create Vulkan instance\n const VkApplicationInfo appInfo{\n .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,\n .pApplicationName = \"lsfg-vk-base\",\n .applicationVersion = VK_MAKE_VERSION(0, 0, 1),\n .pEngineName = \"lsfg-vk-base\",\n .engineVersion = VK_MAKE_VERSION(0, 0, 1),\n .apiVersion = VK_API_VERSION_1_3\n };\n const VkInstanceCreateInfo createInfo{\n .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,\n .pApplicationInfo = &appInfo,\n .enabledExtensionCount = static_cast(requiredExtensions.size()),\n .ppEnabledExtensionNames = requiredExtensions.data()\n };\n VkInstance instanceHandle{};\n auto res = vkCreateInstance(&createInfo, nullptr, &instanceHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan instance\");\n\n volkLoadInstance(instanceHandle);\n\n // store in shared ptr\n this->instance = std::shared_ptr(\n new VkInstance(instanceHandle),\n [](VkInstance* instance) {\n vkDestroyInstance(*instance, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/fence.cpp", "#include \n#include \n\n#include \"core/fence.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n\nusing namespace LSFG::Core;\n\nFence::Fence(const Core::Device& device) {\n // create fence\n const VkFenceCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO\n };\n VkFence fenceHandle{};\n auto res = 
vkCreateFence(device.handle(), &desc, nullptr, &fenceHandle);\n if (res != VK_SUCCESS || fenceHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create fence\");\n\n // store fence in shared ptr\n this->fence = std::shared_ptr(\n new VkFence(fenceHandle),\n [dev = device.handle()](VkFence* fenceHandle) {\n vkDestroyFence(dev, *fenceHandle, nullptr);\n }\n );\n}\n\nvoid Fence::reset(const Core::Device& device) const {\n VkFence fenceHandle = this->handle();\n auto res = vkResetFences(device.handle(), 1, &fenceHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to reset fence\");\n}\n\nbool Fence::wait(const Core::Device& device, uint64_t timeout) const {\n VkFence fenceHandle = this->handle();\n auto res = vkWaitForFences(device.handle(), 1, &fenceHandle, VK_TRUE, timeout);\n if (res != VK_SUCCESS && res != VK_TIMEOUT)\n throw LSFG::vulkan_error(res, \"Unable to wait for fence\");\n\n return res == VK_SUCCESS;\n}\n"], ["/lsfg-vk/framegen/src/common/exception.cpp", "#include \"common/exception.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\n\nvulkan_error::vulkan_error(VkResult result, const std::string& message)\n : std::runtime_error(std::format(\"{} (error {})\", message, static_cast(result))),\n result(result) {}\n\nvulkan_error::~vulkan_error() noexcept = default;\n\nrethrowable_error::rethrowable_error(const std::string& message, const std::exception& exe)\n : std::runtime_error(message) {\n this->message = std::format(\"{}\\n- {}\", message, exe.what());\n}\n\nrethrowable_error::~rethrowable_error() noexcept = default;\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/unicode_winapi.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2020 Trail of Bits, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, 
including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n\nnamespace peparse {\nstd::string from_utf16(const UCharString &u) {\n std::string result;\n std::size_t size = WideCharToMultiByte(CP_UTF8,\n 0,\n u.data(),\n static_cast(u.size()),\n nullptr,\n 0,\n nullptr,\n nullptr);\n\n if (size <= 0) {\n return result;\n }\n\n result.reserve(size);\n WideCharToMultiByte(CP_UTF8,\n 0,\n u.data(),\n static_cast(u.size()),\n &result[0],\n static_cast(result.capacity()),\n nullptr,\n nullptr);\n\n return result;\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_limits.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n\n enum DxvkLimits : size_t {\n MaxNumRenderTargets = 8,\n MaxNumVertexAttributes = 32,\n MaxNumVertexBindings = 32,\n MaxNumXfbBuffers = 4,\n MaxNumXfbStreams = 4,\n MaxNumViewports = 16,\n MaxNumResourceSlots = 1216,\n MaxNumQueuedCommandBuffers = 32,\n MaxNumQueryCountPerPool = 128,\n MaxNumSpecConstants = 12,\n MaxUniformBufferSize = 65536,\n MaxVertexBindingStride = 2048,\n MaxPushConstantSize = 128,\n };\n\n}\n"], ["/lsfg-vk/src/mini/commandpool.cpp", "#include \"mini/commandpool.hpp\"\n#include 
\"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n\nusing namespace Mini;\n\nCommandPool::CommandPool(VkDevice device, uint32_t graphicsFamilyIdx) {\n // create command pool\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = graphicsFamilyIdx\n };\n VkCommandPool commandPoolHandle{};\n auto res = Layer::ovkCreateCommandPool(device, &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n\n // store command pool in shared ptr\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device](VkCommandPool* commandPoolHandle) {\n Layer::ovkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/pipeline.cpp", "#include \n#include \n\n#include \"core/pipeline.hpp\"\n#include \"core/device.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nPipeline::Pipeline(const Core::Device& device, const ShaderModule& shader) {\n // create pipeline layout\n VkDescriptorSetLayout shaderLayout = shader.getLayout();\n const VkPipelineLayoutCreateInfo layoutDesc{\n .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,\n .setLayoutCount = 1,\n .pSetLayouts = &shaderLayout,\n };\n VkPipelineLayout layoutHandle{};\n auto res = vkCreatePipelineLayout(device.handle(), &layoutDesc, nullptr, &layoutHandle);\n if (res != VK_SUCCESS || !layoutHandle)\n throw LSFG::vulkan_error(res, \"Failed to create pipeline layout\");\n\n // create pipeline\n const VkPipelineShaderStageCreateInfo shaderStageInfo{\n .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n .stage = VK_SHADER_STAGE_COMPUTE_BIT,\n .module = shader.handle(),\n .pName = \"main\",\n };\n const VkComputePipelineCreateInfo 
pipelineDesc{\n .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,\n .stage = shaderStageInfo,\n .layout = layoutHandle,\n };\n VkPipeline pipelineHandle{};\n res = vkCreateComputePipelines(device.handle(),\n VK_NULL_HANDLE, 1, &pipelineDesc, nullptr, &pipelineHandle);\n if (res != VK_SUCCESS || !pipelineHandle)\n throw LSFG::vulkan_error(res, \"Failed to create compute pipeline\");\n\n // store layout and pipeline in shared ptr\n this->layout = std::shared_ptr(\n new VkPipelineLayout(layoutHandle),\n [dev = device.handle()](VkPipelineLayout* layout) {\n vkDestroyPipelineLayout(dev, *layout, nullptr);\n }\n );\n this->pipeline = std::shared_ptr(\n new VkPipeline(pipelineHandle),\n [dev = device.handle()](VkPipeline* pipeline) {\n vkDestroyPipeline(dev, *pipeline, nullptr);\n }\n );\n}\n\nvoid Pipeline::bind(const CommandBuffer& commandBuffer) const {\n vkCmdBindPipeline(commandBuffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, *this->pipeline);\n}\n"], ["/lsfg-vk/src/mini/semaphore.cpp", "#include \"mini/semaphore.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n\nusing namespace Mini;\n\nSemaphore::Semaphore(VkDevice device) {\n // create semaphore\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO\n };\n VkSemaphore semaphoreHandle{};\n auto res = Layer::ovkCreateSemaphore(device, &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // store semaphore in shared ptr\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device](VkSemaphore* semaphoreHandle) {\n Layer::ovkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nSemaphore::Semaphore(VkDevice device, int* fd) {\n // create semaphore\n const VkExportSemaphoreCreateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n .handleTypes = 
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = &exportInfo\n };\n VkSemaphore semaphoreHandle{};\n auto res = Layer::ovkCreateSemaphore(device, &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // export semaphore to fd\n const VkSemaphoreGetFdInfoKHR fdInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,\n .semaphore = semaphoreHandle,\n .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n res = Layer::ovkGetSemaphoreFdKHR(device, &fdInfo, fd);\n if (res != VK_SUCCESS || *fd < 0)\n throw LSFG::vulkan_error(res, \"Unable to export semaphore to fd\");\n\n // store semaphore in shared ptr\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device](VkSemaphore* semaphoreHandle) {\n Layer::ovkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/pool/shaderpool.cpp", "#include \"pool/shaderpool.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/device.hpp\"\n#include \"core/pipeline.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Pool;\n\nCore::ShaderModule ShaderPool::getShader(\n const Core::Device& device, const std::string& name,\n const std::vector>& types) {\n auto it = shaders.find(name);\n if (it != shaders.end())\n return it->second;\n\n // grab the shader\n auto bytecode = this->source(name);\n if (bytecode.empty())\n throw std::runtime_error(\"Shader code is empty: \" + name);\n\n // create the shader module\n Core::ShaderModule shader(device, bytecode, types);\n shaders[name] = shader;\n return shader;\n}\n\nCore::Pipeline ShaderPool::getPipeline(\n const Core::Device& device, const std::string& name) {\n auto it = pipelines.find(name);\n if (it != pipelines.end())\n 
return it->second;\n\n // grab the shader module\n auto shader = this->getShader(device, name, {});\n\n // create the pipeline\n Core::Pipeline pipeline(device, shader);\n pipelines[name] = pipeline;\n return pipeline;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_error.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n \n /**\n * \\brief DXVK error\n * \n * A generic exception class that stores a\n * message. Exceptions should be logged.\n */\n class DxvkError {\n \n public:\n \n DxvkError() { }\n DxvkError(std::string&& message)\n : m_message(std::move(message)) { }\n \n const std::string& message() const {\n return m_message;\n }\n \n private:\n \n std::string m_message;\n \n };\n \n}"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/unicode_codecvt.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2019 Trail of Bits, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n#include \n\nnamespace peparse {\n// See\n// https://stackoverflow.com/questions/38688417/utf-conversion-functions-in-c11\nstd::string from_utf16(const UCharString &u) {\n std::wstring_convert, char16_t> convert;\n return convert.to_bytes(u);\n}\n} // namespace peparse\n"], ["/lsfg-vk/framegen/src/core/sampler.cpp", "#include \n#include \n\n#include \"core/sampler.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nSampler::Sampler(const Core::Device& device,\n VkSamplerAddressMode mode,\n VkCompareOp compare,\n bool isWhite) {\n // create sampler\n const VkSamplerCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,\n .magFilter = VK_FILTER_LINEAR,\n .minFilter = VK_FILTER_LINEAR,\n .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,\n .addressModeU = mode,\n .addressModeV = mode,\n .addressModeW = mode,\n .compareOp = compare,\n .maxLod = VK_LOD_CLAMP_NONE,\n .borderColor =\n isWhite ? 
VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE\n : VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK\n };\n VkSampler samplerHandle{};\n auto res = vkCreateSampler(device.handle(), &desc, nullptr, &samplerHandle);\n if (res != VK_SUCCESS || samplerHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create sampler\");\n\n // store sampler in shared ptr\n this->sampler = std::shared_ptr(\n new VkSampler(samplerHandle),\n [dev = device.handle()](VkSampler* samplerHandle) {\n vkDestroySampler(dev, *samplerHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/commandpool.cpp", "#include \n#include \n\n#include \"core/commandpool.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nCommandPool::CommandPool(const Core::Device& device) {\n // create command pool\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = device.getComputeFamilyIdx()\n };\n VkCommandPool commandPoolHandle{};\n auto res = vkCreateCommandPool(device.handle(), &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n\n // store command pool in shared ptr\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device.handle()](VkCommandPool* commandPoolHandle) {\n vkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/log/log_debug.h", "#pragma once\n\n#include \n\n#include \"log/log.h\"\n\n#ifdef _MSC_VER\n#define METHOD_NAME __FUNCSIG__\n#else\n#define METHOD_NAME __PRETTY_FUNCTION__\n#endif\n\n#define TRACE_ENABLED\n\n#ifdef TRACE_ENABLED\n#define TRACE(...) \\\n do { dxvk::debug::trace(METHOD_NAME, ##__VA_ARGS__); } while (0)\n#else\n#define TRACE(...) 
\\\n do { } while (0)\n#endif\n\nnamespace dxvk::debug {\n \n std::string methodName(const std::string& prettyName);\n \n inline void traceArgs(std::stringstream& stream) { }\n \n template\n void traceArgs(std::stringstream& stream, const Arg1& arg1) {\n stream << arg1;\n }\n \n template\n void traceArgs(std::stringstream& stream, const Arg1& arg1, const Arg2& arg2, const Args&... args) {\n stream << arg1 << \",\";\n traceArgs(stream, arg2, args...);\n }\n \n template\n void trace(const std::string& funcName, const Args&... args) {\n std::stringstream stream;\n stream << methodName(funcName) << \"(\";\n traceArgs(stream, args...);\n stream << \")\";\n Logger::trace(stream.str());\n }\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/src/util/util_log.cpp", "#include \"log/log_debug.h\"\n\nnamespace dxvk::debug {\n \n std::string methodName(const std::string& prettyName) {\n size_t end = prettyName.find(\"(\");\n size_t begin = prettyName.substr(0, end).rfind(\" \") + 1;\n return prettyName.substr(begin,end - begin);\n }\n \n}\n"], ["/lsfg-vk/thirdparty/toml11/src/skip.cpp", "#include \n#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\n\nnamespace detail\n{\ntemplate bool skip_whitespace (location& loc, const context&);\ntemplate bool skip_empty_lines (location& loc, const context&);\ntemplate void skip_comment_block (location& loc, const context&);\ntemplate void skip_empty_or_comment_lines(location& loc, const context&);\ntemplate void skip_string_like (location& loc, const context&);\ntemplate void skip_array_like (location& loc, const context&);\ntemplate void skip_inline_table_like (location& loc, const context&);\ntemplate void skip_value (location& loc, const context&);\ntemplate void skip_key_value_pair (location& loc, const context&);\ntemplate void skip_until_next_table (location& loc, const context&);\n\ntemplate bool skip_whitespace (location& loc, const context&);\ntemplate bool skip_empty_lines (location& loc, const context&);\ntemplate void skip_comment_block (location& loc, const context&);\ntemplate void skip_empty_or_comment_lines(location& loc, const context&);\ntemplate void skip_string_like (location& loc, const context&);\ntemplate void skip_array_like (location& loc, const context&);\ntemplate void skip_inline_table_like (location& loc, const context&);\ntemplate void skip_value (location& loc, const context&);\ntemplate void skip_key_value_pair (location& loc, const context&);\ntemplate void skip_until_next_table (location& loc, const context&);\n\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/to_string.h", "#pragma once\n\n#include \n#include \n\n#if defined(_MSC_VER)\ntypedef std::basic_string UCharString;\n#else\ntypedef std::u16string UCharString;\n#endif\n\nnamespace peparse {\ntemplate \nstatic std::string to_string(T t, std::ios_base &(*f)(std::ios_base &) ) {\n std::ostringstream oss;\n oss << f << t;\n return oss.str();\n}\n\nstd::string from_utf16(const UCharString &u);\n} // 
namespace peparse\n"], ["/lsfg-vk/thirdparty/toml11/src/parser.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\n\ntemplate result, std::vector> try_parse(std::vector, std::string, spec);\ntemplate result, std::vector> try_parse(std::istream&, std::string, spec);\ntemplate result, std::vector> try_parse(std::string, spec);\ntemplate result, std::vector> try_parse(FILE*, std::string, spec);\ntemplate result, std::vector> try_parse_str(std::string, spec, cxx::source_location);\n\ntemplate basic_value parse(std::vector, std::string, spec);\ntemplate basic_value parse(std::istream&, std::string, spec);\ntemplate basic_value parse(std::string, spec);\ntemplate basic_value parse(FILE*, std::string, spec);\ntemplate basic_value parse_str(std::string, spec, cxx::source_location);\n\ntemplate result, std::vector> try_parse(std::vector, std::string, spec);\ntemplate result, std::vector> try_parse(std::istream&, std::string, spec);\ntemplate result, std::vector> try_parse(std::string, spec);\ntemplate result, std::vector> try_parse(FILE*, std::string, spec);\ntemplate result, std::vector> try_parse_str(std::string, spec, cxx::source_location);\n\ntemplate basic_value parse(std::vector, std::string, spec);\ntemplate basic_value parse(std::istream&, std::string, spec);\ntemplate basic_value parse(std::string, spec);\ntemplate basic_value parse(FILE*, std::string, spec);\ntemplate basic_value parse_str(std::string, spec, cxx::source_location);\n\n#if defined(TOML11_HAS_FILESYSTEM)\ntemplate cxx::enable_if_t::value, result, std::vector>> try_parse(const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, result, std::vector>> try_parse(const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, basic_value > parse (const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, basic_value > parse (const 
std::filesystem::path&, spec);\n#endif // filesystem\n\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_defs.h", "class DxbcOperandKind {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_enum.h", "#pragma once\n\n#define ENUM_NAME(name) \\\n case name: return os << #name\n\n#define ENUM_DEFAULT(name) \\\n default: return os << static_cast(e)\n"], ["/lsfg-vk/thirdparty/toml11/src/serializer.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\nstruct type_config;\nstruct ordered_type_config;\n\ntemplate typename basic_value::string_type\nformat(const basic_value&, const spec);\n\ntemplate typename basic_value::string_type\nformat(const typename basic_value::key_type& k,\n const basic_value& v, const spec);\n\ntemplate typename basic_value::string_type\nformat(const std::vector::key_type>& ks,\n const basic_value& v, const spec s);\n\ntemplate typename basic_value::string_type\nformat(const basic_value&, const spec);\n\ntemplate typename basic_value::string_type\nformat(const typename basic_value::key_type& k,\n const basic_value& v, const spec);\n\ntemplate typename basic_value::string_type\nformat(const std::vector::key_type>& ks,\n const basic_value& v, const spec s);\n\nnamespace detail\n{\ntemplate class serializer<::toml::type_config>;\ntemplate class serializer<::toml::ordered_type_config>;\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_names.h", "class dxbc_names {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_util.h", "class dxbc_util {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_likely.h", "#pragma once\n\n#ifdef __GNUC__\n#define likely(x) __builtin_expect(bool(x),1)\n#define unlikely(x) __builtin_expect(bool(x),0)\n#define force_inline inline __attribute__((always_inline))\n#else\n#define likely(x) (x)\n#define unlikely(x) (x)\n#define force_inline inline\n#endif\n"], 
["/lsfg-vk/thirdparty/toml11/src/context.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\nnamespace detail\n{\ntemplate class context<::toml::type_config>;\ntemplate class context<::toml::ordered_type_config>;\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/toml11/src/value_t.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/datetime.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/region.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/format.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/color.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/comments.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/location.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/syntax.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/error_info.cpp", "#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/literal.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/source_location.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/scanner.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/types.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\ntemplate class basic_value;\ntemplate class basic_value;\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_include.h", "#pragma once\n\n#include \n#include \n\n#include \"log/log.h\"\n#include \"log/log_debug.h\"\n\n#include \"util_error.h\"\n#include \"util_flags.h\"\n#include \"util_likely.h\"\n#include \"util_string.h\"\n\n#include \"rc/util_rc.h\"\n#include \"rc/util_rc_ptr.h\"\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/parse.h", "class parse {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_string.h", "class util_string {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_include.h", "#pragma once\n\n#include \n#include \n\n#include \"dxvk_limits.h\"\n#include \"dxvk_pipelayout.h\"\n\n#include \"log/log.h\"\n#include \"log/log_debug.h\"\n\n#include \"rc/util_rc.h\"\n#include \"rc/util_rc_ptr.h\"\n\n#include \"util_bit.h\"\n#include \"util_enum.h\"\n#include \"util_error.h\"\n#include \"util_string.h\"\n#include \"util_flags.h\"\n#include \"util_small_vector.h\"\n"]], "task_instance_info": {"created_time": "2025-08-20 20:01:24", 
"created_task_model": "DeepSeek-R1", "function_skeleton": "static bool compareSemanticNames(\n const std::string& a,\n const std::string& b) {}", "code_description": "This code segment functions to: Compare two input strings in a case-insensitive manner and verify their character sequence equivalence regardless of letter casing differences."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```cpp\n[TASK_BEGIN]\nstatic bool compareSemanticNames(\n const std::string& a,\n const std::string& b) {\n if (a.size() != b.size())\n return false;\n \n for (size_t i = 0; i < a.size(); ++i) {\n if (std::tolower(static_cast(a[i])) != \n std::tolower(static_cast(b[i])))\n return false;\n }\n \n return true;\n}\n[TASK_END]\n```", "inference_time": "2025-08-20 20-02-48"}, "editdistance_info": {"edit_distance": 41.6667, "calculate_time": "2025-08-20 20:02:48", "true_code_clean": "static bool compareSemanticNames(\n const std::string& a,\n const std::string& b) {\n if (a.size() != b.size())\n return false;\n for (size_t i = 0; i < a.size(); i++) {\n char ac = a[i];\n char bc = b[i];\n if (ac != bc) {\n if (ac >= 'A' && ac <= 'Z') ac += 'a' - 'A';\n if (bc >= 'A' && bc <= 'Z') bc += 'a' - 'A';\n if (ac != bc)\n return false;\n }\n }\n return true;\n }", "predict_code_clean": "static bool compareSemanticNames(\n const std::string& a,\n const std::string& b) {\n if (a.size() != b.size())\n return false;\n for (size_t i = 0; i < a.size(); ++i) {\n if (std::tolower(static_cast(a[i])) != \n std::tolower(static_cast(b[i])))\n return false;\n }\n return true;\n}"}} {"repo_name": "lsfg-vk", "file_name": "/lsfg-vk/framegen/src/core/sampler.cpp", "inference_info": {"prefix_code": "#include \n#include \n\n#include \"core/sampler.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\n", "suffix_code": "\n", "middle_code": "Sampler::Sampler(const Core::Device& device,\n VkSamplerAddressMode mode,\n 
VkCompareOp compare,\n bool isWhite) {\n const VkSamplerCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,\n .magFilter = VK_FILTER_LINEAR,\n .minFilter = VK_FILTER_LINEAR,\n .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,\n .addressModeU = mode,\n .addressModeV = mode,\n .addressModeW = mode,\n .compareOp = compare,\n .maxLod = VK_LOD_CLAMP_NONE,\n .borderColor =\n isWhite ? VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE\n : VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK\n };\n VkSampler samplerHandle{};\n auto res = vkCreateSampler(device.handle(), &desc, nullptr, &samplerHandle);\n if (res != VK_SUCCESS || samplerHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create sampler\");\n this->sampler = std::shared_ptr(\n new VkSampler(samplerHandle),\n [dev = device.handle()](VkSampler* samplerHandle) {\n vkDestroySampler(dev, *samplerHandle, nullptr);\n }\n );\n}", "code_description": null, "fill_type": "FUNCTION_TYPE", "language_type": "cpp", "sub_task_type": null}, "context_code": [["/lsfg-vk/framegen/src/core/image.cpp", "#include \n#include \n\n#include \"core/image.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nImage::Image(const Core::Device& device, VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = vkCreateImage(device.handle(), &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw 
LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetImageMemoryRequirements(device.handle(), imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = vkBindImageMemory(device.handle(), imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // create image view\n const VkImageViewCreateInfo viewDesc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n .image = imageHandle,\n .viewType = VK_IMAGE_VIEW_TYPE_2D,\n .format = format,\n .components = {\n .r = VK_COMPONENT_SWIZZLE_IDENTITY,\n .g = VK_COMPONENT_SWIZZLE_IDENTITY,\n .b = VK_COMPONENT_SWIZZLE_IDENTITY,\n .a = VK_COMPONENT_SWIZZLE_IDENTITY\n },\n .subresourceRange = {\n .aspectMask = aspectFlags,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n\n VkImageView viewHandle{};\n res = 
vkCreateImageView(device.handle(), &viewDesc, nullptr, &viewHandle);\n if (res != VK_SUCCESS || viewHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create image view\");\n\n // store objects in shared ptr\n this->layout = std::make_shared(VK_IMAGE_LAYOUT_UNDEFINED);\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device.handle()](VkImage* img) {\n vkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n this->view = std::shared_ptr(\n new VkImageView(viewHandle),\n [dev = device.handle()](VkImageView* imgView) {\n vkDestroyImageView(dev, *imgView, nullptr);\n }\n );\n}\n\n// shared memory constructor\n\nImage::Image(const Core::Device& device, VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags, int fd)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkExternalMemoryImageCreateInfo externalInfo{\n .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .pNext = &externalInfo,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = vkCreateImage(device.handle(), &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n 
VkMemoryRequirements memReqs;\n vkGetImageMemoryRequirements(device.handle(), imageHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // ~~allocate~~ and bind memory\n const VkMemoryDedicatedAllocateInfoKHR dedicatedInfo2{\n .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,\n .image = imageHandle,\n };\n const VkImportMemoryFdInfoKHR importInfo{\n .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,\n .pNext = &dedicatedInfo2,\n .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,\n .fd = fd // closes the fd\n };\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .pNext = fd == -1 ? 
nullptr : &importInfo,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = vkBindImageMemory(device.handle(), imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // create image view\n const VkImageViewCreateInfo viewDesc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,\n .image = imageHandle,\n .viewType = VK_IMAGE_VIEW_TYPE_2D,\n .format = format,\n .components = {\n .r = VK_COMPONENT_SWIZZLE_IDENTITY,\n .g = VK_COMPONENT_SWIZZLE_IDENTITY,\n .b = VK_COMPONENT_SWIZZLE_IDENTITY,\n .a = VK_COMPONENT_SWIZZLE_IDENTITY\n },\n .subresourceRange = {\n .aspectMask = aspectFlags,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n\n VkImageView viewHandle{};\n res = vkCreateImageView(device.handle(), &viewDesc, nullptr, &viewHandle);\n if (res != VK_SUCCESS || viewHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create image view\");\n\n // store objects in shared ptr\n this->layout = std::make_shared(VK_IMAGE_LAYOUT_UNDEFINED);\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev = device.handle()](VkImage* img) {\n vkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n this->view = std::shared_ptr(\n new VkImageView(viewHandle),\n [dev = device.handle()](VkImageView* imgView) {\n vkDestroyImageView(dev, *imgView, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/semaphore.cpp", "#include \n#include \n\n#include \"core/semaphore.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include 
\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nSemaphore::Semaphore(const Core::Device& device, std::optional initial) {\n // create semaphore\n const VkSemaphoreTypeCreateInfo typeInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,\n .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,\n .initialValue = initial.value_or(0)\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = initial.has_value() ? &typeInfo : nullptr,\n };\n VkSemaphore semaphoreHandle{};\n auto res = vkCreateSemaphore(device.handle(), &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // store semaphore in shared ptr\n this->isTimeline = initial.has_value();\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device.handle()](VkSemaphore* semaphoreHandle) {\n vkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nSemaphore::Semaphore(const Core::Device& device, int fd) {\n // create semaphore\n const VkExportSemaphoreCreateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = &exportInfo\n };\n VkSemaphore semaphoreHandle{};\n auto res = vkCreateSemaphore(device.handle(), &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // import semaphore from fd\n auto vkImportSemaphoreFdKHR = reinterpret_cast(\n vkGetDeviceProcAddr(device.handle(), \"vkImportSemaphoreFdKHR\"));\n\n const VkImportSemaphoreFdInfoKHR importInfo{\n .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,\n .semaphore = semaphoreHandle,\n .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,\n 
.fd = fd // closes the fd\n };\n res = vkImportSemaphoreFdKHR(device.handle(), &importInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to import semaphore from fd\");\n\n // store semaphore in shared ptr\n this->isTimeline = false;\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device.handle()](VkSemaphore* semaphoreHandle) {\n vkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nvoid Semaphore::signal(const Core::Device& device, uint64_t value) const {\n if (!this->isTimeline)\n throw std::logic_error(\"Invalid timeline semaphore\");\n\n const VkSemaphoreSignalInfo signalInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,\n .semaphore = this->handle(),\n .value = value\n };\n auto res = vkSignalSemaphore(device.handle(), &signalInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to signal semaphore\");\n}\n\nbool Semaphore::wait(const Core::Device& device, uint64_t value, uint64_t timeout) const {\n if (!this->isTimeline)\n throw std::logic_error(\"Invalid timeline semaphore\");\n\n VkSemaphore semaphore = this->handle();\n const VkSemaphoreWaitInfo waitInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,\n .semaphoreCount = 1,\n .pSemaphores = &semaphore,\n .pValues = &value\n };\n auto res = vkWaitSemaphores(device.handle(), &waitInfo, timeout);\n if (res != VK_SUCCESS && res != VK_TIMEOUT)\n throw LSFG::vulkan_error(res, \"Unable to wait for semaphore\");\n\n return res == VK_SUCCESS;\n}\n"], ["/lsfg-vk/framegen/src/core/buffer.cpp", "#include \n#include \n\n#include \"core/buffer.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nvoid Buffer::construct(const Core::Device& device, const void* data, VkBufferUsageFlags usage) {\n // create buffer\n const VkBufferCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n .size = this->size,\n .usage = 
usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkBuffer bufferHandle{};\n auto res = vkCreateBuffer(device.handle(), &desc, nullptr, &bufferHandle);\n if (res != VK_SUCCESS || bufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan buffer\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n vkGetPhysicalDeviceMemoryProperties(device.getPhysicalDevice(), &memProps);\n\n VkMemoryRequirements memReqs;\n vkGetBufferMemoryRequirements(device.handle(), bufferHandle, &memReqs);\n\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags &\n (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT))) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for buffer\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = vkAllocateMemory(device.handle(), &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan buffer\");\n\n res = vkBindBufferMemory(device.handle(), bufferHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan buffer\");\n\n // upload data to buffer\n uint8_t* buf{};\n res = vkMapMemory(device.handle(), memoryHandle, 0, this->size, 0, reinterpret_cast(&buf));\n if (res != VK_SUCCESS || buf == nullptr)\n throw LSFG::vulkan_error(res, \"Failed to map memory for Vulkan 
buffer\");\n std::copy_n(reinterpret_cast(data), this->size, buf);\n vkUnmapMemory(device.handle(), memoryHandle);\n\n // store buffer and memory in shared ptr\n this->buffer = std::shared_ptr(\n new VkBuffer(bufferHandle),\n [dev = device.handle()](VkBuffer* img) {\n vkDestroyBuffer(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device.handle()](VkDeviceMemory* mem) {\n vkFreeMemory(dev, *mem, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/mini/image.cpp", "#include \"mini/image.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n\nusing namespace Mini;\n\nImage::Image(VkDevice device, VkPhysicalDevice physicalDevice,\n VkExtent2D extent, VkFormat format,\n VkImageUsageFlags usage, VkImageAspectFlags aspectFlags, int* fd)\n : extent(extent), format(format), aspectFlags(aspectFlags) {\n // create image\n const VkExternalMemoryImageCreateInfo externalInfo{\n .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkImageCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,\n .pNext = &externalInfo,\n .imageType = VK_IMAGE_TYPE_2D,\n .format = format,\n .extent = {\n .width = extent.width,\n .height = extent.height,\n .depth = 1\n },\n .mipLevels = 1,\n .arrayLayers = 1,\n .samples = VK_SAMPLE_COUNT_1_BIT,\n .usage = usage,\n .sharingMode = VK_SHARING_MODE_EXCLUSIVE\n };\n VkImage imageHandle{};\n auto res = Layer::ovkCreateImage(device, &desc, nullptr, &imageHandle);\n if (res != VK_SUCCESS || imageHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan image\");\n\n // find memory type\n VkPhysicalDeviceMemoryProperties memProps;\n Layer::ovkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);\n\n VkMemoryRequirements memReqs;\n Layer::ovkGetImageMemoryRequirements(device, imageHandle, &memReqs);\n\n#pragma clang 
diagnostic push\n#pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n std::optional memType{};\n for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {\n if ((memReqs.memoryTypeBits & (1 << i)) && // NOLINTBEGIN\n (memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {\n memType.emplace(i);\n break;\n } // NOLINTEND\n }\n if (!memType.has_value())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Unable to find memory type for image\");\n#pragma clang diagnostic pop\n\n // allocate and bind memory\n const VkMemoryDedicatedAllocateInfoKHR dedicatedInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,\n .image = imageHandle,\n };\n const VkExportMemoryAllocateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,\n .pNext = &dedicatedInfo,\n .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR\n };\n const VkMemoryAllocateInfo allocInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n .pNext = &exportInfo,\n .allocationSize = memReqs.size,\n .memoryTypeIndex = memType.value()\n };\n VkDeviceMemory memoryHandle{};\n res = Layer::ovkAllocateMemory(device, &allocInfo, nullptr, &memoryHandle);\n if (res != VK_SUCCESS || memoryHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to allocate memory for Vulkan image\");\n\n res = Layer::ovkBindImageMemory(device, imageHandle, memoryHandle, 0);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to bind memory to Vulkan image\");\n\n // obtain the sharing fd\n const VkMemoryGetFdInfoKHR fdInfo{\n .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,\n .memory = memoryHandle,\n .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,\n };\n res = Layer::ovkGetMemoryFdKHR(device, &fdInfo, fd);\n if (res != VK_SUCCESS || *fd < 0)\n throw LSFG::vulkan_error(res, \"Failed to obtain sharing fd for Vulkan image\");\n\n // store objects in shared ptr\n this->image = std::shared_ptr(\n new VkImage(imageHandle),\n [dev 
= device](VkImage* img) {\n Layer::ovkDestroyImage(dev, *img, nullptr);\n }\n );\n this->memory = std::shared_ptr(\n new VkDeviceMemory(memoryHandle),\n [dev = device](VkDeviceMemory* mem) {\n Layer::ovkFreeMemory(dev, *mem, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/descriptorset.cpp", "#include \n#include \n\n#include \"core/descriptorset.hpp\"\n#include \"core/device.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/pipeline.hpp\"\n#include \"core/image.hpp\"\n#include \"core/sampler.hpp\"\n#include \"core/buffer.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n\nusing namespace LSFG::Core;\n\nDescriptorSet::DescriptorSet(const Core::Device& device,\n const DescriptorPool& pool, const ShaderModule& shaderModule) {\n // create descriptor set\n VkDescriptorSetLayout layout = shaderModule.getLayout();\n const VkDescriptorSetAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,\n .descriptorPool = pool.handle(),\n .descriptorSetCount = 1,\n .pSetLayouts = &layout\n };\n VkDescriptorSet descriptorSetHandle{};\n auto res = vkAllocateDescriptorSets(device.handle(), &desc, &descriptorSetHandle);\n if (res != VK_SUCCESS || descriptorSetHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate descriptor set\");\n\n /// store set in shared ptr\n this->descriptorSet = std::shared_ptr(\n new VkDescriptorSet(descriptorSetHandle),\n [dev = device.handle(), pool = pool](VkDescriptorSet* setHandle) {\n vkFreeDescriptorSets(dev, pool.handle(), 1, setHandle);\n }\n );\n}\n\nDescriptorSetUpdateBuilder DescriptorSet::update(const Core::Device& device) const {\n return { *this, device };\n}\n\nvoid DescriptorSet::bind(const CommandBuffer& commandBuffer, const Pipeline& pipeline) const {\n VkDescriptorSet descriptorSetHandle = this->handle();\n vkCmdBindDescriptorSets(commandBuffer.handle(),\n VK_PIPELINE_BIND_POINT_COMPUTE, 
pipeline.getLayout(),\n 0, 1, &descriptorSetHandle, 0, nullptr);\n}\n\n// updater class\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Image& image) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n .imageView = image.getView(),\n .imageLayout = VK_IMAGE_LAYOUT_GENERAL\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Sampler& sampler) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n .sampler = sampler.handle(),\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type, const Buffer& buffer) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = nullptr,\n .pBufferInfo = new VkDescriptorBufferInfo {\n .buffer = buffer.handle(),\n .range = buffer.getSize()\n }\n });\n return *this;\n}\n\nDescriptorSetUpdateBuilder& DescriptorSetUpdateBuilder::add(VkDescriptorType type) {\n this->entries.push_back({\n .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,\n .dstSet = this->descriptorSet->handle(),\n .dstBinding = static_cast(this->entries.size()),\n .descriptorCount = 1,\n .descriptorType = type,\n .pImageInfo = new VkDescriptorImageInfo {\n },\n .pBufferInfo = nullptr\n });\n return *this;\n}\n\nvoid DescriptorSetUpdateBuilder::build() 
{\n vkUpdateDescriptorSets(this->device->handle(),\n static_cast(this->entries.size()),\n this->entries.data(), 0, nullptr);\n\n // NOLINTBEGIN\n for (const auto& entry : this->entries) {\n delete entry.pImageInfo;\n delete entry.pBufferInfo;\n }\n // NOLINTEND\n}\n"], ["/lsfg-vk/src/mini/semaphore.cpp", "#include \"mini/semaphore.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n\nusing namespace Mini;\n\nSemaphore::Semaphore(VkDevice device) {\n // create semaphore\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO\n };\n VkSemaphore semaphoreHandle{};\n auto res = Layer::ovkCreateSemaphore(device, &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // store semaphore in shared ptr\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device](VkSemaphore* semaphoreHandle) {\n Layer::ovkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n\nSemaphore::Semaphore(VkDevice device, int* fd) {\n // create semaphore\n const VkExportSemaphoreCreateInfo exportInfo{\n .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,\n .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n const VkSemaphoreCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,\n .pNext = &exportInfo\n };\n VkSemaphore semaphoreHandle{};\n auto res = Layer::ovkCreateSemaphore(device, &desc, nullptr, &semaphoreHandle);\n if (res != VK_SUCCESS || semaphoreHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create semaphore\");\n\n // export semaphore to fd\n const VkSemaphoreGetFdInfoKHR fdInfo{\n .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,\n .semaphore = semaphoreHandle,\n .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT\n };\n res = Layer::ovkGetSemaphoreFdKHR(device, &fdInfo, fd);\n if (res != VK_SUCCESS 
|| *fd < 0)\n throw LSFG::vulkan_error(res, \"Unable to export semaphore to fd\");\n\n // store semaphore in shared ptr\n this->semaphore = std::shared_ptr(\n new VkSemaphore(semaphoreHandle),\n [dev = device](VkSemaphore* semaphoreHandle) {\n Layer::ovkDestroySemaphore(dev, *semaphoreHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/descriptorpool.cpp", "#include \n#include \n\n#include \"core/descriptorpool.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nDescriptorPool::DescriptorPool(const Core::Device& device) {\n // create descriptor pool\n const std::array pools{{ // arbitrary limits\n { .type = VK_DESCRIPTOR_TYPE_SAMPLER, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, .descriptorCount = 4096 },\n { .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 4096 }\n }};\n const VkDescriptorPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,\n .flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,\n .maxSets = 16384,\n .poolSizeCount = static_cast(pools.size()),\n .pPoolSizes = pools.data()\n };\n VkDescriptorPool poolHandle{};\n auto res = vkCreateDescriptorPool(device.handle(), &desc, nullptr, &poolHandle);\n if (res != VK_SUCCESS || poolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create descriptor pool\");\n\n // store pool in shared ptr\n this->descriptorPool = std::shared_ptr(\n new VkDescriptorPool(poolHandle),\n [dev = device.handle()](VkDescriptorPool* poolHandle) {\n vkDestroyDescriptorPool(dev, *poolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/pipeline.cpp", "#include \n#include \n\n#include \"core/pipeline.hpp\"\n#include \"core/device.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing 
namespace LSFG::Core;\n\nPipeline::Pipeline(const Core::Device& device, const ShaderModule& shader) {\n // create pipeline layout\n VkDescriptorSetLayout shaderLayout = shader.getLayout();\n const VkPipelineLayoutCreateInfo layoutDesc{\n .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,\n .setLayoutCount = 1,\n .pSetLayouts = &shaderLayout,\n };\n VkPipelineLayout layoutHandle{};\n auto res = vkCreatePipelineLayout(device.handle(), &layoutDesc, nullptr, &layoutHandle);\n if (res != VK_SUCCESS || !layoutHandle)\n throw LSFG::vulkan_error(res, \"Failed to create pipeline layout\");\n\n // create pipeline\n const VkPipelineShaderStageCreateInfo shaderStageInfo{\n .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,\n .stage = VK_SHADER_STAGE_COMPUTE_BIT,\n .module = shader.handle(),\n .pName = \"main\",\n };\n const VkComputePipelineCreateInfo pipelineDesc{\n .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,\n .stage = shaderStageInfo,\n .layout = layoutHandle,\n };\n VkPipeline pipelineHandle{};\n res = vkCreateComputePipelines(device.handle(),\n VK_NULL_HANDLE, 1, &pipelineDesc, nullptr, &pipelineHandle);\n if (res != VK_SUCCESS || !pipelineHandle)\n throw LSFG::vulkan_error(res, \"Failed to create compute pipeline\");\n\n // store layout and pipeline in shared ptr\n this->layout = std::shared_ptr(\n new VkPipelineLayout(layoutHandle),\n [dev = device.handle()](VkPipelineLayout* layout) {\n vkDestroyPipelineLayout(dev, *layout, nullptr);\n }\n );\n this->pipeline = std::shared_ptr(\n new VkPipeline(pipelineHandle),\n [dev = device.handle()](VkPipeline* pipeline) {\n vkDestroyPipeline(dev, *pipeline, nullptr);\n }\n );\n}\n\nvoid Pipeline::bind(const CommandBuffer& commandBuffer) const {\n vkCmdBindPipeline(commandBuffer.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, *this->pipeline);\n}\n"], ["/lsfg-vk/framegen/src/pool/resourcepool.cpp", "#include \"pool/resourcepool.hpp\"\n#include \"core/buffer.hpp\"\n#include \"core/device.hpp\"\n#include 
\"core/sampler.hpp\"\n\n#include \n\n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Pool;\n\nstruct ConstantBuffer {\n std::array inputOffset;\n uint32_t firstIter;\n uint32_t firstIterS;\n uint32_t advancedColorKind;\n uint32_t hdrSupport;\n float resolutionInvScale;\n float timestamp;\n float uiThreshold;\n std::array pad;\n};\n\nCore::Buffer ResourcePool::getBuffer(\n const Core::Device& device,\n float timestamp, bool firstIter, bool firstIterS) {\n uint64_t hash = 0;\n const union { float f; uint32_t i; } u{\n .f = timestamp };\n hash |= u.i;\n hash |= static_cast(firstIter) << 32;\n hash |= static_cast(firstIterS) << 33;\n\n auto it = buffers.find(hash);\n if (it != buffers.end())\n return it->second;\n\n // create the buffer\n const ConstantBuffer data{\n .inputOffset = { 0, 0 },\n .advancedColorKind = this->isHdr ? 2U : 0U,\n .hdrSupport = this->isHdr,\n .resolutionInvScale = this->flowScale,\n .timestamp = timestamp,\n .uiThreshold = 0.5F,\n };\n Core::Buffer buffer(device, data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);\n buffers[hash] = buffer;\n return buffer;\n}\n\nCore::Sampler ResourcePool::getSampler(\n const Core::Device& device,\n VkSamplerAddressMode type,\n VkCompareOp compare,\n bool isWhite) {\n uint64_t hash = 0;\n hash |= static_cast(type) << 0;\n hash |= static_cast(compare) << 8;\n hash |= static_cast(isWhite) << 16;\n\n auto it = samplers.find(hash);\n if (it != samplers.end())\n return it->second;\n\n // create the sampler\n Core::Sampler sampler(device, type, compare, isWhite);\n samplers[hash] = sampler;\n return sampler;\n}\n"], ["/lsfg-vk/framegen/src/core/commandbuffer.cpp", "#include \n#include \n\n#include \"core/commandbuffer.hpp\"\n#include \"core/device.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/fence.hpp\"\n#include \"core/semaphore.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace 
LSFG::Core;\n\nCommandBuffer::CommandBuffer(const Core::Device& device, const CommandPool& pool) {\n // create command buffer\n const VkCommandBufferAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n .commandPool = pool.handle(),\n .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n .commandBufferCount = 1\n };\n VkCommandBuffer commandBufferHandle{};\n auto res = vkAllocateCommandBuffers(device.handle(), &desc, &commandBufferHandle);\n if (res != VK_SUCCESS || commandBufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate command buffer\");\n\n // store command buffer in shared ptr\n this->state = std::make_shared(CommandBufferState::Empty);\n this->commandBuffer = std::shared_ptr(\n new VkCommandBuffer(commandBufferHandle),\n [dev = device.handle(), pool = pool.handle()](VkCommandBuffer* cmdBuffer) {\n vkFreeCommandBuffers(dev, pool, 1, cmdBuffer);\n }\n );\n}\n\nvoid CommandBuffer::begin() {\n if (*this->state != CommandBufferState::Empty)\n throw std::logic_error(\"Command buffer is not in Empty state\");\n\n const VkCommandBufferBeginInfo beginInfo = {\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT\n };\n auto res = vkBeginCommandBuffer(*this->commandBuffer, &beginInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to begin command buffer\");\n\n *this->state = CommandBufferState::Recording;\n}\n\nvoid CommandBuffer::dispatch(uint32_t x, uint32_t y, uint32_t z) const {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n vkCmdDispatch(*this->commandBuffer, x, y, z);\n}\n\nvoid CommandBuffer::end() {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n auto res = vkEndCommandBuffer(*this->commandBuffer);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to end 
command buffer\");\n\n *this->state = CommandBufferState::Full;\n}\n\nvoid CommandBuffer::submit(VkQueue queue, std::optional fence,\n const std::vector& waitSemaphores,\n std::optional> waitSemaphoreValues,\n const std::vector& signalSemaphores,\n std::optional> signalSemaphoreValues) {\n if (*this->state != CommandBufferState::Full)\n throw std::logic_error(\"Command buffer is not in Full state\");\n\n const std::vector waitStages(waitSemaphores.size(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);\n VkTimelineSemaphoreSubmitInfo timelineInfo{\n .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,\n };\n if (waitSemaphoreValues.has_value()) {\n timelineInfo.waitSemaphoreValueCount =\n static_cast(waitSemaphoreValues->size());\n timelineInfo.pWaitSemaphoreValues = waitSemaphoreValues->data();\n }\n if (signalSemaphoreValues.has_value()) {\n timelineInfo.signalSemaphoreValueCount =\n static_cast(signalSemaphoreValues->size());\n timelineInfo.pSignalSemaphoreValues = signalSemaphoreValues->data();\n }\n\n std::vector waitSemaphoresHandles;\n waitSemaphoresHandles.reserve(waitSemaphores.size());\n for (const auto& semaphore : waitSemaphores)\n waitSemaphoresHandles.push_back(semaphore.handle());\n std::vector signalSemaphoresHandles;\n signalSemaphoresHandles.reserve(signalSemaphores.size());\n for (const auto& semaphore : signalSemaphores)\n signalSemaphoresHandles.push_back(semaphore.handle());\n\n const VkSubmitInfo submitInfo{\n .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,\n .pNext = (waitSemaphoreValues.has_value() || signalSemaphoreValues.has_value())\n ? 
&timelineInfo : nullptr,\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphoresHandles.data(),\n .pWaitDstStageMask = waitStages.data(),\n .commandBufferCount = 1,\n .pCommandBuffers = &(*this->commandBuffer),\n .signalSemaphoreCount = static_cast(signalSemaphores.size()),\n .pSignalSemaphores = signalSemaphoresHandles.data()\n };\n auto res = vkQueueSubmit(queue, 1, &submitInfo, fence ? fence->handle() : VK_NULL_HANDLE);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to submit command buffer\");\n\n *this->state = CommandBufferState::Submitted;\n}\n"], ["/lsfg-vk/framegen/src/core/shadermodule.cpp", "#include \n#include \n\n#include \"core/shadermodule.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nShaderModule::ShaderModule(const Core::Device& device, const std::vector& code,\n const std::vector>& descriptorTypes) {\n // create shader module\n const uint8_t* data_ptr = code.data();\n const VkShaderModuleCreateInfo createInfo{\n .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,\n .codeSize = code.size(),\n .pCode = reinterpret_cast(data_ptr)\n };\n VkShaderModule shaderModuleHandle{};\n auto res = vkCreateShaderModule(device.handle(), &createInfo, nullptr, &shaderModuleHandle);\n if (res != VK_SUCCESS || !shaderModuleHandle)\n throw LSFG::vulkan_error(res, \"Failed to create shader module\");\n\n // create descriptor set layout\n std::vector layoutBindings;\n size_t bindIdx = 0;\n for (const auto &[count, type] : descriptorTypes)\n for (size_t i = 0; i < count; i++, bindIdx++)\n layoutBindings.emplace_back(VkDescriptorSetLayoutBinding {\n .binding = static_cast(bindIdx),\n .descriptorType = type,\n .descriptorCount = 1,\n .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT\n });\n\n const VkDescriptorSetLayoutCreateInfo layoutDesc{\n .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,\n 
.bindingCount = static_cast(layoutBindings.size()),\n .pBindings = layoutBindings.data()\n };\n VkDescriptorSetLayout descriptorSetLayout{};\n res = vkCreateDescriptorSetLayout(device.handle(), &layoutDesc, nullptr, &descriptorSetLayout);\n if (res != VK_SUCCESS || !descriptorSetLayout)\n throw LSFG::vulkan_error(res, \"Failed to create descriptor set layout\");\n\n // store module and layout in shared ptr\n this->shaderModule = std::shared_ptr(\n new VkShaderModule(shaderModuleHandle),\n [dev = device.handle()](VkShaderModule* shaderModuleHandle) {\n vkDestroyShaderModule(dev, *shaderModuleHandle, nullptr);\n }\n );\n this->descriptorSetLayout = std::shared_ptr(\n new VkDescriptorSetLayout(descriptorSetLayout),\n [dev = device.handle()](VkDescriptorSetLayout* layout) {\n vkDestroyDescriptorSetLayout(dev, *layout, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/device.cpp", "#include \n#include \n\n#include \"core/device.hpp\"\n#include \"core/instance.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nconst std::vector requiredExtensions = {\n \"VK_KHR_external_memory_fd\",\n \"VK_KHR_external_semaphore_fd\",\n \"VK_EXT_robustness2\",\n};\n\nDevice::Device(const Instance& instance, uint64_t deviceUUID) {\n // get all physical devices\n uint32_t deviceCount{};\n auto res = vkEnumeratePhysicalDevices(instance.handle(), &deviceCount, nullptr);\n if (res != VK_SUCCESS || deviceCount == 0)\n throw LSFG::vulkan_error(res, \"Failed to enumerate physical devices\");\n\n std::vector devices(deviceCount);\n res = vkEnumeratePhysicalDevices(instance.handle(), &deviceCount, devices.data());\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get physical devices\");\n\n // get device by uuid\n std::optional physicalDevice;\n for (const auto& device : devices) {\n VkPhysicalDeviceProperties properties;\n vkGetPhysicalDeviceProperties(device, &properties);\n\n const uint64_t uuid =\n 
static_cast(properties.vendorID) << 32 | properties.deviceID;\n if (deviceUUID == uuid || deviceUUID == 0x1463ABAC) {\n physicalDevice = device;\n break;\n }\n }\n if (!physicalDevice)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Could not find physical device with UUID\");\n\n // find queue family indices\n uint32_t familyCount{};\n vkGetPhysicalDeviceQueueFamilyProperties(*physicalDevice, &familyCount, nullptr);\n\n std::vector queueFamilies(familyCount);\n vkGetPhysicalDeviceQueueFamilyProperties(*physicalDevice, &familyCount, queueFamilies.data());\n\n std::optional computeFamilyIdx;\n for (uint32_t i = 0; i < familyCount; ++i) {\n if (queueFamilies[i].queueFlags & VK_QUEUE_COMPUTE_BIT)\n computeFamilyIdx = i;\n }\n if (!computeFamilyIdx)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"No compute queue family found\");\n\n // create logical device\n const float queuePriority{1.0F}; // highest priority\n VkPhysicalDeviceRobustness2FeaturesEXT robustness2{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT,\n .nullDescriptor = VK_TRUE,\n };\n VkPhysicalDeviceVulkan13Features features13{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES,\n .pNext = &robustness2,\n .synchronization2 = VK_TRUE\n };\n const VkPhysicalDeviceVulkan12Features features12{\n .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,\n .pNext = &features13,\n .timelineSemaphore = VK_TRUE,\n .vulkanMemoryModel = VK_TRUE\n };\n const VkDeviceQueueCreateInfo computeQueueDesc{\n .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,\n .queueFamilyIndex = *computeFamilyIdx,\n .queueCount = 1,\n .pQueuePriorities = &queuePriority\n };\n const VkDeviceCreateInfo deviceCreateInfo{\n .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,\n .pNext = &features12,\n .queueCreateInfoCount = 1,\n .pQueueCreateInfos = &computeQueueDesc,\n .enabledExtensionCount = static_cast(requiredExtensions.size()),\n .ppEnabledExtensionNames = 
requiredExtensions.data()\n };\n VkDevice deviceHandle{};\n res = vkCreateDevice(*physicalDevice, &deviceCreateInfo, nullptr, &deviceHandle);\n if (res != VK_SUCCESS | deviceHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Failed to create logical device\");\n\n volkLoadDevice(deviceHandle);\n\n // get compute queue\n VkQueue queueHandle{};\n vkGetDeviceQueue(deviceHandle, *computeFamilyIdx, 0, &queueHandle);\n\n // store in shared ptr\n this->computeQueue = queueHandle;\n this->computeFamilyIdx = *computeFamilyIdx;\n this->physicalDevice = *physicalDevice;\n this->device = std::shared_ptr(\n new VkDevice(deviceHandle),\n [](VkDevice* device) {\n vkDestroyDevice(*device, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/commandpool.cpp", "#include \n#include \n\n#include \"core/commandpool.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n\nusing namespace LSFG::Core;\n\nCommandPool::CommandPool(const Core::Device& device) {\n // create command pool\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = device.getComputeFamilyIdx()\n };\n VkCommandPool commandPoolHandle{};\n auto res = vkCreateCommandPool(device.handle(), &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n\n // store command pool in shared ptr\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device.handle()](VkCommandPool* commandPoolHandle) {\n vkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/framegen/src/core/fence.cpp", "#include \n#include \n\n#include \"core/fence.hpp\"\n#include \"core/device.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n\nusing namespace LSFG::Core;\n\nFence::Fence(const Core::Device& device) {\n // create fence\n const VkFenceCreateInfo desc{\n .sType = 
VK_STRUCTURE_TYPE_FENCE_CREATE_INFO\n };\n VkFence fenceHandle{};\n auto res = vkCreateFence(device.handle(), &desc, nullptr, &fenceHandle);\n if (res != VK_SUCCESS || fenceHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create fence\");\n\n // store fence in shared ptr\n this->fence = std::shared_ptr(\n new VkFence(fenceHandle),\n [dev = device.handle()](VkFence* fenceHandle) {\n vkDestroyFence(dev, *fenceHandle, nullptr);\n }\n );\n}\n\nvoid Fence::reset(const Core::Device& device) const {\n VkFence fenceHandle = this->handle();\n auto res = vkResetFences(device.handle(), 1, &fenceHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to reset fence\");\n}\n\nbool Fence::wait(const Core::Device& device, uint64_t timeout) const {\n VkFence fenceHandle = this->handle();\n auto res = vkWaitForFences(device.handle(), 1, &fenceHandle, VK_TRUE, timeout);\n if (res != VK_SUCCESS && res != VK_TIMEOUT)\n throw LSFG::vulkan_error(res, \"Unable to wait for fence\");\n\n return res == VK_SUCCESS;\n}\n"], ["/lsfg-vk/src/mini/commandbuffer.cpp", "#include \"mini/commandbuffer.hpp\"\n#include \"mini/commandpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n\nusing namespace Mini;\n\nCommandBuffer::CommandBuffer(VkDevice device, const CommandPool& pool) {\n // create command buffer\n const VkCommandBufferAllocateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,\n .commandPool = pool.handle(),\n .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,\n .commandBufferCount = 1\n };\n VkCommandBuffer commandBufferHandle{};\n auto res = Layer::ovkAllocateCommandBuffers(device, &desc, &commandBufferHandle);\n if (res != VK_SUCCESS || commandBufferHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to allocate command buffer\");\n res = Layer::ovkSetDeviceLoaderData(device, commandBufferHandle);\n if (res != VK_SUCCESS)\n throw 
LSFG::vulkan_error(res, \"Unable to set device loader data for command buffer\");\n\n // store command buffer in shared ptr\n this->state = std::make_shared(CommandBufferState::Empty);\n this->commandBuffer = std::shared_ptr(\n new VkCommandBuffer(commandBufferHandle),\n [dev = device, pool = pool.handle()](VkCommandBuffer* cmdBuffer) {\n Layer::ovkFreeCommandBuffers(dev, pool, 1, cmdBuffer);\n }\n );\n}\n\nvoid CommandBuffer::begin() {\n if (*this->state != CommandBufferState::Empty)\n throw std::logic_error(\"Command buffer is not in Empty state\");\n\n const VkCommandBufferBeginInfo beginInfo = {\n .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT\n };\n auto res = Layer::ovkBeginCommandBuffer(*this->commandBuffer, &beginInfo);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to begin command buffer\");\n\n *this->state = CommandBufferState::Recording;\n}\n\nvoid CommandBuffer::end() {\n if (*this->state != CommandBufferState::Recording)\n throw std::logic_error(\"Command buffer is not in Recording state\");\n\n auto res = Layer::ovkEndCommandBuffer(*this->commandBuffer);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to end command buffer\");\n\n *this->state = CommandBufferState::Full;\n}\n\nvoid CommandBuffer::submit(VkQueue queue,\n const std::vector& waitSemaphores,\n const std::vector& signalSemaphores) {\n if (*this->state != CommandBufferState::Full)\n throw std::logic_error(\"Command buffer is not in Full state\");\n\n const std::vector waitStages(waitSemaphores.size(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);\n\n const VkSubmitInfo submitInfo{\n .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphores.data(),\n .pWaitDstStageMask = waitStages.data(),\n .commandBufferCount = 1,\n .pCommandBuffers = &(*this->commandBuffer),\n .signalSemaphoreCount = 
static_cast(signalSemaphores.size()),\n .pSignalSemaphores = signalSemaphores.data()\n };\n auto res = Layer::ovkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to submit command buffer\");\n\n *this->state = CommandBufferState::Submitted;\n}\n"], ["/lsfg-vk/src/mini/commandpool.cpp", "#include \"mini/commandpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n\nusing namespace Mini;\n\nCommandPool::CommandPool(VkDevice device, uint32_t graphicsFamilyIdx) {\n // create command pool\n const VkCommandPoolCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,\n .queueFamilyIndex = graphicsFamilyIdx\n };\n VkCommandPool commandPoolHandle{};\n auto res = Layer::ovkCreateCommandPool(device, &desc, nullptr, &commandPoolHandle);\n if (res != VK_SUCCESS || commandPoolHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create command pool\");\n\n // store command pool in shared ptr\n this->commandPool = std::shared_ptr(\n new VkCommandPool(commandPoolHandle),\n [dev = device](VkCommandPool* commandPoolHandle) {\n Layer::ovkDestroyCommandPool(dev, *commandPoolHandle, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/hooks.cpp", "#include \"hooks.hpp\"\n#include \"common/exception.hpp\"\n#include \"config/config.hpp\"\n#include \"utils/utils.hpp\"\n#include \"context.hpp\"\n#include \"layer.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Hooks;\n\nnamespace {\n\n ///\n /// Add extensions to the instance create info.\n ///\n VkResult myvkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n auto extensions = Utils::addExtensions(\n pCreateInfo->ppEnabledExtensionNames,\n pCreateInfo->enabledExtensionCount,\n {\n 
\"VK_KHR_get_physical_device_properties2\",\n \"VK_KHR_external_memory_capabilities\",\n \"VK_KHR_external_semaphore_capabilities\"\n }\n );\n VkInstanceCreateInfo createInfo = *pCreateInfo;\n createInfo.enabledExtensionCount = static_cast(extensions.size());\n createInfo.ppEnabledExtensionNames = extensions.data();\n auto res = Layer::ovkCreateInstance(&createInfo, pAllocator, pInstance);\n if (res == VK_ERROR_EXTENSION_NOT_PRESENT)\n throw std::runtime_error(\n \"Required Vulkan instance extensions are not present.\"\n \"Your GPU driver is not supported.\");\n return res;\n }\n\n /// Map of devices to related information.\n std::unordered_map deviceToInfo;\n\n ///\n /// Add extensions to the device create info.\n /// (function pointers are not initialized yet)\n ///\n VkResult myvkCreateDevicePre(\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n // add extensions\n auto extensions = Utils::addExtensions(\n pCreateInfo->ppEnabledExtensionNames,\n pCreateInfo->enabledExtensionCount,\n {\n \"VK_KHR_external_memory\",\n \"VK_KHR_external_memory_fd\",\n \"VK_KHR_external_semaphore\",\n \"VK_KHR_external_semaphore_fd\"\n }\n );\n VkDeviceCreateInfo createInfo = *pCreateInfo;\n createInfo.enabledExtensionCount = static_cast(extensions.size());\n createInfo.ppEnabledExtensionNames = extensions.data();\n auto res = Layer::ovkCreateDevice(physicalDevice, &createInfo, pAllocator, pDevice);\n if (res == VK_ERROR_EXTENSION_NOT_PRESENT)\n throw std::runtime_error(\n \"Required Vulkan device extensions are not present.\"\n \"Your GPU driver is not supported.\");\n return res;\n }\n\n ///\n /// Add related device information after the device is created.\n ///\n VkResult myvkCreateDevicePost(\n VkPhysicalDevice physicalDevice,\n VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks*,\n VkDevice* pDevice) {\n deviceToInfo.emplace(*pDevice, DeviceInfo {\n .device = *pDevice,\n 
.physicalDevice = physicalDevice,\n .queue = Utils::findQueue(*pDevice, physicalDevice, pCreateInfo, VK_QUEUE_GRAPHICS_BIT)\n });\n return VK_SUCCESS;\n }\n\n /// Erase the device information when the device is destroyed.\n void myvkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator) noexcept {\n deviceToInfo.erase(device);\n Layer::ovkDestroyDevice(device, pAllocator);\n }\n\n std::unordered_map swapchains;\n std::unordered_map swapchainToDeviceTable;\n std::unordered_map swapchainToPresent;\n\n ///\n /// Adjust swapchain creation parameters and create a swapchain context.\n ///\n VkResult myvkCreateSwapchainKHR(\n VkDevice device,\n const VkSwapchainCreateInfoKHR* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSwapchainKHR* pSwapchain) noexcept {\n // find device\n auto it = deviceToInfo.find(device);\n if (it == deviceToInfo.end()) {\n Utils::logLimitN(\"swapMap\", 5, \"Device not found in map\");\n return Layer::ovkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);\n }\n Utils::resetLimitN(\"swapMap\");\n auto& deviceInfo = it->second;\n\n // increase amount of images in swapchain\n VkSwapchainCreateInfoKHR createInfo = *pCreateInfo;\n const auto maxImages = Utils::getMaxImageCount(\n deviceInfo.physicalDevice, pCreateInfo->surface);\n createInfo.minImageCount = createInfo.minImageCount + 1\n + static_cast(deviceInfo.queue.first);\n if (createInfo.minImageCount > maxImages) {\n createInfo.minImageCount = maxImages;\n Utils::logLimitN(\"swapCount\", 10,\n \"Requested image count (\" +\n std::to_string(pCreateInfo->minImageCount) + \") \"\n \"exceeds maximum allowed (\" +\n std::to_string(maxImages) + \"). \"\n \"Continuing with maximum allowed image count. 
\"\n \"This might lead to performance degradation.\");\n } else {\n Utils::resetLimitN(\"swapCount\");\n }\n\n // allow copy operations on swapchain images\n createInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;\n createInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;\n\n // enforce present mode\n createInfo.presentMode = Config::activeConf.e_present;\n\n // retire potential old swapchain\n if (pCreateInfo->oldSwapchain) {\n swapchains.erase(pCreateInfo->oldSwapchain);\n swapchainToDeviceTable.erase(pCreateInfo->oldSwapchain);\n }\n\n // create swapchain\n auto res = Layer::ovkCreateSwapchainKHR(device, &createInfo, pAllocator, pSwapchain);\n if (res != VK_SUCCESS)\n return res; // can't be caused by lsfg-vk (yet)\n\n try {\n swapchainToPresent.emplace(*pSwapchain, createInfo.presentMode);\n\n // get all swapchain images\n uint32_t imageCount{};\n res = Layer::ovkGetSwapchainImagesKHR(device, *pSwapchain, &imageCount, nullptr);\n if (res != VK_SUCCESS || imageCount == 0)\n throw LSFG::vulkan_error(res, \"Failed to get swapchain image count\");\n\n std::vector swapchainImages(imageCount);\n res = Layer::ovkGetSwapchainImagesKHR(device, *pSwapchain,\n &imageCount, swapchainImages.data());\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get swapchain images\");\n\n // create swapchain context\n swapchainToDeviceTable.emplace(*pSwapchain, device);\n swapchains.emplace(*pSwapchain, LsContext(\n deviceInfo, *pSwapchain, pCreateInfo->imageExtent,\n swapchainImages\n ));\n\n std::cerr << \"lsfg-vk: Swapchain context \" <<\n (createInfo.oldSwapchain ? 
\"recreated\" : \"created\")\n << \" (using \" << imageCount << \" images).\\n\";\n\n Utils::resetLimitN(\"swapCtxCreate\");\n } catch (const std::exception& e) {\n Utils::logLimitN(\"swapCtxCreate\", 5,\n \"An error occurred while creating the swapchain wrapper:\\n\"\n \"- \" + std::string(e.what()));\n return VK_SUCCESS; // swapchain is still valid\n }\n return VK_SUCCESS;\n }\n\n ///\n /// Update presentation parameters and present the next frame(s).\n ///\n VkResult myvkQueuePresentKHR(\n VkQueue queue,\n const VkPresentInfoKHR* pPresentInfo) noexcept {\n // find swapchain device\n auto it = swapchainToDeviceTable.find(*pPresentInfo->pSwapchains);\n if (it == swapchainToDeviceTable.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n\n // find device info\n auto it2 = deviceToInfo.find(it->second);\n if (it2 == deviceToInfo.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Device not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& deviceInfo = it2->second;\n\n // find swapchain context\n auto it3 = swapchains.find(*pPresentInfo->pSwapchains);\n if (it3 == swapchains.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain context not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& swapchain = it3->second;\n\n // find present mode\n auto it4 = swapchainToPresent.find(*pPresentInfo->pSwapchains);\n if (it4 == swapchainToPresent.end()) {\n Utils::logLimitN(\"swapMap\", 5,\n \"Swapchain present mode not found in map\");\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n }\n auto& present = it4->second;\n\n // enforce present mode | NOLINTBEGIN\n #pragma clang diagnostic push\n #pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n const VkSwapchainPresentModeInfoEXT* presentModeInfo =\n reinterpret_cast(pPresentInfo->pNext);\n while (presentModeInfo) {\n if (presentModeInfo->sType == 
VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODE_INFO_EXT) {\n for (size_t i = 0; i < presentModeInfo->swapchainCount; i++)\n const_cast(presentModeInfo->pPresentModes)[i] =\n present;\n }\n presentModeInfo =\n reinterpret_cast(presentModeInfo->pNext);\n }\n #pragma clang diagnostic pop\n\n // NOLINTEND | present the next frame\n VkResult res{}; // might return VK_SUBOPTIMAL_KHR\n try {\n // ensure config is valid\n auto& conf = Config::activeConf;\n if (!conf.config_file.empty()\n && (\n !std::filesystem::exists(conf.config_file)\n || conf.timestamp != std::filesystem::last_write_time(conf.config_file)\n )) {\n Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n return VK_ERROR_OUT_OF_DATE_KHR;\n }\n\n // ensure present mode is still valid\n if (present != conf.e_present) {\n Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n return VK_ERROR_OUT_OF_DATE_KHR;\n }\n\n // skip if disabled\n if (conf.multiplier <= 1)\n return Layer::ovkQueuePresentKHR(queue, pPresentInfo);\n\n // present the swapchain\n std::vector semaphores(pPresentInfo->waitSemaphoreCount);\n std::copy_n(pPresentInfo->pWaitSemaphores, semaphores.size(), semaphores.data());\n\n res = swapchain.present(deviceInfo, pPresentInfo->pNext,\n queue, semaphores, *pPresentInfo->pImageIndices);\n\n Utils::resetLimitN(\"swapPresent\");\n } catch (const std::exception& e) {\n Utils::logLimitN(\"swapPresent\", 5,\n \"An error occurred while presenting the swapchain:\\n\"\n \"- \" + std::string(e.what()));\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return res;\n }\n\n /// Erase the swapchain context and mapping when the swapchain is destroyed.\n void myvkDestroySwapchainKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n const VkAllocationCallbacks* pAllocator) noexcept {\n swapchains.erase(swapchain);\n swapchainToDeviceTable.erase(swapchain);\n swapchainToPresent.erase(swapchain);\n Layer::ovkDestroySwapchainKHR(device, swapchain, pAllocator);\n }\n}\n\nstd::unordered_map Hooks::hooks = {\n // instance hooks\n 
{\"vkCreateInstance\", reinterpret_cast(myvkCreateInstance)},\n\n // device hooks\n {\"vkCreateDevicePre\", reinterpret_cast(myvkCreateDevicePre)},\n {\"vkCreateDevicePost\", reinterpret_cast(myvkCreateDevicePost)},\n {\"vkDestroyDevice\", reinterpret_cast(myvkDestroyDevice)},\n\n // swapchain hooks\n {\"vkCreateSwapchainKHR\", reinterpret_cast(myvkCreateSwapchainKHR)},\n {\"vkQueuePresentKHR\", reinterpret_cast(myvkQueuePresentKHR)},\n {\"vkDestroySwapchainKHR\", reinterpret_cast(myvkDestroySwapchainKHR)}\n};\n"], ["/lsfg-vk/framegen/src/core/instance.cpp", "#include \n#include \n\n#include \"core/instance.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG::Core;\n\nconst std::vector requiredExtensions = {\n\n};\n\nInstance::Instance() {\n volkInitialize();\n\n // create Vulkan instance\n const VkApplicationInfo appInfo{\n .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,\n .pApplicationName = \"lsfg-vk-base\",\n .applicationVersion = VK_MAKE_VERSION(0, 0, 1),\n .pEngineName = \"lsfg-vk-base\",\n .engineVersion = VK_MAKE_VERSION(0, 0, 1),\n .apiVersion = VK_API_VERSION_1_3\n };\n const VkInstanceCreateInfo createInfo{\n .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,\n .pApplicationInfo = &appInfo,\n .enabledExtensionCount = static_cast(requiredExtensions.size()),\n .ppEnabledExtensionNames = requiredExtensions.data()\n };\n VkInstance instanceHandle{};\n auto res = vkCreateInstance(&createInfo, nullptr, &instanceHandle);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to create Vulkan instance\");\n\n volkLoadInstance(instanceHandle);\n\n // store in shared ptr\n this->instance = std::shared_ptr(\n new VkInstance(instanceHandle),\n [](VkInstance* instance) {\n vkDestroyInstance(*instance, nullptr);\n }\n );\n}\n"], ["/lsfg-vk/src/context.cpp", "#include \"context.hpp\"\n#include \"config/config.hpp\"\n#include \"common/exception.hpp\"\n#include \"extract/extract.hpp\"\n#include 
\"extract/trans.hpp\"\n#include \"utils/utils.hpp\"\n#include \"hooks.hpp\"\n#include \"layer.hpp\"\n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nLsContext::LsContext(const Hooks::DeviceInfo& info, VkSwapchainKHR swapchain,\n VkExtent2D extent, const std::vector& swapchainImages)\n : swapchain(swapchain), swapchainImages(swapchainImages),\n extent(extent) {\n // get updated configuration\n auto& conf = Config::activeConf;\n if (!conf.config_file.empty()\n && (\n !std::filesystem::exists(conf.config_file)\n || conf.timestamp != std::filesystem::last_write_time(conf.config_file)\n )) {\n std::cerr << \"lsfg-vk: Rereading configuration, as it is no longer valid.\\n\";\n std::this_thread::sleep_for(std::chrono::milliseconds(100));\n\n // reread configuration\n const std::string file = Utils::getConfigFile();\n const auto name = Utils::getProcessName();\n try {\n Config::updateConfig(file);\n conf = Config::getConfig(name);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: Failed to update configuration, continuing using old:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n }\n\n LSFG_3_1P::finalize();\n LSFG_3_1::finalize();\n\n // print config\n std::cerr << \"lsfg-vk: Reloaded configuration for \" << name.second << \":\\n\";\n if (!conf.dll.empty()) std::cerr << \" Using DLL from: \" << conf.dll << '\\n';\n std::cerr << \" Multiplier: \" << conf.multiplier << '\\n';\n std::cerr << \" Flow Scale: \" << conf.flowScale << '\\n';\n std::cerr << \" Performance Mode: \" << (conf.performance ? \"Enabled\" : \"Disabled\") << '\\n';\n std::cerr << \" HDR Mode: \" << (conf.hdr ? \"Enabled\" : \"Disabled\") << '\\n';\n if (conf.e_present != 2) std::cerr << \" ! 
Present Mode: \" << conf.e_present << '\\n';\n\n if (conf.multiplier <= 1) return;\n }\n // we could take the format from the swapchain,\n // but honestly this is safer.\n const VkFormat format = conf.hdr\n ? VK_FORMAT_R8G8B8A8_UNORM\n : VK_FORMAT_R16G16B16A16_SFLOAT;\n\n // prepare textures for lsfg\n std::array fds{};\n this->frame_0 = Mini::Image(info.device, info.physicalDevice,\n extent, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &fds.at(0));\n this->frame_1 = Mini::Image(info.device, info.physicalDevice,\n extent, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &fds.at(1));\n\n std::vector outFds(conf.multiplier - 1);\n for (size_t i = 0; i < (conf.multiplier - 1); ++i)\n this->out_n.emplace_back(info.device, info.physicalDevice,\n extent, format,\n VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_ASPECT_COLOR_BIT,\n &outFds.at(i));\n\n // initialize lsfg\n auto* lsfgInitialize = LSFG_3_1::initialize;\n auto* lsfgCreateContext = LSFG_3_1::createContext;\n auto* lsfgDeleteContext = LSFG_3_1::deleteContext;\n if (conf.performance) {\n lsfgInitialize = LSFG_3_1P::initialize;\n lsfgCreateContext = LSFG_3_1P::createContext;\n lsfgDeleteContext = LSFG_3_1P::deleteContext;\n }\n\n setenv(\"DISABLE_LSFG\", \"1\", 1); // NOLINT\n\n lsfgInitialize(\n Utils::getDeviceUUID(info.physicalDevice),\n conf.hdr, 1.0F / conf.flowScale, conf.multiplier - 1,\n [](const std::string& name) {\n auto dxbc = Extract::getShader(name);\n auto spirv = Extract::translateShader(dxbc);\n return spirv;\n }\n );\n\n this->lsfgCtxId = std::shared_ptr(\n new int32_t(lsfgCreateContext(fds.at(0), fds.at(1), outFds, extent, format)),\n [lsfgDeleteContext = lsfgDeleteContext](const int32_t* id) {\n lsfgDeleteContext(*id);\n }\n );\n\n unsetenv(\"DISABLE_LSFG\"); // NOLINT\n\n // prepare render passes\n this->cmdPool = Mini::CommandPool(info.device, info.queue.first);\n for (size_t i = 0; i < 8; i++) {\n auto& pass = this->passInfos.at(i);\n 
pass.renderSemaphores.resize(conf.multiplier - 1);\n pass.acquireSemaphores.resize(conf.multiplier - 1);\n pass.postCopyBufs.resize(conf.multiplier - 1);\n pass.postCopySemaphores.resize(conf.multiplier - 1);\n pass.prevPostCopySemaphores.resize(conf.multiplier - 1);\n }\n}\n\nVkResult LsContext::present(const Hooks::DeviceInfo& info, const void* pNext, VkQueue queue,\n const std::vector& gameRenderSemaphores, uint32_t presentIdx) {\n const auto& conf = Config::activeConf;\n auto& pass = this->passInfos.at(this->frameIdx % 8);\n\n // 1. copy swapchain image to frame_0/frame_1\n int preCopySemaphoreFd{};\n pass.preCopySemaphores.at(0) = Mini::Semaphore(info.device, &preCopySemaphoreFd);\n pass.preCopySemaphores.at(1) = Mini::Semaphore(info.device);\n pass.preCopyBuf = Mini::CommandBuffer(info.device, this->cmdPool);\n pass.preCopyBuf.begin();\n\n Utils::copyImage(pass.preCopyBuf.handle(),\n this->swapchainImages.at(presentIdx),\n this->frameIdx % 2 == 0 ? this->frame_0.handle() : this->frame_1.handle(),\n this->extent.width, this->extent.height,\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n true, false);\n\n pass.preCopyBuf.end();\n\n std::vector gameRenderSemaphores2 = gameRenderSemaphores;\n if (this->frameIdx > 0)\n gameRenderSemaphores2.emplace_back(this->passInfos.at((this->frameIdx - 1) % 8)\n .preCopySemaphores.at(1).handle());\n pass.preCopyBuf.submit(info.queue.second,\n gameRenderSemaphores2,\n { pass.preCopySemaphores.at(0).handle(),\n pass.preCopySemaphores.at(1).handle() });\n\n // 2. 
render intermediary frames\n std::vector renderSemaphoreFds(conf.multiplier - 1);\n for (size_t i = 0; i < (conf.multiplier - 1); ++i)\n pass.renderSemaphores.at(i) = Mini::Semaphore(info.device, &renderSemaphoreFds.at(i));\n\n if (conf.performance)\n LSFG_3_1P::presentContext(*this->lsfgCtxId,\n preCopySemaphoreFd,\n renderSemaphoreFds);\n else\n LSFG_3_1::presentContext(*this->lsfgCtxId,\n preCopySemaphoreFd,\n renderSemaphoreFds);\n\n for (size_t i = 0; i < (conf.multiplier - 1); i++) {\n // 3. acquire next swapchain image\n pass.acquireSemaphores.at(i) = Mini::Semaphore(info.device);\n uint32_t imageIdx{};\n auto res = Layer::ovkAcquireNextImageKHR(info.device, this->swapchain, UINT64_MAX,\n pass.acquireSemaphores.at(i).handle(), VK_NULL_HANDLE, &imageIdx);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to acquire next swapchain image\");\n\n // 4. copy output image to swapchain image\n pass.postCopySemaphores.at(i) = Mini::Semaphore(info.device);\n pass.prevPostCopySemaphores.at(i) = Mini::Semaphore(info.device);\n pass.postCopyBufs.at(i) = Mini::CommandBuffer(info.device, this->cmdPool);\n pass.postCopyBufs.at(i).begin();\n\n Utils::copyImage(pass.postCopyBufs.at(i).handle(),\n this->out_n.at(i).handle(),\n this->swapchainImages.at(imageIdx),\n this->extent.width, this->extent.height,\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,\n false, true);\n\n pass.postCopyBufs.at(i).end();\n pass.postCopyBufs.at(i).submit(info.queue.second,\n { pass.acquireSemaphores.at(i).handle(),\n pass.renderSemaphores.at(i).handle() },\n { pass.postCopySemaphores.at(i).handle(),\n pass.prevPostCopySemaphores.at(i).handle() });\n\n // 5. 
present swapchain image\n std::vector waitSemaphores{ pass.postCopySemaphores.at(i).handle() };\n if (i != 0) waitSemaphores.emplace_back(pass.prevPostCopySemaphores.at(i - 1).handle());\n\n const VkPresentInfoKHR presentInfo{\n .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,\n .pNext = i == 0 ? pNext : nullptr, // only set on first present\n .waitSemaphoreCount = static_cast(waitSemaphores.size()),\n .pWaitSemaphores = waitSemaphores.data(),\n .swapchainCount = 1,\n .pSwapchains = &this->swapchain,\n .pImageIndices = &imageIdx,\n };\n res = Layer::ovkQueuePresentKHR(queue, &presentInfo);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to present swapchain image\");\n }\n\n // 6. present actual next frame\n VkSemaphore lastPrevPostCopySemaphore =\n pass.prevPostCopySemaphores.at(conf.multiplier - 1 - 1).handle();\n const VkPresentInfoKHR presentInfo{\n .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,\n .waitSemaphoreCount = 1,\n .pWaitSemaphores = &lastPrevPostCopySemaphore,\n .swapchainCount = 1,\n .pSwapchains = &this->swapchain,\n .pImageIndices = &presentIdx,\n };\n auto res = Layer::ovkQueuePresentKHR(queue, &presentInfo);\n if (res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR)\n throw LSFG::vulkan_error(res, \"Failed to present swapchain image\");\n\n this->frameIdx++;\n return res;\n}\n"], ["/lsfg-vk/src/utils/utils.cpp", "#include \"utils/utils.hpp\"\n#include \"common/exception.hpp\"\n#include \"layer.hpp\"\n\n#include \n#include \n#include // NOLINT\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Utils;\n\nstd::pair Utils::findQueue(VkDevice device, VkPhysicalDevice physicalDevice,\n VkDeviceCreateInfo* desc, VkQueueFlags flags) {\n std::vector enabledQueues(desc->queueCreateInfoCount);\n std::copy_n(desc->pQueueCreateInfos, enabledQueues.size(), enabledQueues.data());\n\n uint32_t familyCount{};\n 
Layer::ovkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount, nullptr);\n std::vector families(familyCount);\n Layer::ovkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount,\n families.data());\n\n std::optional idx;\n for (const auto& queueInfo : enabledQueues) {\n if ((queueInfo.queueFamilyIndex < families.size()) &&\n (families[queueInfo.queueFamilyIndex].queueFlags & flags)) {\n idx = queueInfo.queueFamilyIndex;\n break;\n }\n }\n if (!idx.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"No suitable queue found\");\n\n VkQueue queue{};\n Layer::ovkGetDeviceQueue(device, *idx, 0, &queue);\n\n auto res = Layer::ovkSetDeviceLoaderData(device, queue);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unable to set device loader data for queue\");\n\n return { *idx, queue };\n}\n\nuint64_t Utils::getDeviceUUID(VkPhysicalDevice physicalDevice) {\n VkPhysicalDeviceProperties properties{};\n Layer::ovkGetPhysicalDeviceProperties(physicalDevice, &properties);\n\n return static_cast(properties.vendorID) << 32 | properties.deviceID;\n}\n\nuint32_t Utils::getMaxImageCount(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface) {\n VkSurfaceCapabilitiesKHR capabilities{};\n auto res = Layer::ovkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice,\n surface, &capabilities);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Failed to get surface capabilities\");\n if (capabilities.maxImageCount == 0)\n return 999; // :3\n return capabilities.maxImageCount;\n}\n\nstd::vector Utils::addExtensions(const char* const* extensions, size_t count,\n const std::vector& requiredExtensions) {\n std::vector ext(count);\n std::copy_n(extensions, count, ext.data());\n\n for (const auto& e : requiredExtensions) {\n auto it = std::ranges::find_if(ext,\n [e](const char* extName) {\n return std::string(extName) == std::string(e);\n });\n if (it == ext.end())\n ext.push_back(e);\n }\n\n return ext;\n}\n\nvoid 
Utils::copyImage(VkCommandBuffer buf,\n VkImage src, VkImage dst,\n uint32_t width, uint32_t height,\n VkPipelineStageFlags pre, VkPipelineStageFlags post,\n bool makeSrcPresentable, bool makeDstPresentable) {\n const VkImageMemoryBarrier srcBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,\n .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n .image = src,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const VkImageMemoryBarrier dstBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = dst,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const std::vector barriers = { srcBarrier, dstBarrier };\n Layer::ovkCmdPipelineBarrier(buf,\n pre, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,\n 0, nullptr, 0, nullptr,\n static_cast(barriers.size()), barriers.data());\n\n const VkImageBlit imageBlit{\n .srcSubresource = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .layerCount = 1\n },\n .srcOffsets = {\n { 0, 0, 0 },\n { static_cast(width), static_cast(height), 1 }\n },\n .dstSubresource = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .layerCount = 1\n },\n .dstOffsets = {\n { 0, 0, 0 },\n { static_cast(width), static_cast(height), 1 }\n }\n };\n Layer::ovkCmdBlitImage(\n buf,\n src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n 1, &imageBlit,\n VK_FILTER_NEAREST\n );\n\n if (makeSrcPresentable) {\n const VkImageMemoryBarrier presentBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,\n .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .image = src,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n 
.layerCount = 1\n }\n };\n Layer::ovkCmdPipelineBarrier(buf,\n VK_PIPELINE_STAGE_TRANSFER_BIT, post, 0,\n 0, nullptr, 0, nullptr,\n 1, &presentBarrier);\n }\n\n if (makeDstPresentable) {\n const VkImageMemoryBarrier presentBarrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT,\n .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,\n .image = dst,\n .subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n }\n };\n Layer::ovkCmdPipelineBarrier(buf,\n VK_PIPELINE_STAGE_TRANSFER_BIT, post, 0,\n 0, nullptr, 0, nullptr,\n 1, &presentBarrier);\n }\n}\n\nnamespace {\n auto& logCounts() {\n static std::unordered_map map;\n return map;\n }\n}\n\nvoid Utils::logLimitN(const std::string& id, size_t n, const std::string& message) {\n auto& count = logCounts()[id];\n if (count <= n)\n std::cerr << \"lsfg-vk: \" << message << '\\n';\n if (count == n)\n std::cerr << \"(above message has been repeated \" << n << \" times, suppressing further)\\n\";\n count++;\n}\n\nvoid Utils::resetLimitN(const std::string& id) noexcept {\n logCounts().erase(id);\n}\n\nstd::pair Utils::getProcessName() {\n const char* process_name = std::getenv(\"LSFG_PROCESS\");\n if (process_name && *process_name != '\\0')\n return { process_name, process_name };\n\n const char* benchmark_flag = std::getenv(\"LSFG_BENCHMARK\");\n if (benchmark_flag)\n return { \"benchmark\", \"benchmark\" };\n std::array exe{};\n\n const ssize_t exe_len = readlink(\"/proc/self/exe\", exe.data(), exe.size() - 1);\n if (exe_len <= 0)\n return { \"Unknown Process\", \"unknown\" };\n exe.at(static_cast(exe_len)) = '\\0';\n\n std::ifstream comm_file(\"/proc/self/comm\");\n if (!comm_file.is_open())\n return { std::string(exe.data()), \"unknown\" };\n std::array comm{};\n comm_file.read(comm.data(), 256);\n 
comm.at(static_cast(comm_file.gcount())) = '\\0';\n std::string comm_str(comm.data());\n if (comm_str.back() == '\\n')\n comm_str.pop_back();\n\n return{ std::string(exe.data()), comm_str };\n}\n\nstd::string Utils::getConfigFile() {\n const char* configFile = std::getenv(\"LSFG_CONFIG\");\n if (configFile && *configFile != '\\0')\n return{configFile};\n const char* xdgPath = std::getenv(\"XDG_CONFIG_HOME\");\n if (xdgPath && *xdgPath != '\\0')\n return std::string(xdgPath) + \"/lsfg-vk/conf.toml\";\n const char* homePath = std::getenv(\"HOME\");\n if (homePath && *homePath != '\\0')\n return std::string(homePath) + \"/.config/lsfg-vk/conf.toml\";\n return \"/etc/lsfg-vk/conf.toml\";\n}\n"], ["/lsfg-vk/framegen/src/common/utils.cpp", "#include \n#include \n\n#include \"common/utils.hpp\"\n#include \"core/buffer.hpp\"\n#include \"core/image.hpp\"\n#include \"core/device.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/fence.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Utils;\n\nBarrierBuilder& BarrierBuilder::addR2W(Core::Image& image) {\n this->barriers.emplace_back(VkImageMemoryBarrier2 {\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .srcStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .srcAccessMask = VK_ACCESS_2_SHADER_READ_BIT,\n .dstStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .dstAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_GENERAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n });\n image.setLayout(VK_IMAGE_LAYOUT_GENERAL);\n\n return *this;\n}\n\nBarrierBuilder& BarrierBuilder::addW2R(Core::Image& image) {\n this->barriers.emplace_back(VkImageMemoryBarrier2 {\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .srcStageMask = 
VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .srcAccessMask = VK_ACCESS_2_SHADER_WRITE_BIT,\n .dstStageMask = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,\n .dstAccessMask = VK_ACCESS_2_SHADER_READ_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_GENERAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n });\n image.setLayout(VK_IMAGE_LAYOUT_GENERAL);\n\n return *this;\n}\n\nvoid BarrierBuilder::build() const {\n const VkDependencyInfo dependencyInfo = {\n .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n .imageMemoryBarrierCount = static_cast(this->barriers.size()),\n .pImageMemoryBarriers = this->barriers.data()\n };\n vkCmdPipelineBarrier2(this->commandBuffer->handle(), &dependencyInfo);\n}\n\nvoid Utils::uploadImage(const Core::Device& device, const Core::CommandPool& commandPool,\n Core::Image& image, const std::string& path) {\n // read image bytecode\n std::ifstream file(path.data(), std::ios::binary | std::ios::ate);\n if (!file.is_open())\n throw std::system_error(errno, std::generic_category(), \"Failed to open image: \" + path);\n\n std::streamsize size = file.tellg();\n size -= 124 + 4; // dds header and magic bytes\n std::vector code(static_cast(size));\n\n file.seekg(124 + 4, std::ios::beg);\n if (!file.read(code.data(), size))\n throw std::system_error(errno, std::generic_category(), \"Failed to read image: \" + path);\n\n file.close();\n\n // copy data to buffer\n const Core::Buffer stagingBuffer(\n device, code.data(), static_cast(code.size()),\n VK_BUFFER_USAGE_TRANSFER_SRC_BIT\n );\n\n // perform the upload\n Core::CommandBuffer commandBuffer(device, commandPool);\n commandBuffer.begin();\n\n const VkImageMemoryBarrier barrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,\n .srcAccessMask = VK_ACCESS_NONE,\n .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image 
= image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n };\n image.setLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n vkCmdPipelineBarrier(\n commandBuffer.handle(),\n VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,\n 0, 0, nullptr, 0, nullptr, 1, &barrier\n );\n\n auto extent = image.getExtent();\n const VkBufferImageCopy region{\n .bufferImageHeight = 0,\n .imageSubresource = {\n .aspectMask = image.getAspectFlags(),\n .layerCount = 1\n },\n .imageExtent = { extent.width, extent.height, 1 }\n };\n vkCmdCopyBufferToImage(\n commandBuffer.handle(),\n stagingBuffer.handle(), image.handle(),\n VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion\n );\n\n commandBuffer.end();\n\n Core::Fence fence(device);\n commandBuffer.submit(device.getComputeQueue(), fence);\n\n // wait for the upload to complete\n if (!fence.wait(device))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Upload operation timed out\");\n}\n\nvoid Utils::clearImage(const Core::Device& device, Core::Image& image, bool white) {\n Core::Fence fence(device);\n const Core::CommandPool cmdPool(device);\n Core::CommandBuffer cmdBuf(device, cmdPool);\n cmdBuf.begin();\n\n const VkImageMemoryBarrier2 barrier{\n .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,\n .dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,\n .dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,\n .oldLayout = image.getLayout(),\n .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,\n .image = image.handle(),\n .subresourceRange = {\n .aspectMask = image.getAspectFlags(),\n .levelCount = 1,\n .layerCount = 1\n }\n };\n const VkDependencyInfo dependencyInfo = {\n .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,\n .imageMemoryBarrierCount = 1,\n .pImageMemoryBarriers = &barrier\n };\n image.setLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);\n vkCmdPipelineBarrier2(cmdBuf.handle(), &dependencyInfo);\n\n const float clearValue = white ? 
1.0F : 0.0F;\n const VkClearColorValue clearColor = {{ clearValue, clearValue, clearValue, clearValue }};\n const VkImageSubresourceRange subresourceRange = {\n .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,\n .levelCount = 1,\n .layerCount = 1\n };\n vkCmdClearColorImage(cmdBuf.handle(),\n image.handle(), image.getLayout(),\n &clearColor,\n 1, &subresourceRange);\n\n cmdBuf.end();\n\n cmdBuf.submit(device.getComputeQueue(), fence);\n if (!fence.wait(device))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Failed to wait for clearing fence.\");\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/mipmaps.cpp", "#include \n#include \n\n#include \"v3_1/shaders/mipmaps.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/image.hpp\"\n#include \"core/commandbuffer.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nMipmaps::Mipmaps(Vulkan& vk,\n Core::Image inImg_0, Core::Image inImg_1)\n : inImg_0(std::move(inImg_0)), inImg_1(std::move(inImg_1)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"mipmaps\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 7, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"mipmaps\");\n this->buffer = vk.resources.getBuffer(vk.device);\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 2; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModule);\n\n // create outputs\n const VkExtent2D flowExtent{\n .width = static_cast(\n static_cast(this->inImg_0.getExtent().width) / vk.flowScale),\n .height = static_cast(\n static_cast(this->inImg_0.getExtent().height) / vk.flowScale)\n };\n for (size_t i = 0; i < 7; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { flowExtent.width >> i, flowExtent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t fc = 0; fc < 2; fc++)\n 
this->descriptorSets.at(fc).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, (fc % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Mipmaps::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto flowExtent = this->outImgs.at(0).getExtent();\n const uint32_t threadsX = (flowExtent.width + 63) >> 6;\n const uint32_t threadsY = (flowExtent.height + 63) >> 6;\n\n Utils::BarrierBuilder(buf)\n .addW2R((frameCount % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .addR2W(this->outImgs)\n .build();\n\n this->pipeline.bind(buf);\n this->descriptorSets.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/mipmaps.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/mipmaps.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/image.hpp\"\n#include \"core/commandbuffer.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nMipmaps::Mipmaps(Vulkan& vk,\n Core::Image inImg_0, Core::Image inImg_1)\n : inImg_0(std::move(inImg_0)), inImg_1(std::move(inImg_1)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"p_mipmaps\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 7, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"p_mipmaps\");\n this->buffer = vk.resources.getBuffer(vk.device);\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 2; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModule);\n\n // create outputs\n const VkExtent2D flowExtent{\n .width = static_cast(\n static_cast(this->inImg_0.getExtent().width) / 
vk.flowScale),\n .height = static_cast(\n static_cast(this->inImg_0.getExtent().height) / vk.flowScale)\n };\n for (size_t i = 0; i < 7; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { flowExtent.width >> i, flowExtent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t fc = 0; fc < 2; fc++)\n this->descriptorSets.at(fc).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, (fc % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Mipmaps::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto flowExtent = this->outImgs.at(0).getExtent();\n const uint32_t threadsX = (flowExtent.width + 63) >> 6;\n const uint32_t threadsY = (flowExtent.height + 63) >> 6;\n\n Utils::BarrierBuilder(buf)\n .addW2R((frameCount % 2 == 0) ? this->inImg_0 : this->inImg_1)\n .addR2W(this->outImgs)\n .build();\n\n this->pipeline.bind(buf);\n this->descriptorSets.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/generate.cpp", "#include \n#include \n\n#include \"v3_1/shaders/generate.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nGenerate::Generate(Vulkan& vk,\n Core::Image inImg1, Core::Image inImg2,\n Core::Image inImg3, Core::Image inImg4, Core::Image inImg5,\n const std::vector& fds, VkFormat format)\n : inImg1(std::move(inImg1)), inImg2(std::move(inImg2)),\n inImg3(std::move(inImg3)), inImg4(std::move(inImg4)),\n inImg5(std::move(inImg5)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"generate\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, 
VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"generate\");\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg1.getExtent();\n for (size_t i = 0; i < vk.generationCount; i++)\n this->outImgs.emplace_back(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, fds.empty() ? -1 : fds.at(i));\n\n // hook up shaders\n for (size_t i = 0; i < vk.generationCount; i++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(i + 1) / static_cast(vk.generationCount + 1));\n for (size_t j = 0; j < 2; j++) {\n pass.descriptorSet.at(j) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModule);\n pass.descriptorSet.at(j).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg2 : this->inImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? 
this->inImg1 : this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg3)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg4)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg5)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n }\n }\n}\n\nvoid Generate::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first pass\n const auto extent = this->inImg1.getExtent();\n const uint32_t threadsX = (extent.width + 15) >> 4;\n const uint32_t threadsY = (extent.height + 15) >> 4;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg1)\n .addW2R(this->inImg2)\n .addW2R(this->inImg3)\n .addW2R(this->inImg4)\n .addW2R(this->inImg5)\n .addR2W(this->outImgs.at(pass_idx))\n .build();\n\n this->pipeline.bind(buf);\n pass.descriptorSet.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/generate.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/generate.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nGenerate::Generate(Vulkan& vk,\n Core::Image inImg1, Core::Image inImg2,\n Core::Image inImg3, Core::Image inImg4, Core::Image inImg5,\n const std::vector& fds, VkFormat format)\n : inImg1(std::move(inImg1)), inImg2(std::move(inImg2)),\n inImg3(std::move(inImg3)), inImg4(std::move(inImg4)),\n inImg5(std::move(inImg5)) {\n // create resources\n this->shaderModule = vk.shaders.getShader(vk.device, \"p_generate\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } });\n this->pipeline = vk.shaders.getPipeline(vk.device, \"p_generate\");\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = 
vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg1.getExtent();\n for (size_t i = 0; i < vk.generationCount; i++)\n this->outImgs.emplace_back(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, fds.empty() ? -1 : fds.at(i));\n\n // hook up shaders\n for (size_t i = 0; i < vk.generationCount; i++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(i + 1) / static_cast(vk.generationCount + 1));\n for (size_t j = 0; j < 2; j++) {\n pass.descriptorSet.at(j) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModule);\n pass.descriptorSet.at(j).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? this->inImg2 : this->inImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, j == 0 ? 
this->inImg1 : this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg3)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg4)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg5)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n }\n }\n}\n\nvoid Generate::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first pass\n const auto extent = this->inImg1.getExtent();\n const uint32_t threadsX = (extent.width + 15) >> 4;\n const uint32_t threadsY = (extent.height + 15) >> 4;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg1)\n .addW2R(this->inImg2)\n .addW2R(this->inImg3)\n .addW2R(this->inImg4)\n .addW2R(this->inImg5)\n .addR2W(this->outImgs.at(pass_idx))\n .build();\n\n this->pipeline.bind(buf);\n pass.descriptorSet.at(frameCount % 2).bind(buf, this->pipeline);\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/alpha.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/alpha.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nAlpha::Alpha(Vulkan& vk, Core::Image inImg) : inImg(std::move(inImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_alpha[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_alpha[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, 
VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_alpha[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_alpha[3]\")\n }};\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 3; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i));\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(3));\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg.getExtent();\n const VkExtent2D halfExtent = {\n .width = (extent.width + 1) >> 1,\n .height = (extent.height + 1) >> 1\n };\n this->tempImg1 = Core::Image(vk.device, halfExtent);\n this->tempImg2 = Core::Image(vk.device, halfExtent);\n\n const VkExtent2D quarterExtent = {\n .width = (halfExtent.width + 1) >> 1,\n .height = (halfExtent.height + 1) >> 1\n };\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs3.at(i) = Core::Image(vk.device, quarterExtent);\n for (size_t j = 0; j < 3; j++)\n this->outImgs.at(j).at(i) = Core::Image(vk.device, quarterExtent);\n }\n\n // hook up shaders\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImg1)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImg1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImg2)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs3)\n 
.build();\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n}\n\nvoid Alpha::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto halfExtent = this->tempImg1.getExtent();\n uint32_t threadsX = (halfExtent.width + 7) >> 3;\n uint32_t threadsY = (halfExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg)\n .addR2W(this->tempImg1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImg1)\n .addR2W(this->tempImg2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n const auto quarterExtent = this->tempImgs3.at(0).getExtent();\n threadsX = (quarterExtent.width + 7) >> 3;\n threadsY = (quarterExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImg2)\n .addR2W(this->tempImgs3)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs3)\n .addR2W(this->outImgs.at(frameCount % 3))\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->lastDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/alpha.cpp", "#include \n#include \n\n#include \"v3_1/shaders/alpha.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n\nusing namespace 
LSFG_3_1::Shaders;\n\nAlpha::Alpha(Vulkan& vk, Core::Image inImg) : inImg(std::move(inImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"alpha[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"alpha[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"alpha[0]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[1]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[2]\"),\n vk.shaders.getPipeline(vk.device, \"alpha[3]\")\n }};\n this->sampler = vk.resources.getSampler(vk.device);\n for (size_t i = 0; i < 3; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i));\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(3));\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImg.getExtent();\n const VkExtent2D halfExtent = {\n .width = (extent.width + 1) >> 1,\n .height = (extent.height + 1) >> 1\n };\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, halfExtent);\n this->tempImgs2.at(i) = Core::Image(vk.device, halfExtent);\n }\n\n const VkExtent2D quarterExtent = {\n .width = (halfExtent.width + 1) >> 1,\n .height = (halfExtent.height + 1) >> 1\n };\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs3.at(i) = Core::Image(vk.device, 
quarterExtent);\n for (size_t j = 0; j < 3; j++)\n this->outImgs.at(j).at(i) = Core::Image(vk.device, quarterExtent);\n }\n\n // hook up shaders\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs3)\n .build();\n for (size_t i = 0; i < 3; i++)\n this->lastDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->sampler)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs.at(i))\n .build();\n}\n\nvoid Alpha::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto halfExtent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (halfExtent.width + 7) >> 3;\n uint32_t threadsY = (halfExtent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImg)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n const auto quarterExtent = this->tempImgs3.at(0).getExtent();\n threadsX = (quarterExtent.width + 7) >> 3;\n threadsY = (quarterExtent.height + 7) >> 
3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs3)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs3)\n .addR2W(this->outImgs.at(frameCount % 3))\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->lastDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/gamma.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/gamma.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nGamma::Gamma(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg(std::move(optImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_gamma[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_gamma[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, 
VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_gamma[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_gamma[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 3; i++)\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n for (size_t i = 0; i < 2; i++)\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n\n this->outImg = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n !this->optImg.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 
this->tempImgs1)\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg)\n .build();\n }\n}\n\nvoid Gamma::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t 
threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg)\n .addW2R(this->inImg2)\n .addR2W(this->outImg)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/gamma.cpp", "#include \n#include \n\n#include \"v3_1/shaders/gamma.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nGamma::Gamma(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg)\n : 
inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg(std::move(optImg)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"gamma[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 9, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"gamma[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"gamma[0]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[1]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[2]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[3]\"),\n vk.shaders.getPipeline(vk.device, \"gamma[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n this->outImg = 
Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n !this->optImg.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(2))\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(2))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n pass.descriptorSets.at(2) = 
Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg)\n .build();\n }\n}\n\nvoid Gamma::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .addR2W(this->tempImgs1.at(2))\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->tempImgs1.at(2))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n 
.addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg)\n .addW2R(this->inImg2)\n .addR2W(this->outImg)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/src/layer.cpp", "#include \"layer.hpp\"\n#include \"common/exception.hpp\"\n#include \"config/config.hpp\"\n#include \"hooks.hpp\"\n\n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace {\n PFN_vkCreateInstance next_vkCreateInstance{};\n PFN_vkDestroyInstance next_vkDestroyInstance{};\n\n PFN_vkCreateDevice next_vkCreateDevice{};\n PFN_vkDestroyDevice next_vkDestroyDevice{};\n\n PFN_vkSetDeviceLoaderData next_vSetDeviceLoaderData{};\n\n PFN_vkGetInstanceProcAddr next_vkGetInstanceProcAddr{};\n PFN_vkGetDeviceProcAddr next_vkGetDeviceProcAddr{};\n\n PFN_vkGetPhysicalDeviceQueueFamilyProperties next_vkGetPhysicalDeviceQueueFamilyProperties{};\n PFN_vkGetPhysicalDeviceMemoryProperties next_vkGetPhysicalDeviceMemoryProperties{};\n PFN_vkGetPhysicalDeviceProperties next_vkGetPhysicalDeviceProperties{};\n PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR{};\n\n PFN_vkCreateSwapchainKHR next_vkCreateSwapchainKHR{};\n PFN_vkQueuePresentKHR next_vkQueuePresentKHR{};\n PFN_vkDestroySwapchainKHR next_vkDestroySwapchainKHR{};\n PFN_vkGetSwapchainImagesKHR next_vkGetSwapchainImagesKHR{};\n PFN_vkAllocateCommandBuffers 
next_vkAllocateCommandBuffers{};\n PFN_vkFreeCommandBuffers next_vkFreeCommandBuffers{};\n PFN_vkBeginCommandBuffer next_vkBeginCommandBuffer{};\n PFN_vkEndCommandBuffer next_vkEndCommandBuffer{};\n PFN_vkCreateCommandPool next_vkCreateCommandPool{};\n PFN_vkDestroyCommandPool next_vkDestroyCommandPool{};\n PFN_vkCreateImage next_vkCreateImage{};\n PFN_vkDestroyImage next_vkDestroyImage{};\n PFN_vkGetImageMemoryRequirements next_vkGetImageMemoryRequirements{};\n PFN_vkBindImageMemory next_vkBindImageMemory{};\n PFN_vkAllocateMemory next_vkAllocateMemory{};\n PFN_vkFreeMemory next_vkFreeMemory{};\n PFN_vkCreateSemaphore next_vkCreateSemaphore{};\n PFN_vkDestroySemaphore next_vkDestroySemaphore{};\n PFN_vkGetMemoryFdKHR next_vkGetMemoryFdKHR{};\n PFN_vkGetSemaphoreFdKHR next_vkGetSemaphoreFdKHR{};\n PFN_vkGetDeviceQueue next_vkGetDeviceQueue{};\n PFN_vkQueueSubmit next_vkQueueSubmit{};\n PFN_vkCmdPipelineBarrier next_vkCmdPipelineBarrier{};\n PFN_vkCmdBlitImage next_vkCmdBlitImage{};\n PFN_vkAcquireNextImageKHR next_vkAcquireNextImageKHR{};\n\n template\n bool initInstanceFunc(VkInstance instance, const char* name, T* func) {\n *func = reinterpret_cast(next_vkGetInstanceProcAddr(instance, name));\n if (!*func) {\n std::cerr << \"(no function pointer for \" << name << \")\\n\";\n return false;\n }\n return true;\n }\n\n template\n bool initDeviceFunc(VkDevice device, const char* name, T* func) {\n *func = reinterpret_cast(next_vkGetDeviceProcAddr(device, name));\n if (!*func) {\n std::cerr << \"(no function pointer for \" << name << \")\\n\";\n return false;\n }\n return true;\n }\n}\n\nnamespace {\n VkResult layer_vkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n try {\n // prepare layer | NOLINTBEGIN\n auto* layerDesc = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc && (layerDesc->sType != VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO\n || layerDesc->function 
!= VK_LAYER_LINK_INFO)) {\n layerDesc = const_cast(\n reinterpret_cast(layerDesc->pNext));\n }\n if (!layerDesc)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer creation info found in pNext chain\");\n\n next_vkGetInstanceProcAddr = layerDesc->u.pLayerInfo->pfnNextGetInstanceProcAddr;\n layerDesc->u.pLayerInfo = layerDesc->u.pLayerInfo->pNext;\n\n bool success = initInstanceFunc(nullptr, \"vkCreateInstance\", &next_vkCreateInstance);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get instance function pointer for vkCreateInstance\");\n\n // NOLINTEND | skip initialization if the layer is disabled\n if (!Config::activeConf.enable) {\n auto res = next_vkCreateInstance(pCreateInfo, pAllocator, pInstance);\n initInstanceFunc(*pInstance, \"vkCreateDevice\", &next_vkCreateDevice);\n return res;\n }\n\n // create instance\n try {\n auto* createInstanceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateInstance\"]);\n auto res = createInstanceHook(pCreateInfo, pAllocator, pInstance);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Failed to create Vulkan instance\", e);\n }\n\n // get relevant function pointers from the next layer\n success = true;\n success &= initInstanceFunc(*pInstance,\n \"vkDestroyInstance\", &next_vkDestroyInstance);\n success &= initInstanceFunc(*pInstance,\n \"vkCreateDevice\", &next_vkCreateDevice); // workaround mesa bug\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceQueueFamilyProperties\", &next_vkGetPhysicalDeviceQueueFamilyProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceMemoryProperties\", &next_vkGetPhysicalDeviceMemoryProperties);\n success &= initInstanceFunc(*pInstance,\n \"vkGetPhysicalDeviceProperties\", &next_vkGetPhysicalDeviceProperties);\n success &= initInstanceFunc(*pInstance,\n 
\"vkGetPhysicalDeviceSurfaceCapabilitiesKHR\", &next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get instance function pointers\");\n\n std::cerr << \"lsfg-vk: Vulkan instance layer initialized successfully.\\n\";\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while initializing the Vulkan instance layer:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return VK_SUCCESS;\n }\n\n VkResult layer_vkCreateDevice( // NOLINTBEGIN\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n try {\n // prepare layer | NOLINTBEGIN\n auto* layerDesc = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc && (layerDesc->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO\n || layerDesc->function != VK_LAYER_LINK_INFO)) {\n layerDesc = const_cast(\n reinterpret_cast(layerDesc->pNext));\n }\n if (!layerDesc)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer creation info found in pNext chain\");\n\n next_vkGetDeviceProcAddr = layerDesc->u.pLayerInfo->pfnNextGetDeviceProcAddr;\n layerDesc->u.pLayerInfo = layerDesc->u.pLayerInfo->pNext;\n\n auto* layerDesc2 = const_cast(\n reinterpret_cast(pCreateInfo->pNext));\n while (layerDesc2 && (layerDesc2->sType != VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO\n || layerDesc2->function != VK_LOADER_DATA_CALLBACK)) {\n layerDesc2 = const_cast(\n reinterpret_cast(layerDesc2->pNext));\n }\n if (!layerDesc2)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"No layer device loader data found in pNext chain\");\n\n next_vSetDeviceLoaderData = layerDesc2->u.pfnSetDeviceLoaderData;\n\n // NOLINTEND | skip initialization if the layer is disabled\n if (!Config::activeConf.enable)\n return next_vkCreateDevice(physicalDevice, pCreateInfo, 
pAllocator, pDevice);\n\n // create device\n try {\n auto* createDeviceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateDevicePre\"]);\n auto res = createDeviceHook(physicalDevice, pCreateInfo, pAllocator, pDevice);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Failed to create Vulkan device\", e);\n }\n\n // get relevant function pointers from the next layer\n bool success = true;\n success &= initDeviceFunc(*pDevice, \"vkDestroyDevice\", &next_vkDestroyDevice);\n success &= initDeviceFunc(*pDevice, \"vkCreateSwapchainKHR\", &next_vkCreateSwapchainKHR);\n success &= initDeviceFunc(*pDevice, \"vkQueuePresentKHR\", &next_vkQueuePresentKHR);\n success &= initDeviceFunc(*pDevice, \"vkDestroySwapchainKHR\", &next_vkDestroySwapchainKHR);\n success &= initDeviceFunc(*pDevice, \"vkGetSwapchainImagesKHR\", &next_vkGetSwapchainImagesKHR);\n success &= initDeviceFunc(*pDevice, \"vkAllocateCommandBuffers\", &next_vkAllocateCommandBuffers);\n success &= initDeviceFunc(*pDevice, \"vkFreeCommandBuffers\", &next_vkFreeCommandBuffers);\n success &= initDeviceFunc(*pDevice, \"vkBeginCommandBuffer\", &next_vkBeginCommandBuffer);\n success &= initDeviceFunc(*pDevice, \"vkEndCommandBuffer\", &next_vkEndCommandBuffer);\n success &= initDeviceFunc(*pDevice, \"vkCreateCommandPool\", &next_vkCreateCommandPool);\n success &= initDeviceFunc(*pDevice, \"vkDestroyCommandPool\", &next_vkDestroyCommandPool);\n success &= initDeviceFunc(*pDevice, \"vkCreateImage\", &next_vkCreateImage);\n success &= initDeviceFunc(*pDevice, \"vkDestroyImage\", &next_vkDestroyImage);\n success &= initDeviceFunc(*pDevice, \"vkGetImageMemoryRequirements\", &next_vkGetImageMemoryRequirements);\n success &= initDeviceFunc(*pDevice, \"vkBindImageMemory\", &next_vkBindImageMemory);\n success &= initDeviceFunc(*pDevice, \"vkGetMemoryFdKHR\", &next_vkGetMemoryFdKHR);\n success &= initDeviceFunc(*pDevice, 
\"vkAllocateMemory\", &next_vkAllocateMemory);\n success &= initDeviceFunc(*pDevice, \"vkFreeMemory\", &next_vkFreeMemory);\n success &= initDeviceFunc(*pDevice, \"vkCreateSemaphore\", &next_vkCreateSemaphore);\n success &= initDeviceFunc(*pDevice, \"vkDestroySemaphore\", &next_vkDestroySemaphore);\n success &= initDeviceFunc(*pDevice, \"vkGetSemaphoreFdKHR\", &next_vkGetSemaphoreFdKHR);\n success &= initDeviceFunc(*pDevice, \"vkGetDeviceQueue\", &next_vkGetDeviceQueue);\n success &= initDeviceFunc(*pDevice, \"vkQueueSubmit\", &next_vkQueueSubmit);\n success &= initDeviceFunc(*pDevice, \"vkCmdPipelineBarrier\", &next_vkCmdPipelineBarrier);\n success &= initDeviceFunc(*pDevice, \"vkCmdBlitImage\", &next_vkCmdBlitImage);\n success &= initDeviceFunc(*pDevice, \"vkAcquireNextImageKHR\", &next_vkAcquireNextImageKHR);\n if (!success)\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED,\n \"Failed to get device function pointers\");\n\n auto postCreateDeviceHook = reinterpret_cast(\n Hooks::hooks[\"vkCreateDevicePost\"]);\n auto res = postCreateDeviceHook(physicalDevice, pCreateInfo, pAllocator, pDevice);\n if (res != VK_SUCCESS)\n throw LSFG::vulkan_error(res, \"Unknown error\");\n\n std::cerr << \"lsfg-vk: Vulkan device layer initialized successfully.\\n\";\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while initializing the Vulkan device layer:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return VK_ERROR_INITIALIZATION_FAILED;\n }\n return VK_SUCCESS;\n } // NOLINTEND\n}\n\nconst std::unordered_map layerFunctions = {\n { \"vkCreateInstance\",\n reinterpret_cast(&layer_vkCreateInstance) },\n { \"vkCreateDevice\",\n reinterpret_cast(&layer_vkCreateDevice) },\n { \"vkGetInstanceProcAddr\",\n reinterpret_cast(&layer_vkGetInstanceProcAddr) },\n { \"vkGetDeviceProcAddr\",\n reinterpret_cast(&layer_vkGetDeviceProcAddr) },\n};\n\nPFN_vkVoidFunction layer_vkGetInstanceProcAddr(VkInstance instance, const char* pName) {\n const 
std::string name(pName);\n auto it = layerFunctions.find(name);\n if (it != layerFunctions.end())\n return it->second;\n\n it = Hooks::hooks.find(name);\n if (it != Hooks::hooks.end() && Config::activeConf.enable)\n return it->second;\n\n return next_vkGetInstanceProcAddr(instance, pName);\n}\n\nPFN_vkVoidFunction layer_vkGetDeviceProcAddr(VkDevice device, const char* pName) {\n const std::string name(pName);\n auto it = layerFunctions.find(name);\n if (it != layerFunctions.end())\n return it->second;\n\n it = Hooks::hooks.find(name);\n if (it != Hooks::hooks.end() && Config::activeConf.enable)\n return it->second;\n\n return next_vkGetDeviceProcAddr(device, pName);\n}\n\n// original functions\nnamespace Layer {\n VkResult ovkCreateInstance(\n const VkInstanceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkInstance* pInstance) {\n return next_vkCreateInstance(pCreateInfo, pAllocator, pInstance);\n }\n void ovkDestroyInstance(\n VkInstance instance,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyInstance(instance, pAllocator);\n }\n\n VkResult ovkCreateDevice(\n VkPhysicalDevice physicalDevice,\n const VkDeviceCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDevice* pDevice) {\n return next_vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);\n }\n void ovkDestroyDevice(\n VkDevice device,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyDevice(device, pAllocator);\n }\n\n VkResult ovkSetDeviceLoaderData(VkDevice device, void* object) {\n return next_vSetDeviceLoaderData(device, object);\n }\n\n PFN_vkVoidFunction ovkGetInstanceProcAddr(\n VkInstance instance,\n const char* pName) {\n return next_vkGetInstanceProcAddr(instance, pName);\n }\n PFN_vkVoidFunction ovkGetDeviceProcAddr(\n VkDevice device,\n const char* pName) {\n return next_vkGetDeviceProcAddr(device, pName);\n }\n\n void ovkGetPhysicalDeviceQueueFamilyProperties(\n VkPhysicalDevice physicalDevice,\n uint32_t* 
pQueueFamilyPropertyCount,\n VkQueueFamilyProperties* pQueueFamilyProperties) {\n next_vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);\n }\n void ovkGetPhysicalDeviceMemoryProperties(\n VkPhysicalDevice physicalDevice,\n VkPhysicalDeviceMemoryProperties* pMemoryProperties) {\n next_vkGetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);\n }\n void ovkGetPhysicalDeviceProperties(\n VkPhysicalDevice physicalDevice,\n VkPhysicalDeviceProperties* pProperties) {\n next_vkGetPhysicalDeviceProperties(physicalDevice, pProperties);\n }\n VkResult ovkGetPhysicalDeviceSurfaceCapabilitiesKHR(\n VkPhysicalDevice physicalDevice,\n VkSurfaceKHR surface,\n VkSurfaceCapabilitiesKHR* pSurfaceCapabilities) {\n return next_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);\n }\n\n VkResult ovkCreateSwapchainKHR(\n VkDevice device,\n const VkSwapchainCreateInfoKHR* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSwapchainKHR* pSwapchain) {\n return next_vkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);\n }\n VkResult ovkQueuePresentKHR(\n VkQueue queue,\n const VkPresentInfoKHR* pPresentInfo) {\n return next_vkQueuePresentKHR(queue, pPresentInfo);\n }\n void ovkDestroySwapchainKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroySwapchainKHR(device, swapchain, pAllocator);\n }\n\n VkResult ovkGetSwapchainImagesKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n uint32_t* pSwapchainImageCount,\n VkImage* pSwapchainImages) {\n return next_vkGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);\n }\n\n VkResult ovkAllocateCommandBuffers(\n VkDevice device,\n const VkCommandBufferAllocateInfo* pAllocateInfo,\n VkCommandBuffer* pCommandBuffers) {\n return next_vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);\n }\n void ovkFreeCommandBuffers(\n 
VkDevice device,\n VkCommandPool commandPool,\n uint32_t commandBufferCount,\n const VkCommandBuffer* pCommandBuffers) {\n next_vkFreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);\n }\n\n VkResult ovkBeginCommandBuffer(\n VkCommandBuffer commandBuffer,\n const VkCommandBufferBeginInfo* pBeginInfo) {\n return next_vkBeginCommandBuffer(commandBuffer, pBeginInfo);\n }\n VkResult ovkEndCommandBuffer(\n VkCommandBuffer commandBuffer) {\n return next_vkEndCommandBuffer(commandBuffer);\n }\n\n VkResult ovkCreateCommandPool(\n VkDevice device,\n const VkCommandPoolCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkCommandPool* pCommandPool) {\n return next_vkCreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);\n }\n void ovkDestroyCommandPool(\n VkDevice device,\n VkCommandPool commandPool,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyCommandPool(device, commandPool, pAllocator);\n }\n\n VkResult ovkCreateImage(\n VkDevice device,\n const VkImageCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkImage* pImage) {\n return next_vkCreateImage(device, pCreateInfo, pAllocator, pImage);\n }\n void ovkDestroyImage(\n VkDevice device,\n VkImage image,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroyImage(device, image, pAllocator);\n }\n\n void ovkGetImageMemoryRequirements(\n VkDevice device,\n VkImage image,\n VkMemoryRequirements* pMemoryRequirements) {\n next_vkGetImageMemoryRequirements(device, image, pMemoryRequirements);\n }\n VkResult ovkBindImageMemory(\n VkDevice device,\n VkImage image,\n VkDeviceMemory memory,\n VkDeviceSize memoryOffset) {\n return next_vkBindImageMemory(device, image, memory, memoryOffset);\n }\n\n VkResult ovkAllocateMemory(\n VkDevice device,\n const VkMemoryAllocateInfo* pAllocateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkDeviceMemory* pMemory) {\n return next_vkAllocateMemory(device, pAllocateInfo, pAllocator, pMemory);\n }\n 
void ovkFreeMemory(\n VkDevice device,\n VkDeviceMemory memory,\n const VkAllocationCallbacks* pAllocator) {\n next_vkFreeMemory(device, memory, pAllocator);\n }\n\n VkResult ovkCreateSemaphore(\n VkDevice device,\n const VkSemaphoreCreateInfo* pCreateInfo,\n const VkAllocationCallbacks* pAllocator,\n VkSemaphore* pSemaphore) {\n return next_vkCreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);\n }\n void ovkDestroySemaphore(\n VkDevice device,\n VkSemaphore semaphore,\n const VkAllocationCallbacks* pAllocator) {\n next_vkDestroySemaphore(device, semaphore, pAllocator);\n }\n\n VkResult ovkGetMemoryFdKHR(\n VkDevice device,\n const VkMemoryGetFdInfoKHR* pGetFdInfo,\n int* pFd) {\n return next_vkGetMemoryFdKHR(device, pGetFdInfo, pFd);\n }\n VkResult ovkGetSemaphoreFdKHR(\n VkDevice device,\n const VkSemaphoreGetFdInfoKHR* pGetFdInfo,\n int* pFd) {\n return next_vkGetSemaphoreFdKHR(device, pGetFdInfo, pFd);\n }\n\n void ovkGetDeviceQueue(\n VkDevice device,\n uint32_t queueFamilyIndex,\n uint32_t queueIndex,\n VkQueue* pQueue) {\n next_vkGetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);\n }\n VkResult ovkQueueSubmit(\n VkQueue queue,\n uint32_t submitCount,\n const VkSubmitInfo* pSubmits,\n VkFence fence) {\n return next_vkQueueSubmit(queue, submitCount, pSubmits, fence);\n }\n\n void ovkCmdPipelineBarrier(\n VkCommandBuffer commandBuffer,\n VkPipelineStageFlags srcStageMask,\n VkPipelineStageFlags dstStageMask,\n VkDependencyFlags dependencyFlags,\n uint32_t memoryBarrierCount,\n const VkMemoryBarrier* pMemoryBarriers,\n uint32_t bufferMemoryBarrierCount,\n const VkBufferMemoryBarrier* pBufferMemoryBarriers,\n uint32_t imageMemoryBarrierCount,\n const VkImageMemoryBarrier* pImageMemoryBarriers) {\n next_vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,\n memoryBarrierCount, pMemoryBarriers,\n bufferMemoryBarrierCount, pBufferMemoryBarriers,\n imageMemoryBarrierCount, pImageMemoryBarriers);\n }\n void 
ovkCmdBlitImage(\n VkCommandBuffer commandBuffer,\n VkImage srcImage,\n VkImageLayout srcImageLayout,\n VkImage dstImage,\n VkImageLayout dstImageLayout,\n uint32_t regionCount,\n const VkImageBlit* pRegions,\n VkFilter filter) {\n next_vkCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);\n }\n\n VkResult ovkAcquireNextImageKHR(\n VkDevice device,\n VkSwapchainKHR swapchain,\n uint64_t timeout,\n VkSemaphore semaphore,\n VkFence fence,\n uint32_t* pImageIndex) {\n return next_vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);\n }\n}\n"], ["/lsfg-vk/src/config/config.cpp", "#include \"config/config.hpp\"\n#include \"common/exception.hpp\"\n\n#include \"config/default_conf.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Config;\n\nnamespace {\n Configuration globalConf{};\n std::optional> gameConfs;\n}\n\nConfiguration Config::activeConf{};\n\nnamespace {\n /// Turn a string into a VkPresentModeKHR enum value.\n VkPresentModeKHR into_present(const std::string& mode) {\n if (mode == \"fifo\" || mode == \"vsync\")\n return VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR;\n if (mode == \"mailbox\")\n return VkPresentModeKHR::VK_PRESENT_MODE_MAILBOX_KHR;\n if (mode == \"immediate\")\n return VkPresentModeKHR::VK_PRESENT_MODE_IMMEDIATE_KHR;\n return VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR;\n }\n}\n\nvoid Config::updateConfig(const std::string& file) {\n if (!std::filesystem::exists(file)) {\n std::cerr << \"lsfg-vk: Placing default configuration file at \" << file << '\\n';\n const auto parent = std::filesystem::path(file).parent_path();\n if (!std::filesystem::exists(parent))\n if (!std::filesystem::create_directories(parent))\n throw std::runtime_error(\"Unable to create configuration directory at \" + parent.string());\n\n std::ofstream 
out(file);\n if (!out.is_open())\n throw std::runtime_error(\"Unable to create configuration file at \" + file);\n out << DEFAULT_CONFIG;\n out.close();\n }\n\n // parse config file\n std::optional parsed;\n try {\n parsed.emplace(toml::parse(file));\n if (!parsed->contains(\"version\"))\n throw std::runtime_error(\"Configuration file is missing 'version' field\");\n if (parsed->at(\"version\").as_integer() != 1)\n throw std::runtime_error(\"Configuration file version is not supported, expected 1\");\n } catch (const std::exception& e) {\n throw LSFG::rethrowable_error(\"Unable to parse configuration file\", e);\n }\n auto& toml = *parsed;\n\n // parse global configuration\n const toml::value globalTable = toml::find_or_default(toml, \"global\");\n const Configuration global{\n .dll = toml::find_or(globalTable, \"dll\", std::string()),\n .config_file = file,\n .timestamp = std::filesystem::last_write_time(file)\n };\n\n // validate global configuration\n if (global.multiplier < 2)\n throw std::runtime_error(\"Global Multiplier cannot be less than 2\");\n if (global.flowScale < 0.25F || global.flowScale > 1.0F)\n throw std::runtime_error(\"Flow scale must be between 0.25 and 1.0\");\n\n // parse game-specific configuration\n std::unordered_map games;\n const toml::value gamesList = toml::find_or_default(toml, \"game\");\n for (const auto& gameTable : gamesList.as_array()) {\n if (!gameTable.is_table())\n throw std::runtime_error(\"Invalid game configuration entry\");\n if (!gameTable.contains(\"exe\"))\n throw std::runtime_error(\"Game override missing 'exe' field\");\n\n const std::string exe = toml::find(gameTable, \"exe\");\n Configuration game{\n .enable = true,\n .dll = global.dll,\n .multiplier = toml::find_or(gameTable, \"multiplier\", 2U),\n .flowScale = toml::find_or(gameTable, \"flow_scale\", 1.0F),\n .performance = toml::find_or(gameTable, \"performance_mode\", false),\n .hdr = toml::find_or(gameTable, \"hdr_mode\", false),\n .e_present = 
into_present(toml::find_or(gameTable, \"experimental_present_mode\", \"\")),\n .config_file = file,\n .timestamp = global.timestamp\n };\n\n // validate the configuration\n if (game.multiplier < 1)\n throw std::runtime_error(\"Multiplier cannot be less than 1\");\n if (game.flowScale < 0.25F || game.flowScale > 1.0F)\n throw std::runtime_error(\"Flow scale must be between 0.25 and 1.0\");\n games[exe] = std::move(game);\n }\n\n // store configurations\n globalConf = global;\n gameConfs = std::move(games);\n}\n\nConfiguration Config::getConfig(const std::pair& name) {\n // process legacy environment variables\n if (std::getenv(\"LSFG_LEGACY\")) {\n Configuration conf{\n .enable = true,\n .multiplier = 2,\n .flowScale = 1.0F,\n .e_present = VkPresentModeKHR::VK_PRESENT_MODE_FIFO_KHR\n };\n\n const char* dll = std::getenv(\"LSFG_DLL_PATH\");\n if (dll) conf.dll = std::string(dll);\n const char* multiplier = std::getenv(\"LSFG_MULTIPLIER\");\n if (multiplier) conf.multiplier = std::stoul(multiplier);\n const char* flow_scale = std::getenv(\"LSFG_FLOW_SCALE\");\n if (flow_scale) conf.flowScale = std::stof(flow_scale);\n const char* performance = std::getenv(\"LSFG_PERFORMANCE_MODE\");\n if (performance) conf.performance = std::string(performance) == \"1\";\n const char* hdr = std::getenv(\"LSFG_HDR_MODE\");\n if (hdr) conf.hdr = std::string(hdr) == \"1\";\n const char* e_present = std::getenv(\"LSFG_EXPERIMENTAL_PRESENT_MODE\");\n if (e_present) conf.e_present = into_present(std::string(e_present));\n\n return conf;\n }\n\n // process new configuration system\n if (!gameConfs.has_value())\n return globalConf;\n\n const auto& games = *gameConfs;\n auto it = std::ranges::find_if(games, [&name](const auto& pair) {\n return name.first.ends_with(pair.first) || (name.second == pair.first);\n });\n if (it != games.end())\n return it->second;\n\n return globalConf;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_compiler.h", "class DxbcCompilerHsPhase {\n public:\n 
DxbcCompiler(\n const std::string& fileName,\n const DxbcModuleInfo& moduleInfo,\n const DxbcProgramInfo& programInfo,\n const Rc& isgn,\n const Rc& osgn,\n const Rc& psgn,\n const DxbcAnalysisInfo& analysis) {\n // Declare an entry point ID. We'll need it during the\n // initialization phase where the execution mode is set.\n m_entryPointId = m_module.allocateId();\n \n // Set the shader name so that we recognize it in renderdoc\n m_module.setDebugSource(\n spv::SourceLanguageUnknown, 0,\n m_module.addDebugString(fileName.c_str()),\n nullptr);\n\n // Set the memory model. This is the same for all shaders.\n m_module.enableCapability(\n spv::CapabilityVulkanMemoryModel);\n\n m_module.setMemoryModel(\n spv::AddressingModelLogical,\n spv::MemoryModelVulkan);\n \n // Make sure our interface registers are clear\n for (uint32_t i = 0; i < DxbcMaxInterfaceRegs; i++) {\n m_vRegs.at(i) = DxbcRegisterPointer { };\n m_oRegs.at(i) = DxbcRegisterPointer { };\n }\n \n this->emitInit();\n }\n ~DxbcCompiler() {\n \n }\n void processInstruction(\n const DxbcShaderInstruction& ins) {\n m_lastOp = m_currOp;\n m_currOp = ins.op;\n\n switch (ins.opClass) {\n case DxbcInstClass::Declaration:\n return this->emitDcl(ins);\n \n case DxbcInstClass::CustomData:\n return this->emitCustomData(ins);\n \n case DxbcInstClass::Atomic:\n return this->emitAtomic(ins);\n \n case DxbcInstClass::AtomicCounter:\n return this->emitAtomicCounter(ins);\n \n case DxbcInstClass::Barrier:\n return this->emitBarrier(ins);\n \n case DxbcInstClass::BitExtract:\n return this->emitBitExtract(ins);\n \n case DxbcInstClass::BitInsert:\n return this->emitBitInsert(ins);\n \n case DxbcInstClass::BitScan:\n return this->emitBitScan(ins);\n \n case DxbcInstClass::BufferQuery:\n return this->emitBufferQuery(ins);\n \n case DxbcInstClass::BufferLoad:\n return this->emitBufferLoad(ins);\n \n case DxbcInstClass::BufferStore:\n return this->emitBufferStore(ins);\n \n case DxbcInstClass::ConvertFloat16:\n return 
this->emitConvertFloat16(ins);\n \n case DxbcInstClass::ConvertFloat64:\n return this->emitConvertFloat64(ins);\n \n case DxbcInstClass::ControlFlow:\n return this->emitControlFlow(ins);\n \n case DxbcInstClass::GeometryEmit:\n return this->emitGeometryEmit(ins);\n \n case DxbcInstClass::HullShaderPhase:\n return this->emitHullShaderPhase(ins);\n \n case DxbcInstClass::HullShaderInstCnt:\n return this->emitHullShaderInstCnt(ins);\n \n case DxbcInstClass::Interpolate:\n return this->emitInterpolate(ins);\n \n case DxbcInstClass::NoOperation:\n return;\n\n case DxbcInstClass::SparseCheckAccess:\n return this->emitSparseCheckAccess(ins);\n\n case DxbcInstClass::TextureQuery:\n return this->emitTextureQuery(ins);\n \n case DxbcInstClass::TextureQueryLod:\n return this->emitTextureQueryLod(ins);\n \n case DxbcInstClass::TextureQueryMs:\n return this->emitTextureQueryMs(ins);\n \n case DxbcInstClass::TextureQueryMsPos:\n return this->emitTextureQueryMsPos(ins);\n \n case DxbcInstClass::TextureFetch:\n return this->emitTextureFetch(ins);\n \n case DxbcInstClass::TextureGather:\n return this->emitTextureGather(ins);\n \n case DxbcInstClass::TextureSample:\n return this->emitTextureSample(ins);\n \n case DxbcInstClass::TypedUavLoad:\n return this->emitTypedUavLoad(ins);\n \n case DxbcInstClass::TypedUavStore:\n return this->emitTypedUavStore(ins);\n \n case DxbcInstClass::VectorAlu:\n return this->emitVectorAlu(ins);\n \n case DxbcInstClass::VectorCmov:\n return this->emitVectorCmov(ins);\n \n case DxbcInstClass::VectorCmp:\n return this->emitVectorCmp(ins);\n \n case DxbcInstClass::VectorDeriv:\n return this->emitVectorDeriv(ins);\n \n case DxbcInstClass::VectorDot:\n return this->emitVectorDot(ins);\n \n case DxbcInstClass::VectorIdiv:\n return this->emitVectorIdiv(ins);\n \n case DxbcInstClass::VectorImul:\n return this->emitVectorImul(ins);\n \n case DxbcInstClass::VectorMsad:\n return this->emitVectorMsad(ins);\n \n case DxbcInstClass::VectorShift:\n return 
this->emitVectorShift(ins);\n \n case DxbcInstClass::VectorSinCos:\n return this->emitVectorSinCos(ins);\n \n default:\n Logger::warn(\n str::format(\"DxbcCompiler: Unhandled opcode class: \",\n ins.op));\n }\n }\n void processXfbPassthrough() {\n m_module.setExecutionMode (m_entryPointId, spv::ExecutionModeInputPoints);\n m_module.setExecutionMode (m_entryPointId, spv::ExecutionModeOutputPoints);\n m_module.setOutputVertices(m_entryPointId, 1);\n\n for (auto e = m_isgn->begin(); e != m_isgn->end(); e++) {\n emitDclInput(e->registerId, 1,\n e->componentMask, DxbcSystemValue::None,\n DxbcInterpolationMode::Undefined);\n }\n\n // Figure out which streams to enable\n uint32_t streamMask = 0;\n\n for (size_t i = 0; i < m_xfbVars.size(); i++)\n streamMask |= 1u << m_xfbVars[i].streamId;\n \n for (uint32_t streamId : bit::BitMask(streamMask)) {\n emitXfbOutputSetup(streamId, true);\n m_module.opEmitVertex(m_module.constu32(streamId));\n }\n\n // End the main function\n emitFunctionEnd();\n\n // For pass-through we always assume points\n m_inputTopology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;\n }\n SpirvCodeBuffer finalize() {\n // Depending on the shader type, this will prepare\n // input registers, call various shader functions\n // and write back the output registers.\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: this->emitVsFinalize(); break;\n case DxbcProgramType::HullShader: this->emitHsFinalize(); break;\n case DxbcProgramType::DomainShader: this->emitDsFinalize(); break;\n case DxbcProgramType::GeometryShader: this->emitGsFinalize(); break;\n case DxbcProgramType::PixelShader: this->emitPsFinalize(); break;\n case DxbcProgramType::ComputeShader: this->emitCsFinalize(); break;\n default: throw DxvkError(\"Invalid shader stage\");\n }\n\n // Emit float control mode if the extension is supported\n this->emitFloatControl();\n \n // Declare the entry point, we now have all the\n // information we need, including the interfaces\n 
m_module.addEntryPoint(m_entryPointId,\n m_programInfo.executionModel(), \"main\");\n m_module.setDebugName(m_entryPointId, \"main\");\n\n return m_module.compile();\n }\n private:\n DxbcModuleInfo m_moduleInfo;\n DxbcProgramInfo m_programInfo;\n SpirvModule m_module;\n Rc m_isgn;\n Rc m_osgn;\n Rc m_psgn;\n const DxbcAnalysisInfo* m_analysis;\n std::vector m_bindings;\n std::vector m_rRegs;\n std::vector m_xRegs;\n std::vector m_gRegs;\n std::array<\n DxbcRegisterPointer,\n DxbcMaxInterfaceRegs> m_vRegs;\n std::vector m_vMappings;\n std::array<\n DxbcRegisterPointer,\n DxbcMaxInterfaceRegs> m_oRegs;\n std::vector m_oMappings;\n std::vector m_xfbVars;\n std::vector m_indexRanges = { };\n std::array m_constantBuffers;\n std::array m_samplers;\n std::array m_textures;\n std::array m_uavs;\n bool m_hasGloballyCoherentUav = false;\n bool m_hasRasterizerOrderedUav = false;\n std::vector m_controlFlowBlocks;\n bool m_topLevelIsUniform = true;\n uint64_t m_uavRdMask = 0u;\n uint64_t m_uavWrMask = 0u;\n bool m_insideFunction = false;\n uint32_t m_vArrayLength = 0;\n uint32_t m_vArrayLengthId = 0;\n uint32_t m_vArray = 0;\n uint32_t m_positionIn = 0;\n uint32_t m_positionOut = 0;\n uint32_t m_clipDistances = 0;\n uint32_t m_cullDistances = 0;\n uint32_t m_primitiveIdIn = 0;\n uint32_t m_primitiveIdOut = 0;\n uint32_t m_icbArray = 0;\n std::vector m_icbData;\n uint32_t m_icbComponents = 0u;\n uint32_t m_icbSize = 0u;\n uint32_t m_samplePositions = 0;\n uint32_t m_uavCtrStructType = 0;\n uint32_t m_uavCtrPointerType = 0;\n std::unordered_map m_subroutines;\n uint32_t m_entryPointId = 0;\n bool m_hasRawAccessChains = false;\n uint32_t m_inputMask = 0u;\n uint32_t m_outputMask = 0u;\n DxbcCompilerVsPart m_vs;\n DxbcCompilerHsPart m_hs;\n DxbcCompilerDsPart m_ds;\n DxbcCompilerGsPart m_gs;\n DxbcCompilerPsPart m_ps;\n DxbcCompilerCsPart m_cs;\n bool m_precise = true;\n DxbcOpcode m_lastOp = DxbcOpcode::Nop;\n DxbcOpcode m_currOp = DxbcOpcode::Nop;\n VkPrimitiveTopology 
m_inputTopology = VK_PRIMITIVE_TOPOLOGY_MAX_ENUM;\n VkPrimitiveTopology m_outputTopology = VK_PRIMITIVE_TOPOLOGY_MAX_ENUM;\n void emitDcl(\n const DxbcShaderInstruction& ins) {\n switch (ins.op) {\n case DxbcOpcode::DclGlobalFlags:\n return this->emitDclGlobalFlags(ins);\n \n case DxbcOpcode::DclIndexRange:\n return this->emitDclIndexRange(ins);\n \n case DxbcOpcode::DclTemps:\n return this->emitDclTemps(ins);\n \n case DxbcOpcode::DclIndexableTemp:\n return this->emitDclIndexableTemp(ins);\n \n case DxbcOpcode::DclInput:\n case DxbcOpcode::DclInputSgv:\n case DxbcOpcode::DclInputSiv:\n case DxbcOpcode::DclInputPs:\n case DxbcOpcode::DclInputPsSgv:\n case DxbcOpcode::DclInputPsSiv:\n case DxbcOpcode::DclOutput:\n case DxbcOpcode::DclOutputSgv:\n case DxbcOpcode::DclOutputSiv:\n return this->emitDclInterfaceReg(ins);\n \n case DxbcOpcode::DclConstantBuffer:\n return this->emitDclConstantBuffer(ins);\n \n case DxbcOpcode::DclSampler:\n return this->emitDclSampler(ins);\n \n case DxbcOpcode::DclStream:\n return this->emitDclStream(ins);\n \n case DxbcOpcode::DclUavTyped:\n case DxbcOpcode::DclResource:\n return this->emitDclResourceTyped(ins);\n \n case DxbcOpcode::DclUavRaw:\n case DxbcOpcode::DclResourceRaw:\n case DxbcOpcode::DclUavStructured:\n case DxbcOpcode::DclResourceStructured:\n return this->emitDclResourceRawStructured(ins);\n \n case DxbcOpcode::DclThreadGroupSharedMemoryRaw:\n case DxbcOpcode::DclThreadGroupSharedMemoryStructured:\n return this->emitDclThreadGroupSharedMemory(ins);\n \n case DxbcOpcode::DclGsInputPrimitive:\n return this->emitDclGsInputPrimitive(ins);\n \n case DxbcOpcode::DclGsOutputPrimitiveTopology:\n return this->emitDclGsOutputTopology(ins);\n \n case DxbcOpcode::DclMaxOutputVertexCount:\n return this->emitDclMaxOutputVertexCount(ins);\n \n case DxbcOpcode::DclInputControlPointCount:\n return this->emitDclInputControlPointCount(ins);\n \n case DxbcOpcode::DclOutputControlPointCount:\n return 
this->emitDclOutputControlPointCount(ins);\n \n case DxbcOpcode::DclHsMaxTessFactor:\n return this->emitDclHsMaxTessFactor(ins);\n \n case DxbcOpcode::DclTessDomain:\n return this->emitDclTessDomain(ins);\n \n case DxbcOpcode::DclTessPartitioning:\n return this->emitDclTessPartitioning(ins);\n \n case DxbcOpcode::DclTessOutputPrimitive:\n return this->emitDclTessOutputPrimitive(ins);\n \n case DxbcOpcode::DclThreadGroup:\n return this->emitDclThreadGroup(ins);\n \n case DxbcOpcode::DclGsInstanceCount:\n return this->emitDclGsInstanceCount(ins);\n \n default:\n Logger::warn(\n str::format(\"DxbcCompiler: Unhandled opcode: \",\n ins.op));\n }\n }\n void emitDclGlobalFlags(\n const DxbcShaderInstruction& ins) {\n const DxbcGlobalFlags flags = ins.controls.globalFlags();\n \n if (flags.test(DxbcGlobalFlag::RefactoringAllowed))\n m_precise = false;\n\n if (flags.test(DxbcGlobalFlag::EarlyFragmentTests))\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeEarlyFragmentTests);\n }\n void emitDclIndexRange(\n const DxbcShaderInstruction& ins) {\n // dcl_index_range has one operand:\n // (0) Range start, either an input or output register\n // (1) Range end\n uint32_t index = ins.dst[0].idxDim - 1u;\n\n DxbcIndexRange range = { };\n range.type = ins.dst[0].type;\n range.start = ins.dst[0].idx[index].offset;\n range.length = ins.imm[0].u32;\n\n m_indexRanges.push_back(range);\n }\n void emitDclTemps(\n const DxbcShaderInstruction& ins) {\n // dcl_temps has one operand:\n // (imm0) Number of temp registers\n\n // Ignore this and declare temps on demand.\n }\n void emitDclIndexableTemp(\n const DxbcShaderInstruction& ins) {\n // dcl_indexable_temps has three operands:\n // (imm0) Array register index (x#)\n // (imm1) Number of vectors stored in the array\n // (imm2) Component count of each individual vector. 
This is\n // always 4 in fxc-generated binaries and therefore useless.\n const uint32_t regId = ins.imm[0].u32;\n\n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = m_analysis->xRegMasks.at(regId).minComponents();\n info.type.alength = ins.imm[1].u32;\n info.sclass = spv::StorageClassPrivate;\n\n if (regId >= m_xRegs.size())\n m_xRegs.resize(regId + 1);\n \n m_xRegs.at(regId).ccount = info.type.ccount;\n m_xRegs.at(regId).alength = info.type.alength;\n m_xRegs.at(regId).varId = emitNewVariable(info);\n \n m_module.setDebugName(m_xRegs.at(regId).varId,\n str::format(\"x\", regId).c_str());\n }\n void emitDclInterfaceReg(\n const DxbcShaderInstruction& ins) {\n switch (ins.dst[0].type) {\n case DxbcOperandType::InputControlPoint:\n if (m_programInfo.type() != DxbcProgramType::HullShader)\n break;\n [[fallthrough]];\n\n case DxbcOperandType::Input:\n case DxbcOperandType::Output: {\n // dcl_input and dcl_output instructions\n // have the following operands:\n // (dst0) The register to declare\n // (imm0) The system value (optional)\n uint32_t regDim = 0;\n uint32_t regIdx = 0;\n \n // In the vertex and fragment shader stage, the\n // operand indices will have the following format:\n // (0) Register index\n // \n // In other stages, the input and output registers\n // may be declared as arrays of a fixed size:\n // (0) Array length\n // (1) Register index\n if (ins.dst[0].idxDim == 2) {\n regDim = ins.dst[0].idx[0].offset;\n regIdx = ins.dst[0].idx[1].offset;\n } else if (ins.dst[0].idxDim == 1) {\n regIdx = ins.dst[0].idx[0].offset;\n } else {\n Logger::err(str::format(\n \"DxbcCompiler: \", ins.op,\n \": Invalid index dimension\"));\n return;\n }\n \n // This declaration may map an output register to a system\n // value. 
If that is the case, the system value type will\n // be stored in the second operand.\n const bool hasSv =\n ins.op == DxbcOpcode::DclInputSgv\n || ins.op == DxbcOpcode::DclInputSiv\n || ins.op == DxbcOpcode::DclInputPsSgv\n || ins.op == DxbcOpcode::DclInputPsSiv\n || ins.op == DxbcOpcode::DclOutputSgv\n || ins.op == DxbcOpcode::DclOutputSiv;\n \n DxbcSystemValue sv = DxbcSystemValue::None;\n \n if (hasSv)\n sv = static_cast(ins.imm[0].u32);\n \n // In the pixel shader, inputs are declared with an\n // interpolation mode that is part of the op token.\n const bool hasInterpolationMode =\n ins.op == DxbcOpcode::DclInputPs\n || ins.op == DxbcOpcode::DclInputPsSiv;\n \n DxbcInterpolationMode im = DxbcInterpolationMode::Undefined;\n \n if (hasInterpolationMode)\n im = ins.controls.interpolation();\n \n // Declare the actual input/output variable\n switch (ins.op) {\n case DxbcOpcode::DclInput:\n case DxbcOpcode::DclInputSgv:\n case DxbcOpcode::DclInputSiv:\n case DxbcOpcode::DclInputPs:\n case DxbcOpcode::DclInputPsSgv:\n case DxbcOpcode::DclInputPsSiv:\n this->emitDclInput(regIdx, regDim, ins.dst[0].mask, sv, im);\n break;\n \n case DxbcOpcode::DclOutput:\n case DxbcOpcode::DclOutputSgv:\n case DxbcOpcode::DclOutputSiv:\n this->emitDclOutput(regIdx, regDim, ins.dst[0].mask, sv, im);\n break;\n \n default:\n Logger::err(str::format(\n \"DxbcCompiler: Unexpected opcode: \",\n ins.op));\n }\n } break;\n \n case DxbcOperandType::InputThreadId: {\n m_cs.builtinGlobalInvocationId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInGlobalInvocationId,\n \"vThreadId\");\n } break;\n \n case DxbcOperandType::InputThreadGroupId: {\n m_cs.builtinWorkgroupId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInWorkgroupId,\n \"vThreadGroupId\");\n } break;\n \n case DxbcOperandType::InputThreadIdInGroup: {\n m_cs.builtinLocalInvocationId = emitNewBuiltinVariable({\n { 
DxbcScalarType::Uint32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationId,\n \"vThreadIdInGroup\");\n } break;\n \n case DxbcOperandType::InputThreadIndexInGroup: {\n m_cs.builtinLocalInvocationIndex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationIndex,\n \"vThreadIndexInGroup\");\n } break;\n \n case DxbcOperandType::InputCoverageMask: {\n m_ps.builtinSampleMaskIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 1 },\n spv::StorageClassInput },\n spv::BuiltInSampleMask,\n \"vCoverage\");\n } break;\n \n case DxbcOperandType::OutputCoverageMask: {\n m_ps.builtinSampleMaskOut = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 1 },\n spv::StorageClassOutput },\n spv::BuiltInSampleMask,\n \"oMask\");\n } break;\n \n case DxbcOperandType::OutputDepth: {\n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeDepthReplacing);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepth\");\n } break;\n \n case DxbcOperandType::OutputStencilRef: {\n m_module.enableExtension(\"SPV_EXT_shader_stencil_export\");\n m_module.enableCapability(spv::CapabilityStencilExportEXT);\n m_module.setExecutionMode(m_entryPointId,\n spv::ExecutionModeStencilRefReplacingEXT);\n m_ps.builtinStencilRef = emitNewBuiltinVariable({\n { DxbcScalarType::Sint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragStencilRefEXT,\n \"oStencilRef\");\n } break;\n\n case DxbcOperandType::OutputDepthGe: {\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthReplacing);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthGreater);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepthGe\");\n } break;\n \n case DxbcOperandType::OutputDepthLe: {\n 
m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthReplacing);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDepthLess);\n m_ps.builtinDepth = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInFragDepth,\n \"oDepthLe\");\n } break;\n \n case DxbcOperandType::InputPrimitiveId: {\n m_primitiveIdIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInPrimitiveId,\n \"vPrim\");\n } break;\n \n case DxbcOperandType::InputDomainPoint: {\n m_ds.builtinTessCoord = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 3, 0 },\n spv::StorageClassInput },\n spv::BuiltInTessCoord,\n \"vDomain\");\n } break;\n \n case DxbcOperandType::InputForkInstanceId:\n case DxbcOperandType::InputJoinInstanceId: {\n auto phase = this->getCurrentHsForkJoinPhase();\n \n phase->instanceIdPtr = m_module.newVar(\n m_module.defPointerType(\n m_module.defIntType(32, 0),\n spv::StorageClassFunction),\n spv::StorageClassFunction);\n \n m_module.opStore(phase->instanceIdPtr, phase->instanceId);\n m_module.setDebugName(phase->instanceIdPtr,\n ins.dst[0].type == DxbcOperandType::InputForkInstanceId\n ? 
\"vForkInstanceId\" : \"vJoinInstanceId\");\n } break;\n \n case DxbcOperandType::OutputControlPointId: {\n // This system value map to the invocation\n // ID, which has been declared already.\n } break;\n \n case DxbcOperandType::InputPatchConstant:\n case DxbcOperandType::OutputControlPoint: {\n // These have been declared as global input and\n // output arrays, so there's nothing left to do.\n } break;\n \n case DxbcOperandType::InputGsInstanceId: {\n m_gs.builtinInvocationId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInvocationId,\n \"vInstanceID\");\n } break;\n \n case DxbcOperandType::InputInnerCoverage: {\n m_module.enableExtension(\"SPV_EXT_fragment_fully_covered\");\n m_module.enableCapability(spv::CapabilityFragmentFullyCoveredEXT);\n\n // This is bool in SPIR-V but uint32 in DXBC. A bool value of\n // false must be 0, and bit 1 must be set to represent true.\n uint32_t builtinId = emitNewBuiltinVariable({\n { DxbcScalarType::Bool, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInFullyCoveredEXT,\n nullptr);\n\n m_ps.builtinInnerCoverageId = emitNewVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassPrivate });\n\n m_module.setDebugName(m_ps.builtinInnerCoverageId, \"vInnerCoverage\");\n\n uint32_t boolTypeId = m_module.defBoolType();\n uint32_t uintTypeId = m_module.defIntType(32, 0);\n\n m_module.opStore(m_ps.builtinInnerCoverageId,\n m_module.opSelect(uintTypeId,\n m_module.opLoad(boolTypeId, builtinId),\n m_module.constu32(1),\n m_module.constu32(0)));\n } break;\n\n default:\n Logger::err(str::format(\n \"DxbcCompiler: Unsupported operand type declaration: \",\n ins.dst[0].type));\n \n }\n }\n void emitDclInput(\n uint32_t regIdx,\n uint32_t regDim,\n DxbcRegMask regMask,\n DxbcSystemValue sv,\n DxbcInterpolationMode im) {\n // Avoid declaring the same variable multiple times.\n // This may happen when multiple system values are\n // mapped to different parts of the same 
register.\n if (m_vRegs.at(regIdx).id == 0 && sv == DxbcSystemValue::None) {\n const DxbcVectorType regType = getInputRegType(regIdx);\n \n DxbcRegisterInfo info;\n info.type.ctype = regType.ctype;\n info.type.ccount = regType.ccount;\n info.type.alength = regDim;\n info.sclass = spv::StorageClassInput;\n \n const uint32_t varId = emitNewVariable(info);\n \n m_module.decorateLocation(varId, regIdx);\n m_module.setDebugName(varId, str::format(\"v\", regIdx).c_str());\n \n m_vRegs.at(regIdx) = { regType, varId };\n \n // Interpolation mode, used in pixel shaders\n if (im == DxbcInterpolationMode::Constant)\n m_module.decorate(varId, spv::DecorationFlat);\n \n if (im == DxbcInterpolationMode::LinearCentroid\n || im == DxbcInterpolationMode::LinearNoPerspectiveCentroid)\n m_module.decorate(varId, spv::DecorationCentroid);\n \n if (im == DxbcInterpolationMode::LinearNoPerspective\n || im == DxbcInterpolationMode::LinearNoPerspectiveCentroid\n || im == DxbcInterpolationMode::LinearNoPerspectiveSample)\n m_module.decorate(varId, spv::DecorationNoPerspective);\n \n if (im == DxbcInterpolationMode::LinearSample\n || im == DxbcInterpolationMode::LinearNoPerspectiveSample) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n m_module.decorate(varId, spv::DecorationSample);\n }\n\n if (m_moduleInfo.options.forceSampleRateShading) {\n if (im == DxbcInterpolationMode::Linear\n || im == DxbcInterpolationMode::LinearNoPerspective) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n m_module.decorate(varId, spv::DecorationSample);\n }\n }\n\n // Declare the input slot as defined\n m_inputMask |= 1u << regIdx;\n m_vArrayLength = std::max(m_vArrayLength, regIdx + 1);\n } else if (sv != DxbcSystemValue::None) {\n // Add a new system value mapping if needed\n bool skipSv = sv == DxbcSystemValue::ClipDistance\n || sv == DxbcSystemValue::CullDistance;\n \n if (!skipSv)\n m_vMappings.push_back({ regIdx, regMask, sv });\n }\n }\n void emitDclOutput(\n uint32_t 
regIdx,\n uint32_t regDim,\n DxbcRegMask regMask,\n DxbcSystemValue sv,\n DxbcInterpolationMode im) {\n // Add a new system value mapping if needed. Clip\n // and cull distances are handled separately.\n if (sv != DxbcSystemValue::None\n && sv != DxbcSystemValue::ClipDistance\n && sv != DxbcSystemValue::CullDistance)\n m_oMappings.push_back({ regIdx, regMask, sv });\n \n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n // Hull shaders don't use standard outputs\n if (getCurrentHsForkJoinPhase() != nullptr)\n m_hs.outputPerPatchMask |= 1 << regIdx;\n } else if (m_oRegs.at(regIdx).id == 0) {\n // Avoid declaring the same variable multiple times.\n // This may happen when multiple system values are\n // mapped to different parts of the same register.\n const DxbcVectorType regType = getOutputRegType(regIdx);\n \n DxbcRegisterInfo info;\n info.type.ctype = regType.ctype;\n info.type.ccount = regType.ccount;\n info.type.alength = regDim;\n info.sclass = spv::StorageClassOutput;\n\n // In xfb mode, we set up the actual\n // output vars when emitting a vertex\n if (m_moduleInfo.xfb != nullptr)\n info.sclass = spv::StorageClassPrivate;\n \n // In geometry shaders, don't duplicate system value outputs\n // to stay within device limits. 
The pixel shader will read\n // all GS system value outputs as system value inputs.\n if (m_programInfo.type() == DxbcProgramType::GeometryShader && sv != DxbcSystemValue::None)\n info.sclass = spv::StorageClassPrivate;\n\n const uint32_t varId = this->emitNewVariable(info);\n m_module.setDebugName(varId, str::format(\"o\", regIdx).c_str());\n \n if (info.sclass == spv::StorageClassOutput) {\n m_module.decorateLocation(varId, regIdx);\n\n // Add index decoration for potential dual-source blending\n if (m_programInfo.type() == DxbcProgramType::PixelShader)\n m_module.decorateIndex(varId, 0);\n\n // Declare vertex positions in all stages as invariant, even if\n // this is not the last stage, to help with potential Z fighting.\n if (sv == DxbcSystemValue::Position && m_moduleInfo.options.invariantPosition)\n m_module.decorate(varId, spv::DecorationInvariant);\n }\n \n m_oRegs.at(regIdx) = { regType, varId };\n \n // Declare the output slot as defined\n m_outputMask |= 1u << regIdx;\n }\n }\n void emitDclConstantBuffer(\n const DxbcShaderInstruction& ins) {\n // dcl_constant_buffer has one operand with two indices:\n // (0) Constant buffer register ID (cb#)\n // (1) Number of constants in the buffer\n uint32_t bufferId = ins.dst[0].idx[0].offset;\n uint32_t elementCount = ins.dst[0].idx[1].offset;\n\n // With dynamic indexing, games will often index constant buffers\n // out of bounds. Declare an upper bound to stay within spec.\n if (ins.controls.accessType() == DxbcConstantBufferAccessType::DynamicallyIndexed)\n elementCount = 4096;\n\n this->emitDclConstantBufferVar(bufferId, elementCount, 4u,\n str::format(\"cb\", bufferId).c_str());\n }\n void emitDclConstantBufferVar(\n uint32_t regIdx,\n uint32_t numConstants,\n uint32_t numComponents,\n const char* name) {\n // Uniform buffer data is stored as a fixed-size array\n // of 4x32-bit vectors. 
SPIR-V requires explicit strides.\n const uint32_t arrayType = m_module.defArrayTypeUnique(\n getVectorTypeId({ DxbcScalarType::Float32, numComponents }),\n m_module.constu32(numConstants));\n m_module.decorateArrayStride(arrayType, sizeof(uint32_t) * numComponents);\n \n // SPIR-V requires us to put that array into a\n // struct and decorate that struct as a block.\n const uint32_t structType = m_module.defStructTypeUnique(1, &arrayType);\n \n m_module.decorate(structType, spv::DecorationBlock);\n m_module.memberDecorateOffset(structType, 0, 0);\n \n m_module.setDebugName (structType, str::format(name, \"_t\").c_str());\n m_module.setDebugMemberName (structType, 0, \"m\");\n \n // Variable that we'll use to access the buffer\n const uint32_t varId = m_module.newVar(\n m_module.defPointerType(structType, spv::StorageClassUniform),\n spv::StorageClassUniform);\n \n m_module.setDebugName(varId, name);\n \n // Compute the DXVK binding slot index for the buffer.\n // D3D11 needs to bind the actual buffers to this slot.\n uint32_t bindingId = computeConstantBufferBinding(\n m_programInfo.type(), regIdx);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n\n DxbcConstantBuffer buf;\n buf.varId = varId;\n buf.size = numConstants;\n m_constantBuffers.at(regIdx) = buf;\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER };\n binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.access = VK_ACCESS_UNIFORM_READ_BIT;\n binding.resourceBinding = bindingId;\n binding.uboSet = true;\n m_bindings.push_back(binding);\n }\n void emitDclSampler(\n const DxbcShaderInstruction& ins) {\n // dclSampler takes one operand:\n // (dst0) The sampler register to declare\n const uint32_t samplerId = ins.dst[0].idx[0].offset;\n \n // The sampler type is opaque, but we still have to\n // define a pointer and a variable in oder to use it\n const uint32_t samplerType = 
m_module.defSamplerType();\n const uint32_t samplerPtrType = m_module.defPointerType(\n samplerType, spv::StorageClassUniformConstant);\n \n // Define the sampler variable\n const uint32_t varId = m_module.newVar(samplerPtrType,\n spv::StorageClassUniformConstant);\n m_module.setDebugName(varId,\n str::format(\"s\", samplerId).c_str());\n \n m_samplers.at(samplerId).varId = varId;\n m_samplers.at(samplerId).typeId = samplerType;\n \n // Compute binding slot index for the sampler\n uint32_t bindingId = computeSamplerBinding(\n m_programInfo.type(), samplerId);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_SAMPLER };\n binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.resourceBinding = bindingId;\n m_bindings.push_back(binding);\n }\n void emitDclStream(\n const DxbcShaderInstruction& ins) {\n if (ins.dst[0].idx[0].offset != 0 && m_moduleInfo.xfb == nullptr)\n Logger::err(\"Dxbc: Multiple streams not supported\");\n }\n void emitDclResourceTyped(\n const DxbcShaderInstruction& ins) {\n // dclResource takes two operands:\n // (dst0) The resource register ID\n // (imm0) The resource return type\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n \n // We also handle unordered access views here\n const bool isUav = ins.op == DxbcOpcode::DclUavTyped;\n \n if (isUav) {\n if (m_moduleInfo.options.supportsTypedUavLoadR32)\n m_module.enableCapability(spv::CapabilityStorageImageReadWithoutFormat);\n m_module.enableCapability(spv::CapabilityStorageImageWriteWithoutFormat);\n }\n \n // Defines the type of the resource (texture2D, ...)\n const DxbcResourceDim resourceType = ins.controls.resourceDim();\n \n // Defines the type of a read operation. 
DXBC has the ability\n // to define four different types whereas SPIR-V only allows\n // one, but in practice this should not be much of a problem.\n auto xType = static_cast(\n bit::extract(ins.imm[0].u32, 0, 3));\n auto yType = static_cast(\n bit::extract(ins.imm[0].u32, 4, 7));\n auto zType = static_cast(\n bit::extract(ins.imm[0].u32, 8, 11));\n auto wType = static_cast(\n bit::extract(ins.imm[0].u32, 12, 15));\n \n if ((xType != yType) || (xType != zType) || (xType != wType))\n Logger::warn(\"DxbcCompiler: dcl_resource: Ignoring resource return types\");\n \n // Declare the actual sampled type\n const DxbcScalarType sampledType = [xType] {\n switch (xType) {\n // FIXME is this correct? There's no documentation about it\n case DxbcResourceReturnType::Mixed: return DxbcScalarType::Uint32;\n // FIXME do we have to manually clamp writes to SNORM/UNORM resources?\n case DxbcResourceReturnType::Snorm: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Unorm: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Float: return DxbcScalarType::Float32;\n case DxbcResourceReturnType::Sint: return DxbcScalarType::Sint32;\n case DxbcResourceReturnType::Uint: return DxbcScalarType::Uint32;\n default: throw DxvkError(str::format(\"DxbcCompiler: Invalid sampled type: \", xType));\n }\n }();\n \n // Declare the resource type\n const uint32_t sampledTypeId = getScalarTypeId(sampledType);\n const DxbcImageInfo typeInfo = getResourceType(resourceType, isUav); \n \n // Declare additional capabilities if necessary\n switch (resourceType) {\n case DxbcResourceDim::Buffer:\n m_module.enableCapability(isUav\n ? spv::CapabilityImageBuffer\n : spv::CapabilitySampledBuffer);\n break;\n \n case DxbcResourceDim::Texture1D:\n case DxbcResourceDim::Texture1DArr:\n m_module.enableCapability(isUav\n ? 
spv::CapabilityImage1D\n : spv::CapabilitySampled1D);\n break;\n \n case DxbcResourceDim::TextureCubeArr:\n m_module.enableCapability(\n spv::CapabilitySampledCubeArray);\n break;\n \n default:\n // No additional capabilities required\n break;\n }\n \n // If the read-without-format capability is not set and this\n // image is access via a typed load, or if atomic operations\n // are used,, we must define the image format explicitly.\n spv::ImageFormat imageFormat = spv::ImageFormatUnknown;\n \n if (isUav) {\n if ((m_analysis->uavInfos[registerId].accessAtomicOp)\n || (m_analysis->uavInfos[registerId].accessTypedLoad\n && !m_moduleInfo.options.supportsTypedUavLoadR32))\n imageFormat = getScalarImageFormat(sampledType);\n }\n \n // We do not know whether the image is going to be used as\n // a color image or a depth image yet, but we can pick the\n // correct type when creating a sampled image object.\n const uint32_t imageTypeId = m_module.defImageType(sampledTypeId,\n typeInfo.dim, 0, typeInfo.array, typeInfo.ms, typeInfo.sampled,\n imageFormat);\n \n // We'll declare the texture variable with the color type\n // and decide which one to use when the texture is sampled.\n const uint32_t resourcePtrType = m_module.defPointerType(\n imageTypeId, spv::StorageClassUniformConstant);\n \n const uint32_t varId = m_module.newVar(resourcePtrType,\n spv::StorageClassUniformConstant);\n \n m_module.setDebugName(varId,\n str::format(isUav ? \"u\" : \"t\", registerId).c_str());\n \n // Compute the DXVK binding slot index for the resource.\n // D3D11 needs to bind the actual resource to this slot.\n uint32_t bindingId = isUav\n ? 
computeUavBinding(m_programInfo.type(), registerId)\n : computeSrvBinding(m_programInfo.type(), registerId);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n // Declare a specialization constant which will\n // store whether or not the resource is bound.\n if (isUav) {\n DxbcUav uav;\n uav.type = DxbcResourceType::Typed;\n uav.imageInfo = typeInfo;\n uav.varId = varId;\n uav.ctrId = 0;\n uav.sampledType = sampledType;\n uav.sampledTypeId = sampledTypeId;\n uav.imageTypeId = imageTypeId;\n uav.structStride = 0;\n uav.coherence = getUavCoherence(registerId, ins.controls.uavFlags());\n uav.isRawSsbo = false;\n m_uavs.at(registerId) = uav;\n } else {\n DxbcShaderResource res;\n res.type = DxbcResourceType::Typed;\n res.imageInfo = typeInfo;\n res.varId = varId;\n res.sampledType = sampledType;\n res.sampledTypeId = sampledTypeId;\n res.imageTypeId = imageTypeId;\n res.colorTypeId = imageTypeId;\n res.depthTypeId = 0;\n res.structStride = 0;\n res.isRawSsbo = false;\n \n if ((sampledType == DxbcScalarType::Float32)\n && (resourceType == DxbcResourceDim::Texture1D\n || resourceType == DxbcResourceDim::Texture1DArr\n || resourceType == DxbcResourceDim::Texture2D\n || resourceType == DxbcResourceDim::Texture2DArr\n || resourceType == DxbcResourceDim::TextureCube\n || resourceType == DxbcResourceDim::TextureCubeArr)) {\n res.depthTypeId = m_module.defImageType(sampledTypeId,\n typeInfo.dim, 1, typeInfo.array, typeInfo.ms, typeInfo.sampled,\n spv::ImageFormatUnknown);\n }\n \n m_textures.at(registerId) = res;\n }\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { };\n binding.viewType = typeInfo.vtype;\n binding.resourceBinding = bindingId;\n binding.isMultisampled = typeInfo.ms;\n\n if (isUav) {\n binding.descriptorType = resourceType == DxbcResourceDim::Buffer\n ? 
VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER\n : VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;\n binding.access = m_analysis->uavInfos[registerId].accessFlags;\n\n if (!m_analysis->uavInfos[registerId].nonInvariantAccess)\n binding.accessOp = m_analysis->uavInfos[registerId].accessOp;\n\n if (!(binding.access & VK_ACCESS_SHADER_WRITE_BIT))\n m_module.decorate(varId, spv::DecorationNonWritable);\n if (!(binding.access & VK_ACCESS_SHADER_READ_BIT))\n m_module.decorate(varId, spv::DecorationNonReadable);\n } else {\n binding.descriptorType = resourceType == DxbcResourceDim::Buffer\n ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER\n : VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;\n binding.access = VK_ACCESS_SHADER_READ_BIT;\n }\n\n m_bindings.push_back(binding);\n }\n void emitDclResourceRawStructured(\n const DxbcShaderInstruction& ins) {\n // dcl_resource_raw and dcl_uav_raw take one argument:\n // (dst0) The resource register ID\n // dcl_resource_structured and dcl_uav_structured take two arguments:\n // (dst0) The resource register ID\n // (imm0) Structure stride, in bytes\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n \n const bool isUav = ins.op == DxbcOpcode::DclUavRaw\n || ins.op == DxbcOpcode::DclUavStructured;\n \n const bool isStructured = ins.op == DxbcOpcode::DclUavStructured\n || ins.op == DxbcOpcode::DclResourceStructured;\n \n const DxbcScalarType sampledType = DxbcScalarType::Uint32;\n const uint32_t sampledTypeId = getScalarTypeId(sampledType);\n \n const DxbcImageInfo typeInfo = { spv::DimBuffer, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_MAX_ENUM };\n \n // Declare the resource type\n uint32_t resTypeId = 0;\n uint32_t varId = 0;\n \n // Write back resource info\n DxbcResourceType resType = isStructured\n ? DxbcResourceType::Structured\n : DxbcResourceType::Raw;\n \n uint32_t resStride = isStructured\n ? ins.imm[0].u32\n : 0;\n \n uint32_t resAlign = isStructured\n ? 
(resStride & -resStride)\n : 16;\n \n // Compute the DXVK binding slot index for the resource.\n uint32_t bindingId = isUav\n ? computeUavBinding(m_programInfo.type(), registerId)\n : computeSrvBinding(m_programInfo.type(), registerId);\n \n // Test whether we should use a raw SSBO for this resource\n bool hasSparseFeedback = isUav\n ? m_analysis->uavInfos[registerId].sparseFeedback\n : m_analysis->srvInfos[registerId].sparseFeedback;\n\n bool useRawSsbo = m_moduleInfo.options.minSsboAlignment <= resAlign && !hasSparseFeedback;\n \n if (useRawSsbo) {\n uint32_t elemType = getScalarTypeId(DxbcScalarType::Uint32);\n uint32_t arrayType = m_module.defRuntimeArrayTypeUnique(elemType);\n uint32_t structType = m_module.defStructTypeUnique(1, &arrayType);\n uint32_t ptrType = m_module.defPointerType(structType, spv::StorageClassStorageBuffer);\n\n resTypeId = m_module.defPointerType(elemType, spv::StorageClassStorageBuffer);\n varId = m_module.newVar(ptrType, spv::StorageClassStorageBuffer);\n \n m_module.decorateArrayStride(arrayType, sizeof(uint32_t));\n m_module.decorate(structType, spv::DecorationBlock);\n m_module.memberDecorateOffset(structType, 0, 0);\n\n m_module.setDebugName(structType,\n str::format(isUav ? \"u\" : \"t\", registerId, \"_t\").c_str());\n m_module.setDebugMemberName(structType, 0, \"m\");\n } else {\n // Structured and raw buffers are represented as\n // texel buffers consisting of 32-bit integers.\n m_module.enableCapability(isUav\n ? spv::CapabilityImageBuffer\n : spv::CapabilitySampledBuffer);\n \n resTypeId = m_module.defImageType(sampledTypeId,\n typeInfo.dim, 0, typeInfo.array, typeInfo.ms, typeInfo.sampled,\n spv::ImageFormatR32ui);\n \n varId = m_module.newVar(\n m_module.defPointerType(resTypeId, spv::StorageClassUniformConstant),\n spv::StorageClassUniformConstant);\n }\n\n m_module.setDebugName(varId,\n str::format(isUav ? 
\"u\" : \"t\", registerId).c_str());\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n if (isUav) {\n DxbcUav uav;\n uav.type = resType;\n uav.imageInfo = typeInfo;\n uav.varId = varId;\n uav.ctrId = 0;\n uav.sampledType = sampledType;\n uav.sampledTypeId = sampledTypeId;\n uav.imageTypeId = resTypeId;\n uav.structStride = resStride;\n uav.coherence = getUavCoherence(registerId, ins.controls.uavFlags());\n uav.isRawSsbo = useRawSsbo;\n m_uavs.at(registerId) = uav;\n } else {\n DxbcShaderResource res;\n res.type = resType;\n res.imageInfo = typeInfo;\n res.varId = varId;\n res.sampledType = sampledType;\n res.sampledTypeId = sampledTypeId;\n res.imageTypeId = resTypeId;\n res.colorTypeId = resTypeId;\n res.depthTypeId = 0;\n res.structStride = resStride;\n res.isRawSsbo = useRawSsbo;\n m_textures.at(registerId) = res;\n }\n \n // Store descriptor info for the shader interface\n DxvkBindingInfo binding = { };\n binding.descriptorType = useRawSsbo\n ? VK_DESCRIPTOR_TYPE_STORAGE_BUFFER\n : (isUav ? 
VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);\n binding.viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.resourceBinding = bindingId;\n binding.access = VK_ACCESS_SHADER_READ_BIT;\n\n if (isUav) {\n binding.access = m_analysis->uavInfos[registerId].accessFlags;\n\n if (!m_analysis->uavInfos[registerId].nonInvariantAccess)\n binding.accessOp = m_analysis->uavInfos[registerId].accessOp;\n }\n\n if (useRawSsbo || isUav) {\n if (!(binding.access & VK_ACCESS_SHADER_WRITE_BIT))\n m_module.decorate(varId, spv::DecorationNonWritable);\n if (!(binding.access & VK_ACCESS_SHADER_READ_BIT))\n m_module.decorate(varId, spv::DecorationNonReadable);\n }\n\n m_bindings.push_back(binding);\n\n // If supported, we'll be using raw access chains to access this\n if (!m_hasRawAccessChains && m_moduleInfo.options.supportsRawAccessChains) {\n m_module.enableExtension(\"SPV_NV_raw_access_chains\");\n m_module.enableCapability(spv::CapabilityRawAccessChainsNV);\n\n m_hasRawAccessChains = true;\n }\n }\n void emitDclThreadGroupSharedMemory(\n const DxbcShaderInstruction& ins) {\n // dcl_tgsm_raw takes two arguments:\n // (dst0) The resource register ID\n // (imm0) Block size, in bytes\n // dcl_tgsm_structured takes three arguments:\n // (dst0) The resource register ID\n // (imm0) Structure stride, in bytes\n // (imm1) Structure count\n const bool isStructured = ins.op == DxbcOpcode::DclThreadGroupSharedMemoryStructured;\n \n const uint32_t regId = ins.dst[0].idx[0].offset;\n \n if (regId >= m_gRegs.size())\n m_gRegs.resize(regId + 1);\n \n const uint32_t elementStride = isStructured ? ins.imm[0].u32 : 0;\n const uint32_t elementCount = isStructured ? ins.imm[1].u32 : ins.imm[0].u32;\n \n DxbcRegisterInfo varInfo;\n varInfo.type.ctype = DxbcScalarType::Uint32;\n varInfo.type.ccount = 1;\n varInfo.type.alength = isStructured\n ? 
elementCount * elementStride / 4\n : elementCount / 4;\n varInfo.sclass = spv::StorageClassWorkgroup;\n \n m_gRegs[regId].type = isStructured\n ? DxbcResourceType::Structured\n : DxbcResourceType::Raw;\n m_gRegs[regId].elementStride = elementStride;\n m_gRegs[regId].elementCount = elementCount;\n m_gRegs[regId].varId = emitNewVariable(varInfo);\n \n m_module.setDebugName(m_gRegs[regId].varId,\n str::format(\"g\", regId).c_str());\n }\n void emitDclGsInputPrimitive(\n const DxbcShaderInstruction& ins) {\n // The input primitive type is stored within in the\n // control bits of the opcode token. In SPIR-V, we\n // have to define an execution mode.\n const auto mode = [&] {\n switch (ins.controls.primitive()) {\n case DxbcPrimitive::Point: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_POINT_LIST, spv::ExecutionModeInputPoints);\n case DxbcPrimitive::Line: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeInputLines);\n case DxbcPrimitive::Triangle: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeTriangles);\n case DxbcPrimitive::LineAdj: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY, spv::ExecutionModeInputLinesAdjacency);\n case DxbcPrimitive::TriangleAdj: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY, spv::ExecutionModeInputTrianglesAdjacency);\n default: throw DxvkError(\"DxbcCompiler: Unsupported primitive type\");\n }\n }();\n\n m_gs.inputPrimitive = ins.controls.primitive();\n m_module.setExecutionMode(m_entryPointId, mode.second);\n m_inputTopology = mode.first;\n \n emitDclInputArray(primitiveVertexCount(m_gs.inputPrimitive));\n }\n void emitDclGsOutputTopology(\n const DxbcShaderInstruction& ins) {\n // The input primitive topology is stored within in the\n // control bits of the opcode token. 
In SPIR-V, we have\n // to define an execution mode.\n auto mode = [&] {\n switch (ins.controls.primitiveTopology()) {\n case DxbcPrimitiveTopology::PointList: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_POINT_LIST, spv::ExecutionModeOutputPoints);\n case DxbcPrimitiveTopology::LineStrip: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeOutputLineStrip);\n case DxbcPrimitiveTopology::TriangleStrip: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeOutputTriangleStrip);\n default: throw DxvkError(\"DxbcCompiler: Unsupported primitive topology\");\n }\n }();\n \n m_outputTopology = mode.first;\n m_module.setExecutionMode(m_entryPointId, mode.second);\n }\n void emitDclMaxOutputVertexCount(\n const DxbcShaderInstruction& ins) {\n // dcl_max_output_vertex_count has one operand:\n // (imm0) The maximum number of vertices\n m_gs.outputVertexCount = ins.imm[0].u32;\n \n m_module.setOutputVertices(m_entryPointId, m_gs.outputVertexCount);\n }\n void emitDclInputControlPointCount(\n const DxbcShaderInstruction& ins) {\n // dcl_input_control_points has the control point\n // count embedded within the opcode token.\n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n m_hs.vertexCountIn = ins.controls.controlPointCount();\n \n emitDclInputArray(m_hs.vertexCountIn); \n } else {\n m_ds.vertexCountIn = ins.controls.controlPointCount();\n \n m_ds.inputPerPatch = emitTessInterfacePerPatch (spv::StorageClassInput);\n m_ds.inputPerVertex = emitTessInterfacePerVertex(spv::StorageClassInput, m_ds.vertexCountIn);\n }\n }\n void emitDclOutputControlPointCount(\n const DxbcShaderInstruction& ins) {\n // dcl_output_control_points has the control point\n // count embedded within the opcode token.\n m_hs.vertexCountOut = ins.controls.controlPointCount();\n \n m_hs.outputPerPatch = emitTessInterfacePerPatch(spv::StorageClassPrivate);\n m_hs.outputPerVertex = emitTessInterfacePerVertex(spv::StorageClassOutput, m_hs.vertexCountOut);\n 
\n m_module.setOutputVertices(m_entryPointId, m_hs.vertexCountOut);\n }\n void emitDclHsMaxTessFactor(\n const DxbcShaderInstruction& ins) {\n m_hs.maxTessFactor = ins.imm[0].f32;\n }\n void emitDclTessDomain(\n const DxbcShaderInstruction& ins) {\n auto mode = [&] {\n switch (ins.controls.tessDomain()) {\n case DxbcTessDomain::Isolines: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_LINE_LIST, spv::ExecutionModeIsolines);\n case DxbcTessDomain::Triangles: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeTriangles);\n case DxbcTessDomain::Quads: return std::make_pair(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, spv::ExecutionModeQuads);\n default: throw DxvkError(\"Dxbc: Invalid tess domain\");\n }\n }();\n \n m_outputTopology = mode.first;\n m_module.setExecutionMode(m_entryPointId, mode.second);\n }\n void emitDclTessPartitioning(\n const DxbcShaderInstruction& ins) {\n const spv::ExecutionMode executionMode = [&] {\n switch (ins.controls.tessPartitioning()) {\n case DxbcTessPartitioning::Pow2:\n case DxbcTessPartitioning::Integer: return spv::ExecutionModeSpacingEqual;\n case DxbcTessPartitioning::FractOdd: return spv::ExecutionModeSpacingFractionalOdd;\n case DxbcTessPartitioning::FractEven: return spv::ExecutionModeSpacingFractionalEven;\n default: throw DxvkError(\"Dxbc: Invalid tess partitioning\");\n }\n }();\n \n m_module.setExecutionMode(m_entryPointId, executionMode);\n }\n void emitDclTessOutputPrimitive(\n const DxbcShaderInstruction& ins) {\n switch (ins.controls.tessOutputPrimitive()) {\n case DxbcTessOutputPrimitive::Point:\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModePointMode);\n break;\n \n case DxbcTessOutputPrimitive::Line:\n break;\n \n case DxbcTessOutputPrimitive::TriangleCw:\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeVertexOrderCw);\n break;\n \n case DxbcTessOutputPrimitive::TriangleCcw:\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeVertexOrderCcw);\n break;\n \n 
default:\n throw DxvkError(\"Dxbc: Invalid tess output primitive\");\n }\n }\n void emitDclThreadGroup(\n const DxbcShaderInstruction& ins) {\n // dcl_thread_group has three operands:\n // (imm0) Number of threads in X dimension\n // (imm1) Number of threads in Y dimension\n // (imm2) Number of threads in Z dimension\n m_cs.workgroupSizeX = ins.imm[0].u32;\n m_cs.workgroupSizeY = ins.imm[1].u32;\n m_cs.workgroupSizeZ = ins.imm[2].u32;\n\n m_module.setLocalSize(m_entryPointId,\n ins.imm[0].u32, ins.imm[1].u32, ins.imm[2].u32);\n }\n void emitDclGsInstanceCount(\n const DxbcShaderInstruction& ins) {\n // dcl_gs_instance_count has one operand:\n // (imm0) Number of geometry shader invocations\n m_module.setInvocations(m_entryPointId, ins.imm[0].u32);\n m_gs.invocationCount = ins.imm[0].u32;\n }\n uint32_t emitDclUavCounter(\n uint32_t regId) {\n // Declare a structure type which holds the UAV counter\n if (m_uavCtrStructType == 0) {\n const uint32_t t_u32 = m_module.defIntType(32, 0);\n const uint32_t t_struct = m_module.defStructTypeUnique(1, &t_u32);\n \n m_module.decorate(t_struct, spv::DecorationBlock);\n m_module.memberDecorateOffset(t_struct, 0, 0);\n \n m_module.setDebugName (t_struct, \"uav_meta\");\n m_module.setDebugMemberName(t_struct, 0, \"ctr\");\n \n m_uavCtrStructType = t_struct;\n m_uavCtrPointerType = m_module.defPointerType(\n t_struct, spv::StorageClassStorageBuffer);\n }\n \n // Declare the buffer variable\n const uint32_t varId = m_module.newVar(\n m_uavCtrPointerType, spv::StorageClassStorageBuffer);\n \n m_module.setDebugName(varId,\n str::format(\"u\", regId, \"_meta\").c_str());\n \n uint32_t bindingId = computeUavCounterBinding(\n m_programInfo.type(), regId);\n \n m_module.decorateDescriptorSet(varId, 0);\n m_module.decorateBinding(varId, bindingId);\n \n // Declare the storage buffer binding\n DxvkBindingInfo binding = { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER };\n binding.resourceBinding = bindingId;\n binding.viewType = 
VK_IMAGE_VIEW_TYPE_MAX_ENUM;\n binding.access = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n m_bindings.push_back(binding);\n\n return varId;\n }\n void emitDclImmediateConstantBuffer(\n const DxbcShaderInstruction& ins) {\n if (m_icbArray)\n throw DxvkError(\"DxbcCompiler: Immediate constant buffer already declared\");\n \n if ((ins.customDataSize & 0x3) != 0)\n throw DxvkError(\"DxbcCompiler: Immediate constant buffer size not a multiple of four DWORDs\");\n\n // A lot of the time we'll be dealing with a scalar or vec2\n // array here, there's no reason to emit all those zeroes.\n uint32_t componentCount = 1u;\n\n for (uint32_t i = 0; i < ins.customDataSize; i += 4u) {\n for (uint32_t c = componentCount; c < 4u; c++) {\n if (ins.customData[i + c])\n componentCount = c + 1u;\n }\n\n if (componentCount == 4u)\n break;\n }\n\n uint32_t vectorCount = (ins.customDataSize / 4u);\n uint32_t dwordCount = vectorCount * componentCount;\n\n if (dwordCount <= Icb_MaxBakedDwords) {\n this->emitDclImmediateConstantBufferBaked(\n ins.customDataSize, ins.customData, componentCount);\n } else {\n this->emitDclImmediateConstantBufferUbo(\n ins.customDataSize, ins.customData, componentCount);\n }\n }\n void emitDclImmediateConstantBufferBaked(\n uint32_t dwordCount,\n const uint32_t* dwordArray,\n uint32_t componentCount) {\n // Declare individual vector constants as 4x32-bit vectors\n small_vector vectorIds;\n \n DxbcVectorType vecType;\n vecType.ctype = DxbcScalarType::Uint32;\n vecType.ccount = componentCount;\n \n uint32_t vectorTypeId = getVectorTypeId(vecType);\n \n for (uint32_t i = 0; i < dwordCount; i += 4u) {\n std::array scalarIds = { };\n\n for (uint32_t c = 0; c < componentCount; c++)\n scalarIds[c] = m_module.constu32(dwordArray[i + c]);\n\n uint32_t id = scalarIds[0];\n\n if (componentCount > 1u)\n id = m_module.constComposite(vectorTypeId, componentCount, scalarIds.data());\n\n vectorIds.push_back(id);\n }\n\n // Pad array with one entry of zeroes so 
that we can\n // handle out-of-bounds accesses more conveniently.\n vectorIds.push_back(emitBuildZeroVector(vecType).id);\n\n // Declare the array that contains all the vectors\n DxbcArrayType arrInfo;\n arrInfo.ctype = DxbcScalarType::Uint32;\n arrInfo.ccount = componentCount;\n arrInfo.alength = vectorIds.size();\n\n uint32_t arrayTypeId = getArrayTypeId(arrInfo);\n uint32_t arrayId = m_module.constComposite(\n arrayTypeId, vectorIds.size(), vectorIds.data());\n\n // Declare the variable that will hold the constant\n // data and initialize it with the constant array.\n uint32_t pointerTypeId = m_module.defPointerType(\n arrayTypeId, spv::StorageClassPrivate);\n\n m_icbArray = m_module.newVarInit(\n pointerTypeId, spv::StorageClassPrivate,\n arrayId);\n\n m_module.setDebugName(m_icbArray, \"icb\");\n m_module.decorate(m_icbArray, spv::DecorationNonWritable);\n\n m_icbComponents = componentCount;\n m_icbSize = dwordCount / 4u;\n }\n void emitDclImmediateConstantBufferUbo(\n uint32_t dwordCount,\n const uint32_t* dwordArray,\n uint32_t componentCount) {\n uint32_t vectorCount = dwordCount / 4u;\n\n // Tightly pack vec2 or scalar arrays if possible. 
Don't bother with\n // vec3 since we'd rather have properly vectorized loads in that case.\n if (m_moduleInfo.options.supportsTightIcbPacking && componentCount <= 2u)\n m_icbComponents = componentCount;\n else\n m_icbComponents = 4u;\n\n // Immediate constant buffer can be read out of bounds, declare\n // it with the maximum possible size and rely on robustness.\n this->emitDclConstantBufferVar(Icb_BindingSlotId, 4096u, m_icbComponents, \"icb\");\n\n m_icbData.reserve(vectorCount * componentCount);\n\n for (uint32_t i = 0; i < dwordCount; i += 4u) {\n for (uint32_t c = 0; c < m_icbComponents; c++)\n m_icbData.push_back(dwordArray[i + c]);\n }\n\n m_icbSize = vectorCount;\n }\n void emitCustomData(\n const DxbcShaderInstruction& ins) {\n switch (ins.customDataType) {\n case DxbcCustomDataClass::ImmConstBuf:\n return emitDclImmediateConstantBuffer(ins);\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unsupported custom data block: \",\n ins.customDataType));\n }\n }\n void emitVectorAlu(\n const DxbcShaderInstruction& ins) {\n std::array src;\n \n for (uint32_t i = 0; i < ins.srcCount; i++)\n src.at(i) = emitRegisterLoad(ins.src[i], ins.dst[0].mask);\n \n DxbcRegisterValue dst;\n dst.type.ctype = ins.dst[0].dataType;\n dst.type.ccount = ins.dst[0].mask.popCount();\n\n if (isDoubleType(ins.dst[0].dataType))\n dst.type.ccount /= 2;\n \n const uint32_t typeId = getVectorTypeId(dst.type);\n \n switch (ins.op) {\n /////////////////////\n // Move instructions\n case DxbcOpcode::Mov:\n case DxbcOpcode::DMov:\n dst.id = src.at(0).id;\n break;\n \n /////////////////////////////////////\n // ALU operations on float32 numbers\n case DxbcOpcode::Add:\n case DxbcOpcode::DAdd:\n dst.id = m_module.opFAdd(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Div:\n case DxbcOpcode::DDiv:\n dst.id = m_module.opFDiv(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Exp:\n dst.id = m_module.opExp2(\n typeId, src.at(0).id);\n break;\n \n 
case DxbcOpcode::Frc:\n dst.id = m_module.opFract(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::Log:\n dst.id = m_module.opLog2(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::Mad:\n case DxbcOpcode::DFma:\n if (ins.controls.precise()) {\n // FXC only emits precise mad if the shader explicitly uses\n // the HLSL mad()/fma() intrinsics, let's preserve that.\n dst.id = m_module.opFFma(typeId,\n src.at(0).id, src.at(1).id, src.at(2).id);\n } else {\n dst.id = m_module.opFMul(typeId, src.at(0).id, src.at(1).id);\n dst.id = m_module.opFAdd(typeId, dst.id, src.at(2).id);\n }\n break;\n \n case DxbcOpcode::Max:\n case DxbcOpcode::DMax:\n dst.id = m_module.opNMax(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Min:\n case DxbcOpcode::DMin:\n dst.id = m_module.opNMin(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Mul:\n case DxbcOpcode::DMul:\n dst.id = m_module.opFMul(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Rcp:\n dst.id = m_module.opFDiv(typeId,\n emitBuildConstVecf32(\n 1.0f, 1.0f, 1.0f, 1.0f,\n ins.dst[0].mask).id,\n src.at(0).id);\n break;\n \n case DxbcOpcode::DRcp:\n dst.id = m_module.opFDiv(typeId,\n emitBuildConstVecf64(1.0, 1.0,\n ins.dst[0].mask).id,\n src.at(0).id);\n break;\n \n case DxbcOpcode::RoundNe:\n dst.id = m_module.opRoundEven(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::RoundNi:\n dst.id = m_module.opFloor(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::RoundPi:\n dst.id = m_module.opCeil(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::RoundZ:\n dst.id = m_module.opTrunc(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::Rsq:\n dst.id = m_module.opInverseSqrt(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::Sqrt:\n dst.id = m_module.opSqrt(\n typeId, src.at(0).id);\n break;\n \n /////////////////////////////////////\n // ALU operations on signed integers\n case DxbcOpcode::IAdd:\n dst.id = m_module.opIAdd(typeId,\n 
src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::IMad:\n case DxbcOpcode::UMad:\n dst.id = m_module.opIAdd(typeId,\n m_module.opIMul(typeId,\n src.at(0).id, src.at(1).id),\n src.at(2).id);\n break;\n \n case DxbcOpcode::IMax:\n dst.id = m_module.opSMax(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::IMin:\n dst.id = m_module.opSMin(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::INeg:\n dst.id = m_module.opSNegate(\n typeId, src.at(0).id);\n break;\n \n ///////////////////////////////////////\n // ALU operations on unsigned integers\n case DxbcOpcode::UMax:\n dst.id = m_module.opUMax(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::UMin:\n dst.id = m_module.opUMin(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n ///////////////////////////////////////\n // Bit operations on unsigned integers\n case DxbcOpcode::And:\n dst.id = m_module.opBitwiseAnd(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Not:\n dst.id = m_module.opNot(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::Or:\n dst.id = m_module.opBitwiseOr(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Xor:\n dst.id = m_module.opBitwiseXor(typeId,\n src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::CountBits:\n dst.id = m_module.opBitCount(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::BfRev:\n dst.id = m_module.opBitReverse(\n typeId, src.at(0).id);\n break;\n \n ///////////////////////////\n // Conversion instructions\n case DxbcOpcode::ItoF:\n dst.id = m_module.opConvertStoF(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::UtoF:\n dst.id = m_module.opConvertUtoF(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::FtoI:\n dst.id = m_module.opConvertFtoS(\n typeId, src.at(0).id);\n break;\n \n case DxbcOpcode::FtoU:\n dst.id = m_module.opConvertFtoU(\n typeId, src.at(0).id);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: 
Unhandled instruction: \",\n ins.op));\n return;\n }\n \n if (ins.controls.precise() || m_precise)\n m_module.decorate(dst.id, spv::DecorationNoContraction);\n \n // Store computed value\n dst = emitDstOperandModifiers(dst, ins.modifiers);\n emitRegisterStore(ins.dst[0], dst);\n }\n void emitVectorCmov(\n const DxbcShaderInstruction& ins) {\n // movc and swapc have the following operands:\n // (dst0) The first destination register\n // (dst1) The second destination register (swapc only)\n // (src0) The condition vector\n // (src1) Vector to select from if the condition is not 0\n // (src2) Vector to select from if the condition is 0\n DxbcRegMask condMask = ins.dst[0].mask;\n\n if (ins.dst[0].dataType == DxbcScalarType::Float64) {\n condMask = DxbcRegMask(\n condMask[0] && condMask[1],\n condMask[2] && condMask[3],\n false, false);\n }\n \n const DxbcRegisterValue condition = emitRegisterLoad(ins.src[0], condMask);\n const DxbcRegisterValue selectTrue = emitRegisterLoad(ins.src[1], ins.dst[0].mask);\n const DxbcRegisterValue selectFalse = emitRegisterLoad(ins.src[2], ins.dst[0].mask);\n \n uint32_t componentCount = condMask.popCount();\n \n // We'll compare against a vector of zeroes to generate a\n // boolean vector, which in turn will be used by OpSelect\n uint32_t zeroType = m_module.defIntType(32, 0);\n uint32_t boolType = m_module.defBoolType();\n \n uint32_t zero = m_module.constu32(0);\n \n if (componentCount > 1) {\n zeroType = m_module.defVectorType(zeroType, componentCount);\n boolType = m_module.defVectorType(boolType, componentCount);\n \n const std::array zeroVec = { zero, zero, zero, zero };\n zero = m_module.constComposite(zeroType, componentCount, zeroVec.data());\n }\n \n // In case of swapc, the second destination operand receives\n // the output that a cmov instruction would normally get\n const uint32_t trueIndex = ins.op == DxbcOpcode::Swapc ? 
1 : 0;\n \n for (uint32_t i = 0; i < ins.dstCount; i++) {\n DxbcRegisterValue result;\n result.type.ctype = ins.dst[i].dataType;\n result.type.ccount = componentCount;\n result.id = m_module.opSelect(\n getVectorTypeId(result.type),\n m_module.opINotEqual(boolType, condition.id, zero),\n i == trueIndex ? selectTrue.id : selectFalse.id,\n i != trueIndex ? selectTrue.id : selectFalse.id);\n \n result = emitDstOperandModifiers(result, ins.modifiers);\n emitRegisterStore(ins.dst[i], result);\n }\n }\n void emitVectorCmp(\n const DxbcShaderInstruction& ins) {\n // Compare instructions have three operands:\n // (dst0) The destination register\n // (src0) The first vector to compare\n // (src1) The second vector to compare\n uint32_t componentCount = ins.dst[0].mask.popCount();\n\n // For 64-bit operations, we'll return a 32-bit\n // vector, so we have to adjust the read mask\n DxbcRegMask srcMask = ins.dst[0].mask;\n\n if (isDoubleType(ins.src[0].dataType)) {\n srcMask = DxbcRegMask(\n componentCount > 0, componentCount > 0,\n componentCount > 1, componentCount > 1);\n }\n\n const std::array src = {\n emitRegisterLoad(ins.src[0], srcMask),\n emitRegisterLoad(ins.src[1], srcMask),\n };\n \n // Condition, which is a boolean vector used\n // to select between the ~0u and 0u vectors.\n uint32_t condition = 0;\n uint32_t conditionType = m_module.defBoolType();\n \n if (componentCount > 1)\n conditionType = m_module.defVectorType(conditionType, componentCount);\n \n bool invert = false;\n\n switch (ins.op) {\n case DxbcOpcode::Ne:\n case DxbcOpcode::DNe:\n invert = true;\n [[fallthrough]];\n\n case DxbcOpcode::Eq:\n case DxbcOpcode::DEq:\n condition = m_module.opFOrdEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Ge:\n case DxbcOpcode::DGe:\n condition = m_module.opFOrdGreaterThanEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::Lt:\n case DxbcOpcode::DLt:\n condition = m_module.opFOrdLessThan(\n 
conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::IEq:\n condition = m_module.opIEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::IGe:\n condition = m_module.opSGreaterThanEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::ILt:\n condition = m_module.opSLessThan(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::INe:\n condition = m_module.opINotEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::UGe:\n condition = m_module.opUGreaterThanEqual(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n case DxbcOpcode::ULt:\n condition = m_module.opULessThan(\n conditionType, src.at(0).id, src.at(1).id);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n \n // Generate constant vectors for selection\n uint32_t sFalse = m_module.constu32( 0u);\n uint32_t sTrue = m_module.constu32(~0u);\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = componentCount;\n \n const uint32_t typeId = getVectorTypeId(result.type);\n \n if (componentCount > 1) {\n const std::array vFalse = { sFalse, sFalse, sFalse, sFalse };\n const std::array vTrue = { sTrue, sTrue, sTrue, sTrue };\n \n sFalse = m_module.constComposite(typeId, componentCount, vFalse.data());\n sTrue = m_module.constComposite(typeId, componentCount, vTrue .data());\n }\n \n if (invert)\n std::swap(sFalse, sTrue);\n\n // Perform component-wise mask selection\n // based on the condition evaluated above.\n result.id = m_module.opSelect(\n typeId, condition, sTrue, sFalse);\n \n emitRegisterStore(ins.dst[0], result);\n }\n void emitVectorDeriv(\n const DxbcShaderInstruction& ins) {\n // Derivative instructions have two operands:\n // (dst0) Destination register for the derivative\n // (src0) The operand to compute the derivative of\n DxbcRegisterValue 
value = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n const uint32_t typeId = getVectorTypeId(value.type);\n \n switch (ins.op) {\n case DxbcOpcode::DerivRtx:\n value.id = m_module.opDpdx(typeId, value.id);\n break;\n \n case DxbcOpcode::DerivRty:\n value.id = m_module.opDpdy(typeId, value.id);\n break;\n \n case DxbcOpcode::DerivRtxCoarse:\n value.id = m_module.opDpdxCoarse(typeId, value.id);\n break;\n \n case DxbcOpcode::DerivRtyCoarse:\n value.id = m_module.opDpdyCoarse(typeId, value.id);\n break;\n \n case DxbcOpcode::DerivRtxFine:\n value.id = m_module.opDpdxFine(typeId, value.id);\n break;\n \n case DxbcOpcode::DerivRtyFine:\n value.id = m_module.opDpdyFine(typeId, value.id);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n \n value = emitDstOperandModifiers(value, ins.modifiers);\n emitRegisterStore(ins.dst[0], value);\n }\n void emitVectorDot(\n const DxbcShaderInstruction& ins) {\n const DxbcRegMask srcMask(true,\n ins.op >= DxbcOpcode::Dp2,\n ins.op >= DxbcOpcode::Dp3,\n ins.op >= DxbcOpcode::Dp4);\n \n const std::array src = {\n emitRegisterLoad(ins.src[0], srcMask),\n emitRegisterLoad(ins.src[1], srcMask),\n };\n \n DxbcRegisterValue dst;\n dst.type.ctype = ins.dst[0].dataType;\n dst.type.ccount = 1;\n dst.id = 0;\n\n uint32_t componentType = getVectorTypeId(dst.type);\n uint32_t componentCount = srcMask.popCount();\n\n for (uint32_t i = 0; i < componentCount; i++) {\n if (dst.id) {\n dst.id = m_module.opFFma(componentType,\n m_module.opCompositeExtract(componentType, src.at(0).id, 1, &i),\n m_module.opCompositeExtract(componentType, src.at(1).id, 1, &i),\n dst.id);\n } else {\n dst.id = m_module.opFMul(componentType,\n m_module.opCompositeExtract(componentType, src.at(0).id, 1, &i),\n m_module.opCompositeExtract(componentType, src.at(1).id, 1, &i));\n }\n\n // Unconditionally mark as precise since the exact order of operation\n // matters for some games, even if the 
instruction itself is not marked\n // as precise.\n m_module.decorate(dst.id, spv::DecorationNoContraction);\n }\n\n dst = emitDstOperandModifiers(dst, ins.modifiers);\n emitRegisterStore(ins.dst[0], dst);\n }\n void emitVectorIdiv(\n const DxbcShaderInstruction& ins) {\n // udiv has four operands:\n // (dst0) Quotient destination register\n // (dst1) Remainder destination register\n // (src0) The first vector to compare\n // (src1) The second vector to compare\n if (ins.dst[0].type == DxbcOperandType::Null\n && ins.dst[1].type == DxbcOperandType::Null)\n return;\n \n // FIXME support this if applications require it\n if (ins.dst[0].type != DxbcOperandType::Null\n && ins.dst[1].type != DxbcOperandType::Null\n && ins.dst[0].mask != ins.dst[1].mask) {\n Logger::warn(\"DxbcCompiler: Idiv with different destination masks not supported\");\n return;\n }\n \n // Load source operands as integers with the\n // mask of one non-NULL destination operand\n const DxbcRegMask srcMask =\n ins.dst[0].type != DxbcOperandType::Null\n ? 
ins.dst[0].mask\n : ins.dst[1].mask;\n \n const std::array src = {\n emitRegisterLoad(ins.src[0], srcMask),\n emitRegisterLoad(ins.src[1], srcMask),\n };\n \n // Division by zero will return 0xffffffff for both results\n auto bvecId = getVectorTypeId({ DxbcScalarType::Bool, srcMask.popCount() });\n\n DxbcRegisterValue const0 = emitBuildConstVecu32( 0u, 0u, 0u, 0u, srcMask);\n DxbcRegisterValue constff = emitBuildConstVecu32(~0u, ~0u, ~0u, ~0u, srcMask);\n\n uint32_t cmpValue = m_module.opINotEqual(bvecId, src.at(1).id, const0.id);\n\n // Compute results only if the destination\n // operands are not NULL.\n if (ins.dst[0].type != DxbcOperandType::Null) {\n DxbcRegisterValue quotient;\n quotient.type.ctype = ins.dst[0].dataType;\n quotient.type.ccount = ins.dst[0].mask.popCount();\n \n quotient.id = m_module.opUDiv(\n getVectorTypeId(quotient.type),\n src.at(0).id, src.at(1).id);\n\n quotient.id = m_module.opSelect(\n getVectorTypeId(quotient.type),\n cmpValue, quotient.id, constff.id);\n \n quotient = emitDstOperandModifiers(quotient, ins.modifiers);\n emitRegisterStore(ins.dst[0], quotient);\n }\n \n if (ins.dst[1].type != DxbcOperandType::Null) {\n DxbcRegisterValue remainder;\n remainder.type.ctype = ins.dst[1].dataType;\n remainder.type.ccount = ins.dst[1].mask.popCount();\n \n remainder.id = m_module.opUMod(\n getVectorTypeId(remainder.type),\n src.at(0).id, src.at(1).id);\n\n remainder.id = m_module.opSelect(\n getVectorTypeId(remainder.type),\n cmpValue, remainder.id, constff.id);\n \n remainder = emitDstOperandModifiers(remainder, ins.modifiers);\n emitRegisterStore(ins.dst[1], remainder);\n }\n }\n void emitVectorImul(\n const DxbcShaderInstruction& ins) {\n // imul and umul have four operands:\n // (dst0) High destination register\n // (dst1) Low destination register\n // (src0) The first vector to compare\n // (src1) The second vector to compare\n if (ins.dst[0].type == DxbcOperandType::Null) {\n if (ins.dst[1].type == DxbcOperandType::Null)\n return;\n 
\n // If dst0 is NULL, this instruction behaves just\n // like any other three-operand ALU instruction\n const std::array src = {\n emitRegisterLoad(ins.src[0], ins.dst[1].mask),\n emitRegisterLoad(ins.src[1], ins.dst[1].mask),\n };\n \n DxbcRegisterValue result;\n result.type.ctype = ins.dst[1].dataType;\n result.type.ccount = ins.dst[1].mask.popCount();\n result.id = m_module.opIMul(\n getVectorTypeId(result.type),\n src.at(0).id, src.at(1).id);\n \n result = emitDstOperandModifiers(result, ins.modifiers);\n emitRegisterStore(ins.dst[1], result);\n } else {\n // TODO implement this\n Logger::warn(\"DxbcCompiler: Extended Imul not yet supported\");\n }\n }\n void emitVectorMsad(\n const DxbcShaderInstruction& ins) {\n // msad has four operands:\n // (dst0) Destination\n // (src0) Reference (packed uint8)\n // (src1) Source (packed uint8)\n // (src2) Accumulator\n DxbcRegisterValue refReg = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n DxbcRegisterValue srcReg = emitRegisterLoad(ins.src[1], ins.dst[0].mask);\n DxbcRegisterValue result = emitRegisterLoad(ins.src[2], ins.dst[0].mask);\n \n auto typeId = getVectorTypeId(result.type);\n auto bvecId = getVectorTypeId({ DxbcScalarType::Bool, result.type.ccount });\n\n for (uint32_t i = 0; i < 4; i++) {\n auto shift = m_module.constu32(8 * i);\n auto count = m_module.constu32(8);\n\n auto ref = m_module.opBitFieldUExtract(typeId, refReg.id, shift, count);\n auto src = m_module.opBitFieldUExtract(typeId, srcReg.id, shift, count);\n\n auto zero = emitBuildConstVecu32(0, 0, 0, 0, ins.dst[0].mask);\n auto mask = m_module.opINotEqual(bvecId, ref, zero.id);\n\n auto diff = m_module.opSAbs(typeId, m_module.opISub(typeId, ref, src));\n result.id = m_module.opSelect(typeId, mask, m_module.opIAdd(typeId, result.id, diff), result.id);\n }\n\n result = emitDstOperandModifiers(result, ins.modifiers);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitVectorShift(\n const DxbcShaderInstruction& ins) {\n // Shift operations have 
three operands:\n // (dst0) The destination register\n // (src0) The register to shift\n // (src1) The shift amount (scalar)\n DxbcRegisterValue shiftReg = emitRegisterLoad(ins.src[0], ins.dst[0].mask);\n DxbcRegisterValue countReg = emitRegisterLoad(ins.src[1], ins.dst[0].mask);\n\n if (ins.src[1].type != DxbcOperandType::Imm32)\n countReg = emitRegisterMaskBits(countReg, 0x1F);\n \n if (countReg.type.ccount == 1)\n countReg = emitRegisterExtend(countReg, shiftReg.type.ccount);\n \n DxbcRegisterValue result;\n result.type.ctype = ins.dst[0].dataType;\n result.type.ccount = ins.dst[0].mask.popCount();\n \n switch (ins.op) {\n case DxbcOpcode::IShl:\n result.id = m_module.opShiftLeftLogical(\n getVectorTypeId(result.type),\n shiftReg.id, countReg.id);\n break;\n \n case DxbcOpcode::IShr:\n result.id = m_module.opShiftRightArithmetic(\n getVectorTypeId(result.type),\n shiftReg.id, countReg.id);\n break;\n \n case DxbcOpcode::UShr:\n result.id = m_module.opShiftRightLogical(\n getVectorTypeId(result.type),\n shiftReg.id, countReg.id);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n \n result = emitDstOperandModifiers(result, ins.modifiers);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitVectorSinCos(\n const DxbcShaderInstruction& ins) {\n // sincos has three operands:\n // (dst0) Destination register for sin(x)\n // (dst1) Destination register for cos(x)\n // (src0) Source operand x\n \n // Load source operand as 32-bit float vector.\n const DxbcRegisterValue srcValue = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, true, true, true));\n\n uint32_t typeId = getScalarTypeId(srcValue.type.ctype);\n\n DxbcRegisterValue sinVector = { };\n sinVector.type.ctype = DxbcScalarType::Float32;\n\n DxbcRegisterValue cosVector = { };\n cosVector.type.ctype = DxbcScalarType::Float32;\n\n // Only compute sincos for enabled components\n std::array sinIds = { };\n std::array cosIds = { };\n\n for 
(uint32_t i = 0; i < 4; i++) {\n const uint32_t sinIndex = 0u;\n const uint32_t cosIndex = 1u;\n\n if (ins.dst[0].mask[i] || ins.dst[1].mask[i]) {\n uint32_t sincosId = m_module.opSinCos(m_module.opCompositeExtract(typeId, srcValue.id, 1u, &i), !m_moduleInfo.options.sincosEmulation);\n\n if (ins.dst[0].type != DxbcOperandType::Null && ins.dst[0].mask[i])\n sinIds[sinVector.type.ccount++] = m_module.opCompositeExtract(typeId, sincosId, 1u, &sinIndex);\n\n if (ins.dst[1].type != DxbcOperandType::Null && ins.dst[1].mask[i])\n cosIds[cosVector.type.ccount++] = m_module.opCompositeExtract(typeId, sincosId, 1u, &cosIndex);\n }\n }\n\n if (sinVector.type.ccount) {\n sinVector.id = sinVector.type.ccount > 1u\n ? m_module.opCompositeConstruct(getVectorTypeId(sinVector.type), sinVector.type.ccount, sinIds.data())\n : sinIds[0];\n\n emitRegisterStore(ins.dst[0], sinVector);\n }\n\n if (cosVector.type.ccount) {\n cosVector.id = cosVector.type.ccount > 1u\n ? m_module.opCompositeConstruct(getVectorTypeId(cosVector.type), cosVector.type.ccount, cosIds.data())\n : cosIds[0];\n\n emitRegisterStore(ins.dst[1], cosVector);\n }\n }\n void emitGeometryEmit(\n const DxbcShaderInstruction& ins) {\n // In xfb mode we might have multiple streams, so\n // we have to figure out which stream to write to\n uint32_t streamId = 0;\n uint32_t streamVar = 0;\n\n if (m_moduleInfo.xfb != nullptr) {\n streamId = ins.dstCount > 0 ? 
ins.dst[0].idx[0].offset : 0;\n streamVar = m_module.constu32(streamId);\n }\n\n // Checking the negation is easier for EmitThenCut/EmitThenCutStream\n bool doEmit = ins.op != DxbcOpcode::Cut && ins.op != DxbcOpcode::CutStream;\n bool doCut = ins.op != DxbcOpcode::Emit && ins.op != DxbcOpcode::EmitStream;\n\n if (doEmit) {\n if (m_gs.needsOutputSetup)\n emitOutputSetup();\n emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);\n emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);\n emitXfbOutputSetup(streamId, false);\n m_module.opEmitVertex(streamVar);\n }\n\n if (doCut)\n m_module.opEndPrimitive(streamVar);\n }\n void emitAtomic(\n const DxbcShaderInstruction& ins) {\n // atomic_* operations have the following operands:\n // (dst0) Destination u# or g# register\n // (src0) Index into the texture or buffer\n // (src1) The source value for the operation\n // (src2) Second source operand (optional)\n // imm_atomic_* operations have the following operands:\n // (dst0) Register that receives the result\n // (dst1) Destination u# or g# register\n // (srcX) As above\n const DxbcBufferInfo bufferInfo = getBufferInfo(ins.dst[ins.dstCount - 1]);\n \n bool isImm = ins.dstCount == 2;\n bool isUav = ins.dst[ins.dstCount - 1].type == DxbcOperandType::UnorderedAccessView;\n bool isSsbo = bufferInfo.isSsbo;\n\n // Retrieve destination pointer for the atomic operation>\n const DxbcRegisterPointer pointer = emitGetAtomicPointer(\n ins.dst[ins.dstCount - 1], ins.src[0]);\n \n // Load source values\n std::array src;\n \n for (uint32_t i = 1; i < ins.srcCount; i++) {\n src[i - 1] = emitRegisterBitcast(\n emitRegisterLoad(ins.src[i], DxbcRegMask(true, false, false, false)),\n pointer.type.ctype);\n }\n \n // Define memory scope and semantics based on the operands\n uint32_t scope = 0;\n uint32_t semantics = 0;\n \n if (isUav) {\n scope = spv::ScopeQueueFamily;\n semantics = spv::MemorySemanticsAcquireReleaseMask;\n\n semantics |= isSsbo\n ? 
            spv::MemorySemanticsUniformMemoryMask
          : spv::MemorySemanticsImageMemoryMask;
    } else {
      // TGSM atomics: workgroup scope is sufficient for shared memory
      scope     = spv::ScopeWorkgroup;
      semantics = spv::MemorySemanticsWorkgroupMemoryMask
                | spv::MemorySemanticsAcquireReleaseMask;
    }

    const uint32_t scopeId = m_module.constu32(scope);
    const uint32_t semanticsId = m_module.constu32(semantics);

    // Perform the atomic operation on the given pointer
    DxbcRegisterValue value;
    value.type = pointer.type;
    value.id = 0;

    // The result type, which is a scalar integer
    const uint32_t typeId = getVectorTypeId(value.type);

    switch (ins.op) {
      // Compare-exchange: per the call below, src[0] is the comparator and
      // src[1] the replacement value; the 'unequal' memory semantics are None.
      case DxbcOpcode::AtomicCmpStore:
      case DxbcOpcode::ImmAtomicCmpExch:
        value.id = m_module.opAtomicCompareExchange(
          typeId, pointer.id, scopeId, semanticsId,
          m_module.constu32(spv::MemorySemanticsMaskNone),
          src[1].id, src[0].id);
        break;

      case DxbcOpcode::ImmAtomicExch:
        value.id = m_module.opAtomicExchange(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicIAdd:
      case DxbcOpcode::ImmAtomicIAdd:
        value.id = m_module.opAtomicIAdd(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicAnd:
      case DxbcOpcode::ImmAtomicAnd:
        value.id = m_module.opAtomicAnd(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicOr:
      case DxbcOpcode::ImmAtomicOr:
        value.id = m_module.opAtomicOr(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicXor:
      case DxbcOpcode::ImmAtomicXor:
        value.id = m_module.opAtomicXor(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      // IMin/IMax are signed in DXBC, hence SMin/SMax in SPIR-V
      case DxbcOpcode::AtomicIMin:
      case DxbcOpcode::ImmAtomicIMin:
        value.id = m_module.opAtomicSMin(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicIMax:
      case DxbcOpcode::ImmAtomicIMax:
        value.id = m_module.opAtomicSMax(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicUMin:
      case DxbcOpcode::ImmAtomicUMin:
        value.id = m_module.opAtomicUMin(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      case DxbcOpcode::AtomicUMax:
      case DxbcOpcode::ImmAtomicUMax:
        value.id = m_module.opAtomicUMax(typeId,
          pointer.id, scopeId, semanticsId,
          src[0].id);
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    // Write back the result to the destination
    // register if this is an imm_atomic_* opcode.
    if (isImm)
      emitRegisterStore(ins.dst[0], value);
  }

  // Handles imm_atomic_alloc / imm_atomic_consume on a UAV's hidden counter
  void emitAtomicCounter(
    const DxbcShaderInstruction& ins) {
    // imm_atomic_alloc and imm_atomic_consume have the following operands:
    // (dst0) The register that will hold the old counter value
    // (dst1) The UAV whose counter is going to be modified
    const uint32_t registerId = ins.dst[1].idx[0].offset;

    // Lazily declare the counter buffer on first use
    if (m_uavs.at(registerId).ctrId == 0)
      m_uavs.at(registerId).ctrId = emitDclUavCounter(registerId);

    // Get a pointer to the atomic counter in question
    DxbcRegisterInfo ptrType;
    ptrType.type.ctype = DxbcScalarType::Uint32;
    ptrType.type.ccount = 1;
    ptrType.type.alength = 0;
    ptrType.sclass = spv::StorageClassStorageBuffer;

    uint32_t zeroId = m_module.consti32(0);
    uint32_t ptrId = m_module.opAccessChain(
      getPointerTypeId(ptrType),
      m_uavs.at(registerId).ctrId,
      1, &zeroId);

    // Define memory scope and semantics based on the operands
    uint32_t scope = spv::ScopeQueueFamily;
    uint32_t semantics = spv::MemorySemanticsUniformMemoryMask
                       | spv::MemorySemanticsAcquireReleaseMask;

    uint32_t scopeId = m_module.constu32(scope);
    uint32_t semanticsId = m_module.constu32(semantics);

    // Compute the result value
    DxbcRegisterValue value;
    value.type.ctype = DxbcScalarType::Uint32;
    value.type.ccount = 1;

    uint32_t typeId = getVectorTypeId(value.type);

    switch (ins.op) {
      // alloc returns the pre-increment value
      case DxbcOpcode::ImmAtomicAlloc:
        value.id =
          m_module.opAtomicIAdd(typeId, ptrId,
          scopeId, semanticsId, m_module.constu32(1));
        break;

      // consume: opAtomicISub returns the pre-decrement value, so subtract
      // one more to hand back the post-decrement count, as D3D requires
      case DxbcOpcode::ImmAtomicConsume:
        value.id = m_module.opAtomicISub(typeId, ptrId,
          scopeId, semanticsId, m_module.constu32(1));
        value.id = m_module.opISub(typeId, value.id,
          m_module.constu32(1));
        break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    // Store the result
    emitRegisterStore(ins.dst[0], value);
  }

  // Emits control and/or memory barriers for the 'sync' instruction
  void emitBarrier(
    const DxbcShaderInstruction& ins) {
    // sync takes no operands. Instead, the synchronization
    // scope is defined by the operand control bits.
    const DxbcSyncFlags flags = ins.controls.syncFlags();

    uint32_t executionScope  = spv::ScopeInvocation;
    uint32_t memoryScope     = spv::ScopeInvocation;
    uint32_t memorySemantics = 0;

    if (flags.test(DxbcSyncFlag::ThreadsInGroup))
      executionScope = spv::ScopeWorkgroup;

    if (flags.test(DxbcSyncFlag::ThreadGroupSharedMemory)) {
      memoryScope = spv::ScopeWorkgroup;
      memorySemantics |= spv::MemorySemanticsWorkgroupMemoryMask
                      | spv::MemorySemanticsAcquireReleaseMask
                      | spv::MemorySemanticsMakeAvailableMask
                      | spv::MemorySemanticsMakeVisibleMask;
    }

    if (flags.test(DxbcSyncFlag::UavMemoryGroup)) {
      memoryScope = spv::ScopeWorkgroup;
      memorySemantics |= spv::MemorySemanticsImageMemoryMask
                      | spv::MemorySemanticsUniformMemoryMask
                      | spv::MemorySemanticsAcquireReleaseMask
                      | spv::MemorySemanticsMakeAvailableMask
                      | spv::MemorySemanticsMakeVisibleMask;
    }

    if (flags.test(DxbcSyncFlag::UavMemoryGlobal)) {
      memoryScope = spv::ScopeQueueFamily;

      // Compute shaders without globally coherent UAVs can get
      // away with the cheaper workgroup scope
      if (m_programInfo.type() == DxbcProgramType::ComputeShader && !m_hasGloballyCoherentUav)
        memoryScope = spv::ScopeWorkgroup;

      memorySemantics |= spv::MemorySemanticsImageMemoryMask
                      | spv::MemorySemanticsUniformMemoryMask
                      | spv::MemorySemanticsAcquireReleaseMask
                      | spv::MemorySemanticsMakeAvailableMask
                      | spv::MemorySemanticsMakeVisibleMask;
    }

    // A control barrier implies the memory barrier; otherwise emit a
    // standalone memory barrier if any memory scope was requested
    if (executionScope != spv::ScopeInvocation) {
      m_module.opControlBarrier(
        m_module.constu32(executionScope),
        m_module.constu32(memoryScope),
        m_module.constu32(memorySemantics));
    } else if (memoryScope != spv::ScopeInvocation) {
      m_module.opMemoryBarrier(
        m_module.constu32(memoryScope),
        m_module.constu32(memorySemantics));
    } else {
      Logger::warn("DxbcCompiler: sync instruction has no effect");
    }
  }

  // Handles ibfe / ubfe (signed / unsigned bitfield extract)
  void emitBitExtract(
    const DxbcShaderInstruction& ins) {
    // ibfe and ubfe take the following arguments:
    // (dst0) The destination register
    // (src0) Number of bits to extract
    // (src1) Offset of the bits to extract
    // (src2) Register to extract bits from
    const bool isSigned = ins.op == DxbcOpcode::IBfe;

    DxbcRegisterValue bitCnt = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
    DxbcRegisterValue bitOfs = emitRegisterLoad(ins.src[1], ins.dst[0].mask);

    // Dynamic counts/offsets are masked to the low five bits
    if (ins.src[0].type != DxbcOperandType::Imm32)
      bitCnt = emitRegisterMaskBits(bitCnt, 0x1F);

    if (ins.src[1].type != DxbcOperandType::Imm32)
      bitOfs = emitRegisterMaskBits(bitOfs, 0x1F);

    const DxbcRegisterValue src = emitRegisterLoad(ins.src[2], ins.dst[0].mask);

    // SPIR-V's BitFieldExtract is scalar per-component here,
    // so process each component individually
    const uint32_t componentCount = src.type.ccount;
    std::array<uint32_t, 4> componentIds = {{ 0, 0, 0, 0 }};

    for (uint32_t i = 0; i < componentCount; i++) {
      const DxbcRegisterValue currBitCnt = emitRegisterExtract(bitCnt, DxbcRegMask::select(i));
      const DxbcRegisterValue currBitOfs = emitRegisterExtract(bitOfs, DxbcRegMask::select(i));
      const DxbcRegisterValue currSrc    = emitRegisterExtract(src, DxbcRegMask::select(i));

      const uint32_t typeId = getVectorTypeId(currSrc.type);

      componentIds[i] = isSigned
        ? m_module.opBitFieldSExtract(typeId, currSrc.id, currBitOfs.id, currBitCnt.id)
        : m_module.opBitFieldUExtract(typeId, currSrc.id, currBitOfs.id, currBitCnt.id);
    }

    DxbcRegisterValue result;
    result.type = src.type;
    result.id = componentCount > 1
      ?
        m_module.opCompositeConstruct(
          getVectorTypeId(result.type),
          componentCount, componentIds.data())
      : componentIds[0];
    emitRegisterStore(ins.dst[0], result);
  }

  // Handles bfi (bitfield insert)
  void emitBitInsert(
    const DxbcShaderInstruction& ins) {
    // bfi takes the following arguments:
    // (dst0) The destination register
    // (src0) Number of bits to insert
    // (src1) Offset of the bits to insert
    // (src2) Register to take bits from
    // (src3) Register to replace bits in
    DxbcRegisterValue bitCnt = emitRegisterLoad(ins.src[0], ins.dst[0].mask);
    DxbcRegisterValue bitOfs = emitRegisterLoad(ins.src[1], ins.dst[0].mask);

    // Dynamic counts/offsets are masked to the low five bits
    if (ins.src[0].type != DxbcOperandType::Imm32)
      bitCnt = emitRegisterMaskBits(bitCnt, 0x1F);

    if (ins.src[1].type != DxbcOperandType::Imm32)
      bitOfs = emitRegisterMaskBits(bitOfs, 0x1F);

    const DxbcRegisterValue insert = emitRegisterLoad(ins.src[2], ins.dst[0].mask);
    const DxbcRegisterValue base   = emitRegisterLoad(ins.src[3], ins.dst[0].mask);

    // BitFieldInsert is applied per component
    const uint32_t componentCount = base.type.ccount;
    std::array<uint32_t, 4> componentIds = {{ 0, 0, 0, 0 }};

    for (uint32_t i = 0; i < componentCount; i++) {
      const DxbcRegisterValue currBitCnt = emitRegisterExtract(bitCnt, DxbcRegMask::select(i));
      const DxbcRegisterValue currBitOfs = emitRegisterExtract(bitOfs, DxbcRegMask::select(i));
      const DxbcRegisterValue currInsert = emitRegisterExtract(insert, DxbcRegMask::select(i));
      const DxbcRegisterValue currBase   = emitRegisterExtract(base, DxbcRegMask::select(i));

      componentIds[i] = m_module.opBitFieldInsert(
        getVectorTypeId(currBase.type),
        currBase.id, currInsert.id,
        currBitOfs.id, currBitCnt.id);
    }

    DxbcRegisterValue result;
    result.type = base.type;
    result.id = componentCount > 1
      ? m_module.opCompositeConstruct(
          getVectorTypeId(result.type),
          componentCount, componentIds.data())
      : componentIds[0];
    emitRegisterStore(ins.dst[0], result);
  }

  // Handles firstbit_lo / firstbit_hi / firstbit_shi
  void emitBitScan(
    const DxbcShaderInstruction& ins) {
    // firstbit(lo|hi|shi) have two operands:
    // (dst0) The destination operand
    // (src0) Source operand to scan
    DxbcRegisterValue src = emitRegisterLoad(ins.src[0], ins.dst[0].mask);

    DxbcRegisterValue dst;
    dst.type.ctype  = ins.dst[0].dataType;
    dst.type.ccount = ins.dst[0].mask.popCount();

    // Result type, should be an unsigned integer
    const uint32_t typeId = getVectorTypeId(dst.type);

    switch (ins.op) {
      case DxbcOpcode::FirstBitLo:  dst.id = m_module.opFindILsb(typeId, src.id); break;
      case DxbcOpcode::FirstBitHi:  dst.id = m_module.opFindUMsb(typeId, src.id); break;
      case DxbcOpcode::FirstBitShi: dst.id = m_module.opFindSMsb(typeId, src.id); break;
      default: Logger::warn(str::format("DxbcCompiler: Unhandled instruction: ", ins.op)); return;
    }

    // The 'Hi' variants are counted from the MSB in DXBC
    // rather than the LSB, so we have to invert the number,
    // while keeping the 'not found' sentinel (~0u) intact
    if (ins.op == DxbcOpcode::FirstBitHi || ins.op == DxbcOpcode::FirstBitShi) {
      uint32_t boolTypeId = m_module.defBoolType();

      if (dst.type.ccount > 1)
        boolTypeId = m_module.defVectorType(boolTypeId, dst.type.ccount);

      DxbcRegisterValue const31 = emitBuildConstVecu32(31u, 31u, 31u, 31u, ins.dst[0].mask);
      DxbcRegisterValue constff = emitBuildConstVecu32(~0u, ~0u, ~0u, ~0u, ins.dst[0].mask);

      dst.id = m_module.opSelect(typeId,
        m_module.opINotEqual(boolTypeId, dst.id, constff.id),
        m_module.opISub(typeId, const31.id, dst.id),
        constff.id);
    }

    // No modifiers are supported
    emitRegisterStore(ins.dst[0], dst);
  }

  // Handles bufinfo (buffer size query)
  void emitBufferQuery(
    const DxbcShaderInstruction& ins) {
    // bufinfo takes two arguments
    // (dst0) The destination register
    // (src0) The buffer register to query
    const DxbcBufferInfo bufferInfo =
      getBufferInfo(ins.src[0]);
    bool isSsbo = bufferInfo.isSsbo;

    // We'll store this as a scalar unsigned integer
    DxbcRegisterValue result = isSsbo
      ? emitQueryBufferSize(ins.src[0])
      : emitQueryTexelBufferSize(ins.src[0]);

    uint32_t typeId = getVectorTypeId(result.type);

    // Adjust returned size if this is a raw or structured
    // buffer, as emitQueryTexelBufferSize only returns the
    // number of typed elements in the buffer.
    if (bufferInfo.type == DxbcResourceType::Raw) {
      // raw buffers report size in bytes (4 bytes per u32 element)
      result.id = m_module.opIMul(typeId,
        result.id, m_module.constu32(4));
    } else if (bufferInfo.type == DxbcResourceType::Structured) {
      // structured buffers report the number of structs
      result.id = m_module.opUDiv(typeId, result.id,
        m_module.constu32(bufferInfo.stride / 4));
    }

    // Store the result. The scalar will be extended to a
    // vector if the write mask consists of more than one
    // component, which is the desired behaviour.
    emitRegisterStore(ins.dst[0], result);
  }

  // Handles ld_raw / ld_structured (and their sparse variants)
  void emitBufferLoad(
    const DxbcShaderInstruction& ins) {
    // ld_raw takes three arguments:
    // (dst0) Destination register
    // (src0) Byte offset
    // (src1) Source register
    // ld_structured takes four arguments:
    // (dst0) Destination register
    // (src0) Structure index
    // (src1) Byte offset
    // (src2) Source register
    const bool isStructured = ins.op == DxbcOpcode::LdStructured
                           || ins.op == DxbcOpcode::LdStructuredS;

    // Source register. The exact way we access
    // the data depends on the register type.
    const DxbcRegister& dstReg = ins.dst[0];
    const DxbcRegister& srcReg = isStructured ? ins.src[2] : ins.src[1];

    if (dstReg.type == DxbcOperandType::UnorderedAccessView)
      emitUavBarrier(uint64_t(1u) << srcReg.idx[0].offset, 0u);

    // Retrieve common info about the buffer
    const DxbcBufferInfo bufferInfo = getBufferInfo(srcReg);

    // Shared memory is the only type of buffer that
    // is not accessed through a texel buffer view
    bool isTgsm = srcReg.type == DxbcOperandType::ThreadGroupSharedMemory;
    bool isSsbo = bufferInfo.isSsbo;

    // Common types and IDs used while loading the data
    uint32_t bufferId = isTgsm || isSsbo ? 0 : m_module.opLoad(bufferInfo.typeId, bufferInfo.varId);

    uint32_t vectorTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 4 });
    uint32_t scalarTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 1 });

    // Since all data is represented as a sequence of 32-bit
    // integers, we have to load each component individually.
    std::array<uint32_t, 4> ccomps = { 0, 0, 0, 0 };
    std::array<uint32_t, 4> scomps = { 0, 0, 0, 0 };
    uint32_t scount = 0;

    // The sparse feedback ID will be non-zero for sparse
    // instructions on input. We need to reset it to 0.
    SpirvMemoryOperands memoryOperands;
    SpirvImageOperands imageOperands;
    imageOperands.sparse = ins.dstCount == 2;

    uint32_t coherence = bufferInfo.coherence;

    if (isTgsm && m_moduleInfo.options.forceVolatileTgsmAccess) {
      memoryOperands.flags |= spv::MemoryAccessVolatileMask;
      coherence = spv::ScopeWorkgroup;
    }

    if (coherence) {
      memoryOperands.flags |= spv::MemoryAccessNonPrivatePointerMask;

      if (coherence != spv::ScopeInvocation) {
        memoryOperands.flags |= spv::MemoryAccessMakePointerVisibleMask;
        memoryOperands.makeVisible = m_module.constu32(coherence);

        imageOperands.flags = spv::ImageOperandsNonPrivateTexelMask
                            | spv::ImageOperandsMakeTexelVisibleMask;
        imageOperands.makeVisible = m_module.constu32(coherence);
      }
    }

    uint32_t sparseFeedbackId = 0;

    // NV raw access chains cannot express sparse feedback
    bool useRawAccessChains = m_hasRawAccessChains && isSsbo && !imageOperands.sparse;

    DxbcRegisterValue index = emitRegisterLoad(ins.src[0], DxbcRegMask(true, false, false, false));
    DxbcRegisterValue offset = index;

    if (isStructured)
      offset = emitRegisterLoad(ins.src[1], DxbcRegMask(true, false, false, false));

    DxbcRegisterValue elementIndex = { };

    uint32_t baseAlignment = sizeof(uint32_t);

    if (useRawAccessChains) {
      memoryOperands.flags |= spv::MemoryAccessAlignedMask;

      // Derive the largest provable power-of-two alignment from the
      // stride and the immediate byte offset, capped by the SSBO
      // alignment limit the device supports
      if (isStructured && ins.src[1].type == DxbcOperandType::Imm32) {
        baseAlignment = bufferInfo.stride | ins.src[1].imm.u32_1;
        baseAlignment = baseAlignment & -baseAlignment;
        baseAlignment = std::min(baseAlignment, uint32_t(m_moduleInfo.options.minSsboAlignment));
      }
    } else {
      elementIndex = isStructured
        ? emitCalcBufferIndexStructured(index, offset, bufferInfo.stride)
        : emitCalcBufferIndexRaw(offset);
    }

    // Collect the set of source components that actually need
    // to be read, based on the destination write mask
    uint32_t readMask = 0u;

    for (uint32_t i = 0; i < 4; i++) {
      if (dstReg.mask[i])
        readMask |= 1u << srcReg.swizzle[i];
    }

    // Load contiguous runs of components
    while (readMask) {
      uint32_t sindex = bit::tzcnt(readMask);
      uint32_t scount = bit::tzcnt(~(readMask >> sindex));
      uint32_t zero = 0;

      if (useRawAccessChains) {
        uint32_t alignment = baseAlignment;
        uint32_t offsetId = offset.id;

        if (sindex) {
          offsetId = m_module.opIAdd(scalarTypeId,
            offsetId, m_module.constu32(sizeof(uint32_t) * sindex));
          alignment |= sizeof(uint32_t) * sindex;
        }

        DxbcRegisterInfo storeInfo;
        storeInfo.type.ctype = DxbcScalarType::Uint32;
        storeInfo.type.ccount = scount;
        storeInfo.type.alength = 0;
        storeInfo.sclass = spv::StorageClassStorageBuffer;

        uint32_t loadTypeId = getArrayTypeId(storeInfo.type);
        uint32_t ptrTypeId = getPointerTypeId(storeInfo);

        uint32_t accessChain = isStructured
          ? m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
              m_module.constu32(bufferInfo.stride), index.id, offsetId,
              spv::RawAccessChainOperandsRobustnessPerElementNVMask)
          : m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
              m_module.constu32(0), m_module.constu32(0), offsetId,
              spv::RawAccessChainOperandsRobustnessPerComponentNVMask);

        memoryOperands.alignment = alignment & -alignment;

        uint32_t vectorId = m_module.opLoad(loadTypeId, accessChain, memoryOperands);

        // Scatter the loaded vector into the per-component array
        for (uint32_t i = 0; i < scount; i++) {
          ccomps[sindex + i] = vectorId;

          if (scount > 1) {
            ccomps[sindex + i] = m_module.opCompositeExtract(
              scalarTypeId, vectorId, 1, &i);
          }
        }

        readMask &= ~(((1u << scount) - 1u) << sindex);
      } else {
        uint32_t elementIndexAdjusted = m_module.opIAdd(
          getVectorTypeId(elementIndex.type), elementIndex.id,
          m_module.consti32(sindex));

        if (isTgsm) {
          ccomps[sindex] = m_module.opLoad(scalarTypeId,
            m_module.opAccessChain(bufferInfo.typeId,
              bufferInfo.varId, 1, &elementIndexAdjusted),
            memoryOperands);
        } else if (isSsbo) {
          uint32_t indices[2] = { m_module.constu32(0), elementIndexAdjusted };
          ccomps[sindex] = m_module.opLoad(scalarTypeId,
            m_module.opAccessChain(bufferInfo.typeId,
              bufferInfo.varId, 2, indices),
            memoryOperands);
        } else {
          uint32_t resultTypeId = vectorTypeId;
          uint32_t resultId = 0;

          if (imageOperands.sparse)
            resultTypeId = getSparseResultTypeId(vectorTypeId);

          if (srcReg.type == DxbcOperandType::Resource) {
            resultId = m_module.opImageFetch(resultTypeId,
              bufferId, elementIndexAdjusted, imageOperands);
          } else if (srcReg.type == DxbcOperandType::UnorderedAccessView) {
            resultId = m_module.opImageRead(resultTypeId,
              bufferId, elementIndexAdjusted, imageOperands);
          } else {
            // NOTE(review): "strucured" typo in the error string; fix upstream
            throw DxvkError("DxbcCompiler: Invalid operand type for strucured/raw load");
          }

          // Only read sparse feedback once. This may be somewhat inaccurate
          // for reads that straddle pages, but we can't easily emulate this.
          if (imageOperands.sparse) {
            imageOperands.sparse = false;
            sparseFeedbackId = resultId;

            resultId = emitExtractSparseTexel(vectorTypeId, resultId);
          }

          ccomps[sindex] = m_module.opCompositeExtract(scalarTypeId, resultId, 1, &zero);
        }

        readMask &= readMask - 1;
      }
    }

    // Apply the source swizzle and destination mask to
    // assemble the final component list
    for (uint32_t i = 0; i < 4; i++) {
      uint32_t sindex = srcReg.swizzle[i];

      if (dstReg.mask[i])
        scomps[scount++] = ccomps[sindex];
    }

    DxbcRegisterValue result = { };
    result.type.ctype = DxbcScalarType::Uint32;
    result.type.ccount = scount;
    result.id = scomps[0];

    if (scount > 1) {
      result.id = m_module.opCompositeConstruct(
        getVectorTypeId(result.type),
        scount, scomps.data());
    }

    emitRegisterStore(dstReg, result);

    if (sparseFeedbackId)
      emitStoreSparseFeedback(ins.dst[1], sparseFeedbackId);
  }

  // Handles store_raw / store_structured
  void emitBufferStore(
    const DxbcShaderInstruction& ins) {
    // store_raw takes three arguments:
    // (dst0) Destination register
    // (src0) Byte offset
    // (src1) Source register
    // store_structured takes four arguments:
    // (dst0) Destination register
    // (src0) Structure index
    // (src1) Byte offset
    // (src2) Source register
    const bool isStructured = ins.op == DxbcOpcode::StoreStructured;

    // Source register. The exact way we access
    // the data depends on the register type.
    const DxbcRegister& dstReg = ins.dst[0];
    const DxbcRegister& srcReg = isStructured ? ins.src[2] : ins.src[1];

    if (dstReg.type == DxbcOperandType::UnorderedAccessView)
      emitUavBarrier(0u, uint64_t(1u) << dstReg.idx[0].offset);

    // All data is stored as 32-bit integers
    DxbcRegisterValue value = emitRegisterLoad(srcReg, dstReg.mask);
    value = emitRegisterBitcast(value, DxbcScalarType::Uint32);

    // Retrieve common info about the buffer
    const DxbcBufferInfo bufferInfo = getBufferInfo(dstReg);

    // Thread Group Shared Memory is not accessed through a texel buffer view
    bool isTgsm = dstReg.type == DxbcOperandType::ThreadGroupSharedMemory;
    bool isSsbo = bufferInfo.isSsbo;

    uint32_t bufferId = isTgsm || isSsbo ? 0 : m_module.opLoad(bufferInfo.typeId, bufferInfo.varId);

    uint32_t scalarTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 1 });
    uint32_t vectorTypeId = getVectorTypeId({ DxbcScalarType::Uint32, 4 });

    // Set memory operands according to resource properties
    SpirvMemoryOperands memoryOperands;
    SpirvImageOperands imageOperands;

    uint32_t coherence = bufferInfo.coherence;

    if (isTgsm && m_moduleInfo.options.forceVolatileTgsmAccess) {
      memoryOperands.flags |= spv::MemoryAccessVolatileMask;
      coherence = spv::ScopeWorkgroup;
    }

    if (coherence) {
      memoryOperands.flags |= spv::MemoryAccessNonPrivatePointerMask;

      if (coherence != spv::ScopeInvocation) {
        memoryOperands.flags |= spv::MemoryAccessMakePointerAvailableMask;
        memoryOperands.makeAvailable = m_module.constu32(coherence);

        imageOperands.flags = spv::ImageOperandsNonPrivateTexelMask
                            | spv::ImageOperandsMakeTexelAvailableMask;
        imageOperands.makeAvailable = m_module.constu32(coherence);
      }
    }

    // Compute flat element index as necessary
    bool useRawAccessChains = isSsbo && m_hasRawAccessChains;

    DxbcRegisterValue index = emitRegisterLoad(ins.src[0], DxbcRegMask(true, false, false, false));
    DxbcRegisterValue offset = index;

    if (isStructured)
      offset = emitRegisterLoad(ins.src[1], DxbcRegMask(true, false, false, false));

    DxbcRegisterValue elementIndex = { };

    uint32_t baseAlignment = sizeof(uint32_t);

    if (useRawAccessChains) {
      memoryOperands.flags |= spv::MemoryAccessAlignedMask;

      // Derive the largest provable power-of-two alignment from the
      // stride and the immediate byte offset, capped by the device limit
      if (isStructured && ins.src[1].type == DxbcOperandType::Imm32) {
        baseAlignment = bufferInfo.stride | ins.src[1].imm.u32_1;
        baseAlignment = baseAlignment & -baseAlignment;
        baseAlignment = std::min(baseAlignment, uint32_t(m_moduleInfo.options.minSsboAlignment));
      }
    } else {
      elementIndex = isStructured
        ? emitCalcBufferIndexStructured(index, offset, bufferInfo.stride)
        : emitCalcBufferIndexRaw(offset);
    }

    uint32_t writeMask = dstReg.mask.raw();

    // Store contiguous runs of components
    while (writeMask) {
      uint32_t sindex = bit::tzcnt(writeMask);
      uint32_t scount = bit::tzcnt(~(writeMask >> sindex));

      if (useRawAccessChains) {
        uint32_t alignment = baseAlignment;
        uint32_t offsetId = offset.id;

        if (sindex) {
          offsetId = m_module.opIAdd(scalarTypeId,
            offsetId, m_module.constu32(sizeof(uint32_t) * sindex));
          alignment = alignment | (sizeof(uint32_t) * sindex);
        }

        DxbcRegisterInfo storeInfo;
        storeInfo.type.ctype = DxbcScalarType::Uint32;
        storeInfo.type.ccount = scount;
        storeInfo.type.alength = 0;
        storeInfo.sclass = spv::StorageClassStorageBuffer;

        uint32_t storeTypeId = getArrayTypeId(storeInfo.type);
        uint32_t ptrTypeId = getPointerTypeId(storeInfo);

        uint32_t accessChain = isStructured
          ? m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
              m_module.constu32(bufferInfo.stride), index.id, offsetId,
              spv::RawAccessChainOperandsRobustnessPerElementNVMask)
          : m_module.opRawAccessChain(ptrTypeId, bufferInfo.varId,
              m_module.constu32(0), m_module.constu32(0), offsetId,
              spv::RawAccessChainOperandsRobustnessPerComponentNVMask);

        uint32_t valueId = value.id;

        // Extract the sub-vector being written if it is
        // smaller than the loaded source value
        if (scount < value.type.ccount) {
          if (scount == 1) {
            valueId = m_module.opCompositeExtract(storeTypeId, value.id, 1, &sindex);
          } else {
            std::array<uint32_t, 4> indices = { sindex, sindex + 1u, sindex + 2u, sindex + 3u };
            valueId = m_module.opVectorShuffle(storeTypeId, value.id, value.id, scount, indices.data());
          }
        }

        memoryOperands.alignment = alignment & -alignment;
        m_module.opStore(accessChain, valueId, memoryOperands);

        writeMask &= ~(((1u << scount) - 1u) << sindex);
      } else {
        uint32_t srcComponentId = value.type.ccount > 1
          ? m_module.opCompositeExtract(scalarTypeId,
              value.id, 1, &sindex)
          : value.id;

        uint32_t elementIndexAdjusted = sindex != 0
          ? m_module.opIAdd(getVectorTypeId(elementIndex.type),
              elementIndex.id, m_module.consti32(sindex))
          : elementIndex.id;

        if (isTgsm) {
          m_module.opStore(
            m_module.opAccessChain(bufferInfo.typeId,
              bufferInfo.varId, 1, &elementIndexAdjusted),
            srcComponentId, memoryOperands);
        } else if (isSsbo) {
          uint32_t indices[2] = { m_module.constu32(0), elementIndexAdjusted };
          m_module.opStore(
            m_module.opAccessChain(bufferInfo.typeId,
              bufferInfo.varId, 2, indices),
            srcComponentId, memoryOperands);
        } else if (dstReg.type == DxbcOperandType::UnorderedAccessView) {
          // Image writes take a full four-component vector;
          // replicate the scalar into all lanes
          const std::array<uint32_t, 4> srcVectorIds = {
            srcComponentId, srcComponentId,
            srcComponentId, srcComponentId,
          };

          m_module.opImageWrite(
            bufferId, elementIndexAdjusted,
            m_module.opCompositeConstruct(vectorTypeId,
              4, srcVectorIds.data()),
            imageOperands);
        } else {
          // NOTE(review): "strucured" typo in the error string; fix upstream
          throw DxvkError("DxbcCompiler: Invalid operand type for strucured/raw store");
        }

        writeMask &= writeMask - 1u;
      }
    }
  }

  // Handles f32tof16 / f16tof32 conversions
  void emitConvertFloat16(
    const DxbcShaderInstruction& ins) {
    // f32tof16 takes two operands:
    // (dst0) Destination register as a uint32 vector
    // (src0) Source register as a float32 vector
    // f16tof32 takes two operands:
    // (dst0) Destination register as a float32 vector
    // (src0) Source register as a uint32 vector
    const DxbcRegisterValue src = emitRegisterLoad(ins.src[0], ins.dst[0].mask);

    // We handle both packing and unpacking here
    const bool isPack = ins.op == DxbcOpcode::F32toF16;

    // The conversion instructions do not map very well to the
    // SPIR-V pack instructions, which operate on 2D vectors.
    std::array<uint32_t, 4> scalarIds = {{ 0, 0, 0, 0 }};

    const uint32_t componentCount = src.type.ccount;

    // These types are used in both pack and unpack operations
    const uint32_t t_u32   = getVectorTypeId({ DxbcScalarType::Uint32, 1 });
    const uint32_t t_f32   = getVectorTypeId({ DxbcScalarType::Float32, 1 });
    const uint32_t t_f32v2 = getVectorTypeId({ DxbcScalarType::Float32,
      2 });

    // Constant zero-bit pattern, used for packing
    const uint32_t zerof32 = isPack ? m_module.constf32(0.0f) : 0;

    for (uint32_t i = 0; i < componentCount; i++) {
      const DxbcRegisterValue componentValue
        = emitRegisterExtract(src, DxbcRegMask::select(i));

      if (isPack) {  // f32tof16
        // Pack the component into the low half of a 2D vector
        const std::array<uint32_t, 2> packIds =
          {{ componentValue.id, zerof32 }};

        scalarIds[i] = m_module.opPackHalf2x16(t_u32,
          m_module.opCompositeConstruct(t_f32v2, packIds.size(), packIds.data()));
      } else {  // f16tof32
        // Only the low 16 bits of each component are converted
        const uint32_t zeroIndex = 0;

        scalarIds[i] = m_module.opCompositeExtract(t_f32,
          m_module.opUnpackHalf2x16(t_f32v2, componentValue.id),
          1, &zeroIndex);
      }
    }

    DxbcRegisterValue result;
    result.type.ctype = ins.dst[0].dataType;
    result.type.ccount = componentCount;

    uint32_t typeId = getVectorTypeId(result.type);
    result.id = componentCount > 1
      ? m_module.opCompositeConstruct(typeId,
          componentCount, scalarIds.data())
      : scalarIds[0];

    if (isPack) {
      // Some drivers return infinity if the input value is above a certain
      // threshold, but D3D wants us to return infinity only if the input is
      // actually infinite. Fix this up to return the maximum representable
      // 16-bit floating point number instead, but preserve input infinity.
      uint32_t t_bvec = getVectorTypeId({ DxbcScalarType::Bool, componentCount });
      uint32_t f16Infinity = m_module.constuReplicant(0x7C00, componentCount);
      uint32_t f16Unsigned = m_module.constuReplicant(0x7FFF, componentCount);

      uint32_t isInputInf = m_module.opIsInf(t_bvec, src.id);
      uint32_t isValueInf = m_module.opIEqual(t_bvec, f16Infinity,
        m_module.opBitwiseAnd(typeId, result.id, f16Unsigned));

      // Subtracting 1 from the infinity bit pattern yields the
      // largest finite f16 value with the same sign bit
      result.id = m_module.opSelect(getVectorTypeId(result.type),
        m_module.opLogicalAnd(t_bvec, isValueInf, m_module.opLogicalNot(t_bvec, isInputInf)),
        m_module.opISub(typeId, result.id, m_module.constuReplicant(1, componentCount)),
        result.id);
    }

    // Store result in the destination register
    emitRegisterStore(ins.dst[0], result);
  }

  // Handles ftod / dtof / dtoi / dtou / itod / utod conversions
  void emitConvertFloat64(
    const DxbcShaderInstruction& ins) {
    // ftod and dtof take the following operands:
    // (dst0) Destination operand
    // (src0) Number to convert
    uint32_t dstBits = ins.dst[0].mask.popCount();

    // Doubles occupy two 32-bit components each, so the source
    // mask depends on whether we convert to or from double
    DxbcRegMask srcMask = isDoubleType(ins.dst[0].dataType)
      ? DxbcRegMask(dstBits >= 2, dstBits >= 4, false, false)
      : DxbcRegMask(dstBits >= 1, dstBits >= 1, dstBits >= 2, dstBits >= 2);

    // Perform actual conversion, destination modifiers are not applied
    DxbcRegisterValue val = emitRegisterLoad(ins.src[0], srcMask);

    DxbcRegisterValue result;
    result.type.ctype = ins.dst[0].dataType;
    result.type.ccount = val.type.ccount;

    switch (ins.op) {
      case DxbcOpcode::DtoF:
      case DxbcOpcode::FtoD:
        result.id = m_module.opFConvert(
          getVectorTypeId(result.type), val.id);
        break;

      case DxbcOpcode::DtoI:
        result.id = m_module.opConvertFtoS(
          getVectorTypeId(result.type), val.id);
        break;

      case DxbcOpcode::DtoU:
        result.id = m_module.opConvertFtoU(
          getVectorTypeId(result.type), val.id);
        break;

      case DxbcOpcode::ItoD:
        result.id = m_module.opConvertStoF(
          getVectorTypeId(result.type), val.id);
        break;

      case DxbcOpcode::UtoD:
        result.id = m_module.opConvertUtoF(
          getVectorTypeId(result.type), val.id);
        break;

      default:
        Logger::warn(str::format("DxbcCompiler: Unhandled instruction: ", ins.op));
        return;
    }

    emitRegisterStore(ins.dst[0], result);
  }

  // Handles the hull shader phase declaration opcodes, tracking
  // which phase is currently being compiled
  void emitHullShaderPhase(
    const DxbcShaderInstruction& ins) {
    switch (ins.op) {
      case DxbcOpcode::HsDecls: {
        if (m_hs.currPhaseType != DxbcCompilerHsPhase::None)
          Logger::err("DXBC: HsDecls not the first phase in hull shader");

        m_hs.currPhaseType = DxbcCompilerHsPhase::Decl;
      } break;

      case DxbcOpcode::HsControlPointPhase: {
        m_hs.cpPhase = this->emitNewHullShaderControlPointPhase();

        m_hs.currPhaseType = DxbcCompilerHsPhase::ControlPoint;
        m_hs.currPhaseId = 0;

        m_module.setDebugName(m_hs.cpPhase.functionId, "hs_control_point");
      } break;

      case DxbcOpcode::HsForkPhase: {
        auto phase = this->emitNewHullShaderForkJoinPhase();
        m_hs.forkPhases.push_back(phase);

        m_hs.currPhaseType = DxbcCompilerHsPhase::Fork;
        m_hs.currPhaseId = m_hs.forkPhases.size() - 1;
        m_module.setDebugName(phase.functionId,
          str::format("hs_fork_", m_hs.currPhaseId).c_str());
      } break;

      case DxbcOpcode::HsJoinPhase: {
        auto phase = this->emitNewHullShaderForkJoinPhase();
        m_hs.joinPhases.push_back(phase);

        m_hs.currPhaseType = DxbcCompilerHsPhase::Join;
        m_hs.currPhaseId = m_hs.joinPhases.size() - 1;

        m_module.setDebugName(phase.functionId,
          str::format("hs_join_", m_hs.currPhaseId).c_str());
      } break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
    }
  }

  // Sets the instance count of the current hull shader fork/join phase
  void emitHullShaderInstCnt(
    const DxbcShaderInstruction& ins) {
    this->getCurrentHsForkJoinPhase()->instanceCount = ins.imm[0].u32;
  }

  // Handles eval_centroid / eval_sample_index / eval_snapped
  void emitInterpolate(
    const DxbcShaderInstruction& ins) {
    m_module.enableCapability(spv::CapabilityInterpolationFunction);

    // The SPIR-V instructions operate on input variable pointers,
    // which are all declared as four-component float vectors.
    uint32_t registerId = ins.src[0].idx[0].offset;

    DxbcRegisterValue result;
    result.type = getInputRegType(registerId);

    switch (ins.op) {
      case DxbcOpcode::EvalCentroid: {
        result.id = m_module.opInterpolateAtCentroid(
          getVectorTypeId(result.type),
          m_vRegs.at(registerId).id);
      } break;

      case DxbcOpcode::EvalSampleIndex: {
        const DxbcRegisterValue sampleIndex = emitRegisterLoad(
          ins.src[1], DxbcRegMask(true, false, false, false));

        result.id = m_module.opInterpolateAtSample(
          getVectorTypeId(result.type),
          m_vRegs.at(registerId).id,
          sampleIndex.id);
      } break;

      case DxbcOpcode::EvalSnapped: {
        // The offset is encoded as a 4-bit fixed point value
        DxbcRegisterValue offset = emitRegisterLoad(
          ins.src[1], DxbcRegMask(true, true, false, false));
        offset.id = m_module.opBitFieldSExtract(
          getVectorTypeId(offset.type), offset.id,
          m_module.consti32(0), m_module.consti32(4));

        // Convert to float and scale by 1/16 to get the
        // actual sub-pixel offset
        offset.type.ctype = DxbcScalarType::Float32;
        offset.id = m_module.opConvertStoF(
          getVectorTypeId(offset.type), offset.id);

        offset.id = m_module.opFMul(
          getVectorTypeId(offset.type), offset.id,
          m_module.constvec2f32(1.0f / 16.0f, 1.0f / 16.0f));

        result.id = m_module.opInterpolateAtOffset(
          getVectorTypeId(result.type),
          m_vRegs.at(registerId).id,
          offset.id);
      } break;

      default:
        Logger::warn(str::format(
          "DxbcCompiler: Unhandled instruction: ",
          ins.op));
        return;
    }

    result = emitRegisterSwizzle(result,
      ins.src[0].swizzle, ins.dst[0].mask);
    emitRegisterStore(ins.dst[0], result);
  }

  // Handles check_access_mapped (sparse residency check)
  void emitSparseCheckAccess(
    const DxbcShaderInstruction& ins) {
    // check_access_mapped has two operands:
    // (dst0) The destination register
    // (src0) The residency code
    m_module.enableCapability(spv::CapabilitySparseResidency);

    DxbcRegisterValue srcValue = emitRegisterLoad(ins.src[0], ins.dst[0].mask);

    uint32_t boolId = m_module.opImageSparseTexelsResident(
      m_module.defBoolType(), srcValue.id);

    // D3D expects all-ones for resident, zero otherwise
    DxbcRegisterValue dstValue;
    dstValue.type = { DxbcScalarType::Uint32, 1 };
    dstValue.id = m_module.opSelect(getScalarTypeId(DxbcScalarType::Uint32),
      boolId, m_module.constu32(~0u), m_module.constu32(0));

    emitRegisterStore(ins.dst[0], dstValue);
  }

  // Handles resinfo (image size / mip count query)
  void emitTextureQuery(
    const DxbcShaderInstruction& ins) {
    // resinfo has three operands:
    // (dst0) The destination register
    // (src0) Resource LOD to query
    // (src1) Resource to query
    const DxbcBufferInfo resourceInfo = getBufferInfo(ins.src[1]);
    const DxbcResinfoType resinfoType = ins.controls.resinfoType();

    // Read the exact LOD for the image query
    const DxbcRegisterValue mipLod = emitRegisterLoad(
      ins.src[0], DxbcRegMask(true, false, false, false));

    const DxbcScalarType returnType = resinfoType == DxbcResinfoType::Uint
      ? DxbcScalarType::Uint32 : DxbcScalarType::Float32;

    // Query the size of the selected mip level, as well as the
    // total number of mip levels.
We will have to combine the\n // result into a four-component vector later.\n DxbcRegisterValue imageSize = emitQueryTextureSize(ins.src[1], mipLod);\n DxbcRegisterValue imageLevels = emitQueryTextureLods(ins.src[1]);\n\n // If the mip level is out of bounds, D3D requires us to return\n // zero before applying modifiers, whereas SPIR-V is undefined,\n // so we need to fix it up manually here.\n imageSize.id = m_module.opSelect(getVectorTypeId(imageSize.type),\n m_module.opULessThan(m_module.defBoolType(), mipLod.id, imageLevels.id),\n imageSize.id, emitBuildZeroVector(imageSize.type).id);\n\n // Convert intermediates to the requested type\n if (returnType == DxbcScalarType::Float32) {\n imageSize.type.ctype = DxbcScalarType::Float32;\n imageSize.id = m_module.opConvertUtoF(\n getVectorTypeId(imageSize.type),\n imageSize.id);\n \n imageLevels.type.ctype = DxbcScalarType::Float32;\n imageLevels.id = m_module.opConvertUtoF(\n getVectorTypeId(imageLevels.type),\n imageLevels.id);\n }\n \n // If the selected return type is rcpFloat, we need\n // to compute the reciprocal of the image dimensions,\n // but not the array size, so we need to separate it.\n const uint32_t imageCoordDim = imageSize.type.ccount;\n \n DxbcRegisterValue imageLayers;\n imageLayers.type = imageSize.type;\n imageLayers.id = 0;\n \n if (resinfoType == DxbcResinfoType::RcpFloat && resourceInfo.image.array) {\n imageLayers = emitRegisterExtract(imageSize, DxbcRegMask::select(imageCoordDim - 1));\n imageSize = emitRegisterExtract(imageSize, DxbcRegMask::firstN(imageCoordDim - 1));\n }\n \n if (resinfoType == DxbcResinfoType::RcpFloat) {\n imageSize.id = m_module.opFDiv(\n getVectorTypeId(imageSize.type),\n emitBuildConstVecf32(1.0f, 1.0f, 1.0f, 1.0f,\n DxbcRegMask::firstN(imageSize.type.ccount)).id,\n imageSize.id);\n }\n \n // Concatenate result vectors and scalars to form a\n // 4D vector. 
Unused components will be set to zero.\n std::array vectorIds = { imageSize.id, 0, 0, 0 };\n uint32_t numVectorIds = 1;\n \n if (imageLayers.id != 0)\n vectorIds[numVectorIds++] = imageLayers.id;\n \n if (imageCoordDim < 3) {\n const uint32_t zero = returnType == DxbcScalarType::Uint32\n ? m_module.constu32(0)\n : m_module.constf32(0.0f);\n \n for (uint32_t i = imageCoordDim; i < 3; i++)\n vectorIds[numVectorIds++] = zero;\n }\n \n vectorIds[numVectorIds++] = imageLevels.id;\n \n // Create the actual result vector\n DxbcRegisterValue result;\n result.type.ctype = returnType;\n result.type.ccount = 4;\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n numVectorIds, vectorIds.data());\n \n // Swizzle components using the resource swizzle\n // and the destination operand's write mask\n result = emitRegisterSwizzle(result,\n ins.src[1].swizzle, ins.dst[0].mask);\n emitRegisterStore(ins.dst[0], result);\n }\n void emitTextureQueryLod(\n const DxbcShaderInstruction& ins) {\n // All sample instructions have at least these operands:\n // (dst0) The destination register\n // (src0) Texture coordinates\n // (src1) The texture itself\n // (src2) The sampler object\n const DxbcRegister& texCoordReg = ins.src[0];\n const DxbcRegister& textureReg = ins.src[1];\n const DxbcRegister& samplerReg = ins.src[2];\n \n // Texture and sampler register IDs\n const auto& texture = m_textures.at(textureReg.idx[0].offset);\n const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);\n \n // Load texture coordinates\n const DxbcRegisterValue coord = emitRegisterLoad(texCoordReg,\n DxbcRegMask::firstN(getTexLayerDim(texture.imageInfo)));\n \n // Query the LOD. 
// The result is a two-dimensional float32
      // vector containing the mip level and virtual LOD numbers.
      const uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, false);
      const uint32_t queriedLodId = m_module.opImageQueryLod(
        getVectorTypeId({ DxbcScalarType::Float32, 2 }),
        sampledImageId, coord.id);
      
      // Build the result array vector by filling up
      // the remaining two components with zeroes.
      const uint32_t zero = m_module.constf32(0.0f);
      const std::array resultIds
        = {{ queriedLodId, zero, zero }};
      
      // Concatenate the two-component query result with the
      // two zeroes to form the four-component destination value
      DxbcRegisterValue result;
      result.type = DxbcVectorType { DxbcScalarType::Float32, 4 };
      result.id = m_module.opCompositeConstruct(
        getVectorTypeId(result.type),
        resultIds.size(), resultIds.data());
      
      // Apply texture swizzle and destination write mask
      result = emitRegisterSwizzle(result, ins.src[1].swizzle, ins.dst[0].mask);
      emitRegisterStore(ins.dst[0], result);
    }
    
    // Handles the 'sampleinfo' instruction, which writes the
    // sample count of the queried resource to the destination
    // register, either as an unsigned integer or as a float.
    void emitTextureQueryMs(
      const DxbcShaderInstruction& ins) {
      // sampleinfo has two operands:
      // (dst0) The destination register
      // (src0) Resource to query
      DxbcRegisterValue sampleCount = emitQueryTextureSamples(ins.src[0]);
      
      // Convert the result to float unless an
      // unsigned integer return type was requested
      if (ins.controls.returnType() != DxbcInstructionReturnType::Uint) {
        sampleCount.type = { DxbcScalarType::Float32, 1 };
        sampleCount.id = m_module.opConvertUtoF(
          getVectorTypeId(sampleCount.type),
          sampleCount.id);
      }
      
      emitRegisterStore(ins.dst[0], sampleCount);
    }
    
    // Handles the 'samplepos' instruction by indexing into an
    // array of standard sample positions that is generated on
    // first use and cached in m_samplePositions.
    void emitTextureQueryMsPos(
      const DxbcShaderInstruction& ins) {
      // samplepos has three operands:
      // (dst0) The destination register
      // (src0) Resource to query 
      // (src1) Sample index
      if (m_samplePositions == 0)
        m_samplePositions = emitSamplePosArray();
      
      // The lookup index is equal to the sample count plus the
      // sample index, or 0 if the resource cannot be queried.
      DxbcRegisterValue sampleCount = emitQueryTextureSamples(ins.src[0]);
      DxbcRegisterValue sampleIndex = emitRegisterLoad(
        ins.src[1], DxbcRegMask(true, false, false, false));
      
      uint32_t lookupIndex = 
m_module.opIAdd(\n getVectorTypeId(sampleCount.type),\n sampleCount.id, sampleIndex.id);\n \n // Validate the parameters\n uint32_t sampleCountValid = m_module.opULessThanEqual(\n m_module.defBoolType(),\n sampleCount.id,\n m_module.constu32(16));\n \n uint32_t sampleIndexValid = m_module.opULessThan(\n m_module.defBoolType(),\n sampleIndex.id,\n sampleCount.id);\n \n // If the lookup cannot be performed, set the lookup\n // index to zero, which will return a zero vector.\n lookupIndex = m_module.opSelect(\n getVectorTypeId(sampleCount.type),\n m_module.opLogicalAnd(\n m_module.defBoolType(),\n sampleCountValid,\n sampleIndexValid),\n lookupIndex,\n m_module.constu32(0));\n \n // Load sample pos vector and write the masked\n // components to the destination register.\n DxbcRegisterPointer samplePos;\n samplePos.type.ctype = DxbcScalarType::Float32;\n samplePos.type.ccount = 2;\n samplePos.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(samplePos.type),\n spv::StorageClassPrivate),\n m_samplePositions, 1, &lookupIndex);\n \n // Expand to vec4 by appending zeroes\n DxbcRegisterValue result = emitValueLoad(samplePos);\n\n DxbcRegisterValue zero;\n zero.type.ctype = DxbcScalarType::Float32;\n zero.type.ccount = 2;\n zero.id = m_module.constvec2f32(0.0f, 0.0f);\n\n result = emitRegisterConcat(result, zero);\n \n emitRegisterStore(ins.dst[0],\n emitRegisterSwizzle(result,\n ins.src[0].swizzle,\n ins.dst[0].mask));\n }\n void emitTextureFetch(\n const DxbcShaderInstruction& ins) {\n // ld has three operands:\n // (dst0) The destination register\n // (src0) Source address\n // (src1) Source texture\n // ld2dms has four operands:\n // (dst0) The destination register\n // (src0) Source address\n // (src1) Source texture\n // (src2) Sample number\n const auto& texture = m_textures.at(ins.src[1].idx[0].offset);\n const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);\n \n bool isMultisampled = ins.op == DxbcOpcode::LdMs\n || ins.op == 
DxbcOpcode::LdMsS;\n\n // Load the texture coordinates. The last component\n // contains the LOD if the resource is an image.\n const DxbcRegisterValue address = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, true, true, true));\n \n // Additional image operands. This will store\n // the LOD and the address offset if present.\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n \n if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {\n const std::array offsetIds = {\n imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,\n imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,\n imageLayerDim >= 3 ? m_module.consti32(ins.sampleControls.w) : 0,\n };\n \n imageOperands.flags |= spv::ImageOperandsConstOffsetMask;\n imageOperands.sConstOffset = offsetIds[0];\n\n if (imageLayerDim > 1) {\n imageOperands.sConstOffset = m_module.constComposite(\n getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),\n imageLayerDim, offsetIds.data());\n }\n }\n \n // The LOD is not present when reading from\n // a buffer or from a multisample texture.\n if (texture.imageInfo.dim != spv::DimBuffer && texture.imageInfo.ms == 0) {\n DxbcRegisterValue imageLod;\n \n if (!isMultisampled) {\n imageLod = emitRegisterExtract(\n address, DxbcRegMask(false, false, false, true));\n } else {\n // If we force-disabled MSAA, fetch from LOD 0\n imageLod.type = { DxbcScalarType::Uint32, 1 };\n imageLod.id = m_module.constu32(0);\n }\n \n imageOperands.flags |= spv::ImageOperandsLodMask;\n imageOperands.sLod = imageLod.id;\n }\n \n // The ld2dms instruction has a sample index, but we\n // are only allowed to set it for multisample views\n if (isMultisampled && texture.imageInfo.ms == 1) {\n DxbcRegisterValue sampleId = emitRegisterLoad(\n ins.src[2], DxbcRegMask(true, false, false, false));\n \n imageOperands.flags |= spv::ImageOperandsSampleMask;\n imageOperands.sSampleId = sampleId.id;\n }\n \n // Extract 
coordinates from address\n const DxbcRegisterValue coord = emitCalcTexCoord(address, texture.imageInfo);\n \n // Reading a typed image or buffer view\n // always returns a four-component vector.\n const uint32_t imageId = m_module.opLoad(texture.imageTypeId, texture.varId);\n\n DxbcVectorType texelType;\n texelType.ctype = texture.sampledType;\n texelType.ccount = 4;\n\n uint32_t texelTypeId = getVectorTypeId(texelType);\n uint32_t resultTypeId = texelTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(texelTypeId);\n\n resultId = m_module.opImageFetch(resultTypeId,\n imageId, coord.id, imageOperands);\n\n DxbcRegisterValue result;\n result.type = texelType;\n result.id = imageOperands.sparse\n ? emitExtractSparseTexel(texelTypeId, resultId)\n : resultId;\n \n // Swizzle components using the texture swizzle\n // and the destination operand's write mask\n result = emitRegisterSwizzle(result,\n ins.src[1].swizzle, ins.dst[0].mask);\n \n emitRegisterStore(ins.dst[0], result);\n\n if (imageOperands.sparse)\n emitStoreSparseFeedback(ins.dst[1], resultId);\n }\n void emitTextureGather(\n const DxbcShaderInstruction& ins) {\n // Gather4 takes the following operands:\n // (dst0) The destination register\n // (dst1) The residency code for sparse ops\n // (src0) Texture coordinates\n // (src1) The texture itself\n // (src2) The sampler, with a component selector\n // Gather4C takes the following additional operand:\n // (src3) The depth reference value\n // The Gather4Po variants take an additional operand\n // which defines an extended constant offset.\n // TODO reduce code duplication by moving some common code\n // in both sample() and gather() into separate methods\n const bool isExtendedGather = ins.op == DxbcOpcode::Gather4Po\n || ins.op == DxbcOpcode::Gather4PoC\n || ins.op == DxbcOpcode::Gather4PoS\n || ins.op == DxbcOpcode::Gather4PoCS;\n \n const DxbcRegister& texCoordReg = ins.src[0];\n const DxbcRegister& 
textureReg = ins.src[1 + isExtendedGather];\n const DxbcRegister& samplerReg = ins.src[2 + isExtendedGather];\n \n // Texture and sampler register IDs\n const auto& texture = m_textures.at(textureReg.idx[0].offset);\n const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);\n \n // Image type, which stores the image dimensions etc.\n const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);\n \n // Load the texture coordinates. SPIR-V allows these\n // to be float4 even if not all components are used.\n DxbcRegisterValue coord = emitLoadTexCoord(texCoordReg, texture.imageInfo);\n \n // Load reference value for depth-compare operations\n const bool isDepthCompare = ins.op == DxbcOpcode::Gather4C\n || ins.op == DxbcOpcode::Gather4PoC\n || ins.op == DxbcOpcode::Gather4CS\n || ins.op == DxbcOpcode::Gather4PoCS;\n\n const DxbcRegisterValue referenceValue = isDepthCompare\n ? emitRegisterLoad(ins.src[3 + isExtendedGather],\n DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n \n // Accumulate additional image operands.\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n\n if (isExtendedGather) {\n m_module.enableCapability(spv::CapabilityImageGatherExtended);\n \n DxbcRegisterValue gatherOffset = emitRegisterLoad(\n ins.src[1], DxbcRegMask::firstN(imageLayerDim));\n \n imageOperands.flags |= spv::ImageOperandsOffsetMask;\n imageOperands.gOffset = gatherOffset.id;\n } else if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {\n const std::array offsetIds = {\n imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,\n imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,\n imageLayerDim >= 3 ? 
m_module.consti32(ins.sampleControls.w) : 0,\n };\n \n imageOperands.flags |= spv::ImageOperandsConstOffsetMask;\n imageOperands.sConstOffset = offsetIds[0];\n\n if (imageLayerDim > 1) {\n imageOperands.sConstOffset = m_module.constComposite(\n getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),\n imageLayerDim, offsetIds.data());\n }\n }\n\n // Gathering texels always returns a four-component\n // vector, even for the depth-compare variants.\n uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, isDepthCompare);\n\n DxbcVectorType texelType;\n texelType.ctype = texture.sampledType;\n texelType.ccount = 4;\n\n uint32_t texelTypeId = getVectorTypeId(texelType);\n uint32_t resultTypeId = texelTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(texelTypeId);\n\n if (sampledImageId) {\n switch (ins.op) {\n // Simple image gather operation\n case DxbcOpcode::Gather4:\n case DxbcOpcode::Gather4S:\n case DxbcOpcode::Gather4Po:\n case DxbcOpcode::Gather4PoS: {\n resultId = m_module.opImageGather(\n resultTypeId, sampledImageId, coord.id,\n m_module.consti32(samplerReg.swizzle[0]),\n imageOperands);\n } break;\n\n // Depth-compare operation\n case DxbcOpcode::Gather4C:\n case DxbcOpcode::Gather4CS:\n case DxbcOpcode::Gather4PoC:\n case DxbcOpcode::Gather4PoCS: {\n resultId = m_module.opImageDrefGather(\n resultTypeId, sampledImageId, coord.id,\n referenceValue.id, imageOperands);\n } break;\n\n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n } else {\n Logger::warn(str::format(\"DxbcCompiler: \", ins.op, \": Unsupported image type\"));\n resultId = m_module.constNull(resultTypeId);\n }\n\n // If necessary, deal with the sparse result\n DxbcRegisterValue result;\n result.type = texelType;\n result.id = imageOperands.sparse\n ? 
emitExtractSparseTexel(texelTypeId, resultId)\n : resultId;\n\n // Swizzle components using the texture swizzle\n // and the destination operand's write mask\n result = emitRegisterSwizzle(result,\n textureReg.swizzle, ins.dst[0].mask);\n \n emitRegisterStore(ins.dst[0], result);\n\n if (imageOperands.sparse)\n emitStoreSparseFeedback(ins.dst[1], resultId);\n }\n void emitTextureSample(\n const DxbcShaderInstruction& ins) {\n // All sample instructions have at least these operands:\n // (dst0) The destination register\n // (src0) Texture coordinates\n // (src1) The texture itself\n // (src2) The sampler object\n const DxbcRegister& texCoordReg = ins.src[0];\n const DxbcRegister& textureReg = ins.src[1];\n const DxbcRegister& samplerReg = ins.src[2];\n \n // Texture and sampler register IDs\n const auto& texture = m_textures.at(textureReg.idx[0].offset);\n const auto& sampler = m_samplers.at(samplerReg.idx[0].offset);\n const uint32_t imageLayerDim = getTexLayerDim(texture.imageInfo);\n \n // Load the texture coordinates. SPIR-V allows these\n // to be float4 even if not all components are used.\n DxbcRegisterValue coord = emitLoadTexCoord(texCoordReg, texture.imageInfo);\n \n // Load reference value for depth-compare operations\n const bool isDepthCompare = ins.op == DxbcOpcode::SampleC\n || ins.op == DxbcOpcode::SampleClz\n || ins.op == DxbcOpcode::SampleCClampS\n || ins.op == DxbcOpcode::SampleClzS;\n \n const DxbcRegisterValue referenceValue = isDepthCompare\n ? emitRegisterLoad(ins.src[3], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n \n // Load explicit gradients for sample operations that require them\n const bool hasExplicitGradients = ins.op == DxbcOpcode::SampleD\n || ins.op == DxbcOpcode::SampleDClampS;\n \n const DxbcRegisterValue explicitGradientX = hasExplicitGradients\n ? 
emitRegisterLoad(ins.src[3], DxbcRegMask::firstN(imageLayerDim))\n : DxbcRegisterValue();\n \n const DxbcRegisterValue explicitGradientY = hasExplicitGradients\n ? emitRegisterLoad(ins.src[4], DxbcRegMask::firstN(imageLayerDim))\n : DxbcRegisterValue();\n \n // LOD for certain sample operations\n const bool hasLod = ins.op == DxbcOpcode::SampleL\n || ins.op == DxbcOpcode::SampleLS\n || ins.op == DxbcOpcode::SampleB\n || ins.op == DxbcOpcode::SampleBClampS;\n \n const DxbcRegisterValue lod = hasLod\n ? emitRegisterLoad(ins.src[3], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n\n // Min LOD for certain sparse operations\n const bool hasMinLod = ins.op == DxbcOpcode::SampleClampS\n || ins.op == DxbcOpcode::SampleBClampS\n || ins.op == DxbcOpcode::SampleDClampS\n || ins.op == DxbcOpcode::SampleCClampS;\n\n const DxbcRegisterValue minLod = hasMinLod && ins.src[ins.srcCount - 1].type != DxbcOperandType::Null\n ? emitRegisterLoad(ins.src[ins.srcCount - 1], DxbcRegMask(true, false, false, false))\n : DxbcRegisterValue();\n\n // Accumulate additional image operands. These are\n // not part of the actual operand token in SPIR-V.\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n\n if (ins.sampleControls.u != 0 || ins.sampleControls.v != 0 || ins.sampleControls.w != 0) {\n const std::array offsetIds = {\n imageLayerDim >= 1 ? m_module.consti32(ins.sampleControls.u) : 0,\n imageLayerDim >= 2 ? m_module.consti32(ins.sampleControls.v) : 0,\n imageLayerDim >= 3 ? 
m_module.consti32(ins.sampleControls.w) : 0,\n };\n \n imageOperands.flags |= spv::ImageOperandsConstOffsetMask;\n imageOperands.sConstOffset = offsetIds[0];\n\n if (imageLayerDim > 1) {\n imageOperands.sConstOffset = m_module.constComposite(\n getVectorTypeId({ DxbcScalarType::Sint32, imageLayerDim }),\n imageLayerDim, offsetIds.data());\n }\n }\n\n if (hasMinLod) {\n m_module.enableCapability(spv::CapabilityMinLod);\n\n imageOperands.flags |= spv::ImageOperandsMinLodMask;\n imageOperands.sMinLod = minLod.id;\n }\n\n // Combine the texture and the sampler into a sampled image\n uint32_t sampledImageId = emitLoadSampledImage(texture, sampler, isDepthCompare);\n \n // Sampling an image always returns a four-component\n // vector, whereas depth-compare ops return a scalar.\n DxbcVectorType texelType;\n texelType.ctype = texture.sampledType;\n texelType.ccount = isDepthCompare ? 1 : 4;\n\n uint32_t texelTypeId = getVectorTypeId(texelType);\n uint32_t resultTypeId = texelTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(texelTypeId);\n\n if (sampledImageId) {\n switch (ins.op) {\n // Simple image sample operation\n case DxbcOpcode::Sample:\n case DxbcOpcode::SampleClampS: {\n resultId = m_module.opImageSampleImplicitLod(\n resultTypeId, sampledImageId, coord.id,\n imageOperands);\n } break;\n\n // Depth-compare operation\n case DxbcOpcode::SampleC:\n case DxbcOpcode::SampleCClampS: {\n resultId = m_module.opImageSampleDrefImplicitLod(\n resultTypeId, sampledImageId, coord.id,\n referenceValue.id, imageOperands);\n } break;\n\n // Depth-compare operation on mip level zero\n case DxbcOpcode::SampleClz:\n case DxbcOpcode::SampleClzS: {\n imageOperands.flags |= spv::ImageOperandsLodMask;\n imageOperands.sLod = m_module.constf32(0.0f);\n\n resultId = m_module.opImageSampleDrefExplicitLod(\n resultTypeId, sampledImageId, coord.id,\n referenceValue.id, imageOperands);\n } break;\n\n // Sample operation with explicit 
gradients\n case DxbcOpcode::SampleD:\n case DxbcOpcode::SampleDClampS: {\n imageOperands.flags |= spv::ImageOperandsGradMask;\n imageOperands.sGradX = explicitGradientX.id;\n imageOperands.sGradY = explicitGradientY.id;\n\n resultId = m_module.opImageSampleExplicitLod(\n resultTypeId, sampledImageId, coord.id,\n imageOperands);\n } break;\n\n // Sample operation with explicit LOD\n case DxbcOpcode::SampleL:\n case DxbcOpcode::SampleLS: {\n imageOperands.flags |= spv::ImageOperandsLodMask;\n imageOperands.sLod = lod.id;\n\n resultId = m_module.opImageSampleExplicitLod(\n resultTypeId, sampledImageId, coord.id,\n imageOperands);\n } break;\n\n // Sample operation with LOD bias\n case DxbcOpcode::SampleB:\n case DxbcOpcode::SampleBClampS: {\n imageOperands.flags |= spv::ImageOperandsBiasMask;\n imageOperands.sLodBias = lod.id;\n\n resultId = m_module.opImageSampleImplicitLod(\n resultTypeId, sampledImageId, coord.id,\n imageOperands);\n } break;\n\n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled instruction: \",\n ins.op));\n return;\n }\n } else {\n Logger::warn(str::format(\"DxbcCompiler: \", ins.op, \": Unsupported image type\"));\n resultId = m_module.constNull(resultTypeId);\n }\n \n DxbcRegisterValue result;\n result.type = texelType;\n result.id = imageOperands.sparse\n ? 
emitExtractSparseTexel(texelTypeId, resultId)\n : resultId;\n\n // Swizzle components using the texture swizzle\n // and the destination operand's write mask\n if (result.type.ccount != 1) {\n result = emitRegisterSwizzle(result,\n textureReg.swizzle, ins.dst[0].mask);\n }\n \n emitRegisterStore(ins.dst[0], result);\n\n if (imageOperands.sparse)\n emitStoreSparseFeedback(ins.dst[1], resultId);\n }\n void emitTypedUavLoad(\n const DxbcShaderInstruction& ins) {\n // load_uav_typed has three operands:\n // (dst0) The destination register\n // (src0) The texture or buffer coordinates\n // (src1) The UAV to load from\n const uint32_t registerId = ins.src[1].idx[0].offset;\n const DxbcUav uavInfo = m_uavs.at(registerId);\n\n emitUavBarrier(uint64_t(1u) << registerId, 0u);\n\n // Load texture coordinates\n DxbcRegisterValue texCoord = emitLoadTexCoord(\n ins.src[0], uavInfo.imageInfo);\n\n SpirvImageOperands imageOperands;\n imageOperands.sparse = ins.dstCount == 2;\n\n if (uavInfo.coherence) {\n imageOperands.flags |= spv::ImageOperandsNonPrivateTexelMask\n | spv::ImageOperandsMakeTexelVisibleMask;\n imageOperands.makeVisible = m_module.constu32(uavInfo.coherence);\n }\n\n DxbcVectorType texelType;\n texelType.ctype = uavInfo.sampledType;\n texelType.ccount = 4;\n\n uint32_t texelTypeId = getVectorTypeId(texelType);\n uint32_t resultTypeId = texelTypeId;\n uint32_t resultId = 0;\n\n if (imageOperands.sparse)\n resultTypeId = getSparseResultTypeId(texelTypeId);\n\n // Load source value from the UAV\n resultId = m_module.opImageRead(resultTypeId,\n m_module.opLoad(uavInfo.imageTypeId, uavInfo.varId),\n texCoord.id, imageOperands);\n \n // Apply component swizzle and mask\n DxbcRegisterValue uavValue;\n uavValue.type = texelType;\n uavValue.id = imageOperands.sparse\n ? 
emitExtractSparseTexel(texelTypeId, resultId)\n : resultId;\n\n uavValue = emitRegisterSwizzle(uavValue,\n ins.src[1].swizzle, ins.dst[0].mask);\n \n emitRegisterStore(ins.dst[0], uavValue);\n\n if (imageOperands.sparse)\n emitStoreSparseFeedback(ins.dst[1], resultId);\n }\n void emitTypedUavStore(\n const DxbcShaderInstruction& ins) {\n // store_uav_typed has three operands:\n // (dst0) The destination UAV\n // (src0) The texture or buffer coordinates\n // (src1) The value to store\n const DxbcBufferInfo uavInfo = getBufferInfo(ins.dst[0]);\n emitUavBarrier(0u, uint64_t(1u) << ins.dst[0].idx[0].offset);\n\n // Set image operands for coherent access if necessary \n SpirvImageOperands imageOperands;\n\n if (uavInfo.coherence) {\n imageOperands.flags |= spv::ImageOperandsNonPrivateTexelMask\n | spv::ImageOperandsMakeTexelAvailableMask;\n imageOperands.makeAvailable = m_module.constu32(uavInfo.coherence);\n }\n\n // Load texture coordinates\n DxbcRegisterValue texCoord = emitLoadTexCoord(ins.src[0], uavInfo.image);\n\n // Load the value that will be written to the image. We'll\n // have to cast it to the component type of the image.\n const DxbcRegisterValue texValue = emitRegisterBitcast(\n emitRegisterLoad(ins.src[1], DxbcRegMask(true, true, true, true)),\n uavInfo.stype);\n \n // Write the given value to the image\n m_module.opImageWrite(\n m_module.opLoad(uavInfo.typeId, uavInfo.varId),\n texCoord.id, texValue.id, imageOperands);\n }\n void emitControlFlowIf(\n const DxbcShaderInstruction& ins) {\n // Load the first component of the condition\n // operand and perform a zero test on it.\n const DxbcRegisterValue condition = emitRegisterLoad(\n ins.src[0], DxbcRegMask(true, false, false, false));\n \n // Declare the 'if' block. 
// We do not know if there
      // will be an 'else' block or not, so we'll assume
      // that there is one and leave it empty otherwise.
      DxbcCfgBlock block;
      block.type = DxbcCfgBlockType::If;
      block.b_if.ztestId = emitRegisterZeroTest(condition, ins.controls.zeroTest()).id;
      block.b_if.labelIf = m_module.allocateId();
      block.b_if.labelElse = 0;
      block.b_if.labelEnd = m_module.allocateId();
      block.b_if.headerPtr = m_module.getInsertionPtr();
      m_controlFlowBlocks.push_back(block);
      
      // We'll insert the branch instruction when closing
      // the block, since we don't know whether or not an
      // else block is needed right now.
      m_module.opLabel(block.b_if.labelIf);
    }
    
    // Handles the 'else' instruction. The topmost control flow
    // block must be an 'If' block that has no 'else' label yet.
    void emitControlFlowElse(
      const DxbcShaderInstruction& ins) {
      if (m_controlFlowBlocks.size() == 0
       || m_controlFlowBlocks.back().type != DxbcCfgBlockType::If
       || m_controlFlowBlocks.back().b_if.labelElse != 0)
        throw DxvkError("DxbcCompiler: 'Else' without 'If' found");
      
      // Set the 'Else' flag so that we do
      // not insert a dummy block on 'EndIf'
      DxbcCfgBlock& block = m_controlFlowBlocks.back();
      block.b_if.labelElse = m_module.allocateId();
      
      // Close the 'If' block by branching to
      // the merge block we declared earlier
      m_module.opBranch(block.b_if.labelEnd);
      m_module.opLabel (block.b_if.labelElse);
    }
    
    // Handles the 'endif' instruction. Emits the deferred 'if'
    // header (selection merge + conditional branch) back at the
    // insertion point recorded when the 'if' was opened.
    void emitControlFlowEndIf(
      const DxbcShaderInstruction& ins) {
      if (m_controlFlowBlocks.size() == 0
       || m_controlFlowBlocks.back().type != DxbcCfgBlockType::If)
        throw DxvkError("DxbcCompiler: 'EndIf' without 'If' found");
      
      // Remove the block from the stack, it's closed
      DxbcCfgBlock block = m_controlFlowBlocks.back();
      m_controlFlowBlocks.pop_back();
      
      // Write out the 'if' header
      m_module.beginInsertion(block.b_if.headerPtr);
      
      m_module.opSelectionMerge(
        block.b_if.labelEnd,
        spv::SelectionControlMaskNone);
      
      // Branch to the 'else' block if one was declared,
      // otherwise branch directly to the merge block
      m_module.opBranchConditional(
        block.b_if.ztestId,
        block.b_if.labelIf,
        block.b_if.labelElse != 0
          ? 
block.b_if.labelElse
          : block.b_if.labelEnd);
      
      m_module.endInsertion();
      
      // End the active 'if' or 'else' block
      m_module.opBranch(block.b_if.labelEnd);
      m_module.opLabel (block.b_if.labelEnd);
    }
    
    // Handles the 'switch' instruction. The actual OpSwitch is
    // deferred until 'endswitch' because the set of case labels
    // is not known yet; only the insertion point and selector
    // are recorded here.
    void emitControlFlowSwitch(
      const DxbcShaderInstruction& ins) {
      // Load the selector as a scalar unsigned integer
      const DxbcRegisterValue selector = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));
      
      // Declare switch block. We cannot insert the switch
      // instruction itself yet because the number of case
      // statements and blocks is unknown at this point.
      DxbcCfgBlock block;
      block.type = DxbcCfgBlockType::Switch;
      block.b_switch.insertPtr = m_module.getInsertionPtr();
      block.b_switch.selectorId = selector.id;
      block.b_switch.labelBreak = m_module.allocateId();
      block.b_switch.labelCase = m_module.allocateId();
      block.b_switch.labelDefault = 0;
      block.b_switch.labelCases = nullptr;
      m_controlFlowBlocks.push_back(block);
      
      // Define the first 'case' label
      m_module.opLabel(block.b_switch.labelCase);
    }
    
    // Handles the 'case' instruction by recording the literal
    // together with the current case label.
    void emitControlFlowCase(
      const DxbcShaderInstruction& ins) {
      if (m_controlFlowBlocks.size() == 0
       || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)
        throw DxvkError("DxbcCompiler: 'Case' without 'Switch' found");
      
      // The source operand must be a 32-bit immediate.
      if (ins.src[0].type != DxbcOperandType::Imm32)
        throw DxvkError("DxbcCompiler: Invalid operand type for 'Case'");

      // Use the last label allocated for 'case'.
      DxbcCfgBlockSwitch* block = &m_controlFlowBlocks.back().b_switch;

      // Begin a new labelled block for this case if necessary
      if (caseBlockIsFallthrough()) {
        block->labelCase = m_module.allocateId();

        m_module.opBranch(block->labelCase);
        m_module.opLabel (block->labelCase);
      }

      // Prepend the literal-label pair to the linked list;
      // the original order is restored on 'endswitch'.
      DxbcSwitchLabel label;
      label.desc.literal = ins.src[0].imm.u32_1;
      label.desc.labelId = block->labelCase;
      label.next = block->labelCases;
      block->labelCases = new DxbcSwitchLabel(label);
    }
    
    void 
    // Handles the DXBC 'default' opcode inside a 'switch' construct. If the
    // preceding 'case' block falls through, a fresh label is emitted to
    // terminate it; the most recent case label then doubles as the default.
    // NOTE(review): the return type is cut off at the top of this chunk —
    // presumably 'void' like its siblings; confirm against the full file.
    emitControlFlowDefault(
        const DxbcShaderInstruction& ins) {
      // 'default' is only legal while a switch block tops the CFG stack
      if (m_controlFlowBlocks.size() == 0
       || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)
        throw DxvkError("DxbcCompiler: 'Default' without 'Switch' found");

      DxbcCfgBlockSwitch* block = &m_controlFlowBlocks.back().b_switch;

      // Terminate a fall-through case block by branching
      // into a freshly allocated label for the default block
      if (caseBlockIsFallthrough()) {
        block->labelCase = m_module.allocateId();

        m_module.opBranch(block->labelCase);
        m_module.opLabel (block->labelCase);
      }

      // Set the last label allocated for 'case' as the default label.
      block->labelDefault = block->labelCase;
    }


    // Handles DXBC 'endswitch': pops the switch block off the CFG stack,
    // resolves the default label, inserts the OpSwitch at the insertion
    // pointer recorded when the switch was opened, and opens the merge
    // ('break') block. Throws DxvkError on unbalanced control flow.
    void emitControlFlowEndSwitch(
        const DxbcShaderInstruction& ins) {
      if (m_controlFlowBlocks.size() == 0
       || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Switch)
        throw DxvkError("DxbcCompiler: 'EndSwitch' without 'Switch' found");

      // Remove the block from the stack, it's closed
      DxbcCfgBlock block = m_controlFlowBlocks.back();
      m_controlFlowBlocks.pop_back();

      // No explicit 'default' was declared: route the default either to
      // the merge block, or to the last (fall-through) case label.
      if (!block.b_switch.labelDefault) {
        block.b_switch.labelDefault = caseBlockIsFallthrough()
          ? block.b_switch.labelBreak
          : block.b_switch.labelCase;
      }

      // Close the current 'case' block
      m_module.opBranch(block.b_switch.labelBreak);

      // Insert the 'switch' statement. For that, we need to
      // gather all the literal-label pairs for the construct.
      m_module.beginInsertion(block.b_switch.insertPtr);
      m_module.opSelectionMerge(
        block.b_switch.labelBreak,
        spv::SelectionControlMaskNone);

      // We'll restore the original order of the case labels here
      // NOTE(review): the element type of this vector was lost in
      // extraction (std::vector<...> of case-label descriptors) —
      // restore the template argument list from the full source.
      std::vector jumpTargets;
      for (auto i = block.b_switch.labelCases; i != nullptr; i = i->next)
        jumpTargets.insert(jumpTargets.begin(), i->desc);

      m_module.opSwitch(
        block.b_switch.selectorId,
        block.b_switch.labelDefault,
        jumpTargets.size(),
        jumpTargets.data());
      m_module.endInsertion();

      // Destroy the list of case labels
      // FIXME we're leaking memory if compilation fails.
      DxbcSwitchLabel* caseLabel = block.b_switch.labelCases;

      while (caseLabel != nullptr)
        delete std::exchange(caseLabel, caseLabel->next);

      // Begin new block after switch blocks
      m_module.opLabel(block.b_switch.labelBreak);
    }


    // Handles DXBC 'loop': pushes a loop block onto the CFG stack and emits
    // the SPIR-V loop header (OpLoopMerge with merge + continue targets),
    // then opens the loop body block.
    void emitControlFlowLoop(
        const DxbcShaderInstruction& ins) {
      // Declare the 'loop' block
      DxbcCfgBlock block;
      block.type = DxbcCfgBlockType::Loop;
      block.b_loop.labelHeader   = m_module.allocateId();
      block.b_loop.labelBegin    = m_module.allocateId();
      block.b_loop.labelContinue = m_module.allocateId();
      block.b_loop.labelBreak    = m_module.allocateId();
      m_controlFlowBlocks.push_back(block);

      m_module.opBranch(block.b_loop.labelHeader);
      m_module.opLabel (block.b_loop.labelHeader);

      m_module.opLoopMerge(
        block.b_loop.labelBreak,
        block.b_loop.labelContinue,
        spv::LoopControlMaskNone);

      m_module.opBranch(block.b_loop.labelBegin);
      m_module.opLabel (block.b_loop.labelBegin);
    }


    // Handles DXBC 'endloop': pops the loop block, emits the continue block
    // with its back-edge to the loop header, and opens the merge block.
    void emitControlFlowEndLoop(
        const DxbcShaderInstruction& ins) {
      if (m_controlFlowBlocks.size() == 0
       || m_controlFlowBlocks.back().type != DxbcCfgBlockType::Loop)
        throw DxvkError("DxbcCompiler: 'EndLoop' without 'Loop' found");

      // Remove the block from the stack, it's closed
      const DxbcCfgBlock block = m_controlFlowBlocks.back();
      m_controlFlowBlocks.pop_back();

      // Declare the continue block
      m_module.opBranch(block.b_loop.labelContinue);
      m_module.opLabel (block.b_loop.labelContinue);

      // Declare the merge block
      m_module.opBranch(block.b_loop.labelHeader);
      m_module.opLabel (block.b_loop.labelBreak);
    }


    // Handles unconditional DXBC 'break' and 'continue': branches to the
    // innermost matching construct's break/continue label, then opens a new
    // (unreachable) block so subsequent instructions have somewhere to go.
    void emitControlFlowBreak(
        const DxbcShaderInstruction& ins) {
      const bool isBreak = ins.op == DxbcOpcode::Break;

      // 'break' may target a loop or a switch; 'continue' only a loop
      DxbcCfgBlock* cfgBlock = isBreak
        ? cfgFindBlock({ DxbcCfgBlockType::Loop, DxbcCfgBlockType::Switch })
        : cfgFindBlock({ DxbcCfgBlockType::Loop });

      if (cfgBlock == nullptr)
        throw DxvkError("DxbcCompiler: 'Break' or 'Continue' outside 'Loop' or 'Switch' found");

      if (cfgBlock->type == DxbcCfgBlockType::Loop) {
        m_module.opBranch(isBreak
          ? cfgBlock->b_loop.labelBreak
          : cfgBlock->b_loop.labelContinue);
      } else /* if (cfgBlock->type == DxbcCfgBlockType::Switch) */ {
        m_module.opBranch(cfgBlock->b_switch.labelBreak);
      }

      // Subsequent instructions assume that there is an open block
      const uint32_t labelId = m_module.allocateId();
      m_module.opLabel(labelId);

      // If this is on the same level as a switch-case construct,
      // rather than being nested inside an 'if' statement, close
      // the current 'case' block.
      if (m_controlFlowBlocks.back().type == DxbcCfgBlockType::Switch)
        cfgBlock->b_switch.labelCase = labelId;
    }


    // Handles conditional DXBC 'breakc'/'continuec' by wrapping the branch
    // into a small selection construct (continues into the next chunk).
    void emitControlFlowBreakc(
        const DxbcShaderInstruction& ins) {
      const bool isBreak = ins.op == DxbcOpcode::Breakc;

      DxbcCfgBlock* cfgBlock = isBreak
        ?
          cfgFindBlock({ DxbcCfgBlockType::Loop, DxbcCfgBlockType::Switch })
        : cfgFindBlock({ DxbcCfgBlockType::Loop });

      if (cfgBlock == nullptr)
        throw DxvkError("DxbcCompiler: 'Breakc' or 'Continuec' outside 'Loop' or 'Switch' found");

      // Perform zero test on the first component of the condition
      const DxbcRegisterValue condition = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));

      const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
        condition, ins.controls.zeroTest());

      // We basically have to wrap this into an 'if' block
      const uint32_t breakBlock = m_module.allocateId();
      const uint32_t mergeBlock = m_module.allocateId();

      m_module.opSelectionMerge(mergeBlock,
        spv::SelectionControlMaskNone);

      m_module.opBranchConditional(
        zeroTest.id, breakBlock, mergeBlock);

      m_module.opLabel(breakBlock);

      if (cfgBlock->type == DxbcCfgBlockType::Loop) {
        m_module.opBranch(isBreak
          ? cfgBlock->b_loop.labelBreak
          : cfgBlock->b_loop.labelContinue);
      } else /* if (cfgBlock->type == DxbcCfgBlockType::Switch) */ {
        m_module.opBranch(cfgBlock->b_switch.labelBreak);
      }

      m_module.opLabel(mergeBlock);
    }


    // Handles DXBC 'ret'. Inside structured control flow this emits
    // OpReturn followed by a fresh (unreachable) block; at top level it
    // simply closes the current function.
    void emitControlFlowRet(
        const DxbcShaderInstruction& ins) {
      if (m_controlFlowBlocks.size() != 0) {
        uint32_t labelId = m_module.allocateId();

        m_module.opReturn();
        m_module.opLabel(labelId);

        // return can be used in place of break to terminate a case block
        if (m_controlFlowBlocks.back().type == DxbcCfgBlockType::Switch)
          m_controlFlowBlocks.back().b_switch.labelCase = labelId;

        m_topLevelIsUniform = false;
      } else {
        // Last instruction in the current function
        this->emitFunctionEnd();
      }
    }


    // Handles conditional DXBC 'retc': wraps OpReturn into a
    // selection construct keyed on the zero test of src0.x.
    void emitControlFlowRetc(
        const DxbcShaderInstruction& ins) {
      // Perform zero test on the first component of the condition
      const DxbcRegisterValue condition = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));

      const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
        condition, ins.controls.zeroTest());

      // We basically have to wrap this into an 'if' block
      const uint32_t returnLabel   = m_module.allocateId();
      const uint32_t continueLabel = m_module.allocateId();

      m_module.opSelectionMerge(continueLabel,
        spv::SelectionControlMaskNone);

      m_module.opBranchConditional(
        zeroTest.id, returnLabel, continueLabel);

      m_module.opLabel(returnLabel);
      m_module.opReturn();

      m_module.opLabel(continueLabel);

      // The return condition may be non-uniform
      m_topLevelIsUniform = false;
    }


    // Handles DXBC 'discard' (conditional): demotes the invocation to a
    // helper invocation when the zero test on src0.x passes.
    void emitControlFlowDiscard(
        const DxbcShaderInstruction& ins) {
      // Discard actually has an operand that determines
      // whether or not the fragment should be discarded
      const DxbcRegisterValue condition = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));

      const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
        condition, ins.controls.zeroTest());

      DxbcConditional cond;
      cond.labelIf  = m_module.allocateId();
      cond.labelEnd = m_module.allocateId();

      m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);
      m_module.opBranchConditional(zeroTest.id, cond.labelIf, cond.labelEnd);

      m_module.opLabel(cond.labelIf);
      m_module.opDemoteToHelperInvocation();
      m_module.opBranch(cond.labelEnd);

      m_module.opLabel(cond.labelEnd);

      m_module.enableCapability(spv::CapabilityDemoteToHelperInvocation);

      // Discard is just retc in a trenchcoat
      m_topLevelIsUniform = false;
    }


    // Handles DXBC 'label': begins a new void() subroutine function
    // corresponding to the label index encoded in dst0.
    void emitControlFlowLabel(
        const DxbcShaderInstruction& ins) {
      uint32_t functionNr = ins.dst[0].idx[0].offset;
      uint32_t functionId = getFunctionId(functionNr);

      this->emitFunctionBegin(
        functionId,
        m_module.defVoidType(),
        m_module.defFunctionType(
          m_module.defVoidType(), 0, nullptr));

      m_module.opLabel(m_module.allocateId());
      m_module.setDebugName(functionId, str::format("label", functionNr).c_str());

      m_insideFunction = true;

      // We have to assume that this function gets
      // called from non-uniform control flow
      m_topLevelIsUniform = false;
    }


    // Handles DXBC 'call': unconditional call of the subroutine
    // identified by the label index in src0.
    void emitControlFlowCall(
        const DxbcShaderInstruction& ins) {
      uint32_t functionNr = ins.src[0].idx[0].offset;
      uint32_t functionId = getFunctionId(functionNr);

      m_module.opFunctionCall(
        m_module.defVoidType(),
        functionId, 0, nullptr);
    }


    // Handles DXBC 'callc': conditional subroutine call, with the
    // condition in src0 and the label index in src1.
    void emitControlFlowCallc(
        const DxbcShaderInstruction& ins) {
      uint32_t functionNr = ins.src[1].idx[0].offset;
      uint32_t functionId = getFunctionId(functionNr);

      // Perform zero test on the first component of the condition
      const DxbcRegisterValue condition = emitRegisterLoad(
        ins.src[0], DxbcRegMask(true, false, false, false));

      const DxbcRegisterValue zeroTest = emitRegisterZeroTest(
        condition, ins.controls.zeroTest());

      // We basically have to wrap this into an 'if' block
      const uint32_t callLabel = m_module.allocateId();
      const uint32_t skipLabel = m_module.allocateId();

      m_module.opSelectionMerge(skipLabel,
        spv::SelectionControlMaskNone);

      m_module.opBranchConditional(
        zeroTest.id, callLabel, skipLabel);

      m_module.opLabel(callLabel);
      m_module.opFunctionCall(
        m_module.defVoidType(),
        functionId, 0, nullptr);

      m_module.opBranch(skipLabel);
      m_module.opLabel(skipLabel);
    }


    // Top-level dispatcher for all control flow opcodes. Also emits UAV
    // barriers around constructs that may diverge or call subroutines.
    void emitControlFlow(
        const DxbcShaderInstruction& ins) {
      switch (ins.op) {
        case DxbcOpcode::If:
          this->emitUavBarrier(0, 0);
          this->emitControlFlowIf(ins);
          break;

        case DxbcOpcode::Else:
          this->emitControlFlowElse(ins);
          break;

        case DxbcOpcode::EndIf:
          this->emitControlFlowEndIf(ins);
          this->emitUavBarrier(0, 0);
          break;

        case DxbcOpcode::Switch:
          this->emitUavBarrier(0, 0);
          this->emitControlFlowSwitch(ins);
          break;

        case DxbcOpcode::Case:
          this->emitControlFlowCase(ins);
          break;

        case DxbcOpcode::Default:
          this->emitControlFlowDefault(ins);
          break;

        case DxbcOpcode::EndSwitch:
          this->emitControlFlowEndSwitch(ins);
          this->emitUavBarrier(0, 0);
          break;

        case DxbcOpcode::Loop:
          this->emitUavBarrier(0, 0);
          this->emitControlFlowLoop(ins);
          break;

        case DxbcOpcode::EndLoop:
          this->emitControlFlowEndLoop(ins);
          this->emitUavBarrier(0, 0);
          break;

        case DxbcOpcode::Break:
        case DxbcOpcode::Continue:
          this->emitControlFlowBreak(ins);
          break;

        case DxbcOpcode::Breakc:
        case DxbcOpcode::Continuec:
          this->emitControlFlowBreakc(ins);
          break;

        case DxbcOpcode::Ret:
          this->emitControlFlowRet(ins);
          break;

        case DxbcOpcode::Retc:
          this->emitUavBarrier(0, 0);
          this->emitControlFlowRetc(ins);
          break;

        case DxbcOpcode::Discard:
          this->emitControlFlowDiscard(ins);
          break;

        case DxbcOpcode::Label:
          this->emitControlFlowLabel(ins);
          break;

        case DxbcOpcode::Call:
          this->emitUavBarrier(0, 0);
          this->emitControlFlowCall(ins);
          this->emitUavBarrier(-1, -1);
          break;

        case DxbcOpcode::Callc:
          this->emitUavBarrier(0, 0);
          this->emitControlFlowCallc(ins);
          this->emitUavBarrier(-1, -1);
          break;

        default:
          Logger::warn(str::format(
            "DxbcCompiler: Unhandled instruction: ",
            ins.op));
      }
    }


    // Builds a float32 constant vector containing only the components
    // selected by the write mask, packed towards the front.
    DxbcRegisterValue emitBuildConstVecf32(
            float             x,
            float             y,
            float             z,
            float             w,
      const DxbcRegMask&      writeMask) {
      // TODO refactor these functions into one single template
      // NOTE(review): std::array template arguments were lost in
      // extraction (likely std::array<uint32_t, 4>) — restore them.
      std::array ids = { 0, 0, 0, 0 };
      uint32_t componentIndex = 0;

      if (writeMask[0]) ids[componentIndex++] = m_module.constf32(x);
      if (writeMask[1]) ids[componentIndex++] = m_module.constf32(y);
      if (writeMask[2]) ids[componentIndex++] = m_module.constf32(z);
      if (writeMask[3]) ids[componentIndex++] = m_module.constf32(w);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Float32;
      result.type.ccount = componentIndex;
      // Single components are returned as scalars, not 1-vectors
      result.id = componentIndex > 1
        ?
          m_module.constComposite(
            getVectorTypeId(result.type),
            componentIndex, ids.data())
        : ids[0];
      return result;
    }


    // Builds a uint32 constant vector, masked like emitBuildConstVecf32.
    DxbcRegisterValue emitBuildConstVecu32(
            uint32_t          x,
            uint32_t          y,
            uint32_t          z,
            uint32_t          w,
      const DxbcRegMask&      writeMask) {
      // NOTE(review): std::array template arguments were lost in
      // extraction throughout this block — restore from the full source.
      std::array ids = { 0, 0, 0, 0 };
      uint32_t componentIndex = 0;

      if (writeMask[0]) ids[componentIndex++] = m_module.constu32(x);
      if (writeMask[1]) ids[componentIndex++] = m_module.constu32(y);
      if (writeMask[2]) ids[componentIndex++] = m_module.constu32(z);
      if (writeMask[3]) ids[componentIndex++] = m_module.constu32(w);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = componentIndex;
      result.id = componentIndex > 1
        ? m_module.constComposite(
            getVectorTypeId(result.type),
            componentIndex, ids.data())
        : ids[0];
      return result;
    }


    // Builds a sint32 constant vector, masked like emitBuildConstVecf32.
    DxbcRegisterValue emitBuildConstVeci32(
            int32_t           x,
            int32_t           y,
            int32_t           z,
            int32_t           w,
      const DxbcRegMask&      writeMask) {
      std::array ids = { 0, 0, 0, 0 };
      uint32_t componentIndex = 0;

      if (writeMask[0]) ids[componentIndex++] = m_module.consti32(x);
      if (writeMask[1]) ids[componentIndex++] = m_module.consti32(y);
      if (writeMask[2]) ids[componentIndex++] = m_module.consti32(z);
      if (writeMask[3]) ids[componentIndex++] = m_module.consti32(w);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Sint32;
      result.type.ccount = componentIndex;
      result.id = componentIndex > 1
        ? m_module.constComposite(
            getVectorTypeId(result.type),
            componentIndex, ids.data())
        : ids[0];
      return result;
    }


    // Builds a float64 constant vector. Each double occupies a 32-bit
    // component pair, so the mask is tested pairwise (xy / zw).
    DxbcRegisterValue emitBuildConstVecf64(
            double            xy,
            double            zw,
      const DxbcRegMask&      writeMask) {
      std::array ids = { 0, 0 };
      uint32_t componentIndex = 0;

      if (writeMask[0] && writeMask[1]) ids[componentIndex++] = m_module.constf64(xy);
      if (writeMask[2] && writeMask[3]) ids[componentIndex++] = m_module.constf64(zw);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Float64;
      result.type.ccount = componentIndex;
      result.id = componentIndex > 1
        ? m_module.constComposite(
            getVectorTypeId(result.type),
            componentIndex, ids.data())
        : ids[0];
      return result;
    }


    // Broadcasts a scalar constant into a vector with the given
    // component count. Returns the scalar unchanged if count == 1.
    DxbcRegisterValue emitBuildVector(
        DxbcRegisterValue     scalar,
        uint32_t              count) {
      if (count == 1)
        return scalar;

      std::array scalarIds =
        { scalar.id, scalar.id, scalar.id, scalar.id };

      DxbcRegisterValue result;
      result.type.ctype  = scalar.type.ctype;
      result.type.ccount = count;
      result.id = m_module.constComposite(
        getVectorTypeId(result.type),
        count, scalarIds.data());
      return result;
    }


    // Builds a zero vector of the given type; 64-bit and other
    // scalar types are rejected with a DxvkError.
    DxbcRegisterValue emitBuildZeroVector(
        DxbcVectorType        type) {
      DxbcRegisterValue result;
      result.type.ctype  = type.ctype;
      result.type.ccount = 1;

      switch (type.ctype) {
        case DxbcScalarType::Float32: result.id = m_module.constf32(0.0f); break;
        case DxbcScalarType::Uint32:  result.id = m_module.constu32(0u); break;
        case DxbcScalarType::Sint32:  result.id = m_module.consti32(0); break;
        default: throw DxvkError("DxbcCompiler: Invalid scalar type");
      }

      return emitBuildVector(result, type.ccount);
    }


    // Bit-casts a register value to a different scalar type. The component
    // count is doubled/halved when converting from/to 64-bit types so the
    // total bit width stays the same.
    DxbcRegisterValue emitRegisterBitcast(
        DxbcRegisterValue     srcValue,
        DxbcScalarType        dstType) {
      DxbcScalarType srcType = srcValue.type.ctype;

      if (srcType == dstType)
        return srcValue;

      DxbcRegisterValue result;
      result.type.ctype  = dstType;
      result.type.ccount = srcValue.type.ccount;

      if (isDoubleType(srcType)) result.type.ccount *= 2;
      if (isDoubleType(dstType)) result.type.ccount /= 2;

      result.id = m_module.opBitcast(
        getVectorTypeId(result.type),
        srcValue.id);
      return result;
    }


    // Applies a DXBC swizzle + write mask to a vector value. No-op
    // swizzles are detected and returned unchanged.
    DxbcRegisterValue emitRegisterSwizzle(
        DxbcRegisterValue     value,
        DxbcRegSwizzle        swizzle,
        DxbcRegMask           writeMask) {
      // Scalars are extended instead of swizzled
      if (value.type.ccount == 1)
        return emitRegisterExtend(value, writeMask.popCount());

      std::array indices;

      uint32_t dstIndex = 0;

      for (uint32_t i = 0; i < 4; i++) {
        if (writeMask[i])
          indices[dstIndex++] = swizzle[i];
      }

      // If the swizzle combined with the mask can be reduced
      // to a no-op, we don't need to insert any instructions.
      bool isIdentitySwizzle = dstIndex == value.type.ccount;

      for (uint32_t i = 0; i < dstIndex && isIdentitySwizzle; i++)
        isIdentitySwizzle &= indices[i] == i;

      if (isIdentitySwizzle)
        return value;

      // Use OpCompositeExtract if the resulting vector contains
      // only one component, and OpVectorShuffle if it is a vector.
      DxbcRegisterValue result;
      result.type.ctype  = value.type.ctype;
      result.type.ccount = dstIndex;

      const uint32_t typeId = getVectorTypeId(result.type);

      if (dstIndex == 1) {
        result.id = m_module.opCompositeExtract(
          typeId, value.id, 1, indices.data());
      } else {
        result.id = m_module.opVectorShuffle(
          typeId, value.id, value.id,
          dstIndex, indices.data());
      }

      return result;
    }


    // Extracts the masked components of a vector using
    // an identity swizzle.
    DxbcRegisterValue emitRegisterExtract(
        DxbcRegisterValue     value,
        DxbcRegMask           mask) {
      return emitRegisterSwizzle(value,
        DxbcRegSwizzle(0, 1, 2, 3), mask);
    }


    // Inserts the components of srcValue selected by srcMask
    // into dstValue, leaving the other components untouched.
    DxbcRegisterValue emitRegisterInsert(
        DxbcRegisterValue     dstValue,
        DxbcRegisterValue     srcValue,
        DxbcRegMask           srcMask) {
      DxbcRegisterValue result;
      result.type = dstValue.type;

      const uint32_t typeId = getVectorTypeId(result.type);

      if (srcMask.popCount() == 0) {
        // Nothing to do if the insertion mask is empty
        result.id = dstValue.id;
      } else if (dstValue.type.ccount == 1) {
        // Both values are
        // scalar, so the first component
        // of the write mask decides which one to take.
        result.id = srcMask[0] ? srcValue.id : dstValue.id;
      } else if (srcValue.type.ccount == 1) {
        // The source value is scalar. Since OpVectorShuffle
        // requires both arguments to be vectors, we have to
        // use OpCompositeInsert to modify the vector instead.
        const uint32_t componentId = srcMask.firstSet();

        result.id = m_module.opCompositeInsert(typeId,
          srcValue.id, dstValue.id, 1, &componentId);
      } else {
        // Both arguments are vectors. We can determine which
        // components to take from which vector and use the
        // OpVectorShuffle instruction.
        // NOTE(review): std::array template arguments were lost in
        // extraction throughout this block — restore from full source.
        std::array components;
        uint32_t srcComponentId = dstValue.type.ccount;

        for (uint32_t i = 0; i < dstValue.type.ccount; i++)
          components.at(i) = srcMask[i] ? srcComponentId++ : i;

        result.id = m_module.opVectorShuffle(
          typeId, dstValue.id, srcValue.id,
          dstValue.type.ccount, components.data());
      }

      return result;
    }


    // Concatenates two vectors into one with the
    // combined component count.
    DxbcRegisterValue emitRegisterConcat(
        DxbcRegisterValue     value1,
        DxbcRegisterValue     value2) {
      std::array ids =
        {{ value1.id, value2.id }};

      DxbcRegisterValue result;
      result.type.ctype  = value1.type.ctype;
      result.type.ccount = value1.type.ccount + value2.type.ccount;
      result.id = m_module.opCompositeConstruct(
        getVectorTypeId(result.type),
        ids.size(), ids.data());
      return result;
    }


    // Broadcasts a (scalar) value into a vector of
    // the given size. No-op if size == 1.
    DxbcRegisterValue emitRegisterExtend(
        DxbcRegisterValue     value,
        uint32_t              size) {
      if (size == 1)
        return value;

      std::array ids = {{
        value.id, value.id,
        value.id, value.id,
      }};

      DxbcRegisterValue result;
      result.type.ctype  = value.type.ctype;
      result.type.ccount = size;
      result.id = m_module.opCompositeConstruct(
        getVectorTypeId(result.type),
        size, ids.data());
      return result;
    }


    // Computes the component-wise absolute value, choosing the
    // float or signed-int opcode based on the value's scalar type.
    DxbcRegisterValue emitRegisterAbsolute(
        DxbcRegisterValue     value) {
      const uint32_t typeId = getVectorTypeId(value.type);

      switch (value.type.ctype) {
        case DxbcScalarType::Float32: value.id = m_module.opFAbs(typeId, value.id); break;
        case DxbcScalarType::Float64: value.id = m_module.opFAbs(typeId, value.id); break;
        case DxbcScalarType::Sint32:  value.id = m_module.opSAbs(typeId, value.id); break;
        case DxbcScalarType::Sint64:  value.id = m_module.opSAbs(typeId, value.id); break;
        default: Logger::warn("DxbcCompiler: Cannot get absolute value for given type");
      }

      return value;
    }


    // Negates the value component-wise, again dispatching
    // on the scalar type.
    DxbcRegisterValue emitRegisterNegate(
        DxbcRegisterValue     value) {
      const uint32_t typeId = getVectorTypeId(value.type);

      switch (value.type.ctype) {
        case DxbcScalarType::Float32: value.id = m_module.opFNegate(typeId, value.id); break;
        case DxbcScalarType::Float64: value.id = m_module.opFNegate(typeId, value.id); break;
        case DxbcScalarType::Sint32:  value.id = m_module.opSNegate(typeId, value.id); break;
        case DxbcScalarType::Sint64:  value.id = m_module.opSNegate(typeId, value.id); break;
        default: Logger::warn("DxbcCompiler: Cannot negate given type");
      }

      return value;
    }


    // Tests a scalar against zero: TestZ yields (value == 0),
    // otherwise (value != 0). Result is a scalar bool.
    DxbcRegisterValue emitRegisterZeroTest(
        DxbcRegisterValue     value,
        DxbcZeroTest          test) {
      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Bool;
      result.type.ccount = 1;

      const uint32_t zeroId = m_module.constu32(0u);
      const uint32_t typeId = getVectorTypeId(result.type);

      result.id = test == DxbcZeroTest::TestZ
        ? m_module.opIEqual   (typeId, value.id, zeroId)
        : m_module.opINotEqual(typeId, value.id, zeroId);
      return result;
    }


    // ANDs each component of the value with the given bit mask.
    DxbcRegisterValue emitRegisterMaskBits(
        DxbcRegisterValue     value,
        uint32_t              mask) {
      DxbcRegisterValue maskVector = emitBuildConstVecu32(
        mask, mask, mask, mask, DxbcRegMask::firstN(value.type.ccount));

      DxbcRegisterValue result;
      result.type = value.type;
      result.id = m_module.opBitwiseAnd(
        getVectorTypeId(result.type),
        value.id, maskVector.id);
      return result;
    }


    // Applies source operand modifiers (abs, then neg) to a value.
    DxbcRegisterValue emitSrcOperandModifiers(
        DxbcRegisterValue     value,
        DxbcRegModifiers      modifiers) {
      if (modifiers.test(DxbcRegModifier::Abs))
        value = emitRegisterAbsolute(value);

      if (modifiers.test(DxbcRegModifier::Neg))
        value = emitRegisterNegate(value);
      return value;
    }


    // Applies destination operand modifiers; currently only saturation,
    // which clamps float32/float64 results to [0, 1] via opNClamp.
    DxbcRegisterValue emitDstOperandModifiers(
        DxbcRegisterValue     value,
        DxbcOpModifiers       modifiers) {
      const uint32_t typeId = getVectorTypeId(value.type);

      if (modifiers.saturate) {
        DxbcRegMask mask;
        DxbcRegisterValue vec0, vec1;

        if (value.type.ctype == DxbcScalarType::Float32) {
          mask = DxbcRegMask::firstN(value.type.ccount);
          vec0 = emitBuildConstVecf32(0.0f, 0.0f, 0.0f, 0.0f, mask);
          vec1 = emitBuildConstVecf32(1.0f, 1.0f, 1.0f, 1.0f, mask);
        } else if (value.type.ctype == DxbcScalarType::Float64) {
          // f64 constants cover two 32-bit components each
          mask = DxbcRegMask::firstN(value.type.ccount * 2);
          vec0 = emitBuildConstVecf64(0.0, 0.0, mask);
          vec1 = emitBuildConstVecf64(1.0, 1.0, mask);
        }

        if (mask)
          value.id = m_module.opNClamp(typeId, value.id, vec0.id, vec1.id);
      }

      return value;
    }


    // Extracts the texel (member 1) from a sparse image operation's
    // result structure.
    uint32_t emitExtractSparseTexel(
        uint32_t              texelTypeId,
        uint32_t              resultId) {
      uint32_t index = 1;

      return m_module.opCompositeExtract(
        texelTypeId, resultId, 1, &index);
    }


    // Stores the residency code (member 0) of a sparse image operation
    // into the feedback register, if one is bound (continues below).
    void emitStoreSparseFeedback(
        const DxbcRegister&   feedbackRegister,
        uint32_t              resultId) {
      if (feedbackRegister.type != DxbcOperandType::Null) {
        uint32_t index = 0;

        DxbcRegisterValue result;
        result.type = {
          DxbcScalarType::Uint32, 1 };
        result.id = m_module.opCompositeExtract(
          getScalarTypeId(DxbcScalarType::Uint32),
          resultId, 1, &index);

        emitRegisterStore(feedbackRegister, result);
      }
    }


    // Indexes one element of an array variable and
    // returns a typed pointer to it.
    DxbcRegisterPointer emitArrayAccess(
        DxbcRegisterPointer   pointer,
        spv::StorageClass     sclass,
        uint32_t              index) {
      uint32_t ptrTypeId = m_module.defPointerType(
        getVectorTypeId(pointer.type), sclass);

      DxbcRegisterPointer result;
      result.type = pointer.type;
      result.id = m_module.opAccessChain(
        ptrTypeId, pointer.id, 1, &index);
      return result;
    }


    // Combines a texture and a sampler into an OpSampledImage, picking the
    // depth-compare or color image view. Returns 0 if the required image
    // type was not declared for this resource.
    uint32_t emitLoadSampledImage(
        const DxbcShaderResource& textureResource,
        const DxbcSampler&        samplerResource,
        bool                      isDepthCompare) {
      uint32_t baseId = isDepthCompare
        ? textureResource.depthTypeId
        : textureResource.colorTypeId;

      if (!baseId)
        return 0;

      uint32_t sampledImageType = m_module.defSampledImageType(baseId);

      return m_module.opSampledImage(sampledImageType,
        m_module.opLoad(textureResource.imageTypeId, textureResource.varId),
        m_module.opLoad(samplerResource.typeId, samplerResource.varId));
    }


    // Returns a pointer to a temporary register (r#),
    // creating the private variable lazily on first use.
    DxbcRegisterPointer emitGetTempPtr(
        const DxbcRegister&   operand) {
      // r# regs are indexed as follows:
      //    (0) register index (immediate)
      uint32_t regIdx = operand.idx[0].offset;

      if (regIdx >= m_rRegs.size())
        m_rRegs.resize(regIdx + 1, 0u);

      if (!m_rRegs.at(regIdx)) {
        DxbcRegisterInfo info;
        info.type.ctype   = DxbcScalarType::Float32;
        info.type.ccount  = 4;
        info.type.alength = 0;
        info.sclass       = spv::StorageClassPrivate;

        uint32_t varId = emitNewVariable(info);
        m_rRegs.at(regIdx) = varId;

        m_module.setDebugName(varId,
          str::format("r", regIdx).c_str());
      }

      DxbcRegisterPointer result;
      result.type.ctype  = DxbcScalarType::Float32;
      result.type.ccount = 4;
      result.id = m_rRegs.at(regIdx);
      return result;
    }


    // Returns a pointer to an indexable temp (x#) element,
    // with the dynamic index loaded from idx[1].
    DxbcRegisterPointer emitGetIndexableTempPtr(
        const DxbcRegister&   operand) {
      return getIndexableTempPtr(operand, emitIndexLoad(operand.idx[1]));
    }


    // Returns a pointer to an input register (v# and friends). The index
    // dimensionality and the backing array depend on the shader stage.
    DxbcRegisterPointer emitGetInputPtr(
        const DxbcRegister&   operand) {
      // In the vertex and pixel stages,
      // v# regs are indexed as follows:
      //    (0) register index (relative)
      //
      // In the tessellation and geometry
      // stages, the index has two dimensions:
      //    (0) vertex index (relative)
      //    (1) register index (relative)
      DxbcRegisterPointer result;
      result.type.ctype  = DxbcScalarType::Float32;
      result.type.ccount = 4;

      // NOTE(review): std::array template arguments were lost in
      // extraction throughout this block — restore from full source.
      std::array indices = {{ 0, 0 }};

      for (uint32_t i = 0; i < operand.idxDim; i++)
        indices.at(i) = emitIndexLoad(operand.idx[i]).id;

      // Pick the input array depending on
      // the program type and operand type
      struct InputArray {
        uint32_t          id;
        spv::StorageClass sclass;
      };

      const InputArray array = [&] () -> InputArray {
        switch (operand.type) {
          case DxbcOperandType::InputControlPoint:
            return m_programInfo.type() == DxbcProgramType::HullShader
              ? InputArray { m_vArray, spv::StorageClassPrivate }
              : InputArray { m_ds.inputPerVertex, spv::StorageClassInput };
          case DxbcOperandType::InputPatchConstant:
            return m_programInfo.type() == DxbcProgramType::HullShader
              ? InputArray { m_hs.outputPerPatch, spv::StorageClassPrivate }
              : InputArray { m_ds.inputPerPatch, spv::StorageClassInput };
          case DxbcOperandType::OutputControlPoint:
            return InputArray { m_hs.outputPerVertex, spv::StorageClassOutput };
          default:
            return { m_vArray, spv::StorageClassPrivate };
        }
      }();

      DxbcRegisterInfo info;
      info.type.ctype   = result.type.ctype;
      info.type.ccount  = result.type.ccount;
      info.type.alength = 0;
      info.sclass       = array.sclass;

      result.id = m_module.opAccessChain(
        getPointerTypeId(info), array.id,
        operand.idxDim, indices.data());

      return result;
    }


    // Returns a pointer to an output register (o#). Hull shaders need
    // special handling since they have per-vertex and per-patch outputs.
    DxbcRegisterPointer emitGetOutputPtr(
        const DxbcRegister&   operand) {
      if (m_programInfo.type() == DxbcProgramType::HullShader) {
        // Hull shaders are special in that they have two sets of
        // output registers, one for per-patch values and one for
        // per-vertex values.
        DxbcRegisterPointer result;
        result.type.ctype  = DxbcScalarType::Float32;
        result.type.ccount = 4;

        uint32_t registerId = emitIndexLoad(operand.idx[0]).id;

        if (m_hs.currPhaseType == DxbcCompilerHsPhase::ControlPoint) {
          // Per-vertex output, indexed by the invocation ID
          std::array indices = {{
            m_module.opLoad(m_module.defIntType(32, 0), m_hs.builtinInvocationId),
            registerId,
          }};

          uint32_t ptrTypeId = m_module.defPointerType(
            getVectorTypeId(result.type),
            spv::StorageClassOutput);

          result.id = m_module.opAccessChain(
            ptrTypeId, m_hs.outputPerVertex,
            indices.size(), indices.data());
        } else {
          uint32_t ptrTypeId = m_module.defPointerType(
            getVectorTypeId(result.type),
            spv::StorageClassPrivate);

          // NOTE(review): this argument was mojibake ('®isterId') in the
          // extracted text; repaired to the evident '&registerId'.
          result.id = m_module.opAccessChain(
            ptrTypeId, m_hs.outputPerPatch,
            1, &registerId);
        }

        return result;
      } else {
        // Regular shaders have their output
        // registers set up at declaration time
        return m_oRegs.at(operand.idx[0].offset);
      }
    }


    // Returns a pointer into a constant buffer (defined out of line).
    DxbcRegisterPointer emitGetConstBufPtr(
        const DxbcRegister&   operand);


    // Returns a pointer into the immediate constant buffer, either the
    // private icb array or its UBO fallback (body continues below).
    DxbcRegisterPointer emitGetImmConstBufPtr(
        const DxbcRegister&   operand) {
      DxbcRegisterValue constId = emitIndexLoad(operand.idx[0]);

      if (m_icbArray) {
        // We pad the icb array with an extra zero vector, so we can
        // clamp the index and get correct robustness behaviour.
        constId.id = m_module.opUMin(getVectorTypeId(constId.type),
          constId.id, m_module.constu32(m_icbSize));

        DxbcRegisterInfo ptrInfo;
        ptrInfo.type.ctype   = DxbcScalarType::Uint32;
        ptrInfo.type.ccount  = m_icbComponents;
        ptrInfo.type.alength = 0;
        ptrInfo.sclass       = spv::StorageClassPrivate;

        DxbcRegisterPointer result;
        result.type.ctype  = ptrInfo.type.ctype;
        result.type.ccount = ptrInfo.type.ccount;
        result.id = m_module.opAccessChain(
          getPointerTypeId(ptrInfo),
          m_icbArray, 1, &constId.id);
        return result;
      } else if (m_constantBuffers.at(Icb_BindingSlotId).varId != 0) {
        // Fallback: the icb was emitted as a uniform buffer
        // NOTE(review): std::array template arguments were lost in
        // extraction throughout this block — restore from full source.
        const std::array indices =
          {{ m_module.consti32(0), constId.id }};

        DxbcRegisterInfo ptrInfo;
        ptrInfo.type.ctype   = DxbcScalarType::Float32;
        ptrInfo.type.ccount  = m_icbComponents;
        ptrInfo.type.alength = 0;
        ptrInfo.sclass       = spv::StorageClassUniform;

        DxbcRegisterPointer result;
        result.type.ctype  = ptrInfo.type.ctype;
        result.type.ccount = ptrInfo.type.ccount;
        result.id = m_module.opAccessChain(
          getPointerTypeId(ptrInfo),
          m_constantBuffers.at(Icb_BindingSlotId).varId,
          indices.size(), indices.data());
        return result;
      } else {
        throw DxvkError("DxbcCompiler: Immediate constant buffer not defined");
      }
    }


    // Central dispatcher mapping a DXBC operand to a pointer, covering
    // register files, built-in inputs/outputs and system values.
    DxbcRegisterPointer emitGetOperandPtr(
        const DxbcRegister&   operand) {
      switch (operand.type) {
        case DxbcOperandType::Temp:
          return emitGetTempPtr(operand);

        case DxbcOperandType::IndexableTemp:
          return emitGetIndexableTempPtr(operand);

        case DxbcOperandType::Input:
        case DxbcOperandType::InputControlPoint:
        case DxbcOperandType::InputPatchConstant:
        case DxbcOperandType::OutputControlPoint:
          return emitGetInputPtr(operand);

        case DxbcOperandType::Output:
          return emitGetOutputPtr(operand);

        case DxbcOperandType::ImmediateConstantBuffer:
          return emitGetImmConstBufPtr(operand);

        case DxbcOperandType::InputThreadId:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 3 },
            m_cs.builtinGlobalInvocationId };

        case DxbcOperandType::InputThreadGroupId:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 3 },
            m_cs.builtinWorkgroupId };

        case DxbcOperandType::InputThreadIdInGroup:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 3 },
            m_cs.builtinLocalInvocationId };

        case DxbcOperandType::InputThreadIndexInGroup:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 1 },
            m_cs.builtinLocalInvocationIndex };

        case DxbcOperandType::InputCoverageMask: {
          // gl_SampleMaskIn is an array; point at element 0
          const std::array indices
            = {{ m_module.constu32(0) }};

          DxbcRegisterPointer result;
          result.type.ctype  = DxbcScalarType::Uint32;
          result.type.ccount = 1;
          result.id = m_module.opAccessChain(
            m_module.defPointerType(
              getVectorTypeId(result.type),
              spv::StorageClassInput),
            m_ps.builtinSampleMaskIn,
            indices.size(), indices.data());
          return result;
        }

        case DxbcOperandType::OutputCoverageMask: {
          const std::array indices
            = {{ m_module.constu32(0) }};

          DxbcRegisterPointer result;
          result.type.ctype  = DxbcScalarType::Uint32;
          result.type.ccount = 1;
          result.id = m_module.opAccessChain(
            m_module.defPointerType(
              getVectorTypeId(result.type),
              spv::StorageClassOutput),
            m_ps.builtinSampleMaskOut,
            indices.size(), indices.data());
          return result;
        }

        case DxbcOperandType::OutputDepth:
        case DxbcOperandType::OutputDepthGe:
        case DxbcOperandType::OutputDepthLe:
          return DxbcRegisterPointer {
            { DxbcScalarType::Float32, 1 },
            m_ps.builtinDepth };

        case DxbcOperandType::OutputStencilRef:
          return DxbcRegisterPointer {
            { DxbcScalarType::Sint32, 1 },
            m_ps.builtinStencilRef };

        case DxbcOperandType::InputPrimitiveId:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 1 },
            m_primitiveIdIn };

        case DxbcOperandType::InputDomainPoint:
          return DxbcRegisterPointer {
            { DxbcScalarType::Float32, 3 },
            m_ds.builtinTessCoord };

        case DxbcOperandType::OutputControlPointId:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 1 },
            m_hs.builtinInvocationId };

        case DxbcOperandType::InputForkInstanceId:
        case DxbcOperandType::InputJoinInstanceId:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 1 },
            getCurrentHsForkJoinPhase()->instanceIdPtr };

        case DxbcOperandType::InputGsInstanceId:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 1 },
            m_gs.builtinInvocationId };

        case DxbcOperandType::InputInnerCoverage:
          return DxbcRegisterPointer {
            { DxbcScalarType::Uint32, 1 },
            m_ps.builtinInnerCoverageId };

        default:
          throw DxvkError(str::format(
            "DxbcCompiler: Unhandled operand type: ",
            operand.type));
      }
    }


    // Computes a pointer suitable for atomic operations on a UAV or on
    // thread-group shared memory (TGSM), resolving the element address
    // for raw, structured and typed resources (continues below).
    DxbcRegisterPointer emitGetAtomicPointer(
        const DxbcRegister&   operand,
        const DxbcRegister&   address) {
      // Query information about the resource itself
      const uint32_t registerId = operand.idx[0].offset;
      const DxbcBufferInfo resourceInfo = getBufferInfo(operand);

      // For UAVs and shared memory, different methods
      // of obtaining the final pointer are used.
      bool isTgsm = operand.type == DxbcOperandType::ThreadGroupSharedMemory;
      bool isSsbo = resourceInfo.isSsbo;

      // Compute the actual address into the resource
      const DxbcRegisterValue addressValue = [&] {
        switch (resourceInfo.type) {
          case DxbcResourceType::Raw:
            return emitCalcBufferIndexRaw(emitRegisterLoad(
              address, DxbcRegMask(true, false, false, false)));

          case DxbcResourceType::Structured: {
            // x = structure index, y = byte offset within the structure
            const DxbcRegisterValue addressComponents = emitRegisterLoad(
              address, DxbcRegMask(true, true, false, false));

            return emitCalcBufferIndexStructured(
              emitRegisterExtract(addressComponents, DxbcRegMask(true, false, false, false)),
              emitRegisterExtract(addressComponents, DxbcRegMask(false, true, false, false)),
              resourceInfo.stride);
          };

          case DxbcResourceType::Typed: {
            if (isTgsm)
              throw DxvkError("DxbcCompiler: TGSM cannot be typed");

            return emitLoadTexCoord(address,
              m_uavs.at(registerId).imageInfo);
          }

          default:
            throw DxvkError("DxbcCompiler: Unhandled resource type");
        }
      }();

      // Compute the actual pointer
      DxbcRegisterPointer result;
      result.type.ctype  = resourceInfo.stype;
      result.type.ccount = 1;

      if (isTgsm) {
        result.id = m_module.opAccessChain(resourceInfo.typeId,
          resourceInfo.varId, 1, &addressValue.id);
      } else if (isSsbo) {
        // SSBOs wrap the data array in a struct, hence the extra index
        uint32_t indices[2] = { m_module.constu32(0), addressValue.id };
        result.id = m_module.opAccessChain(resourceInfo.typeId,
          resourceInfo.varId, 2, indices);
      } else {
        // Typed UAV images use texel pointers for atomics
        result.id = m_module.opImageTexelPointer(
          m_module.defPointerType(getVectorTypeId(result.type), spv::StorageClassImage),
          resourceInfo.varId, addressValue.id, m_module.constu32(0));
      }

      return result;
    }


    // Queries the size of a raw or structured (SSBO-backed) buffer
    // via OpArrayLength on its runtime array member.
    DxbcRegisterValue emitQueryBufferSize(
        const DxbcRegister&   resource) {
      const DxbcBufferInfo bufferInfo = getBufferInfo(resource);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = 1;
      result.id = m_module.opArrayLength(
        getVectorTypeId(result.type),
        bufferInfo.varId, 0);

      return result;
    }


    // Queries the element count of a texel buffer view.
    DxbcRegisterValue emitQueryTexelBufferSize(
        const DxbcRegister&   resource) {
      // Load the texel buffer object. This cannot be used with
      // constant buffers or any other type of resource.
      const DxbcBufferInfo bufferInfo = getBufferInfo(resource);

      const uint32_t bufferId = m_module.opLoad(
        bufferInfo.typeId, bufferInfo.varId);

      // We'll store this as a scalar unsigned integer
      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = 1;
      result.id = m_module.opImageQuerySize(
        getVectorTypeId(result.type), bufferId);

      return result;
    }


    // Queries the mip level count of a sampled image; UAVs and
    // multisampled images report a single level.
    DxbcRegisterValue emitQueryTextureLods(
        const DxbcRegister&   resource) {
      const DxbcBufferInfo info = getBufferInfo(resource);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = 1;

      if (info.image.ms == 0 && info.image.sampled == 1) {
        result.id = m_module.opImageQueryLevels(
          getVectorTypeId(result.type),
          m_module.opLoad(info.typeId, info.varId));
      } else {
        // Report one LOD in case of UAVs or multisampled images
        result.id = m_module.constu32(1);
      }

      return result;
    }


    // Queries the sample count of an image, or of the rasterizer itself,
    // which is read from a push constant since SPIR-V has no builtin.
    DxbcRegisterValue emitQueryTextureSamples(
        const DxbcRegister&   resource) {
      if (resource.type == DxbcOperandType::Rasterizer) {
        // SPIR-V has no gl_NumSamples equivalent, so we
        // have to work around it using a push constant
        if (!m_ps.pushConstantId)
          m_ps.pushConstantId = emitPushConstants();

        uint32_t uintTypeId = m_module.defIntType(32, 0);
        uint32_t ptrTypeId = m_module.defPointerType(uintTypeId, spv::StorageClassPushConstant);
        uint32_t index = m_module.constu32(0);

        DxbcRegisterValue result;
        result.type.ctype  = DxbcScalarType::Uint32;
        result.type.ccount = 1;
        result.id = m_module.opLoad(uintTypeId,
          m_module.opAccessChain(ptrTypeId, m_ps.pushConstantId, 1, &index));
        return result;
      } else {
        DxbcBufferInfo info = getBufferInfo(resource);

        DxbcRegisterValue result;
        result.type.ctype  = DxbcScalarType::Uint32;
        result.type.ccount = 1;

        if (info.image.ms) {
          result.id = m_module.opImageQuerySamples(
            getVectorTypeId(result.type),
            m_module.opLoad(info.typeId, info.varId));
        } else {
          // OpImageQuerySamples requires MSAA images
          result.id = m_module.constu32(1);
        }

        return result;
      }
    }


    // Queries the image dimensions at a given LOD. Multisampled and
    // storage images do not take an LOD argument.
    DxbcRegisterValue emitQueryTextureSize(
        const DxbcRegister&   resource,
              DxbcRegisterValue lod) {
      const DxbcBufferInfo info = getBufferInfo(resource);

      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Uint32;
      result.type.ccount = getTexSizeDim(info.image);

      if (info.image.ms == 0 && info.image.sampled == 1) {
        result.id = m_module.opImageQuerySizeLod(
          getVectorTypeId(result.type),
          m_module.opLoad(info.typeId, info.varId),
          lod.id);
      } else {
        result.id = m_module.opImageQuerySize(
          getVectorTypeId(result.type),
          m_module.opLoad(info.typeId, info.varId));
      }

      return result;
    }


    // Converts a (structure index, byte offset) pair into a dword index
    // into a structured buffer: index * stride/4 + offset/4.
    DxbcRegisterValue emitCalcBufferIndexStructured(
        DxbcRegisterValue     structId,
        DxbcRegisterValue     structOffset,
        uint32_t              structStride) {
      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Sint32;
      result.type.ccount = 1;

      uint32_t typeId = getVectorTypeId(result.type);
      uint32_t offset = m_module.opShiftRightLogical(typeId, structOffset.id, m_module.consti32(2));

      result.id = m_module.opIAdd(typeId,
        m_module.opIMul(typeId, structId.id, m_module.consti32(structStride / 4)),
        offset);
      return result;
    }


    // Converts a raw byte offset into a dword index (offset >> 2).
    DxbcRegisterValue emitCalcBufferIndexRaw(
        DxbcRegisterValue     byteOffset) {
      DxbcRegisterValue result;
      result.type.ctype  = DxbcScalarType::Sint32;
      result.type.ccount = 1;

      uint32_t typeId = getVectorTypeId(result.type);
      result.id = m_module.opShiftRightLogical(typeId, byteOffset.id, m_module.consti32(2));
      return result;
    }


    // Trims a coordinate vector to the dimension count expected by the
    // image type. NOTE(review): this definition continues past the end
    // of this chunk — the remainder is not visible here.
    DxbcRegisterValue emitCalcTexCoord(
        DxbcRegisterValue     coordVector,
        const DxbcImageInfo&  imageInfo) {
      const uint32_t dim = getTexCoordDim(imageInfo);

      if (dim != coordVector.type.ccount) {
        coordVector = emitRegisterExtract(
          coordVector, DxbcRegMask::firstN(dim));
      }
return coordVector;\n }\n DxbcRegisterValue emitLoadTexCoord(\n const DxbcRegister& coordReg,\n const DxbcImageInfo& imageInfo) {\n return emitCalcTexCoord(emitRegisterLoad(coordReg,\n DxbcRegMask(true, true, true, true)), imageInfo);\n }\n DxbcRegisterValue emitIndexLoad(\n DxbcRegIndex index) {\n if (index.relReg != nullptr) {\n DxbcRegisterValue result = emitRegisterLoad(\n *index.relReg, DxbcRegMask(true, false, false, false));\n \n if (index.offset != 0) {\n result.id = m_module.opIAdd(\n getVectorTypeId(result.type), result.id,\n m_module.consti32(index.offset));\n }\n \n return result;\n } else {\n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Sint32;\n result.type.ccount = 1;\n result.id = m_module.consti32(index.offset);\n return result;\n }\n }\n DxbcRegisterValue emitValueLoad(\n DxbcRegisterPointer ptr) {\n DxbcRegisterValue result;\n result.type = ptr.type;\n result.id = m_module.opLoad(\n getVectorTypeId(result.type),\n ptr.id);\n return result;\n }\n void emitValueStore(\n DxbcRegisterPointer ptr,\n DxbcRegisterValue value,\n DxbcRegMask writeMask) {\n // If the component types are not compatible,\n // we need to bit-cast the source variable.\n if (value.type.ctype != ptr.type.ctype)\n value = emitRegisterBitcast(value, ptr.type.ctype);\n \n // If the source value consists of only one component,\n // it is stored in all components of the destination.\n if (value.type.ccount == 1)\n value = emitRegisterExtend(value, writeMask.popCount());\n \n if (ptr.type.ccount == writeMask.popCount()) {\n // Simple case: We write to the entire register\n m_module.opStore(ptr.id, value.id);\n } else {\n // We only write to part of the destination\n // register, so we need to load and modify it\n DxbcRegisterValue tmp = emitValueLoad(ptr);\n tmp = emitRegisterInsert(tmp, value, writeMask);\n \n m_module.opStore(ptr.id, tmp.id);\n }\n }\n DxbcRegisterValue emitRegisterLoadRaw(\n const DxbcRegister& reg) {\n // Try to find index range for the given 
register\n const DxbcIndexRange* indexRange = nullptr;\n\n if (reg.idxDim && reg.idx[reg.idxDim - 1u].relReg) {\n uint32_t offset = reg.idx[reg.idxDim - 1u].offset;\n\n for (const auto& range : m_indexRanges) {\n if (reg.type == range.type && offset >= range.start && offset < range.start + range.length)\n indexRange = ⦥\n }\n }\n\n if (reg.type == DxbcOperandType::IndexableTemp || indexRange) {\n bool doBoundsCheck = reg.idx[reg.idxDim - 1u].relReg != nullptr;\n\n if (doBoundsCheck) {\n DxbcRegisterValue indexId = emitIndexLoad(reg.idx[reg.idxDim - 1u]);\n uint32_t boundsCheck = 0u;\n\n if (reg.type == DxbcOperandType::IndexableTemp) {\n boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), indexId.id,\n m_module.constu32(m_xRegs.at(reg.idx[0].offset).alength));\n } else {\n uint32_t adjustedId = m_module.opISub(getVectorTypeId(indexId.type),\n indexId.id, m_module.consti32(indexRange->start));\n\n boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), adjustedId,\n m_module.constu32(indexRange->length));\n }\n\n // Kind of ugly to have an empty else block here but there's no\n // way for us to know the current block ID for the phi below\n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelElse = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n\n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(boundsCheck, cond.labelIf, cond.labelElse);\n\n m_module.opLabel(cond.labelIf);\n\n DxbcRegisterValue returnValue = emitValueLoad(emitGetOperandPtr(reg));\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelElse);\n\n DxbcRegisterValue zeroValue = emitBuildZeroVector(returnValue.type);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n\n std::array phiLabels = {{\n { returnValue.id, cond.labelIf },\n { zeroValue.id, cond.labelElse },\n }};\n\n returnValue.id = m_module.opPhi(\n getVectorTypeId(returnValue.type),\n 
phiLabels.size(), phiLabels.data());\n return returnValue;\n }\n }\n\n DxbcRegisterValue value = emitValueLoad(emitGetOperandPtr(reg));\n\n // Pad icb values to a vec4 since the app may access components that are always 0\n if (reg.type == DxbcOperandType::ImmediateConstantBuffer && value.type.ccount < 4u) {\n DxbcVectorType zeroType;\n zeroType.ctype = value.type.ctype;\n zeroType.ccount = 4u - value.type.ccount;\n\n uint32_t zeroVector = emitBuildZeroVector(zeroType).id;\n\n std::array constituents = { value.id, zeroVector };\n\n value.type.ccount = 4u;\n value.id = m_module.opCompositeConstruct(getVectorTypeId(value.type),\n constituents.size(), constituents.data());\n }\n\n return value;\n }\n DxbcRegisterValue emitConstantBufferLoad(\n const DxbcRegister& reg,\n DxbcRegMask writeMask) {\n // Constant buffers take a two-dimensional index:\n // (0) register index (immediate)\n // (1) constant offset (relative)\n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = 4;\n info.type.alength = 0;\n info.sclass = spv::StorageClassUniform;\n \n uint32_t regId = reg.idx[0].offset;\n DxbcRegisterValue constId = emitIndexLoad(reg.idx[1]);\n \n uint32_t ptrTypeId = getPointerTypeId(info);\n \n const std::array indices =\n {{ m_module.consti32(0), constId.id }};\n \n DxbcRegisterPointer ptr;\n ptr.type.ctype = info.type.ctype;\n ptr.type.ccount = info.type.ccount;\n ptr.id = m_module.opAccessChain(ptrTypeId,\n m_constantBuffers.at(regId).varId,\n indices.size(), indices.data());\n\n // Load individual components from buffer\n std::array ccomps = { 0, 0, 0, 0 };\n std::array scomps = { 0, 0, 0, 0 };\n uint32_t scount = 0;\n\n for (uint32_t i = 0; i < 4; i++) {\n uint32_t sindex = reg.swizzle[i];\n\n if (!writeMask[i] || ccomps[sindex])\n continue;\n \n uint32_t componentId = m_module.constu32(sindex);\n uint32_t componentPtr = m_module.opAccessChain(\n m_module.defPointerType(\n getScalarTypeId(DxbcScalarType::Float32),\n 
spv::StorageClassUniform),\n ptr.id, 1, &componentId);\n \n ccomps[sindex] = m_module.opLoad(\n getScalarTypeId(DxbcScalarType::Float32),\n componentPtr);\n }\n\n for (uint32_t i = 0; i < 4; i++) {\n uint32_t sindex = reg.swizzle[i];\n \n if (writeMask[i])\n scomps[scount++] = ccomps[sindex];\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Float32;\n result.type.ccount = scount;\n result.id = scomps[0];\n \n if (scount > 1) {\n result.id = m_module.opCompositeConstruct(\n getVectorTypeId(result.type),\n scount, scomps.data());\n }\n\n // Apply any post-processing that might be necessary\n result = emitRegisterBitcast(result, reg.dataType);\n result = emitSrcOperandModifiers(result, reg.modifiers);\n return result;\n }\n DxbcRegisterValue emitRegisterLoad(\n const DxbcRegister& reg,\n DxbcRegMask writeMask) {\n if (reg.type == DxbcOperandType::Imm32\n || reg.type == DxbcOperandType::Imm64) {\n DxbcRegisterValue result;\n \n if (reg.componentCount == DxbcComponentCount::Component1) {\n // Create one single u32 constant\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.constu32(reg.imm.u32_1);\n\n result = emitRegisterExtend(result, writeMask.popCount());\n } else if (reg.componentCount == DxbcComponentCount::Component4) {\n // Create a u32 vector with as many components as needed\n std::array indices = { };\n uint32_t indexId = 0;\n \n for (uint32_t i = 0; i < indices.size(); i++) {\n if (writeMask[i]) {\n indices.at(indexId++) =\n m_module.constu32(reg.imm.u32_4[i]);\n }\n }\n \n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = writeMask.popCount();\n result.id = indices.at(0);\n \n if (indexId > 1) {\n result.id = m_module.constComposite(\n getVectorTypeId(result.type),\n result.type.ccount, indices.data());\n }\n \n } else {\n // Something went horribly wrong in the decoder or the shader is broken\n throw DxvkError(\"DxbcCompiler: Invalid component count for immediate 
operand\");\n }\n \n // Cast constants to the requested type\n return emitRegisterBitcast(result, reg.dataType);\n } else if (reg.type == DxbcOperandType::ConstantBuffer) {\n return emitConstantBufferLoad(reg, writeMask);\n } else {\n // Load operand from the operand pointer\n DxbcRegisterValue result = emitRegisterLoadRaw(reg);\n \n // Apply operand swizzle to the operand value\n result = emitRegisterSwizzle(result, reg.swizzle, writeMask);\n \n // Cast it to the requested type. We need to do\n // this after the swizzling for 64-bit types.\n result = emitRegisterBitcast(result, reg.dataType);\n \n // Apply operand modifiers\n result = emitSrcOperandModifiers(result, reg.modifiers);\n return result;\n }\n }\n void emitRegisterStore(\n const DxbcRegister& reg,\n DxbcRegisterValue value) {\n if (reg.type == DxbcOperandType::IndexableTemp) {\n bool doBoundsCheck = reg.idx[1].relReg != nullptr;\n DxbcRegisterValue vectorId = emitIndexLoad(reg.idx[1]);\n\n if (doBoundsCheck) {\n uint32_t boundsCheck = m_module.opULessThan(\n m_module.defBoolType(), vectorId.id,\n m_module.constu32(m_xRegs.at(reg.idx[0].offset).alength));\n \n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n \n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(boundsCheck, cond.labelIf, cond.labelEnd);\n \n m_module.opLabel(cond.labelIf);\n\n emitValueStore(getIndexableTempPtr(reg, vectorId), value, reg.mask);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n } else {\n emitValueStore(getIndexableTempPtr(reg, vectorId), value, reg.mask);\n }\n } else {\n emitValueStore(emitGetOperandPtr(reg), value, reg.mask);\n }\n }\n void emitInputSetup() {\n m_module.setLateConst(m_vArrayLengthId, &m_vArrayLength);\n\n // Copy all defined v# registers into the input array. 
Note\n // that the outer index of the array is the vertex index.\n const uint32_t vecTypeId = m_module.defVectorType(m_module.defFloatType(32), 4);\n const uint32_t dstPtrTypeId = m_module.defPointerType(vecTypeId, spv::StorageClassPrivate);\n \n for (uint32_t i = 0; i < m_vRegs.size(); i++) {\n if (m_vRegs.at(i).id != 0) {\n const uint32_t registerId = m_module.consti32(i);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n std::array indices\n = {{ m_module.consti32(v), registerId }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i).id, 1, indices.data());\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n dstPtrTypeId, m_vArray, 2, indices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n }\n }\n \n // Copy all system value registers into the array,\n // preserving any previously written contents.\n for (const DxbcSvMapping& map : m_vMappings) {\n const uint32_t registerId = m_module.consti32(map.regId);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n const DxbcRegisterValue value = [&] {\n switch (m_programInfo.type()) {\n case DxbcProgramType::GeometryShader: return emitGsSystemValueLoad(map.sv, map.regMask, v);\n default: throw DxvkError(str::format(\"DxbcCompiler: Unexpected stage: \", m_programInfo.type()));\n }\n }();\n \n std::array indices = {\n m_module.consti32(v), registerId,\n };\n \n DxbcRegisterPointer inputReg;\n inputReg.type.ctype = DxbcScalarType::Float32;\n inputReg.type.ccount = 4;\n inputReg.id = m_module.opAccessChain(dstPtrTypeId,\n m_vArray, indices.size(), indices.data());\n emitValueStore(inputReg, value, map.regMask);\n }\n }\n }\n void emitInputSetup(uint32_t 
vertexCount) {\n m_module.setLateConst(m_vArrayLengthId, &m_vArrayLength);\n\n // Copy all defined v# registers into the input array. Note\n // that the outer index of the array is the vertex index.\n const uint32_t vecTypeId = m_module.defVectorType(m_module.defFloatType(32), 4);\n const uint32_t dstPtrTypeId = m_module.defPointerType(vecTypeId, spv::StorageClassPrivate);\n \n for (uint32_t i = 0; i < m_vRegs.size(); i++) {\n if (m_vRegs.at(i).id != 0) {\n const uint32_t registerId = m_module.consti32(i);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n std::array indices\n = {{ m_module.consti32(v), registerId }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i).id, 1, indices.data());\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n dstPtrTypeId, m_vArray, 2, indices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n }\n }\n \n // Copy all system value registers into the array,\n // preserving any previously written contents.\n for (const DxbcSvMapping& map : m_vMappings) {\n const uint32_t registerId = m_module.consti32(map.regId);\n \n for (uint32_t v = 0; v < vertexCount; v++) {\n const DxbcRegisterValue value = [&] {\n switch (m_programInfo.type()) {\n case DxbcProgramType::GeometryShader: return emitGsSystemValueLoad(map.sv, map.regMask, v);\n default: throw DxvkError(str::format(\"DxbcCompiler: Unexpected stage: \", m_programInfo.type()));\n }\n }();\n \n std::array indices = {\n m_module.consti32(v), registerId,\n };\n \n DxbcRegisterPointer inputReg;\n inputReg.type.ctype = DxbcScalarType::Float32;\n inputReg.type.ccount = 4;\n inputReg.id = m_module.opAccessChain(dstPtrTypeId,\n 
m_vArray, indices.size(), indices.data());\n emitValueStore(inputReg, value, map.regMask);\n }\n }\n }\n void emitOutputSetup() {\n for (const DxbcSvMapping& svMapping : m_oMappings) {\n DxbcRegisterPointer outputReg = m_oRegs.at(svMapping.regId);\n \n if (m_programInfo.type() == DxbcProgramType::HullShader) {\n uint32_t registerIndex = m_module.constu32(svMapping.regId);\n \n outputReg.type = { DxbcScalarType::Float32, 4 };\n outputReg.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(outputReg.type),\n spv::StorageClassPrivate),\n m_hs.outputPerPatch,\n 1, ®isterIndex);\n }\n \n auto sv = svMapping.sv;\n auto mask = svMapping.regMask;\n auto value = emitValueLoad(outputReg);\n \n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: emitVsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::GeometryShader: emitGsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::HullShader: emitHsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::DomainShader: emitDsSystemValueStore(sv, mask, value); break;\n case DxbcProgramType::PixelShader: emitPsSystemValueStore(sv, mask, value); break;\n default: break;\n }\n }\n }\n void emitOutputDepthClamp() {\n // HACK: Some drivers do not clamp FragDepth to [minDepth..maxDepth]\n // before writing to the depth attachment, but we do not have acccess\n // to those. 
Clamp to [0..1] instead.\n if (m_ps.builtinDepth) {\n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Float32, 1 };\n ptr.id = m_ps.builtinDepth;\n\n DxbcRegisterValue value = emitValueLoad(ptr);\n\n value.id = m_module.opNClamp(\n getVectorTypeId(ptr.type),\n value.id,\n m_module.constf32(0.0f),\n m_module.constf32(1.0f));\n \n emitValueStore(ptr, value,\n DxbcRegMask::firstN(1));\n }\n }\n void emitInitWorkgroupMemory() {\n bool hasTgsm = false;\n\n SpirvMemoryOperands memoryOperands;\n memoryOperands.flags = spv::MemoryAccessNonPrivatePointerMask;\n\n for (uint32_t i = 0; i < m_gRegs.size(); i++) {\n if (!m_gRegs[i].varId)\n continue;\n \n if (!m_cs.builtinLocalInvocationIndex) {\n m_cs.builtinLocalInvocationIndex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLocalInvocationIndex,\n \"vThreadIndexInGroup\");\n }\n\n uint32_t intTypeId = getScalarTypeId(DxbcScalarType::Uint32);\n uint32_t ptrTypeId = m_module.defPointerType(\n intTypeId, spv::StorageClassWorkgroup);\n\n uint32_t numElements = m_gRegs[i].type == DxbcResourceType::Structured\n ? 
m_gRegs[i].elementCount * m_gRegs[i].elementStride / 4\n : m_gRegs[i].elementCount / 4;\n \n uint32_t numThreads = m_cs.workgroupSizeX *\n m_cs.workgroupSizeY * m_cs.workgroupSizeZ;\n \n uint32_t numElementsPerThread = numElements / numThreads;\n uint32_t numElementsRemaining = numElements % numThreads;\n\n uint32_t threadId = m_module.opLoad(\n intTypeId, m_cs.builtinLocalInvocationIndex);\n uint32_t zeroId = m_module.constu32(0);\n\n for (uint32_t e = 0; e < numElementsPerThread; e++) {\n uint32_t ofsId = m_module.opIAdd(intTypeId, threadId,\n m_module.constu32(numThreads * e));\n \n uint32_t ptrId = m_module.opAccessChain(\n ptrTypeId, m_gRegs[i].varId, 1, &ofsId);\n\n m_module.opStore(ptrId, zeroId, memoryOperands);\n }\n\n if (numElementsRemaining) {\n uint32_t condition = m_module.opULessThan(\n m_module.defBoolType(), threadId,\n m_module.constu32(numElementsRemaining));\n \n DxbcConditional cond;\n cond.labelIf = m_module.allocateId();\n cond.labelEnd = m_module.allocateId();\n\n m_module.opSelectionMerge(cond.labelEnd, spv::SelectionControlMaskNone);\n m_module.opBranchConditional(condition, cond.labelIf, cond.labelEnd);\n\n m_module.opLabel(cond.labelIf);\n\n uint32_t ofsId = m_module.opIAdd(intTypeId, threadId,\n m_module.constu32(numThreads * numElementsPerThread));\n \n uint32_t ptrId = m_module.opAccessChain(\n ptrTypeId, m_gRegs[i].varId, 1, &ofsId);\n \n m_module.opStore(ptrId, zeroId, memoryOperands);\n\n m_module.opBranch(cond.labelEnd);\n m_module.opLabel (cond.labelEnd);\n }\n\n hasTgsm = true;\n }\n\n if (hasTgsm) {\n m_module.opControlBarrier(\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask));\n }\n }\n DxbcRegisterValue emitVsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask) {\n switch (sv) {\n case 
DxbcSystemValue::VertexId: {\n const uint32_t typeId = getScalarTypeId(DxbcScalarType::Uint32);\n \n if (m_vs.builtinVertexId == 0) {\n m_vs.builtinVertexId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInVertexIndex,\n \"vs_vertex_index\");\n }\n \n if (m_vs.builtinBaseVertex == 0) {\n m_vs.builtinBaseVertex = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInBaseVertex,\n \"vs_base_vertex\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opISub(typeId,\n m_module.opLoad(typeId, m_vs.builtinVertexId),\n m_module.opLoad(typeId, m_vs.builtinBaseVertex));\n return result;\n } break;\n \n case DxbcSystemValue::InstanceId: {\n const uint32_t typeId = getScalarTypeId(DxbcScalarType::Uint32);\n \n if (m_vs.builtinInstanceId == 0) {\n m_vs.builtinInstanceId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInstanceIndex,\n \"vs_instance_index\");\n }\n \n if (m_vs.builtinBaseInstance == 0) {\n m_vs.builtinBaseInstance = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInBaseInstance,\n \"vs_base_instance\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opISub(typeId,\n m_module.opLoad(typeId, m_vs.builtinInstanceId),\n m_module.opLoad(typeId, m_vs.builtinBaseInstance));\n return result;\n } break;\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled VS SV input: \", sv));\n }\n }\n DxbcRegisterValue emitGsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n uint32_t vertexId) {\n switch (sv) {\n case DxbcSystemValue::Position: {\n uint32_t arrayIndex = m_module.consti32(vertexId);\n\n if (!m_positionIn) {\n m_positionIn = emitNewBuiltinVariable({\n { 
DxbcScalarType::Float32, 4, primitiveVertexCount(m_gs.inputPrimitive) },\n spv::StorageClassInput },\n spv::BuiltInPosition,\n \"in_position\");\n }\n\n DxbcRegisterPointer ptrIn;\n ptrIn.type.ctype = DxbcScalarType::Float32;\n ptrIn.type.ccount = 4;\n ptrIn.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(ptrIn.type), spv::StorageClassInput),\n m_positionIn, 1, &arrayIndex);\n \n return emitRegisterExtract(emitValueLoad(ptrIn), mask);\n } break;\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled GS SV input: \", sv));\n }\n }\n DxbcRegisterValue emitPsSystemValueLoad(\n DxbcSystemValue sv,\n DxbcRegMask mask) {\n switch (sv) {\n case DxbcSystemValue::Position: {\n if (m_ps.builtinFragCoord == 0) {\n m_ps.builtinFragCoord = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 4, 0 },\n spv::StorageClassInput },\n spv::BuiltInFragCoord,\n \"ps_frag_coord\");\n }\n \n DxbcRegisterPointer ptrIn;\n ptrIn.type = { DxbcScalarType::Float32, 4 };\n ptrIn.id = m_ps.builtinFragCoord;\n \n // The X, Y and Z components of the SV_POSITION semantic\n // are identical to Vulkan's FragCoord builtin, but we\n // need to compute the reciprocal of the W component.\n DxbcRegisterValue fragCoord = emitValueLoad(ptrIn);\n \n uint32_t componentIndex = 3;\n uint32_t t_f32 = m_module.defFloatType(32);\n uint32_t v_wComp = m_module.opCompositeExtract(t_f32, fragCoord.id, 1, &componentIndex);\n v_wComp = m_module.opFDiv(t_f32, m_module.constf32(1.0f), v_wComp);\n \n fragCoord.id = m_module.opCompositeInsert(\n getVectorTypeId(fragCoord.type),\n v_wComp, fragCoord.id,\n 1, &componentIndex);\n \n return emitRegisterExtract(fragCoord, mask);\n } break;\n \n case DxbcSystemValue::IsFrontFace: {\n if (m_ps.builtinIsFrontFace == 0) {\n m_ps.builtinIsFrontFace = emitNewBuiltinVariable({\n { DxbcScalarType::Bool, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInFrontFacing,\n \"ps_is_front_face\");\n }\n \n DxbcRegisterValue result;\n result.type.ctype = 
DxbcScalarType::Uint32;\n result.type.ccount = 1;\n result.id = m_module.opSelect(\n getVectorTypeId(result.type),\n m_module.opLoad(\n m_module.defBoolType(),\n m_ps.builtinIsFrontFace),\n m_module.constu32(0xFFFFFFFF),\n m_module.constu32(0x00000000));\n return result;\n } break;\n \n case DxbcSystemValue::PrimitiveId: {\n if (m_primitiveIdIn == 0) {\n m_module.enableCapability(spv::CapabilityGeometry);\n \n m_primitiveIdIn = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInPrimitiveId,\n \"ps_primitive_id\");\n }\n \n DxbcRegisterPointer ptrIn;\n ptrIn.type = { DxbcScalarType::Uint32, 1 };\n ptrIn.id = m_primitiveIdIn;\n \n return emitValueLoad(ptrIn);\n } break;\n \n case DxbcSystemValue::SampleIndex: {\n if (m_ps.builtinSampleId == 0) {\n m_module.enableCapability(spv::CapabilitySampleRateShading);\n \n m_ps.builtinSampleId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInSampleId,\n \"ps_sample_id\");\n }\n \n DxbcRegisterPointer ptrIn;\n ptrIn.type.ctype = DxbcScalarType::Uint32;\n ptrIn.type.ccount = 1;\n ptrIn.id = m_ps.builtinSampleId;\n \n return emitValueLoad(ptrIn);\n } break;\n \n case DxbcSystemValue::RenderTargetId: {\n if (m_ps.builtinLayer == 0) {\n m_module.enableCapability(spv::CapabilityGeometry);\n \n m_ps.builtinLayer = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInLayer,\n \"v_layer\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Uint32;\n ptr.type.ccount = 1;\n ptr.id = m_ps.builtinLayer;\n \n return emitValueLoad(ptr);\n } break;\n \n case DxbcSystemValue::ViewportId: {\n if (m_ps.builtinViewportId == 0) {\n m_module.enableCapability(spv::CapabilityMultiViewport);\n \n m_ps.builtinViewportId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInViewportIndex,\n \"v_viewport\");\n }\n \n 
DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Uint32;\n ptr.type.ccount = 1;\n ptr.id = m_ps.builtinViewportId;\n \n return emitValueLoad(ptr);\n } break;\n \n default:\n throw DxvkError(str::format(\n \"DxbcCompiler: Unhandled PS SV input: \", sv));\n }\n }\n void emitVsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position: {\n if (!m_positionOut) {\n m_positionOut = emitNewBuiltinVariable({\n { DxbcScalarType::Float32, 4, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPosition,\n \"out_position\");\n }\n\n DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Float32;\n ptr.type.ccount = 4;\n ptr.id = m_positionOut;\n \n emitValueStore(ptr, value, mask);\n } break;\n \n case DxbcSystemValue::RenderTargetId: {\n if (m_programInfo.type() != DxbcProgramType::GeometryShader)\n m_module.enableCapability(spv::CapabilityShaderLayer);\n\n if (m_gs.builtinLayer == 0) {\n m_module.enableCapability(spv::CapabilityGeometry);\n\n m_gs.builtinLayer = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInLayer,\n \"o_layer\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Uint32, 1 };\n ptr.id = m_gs.builtinLayer;\n \n emitValueStore(\n ptr, emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n case DxbcSystemValue::ViewportId: {\n if (m_programInfo.type() != DxbcProgramType::GeometryShader)\n m_module.enableCapability(spv::CapabilityShaderViewportIndex);\n\n if (m_gs.builtinViewportId == 0) {\n m_module.enableCapability(spv::CapabilityMultiViewport);\n \n m_gs.builtinViewportId = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInViewportIndex,\n \"o_viewport\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Uint32, 1};\n ptr.id = m_gs.builtinViewportId;\n \n emitValueStore(\n ptr, 
emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled VS SV output: \", sv));\n }\n }\n void emitHsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n if (sv >= DxbcSystemValue::FinalQuadUeq0EdgeTessFactor\n && sv <= DxbcSystemValue::FinalLineDensityTessFactor) {\n struct TessFactor {\n uint32_t array = 0;\n uint32_t index = 0;\n };\n \n static const std::array s_tessFactors = {{\n { m_hs.builtinTessLevelOuter, 0 }, // FinalQuadUeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalQuadVeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 2 }, // FinalQuadUeq1EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 3 }, // FinalQuadVeq1EdgeTessFactor\n { m_hs.builtinTessLevelInner, 0 }, // FinalQuadUInsideTessFactor\n { m_hs.builtinTessLevelInner, 1 }, // FinalQuadVInsideTessFactor\n { m_hs.builtinTessLevelOuter, 0 }, // FinalTriUeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalTriVeq0EdgeTessFactor\n { m_hs.builtinTessLevelOuter, 2 }, // FinalTriWeq0EdgeTessFactor\n { m_hs.builtinTessLevelInner, 0 }, // FinalTriInsideTessFactor\n { m_hs.builtinTessLevelOuter, 0 }, // FinalLineDensityTessFactor\n { m_hs.builtinTessLevelOuter, 1 }, // FinalLineDetailTessFactor\n }};\n \n const TessFactor tessFactor = s_tessFactors.at(uint32_t(sv)\n - uint32_t(DxbcSystemValue::FinalQuadUeq0EdgeTessFactor));\n \n const uint32_t tessFactorArrayIndex\n = m_module.constu32(tessFactor.index);\n \n // Apply global tess factor limit\n float maxTessFactor = m_hs.maxTessFactor;\n\n if (m_moduleInfo.tess != nullptr) {\n if (m_moduleInfo.tess->maxTessFactor < maxTessFactor)\n maxTessFactor = m_moduleInfo.tess->maxTessFactor;\n }\n\n DxbcRegisterValue tessValue = emitRegisterExtract(value, mask);\n tessValue.id = m_module.opNClamp(getVectorTypeId(tessValue.type),\n tessValue.id, m_module.constf32(0.0f),\n 
m_module.constf32(maxTessFactor));\n \n DxbcRegisterPointer ptr;\n ptr.type.ctype = DxbcScalarType::Float32;\n ptr.type.ccount = 1;\n ptr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(ptr.type),\n spv::StorageClassOutput),\n tessFactor.array, 1,\n &tessFactorArrayIndex);\n \n emitValueStore(ptr, tessValue,\n DxbcRegMask(true, false, false, false));\n } else {\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled HS SV output: \", sv));\n }\n }\n void emitDsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::CullDistance:\n case DxbcSystemValue::ClipDistance:\n case DxbcSystemValue::RenderTargetId:\n case DxbcSystemValue::ViewportId:\n emitVsSystemValueStore(sv, mask, value);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled DS SV output: \", sv));\n }\n }\n void emitGsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::CullDistance:\n case DxbcSystemValue::ClipDistance:\n case DxbcSystemValue::RenderTargetId:\n case DxbcSystemValue::ViewportId:\n emitVsSystemValueStore(sv, mask, value);\n break;\n \n case DxbcSystemValue::PrimitiveId: {\n if (m_primitiveIdOut == 0) {\n m_primitiveIdOut = emitNewBuiltinVariable({\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPrimitiveId,\n \"gs_primitive_id\");\n }\n \n DxbcRegisterPointer ptr;\n ptr.type = { DxbcScalarType::Uint32, 1};\n ptr.id = m_primitiveIdOut;\n \n emitValueStore(\n ptr, emitRegisterExtract(value, mask),\n DxbcRegMask(true, false, false, false));\n } break;\n \n default:\n Logger::warn(str::format(\n \"DxbcCompiler: Unhandled GS SV output: \", sv));\n }\n }\n void emitPsSystemValueStore(\n DxbcSystemValue sv,\n DxbcRegMask mask,\n const DxbcRegisterValue& value) {\n 
Logger::warn(str::format(\n \"DxbcCompiler: Unhandled PS SV output: \", sv));\n }\n void emitClipCullStore(\n DxbcSystemValue sv,\n uint32_t dstArray) {\n uint32_t offset = 0;\n \n if (dstArray == 0)\n return;\n \n for (auto e = m_osgn->begin(); e != m_osgn->end(); e++) {\n if (e->systemValue == sv) {\n DxbcRegisterPointer srcPtr = m_oRegs.at(e->registerId);\n DxbcRegisterValue srcValue = emitValueLoad(srcPtr);\n \n for (uint32_t i = 0; i < 4; i++) {\n if (e->componentMask[i]) {\n uint32_t offsetId = m_module.consti32(offset++);\n \n DxbcRegisterValue component = emitRegisterExtract(\n srcValue, DxbcRegMask::select(i));\n \n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 1 };\n dstPtr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(dstPtr.type),\n spv::StorageClassOutput),\n dstArray, 1, &offsetId);\n \n emitValueStore(dstPtr, component,\n DxbcRegMask(true, false, false, false));\n }\n }\n }\n }\n }\n void emitClipCullLoad(\n DxbcSystemValue sv,\n uint32_t srcArray) {\n uint32_t offset = 0;\n \n if (srcArray == 0)\n return;\n \n for (auto e = m_isgn->begin(); e != m_isgn->end(); e++) {\n if (e->systemValue == sv) {\n // Load individual components from the source array\n uint32_t componentIndex = 0;\n std::array componentIds = {{ 0, 0, 0, 0 }};\n \n for (uint32_t i = 0; i < 4; i++) {\n if (e->componentMask[i]) {\n uint32_t offsetId = m_module.consti32(offset++);\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = { DxbcScalarType::Float32, 1 };\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(srcPtr.type),\n spv::StorageClassInput),\n srcArray, 1, &offsetId);\n \n componentIds[componentIndex++]\n = emitValueLoad(srcPtr).id;\n }\n }\n \n // Put everything into one vector\n DxbcRegisterValue dstValue;\n dstValue.type = { DxbcScalarType::Float32, componentIndex };\n dstValue.id = componentIds[0];\n \n if (componentIndex > 1) {\n dstValue.id = m_module.opCompositeConstruct(\n 
getVectorTypeId(dstValue.type),\n componentIndex, componentIds.data());\n }\n \n // Store vector to the input array\n uint32_t registerId = m_module.consti32(e->registerId);\n \n DxbcRegisterPointer dstInput;\n dstInput.type = { DxbcScalarType::Float32, 4 };\n dstInput.id = m_module.opAccessChain(\n m_module.defPointerType(\n getVectorTypeId(dstInput.type),\n spv::StorageClassPrivate),\n m_vArray, 1, ®isterId);\n \n emitValueStore(dstInput, dstValue, e->componentMask);\n }\n }\n }\n void emitPointSizeStore() {\n if (m_moduleInfo.options.needsPointSizeExport) {\n uint32_t pointSizeId = emitNewBuiltinVariable(DxbcRegisterInfo {\n { DxbcScalarType::Float32, 1, 0 },\n spv::StorageClassOutput },\n spv::BuiltInPointSize,\n \"point_size\");\n\n m_module.opStore(pointSizeId, m_module.constf32(1.0f));\n }\n }\n void emitInit() {\n // Set up common capabilities for all shaders\n m_module.enableCapability(spv::CapabilityShader);\n m_module.enableCapability(spv::CapabilityImageQuery);\n \n // Initialize the shader module with capabilities\n // etc. 
Each shader type has its own peculiarities.\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: emitVsInit(); break;\n case DxbcProgramType::HullShader: emitHsInit(); break;\n case DxbcProgramType::DomainShader: emitDsInit(); break;\n case DxbcProgramType::GeometryShader: emitGsInit(); break;\n case DxbcProgramType::PixelShader: emitPsInit(); break;\n case DxbcProgramType::ComputeShader: emitCsInit(); break;\n default: throw DxvkError(\"Invalid shader stage\");\n }\n }\n void emitFunctionBegin(\n uint32_t entryPoint,\n uint32_t returnType,\n uint32_t funcType) {\n this->emitFunctionEnd();\n \n m_module.functionBegin(\n returnType, entryPoint, funcType,\n spv::FunctionControlMaskNone);\n \n m_insideFunction = true;\n }\n void emitFunctionEnd() {\n if (m_insideFunction) {\n m_module.opReturn();\n m_module.functionEnd();\n }\n \n m_insideFunction = false;\n }\n void emitFunctionLabel() {\n m_module.opLabel(m_module.allocateId());\n }\n void emitMainFunctionBegin() {\n this->emitFunctionBegin(\n m_entryPointId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitVsInit() {\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n m_module.enableCapability(spv::CapabilityDrawParameters);\n \n // Standard input array\n emitDclInputArray(0);\n \n // Cull/clip distances as outputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassOutput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassOutput);\n \n // Main function of the vertex shader\n m_vs.functionId = m_module.allocateId();\n m_module.setDebugName(m_vs.functionId, \"vs_main\");\n \n this->emitFunctionBegin(\n m_vs.functionId,\n m_module.defVoidType(),\n 
m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitHsInit() {\n m_module.enableCapability(spv::CapabilityTessellation);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n \n m_hs.builtinInvocationId = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Uint32, 1, 0 },\n spv::StorageClassInput },\n spv::BuiltInInvocationId,\n \"vOutputControlPointId\");\n \n m_hs.builtinTessLevelOuter = emitBuiltinTessLevelOuter(spv::StorageClassOutput);\n m_hs.builtinTessLevelInner = emitBuiltinTessLevelInner(spv::StorageClassOutput);\n }\n void emitDsInit() {\n m_module.enableCapability(spv::CapabilityTessellation);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n \n m_ds.builtinTessLevelOuter = emitBuiltinTessLevelOuter(spv::StorageClassInput);\n m_ds.builtinTessLevelInner = emitBuiltinTessLevelInner(spv::StorageClassInput);\n \n // Cull/clip distances as outputs\n m_clipDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numClipPlanes,\n spv::BuiltInClipDistance,\n spv::StorageClassOutput);\n \n m_cullDistances = emitDclClipCullDistanceArray(\n m_analysis->clipCullOut.numCullPlanes,\n spv::BuiltInCullDistance,\n spv::StorageClassOutput);\n \n // Main function of the domain shader\n m_ds.functionId = m_module.allocateId();\n m_module.setDebugName(m_ds.functionId, \"ds_main\");\n \n this->emitFunctionBegin(\n m_ds.functionId,\n m_module.defVoidType(),\n m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr));\n this->emitFunctionLabel();\n }\n void emitGsInit() {\n m_module.enableCapability(spv::CapabilityGeometry);\n m_module.enableCapability(spv::CapabilityClipDistance);\n m_module.enableCapability(spv::CapabilityCullDistance);\n\n // Enable capabilities for xfb mode if necessary\n if (m_moduleInfo.xfb) {\n 
        m_module.enableCapability(spv::CapabilityGeometryStreams);
        m_module.enableCapability(spv::CapabilityTransformFeedback);

        m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeXfb);
      }

      // We only need outputs if rasterization is enabled
      m_gs.needsOutputSetup = !m_moduleInfo.xfb
        || m_moduleInfo.xfb->rasterizedStream >= 0;

      // Cull/clip distances as outputs
      m_clipDistances = emitDclClipCullDistanceArray(
        m_analysis->clipCullOut.numClipPlanes,
        spv::BuiltInClipDistance,
        spv::StorageClassOutput);

      m_cullDistances = emitDclClipCullDistanceArray(
        m_analysis->clipCullOut.numCullPlanes,
        spv::BuiltInCullDistance,
        spv::StorageClassOutput);

      // Emit Xfb variables if necessary
      if (m_moduleInfo.xfb)
        emitXfbOutputDeclarations();

      // Main function of the geometry shader
      m_gs.functionId = m_module.allocateId();
      m_module.setDebugName(m_gs.functionId, "gs_main");

      this->emitFunctionBegin(
        m_gs.functionId,
        m_module.defVoidType(),
        m_module.defFunctionType(
          m_module.defVoidType(), 0, nullptr));
      this->emitFunctionLabel();
    }


    // Pixel shader initialization: execution modes, input
    // array and clip/cull inputs, then opens ps_main.
    void emitPsInit() {
      m_module.enableCapability(spv::CapabilityDerivativeControl);

      m_module.setExecutionMode(m_entryPointId,
        spv::ExecutionModeOriginUpperLeft);

      // Standard input array
      emitDclInputArray(0);

      // Cull/clip distances as inputs
      m_clipDistances = emitDclClipCullDistanceArray(
        m_analysis->clipCullIn.numClipPlanes,
        spv::BuiltInClipDistance,
        spv::StorageClassInput);

      m_cullDistances = emitDclClipCullDistanceArray(
        m_analysis->clipCullIn.numCullPlanes,
        spv::BuiltInCullDistance,
        spv::StorageClassInput);

      // Main function of the pixel shader
      m_ps.functionId = m_module.allocateId();
      m_module.setDebugName(m_ps.functionId, "ps_main");

      this->emitFunctionBegin(
        m_ps.functionId,
        m_module.defVoidType(),
        m_module.defFunctionType(
          m_module.defVoidType(), 0, nullptr));
      this->emitFunctionLabel();
    }


    // Compute shader initialization: opens cs_main.
    void emitCsInit() {
      // Main function of the compute shader
      m_cs.functionId = m_module.allocateId();
      m_module.setDebugName(m_cs.functionId, "cs_main");

      this->emitFunctionBegin(
        m_cs.functionId,
        m_module.defVoidType(),
        m_module.defFunctionType(
          m_module.defVoidType(), 0, nullptr));
      this->emitFunctionLabel();
    }


    // Emits the VS entry point: input setup, call into
    // vs_main, then output/clip/cull/point-size stores.
    void emitVsFinalize() {
      this->emitMainFunctionBegin();
      this->emitInputSetup();
      m_module.opFunctionCall(
        m_module.defVoidType(),
        m_vs.functionId, 0, nullptr);
      this->emitOutputSetup();
      this->emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);
      this->emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);
      this->emitPointSizeStore();
      this->emitFunctionEnd();
    }


    // Emits the HS entry point: control point phase for all
    // invocations, a barrier, then fork/join phases and the
    // output copy restricted to the first invocation.
    void emitHsFinalize() {
      if (m_hs.cpPhase.functionId == 0)
        m_hs.cpPhase = this->emitNewHullShaderPassthroughPhase();

      // Control point phase
      this->emitMainFunctionBegin();
      this->emitInputSetup(m_hs.vertexCountIn);
      this->emitHsControlPointPhase(m_hs.cpPhase);
      this->emitHsPhaseBarrier();

      // Fork-join phases and output setup
      this->emitHsInvocationBlockBegin(1);

      for (const auto& phase : m_hs.forkPhases)
        this->emitHsForkJoinPhase(phase);

      for (const auto& phase : m_hs.joinPhases)
        this->emitHsForkJoinPhase(phase);

      this->emitOutputSetup();
      this->emitHsOutputSetup();
      this->emitHsInvocationBlockEnd();
      this->emitFunctionEnd();
    }


    // Emits the DS entry point.
    void emitDsFinalize() {
      this->emitMainFunctionBegin();
      m_module.opFunctionCall(
        m_module.defVoidType(),
        m_ds.functionId, 0, nullptr);
      this->emitOutputSetup();
      this->emitClipCullStore(DxbcSystemValue::ClipDistance, m_clipDistances);
      this->emitClipCullStore(DxbcSystemValue::CullDistance, m_cullDistances);
      this->emitFunctionEnd();
    }


    // Emits the GS entry point.
    void emitGsFinalize() {
      // SPIR-V requires an invocation count even if the
      // shader never declared one explicitly.
      if (!m_gs.invocationCount)
        m_module.setInvocations(m_entryPointId, 1);

      this->emitMainFunctionBegin();
      this->emitInputSetup(
        primitiveVertexCount(m_gs.inputPrimitive));
      m_module.opFunctionCall(
        m_module.defVoidType(),
        m_gs.functionId, 0, nullptr);
      // No output setup at this point as that was
      // already done during the EmitVertex step
      this->emitFunctionEnd();
    }


    // Emits the PS entry point, wrapping the shader body in a
    // fragment shader interlock if rasterizer ordered views
    // are used.
    void emitPsFinalize() {
      this->emitMainFunctionBegin();
      this->emitInputSetup();
      this->emitClipCullLoad(DxbcSystemValue::ClipDistance, m_clipDistances);
      this->emitClipCullLoad(DxbcSystemValue::CullDistance, m_cullDistances);

      if (m_hasRasterizerOrderedUav) {
        // For simplicity, just lock the entire fragment shader
        // if there are any rasterizer ordered views.
        m_module.enableExtension("SPV_EXT_fragment_shader_interlock");

        if (m_module.hasCapability(spv::CapabilitySampleRateShading)
         && m_moduleInfo.options.enableSampleShadingInterlock) {
          m_module.enableCapability(spv::CapabilityFragmentShaderSampleInterlockEXT);
          m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSampleInterlockOrderedEXT);
        } else {
          m_module.enableCapability(spv::CapabilityFragmentShaderPixelInterlockEXT);
          m_module.setExecutionMode(m_entryPointId, spv::ExecutionModePixelInterlockOrderedEXT);
        }

        m_module.opBeginInvocationInterlock();
      }

      m_module.opFunctionCall(
        m_module.defVoidType(),
        m_ps.functionId, 0, nullptr);

      if (m_hasRasterizerOrderedUav)
        m_module.opEndInvocationInterlock();

      this->emitOutputSetup();

      if (m_moduleInfo.options.useDepthClipWorkaround)
        this->emitOutputDepthClamp();

      this->emitFunctionEnd();
    }


    // Emits the CS entry point.
    void emitCsFinalize() {
      this->emitMainFunctionBegin();

      if (m_moduleInfo.options.zeroInitWorkgroupMemory)
        this->emitInitWorkgroupMemory();

      m_module.opFunctionCall(
        m_module.defVoidType(),
        m_cs.functionId, 0, nullptr);

      this->emitFunctionEnd();
    }


    // Declares one output variable per transform feedback
    // entry that matches the output signature, and decorates
    // it with Xfb buffer/stride/offset info.
    void emitXfbOutputDeclarations() {
      for (uint32_t i = 0; i < m_moduleInfo.xfb->entryCount; i++) {
        const DxbcXfbEntry* xfbEntry = m_moduleInfo.xfb->entries + i;
        const DxbcSgnEntry* sigEntry = m_osgn->find(
          xfbEntry->semanticName,
          xfbEntry->semanticIndex,
          xfbEntry->streamId);

        if (sigEntry == nullptr)
          continue;

        DxbcRegisterInfo varInfo;
        varInfo.type.ctype = DxbcScalarType::Float32;
        varInfo.type.ccount = xfbEntry->componentCount;
        varInfo.type.alength = 0;
        varInfo.sclass = spv::StorageClassOutput;

        // Select the source components within the output register
        uint32_t dstComponentMask = (1 << xfbEntry->componentCount) - 1;
        uint32_t srcComponentMask = dstComponentMask
          << sigEntry->componentMask.firstSet()
          << xfbEntry->componentIndex;

        DxbcXfbVar xfbVar;
        xfbVar.varId = emitNewVariable(varInfo);
        xfbVar.streamId = xfbEntry->streamId;
        xfbVar.outputId = sigEntry->registerId;
        xfbVar.srcMask = DxbcRegMask(srcComponentMask);
        xfbVar.dstMask = DxbcRegMask(dstComponentMask);
        m_xfbVars.push_back(xfbVar);

        m_module.setDebugName(xfbVar.varId,
          str::format("xfb", i).c_str());

        m_module.decorateXfb(xfbVar.varId,
          xfbEntry->streamId, xfbEntry->bufferId, xfbEntry->offset,
          m_moduleInfo.xfb->strides[xfbEntry->bufferId]);
      }

      // TODO Compact location/component assignment
      for (uint32_t i = 0; i < m_xfbVars.size(); i++) {
        m_xfbVars[i].location = i;
        m_xfbVars[i].component = 0;
      }

      for (uint32_t i = 0; i < m_xfbVars.size(); i++) {
        const DxbcXfbVar* var = &m_xfbVars[i];

        m_module.decorateLocation (var->varId, var->location);
        m_module.decorateComponent(var->varId, var->component);
      }
    }


    // Copies output (or, in passthrough GS mode, input)
    // register components into the Xfb variables that belong
    // to the given vertex stream.
    void emitXfbOutputSetup(
            uint32_t                streamId,
            bool                    passthrough) {
      for (size_t i = 0; i < m_xfbVars.size(); i++) {
        if (m_xfbVars[i].streamId == streamId) {
          DxbcRegisterPointer srcPtr = passthrough
            ? m_vRegs[m_xfbVars[i].outputId]
            : m_oRegs[m_xfbVars[i].outputId];

          // In passthrough mode, read from vertex 0 of the input array
          if (passthrough) {
            srcPtr = emitArrayAccess(srcPtr,
              spv::StorageClassInput,
              m_module.constu32(0));
          }

          DxbcRegisterPointer dstPtr;
          dstPtr.type.ctype  = DxbcScalarType::Float32;
          dstPtr.type.ccount = m_xfbVars[i].dstMask.popCount();
          dstPtr.id = m_xfbVars[i].varId;

          DxbcRegisterValue value = emitRegisterExtract(
            emitValueLoad(srcPtr), m_xfbVars[i].srcMask);
          emitValueStore(dstPtr, value, m_xfbVars[i].dstMask);
        }
      }
    }


    // Invokes the hull shader control point phase function
    void emitHsControlPointPhase(
      const DxbcCompilerHsControlPointPhase&  phase) {
      m_module.opFunctionCall(
        m_module.defVoidType(),
        phase.functionId, 0, nullptr);
    }


    // Invokes a fork or join phase once per declared instance,
    // passing the instance index as the only argument
    void emitHsForkJoinPhase(
      const DxbcCompilerHsForkJoinPhase&      phase) {
      for (uint32_t i = 0; i < phase.instanceCount; i++) {
        uint32_t invocationId = m_module.constu32(i);

        m_module.opFunctionCall(
          m_module.defVoidType(),
          phase.functionId, 1,
          &invocationId);
      }
    }


    // Emits a workgroup control barrier that makes hull shader
    // outputs visible between phases
    void emitHsPhaseBarrier() {
      uint32_t exeScopeId = m_module.constu32(spv::ScopeWorkgroup);
      uint32_t memScopeId = m_module.constu32(spv::ScopeWorkgroup);
      uint32_t semanticId = m_module.constu32(
        spv::MemorySemanticsOutputMemoryMask |
        spv::MemorySemanticsAcquireReleaseMask |
        spv::MemorySemanticsMakeAvailableMask |
        spv::MemorySemanticsMakeVisibleMask);

      m_module.opControlBarrier(exeScopeId, memScopeId, semanticId);
    }


    // Opens a conditional block that only executes for hull
    // shader invocations with an id below the given count
    void emitHsInvocationBlockBegin(
            uint32_t                count) {
      uint32_t invocationId = m_module.opLoad(
        getScalarTypeId(DxbcScalarType::Uint32),
        m_hs.builtinInvocationId);

      uint32_t condition = m_module.opULessThan(
        m_module.defBoolType(), invocationId,
        m_module.constu32(count));

      m_hs.invocationBlockBegin = m_module.allocateId();
      m_hs.invocationBlockEnd = m_module.allocateId();

      m_module.opSelectionMerge(
        m_hs.invocationBlockEnd,
        spv::SelectionControlMaskNone);

      m_module.opBranchConditional(
        condition,
        m_hs.invocationBlockBegin,
        m_hs.invocationBlockEnd);

      m_module.opLabel(
        m_hs.invocationBlockBegin);
    }


    // Closes the block opened by emitHsInvocationBlockBegin
    void emitHsInvocationBlockEnd() {
      m_module.opBranch (m_hs.invocationBlockEnd);
      m_module.opLabel  (m_hs.invocationBlockEnd);

      m_hs.invocationBlockBegin = 0;
      m_hs.invocationBlockEnd = 0;
    }


    // Copies written per-patch registers from the private
    // array to the per-patch output interface
    void emitHsOutputSetup() {
      uint32_t outputPerPatch = emitTessInterfacePerPatch(spv::StorageClassOutput);

      if (!outputPerPatch)
        return;

      uint32_t vecType = getVectorTypeId({ DxbcScalarType::Float32, 4 });

      uint32_t srcPtrType = m_module.defPointerType(vecType, spv::StorageClassPrivate);
      uint32_t dstPtrType = m_module.defPointerType(vecType, spv::StorageClassOutput);

      for (uint32_t i = 0; i < 32; i++) {
        if (m_hs.outputPerPatchMask & (1 << i)) {
          uint32_t index = m_module.constu32(i);

          uint32_t srcPtr = m_module.opAccessChain(srcPtrType, m_hs.outputPerPatch, 1, &index);
          uint32_t dstPtr = m_module.opAccessChain(dstPtrType, outputPerPatch, 1, &index);

          m_module.opStore(dstPtr, m_module.opLoad(vecType, srcPtr));
        }
      }
    }


    // Declares the per-patch register array for the given
    // storage class; returns 0 if the patch constant
    // signature declares no registers
    uint32_t emitTessInterfacePerPatch(
            spv::StorageClass       storageClass) {
      const char* name = "vPatch";

      if (storageClass == spv::StorageClassPrivate)
        name = "rPatch";
      if (storageClass == spv::StorageClassOutput)
        name = "oPatch";

      uint32_t arrLen = m_psgn != nullptr ?
m_psgn->maxRegisterCount() : 0;\n\n if (!arrLen)\n return 0;\n\n uint32_t vecType = m_module.defVectorType (m_module.defFloatType(32), 4);\n uint32_t arrType = m_module.defArrayType (vecType, m_module.constu32(arrLen));\n uint32_t ptrType = m_module.defPointerType(arrType, storageClass);\n uint32_t varId = m_module.newVar (ptrType, storageClass);\n \n m_module.setDebugName (varId, name);\n \n if (storageClass != spv::StorageClassPrivate) {\n m_module.decorate (varId, spv::DecorationPatch);\n m_module.decorateLocation (varId, 0);\n }\n\n return varId;\n }\n uint32_t emitTessInterfacePerVertex(\n spv::StorageClass storageClass,\n uint32_t vertexCount) {\n const bool isInput = storageClass == spv::StorageClassInput;\n \n uint32_t arrLen = isInput\n ? (m_isgn != nullptr ? m_isgn->maxRegisterCount() : 0)\n : (m_osgn != nullptr ? m_osgn->maxRegisterCount() : 0);\n \n if (!arrLen)\n return 0;\n \n uint32_t locIdx = m_psgn != nullptr\n ? m_psgn->maxRegisterCount()\n : 0;\n \n uint32_t vecType = m_module.defVectorType (m_module.defFloatType(32), 4);\n uint32_t arrTypeInner = m_module.defArrayType (vecType, m_module.constu32(arrLen));\n uint32_t arrTypeOuter = m_module.defArrayType (arrTypeInner, m_module.constu32(vertexCount));\n uint32_t ptrType = m_module.defPointerType(arrTypeOuter, storageClass);\n uint32_t varId = m_module.newVar (ptrType, storageClass);\n \n m_module.setDebugName (varId, isInput ? \"vVertex\" : \"oVertex\");\n m_module.decorateLocation (varId, locIdx);\n return varId;\n }\n void emitDclInputArray(\n uint32_t vertexCount) {\n DxbcVectorType info;\n info.ctype = DxbcScalarType::Float32;\n info.ccount = 4;\n\n // Define the array type. This will be two-dimensional\n // in some shaders, with the outer index representing\n // the vertex ID within an invocation.\n m_vArrayLength = m_isgn != nullptr ? 
std::max(1u, m_isgn->maxRegisterCount()) : 1;\n m_vArrayLengthId = m_module.lateConst32(getScalarTypeId(DxbcScalarType::Uint32));\n\n uint32_t vectorTypeId = getVectorTypeId(info);\n uint32_t arrayTypeId = m_module.defArrayType(vectorTypeId, m_vArrayLengthId);\n \n if (vertexCount != 0) {\n arrayTypeId = m_module.defArrayType(\n arrayTypeId, m_module.constu32(vertexCount));\n }\n \n // Define the actual variable. Note that this is private\n // because we will copy input registers and some system\n // variables to the array during the setup phase.\n const uint32_t ptrTypeId = m_module.defPointerType(\n arrayTypeId, spv::StorageClassPrivate);\n \n const uint32_t varId = m_module.newVar(\n ptrTypeId, spv::StorageClassPrivate);\n \n m_module.setDebugName(varId, \"shader_in\");\n m_vArray = varId;\n }\n uint32_t emitDclClipCullDistanceArray(\n uint32_t length,\n spv::BuiltIn builtIn,\n spv::StorageClass storageClass) {\n if (length == 0)\n return 0;\n \n uint32_t t_f32 = m_module.defFloatType(32);\n uint32_t t_arr = m_module.defArrayType(t_f32, m_module.constu32(length));\n uint32_t t_ptr = m_module.defPointerType(t_arr, storageClass);\n uint32_t varId = m_module.newVar(t_ptr, storageClass);\n \n m_module.decorateBuiltIn(varId, builtIn);\n m_module.setDebugName(varId,\n builtIn == spv::BuiltInClipDistance\n ? 
\"clip_distances\"\n : \"cull_distances\");\n \n return varId;\n }\n DxbcCompilerHsControlPointPhase emitNewHullShaderControlPointPhase() {\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr);\n \n uint32_t funId = m_module.allocateId();\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n this->emitFunctionLabel();\n \n DxbcCompilerHsControlPointPhase result;\n result.functionId = funId;\n return result;\n }\n DxbcCompilerHsControlPointPhase emitNewHullShaderPassthroughPhase() {\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 0, nullptr);\n \n // Begin passthrough function\n uint32_t funId = m_module.allocateId();\n m_module.setDebugName(funId, \"hs_passthrough\");\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n this->emitFunctionLabel();\n \n // We'll basically copy each input variable to the corresponding\n // output, using the shader's invocation ID as the array index.\n const uint32_t invocationId = m_module.opLoad(\n getScalarTypeId(DxbcScalarType::Uint32),\n m_hs.builtinInvocationId);\n \n for (auto i = m_isgn->begin(); i != m_isgn->end(); i++) {\n this->emitDclInput(\n i->registerId, m_hs.vertexCountIn,\n i->componentMask,\n DxbcSystemValue::None,\n DxbcInterpolationMode::Undefined);\n \n // Vector type index\n const std::array dstIndices\n = {{ invocationId, m_module.constu32(i->registerId) }};\n \n DxbcRegisterPointer srcPtr;\n srcPtr.type = m_vRegs.at(i->registerId).type;\n srcPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(srcPtr.type), spv::StorageClassInput),\n m_vRegs.at(i->registerId).id, 1, &invocationId);\n \n DxbcRegisterValue srcValue = emitRegisterBitcast(\n emitValueLoad(srcPtr), DxbcScalarType::Float32);\n\n DxbcRegisterPointer dstPtr;\n dstPtr.type = { DxbcScalarType::Float32, 4 };\n dstPtr.id = m_module.opAccessChain(\n m_module.defPointerType(getVectorTypeId(dstPtr.type), 
spv::StorageClassOutput),\n m_hs.outputPerVertex, dstIndices.size(), dstIndices.data());\n\n emitValueStore(dstPtr, srcValue, DxbcRegMask::firstN(srcValue.type.ccount));\n }\n \n // End function\n this->emitFunctionEnd();\n \n DxbcCompilerHsControlPointPhase result;\n result.functionId = funId;\n return result;\n }\n DxbcCompilerHsForkJoinPhase emitNewHullShaderForkJoinPhase() {\n uint32_t argTypeId = m_module.defIntType(32, 0);\n uint32_t funTypeId = m_module.defFunctionType(\n m_module.defVoidType(), 1, &argTypeId);\n \n uint32_t funId = m_module.allocateId();\n \n this->emitFunctionBegin(funId,\n m_module.defVoidType(),\n funTypeId);\n \n uint32_t argId = m_module.functionParameter(argTypeId);\n this->emitFunctionLabel();\n \n DxbcCompilerHsForkJoinPhase result;\n result.functionId = funId;\n result.instanceId = argId;\n return result;\n }\n uint32_t emitSamplePosArray() {\n const std::array samplePosVectors = {{\n // Invalid sample count / unbound resource\n m_module.constvec2f32( 0.0f, 0.0f),\n // VK_SAMPLE_COUNT_1_BIT\n m_module.constvec2f32( 0.0f, 0.0f),\n // VK_SAMPLE_COUNT_2_BIT\n m_module.constvec2f32( 0.25f, 0.25f),\n m_module.constvec2f32(-0.25f,-0.25f),\n // VK_SAMPLE_COUNT_4_BIT\n m_module.constvec2f32(-0.125f,-0.375f),\n m_module.constvec2f32( 0.375f,-0.125f),\n m_module.constvec2f32(-0.375f, 0.125f),\n m_module.constvec2f32( 0.125f, 0.375f),\n // VK_SAMPLE_COUNT_8_BIT\n m_module.constvec2f32( 0.0625f,-0.1875f),\n m_module.constvec2f32(-0.0625f, 0.1875f),\n m_module.constvec2f32( 0.3125f, 0.0625f),\n m_module.constvec2f32(-0.1875f,-0.3125f),\n m_module.constvec2f32(-0.3125f, 0.3125f),\n m_module.constvec2f32(-0.4375f,-0.0625f),\n m_module.constvec2f32( 0.1875f, 0.4375f),\n m_module.constvec2f32( 0.4375f,-0.4375f),\n // VK_SAMPLE_COUNT_16_BIT\n m_module.constvec2f32( 0.0625f, 0.0625f),\n m_module.constvec2f32(-0.0625f,-0.1875f),\n m_module.constvec2f32(-0.1875f, 0.1250f),\n m_module.constvec2f32( 0.2500f,-0.0625f),\n 
m_module.constvec2f32(-0.3125f,-0.1250f),\n m_module.constvec2f32( 0.1250f, 0.3125f),\n m_module.constvec2f32( 0.3125f, 0.1875f),\n m_module.constvec2f32( 0.1875f,-0.3125f),\n m_module.constvec2f32(-0.1250f, 0.3750f),\n m_module.constvec2f32( 0.0000f,-0.4375f),\n m_module.constvec2f32(-0.2500f,-0.3750f),\n m_module.constvec2f32(-0.3750f, 0.2500f),\n m_module.constvec2f32(-0.5000f, 0.0000f),\n m_module.constvec2f32( 0.4375f,-0.2500f),\n m_module.constvec2f32( 0.3750f, 0.4375f),\n m_module.constvec2f32(-0.4375f,-0.5000f),\n }};\n \n uint32_t arrayTypeId = getArrayTypeId({\n DxbcScalarType::Float32, 2,\n static_cast(samplePosVectors.size()) });\n \n uint32_t samplePosArray = m_module.constComposite(\n arrayTypeId,\n samplePosVectors.size(),\n samplePosVectors.data());\n \n uint32_t varId = m_module.newVarInit(\n m_module.defPointerType(arrayTypeId, spv::StorageClassPrivate),\n spv::StorageClassPrivate, samplePosArray);\n \n m_module.setDebugName(varId, \"g_sample_pos\");\n m_module.decorate(varId, spv::DecorationNonWritable);\n return varId;\n }\n void emitFloatControl() {\n DxbcFloatControlFlags flags = m_moduleInfo.options.floatControl;\n\n if (flags.isClear())\n return;\n\n const uint32_t width32 = 32;\n const uint32_t width64 = 64;\n\n if (flags.test(DxbcFloatControlFlag::DenormFlushToZero32)) {\n m_module.enableCapability(spv::CapabilityDenormFlushToZero);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDenormFlushToZero, 1, &width32);\n }\n\n if (flags.test(DxbcFloatControlFlag::PreserveNan32)) {\n m_module.enableCapability(spv::CapabilitySignedZeroInfNanPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSignedZeroInfNanPreserve, 1, &width32);\n }\n\n if (m_module.hasCapability(spv::CapabilityFloat64)) {\n if (flags.test(DxbcFloatControlFlag::DenormPreserve64)) {\n m_module.enableCapability(spv::CapabilityDenormPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeDenormPreserve, 1, &width64);\n }\n\n if 
(flags.test(DxbcFloatControlFlag::PreserveNan64)) {\n m_module.enableCapability(spv::CapabilitySignedZeroInfNanPreserve);\n m_module.setExecutionMode(m_entryPointId, spv::ExecutionModeSignedZeroInfNanPreserve, 1, &width64);\n }\n }\n }\n uint32_t emitNewVariable(\n const DxbcRegisterInfo& info) {\n const uint32_t ptrTypeId = this->getPointerTypeId(info);\n return m_module.newVar(ptrTypeId, info.sclass);\n }\n uint32_t emitNewBuiltinVariable(\n const DxbcRegisterInfo& info,\n spv::BuiltIn builtIn,\n const char* name) {\n const uint32_t varId = emitNewVariable(info);\n \n if (name)\n m_module.setDebugName(varId, name);\n\n m_module.decorateBuiltIn(varId, builtIn);\n\n if (m_programInfo.type() == DxbcProgramType::PixelShader\n && info.type.ctype != DxbcScalarType::Float32\n && info.type.ctype != DxbcScalarType::Bool\n && info.sclass == spv::StorageClassInput)\n m_module.decorate(varId, spv::DecorationFlat);\n\n return varId;\n }\n uint32_t emitBuiltinTessLevelOuter(\n spv::StorageClass storageClass) {\n uint32_t id = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Float32, 0, 4 },\n storageClass },\n spv::BuiltInTessLevelOuter,\n \"bTessLevelOuter\");\n \n m_module.decorate(id, spv::DecorationPatch);\n return id;\n }\n uint32_t emitBuiltinTessLevelInner(\n spv::StorageClass storageClass) {\n uint32_t id = emitNewBuiltinVariable(\n DxbcRegisterInfo {\n { DxbcScalarType::Float32, 0, 2 },\n storageClass },\n spv::BuiltInTessLevelInner,\n \"bTessLevelInner\");\n \n m_module.decorate(id, spv::DecorationPatch);\n return id;\n }\n uint32_t emitPushConstants() {\n uint32_t uintTypeId = m_module.defIntType(32, 0);\n uint32_t structTypeId = m_module.defStructTypeUnique(1, &uintTypeId);\n\n m_module.setDebugName(structTypeId, \"pc_t\");\n m_module.setDebugMemberName(structTypeId, 0, \"RasterizerSampleCount\");\n m_module.memberDecorateOffset(structTypeId, 0, 0);\n\n uint32_t ptrTypeId = m_module.defPointerType(structTypeId, spv::StorageClassPushConstant);\n 
uint32_t varId = m_module.newVar(ptrTypeId, spv::StorageClassPushConstant);\n\n m_module.setDebugName(varId, \"pc\");\n return varId;\n }\n DxbcCfgBlock* cfgFindBlock(\n const std::initializer_list& types);\n DxbcBufferInfo getBufferInfo(\n const DxbcRegister& reg) {\n const uint32_t registerId = reg.idx[0].offset;\n \n switch (reg.type) {\n case DxbcOperandType::Resource: {\n const auto& texture = m_textures.at(registerId);\n\n DxbcBufferInfo result;\n result.image = texture.imageInfo;\n result.stype = texture.sampledType;\n result.type = texture.type;\n result.typeId = texture.imageTypeId;\n result.varId = texture.varId;\n result.stride = texture.structStride;\n result.coherence = 0;\n result.isSsbo = texture.isRawSsbo;\n return result;\n } break;\n \n case DxbcOperandType::UnorderedAccessView: {\n const auto& uav = m_uavs.at(registerId);\n\n DxbcBufferInfo result;\n result.image = uav.imageInfo;\n result.stype = uav.sampledType;\n result.type = uav.type;\n result.typeId = uav.imageTypeId;\n result.varId = uav.varId;\n result.stride = uav.structStride;\n result.coherence = uav.coherence;\n result.isSsbo = uav.isRawSsbo;\n return result;\n } break;\n \n case DxbcOperandType::ThreadGroupSharedMemory: {\n DxbcBufferInfo result;\n result.image = { spv::DimBuffer, 0, 0, 0 };\n result.stype = DxbcScalarType::Uint32;\n result.type = m_gRegs.at(registerId).type;\n result.typeId = m_module.defPointerType(\n getScalarTypeId(DxbcScalarType::Uint32),\n spv::StorageClassWorkgroup);\n result.varId = m_gRegs.at(registerId).varId;\n result.stride = m_gRegs.at(registerId).elementStride;\n result.coherence = spv::ScopeInvocation;\n result.isSsbo = false;\n return result;\n } break;\n \n default:\n throw DxvkError(str::format(\"DxbcCompiler: Invalid operand type for buffer: \", reg.type));\n }\n }\n uint32_t getTexSizeDim(\n const DxbcImageInfo& imageType) const {\n switch (imageType.dim) {\n case spv::DimBuffer: return 1 + imageType.array;\n case spv::Dim1D: return 1 + 
imageType.array;\n case spv::Dim2D: return 2 + imageType.array;\n case spv::Dim3D: return 3 + imageType.array;\n case spv::DimCube: return 2 + imageType.array;\n default: throw DxvkError(\"DxbcCompiler: getTexLayerDim: Unsupported image dimension\");\n }\n }\n uint32_t getTexLayerDim(\n const DxbcImageInfo& imageType) const {\n switch (imageType.dim) {\n case spv::DimBuffer: return 1;\n case spv::Dim1D: return 1;\n case spv::Dim2D: return 2;\n case spv::Dim3D: return 3;\n case spv::DimCube: return 3;\n default: throw DxvkError(\"DxbcCompiler: getTexLayerDim: Unsupported image dimension\");\n }\n }\n uint32_t getTexCoordDim(\n const DxbcImageInfo& imageType) const {\n return getTexLayerDim(imageType) + imageType.array;\n }\n DxbcRegMask getTexCoordMask(\n const DxbcImageInfo& imageType) const {\n return DxbcRegMask::firstN(getTexCoordDim(imageType));\n }\n DxbcVectorType getInputRegType(\n uint32_t regIdx) const {\n switch (m_programInfo.type()) {\n case DxbcProgramType::VertexShader: {\n const DxbcSgnEntry* entry = m_isgn->findByRegister(regIdx);\n \n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n \n if (entry != nullptr) {\n result.ctype = entry->componentType;\n result.ccount = entry->componentMask.popCount();\n }\n \n return result;\n }\n\n case DxbcProgramType::DomainShader: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n return result;\n }\n\n default: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n\n if (m_isgn == nullptr || !m_isgn->findByRegister(regIdx))\n return result;\n\n DxbcRegMask mask(0u);\n DxbcRegMask used(0u);\n\n for (const auto& e : *m_isgn) {\n if (e.registerId == regIdx && !ignoreInputSystemValue(e.systemValue)) {\n mask |= e.componentMask;\n used |= e.componentUsed;\n }\n }\n\n if (m_programInfo.type() == DxbcProgramType::PixelShader) {\n if ((used.raw() & mask.raw()) == used.raw())\n mask = used;\n }\n\n 
result.ccount = mask.minComponents();\n return result;\n }\n }\n }\n DxbcVectorType getOutputRegType(\n uint32_t regIdx) const {\n switch (m_programInfo.type()) {\n case DxbcProgramType::PixelShader: {\n const DxbcSgnEntry* entry = m_osgn->findByRegister(regIdx);\n\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n \n if (entry != nullptr) {\n result.ctype = entry->componentType;\n result.ccount = entry->componentMask.popCount();\n }\n\n return result;\n }\n\n case DxbcProgramType::HullShader: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n return result;\n }\n\n default: {\n DxbcVectorType result;\n result.ctype = DxbcScalarType::Float32;\n result.ccount = 4;\n\n if (m_osgn->findByRegister(regIdx))\n result.ccount = m_osgn->regMask(regIdx).minComponents();\n return result;\n }\n }\n }\n DxbcImageInfo getResourceType(\n DxbcResourceDim resourceType,\n bool isUav) const {\n uint32_t ms = m_moduleInfo.options.disableMsaa ? 0 : 1;\n\n switch (resourceType) {\n case DxbcResourceDim::Buffer: return { spv::DimBuffer, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_MAX_ENUM };\n case DxbcResourceDim::Texture1D: return { spv::Dim1D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_1D };\n case DxbcResourceDim::Texture1DArr: return { spv::Dim1D, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_1D_ARRAY };\n case DxbcResourceDim::Texture2D: return { spv::Dim2D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D };\n case DxbcResourceDim::Texture2DArr: return { spv::Dim2D, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D_ARRAY };\n case DxbcResourceDim::Texture2DMs: return { spv::Dim2D, 0, ms,isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D };\n case DxbcResourceDim::Texture2DMsArr: return { spv::Dim2D, 1, ms,isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_2D_ARRAY };\n case DxbcResourceDim::Texture3D: return { spv::Dim3D, 0, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_3D };\n case DxbcResourceDim::TextureCube: return { spv::DimCube, 0, 0, isUav ? 
2u : 1u, VK_IMAGE_VIEW_TYPE_CUBE };\n case DxbcResourceDim::TextureCubeArr: return { spv::DimCube, 1, 0, isUav ? 2u : 1u, VK_IMAGE_VIEW_TYPE_CUBE_ARRAY };\n default: throw DxvkError(str::format(\"DxbcCompiler: Unsupported resource type: \", resourceType));\n }\n }\n spv::ImageFormat getScalarImageFormat(\n DxbcScalarType type) const {\n switch (type) {\n case DxbcScalarType::Float32: return spv::ImageFormatR32f;\n case DxbcScalarType::Sint32: return spv::ImageFormatR32i;\n case DxbcScalarType::Uint32: return spv::ImageFormatR32ui;\n default: throw DxvkError(\"DxbcCompiler: Unhandled scalar resource type\");\n }\n }\n bool isDoubleType(\n DxbcScalarType type) const {\n return type == DxbcScalarType::Sint64\n || type == DxbcScalarType::Uint64\n || type == DxbcScalarType::Float64;\n }\n DxbcRegisterPointer getIndexableTempPtr(\n const DxbcRegister& operand,\n DxbcRegisterValue vectorId) {\n // x# regs are indexed as follows:\n // (0) register index (immediate)\n // (1) element index (relative)\n const uint32_t regId = operand.idx[0].offset;\n \n DxbcRegisterInfo info;\n info.type.ctype = DxbcScalarType::Float32;\n info.type.ccount = m_xRegs[regId].ccount;\n info.type.alength = 0;\n info.sclass = spv::StorageClassPrivate;\n \n DxbcRegisterPointer result;\n result.type.ctype = info.type.ctype;\n result.type.ccount = info.type.ccount;\n result.id = m_module.opAccessChain(\n getPointerTypeId(info),\n m_xRegs.at(regId).varId,\n 1, &vectorId.id);\n\n return result;\n }\n bool caseBlockIsFallthrough() const {\n return m_lastOp != DxbcOpcode::Case\n && m_lastOp != DxbcOpcode::Default\n && m_lastOp != DxbcOpcode::Break\n && m_lastOp != DxbcOpcode::Ret;\n }\n uint32_t getUavCoherence(\n uint32_t registerId,\n DxbcUavFlags flags) {\n // For any ROV with write access, we must ensure that\n // availability operations happen within the locked scope.\n if (flags.test(DxbcUavFlag::RasterizerOrdered)\n && (m_analysis->uavInfos[registerId].accessFlags & VK_ACCESS_SHADER_WRITE_BIT)) {\n 
m_hasGloballyCoherentUav = true;\n m_hasRasterizerOrderedUav = true;\n return spv::ScopeQueueFamily;\n }\n\n // Ignore any resources that can't both be read and written in\n // the current shader, explicit availability/visibility operands\n // are not useful in that case.\n if (m_analysis->uavInfos[registerId].accessFlags != (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT))\n return 0;\n\n // If the globally coherent flag is set, the resource must be\n // coherent across multiple workgroups of the same dispatch\n if (flags.test(DxbcUavFlag::GloballyCoherent)) {\n m_hasGloballyCoherentUav = true;\n return spv::ScopeQueueFamily;\n }\n\n // In compute shaders, UAVs are implicitly workgroup coherent,\n // but we can rely on memory barrier instructions to make any\n // access available and visible to the entire workgroup.\n if (m_programInfo.type() == DxbcProgramType::ComputeShader)\n return spv::ScopeInvocation;\n\n return 0;\n }\n bool ignoreInputSystemValue(\n DxbcSystemValue sv) const {\n switch (sv) {\n case DxbcSystemValue::Position:\n case DxbcSystemValue::IsFrontFace:\n case DxbcSystemValue::SampleIndex:\n case DxbcSystemValue::PrimitiveId:\n case DxbcSystemValue::Coverage:\n return m_programInfo.type() == DxbcProgramType::PixelShader;\n\n default:\n return false;\n }\n }\n void emitUavBarrier(\n uint64_t readMask,\n uint64_t writeMask) {\n if (!m_moduleInfo.options.forceComputeUavBarriers\n || m_programInfo.type() != DxbcProgramType::ComputeShader)\n return;\n\n // If both masks are 0, emit a barrier in case at least one read-write UAV\n // has a pending unsynchronized access. Only consider read-after-write and\n // write-after-read hazards, assume that back-to-back stores are safe and\n // do not overlap in memory. 
Atomics are also completely ignored here.\n uint64_t rdMask = m_uavRdMask;\n uint64_t wrMask = m_uavWrMask;\n\n bool insertBarrier = bool(rdMask & wrMask);\n\n if (readMask || writeMask) {\n rdMask &= m_uavWrMask;\n wrMask &= m_uavRdMask;\n }\n\n for (auto uav : bit::BitMask(rdMask | wrMask)) {\n constexpr VkAccessFlags rwAccess = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n insertBarrier |= (m_analysis->uavInfos[uav].accessFlags & rwAccess) == rwAccess;\n }\n\n // Need to be in uniform top-level control flow, or otherwise\n // it is not safe to insert control barriers.\n if (insertBarrier && m_controlFlowBlocks.empty() && m_topLevelIsUniform) {\n m_module.opControlBarrier(\n m_module.constu32(spv::ScopeWorkgroup),\n m_module.constu32(m_hasGloballyCoherentUav ? spv::ScopeQueueFamily : spv::ScopeWorkgroup),\n m_module.constu32(spv::MemorySemanticsWorkgroupMemoryMask\n | spv::MemorySemanticsImageMemoryMask\n | spv::MemorySemanticsUniformMemoryMask\n | spv::MemorySemanticsAcquireReleaseMask\n | spv::MemorySemanticsMakeAvailableMask\n | spv::MemorySemanticsMakeVisibleMask));\n\n m_uavWrMask = 0u;\n m_uavRdMask = 0u;\n }\n\n // Mark pending accesses\n m_uavWrMask |= writeMask;\n m_uavRdMask |= readMask;\n }\n uint32_t getScalarTypeId(\n DxbcScalarType type) {\n if (type == DxbcScalarType::Float64)\n m_module.enableCapability(spv::CapabilityFloat64);\n \n if (type == DxbcScalarType::Sint64 || type == DxbcScalarType::Uint64)\n m_module.enableCapability(spv::CapabilityInt64);\n \n switch (type) {\n case DxbcScalarType::Uint32: return m_module.defIntType(32, 0);\n case DxbcScalarType::Uint64: return m_module.defIntType(64, 0);\n case DxbcScalarType::Sint32: return m_module.defIntType(32, 1);\n case DxbcScalarType::Sint64: return m_module.defIntType(64, 1);\n case DxbcScalarType::Float32: return m_module.defFloatType(32);\n case DxbcScalarType::Float64: return m_module.defFloatType(64);\n case DxbcScalarType::Bool: return m_module.defBoolType();\n }\n\n throw 
DxvkError(\"DxbcCompiler: Invalid scalar type\");\n }\n uint32_t getVectorTypeId(\n const DxbcVectorType& type) {\n uint32_t typeId = this->getScalarTypeId(type.ctype);\n \n if (type.ccount > 1)\n typeId = m_module.defVectorType(typeId, type.ccount);\n \n return typeId;\n }\n uint32_t getArrayTypeId(\n const DxbcArrayType& type) {\n DxbcVectorType vtype;\n vtype.ctype = type.ctype;\n vtype.ccount = type.ccount;\n \n uint32_t typeId = this->getVectorTypeId(vtype);\n \n if (type.alength != 0) {\n typeId = m_module.defArrayType(typeId,\n m_module.constu32(type.alength));\n }\n \n return typeId;\n }\n uint32_t getPointerTypeId(\n const DxbcRegisterInfo& type) {\n return m_module.defPointerType(\n this->getArrayTypeId(type.type),\n type.sclass);\n }\n uint32_t getSparseResultTypeId(\n uint32_t baseType) {\n m_module.enableCapability(spv::CapabilitySparseResidency);\n\n uint32_t uintType = getScalarTypeId(DxbcScalarType::Uint32);\n std::array typeIds = { uintType, baseType };\n return m_module.defStructType(typeIds.size(), typeIds.data());\n }\n uint32_t getFunctionId(\n uint32_t functionNr) {\n auto entry = m_subroutines.find(functionNr);\n if (entry != m_subroutines.end())\n return entry->second;\n \n uint32_t functionId = m_module.allocateId();\n m_subroutines.insert({ functionNr, functionId });\n return functionId;\n }\n DxbcCompilerHsForkJoinPhase* getCurrentHsForkJoinPhase();\n};"], ["/lsfg-vk/framegen/v3.1_src/shaders/beta.cpp", "#include \n#include \n\n#include \"v3_1/shaders/beta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nBeta::Beta(Vulkan& vk, std::array, 3> inImgs)\n : inImgs(std::move(inImgs)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"beta[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 12, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } 
}),\n vk.shaders.getShader(vk.device, \"beta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"beta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 6, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"beta[0]\"),\n vk.shaders.getPipeline(vk.device, \"beta[1]\"),\n vk.shaders.getPipeline(vk.device, \"beta[2]\"),\n vk.shaders.getPipeline(vk.device, \"beta[3]\"),\n vk.shaders.getPipeline(vk.device, \"beta[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n for (size_t i = 0; i < 3; i++)\n this->firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(0));\n for (size_t i = 0; i < 4; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i + 1));\n this->buffer = vk.resources.getBuffer(vk.device, 0.5F);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs.at(0).at(0).getExtent();\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n for (size_t i = 0; i < 6; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { extent.width >> i, extent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up 
shaders\n for (size_t i = 0; i < 3; i++) {\n this->firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 1) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Beta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto extent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (extent.width + 7) >> 3;\n uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs.at(0))\n .addW2R(this->inImgs.at(1))\n .addW2R(this->inImgs.at(2))\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n 
Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth pass\n threadsX = (extent.width + 31) >> 5;\n threadsY = (extent.height + 31) >> 5;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->outImgs)\n .build();\n\n this->pipelines.at(4).bind(buf);\n this->descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/beta.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/beta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nBeta::Beta(Vulkan& vk, std::array, 3> inImgs)\n : inImgs(std::move(inImgs)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_beta[0]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, 
VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_beta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 6, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_beta[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_beta[4]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n for (size_t i = 0; i < 3; i++)\n this->firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(0));\n for (size_t i = 0; i < 4; i++)\n this->descriptorSets.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool, this->shaderModules.at(i + 1));\n this->buffer = vk.resources.getBuffer(vk.device, 0.5F);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs.at(0).at(0).getExtent();\n for (size_t i = 0; i < 2; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n for (size_t i = 0; i < 6; i++)\n this->outImgs.at(i) = Core::Image(vk.device,\n { extent.width >> i, extent.height >> i },\n VK_FORMAT_R8_UNORM);\n\n // hook up shaders\n for (size_t i = 0; i < 3; i++) {\n this->firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 1) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at((i + 2) % 3))\n 
.add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n this->descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n this->descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n this->descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, this->buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImgs)\n .build();\n}\n\nvoid Beta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount) {\n // first pass\n const auto extent = this->tempImgs1.at(0).getExtent();\n uint32_t threadsX = (extent.width + 7) >> 3;\n uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs.at(0))\n .addW2R(this->inImgs.at(1))\n .addW2R(this->inImgs.at(2))\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n this->firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n this->descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third pass\n Utils::BarrierBuilder(buf)\n 
.addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n this->descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth pass\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n this->descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth pass\n threadsX = (extent.width + 31) >> 5;\n threadsY = (extent.height + 31) >> 5;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->outImgs)\n .build();\n\n this->pipelines.at(4).bind(buf);\n this->descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/shaders/delta.cpp", "#include \n#include \n\n#include \"v3_1p/shaders/delta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1P::Shaders;\n\nDelta::Delta(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg1,\n std::optional optImg2)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg1(std::move(optImg1)), optImg2(std::move(optImg2)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"p_delta[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 5, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[1]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n 
vk.shaders.getShader(vk.device, \"p_delta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[5]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[6]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[7]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[8]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"p_delta[9]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"p_delta[0]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[1]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[2]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[3]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[4]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[5]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[6]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[7]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[8]\"),\n vk.shaders.getPipeline(vk.device, \"p_delta[9]\")\n }};\n this->samplers.at(0) = 
vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 3; i++)\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n for (size_t i = 0; i < 2; i++)\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n\n this->outImg1 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n this->outImg2 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n false, !this->optImg1.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n 
.add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg1)\n .build();\n for (size_t i = 0; i < 3; i++) {\n pass.sixthDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(5));\n pass.sixthDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .build();\n }\n pass.descriptorSets.at(4) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(6));\n pass.descriptorSets.at(4).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .build();\n pass.descriptorSets.at(5) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(7));\n pass.descriptorSets.at(5).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .build();\n pass.descriptorSets.at(6) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(8));\n pass.descriptorSets.at(6).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .build();\n pass.descriptorSets.at(7) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(9));\n pass.descriptorSets.at(7).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg2)\n .build();\n }\n}\n\nvoid Delta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx,\n bool last) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n 
const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg1)\n .addW2R(this->inImg2)\n .addR2W(this->outImg1)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n\n // sixth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addW2R(this->optImg2)\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(5).bind(buf);\n pass.sixthDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(5));\n buf.dispatch(threadsX, threadsY, 1);\n\n if (!last)\n return;\n\n // seventh shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n 
.addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(6).bind(buf);\n pass.descriptorSets.at(4).bind(buf, this->pipelines.at(6));\n buf.dispatch(threadsX, threadsY, 1);\n\n // eighth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2)\n .build();\n this->pipelines.at(7).bind(buf);\n pass.descriptorSets.at(5).bind(buf, this->pipelines.at(7));\n buf.dispatch(threadsX, threadsY, 1);\n\n // ninth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(8).bind(buf);\n pass.descriptorSets.at(6).bind(buf, this->pipelines.at(8));\n buf.dispatch(threadsX, threadsY, 1);\n\n // tenth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->outImg2)\n .build();\n\n this->pipelines.at(9).bind(buf);\n pass.descriptorSets.at(7).bind(buf, this->pipelines.at(9));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/shaders/delta.cpp", "#include \n#include \n\n#include \"v3_1/shaders/delta.hpp\"\n#include \"common/utils.hpp\"\n#include \"core/commandbuffer.hpp\"\n#include \"core/image.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1::Shaders;\n\nDelta::Delta(Vulkan& vk, std::array, 3> inImgs1,\n Core::Image inImg2,\n std::optional optImg1,\n std::optional optImg2,\n std::optional optImg3)\n : inImgs1(std::move(inImgs1)), inImg2(std::move(inImg2)),\n optImg1(std::move(optImg1)), optImg2(std::move(optImg2)),\n optImg3(std::move(optImg3)) {\n // create resources\n this->shaderModules = {{\n vk.shaders.getShader(vk.device, \"delta[0]\",\n { { 1 , VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 9, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 3, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[1]\",\n { { 1, 
VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[2]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[3]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 4, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 4, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[4]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 6, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[5]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 10, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[6]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[7]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[8]\",\n { { 1, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } }),\n vk.shaders.getShader(vk.device, \"delta[9]\",\n { { 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER },\n { 2, VK_DESCRIPTOR_TYPE_SAMPLER },\n { 3, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE },\n { 1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE } })\n }};\n this->pipelines = {{\n vk.shaders.getPipeline(vk.device, \"delta[0]\"),\n vk.shaders.getPipeline(vk.device, \"delta[1]\"),\n vk.shaders.getPipeline(vk.device, \"delta[2]\"),\n vk.shaders.getPipeline(vk.device, \"delta[3]\"),\n vk.shaders.getPipeline(vk.device, \"delta[4]\"),\n vk.shaders.getPipeline(vk.device, \"delta[5]\"),\n 
vk.shaders.getPipeline(vk.device, \"delta[6]\"),\n vk.shaders.getPipeline(vk.device, \"delta[7]\"),\n vk.shaders.getPipeline(vk.device, \"delta[8]\"),\n vk.shaders.getPipeline(vk.device, \"delta[9]\")\n }};\n this->samplers.at(0) = vk.resources.getSampler(vk.device);\n this->samplers.at(1) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, VK_COMPARE_OP_NEVER, true);\n this->samplers.at(2) = vk.resources.getSampler(vk.device,\n VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, VK_COMPARE_OP_ALWAYS, false);\n\n // create internal images/outputs\n const VkExtent2D extent = this->inImgs1.at(0).at(0).getExtent();\n for (size_t i = 0; i < 4; i++) {\n this->tempImgs1.at(i) = Core::Image(vk.device, extent);\n this->tempImgs2.at(i) = Core::Image(vk.device, extent);\n }\n\n this->outImg1 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n this->outImg2 = Core::Image(vk.device,\n { extent.width, extent.height },\n VK_FORMAT_R16G16B16A16_SFLOAT);\n\n // hook up shaders\n for (size_t pass_idx = 0; pass_idx < vk.generationCount; pass_idx++) {\n auto& pass = this->passes.emplace_back();\n pass.buffer = vk.resources.getBuffer(vk.device,\n static_cast(pass_idx + 1) / static_cast(vk.generationCount + 1),\n false, !this->optImg1.has_value());\n for (size_t i = 0; i < 3; i++) {\n pass.firstDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(0));\n pass.firstDescriptorSet.at(i).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n 
.add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(2))\n .build();\n }\n pass.descriptorSets.at(0) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(1));\n pass.descriptorSets.at(0).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(2))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(1) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(2));\n pass.descriptorSets.at(1).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1)\n .build();\n pass.descriptorSets.at(2) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(3));\n pass.descriptorSets.at(2).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2)\n .build();\n pass.descriptorSets.at(3) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(4));\n pass.descriptorSets.at(3).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg1)\n .build();\n for (size_t i = 0; i < 3; i++) {\n pass.sixthDescriptorSet.at(i) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(5));\n pass.sixthDescriptorSet.at(i).update(vk.device)\n 
.add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at((i + 2) % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->inImgs1.at(i % 3))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg1)\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg2)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(1))\n .build();\n }\n pass.descriptorSets.at(4) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(6));\n pass.descriptorSets.at(4).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n .build();\n pass.descriptorSets.at(5) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(7));\n pass.descriptorSets.at(5).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs2.at(1))\n .build();\n pass.descriptorSets.at(6) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(8));\n pass.descriptorSets.at(6).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs2.at(1))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->tempImgs1.at(1))\n 
.build();\n pass.descriptorSets.at(7) = Core::DescriptorSet(vk.device, vk.descriptorPool,\n this->shaderModules.at(9));\n pass.descriptorSets.at(7).update(vk.device)\n .add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, pass.buffer)\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLER, this->samplers.at(2))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(0))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->tempImgs1.at(1))\n .add(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, this->optImg3)\n .add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, this->outImg2)\n .build();\n }\n}\n\nvoid Delta::Dispatch(const Core::CommandBuffer& buf, uint64_t frameCount, uint64_t pass_idx) {\n auto& pass = this->passes.at(pass_idx);\n\n // first shader\n const auto extent = this->tempImgs1.at(0).getExtent();\n const uint32_t threadsX = (extent.width + 7) >> 3;\n const uint32_t threadsY = (extent.height + 7) >> 3;\n\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .addR2W(this->tempImgs1.at(2))\n .build();\n\n this->pipelines.at(0).bind(buf);\n pass.firstDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(0));\n buf.dispatch(threadsX, threadsY, 1);\n\n // second shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->tempImgs1.at(2))\n .addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(1).bind(buf);\n pass.descriptorSets.at(0).bind(buf, this->pipelines.at(1));\n buf.dispatch(threadsX, threadsY, 1);\n\n // third shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addR2W(this->tempImgs1)\n .build();\n\n this->pipelines.at(2).bind(buf);\n pass.descriptorSets.at(1).bind(buf, this->pipelines.at(2));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fourth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1)\n 
.addR2W(this->tempImgs2)\n .build();\n\n this->pipelines.at(3).bind(buf);\n pass.descriptorSets.at(2).bind(buf, this->pipelines.at(3));\n buf.dispatch(threadsX, threadsY, 1);\n\n // fifth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2)\n .addW2R(this->optImg1)\n .addW2R(this->inImg2)\n .addR2W(this->outImg1)\n .build();\n\n this->pipelines.at(4).bind(buf);\n pass.descriptorSets.at(3).bind(buf, this->pipelines.at(4));\n buf.dispatch(threadsX, threadsY, 1);\n\n // sixth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->inImgs1.at((frameCount + 2) % 3))\n .addW2R(this->inImgs1.at(frameCount % 3))\n .addW2R(this->optImg1)\n .addW2R(this->optImg2)\n .addR2W(this->tempImgs2.at(0))\n .addR2W(this->tempImgs2.at(1))\n .build();\n\n this->pipelines.at(5).bind(buf);\n pass.sixthDescriptorSet.at(frameCount % 3).bind(buf, this->pipelines.at(5));\n buf.dispatch(threadsX, threadsY, 1);\n\n // seventh shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2.at(0))\n .addW2R(this->tempImgs2.at(1))\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(6).bind(buf);\n pass.descriptorSets.at(4).bind(buf, this->pipelines.at(6));\n buf.dispatch(threadsX, threadsY, 1);\n\n // eighth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addR2W(this->tempImgs2.at(0))\n .addR2W(this->tempImgs2.at(1))\n .build();\n this->pipelines.at(7).bind(buf);\n pass.descriptorSets.at(5).bind(buf, this->pipelines.at(7));\n buf.dispatch(threadsX, threadsY, 1);\n\n // ninth shader\n Utils::BarrierBuilder(buf)\n .addW2R(this->tempImgs2.at(0))\n .addW2R(this->tempImgs2.at(1))\n .addW2R(this->optImg3)\n .addR2W(this->tempImgs1.at(0))\n .addR2W(this->tempImgs1.at(1))\n .build();\n\n this->pipelines.at(8).bind(buf);\n pass.descriptorSets.at(6).bind(buf, this->pipelines.at(8));\n buf.dispatch(threadsX, threadsY, 1);\n\n // tenth shader\n Utils::BarrierBuilder(buf)\n 
.addW2R(this->tempImgs1.at(0))\n .addW2R(this->tempImgs1.at(1))\n .addW2R(this->optImg3)\n .addR2W(this->outImg2)\n .build();\n\n this->pipelines.at(9).bind(buf);\n pass.descriptorSets.at(7).bind(buf, this->pipelines.at(9));\n buf.dispatch(threadsX, threadsY, 1);\n}\n"], ["/lsfg-vk/framegen/v3.1_src/lsfg.cpp", "#include \n#include \n\n#include \"lsfg_3_1.hpp\"\n#include \"v3_1/context.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/instance.hpp\"\n#include \"pool/shaderpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"common/utils.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1;\n\nnamespace {\n std::optional instance;\n std::optional device;\n std::unordered_map contexts;\n}\n\nvoid LSFG_3_1::initialize(uint64_t deviceUUID,\n bool isHdr, float flowScale, uint64_t generationCount,\n const std::function(const std::string&)>& loader) {\n if (instance.has_value() || device.has_value())\n return;\n\n instance.emplace();\n device.emplace(Vulkan {\n .device{*instance, deviceUUID},\n .generationCount = generationCount,\n .flowScale = flowScale,\n .isHdr = isHdr\n });\n contexts = std::unordered_map();\n\n device->commandPool = Core::CommandPool(device->device);\n device->descriptorPool = Core::DescriptorPool(device->device);\n\n device->resources = Pool::ResourcePool(device->isHdr, device->flowScale);\n device->shaders = Pool::ShaderPool(loader);\n\n std::srand(static_cast(std::time(nullptr)));\n}\n\nint32_t LSFG_3_1::createContext(\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n const int32_t id = std::rand();\n contexts.emplace(id, Context(*device, in0, in1, outN, extent, format));\n return id;\n}\n\nvoid LSFG_3_1::presentContext(int32_t id, 
int inSem, const std::vector& outSem) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Context not found\");\n\n it->second.present(*device, inSem, outSem);\n}\n\nvoid LSFG_3_1::deleteContext(int32_t id) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_DEVICE_LOST, \"No such context\");\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.erase(it);\n}\n\nvoid LSFG_3_1::finalize() {\n if (!instance.has_value() || !device.has_value())\n return;\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.clear();\n device.reset();\n instance.reset();\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/lsfg.cpp", "#include \n#include \n\n#include \"lsfg_3_1p.hpp\"\n#include \"v3_1p/context.hpp\"\n#include \"core/commandpool.hpp\"\n#include \"core/descriptorpool.hpp\"\n#include \"core/instance.hpp\"\n#include \"pool/shaderpool.hpp\"\n#include \"common/exception.hpp\"\n#include \"common/utils.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1P;\n\nnamespace {\n std::optional instance;\n std::optional device;\n std::unordered_map contexts;\n}\n\nvoid LSFG_3_1P::initialize(uint64_t deviceUUID,\n bool isHdr, float flowScale, uint64_t generationCount,\n const std::function(const std::string&)>& loader) {\n if (instance.has_value() || device.has_value())\n return;\n\n instance.emplace();\n device.emplace(Vulkan {\n .device{*instance, deviceUUID},\n .generationCount = generationCount,\n .flowScale = flowScale,\n .isHdr = isHdr\n });\n contexts = std::unordered_map();\n\n device->commandPool = 
Core::CommandPool(device->device);\n device->descriptorPool = Core::DescriptorPool(device->device);\n\n device->resources = Pool::ResourcePool(device->isHdr, device->flowScale);\n device->shaders = Pool::ShaderPool(loader);\n\n std::srand(static_cast(std::time(nullptr)));\n}\n\nint32_t LSFG_3_1P::createContext(\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n const int32_t id = std::rand();\n contexts.emplace(id, Context(*device, in0, in1, outN, extent, format));\n return id;\n}\n\nvoid LSFG_3_1P::presentContext(int32_t id, int inSem, const std::vector& outSem) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_UNKNOWN, \"Context not found\");\n\n it->second.present(*device, inSem, outSem);\n}\n\nvoid LSFG_3_1P::deleteContext(int32_t id) {\n if (!instance.has_value() || !device.has_value())\n throw LSFG::vulkan_error(VK_ERROR_INITIALIZATION_FAILED, \"LSFG not initialized\");\n\n auto it = contexts.find(id);\n if (it == contexts.end())\n throw LSFG::vulkan_error(VK_ERROR_DEVICE_LOST, \"No such context\");\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.erase(it);\n}\n\nvoid LSFG_3_1P::finalize() {\n if (!instance.has_value() || !device.has_value())\n return;\n\n vkDeviceWaitIdle(device->device.handle());\n contexts.clear();\n device.reset();\n instance.reset();\n}\n"], ["/lsfg-vk/framegen/v3.1p_src/context.cpp", "#include \n#include \n\n#include \"v3_1p/context.hpp\"\n#include \"common/utils.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG_3_1P;\n\nContext::Context(Vulkan& vk,\n int in0, int in1, const 
std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n // import input images\n this->inImg_0 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in0);\n this->inImg_1 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in1);\n\n // prepare render data\n for (size_t i = 0; i < 8; i++) {\n auto& data = this->data.at(i);\n data.internalSemaphores.resize(vk.generationCount);\n data.outSemaphores.resize(vk.generationCount);\n data.completionFences.resize(vk.generationCount);\n data.cmdBuffers2.resize(vk.generationCount);\n }\n\n // create shader chains\n this->mipmaps = Shaders::Mipmaps(vk, this->inImg_0, this->inImg_1);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(i) = Shaders::Alpha(vk, this->mipmaps.getOutImages().at(i));\n this->beta = Shaders::Beta(vk, this->alpha.at(0).getOutImages());\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i) = Shaders::Gamma(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(std::min(6 - i, 5)),\n (i == 0) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()));\n if (i < 4) continue;\n\n this->delta.at(i - 4) = Shaders::Delta(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(6 - i),\n (i == 4) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage1()));\n }\n this->generate = Shaders::Generate(vk,\n this->inImg_0, this->inImg_1,\n this->gamma.at(6).getOutImage(),\n this->delta.at(2).getOutImage1(),\n this->delta.at(2).getOutImage2(),\n outN, format);\n}\n\nvoid Context::present(Vulkan& vk,\n int inSem, const std::vector& outSem) {\n auto& data = this->data.at(this->frameIdx % 8);\n\n // 3. 
wait for completion of previous frame in this slot\n if (data.shouldWait)\n for (auto& fence : data.completionFences)\n if (!fence.wait(vk.device, UINT64_MAX))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Fence wait timed out\");\n data.shouldWait = true;\n\n // 1. create mipmaps and process input image\n if (inSem >= 0) data.inSemaphore = Core::Semaphore(vk.device, inSem);\n for (size_t i = 0; i < vk.generationCount; i++)\n data.internalSemaphores.at(i) = Core::Semaphore(vk.device);\n\n data.cmdBuffer1 = Core::CommandBuffer(vk.device, vk.commandPool);\n data.cmdBuffer1.begin();\n\n this->mipmaps.Dispatch(data.cmdBuffer1, this->frameIdx);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(6 - i).Dispatch(data.cmdBuffer1, this->frameIdx);\n this->beta.Dispatch(data.cmdBuffer1, this->frameIdx);\n\n data.cmdBuffer1.end();\n std::vector waits = { data.inSemaphore };\n if (inSem < 0) waits.clear();\n data.cmdBuffer1.submit(vk.device.getComputeQueue(), std::nullopt,\n waits, std::nullopt,\n data.internalSemaphores, std::nullopt);\n\n // 2. generate intermediary frames\n for (size_t pass = 0; pass < vk.generationCount; pass++) {\n auto& internalSemaphore = data.internalSemaphores.at(pass);\n auto& outSemaphore = data.outSemaphores.at(pass);\n if (inSem >= 0) outSemaphore = Core::Semaphore(vk.device, outSem.empty() ? 
-1 : outSem.at(pass));\n auto& completionFence = data.completionFences.at(pass);\n completionFence = Core::Fence(vk.device);\n\n auto& buf2 = data.cmdBuffers2.at(pass);\n buf2 = Core::CommandBuffer(vk.device, vk.commandPool);\n buf2.begin();\n\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i).Dispatch(buf2, this->frameIdx, pass);\n if (i >= 4)\n this->delta.at(i - 4).Dispatch(buf2, this->frameIdx, pass, i == 6);\n }\n this->generate.Dispatch(buf2, this->frameIdx, pass);\n\n buf2.end();\n std::vector signals = { outSemaphore };\n if (inSem < 0) signals.clear();\n buf2.submit(vk.device.getComputeQueue(), completionFence,\n { internalSemaphore }, std::nullopt,\n signals, std::nullopt);\n }\n\n this->frameIdx++;\n}\n"], ["/lsfg-vk/framegen/v3.1_src/context.cpp", "#include \n#include \n\n#include \"v3_1/context.hpp\"\n#include \"common/utils.hpp\"\n#include \"common/exception.hpp\"\n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG_3_1;\n\nContext::Context(Vulkan& vk,\n int in0, int in1, const std::vector& outN,\n VkExtent2D extent, VkFormat format) {\n // import input images\n this->inImg_0 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in0);\n this->inImg_1 = Core::Image(vk.device, extent, format,\n VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,\n VK_IMAGE_ASPECT_COLOR_BIT, in1);\n\n // prepare render data\n for (size_t i = 0; i < 8; i++) {\n auto& data = this->data.at(i);\n data.internalSemaphores.resize(vk.generationCount);\n data.outSemaphores.resize(vk.generationCount);\n data.completionFences.resize(vk.generationCount);\n data.cmdBuffers2.resize(vk.generationCount);\n }\n\n // create shader chains\n this->mipmaps = Shaders::Mipmaps(vk, this->inImg_0, this->inImg_1);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(i) = Shaders::Alpha(vk, this->mipmaps.getOutImages().at(i));\n this->beta = Shaders::Beta(vk, 
this->alpha.at(0).getOutImages());\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i) = Shaders::Gamma(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(std::min(6 - i, 5)),\n (i == 0) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()));\n if (i < 4) continue;\n\n this->delta.at(i - 4) = Shaders::Delta(vk,\n this->alpha.at(6 - i).getOutImages(),\n this->beta.getOutImages().at(6 - i),\n (i == 4) ? std::nullopt : std::make_optional(this->gamma.at(i - 1).getOutImage()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage1()),\n (i == 4) ? std::nullopt : std::make_optional(this->delta.at(i - 5).getOutImage2()));\n }\n this->generate = Shaders::Generate(vk,\n this->inImg_0, this->inImg_1,\n this->gamma.at(6).getOutImage(),\n this->delta.at(2).getOutImage1(),\n this->delta.at(2).getOutImage2(),\n outN, format);\n}\n\nvoid Context::present(Vulkan& vk,\n int inSem, const std::vector& outSem) {\n auto& data = this->data.at(this->frameIdx % 8);\n\n // 3. wait for completion of previous frame in this slot\n if (data.shouldWait)\n for (auto& fence : data.completionFences)\n if (!fence.wait(vk.device, UINT64_MAX))\n throw LSFG::vulkan_error(VK_TIMEOUT, \"Fence wait timed out\");\n data.shouldWait = true;\n\n // 1. 
create mipmaps and process input image\n if (inSem >= 0) data.inSemaphore = Core::Semaphore(vk.device, inSem);\n for (size_t i = 0; i < vk.generationCount; i++)\n data.internalSemaphores.at(i) = Core::Semaphore(vk.device);\n\n data.cmdBuffer1 = Core::CommandBuffer(vk.device, vk.commandPool);\n data.cmdBuffer1.begin();\n\n this->mipmaps.Dispatch(data.cmdBuffer1, this->frameIdx);\n for (size_t i = 0; i < 7; i++)\n this->alpha.at(6 - i).Dispatch(data.cmdBuffer1, this->frameIdx);\n this->beta.Dispatch(data.cmdBuffer1, this->frameIdx);\n\n data.cmdBuffer1.end();\n std::vector waits = { data.inSemaphore };\n if (inSem < 0) waits.clear();\n data.cmdBuffer1.submit(vk.device.getComputeQueue(), std::nullopt,\n waits, std::nullopt,\n data.internalSemaphores, std::nullopt);\n\n // 2. generate intermediary frames\n for (size_t pass = 0; pass < vk.generationCount; pass++) {\n auto& internalSemaphore = data.internalSemaphores.at(pass);\n auto& outSemaphore = data.outSemaphores.at(pass);\n if (inSem >= 0) outSemaphore = Core::Semaphore(vk.device, outSem.empty() ? 
-1 : outSem.at(pass));\n auto& completionFence = data.completionFences.at(pass);\n completionFence = Core::Fence(vk.device);\n\n auto& buf2 = data.cmdBuffers2.at(pass);\n buf2 = Core::CommandBuffer(vk.device, vk.commandPool);\n buf2.begin();\n\n for (size_t i = 0; i < 7; i++) {\n this->gamma.at(i).Dispatch(buf2, this->frameIdx, pass);\n if (i >= 4)\n this->delta.at(i - 4).Dispatch(buf2, this->frameIdx, pass);\n }\n this->generate.Dispatch(buf2, this->frameIdx, pass);\n\n buf2.end();\n std::vector signals = { outSemaphore };\n if (inSem < 0) signals.clear();\n buf2.submit(vk.device.getComputeQueue(), completionFence,\n { internalSemaphore }, std::nullopt,\n signals, std::nullopt);\n }\n\n this->frameIdx++;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_analysis.h", "class DxbcAnalyzer {\n public:\n DxbcAnalyzer(\n const DxbcModuleInfo& moduleInfo,\n const DxbcProgramInfo& programInfo,\n const Rc& isgn,\n const Rc& osgn,\n const Rc& psgn,\n DxbcAnalysisInfo& analysis) {\n // Get number of clipping and culling planes from the\n // input and output signatures. 
We will need this to\n // declare the shader input and output interfaces.\n m_analysis->clipCullIn = getClipCullInfo(m_isgn);\n m_analysis->clipCullOut = getClipCullInfo(m_osgn);\n }\n ~DxbcAnalyzer() {\n \n }\n void processInstruction(\n const DxbcShaderInstruction& ins) {\n switch (ins.opClass) {\n case DxbcInstClass::Atomic: {\n const uint32_t operandId = ins.dstCount - 1;\n\n if (ins.dst[operandId].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.dst[operandId].idx[0].offset;\n m_analysis->uavInfos[registerId].accessAtomicOp = true;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;\n\n // Check whether the atomic operation is order-invariant\n DxvkAccessOp op = DxvkAccessOp::None;\n\n switch (ins.op) {\n case DxbcOpcode::AtomicAnd: op = DxvkAccessOp::And; break;\n case DxbcOpcode::AtomicOr: op = DxvkAccessOp::Or; break;\n case DxbcOpcode::AtomicXor: op = DxvkAccessOp::Xor; break;\n case DxbcOpcode::AtomicIAdd: op = DxvkAccessOp::Add; break;\n case DxbcOpcode::AtomicIMax: op = DxvkAccessOp::IMax; break;\n case DxbcOpcode::AtomicIMin: op = DxvkAccessOp::IMin; break;\n case DxbcOpcode::AtomicUMax: op = DxvkAccessOp::UMax; break;\n case DxbcOpcode::AtomicUMin: op = DxvkAccessOp::UMin; break;\n default: break;\n }\n\n setUavAccessOp(registerId, op);\n }\n } break;\n\n case DxbcInstClass::TextureSample:\n case DxbcInstClass::TextureGather:\n case DxbcInstClass::TextureQueryLod:\n case DxbcInstClass::VectorDeriv: {\n m_analysis->usesDerivatives = true;\n } break;\n\n case DxbcInstClass::ControlFlow: {\n if (ins.op == DxbcOpcode::Discard)\n m_analysis->usesKill = true;\n } break;\n\n case DxbcInstClass::BufferLoad: {\n uint32_t operandId = ins.op == DxbcOpcode::LdStructured ? 
2 : 1;\n bool sparseFeedback = ins.dstCount == 2;\n\n if (ins.src[operandId].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.src[operandId].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT;\n m_analysis->uavInfos[registerId].sparseFeedback |= sparseFeedback;\n\n setUavAccessOp(registerId, DxvkAccessOp::None);\n } else if (ins.src[operandId].type == DxbcOperandType::Resource) {\n const uint32_t registerId = ins.src[operandId].idx[0].offset;\n m_analysis->srvInfos[registerId].sparseFeedback |= sparseFeedback;\n }\n } break;\n\n case DxbcInstClass::BufferStore: {\n if (ins.dst[0].type == DxbcOperandType::UnorderedAccessView) {\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_WRITE_BIT;\n\n setUavAccessOp(registerId, getStoreAccessOp(ins.dst[0].mask, ins.src[ins.srcCount - 1u]));\n }\n } break;\n\n case DxbcInstClass::TypedUavLoad: {\n const uint32_t registerId = ins.src[1].idx[0].offset;\n m_analysis->uavInfos[registerId].accessTypedLoad = true;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_READ_BIT;\n\n setUavAccessOp(registerId, DxvkAccessOp::None);\n } break;\n\n case DxbcInstClass::TypedUavStore: {\n const uint32_t registerId = ins.dst[0].idx[0].offset;\n m_analysis->uavInfos[registerId].accessFlags |= VK_ACCESS_SHADER_WRITE_BIT;\n\n // The UAV format may change between dispatches, so be conservative here\n // and only allow this optimization when the app is writing zeroes.\n DxvkAccessOp storeOp = getStoreAccessOp(DxbcRegMask(0xf), ins.src[1u]);\n\n if (storeOp != DxvkAccessOp(DxvkAccessOp::OpType::StoreUi, 0u))\n storeOp = DxvkAccessOp::None;\n\n setUavAccessOp(registerId, storeOp);\n } break;\n\n case DxbcInstClass::Declaration: {\n switch (ins.op) {\n case DxbcOpcode::DclConstantBuffer: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcConstBufBindingCount)\n 
m_analysis->bindings.cbvMask |= 1u << registerId;\n } break;\n\n case DxbcOpcode::DclSampler: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcSamplerBindingCount)\n m_analysis->bindings.samplerMask |= 1u << registerId;\n } break;\n\n case DxbcOpcode::DclResource:\n case DxbcOpcode::DclResourceRaw:\n case DxbcOpcode::DclResourceStructured: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n uint32_t idx = registerId / 64u;\n uint32_t bit = registerId % 64u;\n\n if (registerId < DxbcResourceBindingCount)\n m_analysis->bindings.srvMask[idx] |= uint64_t(1u) << bit;\n } break;\n\n case DxbcOpcode::DclUavTyped:\n case DxbcOpcode::DclUavRaw:\n case DxbcOpcode::DclUavStructured: {\n uint32_t registerId = ins.dst[0].idx[0].offset;\n\n if (registerId < DxbcUavBindingCount)\n m_analysis->bindings.uavMask |= uint64_t(1u) << registerId;\n } break;\n\n default: ;\n }\n } break;\n\n default:\n break;\n }\n\n for (uint32_t i = 0; i < ins.dstCount; i++) {\n if (ins.dst[i].type == DxbcOperandType::IndexableTemp) {\n uint32_t index = ins.dst[i].idx[0].offset;\n m_analysis->xRegMasks[index] |= ins.dst[i].mask;\n }\n }\n }\n private:\n Rc m_isgn;\n Rc m_osgn;\n Rc m_psgn;\n DxbcAnalysisInfo* m_analysis = nullptr;\n DxbcClipCullInfo getClipCullInfo(\n const Rc& sgn) const {\n DxbcClipCullInfo result;\n \n if (sgn != nullptr) {\n for (auto e = sgn->begin(); e != sgn->end(); e++) {\n const uint32_t componentCount = e->componentMask.popCount();\n \n if (e->systemValue == DxbcSystemValue::ClipDistance)\n result.numClipPlanes += componentCount;\n if (e->systemValue == DxbcSystemValue::CullDistance)\n result.numCullPlanes += componentCount;\n }\n }\n \n return result;\n }\n void setUavAccessOp(uint32_t uav, DxvkAccessOp op) {\n if (m_analysis->uavInfos[uav].accessOp == DxvkAccessOp::None)\n m_analysis->uavInfos[uav].accessOp = op;\n\n // Maintain ordering if the UAV is accessed via other operations as well\n if (op == DxvkAccessOp::None || 
m_analysis->uavInfos[uav].accessOp != op)\n m_analysis->uavInfos[uav].nonInvariantAccess = true;\n }\n static DxvkAccessOp getStoreAccessOp(DxbcRegMask writeMask, const DxbcRegister& src) {\n if (src.type != DxbcOperandType::Imm32)\n return DxvkAccessOp::None;\n\n // Trivial case, same value is written to all components\n if (src.componentCount == DxbcComponentCount::Component1)\n return getConstantStoreOp(src.imm.u32_1);\n\n if (src.componentCount != DxbcComponentCount::Component4)\n return DxvkAccessOp::None;\n\n // Otherwise, make sure that all written components are equal\n DxvkAccessOp op = DxvkAccessOp::None;\n\n for (uint32_t i = 0u; i < 4u; i++) {\n if (!writeMask[i])\n continue;\n\n // If the written value can't be represented, skip\n DxvkAccessOp scalarOp = getConstantStoreOp(src.imm.u32_4[i]);\n\n if (scalarOp == DxvkAccessOp::None)\n return DxvkAccessOp::None;\n\n // First component written\n if (op == DxvkAccessOp::None)\n op = scalarOp;\n\n // Conflicting store ops\n if (op != scalarOp)\n return DxvkAccessOp::None;\n }\n\n return op;\n }\n static DxvkAccessOp getConstantStoreOp(uint32_t value) {\n constexpr uint32_t mask = 0xfffu;\n\n uint32_t ubits = value & mask;\n uint32_t fbits = (value >> 20u);\n\n if (value == ubits)\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreUi, ubits);\n\n if (value == (ubits | ~mask))\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreSi, ubits);\n\n if (value == (fbits << 20u))\n return DxvkAccessOp(DxvkAccessOp::OpType::StoreF, fbits);\n\n return DxvkAccessOp::None;\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_module.h", "class generates {\n public:\n explicit SpirvModule(uint32_t version) {\n this->instImportGlsl450();\n }\n ~SpirvModule() {\n \n }\n SpirvCodeBuffer compile() {\n SpirvCodeBuffer result;\n result.putHeader(m_version, m_id);\n result.append(m_capabilities);\n result.append(m_extensions);\n result.append(m_instExt);\n result.append(m_memoryModel);\n result.append(m_entryPoints);\n 
result.append(m_execModeInfo);\n result.append(m_debugNames);\n result.append(m_annotations);\n result.append(m_typeConstDefs);\n result.append(m_variables);\n\n // Perform some crude dead code elimination. In some cases, our compilers\n // may emit invalid code, such as an unreachable block branching to a loop's\n // continue block, but those cases cannot be reasonably detected up-front.\n std::unordered_set reachableBlocks;\n std::unordered_set mergeBlocks;\n\n classifyBlocks(reachableBlocks, mergeBlocks);\n\n bool reachable = true;\n\n for (auto ins : m_code) {\n if (ins.opCode() == spv::OpFunctionEnd) {\n reachable = true;\n result.append(ins);\n } else if (ins.opCode() == spv::OpLabel) {\n uint32_t labelId = ins.arg(1);\n\n if ((reachable = reachableBlocks.find(labelId) != reachableBlocks.end())) {\n result.append(ins);\n } else if (mergeBlocks.find(labelId) != mergeBlocks.end()) {\n result.append(ins);\n result.putIns(spv::OpUnreachable, 1);\n }\n } else if (reachable) {\n result.append(ins);\n }\n }\n\n return result;\n }\n uint32_t allocateId() {\n return m_id++;\n }\n bool hasCapability(\n spv::Capability capability) {\n for (auto ins : m_capabilities) {\n if (ins.opCode() == spv::OpCapability && ins.arg(1) == capability)\n return true;\n }\n\n return false;\n }\n void enableCapability(\n spv::Capability capability) {\n // Scan the generated instructions to check\n // whether we already enabled the capability.\n if (!hasCapability(capability)) {\n m_capabilities.putIns (spv::OpCapability, 2);\n m_capabilities.putWord(capability);\n }\n }\n void enableExtension(\n const char* extensionName) {\n m_extensions.putIns (spv::OpExtension, 1 + m_extensions.strLen(extensionName));\n m_extensions.putStr (extensionName);\n }\n void addEntryPoint(\n uint32_t entryPointId,\n spv::ExecutionModel executionModel,\n const char* name) {\n m_entryPoints.putIns (spv::OpEntryPoint, 3 + m_entryPoints.strLen(name) + m_interfaceVars.size());\n m_entryPoints.putWord 
(executionModel);\n m_entryPoints.putWord (entryPointId);\n m_entryPoints.putStr (name);\n \n for (uint32_t varId : m_interfaceVars)\n m_entryPoints.putWord(varId);\n }\n void setMemoryModel(\n spv::AddressingModel addressModel,\n spv::MemoryModel memoryModel) {\n m_memoryModel.putIns (spv::OpMemoryModel, 3);\n m_memoryModel.putWord (addressModel);\n m_memoryModel.putWord (memoryModel);\n }\n void setExecutionMode(\n uint32_t entryPointId,\n spv::ExecutionMode executionMode) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 3 + argCount);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(executionMode);\n\n for (uint32_t i = 0; i < argCount; i++)\n m_execModeInfo.putWord(args[i]);\n }\n void setExecutionMode(\n uint32_t entryPointId,\n spv::ExecutionMode executionMode,\n uint32_t argCount,\n const uint32_t* args) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 3 + argCount);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(executionMode);\n\n for (uint32_t i = 0; i < argCount; i++)\n m_execModeInfo.putWord(args[i]);\n }\n void setInvocations(\n uint32_t entryPointId,\n uint32_t invocations) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 4);\n m_execModeInfo.putWord (entryPointId);\n m_execModeInfo.putWord (spv::ExecutionModeInvocations);\n m_execModeInfo.putInt32(invocations);\n }\n void setLocalSize(\n uint32_t entryPointId,\n uint32_t x,\n uint32_t y,\n uint32_t z) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 6);\n m_execModeInfo.putWord (entryPointId);\n m_execModeInfo.putWord (spv::ExecutionModeLocalSize);\n m_execModeInfo.putInt32(x);\n m_execModeInfo.putInt32(y);\n m_execModeInfo.putInt32(z);\n }\n void setOutputVertices(\n uint32_t entryPointId,\n uint32_t vertexCount) {\n m_execModeInfo.putIns (spv::OpExecutionMode, 4);\n m_execModeInfo.putWord(entryPointId);\n m_execModeInfo.putWord(spv::ExecutionModeOutputVertices);\n m_execModeInfo.putWord(vertexCount);\n }\n uint32_t addDebugString(\n const char* string) {\n uint32_t 
resultId = this->allocateId();\n \n m_debugNames.putIns (spv::OpString,\n 2 + m_debugNames.strLen(string));\n m_debugNames.putWord(resultId);\n m_debugNames.putStr (string);\n return resultId;\n }\n void setDebugSource(\n spv::SourceLanguage language,\n uint32_t version,\n uint32_t file,\n const char* source) {\n uint32_t strLen = source != nullptr\n ? m_debugNames.strLen(source) : 0;\n \n m_debugNames.putIns (spv::OpSource, 4 + strLen);\n m_debugNames.putWord(language);\n m_debugNames.putWord(version);\n m_debugNames.putWord(file);\n \n if (source != nullptr)\n m_debugNames.putStr(source);\n }\n void setDebugName(\n uint32_t expressionId,\n const char* debugName) {\n m_debugNames.putIns (spv::OpName, 2 + m_debugNames.strLen(debugName));\n m_debugNames.putWord(expressionId);\n m_debugNames.putStr (debugName);\n }\n void setDebugMemberName(\n uint32_t structId,\n uint32_t memberId,\n const char* debugName) {\n m_debugNames.putIns (spv::OpMemberName, 3 + m_debugNames.strLen(debugName));\n m_debugNames.putWord(structId);\n m_debugNames.putWord(memberId);\n m_debugNames.putStr (debugName);\n }\n uint32_t constBool(\n bool v) {\n return this->defConst(v\n ? 
spv::OpConstantTrue\n : spv::OpConstantFalse,\n this->defBoolType(),\n 0, nullptr);\n }\n uint32_t consti32(\n int32_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(32, 1),\n data.size(),\n data.data());\n }\n uint32_t consti64(\n int64_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(64, 1),\n data.size(),\n data.data());\n }\n uint32_t constu32(\n uint32_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(32, 0),\n data.size(),\n data.data());\n }\n uint32_t constu64(\n uint64_t v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defIntType(64, 0),\n data.size(),\n data.data());\n }\n uint32_t constf32(\n float v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defFloatType(32),\n data.size(),\n data.data());\n }\n uint32_t constf64(\n double v) {\n std::array data;\n std::memcpy(data.data(), &v, sizeof(v));\n \n return this->defConst(\n spv::OpConstant,\n this->defFloatType(64),\n data.size(),\n data.data());\n }\n uint32_t constvec4i32(\n int32_t x,\n int32_t y,\n int32_t z,\n int32_t w) {\n std::array args = {{\n this->consti32(x), this->consti32(y),\n this->consti32(z), this->consti32(w),\n }};\n \n uint32_t scalarTypeId = this->defIntType(32, 1);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4b32(\n bool x,\n bool y,\n bool z,\n bool w) {\n std::array args = {{\n this->constBool(x), this->constBool(y),\n this->constBool(z), this->constBool(w),\n }};\n \n uint32_t scalarTypeId = this->defBoolType();\n uint32_t vectorTypeId = 
this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4u32(\n uint32_t x,\n uint32_t y,\n uint32_t z,\n uint32_t w) {\n std::array args = {{\n this->constu32(x), this->constu32(y),\n this->constu32(z), this->constu32(w),\n }};\n \n uint32_t scalarTypeId = this->defIntType(32, 0);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec2f32(\n float x,\n float y) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 2);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec3f32(\n float x,\n float y,\n float z) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n this->constf32(z),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 3);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constvec4f32(\n float x,\n float y,\n float z,\n float w) {\n std::array args = {{\n this->constf32(x), this->constf32(y),\n this->constf32(z), this->constf32(w),\n }};\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, 4);\n \n return this->constComposite(vectorTypeId, args.size(), args.data());\n }\n uint32_t constfReplicant(\n float replicant,\n uint32_t count) {\n uint32_t value = this->constf32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defFloatType(32);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t 
constbReplicant(\n bool replicant,\n uint32_t count) {\n uint32_t value = this->constBool(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defBoolType();\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constiReplicant(\n int32_t replicant,\n uint32_t count) {\n uint32_t value = this->consti32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defIntType(32, 1);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constuReplicant(\n int32_t replicant,\n uint32_t count) {\n uint32_t value = this->constu32(replicant);\n\n std::array args = { value, value, value, value };\n\n // Can't make a scalar composite.\n if (count == 1)\n return args[0];\n \n uint32_t scalarTypeId = this->defIntType(32, 0);\n uint32_t vectorTypeId = this->defVectorType(scalarTypeId, count);\n \n return this->constComposite(vectorTypeId, count, args.data());\n }\n uint32_t constComposite(\n uint32_t typeId,\n uint32_t constCount,\n const uint32_t* constIds) {\n return this->defConst(\n spv::OpConstantComposite,\n typeId, constCount, constIds);\n }\n uint32_t constUndef(\n uint32_t typeId) {\n return this->defConst(spv::OpUndef,\n typeId, 0, nullptr);\n }\n uint32_t constNull(\n uint32_t typeId) {\n return this->defConst(spv::OpConstantNull,\n typeId, 0, nullptr);\n }\n uint32_t lateConst32(\n uint32_t typeId) {\n uint32_t resultId = this->allocateId();\n m_lateConsts.insert(resultId);\n\n m_typeConstDefs.putIns (spv::OpConstant, 4);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(0);\n return resultId;\n 
}\n void setLateConst(\n uint32_t constId,\n const uint32_t* argIds) {\n for (auto ins : m_typeConstDefs) {\n if (ins.opCode() != spv::OpConstant\n && ins.opCode() != spv::OpConstantComposite)\n continue;\n \n if (ins.arg(2) != constId)\n continue;\n\n for (uint32_t i = 3; i < ins.length(); i++)\n ins.setArg(i, argIds[i - 3]);\n\n return;\n }\n }\n uint32_t specConstBool(\n bool v) {\n uint32_t typeId = this->defBoolType();\n uint32_t resultId = this->allocateId();\n \n const spv::Op op = v\n ? spv::OpSpecConstantTrue\n : spv::OpSpecConstantFalse;\n \n m_typeConstDefs.putIns (op, 3);\n m_typeConstDefs.putWord (typeId);\n m_typeConstDefs.putWord (resultId);\n return resultId;\n }\n uint32_t specConst32(\n uint32_t typeId,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpSpecConstant, 4);\n m_typeConstDefs.putWord (typeId);\n m_typeConstDefs.putWord (resultId);\n m_typeConstDefs.putWord (value);\n return resultId;\n }\n void decorate(\n uint32_t object,\n spv::Decoration decoration) {\n m_annotations.putIns (spv::OpDecorate, 3);\n m_annotations.putWord (object);\n m_annotations.putWord (decoration);\n }\n void decorateArrayStride(\n uint32_t object,\n uint32_t stride) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationArrayStride);\n m_annotations.putInt32(stride);\n }\n void decorateBinding(\n uint32_t object,\n uint32_t binding) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBinding);\n m_annotations.putInt32(binding);\n }\n void decorateBlock(\n uint32_t object) {\n m_annotations.putIns (spv::OpDecorate, 3);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationBlock);\n }\n void decorateBuiltIn(\n uint32_t object,\n spv::BuiltIn builtIn) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord 
(spv::DecorationBuiltIn);\n m_annotations.putWord (builtIn);\n }\n void decorateComponent(\n uint32_t object,\n uint32_t location) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationComponent);\n m_annotations.putInt32(location);\n }\n void decorateDescriptorSet(\n uint32_t object,\n uint32_t set) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationDescriptorSet);\n m_annotations.putInt32(set);\n }\n void decorateIndex(\n uint32_t object,\n uint32_t index) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationIndex);\n m_annotations.putInt32(index);\n }\n void decorateLocation(\n uint32_t object,\n uint32_t location) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationLocation);\n m_annotations.putInt32(location);\n }\n void decorateSpecId(\n uint32_t object,\n uint32_t specId) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationSpecId);\n m_annotations.putInt32(specId);\n }\n void decorateXfb(\n uint32_t object,\n uint32_t streamId,\n uint32_t bufferId,\n uint32_t offset,\n uint32_t stride) {\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationStream);\n m_annotations.putInt32(streamId);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationXfbBuffer);\n m_annotations.putInt32(bufferId);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationXfbStride);\n m_annotations.putInt32(stride);\n\n m_annotations.putIns (spv::OpDecorate, 4);\n m_annotations.putWord (object);\n m_annotations.putWord (spv::DecorationOffset);\n 
m_annotations.putInt32(offset);\n }\n void memberDecorateBuiltIn(\n uint32_t structId,\n uint32_t memberId,\n spv::BuiltIn builtIn) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationBuiltIn);\n m_annotations.putWord (builtIn);\n }\n void memberDecorate(\n uint32_t structId,\n uint32_t memberId,\n spv::Decoration decoration) {\n m_annotations.putIns (spv::OpMemberDecorate, 4);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (decoration);\n }\n void memberDecorateMatrixStride(\n uint32_t structId,\n uint32_t memberId,\n uint32_t stride) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationMatrixStride);\n m_annotations.putWord (stride);\n }\n void memberDecorateOffset(\n uint32_t structId,\n uint32_t memberId,\n uint32_t offset) {\n m_annotations.putIns (spv::OpMemberDecorate, 5);\n m_annotations.putWord (structId);\n m_annotations.putWord (memberId);\n m_annotations.putWord (spv::DecorationOffset);\n m_annotations.putWord (offset);\n }\n uint32_t defVoidType() {\n return this->defType(spv::OpTypeVoid, 0, nullptr);\n }\n uint32_t defBoolType() {\n return this->defType(spv::OpTypeBool, 0, nullptr);\n }\n uint32_t defIntType(\n uint32_t width,\n uint32_t isSigned) {\n std::array args = {{ width, isSigned }};\n return this->defType(spv::OpTypeInt,\n args.size(), args.data());\n }\n uint32_t defFloatType(\n uint32_t width) {\n std::array args = {{ width }};\n return this->defType(spv::OpTypeFloat,\n args.size(), args.data());\n }\n uint32_t defVectorType(\n uint32_t elementType,\n uint32_t elementCount) {\n std::array args =\n {{ elementType, elementCount }};\n \n return this->defType(spv::OpTypeVector,\n args.size(), args.data());\n }\n uint32_t defMatrixType(\n uint32_t columnType,\n uint32_t 
columnCount) {\n std::array args =\n {{ columnType, columnCount }};\n \n return this->defType(spv::OpTypeMatrix,\n args.size(), args.data());\n }\n uint32_t defArrayType(\n uint32_t typeId,\n uint32_t length) {\n std::array args = {{ typeId, length }};\n \n return this->defType(spv::OpTypeArray,\n args.size(), args.data());\n }\n uint32_t defArrayTypeUnique(\n uint32_t typeId,\n uint32_t length) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeArray, 4);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(length);\n return resultId;\n }\n uint32_t defRuntimeArrayType(\n uint32_t typeId) {\n std::array args = { typeId };\n \n return this->defType(spv::OpTypeRuntimeArray,\n args.size(), args.data());\n }\n uint32_t defRuntimeArrayTypeUnique(\n uint32_t typeId) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeRuntimeArray, 3);\n m_typeConstDefs.putWord(resultId);\n m_typeConstDefs.putWord(typeId);\n return resultId;\n }\n uint32_t defFunctionType(\n uint32_t returnType,\n uint32_t argCount,\n const uint32_t* argTypes) {\n std::vector args;\n args.push_back(returnType);\n \n for (uint32_t i = 0; i < argCount; i++)\n args.push_back(argTypes[i]);\n \n return this->defType(spv::OpTypeFunction,\n args.size(), args.data());\n }\n uint32_t defStructType(\n uint32_t memberCount,\n const uint32_t* memberTypes) {\n return this->defType(spv::OpTypeStruct,\n memberCount, memberTypes);\n }\n uint32_t defStructTypeUnique(\n uint32_t memberCount,\n const uint32_t* memberTypes) {\n uint32_t resultId = this->allocateId();\n \n m_typeConstDefs.putIns (spv::OpTypeStruct, 2 + memberCount);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < memberCount; i++)\n m_typeConstDefs.putWord(memberTypes[i]);\n return resultId;\n }\n uint32_t defPointerType(\n uint32_t variableType,\n spv::StorageClass storageClass) {\n std::array args = {{\n 
static_cast(storageClass),\n variableType,\n }};\n \n return this->defType(spv::OpTypePointer,\n args.size(), args.data());\n }\n uint32_t defSamplerType() {\n return this->defType(spv::OpTypeSampler, 0, nullptr);\n }\n uint32_t defImageType(\n uint32_t sampledType,\n spv::Dim dimensionality,\n uint32_t depth,\n uint32_t arrayed,\n uint32_t multisample,\n uint32_t sampled,\n spv::ImageFormat format) {\n std::array args = {{\n sampledType,\n static_cast(dimensionality),\n depth, arrayed,\n multisample,\n sampled,\n static_cast(format)\n }};\n \n return this->defType(spv::OpTypeImage,\n args.size(), args.data());\n }\n uint32_t defSampledImageType(\n uint32_t imageType) {\n return this->defType(spv::OpTypeSampledImage, 1, &imageType);\n }\n uint32_t newVar(\n uint32_t pointerType,\n spv::StorageClass storageClass) {\n uint32_t resultId = this->allocateId();\n \n if (isInterfaceVar(storageClass))\n m_interfaceVars.push_back(resultId);\n\n auto& code = storageClass != spv::StorageClassFunction\n ? m_variables : m_code;\n\n code.putIns (spv::OpVariable, 4);\n code.putWord (pointerType);\n code.putWord (resultId);\n code.putWord (storageClass);\n return resultId;\n }\n uint32_t newVarInit(\n uint32_t pointerType,\n spv::StorageClass storageClass,\n uint32_t initialValue) {\n uint32_t resultId = this->allocateId();\n \n if (isInterfaceVar(storageClass))\n m_interfaceVars.push_back(resultId);\n\n auto& code = storageClass != spv::StorageClassFunction\n ? 
m_variables : m_code;\n \n code.putIns (spv::OpVariable, 5);\n code.putWord (pointerType);\n code.putWord (resultId);\n code.putWord (storageClass);\n code.putWord (initialValue);\n return resultId;\n }\n void functionBegin(\n uint32_t returnType,\n uint32_t functionId,\n uint32_t functionType,\n spv::FunctionControlMask functionControl) {\n m_code.putIns (spv::OpFunction, 5);\n m_code.putWord(returnType);\n m_code.putWord(functionId);\n m_code.putWord(functionControl);\n m_code.putWord(functionType);\n }\n uint32_t functionParameter(\n uint32_t parameterType) {\n uint32_t parameterId = this->allocateId();\n \n m_code.putIns (spv::OpFunctionParameter, 3);\n m_code.putWord(parameterType);\n m_code.putWord(parameterId);\n return parameterId;\n }\n void functionEnd() {\n m_code.putIns (spv::OpFunctionEnd, 1);\n }\n uint32_t opAccessChain(\n uint32_t resultType,\n uint32_t composite,\n uint32_t indexCount,\n const uint32_t* indexArray) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAccessChain, 4 + indexCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(composite);\n \n for (uint32_t i = 0; i < indexCount; i++)\n m_code.putInt32(indexArray[i]);\n return resultId;\n }\n uint32_t opArrayLength(\n uint32_t resultType,\n uint32_t structure,\n uint32_t memberId) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpArrayLength, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(structure);\n m_code.putWord(memberId);\n return resultId;\n }\n uint32_t opAny(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAny, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opAll(\n uint32_t resultType,\n uint32_t vector) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAll, 4);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(vector);\n return resultId;\n }\n uint32_t opAtomicLoad(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicLoad, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n void opAtomicStore(\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n m_code.putIns (spv::OpAtomicStore, 5);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n }\n uint32_t opAtomicExchange(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicExchange, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicCompareExchange(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t equal,\n uint32_t unequal,\n uint32_t value,\n uint32_t comparator) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicCompareExchange, 9);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(equal);\n m_code.putWord(unequal);\n m_code.putWord(value);\n m_code.putWord(comparator);\n return resultId;\n }\n uint32_t opAtomicIIncrement(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIIncrement, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n uint32_t 
opAtomicIDecrement(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIDecrement, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n return resultId;\n }\n uint32_t opAtomicIAdd(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicIAdd, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicISub(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicISub, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicSMin(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicSMin, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicSMax(\n uint32_t resultType,\n uint32_t pointer,\n uint32_t scope,\n uint32_t semantics,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpAtomicSMax, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(pointer);\n m_code.putWord(scope);\n m_code.putWord(semantics);\n m_code.putWord(value);\n return resultId;\n }\n uint32_t opAtomicUMin(\n uint32_t 
// (continuation: remaining parameters and body of opAtomicUMin — atomic unsigned minimum)
resultType, uint32_t pointer, uint32_t scope, uint32_t semantics, uint32_t value) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpAtomicUMin, 7);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(pointer);
  m_code.putWord(scope); m_code.putWord(semantics); m_code.putWord(value);
  return resultId;
}
// OpAtomicUMax: atomic unsigned maximum.
uint32_t opAtomicUMax(uint32_t resultType, uint32_t pointer, uint32_t scope, uint32_t semantics, uint32_t value) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpAtomicUMax, 7);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(pointer);
  m_code.putWord(scope); m_code.putWord(semantics); m_code.putWord(value);
  return resultId;
}
// OpAtomicAnd: atomic bitwise AND; returns the original value.
uint32_t opAtomicAnd(uint32_t resultType, uint32_t pointer, uint32_t scope, uint32_t semantics, uint32_t value) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpAtomicAnd, 7);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(pointer);
  m_code.putWord(scope); m_code.putWord(semantics); m_code.putWord(value);
  return resultId;
}
// OpAtomicOr: atomic bitwise OR; returns the original value.
uint32_t opAtomicOr(uint32_t resultType, uint32_t pointer, uint32_t scope, uint32_t semantics, uint32_t value) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpAtomicOr, 7);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(pointer);
  m_code.putWord(scope); m_code.putWord(semantics); m_code.putWord(value);
  return resultId;
}
// OpAtomicXor: atomic bitwise XOR; returns the original value.
uint32_t opAtomicXor(uint32_t resultType, uint32_t pointer, uint32_t scope, uint32_t semantics, uint32_t value) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpAtomicXor, 7);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(pointer);
  m_code.putWord(scope); m_code.putWord(semantics); m_code.putWord(value);
  return resultId;
}
// (OpBitcast emitter — its parameter list continues on the next source line)
uint32_t opBitcast(uint32_t
// (continuation: remaining parameters and body of opBitcast — reinterpret bits as another type)
resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpBitcast, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
// OpBitCount: population count.
uint32_t opBitCount(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpBitCount, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
// OpBitReverse: reverse bit order.
uint32_t opBitReverse(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpBitReverse, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
// GLSL.std.450 FindILsb via OpExtInst (lowest set bit, or -1 if none).
uint32_t opFindILsb(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450FindILsb);
  m_code.putWord(operand);
  return resultId;
}
// GLSL.std.450 FindUMsb: highest set bit of an unsigned value.
uint32_t opFindUMsb(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450FindUMsb);
  m_code.putWord(operand);
  return resultId;
}
// GLSL.std.450 FindSMsb: highest significant bit of a signed value.
uint32_t opFindSMsb(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450FindSMsb);
  m_code.putWord(operand);
  return resultId;
}
// OpBitFieldInsert: insert 'count' bits of 'insert' into 'base' at 'offset'
// (remaining operand words continue on the next source line).
uint32_t opBitFieldInsert(uint32_t resultType, uint32_t base, uint32_t insert, uint32_t offset, uint32_t count) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpBitFieldInsert, 7);
  m_code.putWord(resultType);
// (continuation: remaining operand words of opBitFieldInsert)
m_code.putWord(resultId); m_code.putWord(base); m_code.putWord(insert);
m_code.putWord(offset); m_code.putWord(count);
return resultId;
}
// OpBitFieldSExtract: sign-extending bit-field extraction.
uint32_t opBitFieldSExtract(uint32_t resultType, uint32_t base, uint32_t offset, uint32_t count) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpBitFieldSExtract, 6);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(base);
  m_code.putWord(offset); m_code.putWord(count);
  return resultId;
}
// OpBitFieldUExtract: zero-extending bit-field extraction.
uint32_t opBitFieldUExtract(uint32_t resultType, uint32_t base, uint32_t offset, uint32_t count) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpBitFieldUExtract, 6);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(base);
  m_code.putWord(offset); m_code.putWord(count);
  return resultId;
}
// OpBitwiseAnd.
uint32_t opBitwiseAnd(uint32_t resultType, uint32_t operand1, uint32_t operand2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpBitwiseAnd, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(operand1); m_code.putWord(operand2);
  return resultId;
}
// OpBitwiseOr.
uint32_t opBitwiseOr(uint32_t resultType, uint32_t operand1, uint32_t operand2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpBitwiseOr, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(operand1); m_code.putWord(operand2);
  return resultId;
}
// OpBitwiseXor.
uint32_t opBitwiseXor(uint32_t resultType, uint32_t operand1, uint32_t operand2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpBitwiseXor, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(operand1); m_code.putWord(operand2);
  return resultId;
}
// OpNot: bitwise complement (remaining operand words continue on the next source line).
uint32_t opNot(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpNot, 4);
  m_code.putWord(resultType);
// (continuation: remaining operand words of opNot)
m_code.putWord(resultId); m_code.putWord(operand);
return resultId;
}
// OpShiftLeftLogical: base << shift.
uint32_t opShiftLeftLogical(uint32_t resultType, uint32_t base, uint32_t shift) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpShiftLeftLogical, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(base); m_code.putWord(shift);
  return resultId;
}
// OpShiftRightArithmetic: sign-filling right shift.
uint32_t opShiftRightArithmetic(uint32_t resultType, uint32_t base, uint32_t shift) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpShiftRightArithmetic, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(base); m_code.putWord(shift);
  return resultId;
}
// OpShiftRightLogical: zero-filling right shift.
uint32_t opShiftRightLogical(uint32_t resultType, uint32_t base, uint32_t shift) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpShiftRightLogical, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(base); m_code.putWord(shift);
  return resultId;
}
// Numeric conversions; SPIR-V spells the opcodes ...FToS/FToU/SToF/UToF.
uint32_t opConvertFtoS(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpConvertFToS, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
uint32_t opConvertFtoU(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpConvertFToU, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
uint32_t opConvertStoF(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpConvertSToF, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
// (opConvertUtoF's operand words continue on the next source line)
uint32_t opConvertUtoF(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpConvertUToF, 4);
// (continuation: operand words of opConvertUtoF)
m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
return resultId;
}
// OpCompositeConstruct: build a composite from 'valueCount' constituent ids.
uint32_t opCompositeConstruct(uint32_t resultType, uint32_t valueCount, const uint32_t* valueArray) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpCompositeConstruct, 3 + valueCount);
  m_code.putWord(resultType); m_code.putWord(resultId);
  for (uint32_t i = 0; i < valueCount; i++)
    m_code.putWord(valueArray[i]);
  return resultId;
}
// OpCompositeExtract: indices are literal integers, hence putInt32 rather than putWord.
uint32_t opCompositeExtract(uint32_t resultType, uint32_t composite, uint32_t indexCount, const uint32_t* indexArray) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpCompositeExtract, 4 + indexCount);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(composite);
  for (uint32_t i = 0; i < indexCount; i++)
    m_code.putInt32(indexArray[i]);
  return resultId;
}
// OpCompositeInsert: replace the element at the literal index path with 'object'.
uint32_t opCompositeInsert(uint32_t resultType, uint32_t object, uint32_t composite, uint32_t indexCount, const uint32_t* indexArray) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpCompositeInsert, 5 + indexCount);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(object); m_code.putWord(composite);
  for (uint32_t i = 0; i < indexCount; i++)
    m_code.putInt32(indexArray[i]);
  return resultId;
}
// OpDPdx / OpDPdy: screen-space derivatives.
uint32_t opDpdx(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpDPdx, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
uint32_t opDpdy(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpDPdy, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
// (opDpdxCoarse's instruction words continue on the next source line)
uint32_t opDpdxCoarse(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
// (continuation: instruction words of opDpdxCoarse)
m_code.putIns(spv::OpDPdxCoarse, 4);
m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
return resultId;
}
// Coarse/fine derivative variants.
uint32_t opDpdyCoarse(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpDPdyCoarse, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
uint32_t opDpdxFine(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpDPdxFine, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
uint32_t opDpdyFine(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpDPdyFine, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
// OpVectorExtractDynamic: extract a component selected by a runtime index id.
uint32_t opVectorExtractDynamic(uint32_t resultType, uint32_t vector, uint32_t index) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpVectorExtractDynamic, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector); m_code.putWord(index);
  return resultId;
}
// OpVectorShuffle: component indices are literal integers, hence putInt32.
uint32_t opVectorShuffle(uint32_t resultType, uint32_t vectorLeft, uint32_t vectorRight, uint32_t indexCount, const uint32_t* indexArray) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpVectorShuffle, 5 + indexCount);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vectorLeft); m_code.putWord(vectorRight);
  for (uint32_t i = 0; i < indexCount; i++)
    m_code.putInt32(indexArray[i]);
  return resultId;
}
// OpSNegate: integer negation.
uint32_t opSNegate(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpSNegate, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
// (return type of the next emitter; its name continues on the following source line)
uint32_t
// (continuation: opFNegate — its 'uint32_t' return type sits on the previous source line)
opFNegate(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFNegate, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
// GLSL.std.450 SAbs: signed integer absolute value.
uint32_t opSAbs(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450SAbs);
  m_code.putWord(operand);
  return resultId;
}
// GLSL.std.450 FAbs: floating-point absolute value.
uint32_t opFAbs(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450FAbs);
  m_code.putWord(operand);
  return resultId;
}
// GLSL.std.450 FSign: -1, 0 or +1 with the operand's sign.
uint32_t opFSign(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450FSign);
  m_code.putWord(operand);
  return resultId;
}
// GLSL.std.450 FMix: linear blend x*(1-a) + y*a.
uint32_t opFMix(uint32_t resultType, uint32_t x, uint32_t y, uint32_t a) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 8);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450FMix);
  m_code.putWord(x); m_code.putWord(y); m_code.putWord(a);
  return resultId;
}
// GLSL.std.450 Cross product.
uint32_t opCross(uint32_t resultType, uint32_t x, uint32_t y) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 7);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Cross);
  m_code.putWord(x); m_code.putWord(y);
  return resultId;
}
// (opIAdd's remaining parameters continue on the next source line)
uint32_t opIAdd(uint32_t resultType,
// (continuation: remaining parameters and body of opIAdd — integer addition)
uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpIAdd, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// OpISub: integer subtraction.
uint32_t opISub(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpISub, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// OpFAdd: float addition.
uint32_t opFAdd(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFAdd, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// OpFSub: float subtraction.
uint32_t opFSub(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFSub, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// OpSDiv / OpUDiv: signed/unsigned integer division.
uint32_t opSDiv(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpSDiv, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
uint32_t opUDiv(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpUDiv, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// OpSRem: signed remainder (sign follows the dividend).
uint32_t opSRem(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpSRem, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// (opUMod's instruction words continue on the next source line)
uint32_t opUMod(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
// (continuation: instruction words of opUMod — unsigned modulo)
m_code.putIns(spv::OpUMod, 5);
m_code.putWord(resultType); m_code.putWord(resultId);
m_code.putWord(a); m_code.putWord(b);
return resultId;
}
// OpFDiv: float division.
uint32_t opFDiv(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFDiv, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// OpIMul / OpFMul: integer and float multiplication.
uint32_t opIMul(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpIMul, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
uint32_t opFMul(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFMul, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// Linear-algebra products.
uint32_t opVectorTimesScalar(uint32_t resultType, uint32_t vector, uint32_t scalar) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpVectorTimesScalar, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector); m_code.putWord(scalar);
  return resultId;
}
uint32_t opMatrixTimesMatrix(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpMatrixTimesMatrix, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
uint32_t opMatrixTimesVector(uint32_t resultType, uint32_t matrix, uint32_t vector) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpMatrixTimesVector, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(matrix); m_code.putWord(vector);
  return resultId;
}
// (opVectorTimesMatrix's final parameter continues on the next source line)
uint32_t opVectorTimesMatrix(uint32_t resultType, uint32_t vector, uint32_t
// (continuation: final parameter and body of opVectorTimesMatrix)
matrix) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpVectorTimesMatrix, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector); m_code.putWord(matrix);
  return resultId;
}
// OpTranspose: matrix transpose.
uint32_t opTranspose(uint32_t resultType, uint32_t matrix) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpTranspose, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(matrix);
  return resultId;
}
// GLSL.std.450 MatrixInverse via OpExtInst.
uint32_t opInverse(uint32_t resultType, uint32_t matrix) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450MatrixInverse);
  m_code.putWord(matrix);
  return resultId;
}
// GLSL.std.450 Fma: fused multiply-add a*b+c.
uint32_t opFFma(uint32_t resultType, uint32_t a, uint32_t b, uint32_t c) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 8);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Fma);
  m_code.putWord(a); m_code.putWord(b); m_code.putWord(c);
  return resultId;
}
// GLSL.std.450 FMax / FMin.
uint32_t opFMax(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 7);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450FMax);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
uint32_t opFMin(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 7);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450FMin);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// (opNMax's body continues on the next source line)
uint32_t opNMax(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId =
// (continuation: body of opNMax — GLSL.std.450 NMax, NaN-aware maximum)
this->allocateId();
m_code.putIns(spv::OpExtInst, 7);
m_code.putWord(resultType); m_code.putWord(resultId);
m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450NMax);
m_code.putWord(a); m_code.putWord(b);
return resultId;
}
// GLSL.std.450 NMin: NaN-aware minimum.
uint32_t opNMin(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 7);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450NMin);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// GLSL.std.450 SMax / SMin: signed integer max/min.
uint32_t opSMax(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 7);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450SMax);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
uint32_t opSMin(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 7);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450SMin);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// GLSL.std.450 UMax / UMin: unsigned integer max/min.
uint32_t opUMax(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 7);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450UMax);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
uint32_t opUMin(uint32_t resultType, uint32_t a, uint32_t b) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 7);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450UMin);
  m_code.putWord(a); m_code.putWord(b);
  return resultId;
}
// (return type of the next emitter; its name continues on the following source line)
uint32_t
// (continuation: opFClamp — its 'uint32_t' return type sits on the previous source line)
opFClamp(uint32_t resultType, uint32_t x, uint32_t minVal, uint32_t maxVal) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 8);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450FClamp);
  m_code.putWord(x); m_code.putWord(minVal); m_code.putWord(maxVal);
  return resultId;
}
// GLSL.std.450 NClamp: NaN-aware clamp.
uint32_t opNClamp(uint32_t resultType, uint32_t x, uint32_t minVal, uint32_t maxVal) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 8);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450NClamp);
  m_code.putWord(x); m_code.putWord(minVal); m_code.putWord(maxVal);
  return resultId;
}
// Integer comparisons, component-wise on vectors.
uint32_t opIEqual(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpIEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
uint32_t opINotEqual(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpINotEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
uint32_t opSLessThan(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpSLessThan, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
// (opSLessThanEqual's final operand word continues on the next source line)
uint32_t opSLessThanEqual(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpSLessThanEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(vector1);
// (continuation: final operand word of opSLessThanEqual)
m_code.putWord(vector2);
return resultId;
}
// Signed integer comparisons.
uint32_t opSGreaterThan(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpSGreaterThan, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
uint32_t opSGreaterThanEqual(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpSGreaterThanEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
// Unsigned integer comparisons.
uint32_t opULessThan(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpULessThan, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
uint32_t opULessThanEqual(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpULessThanEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
uint32_t opUGreaterThan(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpUGreaterThan, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
uint32_t opUGreaterThanEqual(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpUGreaterThanEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
// (opFOrdEqual's remaining parameters continue on the next source line)
uint32_t opFOrdEqual(uint32_t resultType,
// (continuation: remaining parameters and body of opFOrdEqual — ordered float equality)
uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFOrdEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
// Note the unordered variant for !=, so NaN compares unequal as GLSL requires.
uint32_t opFUnordNotEqual(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFUnordNotEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
// Ordered float relational comparisons.
uint32_t opFOrdLessThan(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFOrdLessThan, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
uint32_t opFOrdLessThanEqual(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFOrdLessThanEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
uint32_t opFOrdGreaterThan(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFOrdGreaterThan, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
uint32_t opFOrdGreaterThanEqual(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpFOrdGreaterThanEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
// (opLogicalEqual's body continues on the next source line)
uint32_t opLogicalEqual(uint32_t resultType, uint32_t operand1, uint32_t operand2) {
  uint32_t resultId =
// (continuation: body of opLogicalEqual — boolean equality)
this->allocateId();
m_code.putIns(spv::OpLogicalEqual, 5);
m_code.putWord(resultType); m_code.putWord(resultId);
m_code.putWord(operand1); m_code.putWord(operand2);
return resultId;
}
// Boolean operations.
uint32_t opLogicalNotEqual(uint32_t resultType, uint32_t operand1, uint32_t operand2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpLogicalNotEqual, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(operand1); m_code.putWord(operand2);
  return resultId;
}
uint32_t opLogicalAnd(uint32_t resultType, uint32_t operand1, uint32_t operand2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpLogicalAnd, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(operand1); m_code.putWord(operand2);
  return resultId;
}
uint32_t opLogicalOr(uint32_t resultType, uint32_t operand1, uint32_t operand2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpLogicalOr, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(operand1); m_code.putWord(operand2);
  return resultId;
}
uint32_t opLogicalNot(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpLogicalNot, 4);
  m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(operand);
  return resultId;
}
// OpDot: vector dot product.
uint32_t opDot(uint32_t resultType, uint32_t vector1, uint32_t vector2) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpDot, 5);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(vector1); m_code.putWord(vector2);
  return resultId;
}
// GLSL.std.450 Sin (final operand word continues on the next source line).
uint32_t opSin(uint32_t resultType, uint32_t vector) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Sin);
// (continuation: final operand word of opSin)
m_code.putWord(vector);
return resultId;
}
// GLSL.std.450 Cos.
uint32_t opCos(uint32_t resultType, uint32_t vector) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Cos);
  m_code.putWord(vector);
  return resultId;
}
// GLSL.std.450 Sqrt / InverseSqrt.
uint32_t opSqrt(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Sqrt);
  m_code.putWord(operand);
  return resultId;
}
uint32_t opInverseSqrt(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450InverseSqrt);
  m_code.putWord(operand);
  return resultId;
}
// GLSL.std.450 Normalize.
uint32_t opNormalize(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Normalize);
  m_code.putWord(operand);
  return resultId;
}
// SPV_NV_raw_access_chain; 'operand' is an optional trailing word (0 = absent),
// hence the conditional word count (continues on the next source line).
uint32_t opRawAccessChain(uint32_t resultType, uint32_t base, uint32_t stride, uint32_t index, uint32_t offset, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpRawAccessChainNV, operand ?
// (continuation: conditional word count and operand words of opRawAccessChain)
8 : 7);
m_code.putWord(resultType); m_code.putWord(resultId); m_code.putWord(base);
m_code.putWord(stride); m_code.putWord(index); m_code.putWord(offset);

// Optional access operand is only emitted when nonzero, matching the word count above.
if (operand)
  m_code.putWord(operand);

return resultId;
}
// GLSL.std.450 Reflect: incident - 2*dot(normal, incident)*normal.
uint32_t opReflect(uint32_t resultType, uint32_t incident, uint32_t normal) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 7);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Reflect);
  m_code.putWord(incident); m_code.putWord(normal);
  return resultId;
}
// GLSL.std.450 Length: vector magnitude.
uint32_t opLength(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Length);
  m_code.putWord(operand);
  return resultId;
}
// GLSL.std.450 Exp2 / Exp / Log2.
uint32_t opExp2(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Exp2);
  m_code.putWord(operand);
  return resultId;
}
uint32_t opExp(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Exp);
  m_code.putWord(operand);
  return resultId;
}
uint32_t opLog2(uint32_t resultType, uint32_t operand) {
  uint32_t resultId = this->allocateId();
  m_code.putIns(spv::OpExtInst, 6);
  m_code.putWord(resultType); m_code.putWord(resultId);
  m_code.putWord(m_instExtGlsl450); m_code.putWord(GLSLstd450Log2);
  m_code.putWord(operand);
  return resultId;
}
// (opPow's body continues on the next source line)
uint32_t opPow(uint32_t resultType, uint32_t base, uint32_t exponent) {
uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Pow);\n m_code.putWord(base);\n m_code.putWord(exponent);\n return resultId;\n }\n uint32_t opFract(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Fract);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opCeil(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Ceil);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFloor(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Floor);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRound(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Round);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opRoundEven(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450RoundEven);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opTrunc(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t 
resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450Trunc);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFConvert(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpFConvert, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opPackHalf2x16(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450PackHalf2x16);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opUnpackHalf2x16(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450UnpackHalf2x16);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opSelect(\n uint32_t resultType,\n uint32_t condition,\n uint32_t operand1,\n uint32_t operand2) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSelect, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(condition);\n m_code.putWord(operand1);\n m_code.putWord(operand2);\n return resultId;\n }\n uint32_t opIsNan(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIsNan, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opIsInf(\n uint32_t resultType,\n uint32_t operand) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpIsInf, 4);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(operand);\n return resultId;\n }\n uint32_t opFunctionCall(\n uint32_t resultType,\n uint32_t functionId,\n uint32_t argCount,\n const uint32_t* argIds) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpFunctionCall, 4 + argCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(functionId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_code.putWord(argIds[i]);\n return resultId;\n }\n void opLabel(\n uint32_t labelId) {\n m_code.putIns (spv::OpLabel, 2);\n m_code.putWord(labelId);\n\n m_blockId = labelId;\n }\n uint32_t opLoad(\n uint32_t typeId,\n uint32_t pointerId) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLoad, 4 + getMemoryOperandWordCount(operands));\n m_code.putWord(typeId);\n m_code.putWord(resultId);\n m_code.putWord(pointerId);\n\n putMemoryOperands(operands);\n return resultId;\n }\n uint32_t opLoad(\n uint32_t typeId,\n uint32_t pointerId,\n const SpirvMemoryOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpLoad, 4 + getMemoryOperandWordCount(operands));\n m_code.putWord(typeId);\n m_code.putWord(resultId);\n m_code.putWord(pointerId);\n\n putMemoryOperands(operands);\n return resultId;\n }\n void opStore(\n uint32_t pointerId,\n uint32_t valueId) {\n m_code.putIns (spv::OpStore, 3 + getMemoryOperandWordCount(operands));\n m_code.putWord(pointerId);\n m_code.putWord(valueId);\n\n putMemoryOperands(operands);\n }\n void opStore(\n uint32_t pointerId,\n uint32_t valueId,\n const SpirvMemoryOperands& operands) {\n m_code.putIns (spv::OpStore, 3 + getMemoryOperandWordCount(operands));\n m_code.putWord(pointerId);\n m_code.putWord(valueId);\n\n putMemoryOperands(operands);\n }\n uint32_t opInterpolateAtCentroid(\n uint32_t resultType,\n uint32_t interpolant) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 6);\n m_code.putWord(resultType);\n 
m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtCentroid);\n m_code.putWord(interpolant);\n return resultId;\n }\n uint32_t opInterpolateAtSample(\n uint32_t resultType,\n uint32_t interpolant,\n uint32_t sample) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtSample);\n m_code.putWord(interpolant);\n m_code.putWord(sample);\n return resultId;\n }\n uint32_t opInterpolateAtOffset(\n uint32_t resultType,\n uint32_t interpolant,\n uint32_t offset) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpExtInst, 7);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(m_instExtGlsl450);\n m_code.putWord(GLSLstd450InterpolateAtOffset);\n m_code.putWord(interpolant);\n m_code.putWord(offset);\n return resultId;\n }\n uint32_t opImage(\n uint32_t resultType,\n uint32_t sampledImage) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpImage, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n return resultId;\n }\n uint32_t opImageRead(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseRead\n : spv::OpImageRead;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n void opImageWrite(\n uint32_t image,\n uint32_t coordinates,\n uint32_t texel,\n const SpirvImageOperands& operands) {\n m_code.putIns (spv::OpImageWrite,\n 4 + getImageOperandWordCount(operands));\n m_code.putWord(image);\n m_code.putWord(coordinates);\n m_code.putWord(texel);\n \n putImageOperands(operands);\n }\n uint32_t opImageSparseTexelsResident(\n uint32_t resultType,\n uint32_t residentCode) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns (spv::OpImageSparseTexelsResident, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(residentCode);\n\n return resultId;\n }\n uint32_t opImageTexelPointer(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n uint32_t sample) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageTexelPointer, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n m_code.putWord(sample);\n return resultId;\n }\n uint32_t opSampledImage(\n uint32_t resultType,\n uint32_t image,\n uint32_t sampler) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpSampledImage, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(sampler);\n return resultId;\n }\n uint32_t opImageQuerySizeLod(\n uint32_t resultType,\n uint32_t image,\n uint32_t lod) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySizeLod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(lod);\n return resultId;\n }\n uint32_t opImageQuerySize(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = 
this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySize, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageQueryLevels(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQueryLevels, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageQueryLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQueryLod, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n return resultId;\n }\n uint32_t opImageQuerySamples(\n uint32_t resultType,\n uint32_t image) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpImageQuerySamples, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n return resultId;\n }\n uint32_t opImageFetch(\n uint32_t resultType,\n uint32_t image,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n\n spv::Op op = operands.sparse\n ? spv::OpImageSparseFetch\n : spv::OpImageFetch;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(image);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageGather(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t component,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseGather\n : spv::OpImageGather;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(component);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageDrefGather(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseDrefGather\n : spv::OpImageDrefGather;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleImplicitLod\n : spv::OpImageSampleImplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleExplicitLod\n : spv::OpImageSampleExplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjImplicitLod\n : spv::OpImageSampleProjImplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjExplicitLod\n : spv::OpImageSampleProjExplicitLod;\n\n m_code.putIns(op, 5 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleDrefImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleDrefImplicitLod\n : spv::OpImageSampleDrefImplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleDrefExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleDrefExplicitLod\n : spv::OpImageSampleDrefExplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjDrefImplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? spv::OpImageSparseSampleProjDrefImplicitLod\n : spv::OpImageSampleProjDrefImplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opImageSampleProjDrefExplicitLod(\n uint32_t resultType,\n uint32_t sampledImage,\n uint32_t coordinates,\n uint32_t reference,\n const SpirvImageOperands& operands) {\n uint32_t resultId = this->allocateId();\n \n spv::Op op = operands.sparse\n ? 
spv::OpImageSparseSampleProjDrefExplicitLod\n : spv::OpImageSampleProjDrefExplicitLod;\n\n m_code.putIns(op, 6 + getImageOperandWordCount(operands));\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(sampledImage);\n m_code.putWord(coordinates);\n m_code.putWord(reference);\n \n putImageOperands(operands);\n return resultId;\n }\n uint32_t opGroupNonUniformBallot(\n uint32_t resultType,\n uint32_t execution,\n uint32_t predicate) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBallot, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(predicate);\n return resultId;\n }\n uint32_t opGroupNonUniformBallotBitCount(\n uint32_t resultType,\n uint32_t execution,\n uint32_t operation,\n uint32_t ballot) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBallotBitCount, 6);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(operation);\n m_code.putWord(ballot);\n return resultId;\n }\n uint32_t opGroupNonUniformElect(\n uint32_t resultType,\n uint32_t execution) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformElect, 4);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n return resultId;\n }\n uint32_t opGroupNonUniformBroadcastFirst(\n uint32_t resultType,\n uint32_t execution,\n uint32_t value) {\n uint32_t resultId = this->allocateId();\n\n m_code.putIns(spv::OpGroupNonUniformBroadcastFirst, 5);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n m_code.putWord(execution);\n m_code.putWord(value);\n return resultId;\n }\n void opControlBarrier(\n uint32_t execution,\n uint32_t memory,\n uint32_t semantics) {\n m_code.putIns (spv::OpControlBarrier, 4);\n m_code.putWord(execution);\n m_code.putWord(memory);\n m_code.putWord(semantics);\n }\n void opMemoryBarrier(\n uint32_t memory,\n uint32_t 
semantics) {\n m_code.putIns (spv::OpMemoryBarrier, 3);\n m_code.putWord(memory);\n m_code.putWord(semantics);\n }\n void opLoopMerge(\n uint32_t mergeBlock,\n uint32_t continueTarget,\n uint32_t loopControl) {\n m_code.putIns (spv::OpLoopMerge, 4);\n m_code.putWord(mergeBlock);\n m_code.putWord(continueTarget);\n m_code.putWord(loopControl);\n }\n void opSelectionMerge(\n uint32_t mergeBlock,\n uint32_t selectionControl) {\n m_code.putIns (spv::OpSelectionMerge, 3);\n m_code.putWord(mergeBlock);\n m_code.putWord(selectionControl);\n }\n void opBranch(\n uint32_t label) {\n m_code.putIns (spv::OpBranch, 2);\n m_code.putWord(label);\n\n m_blockId = 0;\n }\n void opBranchConditional(\n uint32_t condition,\n uint32_t trueLabel,\n uint32_t falseLabel) {\n m_code.putIns (spv::OpBranchConditional, 4);\n m_code.putWord(condition);\n m_code.putWord(trueLabel);\n m_code.putWord(falseLabel);\n\n m_blockId = 0;\n }\n void opSwitch(\n uint32_t selector,\n uint32_t jumpDefault,\n uint32_t caseCount,\n const SpirvSwitchCaseLabel* caseLabels) {\n m_code.putIns (spv::OpSwitch, 3 + 2 * caseCount);\n m_code.putWord(selector);\n m_code.putWord(jumpDefault);\n \n for (uint32_t i = 0; i < caseCount; i++) {\n m_code.putWord(caseLabels[i].literal);\n m_code.putWord(caseLabels[i].labelId);\n }\n\n m_blockId = 0;\n }\n uint32_t opPhi(\n uint32_t resultType,\n uint32_t sourceCount,\n const SpirvPhiLabel* sourceLabels) {\n uint32_t resultId = this->allocateId();\n \n m_code.putIns (spv::OpPhi, 3 + 2 * sourceCount);\n m_code.putWord(resultType);\n m_code.putWord(resultId);\n \n for (uint32_t i = 0; i < sourceCount; i++) {\n m_code.putWord(sourceLabels[i].varId);\n m_code.putWord(sourceLabels[i].labelId);\n }\n \n return resultId;\n }\n void opReturn() {\n m_code.putIns (spv::OpReturn, 1);\n m_blockId = 0;\n }\n void opDemoteToHelperInvocation() {\n m_code.putIns (spv::OpDemoteToHelperInvocation, 1);\n }\n void opEmitVertex(\n uint32_t streamId) {\n if (streamId == 0) {\n m_code.putIns 
(spv::OpEmitVertex, 1);\n } else {\n m_code.putIns (spv::OpEmitStreamVertex, 2);\n m_code.putWord(streamId);\n }\n }\n void opEndPrimitive(\n uint32_t streamId) {\n if (streamId == 0) {\n m_code.putIns (spv::OpEndPrimitive, 1);\n } else {\n m_code.putIns (spv::OpEndStreamPrimitive, 2);\n m_code.putWord(streamId);\n }\n }\n void opBeginInvocationInterlock() {\n m_code.putIns(spv::OpBeginInvocationInterlockEXT, 1);\n }\n void opEndInvocationInterlock() {\n m_code.putIns(spv::OpEndInvocationInterlockEXT, 1);\n }\n uint32_t opSinCos(\n uint32_t x,\n bool useBuiltIn) {\n // We only operate on 32-bit floats here\n uint32_t floatType = defFloatType(32);\n uint32_t resultType = defVectorType(floatType, 2u);\n\n if (useBuiltIn) {\n std::array members = { opSin(floatType, x), opCos(floatType, x) };\n return opCompositeConstruct(resultType, members.size(), members.data());\n } else {\n uint32_t uintType = defIntType(32, false);\n uint32_t sintType = defIntType(32, true);\n uint32_t boolType = defBoolType();\n\n // Normalize input to multiple of pi/4\n uint32_t xNorm = opFMul(floatType, opFAbs(floatType, x), constf32(4.0 / pi));\n\n uint32_t xTrunc = opTrunc(floatType, xNorm);\n uint32_t xFract = opFSub(floatType, xNorm, xTrunc);\n\n uint32_t xInt = opConvertFtoU(uintType, xTrunc);\n\n // Mirror input along x axis as necessary\n uint32_t mirror = opINotEqual(boolType, opBitwiseAnd(uintType, xInt, constu32(1u)), constu32(0u));\n xFract = opSelect(floatType, mirror, opFSub(floatType, constf32(1.0f), xFract), xFract);\n\n // Compute taylor series for fractional part\n uint32_t xFract_2 = opFMul(floatType, xFract, xFract);\n uint32_t xFract_4 = opFMul(floatType, xFract_2, xFract_2);\n uint32_t xFract_6 = opFMul(floatType, xFract_4, xFract_2);\n\n uint32_t taylor = opFMul(floatType, xFract_6, constf32(-sincosTaylorFactor(7)));\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFFma(floatType, xFract_4, constf32(sincosTaylorFactor(5)), taylor);\n decorate(taylor, 
spv::DecorationNoContraction);\n\n taylor = opFFma(floatType, xFract_2, constf32(-sincosTaylorFactor(3)), taylor);\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFAdd(floatType, constf32(sincosTaylorFactor(1)), taylor);\n decorate(taylor, spv::DecorationNoContraction);\n\n taylor = opFMul(floatType, taylor, xFract);\n decorate(taylor, spv::DecorationNoContraction);\n\n // Compute co-function based on sin^2 + cos^2 = 1\n uint32_t coFunc = opSqrt(floatType, opFSub(floatType, constf32(1.0f), opFMul(floatType, taylor, taylor)));\n\n // Determine whether the taylor series was used for sine or cosine and assign the correct result\n uint32_t funcIsSin = opIEqual(boolType, opBitwiseAnd(uintType, opIAdd(uintType, xInt, constu32(1u)), constu32(2u)), constu32(0u));\n\n uint32_t sin = opSelect(floatType, funcIsSin, taylor, coFunc);\n uint32_t cos = opSelect(floatType, funcIsSin, coFunc, taylor);\n\n // Determine whether sine is negative. Interpret the input as a\n // signed integer in order to propagate signed zeroes properly.\n uint32_t inputNeg = opSLessThan(boolType, opBitcast(sintType, x), consti32(0));\n\n uint32_t sinNeg = opINotEqual(boolType, opBitwiseAnd(uintType, xInt, constu32(4u)), constu32(0u));\n sinNeg = opLogicalNotEqual(boolType, sinNeg, inputNeg);\n\n // Determine whether cosine is negative\n uint32_t cosNeg = opINotEqual(boolType, opBitwiseAnd(uintType, opIAdd(uintType, xInt, constu32(2u)), constu32(4u)), constu32(0u));\n\n sin = opSelect(floatType, sinNeg, opFNegate(floatType, sin), sin);\n cos = opSelect(floatType, cosNeg, opFNegate(floatType, cos), cos);\n\n std::array members = { sin, cos };\n return opCompositeConstruct(resultType, members.size(), members.data());\n }\n }\n private:\n uint32_t m_version;\n uint32_t m_id = 1;\n uint32_t m_instExtGlsl450 = 0;\n uint32_t m_blockId = 0;\n SpirvCodeBuffer m_capabilities;\n SpirvCodeBuffer m_extensions;\n SpirvCodeBuffer m_instExt;\n SpirvCodeBuffer m_memoryModel;\n SpirvCodeBuffer 
m_entryPoints;\n SpirvCodeBuffer m_execModeInfo;\n SpirvCodeBuffer m_debugNames;\n SpirvCodeBuffer m_annotations;\n SpirvCodeBuffer m_typeConstDefs;\n SpirvCodeBuffer m_variables;\n SpirvCodeBuffer m_code;\n std::unordered_set m_lateConsts;\n std::vector m_interfaceVars;\n uint32_t defType(\n spv::Op op, \n uint32_t argCount,\n const uint32_t* argIds) {\n // Since the type info is stored in the code buffer,\n // we can use the code buffer to look up type IDs as\n // well. Result IDs are always stored as argument 1.\n for (auto ins : m_typeConstDefs) {\n bool match = ins.opCode() == op\n && ins.length() == 2 + argCount;\n \n for (uint32_t i = 0; i < argCount && match; i++)\n match &= ins.arg(2 + i) == argIds[i];\n \n if (match)\n return ins.arg(1);\n }\n \n // Type not yet declared, create a new one.\n uint32_t resultId = this->allocateId();\n m_typeConstDefs.putIns (op, 2 + argCount);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_typeConstDefs.putWord(argIds[i]);\n return resultId;\n }\n uint32_t defConst(\n spv::Op op,\n uint32_t typeId,\n uint32_t argCount,\n const uint32_t* argIds) {\n // Avoid declaring constants multiple times\n for (auto ins : m_typeConstDefs) {\n bool match = ins.opCode() == op\n && ins.length() == 3 + argCount\n && ins.arg(1) == typeId;\n \n for (uint32_t i = 0; i < argCount && match; i++)\n match &= ins.arg(3 + i) == argIds[i];\n \n if (!match)\n continue;\n \n uint32_t id = ins.arg(2);\n\n if (m_lateConsts.find(id) == m_lateConsts.end())\n return id;\n }\n \n // Constant not yet declared, make a new one\n uint32_t resultId = this->allocateId();\n m_typeConstDefs.putIns (op, 3 + argCount);\n m_typeConstDefs.putWord(typeId);\n m_typeConstDefs.putWord(resultId);\n \n for (uint32_t i = 0; i < argCount; i++)\n m_typeConstDefs.putWord(argIds[i]);\n return resultId;\n }\n void instImportGlsl450() {\n m_instExtGlsl450 = this->allocateId();\n const char* name = \"GLSL.std.450\";\n \n m_instExt.putIns 
(spv::OpExtInstImport, 2 + m_instExt.strLen(name));\n m_instExt.putWord(m_instExtGlsl450);\n m_instExt.putStr (name);\n }\n uint32_t getMemoryOperandWordCount(\n const SpirvMemoryOperands& op) const {\n const uint32_t result\n = ((op.flags & spv::MemoryAccessAlignedMask) ? 1 : 0)\n + ((op.flags & spv::MemoryAccessMakePointerAvailableMask) ? 1 : 0)\n + ((op.flags & spv::MemoryAccessMakePointerVisibleMask) ? 1 : 0);\n\n return op.flags ? result + 1 : 0;\n }\n void putMemoryOperands(\n const SpirvMemoryOperands& op) {\n if (op.flags) {\n m_code.putWord(op.flags);\n\n if (op.flags & spv::MemoryAccessAlignedMask)\n m_code.putWord(op.alignment);\n\n if (op.flags & spv::MemoryAccessMakePointerAvailableMask)\n m_code.putWord(op.makeAvailable);\n\n if (op.flags & spv::MemoryAccessMakePointerVisibleMask)\n m_code.putWord(op.makeVisible);\n }\n }\n uint32_t getImageOperandWordCount(\n const SpirvImageOperands& op) const {\n // Each flag may add one or more operands\n const uint32_t result\n = ((op.flags & spv::ImageOperandsBiasMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsLodMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsConstOffsetMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsGradMask) ? 2 : 0)\n + ((op.flags & spv::ImageOperandsOffsetMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsConstOffsetsMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsSampleMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMinLodMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMakeTexelAvailableMask) ? 1 : 0)\n + ((op.flags & spv::ImageOperandsMakeTexelVisibleMask) ? 1 : 0);\n \n // Add a DWORD for the operand mask if it is non-zero\n return op.flags ? 
result + 1 : 0;\n }\n void putImageOperands(\n const SpirvImageOperands& op) {\n if (op.flags) {\n m_code.putWord(op.flags);\n \n if (op.flags & spv::ImageOperandsBiasMask)\n m_code.putWord(op.sLodBias);\n \n if (op.flags & spv::ImageOperandsLodMask)\n m_code.putWord(op.sLod);\n\n if (op.flags & spv::ImageOperandsGradMask) {\n m_code.putWord(op.sGradX);\n m_code.putWord(op.sGradY);\n }\n\n if (op.flags & spv::ImageOperandsConstOffsetMask)\n m_code.putWord(op.sConstOffset);\n\n if (op.flags & spv::ImageOperandsOffsetMask)\n m_code.putWord(op.gOffset);\n \n if (op.flags & spv::ImageOperandsConstOffsetsMask)\n m_code.putWord(op.gConstOffsets);\n \n if (op.flags & spv::ImageOperandsSampleMask)\n m_code.putWord(op.sSampleId);\n \n if (op.flags & spv::ImageOperandsMinLodMask)\n m_code.putWord(op.sMinLod);\n\n if (op.flags & spv::ImageOperandsMakeTexelAvailableMask)\n m_code.putWord(op.makeAvailable);\n\n if (op.flags & spv::ImageOperandsMakeTexelVisibleMask)\n m_code.putWord(op.makeVisible);\n }\n }\n bool isInterfaceVar(\n spv::StorageClass sclass) const {\n if (m_version < spvVersion(1, 4)) {\n return sclass == spv::StorageClassInput\n || sclass == spv::StorageClassOutput;\n } else {\n // All global variables need to be declared\n return sclass != spv::StorageClassFunction;\n }\n }\n void classifyBlocks(\n std::unordered_set& reachableBlocks,\n std::unordered_set& mergeBlocks) {\n std::unordered_multimap branches;\n std::queue blockQueue;\n\n uint32_t blockId = 0;\n\n for (auto ins : m_code) {\n switch (ins.opCode()) {\n case spv::OpLabel: {\n uint32_t id = ins.arg(1);\n\n if (!blockId)\n branches.insert({ 0u, id });\n\n blockId = id;\n } break;\n\n case spv::OpFunction: {\n blockId = 0u;\n } break;\n\n case spv::OpBranch: {\n branches.insert({ blockId, ins.arg(1) });\n } break;\n\n case spv::OpBranchConditional: {\n branches.insert({ blockId, ins.arg(2) });\n branches.insert({ blockId, ins.arg(3) });\n } break;\n\n case spv::OpSwitch: {\n branches.insert({ blockId, 
ins.arg(2) });\n\n for (uint32_t i = 4; i < ins.length(); i += 2)\n branches.insert({ blockId, ins.arg(i) });\n } break;\n\n case spv::OpSelectionMerge: {\n mergeBlocks.insert(ins.arg(1));\n } break;\n\n case spv::OpLoopMerge: {\n mergeBlocks.insert(ins.arg(1));\n\n // It is possible for the continue block to be unreachable in\n // practice, but we still need to emit it if we are not going\n // to eliminate this loop. Since the current block dominates\n // the loop, use it to keep the continue block intact.\n branches.insert({ blockId, ins.arg(2) });\n } break;\n\n default:;\n }\n }\n\n blockQueue.push(0);\n\n while (!blockQueue.empty()) {\n uint32_t id = blockQueue.front();\n\n auto range = branches.equal_range(id);\n\n for (auto i = range.first; i != range.second; i++) {\n if (reachableBlocks.insert(i->second).second)\n blockQueue.push(i->second);\n }\n\n blockQueue.pop();\n }\n }\n};"], ["/lsfg-vk/src/utils/benchmark.cpp", "#include \"utils/benchmark.hpp\"\n#include \"config/config.hpp\"\n#include \"extract/extract.hpp\"\n#include \"extract/trans.hpp\"\n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Benchmark;\n\nvoid Benchmark::run(uint32_t width, uint32_t height) {\n const auto& conf = Config::activeConf;\n\n auto* lsfgInitialize = LSFG_3_1::initialize;\n auto* lsfgCreateContext = LSFG_3_1::createContext;\n auto* lsfgPresentContext = LSFG_3_1::presentContext;\n if (conf.performance) {\n lsfgInitialize = LSFG_3_1P::initialize;\n lsfgCreateContext = LSFG_3_1P::createContext;\n lsfgPresentContext = LSFG_3_1P::presentContext;\n }\n\n // create the benchmark context\n const char* lsfgDeviceUUID = std::getenv(\"LSFG_DEVICE_UUID\");\n const uint64_t deviceUUID = lsfgDeviceUUID\n ? 
std::stoull(std::string(lsfgDeviceUUID), nullptr, 16) : 0x1463ABAC;\n\n setenv(\"DISABLE_LSFG\", \"1\", 1); // NOLINT\n\n Extract::extractShaders();\n lsfgInitialize(\n deviceUUID, // some magic number if not given\n conf.hdr, 1.0F / conf.flowScale, conf.multiplier - 1,\n [](const std::string& name) -> std::vector {\n auto dxbc = Extract::getShader(name);\n auto spirv = Extract::translateShader(dxbc);\n return spirv;\n }\n );\n const int32_t ctx = lsfgCreateContext(-1, -1, {},\n { .width = width, .height = height },\n conf.hdr ? VK_FORMAT_R16G16B16A16_SFLOAT : VK_FORMAT_R8G8B8A8_UNORM\n );\n\n unsetenv(\"DISABLE_LSFG\"); // NOLINT\n\n // run the benchmark (run 8*n + 1 so the fences are waited on)\n const auto now = std::chrono::high_resolution_clock::now();\n const uint64_t iterations = 8 * 500UL;\n\n std::cerr << \"lsfg-vk: Benchmark started, running \" << iterations << \" iterations...\\n\";\n for (uint64_t count = 0; count < iterations + 1; count++) {\n lsfgPresentContext(ctx, -1, {});\n\n if (count % 50 == 0 && count > 0)\n std::cerr << \"lsfg-vk: \"\n << std::setprecision(2) << std::fixed\n << static_cast(count) / static_cast(iterations) * 100.0F\n << \"% done (\" << count + 1 << \"/\" << iterations << \")\\r\";\n }\n const auto then = std::chrono::high_resolution_clock::now();\n\n // print results\n const auto ms = std::chrono::duration_cast(then - now).count();\n\n const auto perIteration = static_cast(ms) / static_cast(iterations);\n\n const uint64_t totalGen = (conf.multiplier - 1) * iterations;\n const auto genFps = static_cast(totalGen) / (static_cast(ms) / 1000.0F);\n\n const uint64_t totalFrames = iterations * conf.multiplier;\n const auto totalFps = static_cast(totalFrames) / (static_cast(ms) / 1000.0F);\n\n std::cerr << \"lsfg-vk: Benchmark completed in \" << ms << \" ms\\n\";\n std::cerr << \" Time taken per real frame: \"\n << std::setprecision(2) << std::fixed << perIteration << \" ms\\n\";\n std::cerr << \" Generated \" << totalGen << \" frames 
in total at \"\n << std::setprecision(2) << std::fixed << genFps << \" FPS\\n\";\n std::cerr << \" Total of \" << totalFrames << \" frames presented at \"\n << std::setprecision(2) << std::fixed << totalFps << \" FPS\\n\";\n\n // sleep for a second, then exit\n std::this_thread::sleep_for(std::chrono::seconds(1));\n _exit(0);\n}\n"], ["/lsfg-vk/thirdparty/volk/volk.h", "/**\n * volk\n *\n * Copyright (C) 2018-2025, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)\n * Report bugs and download new versions at https://github.com/zeux/volk\n *\n * This library is distributed under the MIT License. See notice at the end of this file.\n */\n/* clang-format off */\n#ifndef VOLK_H_\n#define VOLK_H_\n\n#if defined(VULKAN_H_) && !defined(VK_NO_PROTOTYPES)\n#\terror To use volk, you need to define VK_NO_PROTOTYPES before including vulkan.h\n#endif\n\n/* VOLK_GENERATE_VERSION_DEFINE */\n#define VOLK_HEADER_VERSION 323\n/* VOLK_GENERATE_VERSION_DEFINE */\n\n#ifndef VK_NO_PROTOTYPES\n#\tdefine VK_NO_PROTOTYPES\n#endif\n\n#ifndef VULKAN_H_\n#\tifdef VOLK_VULKAN_H_PATH\n#\t\tinclude VOLK_VULKAN_H_PATH\n#\telse /* Platform headers included below */\n#\t\tinclude \n#\t\tinclude \n#\tendif\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nstruct VolkDeviceTable;\n\n/**\n * Initialize library by loading Vulkan loader; call this function before creating the Vulkan instance.\n *\n * Returns VK_SUCCESS on success and VK_ERROR_INITIALIZATION_FAILED otherwise.\n */\nVkResult volkInitialize(void);\n\n/**\n * Initialize library by providing a custom handler to load global symbols.\n *\n * This function can be used instead of volkInitialize.\n * The handler function pointer will be asked to load global Vulkan symbols which require no instance\n * (such as vkCreateInstance, vkEnumerateInstance* and vkEnumerateInstanceVersion if available).\n */\nvoid volkInitializeCustom(PFN_vkGetInstanceProcAddr handler);\n\n/**\n * Finalize library by unloading Vulkan loader and resetting global 
symbols to NULL.\n *\n * This function does not need to be called on process exit (as loader will be unloaded automatically) or if volkInitialize failed.\n * In general this function is optional to call but may be useful in rare cases eg if volk needs to be reinitialized multiple times.\n */\nvoid volkFinalize(void);\n\n/**\n * Get Vulkan instance version supported by the Vulkan loader, or 0 if Vulkan isn't supported\n *\n * Returns 0 if volkInitialize wasn't called or failed.\n */\nuint32_t volkGetInstanceVersion(void);\n\n/**\n * Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.\n */\nvoid volkLoadInstance(VkInstance instance);\n\n/**\n * Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.\n * Skips loading device-based function pointers, requires usage of volkLoadDevice afterwards.\n */\nvoid volkLoadInstanceOnly(VkInstance instance);\n\n/**\n * Load global function pointers using application-created VkDevice; call this function after creating the Vulkan device.\n *\n * Note: this is not suitable for applications that want to use multiple VkDevice objects concurrently.\n */\nvoid volkLoadDevice(VkDevice device);\n\n/**\n * Return last VkInstance for which global function pointers have been loaded via volkLoadInstance(),\n * or VK_NULL_HANDLE if volkLoadInstance() has not been called.\n */\nVkInstance volkGetLoadedInstance(void);\n\n/**\n * Return last VkDevice for which global function pointers have been loaded via volkLoadDevice(),\n * or VK_NULL_HANDLE if volkLoadDevice() has not been called.\n */\nVkDevice volkGetLoadedDevice(void);\n\n/**\n * Load function pointers using application-created VkDevice into a table.\n * Application should use function pointers from that table instead of using global function pointers.\n */\nvoid volkLoadDeviceTable(struct VolkDeviceTable* table, VkDevice device);\n\n#ifdef 
__cplusplus\n}\n#endif\n\n/* Instead of directly including vulkan.h, we include platform-specific parts of the SDK manually\n * This is necessary to avoid including platform headers in some cases (which vulkan.h does unconditionally)\n * and replace them with forward declarations, which makes build times faster and avoids macro conflicts.\n *\n * Note that we only replace platform-specific headers when the headers are known to be problematic: very large\n * or slow to compile (Windows), or introducing unprefixed macros which can cause conflicts (Windows, Xlib).\n */\n#if !defined(VULKAN_H_) && !defined(VOLK_VULKAN_H_PATH)\n\n#ifdef VK_USE_PLATFORM_ANDROID_KHR\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_FUCHSIA\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_IOS_MVK\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_MACOS_MVK\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_METAL_EXT\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_VI_NN\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_WAYLAND_KHR\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_WIN32_KHR\ntypedef unsigned long DWORD;\ntypedef const wchar_t* LPCWSTR;\ntypedef void* HANDLE;\ntypedef struct HINSTANCE__* HINSTANCE;\ntypedef struct HWND__* HWND;\ntypedef struct HMONITOR__* HMONITOR;\ntypedef struct _SECURITY_ATTRIBUTES SECURITY_ATTRIBUTES;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XCB_KHR\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XLIB_KHR\ntypedef struct _XDisplay Display;\ntypedef unsigned long Window;\ntypedef unsigned long VisualID;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_DIRECTFB_EXT\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT\ntypedef struct _XDisplay Display;\ntypedef unsigned long RROutput;\n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_GGP\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_SCREEN_QNX\n#include \n#include \n#endif\n\n#ifdef VK_USE_PLATFORM_SCI\n#include \n#include \n#include \n#endif\n\n#ifdef VK_ENABLE_BETA_EXTENSIONS\n#include 
\n#endif\n\n#endif\n\n/**\n * Device-specific function pointer table\n */\nstruct VolkDeviceTable\n{\n\t/* VOLK_GENERATE_DEVICE_TABLE */\n#if defined(VK_VERSION_1_0)\n\tPFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\n\tPFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\n\tPFN_vkAllocateMemory vkAllocateMemory;\n\tPFN_vkBeginCommandBuffer vkBeginCommandBuffer;\n\tPFN_vkBindBufferMemory vkBindBufferMemory;\n\tPFN_vkBindImageMemory vkBindImageMemory;\n\tPFN_vkCmdBeginQuery vkCmdBeginQuery;\n\tPFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\n\tPFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;\n\tPFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\n\tPFN_vkCmdBindPipeline vkCmdBindPipeline;\n\tPFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\n\tPFN_vkCmdBlitImage vkCmdBlitImage;\n\tPFN_vkCmdClearAttachments vkCmdClearAttachments;\n\tPFN_vkCmdClearColorImage vkCmdClearColorImage;\n\tPFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\n\tPFN_vkCmdCopyBuffer vkCmdCopyBuffer;\n\tPFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\n\tPFN_vkCmdCopyImage vkCmdCopyImage;\n\tPFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\n\tPFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\n\tPFN_vkCmdDispatch vkCmdDispatch;\n\tPFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\n\tPFN_vkCmdDraw vkCmdDraw;\n\tPFN_vkCmdDrawIndexed vkCmdDrawIndexed;\n\tPFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;\n\tPFN_vkCmdDrawIndirect vkCmdDrawIndirect;\n\tPFN_vkCmdEndQuery vkCmdEndQuery;\n\tPFN_vkCmdEndRenderPass vkCmdEndRenderPass;\n\tPFN_vkCmdExecuteCommands vkCmdExecuteCommands;\n\tPFN_vkCmdFillBuffer vkCmdFillBuffer;\n\tPFN_vkCmdNextSubpass vkCmdNextSubpass;\n\tPFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\n\tPFN_vkCmdPushConstants vkCmdPushConstants;\n\tPFN_vkCmdResetEvent vkCmdResetEvent;\n\tPFN_vkCmdResetQueryPool vkCmdResetQueryPool;\n\tPFN_vkCmdResolveImage vkCmdResolveImage;\n\tPFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\n\tPFN_vkCmdSetDepthBias 
vkCmdSetDepthBias;\n\tPFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\n\tPFN_vkCmdSetEvent vkCmdSetEvent;\n\tPFN_vkCmdSetLineWidth vkCmdSetLineWidth;\n\tPFN_vkCmdSetScissor vkCmdSetScissor;\n\tPFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\n\tPFN_vkCmdSetStencilReference vkCmdSetStencilReference;\n\tPFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\n\tPFN_vkCmdSetViewport vkCmdSetViewport;\n\tPFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\n\tPFN_vkCmdWaitEvents vkCmdWaitEvents;\n\tPFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\n\tPFN_vkCreateBuffer vkCreateBuffer;\n\tPFN_vkCreateBufferView vkCreateBufferView;\n\tPFN_vkCreateCommandPool vkCreateCommandPool;\n\tPFN_vkCreateComputePipelines vkCreateComputePipelines;\n\tPFN_vkCreateDescriptorPool vkCreateDescriptorPool;\n\tPFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\n\tPFN_vkCreateEvent vkCreateEvent;\n\tPFN_vkCreateFence vkCreateFence;\n\tPFN_vkCreateFramebuffer vkCreateFramebuffer;\n\tPFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\n\tPFN_vkCreateImage vkCreateImage;\n\tPFN_vkCreateImageView vkCreateImageView;\n\tPFN_vkCreatePipelineCache vkCreatePipelineCache;\n\tPFN_vkCreatePipelineLayout vkCreatePipelineLayout;\n\tPFN_vkCreateQueryPool vkCreateQueryPool;\n\tPFN_vkCreateRenderPass vkCreateRenderPass;\n\tPFN_vkCreateSampler vkCreateSampler;\n\tPFN_vkCreateSemaphore vkCreateSemaphore;\n\tPFN_vkCreateShaderModule vkCreateShaderModule;\n\tPFN_vkDestroyBuffer vkDestroyBuffer;\n\tPFN_vkDestroyBufferView vkDestroyBufferView;\n\tPFN_vkDestroyCommandPool vkDestroyCommandPool;\n\tPFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\n\tPFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\n\tPFN_vkDestroyDevice vkDestroyDevice;\n\tPFN_vkDestroyEvent vkDestroyEvent;\n\tPFN_vkDestroyFence vkDestroyFence;\n\tPFN_vkDestroyFramebuffer vkDestroyFramebuffer;\n\tPFN_vkDestroyImage vkDestroyImage;\n\tPFN_vkDestroyImageView vkDestroyImageView;\n\tPFN_vkDestroyPipeline 
vkDestroyPipeline;\n\tPFN_vkDestroyPipelineCache vkDestroyPipelineCache;\n\tPFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\n\tPFN_vkDestroyQueryPool vkDestroyQueryPool;\n\tPFN_vkDestroyRenderPass vkDestroyRenderPass;\n\tPFN_vkDestroySampler vkDestroySampler;\n\tPFN_vkDestroySemaphore vkDestroySemaphore;\n\tPFN_vkDestroyShaderModule vkDestroyShaderModule;\n\tPFN_vkDeviceWaitIdle vkDeviceWaitIdle;\n\tPFN_vkEndCommandBuffer vkEndCommandBuffer;\n\tPFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\n\tPFN_vkFreeCommandBuffers vkFreeCommandBuffers;\n\tPFN_vkFreeDescriptorSets vkFreeDescriptorSets;\n\tPFN_vkFreeMemory vkFreeMemory;\n\tPFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\n\tPFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\n\tPFN_vkGetDeviceQueue vkGetDeviceQueue;\n\tPFN_vkGetEventStatus vkGetEventStatus;\n\tPFN_vkGetFenceStatus vkGetFenceStatus;\n\tPFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\n\tPFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\n\tPFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\n\tPFN_vkGetPipelineCacheData vkGetPipelineCacheData;\n\tPFN_vkGetQueryPoolResults vkGetQueryPoolResults;\n\tPFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\n\tPFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\n\tPFN_vkMapMemory vkMapMemory;\n\tPFN_vkMergePipelineCaches vkMergePipelineCaches;\n\tPFN_vkQueueBindSparse vkQueueBindSparse;\n\tPFN_vkQueueSubmit vkQueueSubmit;\n\tPFN_vkQueueWaitIdle vkQueueWaitIdle;\n\tPFN_vkResetCommandBuffer vkResetCommandBuffer;\n\tPFN_vkResetCommandPool vkResetCommandPool;\n\tPFN_vkResetDescriptorPool vkResetDescriptorPool;\n\tPFN_vkResetEvent vkResetEvent;\n\tPFN_vkResetFences vkResetFences;\n\tPFN_vkSetEvent vkSetEvent;\n\tPFN_vkUnmapMemory vkUnmapMemory;\n\tPFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\n\tPFN_vkWaitForFences vkWaitForFences;\n#else\n\tPFN_vkVoidFunction padding_6ce80d51[120];\n#endif /* 
defined(VK_VERSION_1_0) */\n#if defined(VK_VERSION_1_1)\n\tPFN_vkBindBufferMemory2 vkBindBufferMemory2;\n\tPFN_vkBindImageMemory2 vkBindImageMemory2;\n\tPFN_vkCmdDispatchBase vkCmdDispatchBase;\n\tPFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;\n\tPFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;\n\tPFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;\n\tPFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;\n\tPFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;\n\tPFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;\n\tPFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;\n\tPFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;\n\tPFN_vkGetDeviceQueue2 vkGetDeviceQueue2;\n\tPFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\n\tPFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;\n\tPFN_vkTrimCommandPool vkTrimCommandPool;\n\tPFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;\n#else\n\tPFN_vkVoidFunction padding_1ec56847[16];\n#endif /* defined(VK_VERSION_1_1) */\n#if defined(VK_VERSION_1_2)\n\tPFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;\n\tPFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;\n\tPFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;\n\tPFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;\n\tPFN_vkCmdNextSubpass2 vkCmdNextSubpass2;\n\tPFN_vkCreateRenderPass2 vkCreateRenderPass2;\n\tPFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;\n\tPFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;\n\tPFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;\n\tPFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;\n\tPFN_vkResetQueryPool vkResetQueryPool;\n\tPFN_vkSignalSemaphore vkSignalSemaphore;\n\tPFN_vkWaitSemaphores vkWaitSemaphores;\n#else\n\tPFN_vkVoidFunction padding_a3e00662[13];\n#endif /* defined(VK_VERSION_1_2) */\n#if 
defined(VK_VERSION_1_3)\n\tPFN_vkCmdBeginRendering vkCmdBeginRendering;\n\tPFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;\n\tPFN_vkCmdBlitImage2 vkCmdBlitImage2;\n\tPFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;\n\tPFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;\n\tPFN_vkCmdCopyImage2 vkCmdCopyImage2;\n\tPFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;\n\tPFN_vkCmdEndRendering vkCmdEndRendering;\n\tPFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\n\tPFN_vkCmdResetEvent2 vkCmdResetEvent2;\n\tPFN_vkCmdResolveImage2 vkCmdResolveImage2;\n\tPFN_vkCmdSetCullMode vkCmdSetCullMode;\n\tPFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;\n\tPFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;\n\tPFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;\n\tPFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;\n\tPFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;\n\tPFN_vkCmdSetEvent2 vkCmdSetEvent2;\n\tPFN_vkCmdSetFrontFace vkCmdSetFrontFace;\n\tPFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;\n\tPFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;\n\tPFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;\n\tPFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;\n\tPFN_vkCmdSetStencilOp vkCmdSetStencilOp;\n\tPFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;\n\tPFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;\n\tPFN_vkCmdWaitEvents2 vkCmdWaitEvents2;\n\tPFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;\n\tPFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;\n\tPFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;\n\tPFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;\n\tPFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;\n\tPFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;\n\tPFN_vkGetPrivateData vkGetPrivateData;\n\tPFN_vkQueueSubmit2 vkQueueSubmit2;\n\tPFN_vkSetPrivateData vkSetPrivateData;\n#else\n\tPFN_vkVoidFunction 
padding_ee798a88[36];\n#endif /* defined(VK_VERSION_1_3) */\n#if defined(VK_VERSION_1_4)\n\tPFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2;\n\tPFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2;\n\tPFN_vkCmdPushConstants2 vkCmdPushConstants2;\n\tPFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet;\n\tPFN_vkCmdPushDescriptorSet2 vkCmdPushDescriptorSet2;\n\tPFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate;\n\tPFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2;\n\tPFN_vkCmdSetLineStipple vkCmdSetLineStipple;\n\tPFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations;\n\tPFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices;\n\tPFN_vkCopyImageToImage vkCopyImageToImage;\n\tPFN_vkCopyImageToMemory vkCopyImageToMemory;\n\tPFN_vkCopyMemoryToImage vkCopyMemoryToImage;\n\tPFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout;\n\tPFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2;\n\tPFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity;\n\tPFN_vkMapMemory2 vkMapMemory2;\n\tPFN_vkTransitionImageLayout vkTransitionImageLayout;\n\tPFN_vkUnmapMemory2 vkUnmapMemory2;\n#else\n\tPFN_vkVoidFunction padding_82585fa3[19];\n#endif /* defined(VK_VERSION_1_4) */\n#if defined(VK_AMDX_shader_enqueue)\n\tPFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;\n\tPFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;\n\tPFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;\n\tPFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;\n\tPFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;\n\tPFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;\n\tPFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;\n#else\n\tPFN_vkVoidFunction padding_9d3e2bba[7];\n#endif /* defined(VK_AMDX_shader_enqueue) */\n#if 
defined(VK_AMD_anti_lag)\n\tPFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD;\n#else\n\tPFN_vkVoidFunction padding_cf792fb4[1];\n#endif /* defined(VK_AMD_anti_lag) */\n#if defined(VK_AMD_buffer_marker)\n\tPFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;\n#else\n\tPFN_vkVoidFunction padding_7836e92f[1];\n#endif /* defined(VK_AMD_buffer_marker) */\n#if defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\n\tPFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;\n#else\n\tPFN_vkVoidFunction padding_bbf9b7bb[1];\n#endif /* defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_AMD_display_native_hdr)\n\tPFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;\n#else\n\tPFN_vkVoidFunction padding_6b81b2fb[1];\n#endif /* defined(VK_AMD_display_native_hdr) */\n#if defined(VK_AMD_draw_indirect_count)\n\tPFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;\n\tPFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;\n#else\n\tPFN_vkVoidFunction padding_fbfa9964[2];\n#endif /* defined(VK_AMD_draw_indirect_count) */\n#if defined(VK_AMD_shader_info)\n\tPFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;\n#else\n\tPFN_vkVoidFunction padding_bfb754b[1];\n#endif /* defined(VK_AMD_shader_info) */\n#if defined(VK_ANDROID_external_memory_android_hardware_buffer)\n\tPFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;\n\tPFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;\n#else\n\tPFN_vkVoidFunction padding_c67b1beb[2];\n#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */\n#if defined(VK_ARM_data_graph)\n\tPFN_vkBindDataGraphPipelineSessionMemoryARM vkBindDataGraphPipelineSessionMemoryARM;\n\tPFN_vkCmdDispatchDataGraphARM vkCmdDispatchDataGraphARM;\n\tPFN_vkCreateDataGraphPipelineSessionARM vkCreateDataGraphPipelineSessionARM;\n\tPFN_vkCreateDataGraphPipelinesARM 
vkCreateDataGraphPipelinesARM;\n\tPFN_vkDestroyDataGraphPipelineSessionARM vkDestroyDataGraphPipelineSessionARM;\n\tPFN_vkGetDataGraphPipelineAvailablePropertiesARM vkGetDataGraphPipelineAvailablePropertiesARM;\n\tPFN_vkGetDataGraphPipelinePropertiesARM vkGetDataGraphPipelinePropertiesARM;\n\tPFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM vkGetDataGraphPipelineSessionBindPointRequirementsARM;\n\tPFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM vkGetDataGraphPipelineSessionMemoryRequirementsARM;\n#else\n\tPFN_vkVoidFunction padding_894d85d8[9];\n#endif /* defined(VK_ARM_data_graph) */\n#if defined(VK_ARM_tensors)\n\tPFN_vkBindTensorMemoryARM vkBindTensorMemoryARM;\n\tPFN_vkCmdCopyTensorARM vkCmdCopyTensorARM;\n\tPFN_vkCreateTensorARM vkCreateTensorARM;\n\tPFN_vkCreateTensorViewARM vkCreateTensorViewARM;\n\tPFN_vkDestroyTensorARM vkDestroyTensorARM;\n\tPFN_vkDestroyTensorViewARM vkDestroyTensorViewARM;\n\tPFN_vkGetDeviceTensorMemoryRequirementsARM vkGetDeviceTensorMemoryRequirementsARM;\n\tPFN_vkGetTensorMemoryRequirementsARM vkGetTensorMemoryRequirementsARM;\n#else\n\tPFN_vkVoidFunction padding_df67a729[8];\n#endif /* defined(VK_ARM_tensors) */\n#if defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer)\n\tPFN_vkGetTensorOpaqueCaptureDescriptorDataARM vkGetTensorOpaqueCaptureDescriptorDataARM;\n\tPFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM vkGetTensorViewOpaqueCaptureDescriptorDataARM;\n#else\n\tPFN_vkVoidFunction padding_9483bf7e[2];\n#endif /* defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)\n\tPFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_760a41f5[1];\n#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */\n#if defined(VK_EXT_buffer_device_address)\n\tPFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;\n#else\n\tPFN_vkVoidFunction padding_3b69d885[1];\n#endif /* 
defined(VK_EXT_buffer_device_address) */\n#if defined(VK_EXT_calibrated_timestamps)\n\tPFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;\n#else\n\tPFN_vkVoidFunction padding_d0981c89[1];\n#endif /* defined(VK_EXT_calibrated_timestamps) */\n#if defined(VK_EXT_color_write_enable)\n\tPFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_d301ecc3[1];\n#endif /* defined(VK_EXT_color_write_enable) */\n#if defined(VK_EXT_conditional_rendering)\n\tPFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;\n\tPFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;\n#else\n\tPFN_vkVoidFunction padding_ab532c18[2];\n#endif /* defined(VK_EXT_conditional_rendering) */\n#if defined(VK_EXT_debug_marker)\n\tPFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;\n\tPFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;\n\tPFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;\n\tPFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;\n\tPFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;\n#else\n\tPFN_vkVoidFunction padding_89986968[5];\n#endif /* defined(VK_EXT_debug_marker) */\n#if defined(VK_EXT_depth_bias_control)\n\tPFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;\n#else\n\tPFN_vkVoidFunction padding_bcddab4d[1];\n#endif /* defined(VK_EXT_depth_bias_control) */\n#if defined(VK_EXT_descriptor_buffer)\n\tPFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;\n\tPFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;\n\tPFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;\n\tPFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetDescriptorEXT vkGetDescriptorEXT;\n\tPFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;\n\tPFN_vkGetDescriptorSetLayoutSizeEXT 
vkGetDescriptorSetLayoutSizeEXT;\n\tPFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;\n\tPFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;\n#else\n\tPFN_vkVoidFunction padding_80aa973c[10];\n#endif /* defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))\n\tPFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;\n#else\n\tPFN_vkVoidFunction padding_98d0fb33[1];\n#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */\n#if defined(VK_EXT_device_fault)\n\tPFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;\n#else\n\tPFN_vkVoidFunction padding_55095419[1];\n#endif /* defined(VK_EXT_device_fault) */\n#if defined(VK_EXT_device_generated_commands)\n\tPFN_vkCmdExecuteGeneratedCommandsEXT vkCmdExecuteGeneratedCommandsEXT;\n\tPFN_vkCmdPreprocessGeneratedCommandsEXT vkCmdPreprocessGeneratedCommandsEXT;\n\tPFN_vkCreateIndirectCommandsLayoutEXT vkCreateIndirectCommandsLayoutEXT;\n\tPFN_vkCreateIndirectExecutionSetEXT vkCreateIndirectExecutionSetEXT;\n\tPFN_vkDestroyIndirectCommandsLayoutEXT vkDestroyIndirectCommandsLayoutEXT;\n\tPFN_vkDestroyIndirectExecutionSetEXT vkDestroyIndirectExecutionSetEXT;\n\tPFN_vkGetGeneratedCommandsMemoryRequirementsEXT vkGetGeneratedCommandsMemoryRequirementsEXT;\n\tPFN_vkUpdateIndirectExecutionSetPipelineEXT vkUpdateIndirectExecutionSetPipelineEXT;\n\tPFN_vkUpdateIndirectExecutionSetShaderEXT vkUpdateIndirectExecutionSetShaderEXT;\n#else\n\tPFN_vkVoidFunction padding_7ba7ebaa[9];\n#endif /* defined(VK_EXT_device_generated_commands) */\n#if defined(VK_EXT_discard_rectangles)\n\tPFN_vkCmdSetDiscardRectangleEXT 
vkCmdSetDiscardRectangleEXT;\n#else\n\tPFN_vkVoidFunction padding_d6355c2[1];\n#endif /* defined(VK_EXT_discard_rectangles) */\n#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2\n\tPFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;\n\tPFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;\n#else\n\tPFN_vkVoidFunction padding_7bb44f77[2];\n#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */\n#if defined(VK_EXT_display_control)\n\tPFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;\n\tPFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT;\n\tPFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT;\n\tPFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;\n#else\n\tPFN_vkVoidFunction padding_d30dfaaf[4];\n#endif /* defined(VK_EXT_display_control) */\n#if defined(VK_EXT_external_memory_host)\n\tPFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_357656e9[1];\n#endif /* defined(VK_EXT_external_memory_host) */\n#if defined(VK_EXT_external_memory_metal)\n\tPFN_vkGetMemoryMetalHandleEXT vkGetMemoryMetalHandleEXT;\n\tPFN_vkGetMemoryMetalHandlePropertiesEXT vkGetMemoryMetalHandlePropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_37d43fb[2];\n#endif /* defined(VK_EXT_external_memory_metal) */\n#if defined(VK_EXT_fragment_density_map_offset)\n\tPFN_vkCmdEndRendering2EXT vkCmdEndRendering2EXT;\n#else\n\tPFN_vkVoidFunction padding_9c90cf11[1];\n#endif /* defined(VK_EXT_fragment_density_map_offset) */\n#if defined(VK_EXT_full_screen_exclusive)\n\tPFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;\n\tPFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;\n#else\n\tPFN_vkVoidFunction padding_3859df46[2];\n#endif /* defined(VK_EXT_full_screen_exclusive) */\n#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || 
defined(VK_VERSION_1_1))\n\tPFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;\n#else\n\tPFN_vkVoidFunction padding_e5b48b5b[1];\n#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */\n#if defined(VK_EXT_hdr_metadata)\n\tPFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;\n#else\n\tPFN_vkVoidFunction padding_ca6d733c[1];\n#endif /* defined(VK_EXT_hdr_metadata) */\n#if defined(VK_EXT_host_image_copy)\n\tPFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;\n\tPFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;\n\tPFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT;\n\tPFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;\n#else\n\tPFN_vkVoidFunction padding_dd6d9b61[4];\n#endif /* defined(VK_EXT_host_image_copy) */\n#if defined(VK_EXT_host_query_reset)\n\tPFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;\n#else\n\tPFN_vkVoidFunction padding_34e58bd3[1];\n#endif /* defined(VK_EXT_host_query_reset) */\n#if defined(VK_EXT_image_drm_format_modifier)\n\tPFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_eb50dc14[1];\n#endif /* defined(VK_EXT_image_drm_format_modifier) */\n#if defined(VK_EXT_line_rasterization)\n\tPFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;\n#else\n\tPFN_vkVoidFunction padding_8a212c37[1];\n#endif /* defined(VK_EXT_line_rasterization) */\n#if defined(VK_EXT_mesh_shader)\n\tPFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;\n\tPFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;\n#else\n\tPFN_vkVoidFunction padding_f65e838[2];\n#endif /* defined(VK_EXT_mesh_shader) */\n#if defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\n\tPFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;\n#else\n\tPFN_vkVoidFunction padding_dcbaac2f[1];\n#endif /* defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || 
defined(VK_VERSION_1_2)) */\n#if defined(VK_EXT_metal_objects)\n\tPFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;\n#else\n\tPFN_vkVoidFunction padding_df21f735[1];\n#endif /* defined(VK_EXT_metal_objects) */\n#if defined(VK_EXT_multi_draw)\n\tPFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;\n\tPFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;\n#else\n\tPFN_vkVoidFunction padding_ce8b93b6[2];\n#endif /* defined(VK_EXT_multi_draw) */\n#if defined(VK_EXT_opacity_micromap)\n\tPFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;\n\tPFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;\n\tPFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT;\n\tPFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;\n\tPFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT;\n\tPFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;\n\tPFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;\n\tPFN_vkCopyMicromapEXT vkCopyMicromapEXT;\n\tPFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;\n\tPFN_vkCreateMicromapEXT vkCreateMicromapEXT;\n\tPFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;\n\tPFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;\n\tPFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;\n\tPFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_fa41e53c[14];\n#endif /* defined(VK_EXT_opacity_micromap) */\n#if defined(VK_EXT_pageable_device_local_memory)\n\tPFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;\n#else\n\tPFN_vkVoidFunction padding_b2d2c2d7[1];\n#endif /* defined(VK_EXT_pageable_device_local_memory) */\n#if defined(VK_EXT_pipeline_properties)\n\tPFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;\n#else\n\tPFN_vkVoidFunction padding_11313020[1];\n#endif /* defined(VK_EXT_pipeline_properties) */\n#if defined(VK_EXT_private_data)\n\tPFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;\n\tPFN_vkDestroyPrivateDataSlotEXT 
vkDestroyPrivateDataSlotEXT;\n\tPFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;\n\tPFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;\n#else\n\tPFN_vkVoidFunction padding_108010f[4];\n#endif /* defined(VK_EXT_private_data) */\n#if defined(VK_EXT_sample_locations)\n\tPFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;\n#else\n\tPFN_vkVoidFunction padding_26f9079f[1];\n#endif /* defined(VK_EXT_sample_locations) */\n#if defined(VK_EXT_shader_module_identifier)\n\tPFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;\n\tPFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;\n#else\n\tPFN_vkVoidFunction padding_e10c8f86[2];\n#endif /* defined(VK_EXT_shader_module_identifier) */\n#if defined(VK_EXT_shader_object)\n\tPFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;\n\tPFN_vkCreateShadersEXT vkCreateShadersEXT;\n\tPFN_vkDestroyShaderEXT vkDestroyShaderEXT;\n\tPFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;\n#else\n\tPFN_vkVoidFunction padding_374f3e18[4];\n#endif /* defined(VK_EXT_shader_object) */\n#if defined(VK_EXT_swapchain_maintenance1)\n\tPFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;\n#else\n\tPFN_vkVoidFunction padding_ea55bf74[1];\n#endif /* defined(VK_EXT_swapchain_maintenance1) */\n#if defined(VK_EXT_transform_feedback)\n\tPFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;\n\tPFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;\n\tPFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;\n\tPFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;\n\tPFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;\n\tPFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;\n#else\n\tPFN_vkVoidFunction padding_36980658[6];\n#endif /* defined(VK_EXT_transform_feedback) */\n#if defined(VK_EXT_validation_cache)\n\tPFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;\n\tPFN_vkDestroyValidationCacheEXT 
vkDestroyValidationCacheEXT;\n\tPFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;\n\tPFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;\n#else\n\tPFN_vkVoidFunction padding_b4f2df29[4];\n#endif /* defined(VK_EXT_validation_cache) */\n#if defined(VK_FUCHSIA_buffer_collection)\n\tPFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;\n\tPFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;\n\tPFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;\n\tPFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;\n\tPFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_8eaa27bc[5];\n#endif /* defined(VK_FUCHSIA_buffer_collection) */\n#if defined(VK_FUCHSIA_external_memory)\n\tPFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;\n\tPFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_e3cb8a67[2];\n#endif /* defined(VK_FUCHSIA_external_memory) */\n#if defined(VK_FUCHSIA_external_semaphore)\n\tPFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;\n\tPFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;\n#else\n\tPFN_vkVoidFunction padding_3df6f656[2];\n#endif /* defined(VK_FUCHSIA_external_semaphore) */\n#if defined(VK_GOOGLE_display_timing)\n\tPFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;\n\tPFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;\n#else\n\tPFN_vkVoidFunction padding_2a6f50cd[2];\n#endif /* defined(VK_GOOGLE_display_timing) */\n#if defined(VK_HUAWEI_cluster_culling_shader)\n\tPFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;\n\tPFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_75b97be6[2];\n#endif /* defined(VK_HUAWEI_cluster_culling_shader) */\n#if 
defined(VK_HUAWEI_invocation_mask)\n\tPFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_c3a4569f[1];\n#endif /* defined(VK_HUAWEI_invocation_mask) */\n#if defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2\n\tPFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_2e923f32[1];\n#endif /* defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2 */\n#if defined(VK_HUAWEI_subpass_shading)\n\tPFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;\n#else\n\tPFN_vkVoidFunction padding_f766fdf5[1];\n#endif /* defined(VK_HUAWEI_subpass_shading) */\n#if defined(VK_INTEL_performance_query)\n\tPFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;\n\tPFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;\n\tPFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;\n\tPFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;\n\tPFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;\n\tPFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;\n\tPFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;\n\tPFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;\n\tPFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;\n#else\n\tPFN_vkVoidFunction padding_495a0a0b[9];\n#endif /* defined(VK_INTEL_performance_query) */\n#if defined(VK_KHR_acceleration_structure)\n\tPFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;\n\tPFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;\n\tPFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;\n\tPFN_vkCmdCopyAccelerationStructureKHR 
vkCmdCopyAccelerationStructureKHR;\n\tPFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;\n\tPFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;\n\tPFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;\n\tPFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;\n\tPFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;\n\tPFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;\n\tPFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;\n\tPFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;\n\tPFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;\n\tPFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;\n\tPFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;\n\tPFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_5a999b78[16];\n#endif /* defined(VK_KHR_acceleration_structure) */\n#if defined(VK_KHR_bind_memory2)\n\tPFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;\n\tPFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;\n#else\n\tPFN_vkVoidFunction padding_ed8481f5[2];\n#endif /* defined(VK_KHR_bind_memory2) */\n#if defined(VK_KHR_buffer_device_address)\n\tPFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;\n\tPFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;\n\tPFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;\n#else\n\tPFN_vkVoidFunction padding_178fdf81[3];\n#endif /* defined(VK_KHR_buffer_device_address) */\n#if defined(VK_KHR_calibrated_timestamps)\n\tPFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR;\n#else\n\tPFN_vkVoidFunction padding_8fd6f40d[1];\n#endif /* 
defined(VK_KHR_calibrated_timestamps) */\n#if defined(VK_KHR_copy_commands2)\n\tPFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;\n\tPFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;\n\tPFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;\n\tPFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;\n\tPFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;\n\tPFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;\n#else\n\tPFN_vkVoidFunction padding_4c841ff2[6];\n#endif /* defined(VK_KHR_copy_commands2) */\n#if defined(VK_KHR_create_renderpass2)\n\tPFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;\n\tPFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;\n\tPFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;\n\tPFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;\n#else\n\tPFN_vkVoidFunction padding_2a0a8727[4];\n#endif /* defined(VK_KHR_create_renderpass2) */\n#if defined(VK_KHR_deferred_host_operations)\n\tPFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;\n\tPFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;\n\tPFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;\n\tPFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;\n\tPFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;\n#else\n\tPFN_vkVoidFunction padding_346287bb[5];\n#endif /* defined(VK_KHR_deferred_host_operations) */\n#if defined(VK_KHR_descriptor_update_template)\n\tPFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;\n\tPFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;\n\tPFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;\n#else\n\tPFN_vkVoidFunction padding_3d63aec0[3];\n#endif /* defined(VK_KHR_descriptor_update_template) */\n#if defined(VK_KHR_device_group)\n\tPFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;\n\tPFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;\n\tPFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;\n#else\n\tPFN_vkVoidFunction 
padding_5ebe16bd[3];\n#endif /* defined(VK_KHR_device_group) */\n#if defined(VK_KHR_display_swapchain)\n\tPFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;\n#else\n\tPFN_vkVoidFunction padding_12099367[1];\n#endif /* defined(VK_KHR_display_swapchain) */\n#if defined(VK_KHR_draw_indirect_count)\n\tPFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;\n\tPFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;\n#else\n\tPFN_vkVoidFunction padding_7b5bc4c1[2];\n#endif /* defined(VK_KHR_draw_indirect_count) */\n#if defined(VK_KHR_dynamic_rendering)\n\tPFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;\n\tPFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR;\n#else\n\tPFN_vkVoidFunction padding_b80f75a5[2];\n#endif /* defined(VK_KHR_dynamic_rendering) */\n#if defined(VK_KHR_dynamic_rendering_local_read)\n\tPFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR;\n\tPFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR;\n#else\n\tPFN_vkVoidFunction padding_b1510532[2];\n#endif /* defined(VK_KHR_dynamic_rendering_local_read) */\n#if defined(VK_KHR_external_fence_fd)\n\tPFN_vkGetFenceFdKHR vkGetFenceFdKHR;\n\tPFN_vkImportFenceFdKHR vkImportFenceFdKHR;\n#else\n\tPFN_vkVoidFunction padding_a2c787d5[2];\n#endif /* defined(VK_KHR_external_fence_fd) */\n#if defined(VK_KHR_external_fence_win32)\n\tPFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;\n\tPFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;\n#else\n\tPFN_vkVoidFunction padding_55d8e6a9[2];\n#endif /* defined(VK_KHR_external_fence_win32) */\n#if defined(VK_KHR_external_memory_fd)\n\tPFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;\n\tPFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_982d9e19[2];\n#endif /* defined(VK_KHR_external_memory_fd) */\n#if defined(VK_KHR_external_memory_win32)\n\tPFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;\n\tPFN_vkGetMemoryWin32HandlePropertiesKHR 
vkGetMemoryWin32HandlePropertiesKHR;\n#else\n\tPFN_vkVoidFunction padding_4af9e25a[2];\n#endif /* defined(VK_KHR_external_memory_win32) */\n#if defined(VK_KHR_external_semaphore_fd)\n\tPFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;\n\tPFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;\n#else\n\tPFN_vkVoidFunction padding_2237b7cf[2];\n#endif /* defined(VK_KHR_external_semaphore_fd) */\n#if defined(VK_KHR_external_semaphore_win32)\n\tPFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;\n\tPFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;\n#else\n\tPFN_vkVoidFunction padding_c18dea52[2];\n#endif /* defined(VK_KHR_external_semaphore_win32) */\n#if defined(VK_KHR_fragment_shading_rate)\n\tPFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;\n#else\n\tPFN_vkVoidFunction padding_f91b0a90[1];\n#endif /* defined(VK_KHR_fragment_shading_rate) */\n#if defined(VK_KHR_get_memory_requirements2)\n\tPFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;\n\tPFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;\n\tPFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;\n#else\n\tPFN_vkVoidFunction padding_79d9c5c4[3];\n#endif /* defined(VK_KHR_get_memory_requirements2) */\n#if defined(VK_KHR_line_rasterization)\n\tPFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR;\n#else\n\tPFN_vkVoidFunction padding_83c2939[1];\n#endif /* defined(VK_KHR_line_rasterization) */\n#if defined(VK_KHR_maintenance1)\n\tPFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;\n#else\n\tPFN_vkVoidFunction padding_4b372c56[1];\n#endif /* defined(VK_KHR_maintenance1) */\n#if defined(VK_KHR_maintenance3)\n\tPFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;\n#else\n\tPFN_vkVoidFunction padding_5ea7858d[1];\n#endif /* defined(VK_KHR_maintenance3) */\n#if defined(VK_KHR_maintenance4)\n\tPFN_vkGetDeviceBufferMemoryRequirementsKHR 
vkGetDeviceBufferMemoryRequirementsKHR;\n\tPFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;\n\tPFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR;\n#else\n\tPFN_vkVoidFunction padding_8e2d4198[3];\n#endif /* defined(VK_KHR_maintenance4) */\n#if defined(VK_KHR_maintenance5)\n\tPFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;\n\tPFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;\n\tPFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;\n\tPFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;\n#else\n\tPFN_vkVoidFunction padding_37040339[4];\n#endif /* defined(VK_KHR_maintenance5) */\n#if defined(VK_KHR_maintenance6)\n\tPFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR;\n\tPFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR;\n#else\n\tPFN_vkVoidFunction padding_442955d8[2];\n#endif /* defined(VK_KHR_maintenance6) */\n#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor)\n\tPFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR;\n\tPFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR;\n#else\n\tPFN_vkVoidFunction padding_80e8513f[2];\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer)\n\tPFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT;\n\tPFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT;\n#else\n\tPFN_vkVoidFunction padding_2816b9cd[2];\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_KHR_map_memory2)\n\tPFN_vkMapMemory2KHR vkMapMemory2KHR;\n\tPFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;\n#else\n\tPFN_vkVoidFunction padding_5a6d8986[2];\n#endif /* defined(VK_KHR_map_memory2) */\n#if defined(VK_KHR_performance_query)\n\tPFN_vkAcquireProfilingLockKHR 
vkAcquireProfilingLockKHR;\n\tPFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;\n#else\n\tPFN_vkVoidFunction padding_76f2673b[2];\n#endif /* defined(VK_KHR_performance_query) */\n#if defined(VK_KHR_pipeline_binary)\n\tPFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR;\n\tPFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR;\n\tPFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR;\n\tPFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR;\n\tPFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR;\n#else\n\tPFN_vkVoidFunction padding_65232810[5];\n#endif /* defined(VK_KHR_pipeline_binary) */\n#if defined(VK_KHR_pipeline_executable_properties)\n\tPFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;\n\tPFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;\n\tPFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;\n#else\n\tPFN_vkVoidFunction padding_f7629b1e[3];\n#endif /* defined(VK_KHR_pipeline_executable_properties) */\n#if defined(VK_KHR_present_wait)\n\tPFN_vkWaitForPresentKHR vkWaitForPresentKHR;\n#else\n\tPFN_vkVoidFunction padding_b16cbe03[1];\n#endif /* defined(VK_KHR_present_wait) */\n#if defined(VK_KHR_present_wait2)\n\tPFN_vkWaitForPresent2KHR vkWaitForPresent2KHR;\n#else\n\tPFN_vkVoidFunction padding_7401483a[1];\n#endif /* defined(VK_KHR_present_wait2) */\n#if defined(VK_KHR_push_descriptor)\n\tPFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;\n#else\n\tPFN_vkVoidFunction padding_8f7712ad[1];\n#endif /* defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)\n\tPFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;\n#else\n\tPFN_vkVoidFunction padding_dd5f9b4a[1];\n#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_ray_tracing_pipeline)\n\tPFN_vkCmdSetRayTracingPipelineStackSizeKHR 
vkCmdSetRayTracingPipelineStackSizeKHR;\n\tPFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;\n\tPFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;\n\tPFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;\n\tPFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;\n\tPFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;\n\tPFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;\n#else\n\tPFN_vkVoidFunction padding_af99aedc[7];\n#endif /* defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_sampler_ycbcr_conversion)\n\tPFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;\n\tPFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;\n#else\n\tPFN_vkVoidFunction padding_88e61b30[2];\n#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */\n#if defined(VK_KHR_shared_presentable_image)\n\tPFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;\n#else\n\tPFN_vkVoidFunction padding_1ff3379[1];\n#endif /* defined(VK_KHR_shared_presentable_image) */\n#if defined(VK_KHR_swapchain)\n\tPFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\n\tPFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\n\tPFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\n\tPFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\n\tPFN_vkQueuePresentKHR vkQueuePresentKHR;\n#else\n\tPFN_vkVoidFunction padding_a1de893b[5];\n#endif /* defined(VK_KHR_swapchain) */\n#if defined(VK_KHR_swapchain_maintenance1)\n\tPFN_vkReleaseSwapchainImagesKHR vkReleaseSwapchainImagesKHR;\n#else\n\tPFN_vkVoidFunction padding_e032d5c4[1];\n#endif /* defined(VK_KHR_swapchain_maintenance1) */\n#if defined(VK_KHR_synchronization2)\n\tPFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;\n\tPFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;\n\tPFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;\n\tPFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;\n\tPFN_vkCmdWriteTimestamp2KHR 
vkCmdWriteTimestamp2KHR;\n\tPFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;\n#else\n\tPFN_vkVoidFunction padding_e85bf128[6];\n#endif /* defined(VK_KHR_synchronization2) */\n#if defined(VK_KHR_timeline_semaphore)\n\tPFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;\n\tPFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;\n\tPFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;\n#else\n\tPFN_vkVoidFunction padding_c799d931[3];\n#endif /* defined(VK_KHR_timeline_semaphore) */\n#if defined(VK_KHR_video_decode_queue)\n\tPFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;\n#else\n\tPFN_vkVoidFunction padding_7a7cc7ad[1];\n#endif /* defined(VK_KHR_video_decode_queue) */\n#if defined(VK_KHR_video_encode_queue)\n\tPFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;\n\tPFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;\n#else\n\tPFN_vkVoidFunction padding_f2997fb4[2];\n#endif /* defined(VK_KHR_video_encode_queue) */\n#if defined(VK_KHR_video_queue)\n\tPFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;\n\tPFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;\n\tPFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;\n\tPFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;\n\tPFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;\n\tPFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;\n\tPFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;\n\tPFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;\n\tPFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;\n\tPFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;\n#else\n\tPFN_vkVoidFunction padding_98fb7016[10];\n#endif /* defined(VK_KHR_video_queue) */\n#if defined(VK_NVX_binary_import)\n\tPFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;\n\tPFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;\n\tPFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;\n\tPFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;\n\tPFN_vkDestroyCuModuleNVX 
vkDestroyCuModuleNVX;\n#else\n\tPFN_vkVoidFunction padding_eb54309b[5];\n#endif /* defined(VK_NVX_binary_import) */\n#if defined(VK_NVX_image_view_handle)\n\tPFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;\n#else\n\tPFN_vkVoidFunction padding_887f6736[1];\n#endif /* defined(VK_NVX_image_view_handle) */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3\n\tPFN_vkGetImageViewHandle64NVX vkGetImageViewHandle64NVX;\n#else\n\tPFN_vkVoidFunction padding_64ad40e2[1];\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3 */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2\n\tPFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;\n#else\n\tPFN_vkVoidFunction padding_d290479a[1];\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_clip_space_w_scaling)\n\tPFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;\n#else\n\tPFN_vkVoidFunction padding_88d7eb2e[1];\n#endif /* defined(VK_NV_clip_space_w_scaling) */\n#if defined(VK_NV_cluster_acceleration_structure)\n\tPFN_vkCmdBuildClusterAccelerationStructureIndirectNV vkCmdBuildClusterAccelerationStructureIndirectNV;\n\tPFN_vkGetClusterAccelerationStructureBuildSizesNV vkGetClusterAccelerationStructureBuildSizesNV;\n#else\n\tPFN_vkVoidFunction padding_60e35395[2];\n#endif /* defined(VK_NV_cluster_acceleration_structure) */\n#if defined(VK_NV_cooperative_vector)\n\tPFN_vkCmdConvertCooperativeVectorMatrixNV vkCmdConvertCooperativeVectorMatrixNV;\n\tPFN_vkConvertCooperativeVectorMatrixNV vkConvertCooperativeVectorMatrixNV;\n#else\n\tPFN_vkVoidFunction padding_f4a887d0[2];\n#endif /* defined(VK_NV_cooperative_vector) */\n#if defined(VK_NV_copy_memory_indirect)\n\tPFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;\n\tPFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;\n#else\n\tPFN_vkVoidFunction padding_9536230e[2];\n#endif /* 
defined(VK_NV_copy_memory_indirect) */\n#if defined(VK_NV_cuda_kernel_launch)\n\tPFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;\n\tPFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;\n\tPFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;\n\tPFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;\n\tPFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;\n\tPFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;\n#else\n\tPFN_vkVoidFunction padding_2eabdf3b[6];\n#endif /* defined(VK_NV_cuda_kernel_launch) */\n#if defined(VK_NV_device_diagnostic_checkpoints)\n\tPFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;\n\tPFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;\n#else\n\tPFN_vkVoidFunction padding_adaa5a21[2];\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) */\n#if defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\n\tPFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;\n#else\n\tPFN_vkVoidFunction padding_c776633d[1];\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_NV_device_generated_commands)\n\tPFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;\n\tPFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;\n\tPFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;\n\tPFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;\n\tPFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;\n\tPFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;\n#else\n\tPFN_vkVoidFunction padding_4c7e4395[6];\n#endif /* defined(VK_NV_device_generated_commands) */\n#if defined(VK_NV_device_generated_commands_compute)\n\tPFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;\n\tPFN_vkGetPipelineIndirectDeviceAddressNV 
vkGetPipelineIndirectDeviceAddressNV;\n\tPFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;\n#else\n\tPFN_vkVoidFunction padding_5195094c[3];\n#endif /* defined(VK_NV_device_generated_commands_compute) */\n#if defined(VK_NV_external_compute_queue)\n\tPFN_vkCreateExternalComputeQueueNV vkCreateExternalComputeQueueNV;\n\tPFN_vkDestroyExternalComputeQueueNV vkDestroyExternalComputeQueueNV;\n\tPFN_vkGetExternalComputeQueueDataNV vkGetExternalComputeQueueDataNV;\n#else\n\tPFN_vkVoidFunction padding_4f947e0b[3];\n#endif /* defined(VK_NV_external_compute_queue) */\n#if defined(VK_NV_external_memory_rdma)\n\tPFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;\n#else\n\tPFN_vkVoidFunction padding_920e405[1];\n#endif /* defined(VK_NV_external_memory_rdma) */\n#if defined(VK_NV_external_memory_win32)\n\tPFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;\n#else\n\tPFN_vkVoidFunction padding_c13d6f3a[1];\n#endif /* defined(VK_NV_external_memory_win32) */\n#if defined(VK_NV_fragment_shading_rate_enums)\n\tPFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;\n#else\n\tPFN_vkVoidFunction padding_4979ca14[1];\n#endif /* defined(VK_NV_fragment_shading_rate_enums) */\n#if defined(VK_NV_low_latency2)\n\tPFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;\n\tPFN_vkLatencySleepNV vkLatencySleepNV;\n\tPFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;\n\tPFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;\n\tPFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;\n#else\n\tPFN_vkVoidFunction padding_fabf8b19[5];\n#endif /* defined(VK_NV_low_latency2) */\n#if defined(VK_NV_memory_decompression)\n\tPFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;\n\tPFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;\n#else\n\tPFN_vkVoidFunction padding_706009[2];\n#endif /* defined(VK_NV_memory_decompression) */\n#if defined(VK_NV_mesh_shader)\n\tPFN_vkCmdDrawMeshTasksIndirectNV 
vkCmdDrawMeshTasksIndirectNV;\n\tPFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV;\n#else\n\tPFN_vkVoidFunction padding_ac232758[2];\n#endif /* defined(VK_NV_mesh_shader) */\n#if defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\n\tPFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;\n#else\n\tPFN_vkVoidFunction padding_53495be7[1];\n#endif /* defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_NV_optical_flow)\n\tPFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV;\n\tPFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;\n\tPFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;\n\tPFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;\n#else\n\tPFN_vkVoidFunction padding_f67571eb[4];\n#endif /* defined(VK_NV_optical_flow) */\n#if defined(VK_NV_partitioned_acceleration_structure)\n\tPFN_vkCmdBuildPartitionedAccelerationStructuresNV vkCmdBuildPartitionedAccelerationStructuresNV;\n\tPFN_vkGetPartitionedAccelerationStructuresBuildSizesNV vkGetPartitionedAccelerationStructuresBuildSizesNV;\n#else\n\tPFN_vkVoidFunction padding_d27c8c6d[2];\n#endif /* defined(VK_NV_partitioned_acceleration_structure) */\n#if defined(VK_NV_ray_tracing)\n\tPFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;\n\tPFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;\n\tPFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;\n\tPFN_vkCmdTraceRaysNV vkCmdTraceRaysNV;\n\tPFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;\n\tPFN_vkCompileDeferredNV vkCompileDeferredNV;\n\tPFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;\n\tPFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;\n\tPFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;\n\tPFN_vkGetAccelerationStructureHandleNV 
vkGetAccelerationStructureHandleNV;\n\tPFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;\n\tPFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;\n#else\n\tPFN_vkVoidFunction padding_feefbeac[12];\n#endif /* defined(VK_NV_ray_tracing) */\n#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2\n\tPFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;\n#else\n\tPFN_vkVoidFunction padding_e3c24f80[1];\n#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_scissor_exclusive)\n\tPFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;\n#else\n\tPFN_vkVoidFunction padding_8e88d86c[1];\n#endif /* defined(VK_NV_scissor_exclusive) */\n#if defined(VK_NV_shading_rate_image)\n\tPFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;\n\tPFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;\n\tPFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;\n#else\n\tPFN_vkVoidFunction padding_92a0767f[3];\n#endif /* defined(VK_NV_shading_rate_image) */\n#if defined(VK_QCOM_tile_memory_heap)\n\tPFN_vkCmdBindTileMemoryQCOM vkCmdBindTileMemoryQCOM;\n#else\n\tPFN_vkVoidFunction padding_e2d55d04[1];\n#endif /* defined(VK_QCOM_tile_memory_heap) */\n#if defined(VK_QCOM_tile_properties)\n\tPFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;\n\tPFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;\n#else\n\tPFN_vkVoidFunction padding_be12e32[2];\n#endif /* defined(VK_QCOM_tile_properties) */\n#if defined(VK_QCOM_tile_shading)\n\tPFN_vkCmdBeginPerTileExecutionQCOM vkCmdBeginPerTileExecutionQCOM;\n\tPFN_vkCmdDispatchTileQCOM vkCmdDispatchTileQCOM;\n\tPFN_vkCmdEndPerTileExecutionQCOM vkCmdEndPerTileExecutionQCOM;\n#else\n\tPFN_vkVoidFunction padding_fcd9e1df[3];\n#endif /* defined(VK_QCOM_tile_shading) */\n#if 
defined(VK_QNX_external_memory_screen_buffer)\n\tPFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;\n#else\n\tPFN_vkVoidFunction padding_1c27735d[1];\n#endif /* defined(VK_QNX_external_memory_screen_buffer) */\n#if defined(VK_VALVE_descriptor_set_host_mapping)\n\tPFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;\n\tPFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;\n#else\n\tPFN_vkVoidFunction padding_fd71e4c6[2];\n#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */\n#if (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control))\n\tPFN_vkCmdSetDepthClampRangeEXT vkCmdSetDepthClampRangeEXT;\n#else\n\tPFN_vkVoidFunction padding_faa18a61[1];\n#endif /* (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control)) */\n#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;\n\tPFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;\n\tPFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;\n\tPFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;\n\tPFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;\n\tPFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;\n\tPFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;\n\tPFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;\n\tPFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;\n\tPFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;\n\tPFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;\n\tPFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;\n#else\n\tPFN_vkVoidFunction padding_3e8c720f[12];\n#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetDepthBiasEnableEXT 
vkCmdSetDepthBiasEnableEXT;\n\tPFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;\n\tPFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;\n\tPFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;\n\tPFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_b93e02a6[5];\n#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT;\n\tPFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;\n\tPFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;\n\tPFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;\n\tPFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;\n\tPFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;\n\tPFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;\n\tPFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;\n\tPFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;\n\tPFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;\n#else\n\tPFN_vkVoidFunction padding_ab566e7e[10];\n#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object))\n\tPFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;\n#else\n\tPFN_vkVoidFunction padding_6730ed0c[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback))\n\tPFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;\n#else\n\tPFN_vkVoidFunction 
padding_d3ebb335[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization))\n\tPFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;\n\tPFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;\n#else\n\tPFN_vkVoidFunction padding_a21758f4[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable))\n\tPFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_a498a838[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations))\n\tPFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_67db38de[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced))\n\tPFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;\n#else\n\tPFN_vkVoidFunction padding_fbea7481[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && 
defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex))\n\tPFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;\n#else\n\tPFN_vkVoidFunction padding_3a8ec90e[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization))\n\tPFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;\n\tPFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;\n#else\n\tPFN_vkVoidFunction padding_29cdb756[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control))\n\tPFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;\n#else\n\tPFN_vkVoidFunction padding_815a7240[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))\n\tPFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;\n#else\n\tPFN_vkVoidFunction padding_d1f00511[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */\n#if 
(defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))\n\tPFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;\n#else\n\tPFN_vkVoidFunction padding_7a73d553[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color))\n\tPFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV;\n\tPFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;\n#else\n\tPFN_vkVoidFunction padding_6045fb8c[2];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))\n\tPFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;\n\tPFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;\n\tPFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;\n#else\n\tPFN_vkVoidFunction padding_bdc35c80[3];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))\n\tPFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;\n#else\n\tPFN_vkVoidFunction padding_9a5cd6e8[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || 
(defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))\n\tPFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;\n#else\n\tPFN_vkVoidFunction padding_3ee17e96[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))\n\tPFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV;\n#else\n\tPFN_vkVoidFunction padding_263d525a[1];\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */\n#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))\n\tPFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;\n#else\n\tPFN_vkVoidFunction padding_ecddace1[1];\n#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */\n#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))\n\tPFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;\n#else\n\tPFN_vkVoidFunction padding_d83e1de1[1];\n#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */\n#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))\n\tPFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;\n#else\n\tPFN_vkVoidFunction padding_60f8358a[1];\n#endif /* 
(defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\n\tPFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;\n\tPFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;\n#else\n\tPFN_vkVoidFunction padding_460290c6[2];\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\n\tPFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;\n#else\n\tPFN_vkVoidFunction padding_cffc198[1];\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n\t/* VOLK_GENERATE_DEVICE_TABLE */\n};\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/* VOLK_GENERATE_PROTOTYPES_H */\n#if defined(VK_VERSION_1_0)\nextern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers;\nextern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets;\nextern PFN_vkAllocateMemory vkAllocateMemory;\nextern PFN_vkBeginCommandBuffer vkBeginCommandBuffer;\nextern PFN_vkBindBufferMemory vkBindBufferMemory;\nextern PFN_vkBindImageMemory vkBindImageMemory;\nextern PFN_vkCmdBeginQuery vkCmdBeginQuery;\nextern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass;\nextern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets;\nextern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer;\nextern PFN_vkCmdBindPipeline vkCmdBindPipeline;\nextern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers;\nextern PFN_vkCmdBlitImage vkCmdBlitImage;\nextern PFN_vkCmdClearAttachments vkCmdClearAttachments;\nextern PFN_vkCmdClearColorImage vkCmdClearColorImage;\nextern 
PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage;\nextern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;\nextern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage;\nextern PFN_vkCmdCopyImage vkCmdCopyImage;\nextern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer;\nextern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults;\nextern PFN_vkCmdDispatch vkCmdDispatch;\nextern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect;\nextern PFN_vkCmdDraw vkCmdDraw;\nextern PFN_vkCmdDrawIndexed vkCmdDrawIndexed;\nextern PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect;\nextern PFN_vkCmdDrawIndirect vkCmdDrawIndirect;\nextern PFN_vkCmdEndQuery vkCmdEndQuery;\nextern PFN_vkCmdEndRenderPass vkCmdEndRenderPass;\nextern PFN_vkCmdExecuteCommands vkCmdExecuteCommands;\nextern PFN_vkCmdFillBuffer vkCmdFillBuffer;\nextern PFN_vkCmdNextSubpass vkCmdNextSubpass;\nextern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;\nextern PFN_vkCmdPushConstants vkCmdPushConstants;\nextern PFN_vkCmdResetEvent vkCmdResetEvent;\nextern PFN_vkCmdResetQueryPool vkCmdResetQueryPool;\nextern PFN_vkCmdResolveImage vkCmdResolveImage;\nextern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;\nextern PFN_vkCmdSetDepthBias vkCmdSetDepthBias;\nextern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;\nextern PFN_vkCmdSetEvent vkCmdSetEvent;\nextern PFN_vkCmdSetLineWidth vkCmdSetLineWidth;\nextern PFN_vkCmdSetScissor vkCmdSetScissor;\nextern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;\nextern PFN_vkCmdSetStencilReference vkCmdSetStencilReference;\nextern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;\nextern PFN_vkCmdSetViewport vkCmdSetViewport;\nextern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer;\nextern PFN_vkCmdWaitEvents vkCmdWaitEvents;\nextern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp;\nextern PFN_vkCreateBuffer vkCreateBuffer;\nextern PFN_vkCreateBufferView vkCreateBufferView;\nextern PFN_vkCreateCommandPool vkCreateCommandPool;\nextern PFN_vkCreateComputePipelines 
vkCreateComputePipelines;\nextern PFN_vkCreateDescriptorPool vkCreateDescriptorPool;\nextern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;\nextern PFN_vkCreateDevice vkCreateDevice;\nextern PFN_vkCreateEvent vkCreateEvent;\nextern PFN_vkCreateFence vkCreateFence;\nextern PFN_vkCreateFramebuffer vkCreateFramebuffer;\nextern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;\nextern PFN_vkCreateImage vkCreateImage;\nextern PFN_vkCreateImageView vkCreateImageView;\nextern PFN_vkCreateInstance vkCreateInstance;\nextern PFN_vkCreatePipelineCache vkCreatePipelineCache;\nextern PFN_vkCreatePipelineLayout vkCreatePipelineLayout;\nextern PFN_vkCreateQueryPool vkCreateQueryPool;\nextern PFN_vkCreateRenderPass vkCreateRenderPass;\nextern PFN_vkCreateSampler vkCreateSampler;\nextern PFN_vkCreateSemaphore vkCreateSemaphore;\nextern PFN_vkCreateShaderModule vkCreateShaderModule;\nextern PFN_vkDestroyBuffer vkDestroyBuffer;\nextern PFN_vkDestroyBufferView vkDestroyBufferView;\nextern PFN_vkDestroyCommandPool vkDestroyCommandPool;\nextern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;\nextern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;\nextern PFN_vkDestroyDevice vkDestroyDevice;\nextern PFN_vkDestroyEvent vkDestroyEvent;\nextern PFN_vkDestroyFence vkDestroyFence;\nextern PFN_vkDestroyFramebuffer vkDestroyFramebuffer;\nextern PFN_vkDestroyImage vkDestroyImage;\nextern PFN_vkDestroyImageView vkDestroyImageView;\nextern PFN_vkDestroyInstance vkDestroyInstance;\nextern PFN_vkDestroyPipeline vkDestroyPipeline;\nextern PFN_vkDestroyPipelineCache vkDestroyPipelineCache;\nextern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout;\nextern PFN_vkDestroyQueryPool vkDestroyQueryPool;\nextern PFN_vkDestroyRenderPass vkDestroyRenderPass;\nextern PFN_vkDestroySampler vkDestroySampler;\nextern PFN_vkDestroySemaphore vkDestroySemaphore;\nextern PFN_vkDestroyShaderModule vkDestroyShaderModule;\nextern PFN_vkDeviceWaitIdle vkDeviceWaitIdle;\nextern 
PFN_vkEndCommandBuffer vkEndCommandBuffer;\nextern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;\nextern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties;\nextern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties;\nextern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties;\nextern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices;\nextern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;\nextern PFN_vkFreeCommandBuffers vkFreeCommandBuffers;\nextern PFN_vkFreeDescriptorSets vkFreeDescriptorSets;\nextern PFN_vkFreeMemory vkFreeMemory;\nextern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;\nextern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment;\nextern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;\nextern PFN_vkGetDeviceQueue vkGetDeviceQueue;\nextern PFN_vkGetEventStatus vkGetEventStatus;\nextern PFN_vkGetFenceStatus vkGetFenceStatus;\nextern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;\nextern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements;\nextern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout;\nextern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;\nextern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures;\nextern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties;\nextern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;\nextern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties;\nextern PFN_vkGetPipelineCacheData vkGetPipelineCacheData;\nextern PFN_vkGetQueryPoolResults 
vkGetQueryPoolResults;\nextern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity;\nextern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;\nextern PFN_vkMapMemory vkMapMemory;\nextern PFN_vkMergePipelineCaches vkMergePipelineCaches;\nextern PFN_vkQueueBindSparse vkQueueBindSparse;\nextern PFN_vkQueueSubmit vkQueueSubmit;\nextern PFN_vkQueueWaitIdle vkQueueWaitIdle;\nextern PFN_vkResetCommandBuffer vkResetCommandBuffer;\nextern PFN_vkResetCommandPool vkResetCommandPool;\nextern PFN_vkResetDescriptorPool vkResetDescriptorPool;\nextern PFN_vkResetEvent vkResetEvent;\nextern PFN_vkResetFences vkResetFences;\nextern PFN_vkSetEvent vkSetEvent;\nextern PFN_vkUnmapMemory vkUnmapMemory;\nextern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets;\nextern PFN_vkWaitForFences vkWaitForFences;\n#endif /* defined(VK_VERSION_1_0) */\n#if defined(VK_VERSION_1_1)\nextern PFN_vkBindBufferMemory2 vkBindBufferMemory2;\nextern PFN_vkBindImageMemory2 vkBindImageMemory2;\nextern PFN_vkCmdDispatchBase vkCmdDispatchBase;\nextern PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask;\nextern PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate;\nextern PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion;\nextern PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate;\nextern PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion;\nextern PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;\nextern PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups;\nextern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;\nextern PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport;\nextern PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures;\nextern PFN_vkGetDeviceQueue2 vkGetDeviceQueue2;\nextern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;\nextern PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2;\nextern 
PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties;\nextern PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties;\nextern PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties;\nextern PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2;\nextern PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties2 vkGetPhysicalDeviceImageFormatProperties2;\nextern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;\nextern PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2;\nextern PFN_vkTrimCommandPool vkTrimCommandPool;\nextern PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;\n#endif /* defined(VK_VERSION_1_1) */\n#if defined(VK_VERSION_1_2)\nextern PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;\nextern PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;\nextern PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;\nextern PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;\nextern PFN_vkCmdNextSubpass2 vkCmdNextSubpass2;\nextern PFN_vkCreateRenderPass2 vkCreateRenderPass2;\nextern PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;\nextern PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;\nextern PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;\nextern PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;\nextern PFN_vkResetQueryPool vkResetQueryPool;\nextern PFN_vkSignalSemaphore vkSignalSemaphore;\nextern PFN_vkWaitSemaphores vkWaitSemaphores;\n#endif /* defined(VK_VERSION_1_2) */\n#if defined(VK_VERSION_1_3)\nextern 
PFN_vkCmdBeginRendering vkCmdBeginRendering;\nextern PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2;\nextern PFN_vkCmdBlitImage2 vkCmdBlitImage2;\nextern PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2;\nextern PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2;\nextern PFN_vkCmdCopyImage2 vkCmdCopyImage2;\nextern PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2;\nextern PFN_vkCmdEndRendering vkCmdEndRendering;\nextern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2;\nextern PFN_vkCmdResetEvent2 vkCmdResetEvent2;\nextern PFN_vkCmdResolveImage2 vkCmdResolveImage2;\nextern PFN_vkCmdSetCullMode vkCmdSetCullMode;\nextern PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable;\nextern PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable;\nextern PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp;\nextern PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable;\nextern PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable;\nextern PFN_vkCmdSetEvent2 vkCmdSetEvent2;\nextern PFN_vkCmdSetFrontFace vkCmdSetFrontFace;\nextern PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable;\nextern PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology;\nextern PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable;\nextern PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount;\nextern PFN_vkCmdSetStencilOp vkCmdSetStencilOp;\nextern PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable;\nextern PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount;\nextern PFN_vkCmdWaitEvents2 vkCmdWaitEvents2;\nextern PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2;\nextern PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot;\nextern PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot;\nextern PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements;\nextern PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements;\nextern PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements;\nextern 
PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties;\nextern PFN_vkGetPrivateData vkGetPrivateData;\nextern PFN_vkQueueSubmit2 vkQueueSubmit2;\nextern PFN_vkSetPrivateData vkSetPrivateData;\n#endif /* defined(VK_VERSION_1_3) */\n#if defined(VK_VERSION_1_4)\nextern PFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2;\nextern PFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2;\nextern PFN_vkCmdPushConstants2 vkCmdPushConstants2;\nextern PFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet;\nextern PFN_vkCmdPushDescriptorSet2 vkCmdPushDescriptorSet2;\nextern PFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate;\nextern PFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2;\nextern PFN_vkCmdSetLineStipple vkCmdSetLineStipple;\nextern PFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations;\nextern PFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices;\nextern PFN_vkCopyImageToImage vkCopyImageToImage;\nextern PFN_vkCopyImageToMemory vkCopyImageToMemory;\nextern PFN_vkCopyMemoryToImage vkCopyMemoryToImage;\nextern PFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout;\nextern PFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2;\nextern PFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity;\nextern PFN_vkMapMemory2 vkMapMemory2;\nextern PFN_vkTransitionImageLayout vkTransitionImageLayout;\nextern PFN_vkUnmapMemory2 vkUnmapMemory2;\n#endif /* defined(VK_VERSION_1_4) */\n#if defined(VK_AMDX_shader_enqueue)\nextern PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX;\nextern PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX;\nextern PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX;\nextern PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX;\nextern PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX;\nextern 
PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX;\nextern PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX;\n#endif /* defined(VK_AMDX_shader_enqueue) */\n#if defined(VK_AMD_anti_lag)\nextern PFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD;\n#endif /* defined(VK_AMD_anti_lag) */\n#if defined(VK_AMD_buffer_marker)\nextern PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;\n#endif /* defined(VK_AMD_buffer_marker) */\n#if defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\nextern PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;\n#endif /* defined(VK_AMD_buffer_marker) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_AMD_display_native_hdr)\nextern PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;\n#endif /* defined(VK_AMD_display_native_hdr) */\n#if defined(VK_AMD_draw_indirect_count)\nextern PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;\nextern PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;\n#endif /* defined(VK_AMD_draw_indirect_count) */\n#if defined(VK_AMD_shader_info)\nextern PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;\n#endif /* defined(VK_AMD_shader_info) */\n#if defined(VK_ANDROID_external_memory_android_hardware_buffer)\nextern PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;\nextern PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;\n#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */\n#if defined(VK_ARM_data_graph)\nextern PFN_vkBindDataGraphPipelineSessionMemoryARM vkBindDataGraphPipelineSessionMemoryARM;\nextern PFN_vkCmdDispatchDataGraphARM vkCmdDispatchDataGraphARM;\nextern PFN_vkCreateDataGraphPipelineSessionARM vkCreateDataGraphPipelineSessionARM;\nextern PFN_vkCreateDataGraphPipelinesARM vkCreateDataGraphPipelinesARM;\nextern 
PFN_vkDestroyDataGraphPipelineSessionARM vkDestroyDataGraphPipelineSessionARM;\nextern PFN_vkGetDataGraphPipelineAvailablePropertiesARM vkGetDataGraphPipelineAvailablePropertiesARM;\nextern PFN_vkGetDataGraphPipelinePropertiesARM vkGetDataGraphPipelinePropertiesARM;\nextern PFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM vkGetDataGraphPipelineSessionBindPointRequirementsARM;\nextern PFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM vkGetDataGraphPipelineSessionMemoryRequirementsARM;\nextern PFN_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM;\nextern PFN_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM;\n#endif /* defined(VK_ARM_data_graph) */\n#if defined(VK_ARM_tensors)\nextern PFN_vkBindTensorMemoryARM vkBindTensorMemoryARM;\nextern PFN_vkCmdCopyTensorARM vkCmdCopyTensorARM;\nextern PFN_vkCreateTensorARM vkCreateTensorARM;\nextern PFN_vkCreateTensorViewARM vkCreateTensorViewARM;\nextern PFN_vkDestroyTensorARM vkDestroyTensorARM;\nextern PFN_vkDestroyTensorViewARM vkDestroyTensorViewARM;\nextern PFN_vkGetDeviceTensorMemoryRequirementsARM vkGetDeviceTensorMemoryRequirementsARM;\nextern PFN_vkGetPhysicalDeviceExternalTensorPropertiesARM vkGetPhysicalDeviceExternalTensorPropertiesARM;\nextern PFN_vkGetTensorMemoryRequirementsARM vkGetTensorMemoryRequirementsARM;\n#endif /* defined(VK_ARM_tensors) */\n#if defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer)\nextern PFN_vkGetTensorOpaqueCaptureDescriptorDataARM vkGetTensorOpaqueCaptureDescriptorDataARM;\nextern PFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM vkGetTensorViewOpaqueCaptureDescriptorDataARM;\n#endif /* defined(VK_ARM_tensors) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_acquire_drm_display)\nextern PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT;\nextern PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT;\n#endif /* 
defined(VK_EXT_acquire_drm_display) */\n#if defined(VK_EXT_acquire_xlib_display)\nextern PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT;\nextern PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT;\n#endif /* defined(VK_EXT_acquire_xlib_display) */\n#if defined(VK_EXT_attachment_feedback_loop_dynamic_state)\nextern PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT;\n#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */\n#if defined(VK_EXT_buffer_device_address)\nextern PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;\n#endif /* defined(VK_EXT_buffer_device_address) */\n#if defined(VK_EXT_calibrated_timestamps)\nextern PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;\nextern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT;\n#endif /* defined(VK_EXT_calibrated_timestamps) */\n#if defined(VK_EXT_color_write_enable)\nextern PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;\n#endif /* defined(VK_EXT_color_write_enable) */\n#if defined(VK_EXT_conditional_rendering)\nextern PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;\nextern PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;\n#endif /* defined(VK_EXT_conditional_rendering) */\n#if defined(VK_EXT_debug_marker)\nextern PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT;\nextern PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT;\nextern PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT;\nextern PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;\nextern PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;\n#endif /* defined(VK_EXT_debug_marker) */\n#if defined(VK_EXT_debug_report)\nextern PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT;\nextern PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT;\nextern PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT;\n#endif /* 
defined(VK_EXT_debug_report) */\n#if defined(VK_EXT_debug_utils)\nextern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;\nextern PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;\nextern PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT;\nextern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT;\nextern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT;\nextern PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT;\nextern PFN_vkQueueEndDebugUtilsLabelEXT vkQueueEndDebugUtilsLabelEXT;\nextern PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT;\nextern PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT;\nextern PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT;\nextern PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT;\n#endif /* defined(VK_EXT_debug_utils) */\n#if defined(VK_EXT_depth_bias_control)\nextern PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT;\n#endif /* defined(VK_EXT_depth_bias_control) */\n#if defined(VK_EXT_descriptor_buffer)\nextern PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT;\nextern PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT;\nextern PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT;\nextern PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetDescriptorEXT vkGetDescriptorEXT;\nextern PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT;\nextern PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT;\nextern PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT;\nextern PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT;\n#endif /* 
defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing))\nextern PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT;\n#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */\n#if defined(VK_EXT_device_fault)\nextern PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT;\n#endif /* defined(VK_EXT_device_fault) */\n#if defined(VK_EXT_device_generated_commands)\nextern PFN_vkCmdExecuteGeneratedCommandsEXT vkCmdExecuteGeneratedCommandsEXT;\nextern PFN_vkCmdPreprocessGeneratedCommandsEXT vkCmdPreprocessGeneratedCommandsEXT;\nextern PFN_vkCreateIndirectCommandsLayoutEXT vkCreateIndirectCommandsLayoutEXT;\nextern PFN_vkCreateIndirectExecutionSetEXT vkCreateIndirectExecutionSetEXT;\nextern PFN_vkDestroyIndirectCommandsLayoutEXT vkDestroyIndirectCommandsLayoutEXT;\nextern PFN_vkDestroyIndirectExecutionSetEXT vkDestroyIndirectExecutionSetEXT;\nextern PFN_vkGetGeneratedCommandsMemoryRequirementsEXT vkGetGeneratedCommandsMemoryRequirementsEXT;\nextern PFN_vkUpdateIndirectExecutionSetPipelineEXT vkUpdateIndirectExecutionSetPipelineEXT;\nextern PFN_vkUpdateIndirectExecutionSetShaderEXT vkUpdateIndirectExecutionSetShaderEXT;\n#endif /* defined(VK_EXT_device_generated_commands) */\n#if defined(VK_EXT_direct_mode_display)\nextern PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT;\n#endif /* defined(VK_EXT_direct_mode_display) */\n#if defined(VK_EXT_directfb_surface)\nextern PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT;\nextern PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT;\n#endif /* defined(VK_EXT_directfb_surface) */\n#if defined(VK_EXT_discard_rectangles)\nextern PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;\n#endif /* defined(VK_EXT_discard_rectangles) */\n#if 
defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2\nextern PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT;\nextern PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT;\n#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */\n#if defined(VK_EXT_display_control)\nextern PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT;\nextern PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT;\nextern PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT;\nextern PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;\n#endif /* defined(VK_EXT_display_control) */\n#if defined(VK_EXT_display_surface_counter)\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT;\n#endif /* defined(VK_EXT_display_surface_counter) */\n#if defined(VK_EXT_external_memory_host)\nextern PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;\n#endif /* defined(VK_EXT_external_memory_host) */\n#if defined(VK_EXT_external_memory_metal)\nextern PFN_vkGetMemoryMetalHandleEXT vkGetMemoryMetalHandleEXT;\nextern PFN_vkGetMemoryMetalHandlePropertiesEXT vkGetMemoryMetalHandlePropertiesEXT;\n#endif /* defined(VK_EXT_external_memory_metal) */\n#if defined(VK_EXT_fragment_density_map_offset)\nextern PFN_vkCmdEndRendering2EXT vkCmdEndRendering2EXT;\n#endif /* defined(VK_EXT_fragment_density_map_offset) */\n#if defined(VK_EXT_full_screen_exclusive)\nextern PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT;\nextern PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;\n#endif /* defined(VK_EXT_full_screen_exclusive) */\n#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1))\nextern PFN_vkGetDeviceGroupSurfacePresentModes2EXT 
vkGetDeviceGroupSurfacePresentModes2EXT;\n#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */\n#if defined(VK_EXT_hdr_metadata)\nextern PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;\n#endif /* defined(VK_EXT_hdr_metadata) */\n#if defined(VK_EXT_headless_surface)\nextern PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT;\n#endif /* defined(VK_EXT_headless_surface) */\n#if defined(VK_EXT_host_image_copy)\nextern PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT;\nextern PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT;\nextern PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT;\nextern PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT;\n#endif /* defined(VK_EXT_host_image_copy) */\n#if defined(VK_EXT_host_query_reset)\nextern PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;\n#endif /* defined(VK_EXT_host_query_reset) */\n#if defined(VK_EXT_image_drm_format_modifier)\nextern PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;\n#endif /* defined(VK_EXT_image_drm_format_modifier) */\n#if defined(VK_EXT_line_rasterization)\nextern PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;\n#endif /* defined(VK_EXT_line_rasterization) */\n#if defined(VK_EXT_mesh_shader)\nextern PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT;\nextern PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT;\n#endif /* defined(VK_EXT_mesh_shader) */\n#if defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\nextern PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT;\n#endif /* defined(VK_EXT_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_EXT_metal_objects)\nextern PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT;\n#endif /* defined(VK_EXT_metal_objects) */\n#if defined(VK_EXT_metal_surface)\nextern PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT;\n#endif /* 
defined(VK_EXT_metal_surface) */\n#if defined(VK_EXT_multi_draw)\nextern PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;\nextern PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;\n#endif /* defined(VK_EXT_multi_draw) */\n#if defined(VK_EXT_opacity_micromap)\nextern PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT;\nextern PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT;\nextern PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT;\nextern PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT;\nextern PFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT;\nextern PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT;\nextern PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT;\nextern PFN_vkCopyMicromapEXT vkCopyMicromapEXT;\nextern PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT;\nextern PFN_vkCreateMicromapEXT vkCreateMicromapEXT;\nextern PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT;\nextern PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT;\nextern PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT;\nextern PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT;\n#endif /* defined(VK_EXT_opacity_micromap) */\n#if defined(VK_EXT_pageable_device_local_memory)\nextern PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT;\n#endif /* defined(VK_EXT_pageable_device_local_memory) */\n#if defined(VK_EXT_pipeline_properties)\nextern PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT;\n#endif /* defined(VK_EXT_pipeline_properties) */\n#if defined(VK_EXT_private_data)\nextern PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;\nextern PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;\nextern PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;\nextern PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;\n#endif /* defined(VK_EXT_private_data) */\n#if defined(VK_EXT_sample_locations)\nextern PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;\nextern 
PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT;\n#endif /* defined(VK_EXT_sample_locations) */\n#if defined(VK_EXT_shader_module_identifier)\nextern PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT;\nextern PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT;\n#endif /* defined(VK_EXT_shader_module_identifier) */\n#if defined(VK_EXT_shader_object)\nextern PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT;\nextern PFN_vkCreateShadersEXT vkCreateShadersEXT;\nextern PFN_vkDestroyShaderEXT vkDestroyShaderEXT;\nextern PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT;\n#endif /* defined(VK_EXT_shader_object) */\n#if defined(VK_EXT_swapchain_maintenance1)\nextern PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT;\n#endif /* defined(VK_EXT_swapchain_maintenance1) */\n#if defined(VK_EXT_tooling_info)\nextern PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT;\n#endif /* defined(VK_EXT_tooling_info) */\n#if defined(VK_EXT_transform_feedback)\nextern PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;\nextern PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;\nextern PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT;\nextern PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT;\nextern PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT;\nextern PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT;\n#endif /* defined(VK_EXT_transform_feedback) */\n#if defined(VK_EXT_validation_cache)\nextern PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT;\nextern PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;\nextern PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;\nextern PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;\n#endif /* defined(VK_EXT_validation_cache) */\n#if defined(VK_FUCHSIA_buffer_collection)\nextern 
PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA;\nextern PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA;\nextern PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA;\nextern PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA;\nextern PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA;\n#endif /* defined(VK_FUCHSIA_buffer_collection) */\n#if defined(VK_FUCHSIA_external_memory)\nextern PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;\nextern PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;\n#endif /* defined(VK_FUCHSIA_external_memory) */\n#if defined(VK_FUCHSIA_external_semaphore)\nextern PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;\nextern PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;\n#endif /* defined(VK_FUCHSIA_external_semaphore) */\n#if defined(VK_FUCHSIA_imagepipe_surface)\nextern PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA;\n#endif /* defined(VK_FUCHSIA_imagepipe_surface) */\n#if defined(VK_GGP_stream_descriptor_surface)\nextern PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP;\n#endif /* defined(VK_GGP_stream_descriptor_surface) */\n#if defined(VK_GOOGLE_display_timing)\nextern PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;\nextern PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;\n#endif /* defined(VK_GOOGLE_display_timing) */\n#if defined(VK_HUAWEI_cluster_culling_shader)\nextern PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI;\nextern PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI;\n#endif /* defined(VK_HUAWEI_cluster_culling_shader) */\n#if defined(VK_HUAWEI_invocation_mask)\nextern PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;\n#endif /* 
defined(VK_HUAWEI_invocation_mask) */\n#if defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2\nextern PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;\n#endif /* defined(VK_HUAWEI_subpass_shading) && VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION >= 2 */\n#if defined(VK_HUAWEI_subpass_shading)\nextern PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;\n#endif /* defined(VK_HUAWEI_subpass_shading) */\n#if defined(VK_INTEL_performance_query)\nextern PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;\nextern PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;\nextern PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;\nextern PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;\nextern PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;\nextern PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;\nextern PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;\nextern PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;\nextern PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;\n#endif /* defined(VK_INTEL_performance_query) */\n#if defined(VK_KHR_acceleration_structure)\nextern PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;\nextern PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;\nextern PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;\nextern PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;\nextern PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;\nextern PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;\nextern PFN_vkCmdWriteAccelerationStructuresPropertiesKHR 
vkCmdWriteAccelerationStructuresPropertiesKHR;\nextern PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;\nextern PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;\nextern PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;\nextern PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;\nextern PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;\nextern PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;\nextern PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;\nextern PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;\nextern PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;\n#endif /* defined(VK_KHR_acceleration_structure) */\n#if defined(VK_KHR_android_surface)\nextern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;\n#endif /* defined(VK_KHR_android_surface) */\n#if defined(VK_KHR_bind_memory2)\nextern PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;\nextern PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;\n#endif /* defined(VK_KHR_bind_memory2) */\n#if defined(VK_KHR_buffer_device_address)\nextern PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;\nextern PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;\nextern PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;\n#endif /* defined(VK_KHR_buffer_device_address) */\n#if defined(VK_KHR_calibrated_timestamps)\nextern PFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR;\nextern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR vkGetPhysicalDeviceCalibrateableTimeDomainsKHR;\n#endif /* defined(VK_KHR_calibrated_timestamps) */\n#if defined(VK_KHR_cooperative_matrix)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR 
vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR;\n#endif /* defined(VK_KHR_cooperative_matrix) */\n#if defined(VK_KHR_copy_commands2)\nextern PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;\nextern PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;\nextern PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;\nextern PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;\nextern PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;\nextern PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;\n#endif /* defined(VK_KHR_copy_commands2) */\n#if defined(VK_KHR_create_renderpass2)\nextern PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;\nextern PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;\nextern PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;\nextern PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;\n#endif /* defined(VK_KHR_create_renderpass2) */\n#if defined(VK_KHR_deferred_host_operations)\nextern PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;\nextern PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;\nextern PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;\nextern PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;\nextern PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;\n#endif /* defined(VK_KHR_deferred_host_operations) */\n#if defined(VK_KHR_descriptor_update_template)\nextern PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;\nextern PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;\nextern PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;\n#endif /* defined(VK_KHR_descriptor_update_template) */\n#if defined(VK_KHR_device_group)\nextern PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR;\nextern PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR;\nextern PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR;\n#endif /* defined(VK_KHR_device_group) */\n#if 
defined(VK_KHR_device_group_creation)\nextern PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR;\n#endif /* defined(VK_KHR_device_group_creation) */\n#if defined(VK_KHR_display)\nextern PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR;\nextern PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR;\nextern PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR;\nextern PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR;\nextern PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR;\nextern PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR;\nextern PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR;\n#endif /* defined(VK_KHR_display) */\n#if defined(VK_KHR_display_swapchain)\nextern PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR;\n#endif /* defined(VK_KHR_display_swapchain) */\n#if defined(VK_KHR_draw_indirect_count)\nextern PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR;\nextern PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR;\n#endif /* defined(VK_KHR_draw_indirect_count) */\n#if defined(VK_KHR_dynamic_rendering)\nextern PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR;\nextern PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR;\n#endif /* defined(VK_KHR_dynamic_rendering) */\n#if defined(VK_KHR_dynamic_rendering_local_read)\nextern PFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR;\nextern PFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR;\n#endif /* defined(VK_KHR_dynamic_rendering_local_read) */\n#if defined(VK_KHR_external_fence_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR;\n#endif /* defined(VK_KHR_external_fence_capabilities) */\n#if defined(VK_KHR_external_fence_fd)\nextern PFN_vkGetFenceFdKHR 
vkGetFenceFdKHR;\nextern PFN_vkImportFenceFdKHR vkImportFenceFdKHR;\n#endif /* defined(VK_KHR_external_fence_fd) */\n#if defined(VK_KHR_external_fence_win32)\nextern PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR;\nextern PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR;\n#endif /* defined(VK_KHR_external_fence_win32) */\n#if defined(VK_KHR_external_memory_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_capabilities) */\n#if defined(VK_KHR_external_memory_fd)\nextern PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR;\nextern PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_fd) */\n#if defined(VK_KHR_external_memory_win32)\nextern PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR;\nextern PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR;\n#endif /* defined(VK_KHR_external_memory_win32) */\n#if defined(VK_KHR_external_semaphore_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR;\n#endif /* defined(VK_KHR_external_semaphore_capabilities) */\n#if defined(VK_KHR_external_semaphore_fd)\nextern PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;\nextern PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;\n#endif /* defined(VK_KHR_external_semaphore_fd) */\n#if defined(VK_KHR_external_semaphore_win32)\nextern PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;\nextern PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;\n#endif /* defined(VK_KHR_external_semaphore_win32) */\n#if defined(VK_KHR_fragment_shading_rate)\nextern PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;\nextern PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR;\n#endif /* defined(VK_KHR_fragment_shading_rate) */\n#if 
defined(VK_KHR_get_display_properties2)\nextern PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR;\nextern PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR;\nextern PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR vkGetPhysicalDeviceDisplayPlaneProperties2KHR;\nextern PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR;\n#endif /* defined(VK_KHR_get_display_properties2) */\n#if defined(VK_KHR_get_memory_requirements2)\nextern PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;\nextern PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;\nextern PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;\n#endif /* defined(VK_KHR_get_memory_requirements2) */\n#if defined(VK_KHR_get_physical_device_properties2)\nextern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;\nextern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR;\nextern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR;\nextern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;\nextern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR;\nextern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR;\nextern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR;\n#endif /* defined(VK_KHR_get_physical_device_properties2) */\n#if defined(VK_KHR_get_surface_capabilities2)\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR;\n#endif /* defined(VK_KHR_get_surface_capabilities2) */\n#if defined(VK_KHR_line_rasterization)\nextern PFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR;\n#endif /* 
defined(VK_KHR_line_rasterization) */\n#if defined(VK_KHR_maintenance1)\nextern PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;\n#endif /* defined(VK_KHR_maintenance1) */\n#if defined(VK_KHR_maintenance3)\nextern PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;\n#endif /* defined(VK_KHR_maintenance3) */\n#if defined(VK_KHR_maintenance4)\nextern PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR;\nextern PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR;\nextern PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR;\n#endif /* defined(VK_KHR_maintenance4) */\n#if defined(VK_KHR_maintenance5)\nextern PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR;\nextern PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR;\nextern PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR;\nextern PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR;\n#endif /* defined(VK_KHR_maintenance5) */\n#if defined(VK_KHR_maintenance6)\nextern PFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR;\nextern PFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR;\n#endif /* defined(VK_KHR_maintenance6) */\n#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor)\nextern PFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR;\nextern PFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR;\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer)\nextern PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT;\nextern PFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT;\n#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */\n#if defined(VK_KHR_map_memory2)\nextern PFN_vkMapMemory2KHR 
vkMapMemory2KHR;\nextern PFN_vkUnmapMemory2KHR vkUnmapMemory2KHR;\n#endif /* defined(VK_KHR_map_memory2) */\n#if defined(VK_KHR_performance_query)\nextern PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;\nextern PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR;\nextern PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR;\nextern PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;\n#endif /* defined(VK_KHR_performance_query) */\n#if defined(VK_KHR_pipeline_binary)\nextern PFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR;\nextern PFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR;\nextern PFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR;\nextern PFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR;\nextern PFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR;\n#endif /* defined(VK_KHR_pipeline_binary) */\n#if defined(VK_KHR_pipeline_executable_properties)\nextern PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;\nextern PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;\nextern PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;\n#endif /* defined(VK_KHR_pipeline_executable_properties) */\n#if defined(VK_KHR_present_wait)\nextern PFN_vkWaitForPresentKHR vkWaitForPresentKHR;\n#endif /* defined(VK_KHR_present_wait) */\n#if defined(VK_KHR_present_wait2)\nextern PFN_vkWaitForPresent2KHR vkWaitForPresent2KHR;\n#endif /* defined(VK_KHR_present_wait2) */\n#if defined(VK_KHR_push_descriptor)\nextern PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;\n#endif /* defined(VK_KHR_push_descriptor) */\n#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline)\nextern PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR;\n#endif /* 
defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_ray_tracing_pipeline)\nextern PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;\nextern PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;\nextern PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;\nextern PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;\nextern PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;\nextern PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;\nextern PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;\n#endif /* defined(VK_KHR_ray_tracing_pipeline) */\n#if defined(VK_KHR_sampler_ycbcr_conversion)\nextern PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;\nextern PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;\n#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */\n#if defined(VK_KHR_shared_presentable_image)\nextern PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR;\n#endif /* defined(VK_KHR_shared_presentable_image) */\n#if defined(VK_KHR_surface)\nextern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;\nextern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;\n#endif /* defined(VK_KHR_surface) */\n#if defined(VK_KHR_swapchain)\nextern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;\nextern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;\nextern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;\nextern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;\nextern PFN_vkQueuePresentKHR vkQueuePresentKHR;\n#endif /* 
defined(VK_KHR_swapchain) */\n#if defined(VK_KHR_swapchain_maintenance1)\nextern PFN_vkReleaseSwapchainImagesKHR vkReleaseSwapchainImagesKHR;\n#endif /* defined(VK_KHR_swapchain_maintenance1) */\n#if defined(VK_KHR_synchronization2)\nextern PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;\nextern PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;\nextern PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;\nextern PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;\nextern PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;\nextern PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;\n#endif /* defined(VK_KHR_synchronization2) */\n#if defined(VK_KHR_timeline_semaphore)\nextern PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;\nextern PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;\nextern PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;\n#endif /* defined(VK_KHR_timeline_semaphore) */\n#if defined(VK_KHR_video_decode_queue)\nextern PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;\n#endif /* defined(VK_KHR_video_decode_queue) */\n#if defined(VK_KHR_video_encode_queue)\nextern PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;\nextern PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR;\nextern PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR;\n#endif /* defined(VK_KHR_video_encode_queue) */\n#if defined(VK_KHR_video_queue)\nextern PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;\nextern PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;\nextern PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;\nextern PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;\nextern PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;\nextern PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;\nextern PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;\nextern PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;\nextern 
PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR;\nextern PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR;\nextern PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;\nextern PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;\n#endif /* defined(VK_KHR_video_queue) */\n#if defined(VK_KHR_wayland_surface)\nextern PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR;\n#endif /* defined(VK_KHR_wayland_surface) */\n#if defined(VK_KHR_win32_surface)\nextern PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR;\nextern PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR;\n#endif /* defined(VK_KHR_win32_surface) */\n#if defined(VK_KHR_xcb_surface)\nextern PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR;\n#endif /* defined(VK_KHR_xcb_surface) */\n#if defined(VK_KHR_xlib_surface)\nextern PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR;\nextern PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR;\n#endif /* defined(VK_KHR_xlib_surface) */\n#if defined(VK_MVK_ios_surface)\nextern PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK;\n#endif /* defined(VK_MVK_ios_surface) */\n#if defined(VK_MVK_macos_surface)\nextern PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK;\n#endif /* defined(VK_MVK_macos_surface) */\n#if defined(VK_NN_vi_surface)\nextern PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN;\n#endif /* defined(VK_NN_vi_surface) */\n#if defined(VK_NVX_binary_import)\nextern PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;\nextern PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;\nextern PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;\nextern 
PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;\nextern PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;\n#endif /* defined(VK_NVX_binary_import) */\n#if defined(VK_NVX_image_view_handle)\nextern PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;\n#endif /* defined(VK_NVX_image_view_handle) */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3\nextern PFN_vkGetImageViewHandle64NVX vkGetImageViewHandle64NVX;\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 3 */\n#if defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2\nextern PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;\n#endif /* defined(VK_NVX_image_view_handle) && VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_acquire_winrt_display)\nextern PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV;\nextern PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV;\n#endif /* defined(VK_NV_acquire_winrt_display) */\n#if defined(VK_NV_clip_space_w_scaling)\nextern PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;\n#endif /* defined(VK_NV_clip_space_w_scaling) */\n#if defined(VK_NV_cluster_acceleration_structure)\nextern PFN_vkCmdBuildClusterAccelerationStructureIndirectNV vkCmdBuildClusterAccelerationStructureIndirectNV;\nextern PFN_vkGetClusterAccelerationStructureBuildSizesNV vkGetClusterAccelerationStructureBuildSizesNV;\n#endif /* defined(VK_NV_cluster_acceleration_structure) */\n#if defined(VK_NV_cooperative_matrix)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV;\n#endif /* defined(VK_NV_cooperative_matrix) */\n#if defined(VK_NV_cooperative_matrix2)\nextern PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV;\n#endif /* defined(VK_NV_cooperative_matrix2) */\n#if defined(VK_NV_cooperative_vector)\nextern PFN_vkCmdConvertCooperativeVectorMatrixNV 
vkCmdConvertCooperativeVectorMatrixNV;\nextern PFN_vkConvertCooperativeVectorMatrixNV vkConvertCooperativeVectorMatrixNV;\nextern PFN_vkGetPhysicalDeviceCooperativeVectorPropertiesNV vkGetPhysicalDeviceCooperativeVectorPropertiesNV;\n#endif /* defined(VK_NV_cooperative_vector) */\n#if defined(VK_NV_copy_memory_indirect)\nextern PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV;\nextern PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV;\n#endif /* defined(VK_NV_copy_memory_indirect) */\n#if defined(VK_NV_coverage_reduction_mode)\nextern PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV;\n#endif /* defined(VK_NV_coverage_reduction_mode) */\n#if defined(VK_NV_cuda_kernel_launch)\nextern PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV;\nextern PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV;\nextern PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV;\nextern PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV;\nextern PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV;\nextern PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV;\n#endif /* defined(VK_NV_cuda_kernel_launch) */\n#if defined(VK_NV_device_diagnostic_checkpoints)\nextern PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;\nextern PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) */\n#if defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2))\nextern PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;\n#endif /* defined(VK_NV_device_diagnostic_checkpoints) && (defined(VK_VERSION_1_3) || defined(VK_KHR_synchronization2)) */\n#if defined(VK_NV_device_generated_commands)\nextern PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;\nextern PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;\nextern PFN_vkCmdPreprocessGeneratedCommandsNV 
vkCmdPreprocessGeneratedCommandsNV;\nextern PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;\nextern PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;\nextern PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;\n#endif /* defined(VK_NV_device_generated_commands) */\n#if defined(VK_NV_device_generated_commands_compute)\nextern PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV;\nextern PFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV;\nextern PFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV;\n#endif /* defined(VK_NV_device_generated_commands_compute) */\n#if defined(VK_NV_external_compute_queue)\nextern PFN_vkCreateExternalComputeQueueNV vkCreateExternalComputeQueueNV;\nextern PFN_vkDestroyExternalComputeQueueNV vkDestroyExternalComputeQueueNV;\nextern PFN_vkGetExternalComputeQueueDataNV vkGetExternalComputeQueueDataNV;\n#endif /* defined(VK_NV_external_compute_queue) */\n#if defined(VK_NV_external_memory_capabilities)\nextern PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV;\n#endif /* defined(VK_NV_external_memory_capabilities) */\n#if defined(VK_NV_external_memory_rdma)\nextern PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;\n#endif /* defined(VK_NV_external_memory_rdma) */\n#if defined(VK_NV_external_memory_win32)\nextern PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;\n#endif /* defined(VK_NV_external_memory_win32) */\n#if defined(VK_NV_fragment_shading_rate_enums)\nextern PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;\n#endif /* defined(VK_NV_fragment_shading_rate_enums) */\n#if defined(VK_NV_low_latency2)\nextern PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV;\nextern PFN_vkLatencySleepNV vkLatencySleepNV;\nextern PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV;\nextern 
PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV;\nextern PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV;\n#endif /* defined(VK_NV_low_latency2) */\n#if defined(VK_NV_memory_decompression)\nextern PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV;\nextern PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV;\n#endif /* defined(VK_NV_memory_decompression) */\n#if defined(VK_NV_mesh_shader)\nextern PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;\nextern PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV;\n#endif /* defined(VK_NV_mesh_shader) */\n#if defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2))\nextern PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;\n#endif /* defined(VK_NV_mesh_shader) && (defined(VK_KHR_draw_indirect_count) || defined(VK_VERSION_1_2)) */\n#if defined(VK_NV_optical_flow)\nextern PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV;\nextern PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV;\nextern PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV;\nextern PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV;\nextern PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV vkGetPhysicalDeviceOpticalFlowImageFormatsNV;\n#endif /* defined(VK_NV_optical_flow) */\n#if defined(VK_NV_partitioned_acceleration_structure)\nextern PFN_vkCmdBuildPartitionedAccelerationStructuresNV vkCmdBuildPartitionedAccelerationStructuresNV;\nextern PFN_vkGetPartitionedAccelerationStructuresBuildSizesNV vkGetPartitionedAccelerationStructuresBuildSizesNV;\n#endif /* defined(VK_NV_partitioned_acceleration_structure) */\n#if defined(VK_NV_ray_tracing)\nextern PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV;\nextern PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV;\nextern PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV;\nextern PFN_vkCmdTraceRaysNV 
vkCmdTraceRaysNV;\nextern PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV;\nextern PFN_vkCompileDeferredNV vkCompileDeferredNV;\nextern PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV;\nextern PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV;\nextern PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV;\nextern PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV;\nextern PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV;\nextern PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV;\n#endif /* defined(VK_NV_ray_tracing) */\n#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2\nextern PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV;\n#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */\n#if defined(VK_NV_scissor_exclusive)\nextern PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV;\n#endif /* defined(VK_NV_scissor_exclusive) */\n#if defined(VK_NV_shading_rate_image)\nextern PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;\nextern PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;\nextern PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;\n#endif /* defined(VK_NV_shading_rate_image) */\n#if defined(VK_OHOS_surface)\nextern PFN_vkCreateSurfaceOHOS vkCreateSurfaceOHOS;\n#endif /* defined(VK_OHOS_surface) */\n#if defined(VK_QCOM_tile_memory_heap)\nextern PFN_vkCmdBindTileMemoryQCOM vkCmdBindTileMemoryQCOM;\n#endif /* defined(VK_QCOM_tile_memory_heap) */\n#if defined(VK_QCOM_tile_properties)\nextern PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM;\nextern PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM;\n#endif /* defined(VK_QCOM_tile_properties) */\n#if 
defined(VK_QCOM_tile_shading)\nextern PFN_vkCmdBeginPerTileExecutionQCOM vkCmdBeginPerTileExecutionQCOM;\nextern PFN_vkCmdDispatchTileQCOM vkCmdDispatchTileQCOM;\nextern PFN_vkCmdEndPerTileExecutionQCOM vkCmdEndPerTileExecutionQCOM;\n#endif /* defined(VK_QCOM_tile_shading) */\n#if defined(VK_QNX_external_memory_screen_buffer)\nextern PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX;\n#endif /* defined(VK_QNX_external_memory_screen_buffer) */\n#if defined(VK_QNX_screen_surface)\nextern PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX;\nextern PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX;\n#endif /* defined(VK_QNX_screen_surface) */\n#if defined(VK_VALVE_descriptor_set_host_mapping)\nextern PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE;\nextern PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE;\n#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */\n#if (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control))\nextern PFN_vkCmdSetDepthClampRangeEXT vkCmdSetDepthClampRangeEXT;\n#endif /* (defined(VK_EXT_depth_clamp_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clamp_control)) */\n#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;\nextern PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;\nextern PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;\nextern PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;\nextern PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;\nextern PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;\nextern PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;\nextern PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;\nextern PFN_vkCmdSetScissorWithCountEXT 
vkCmdSetScissorWithCountEXT;\nextern PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;\nextern PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;\nextern PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;\nextern PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;\nextern PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;\nextern PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;\nextern PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT;\nextern PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT;\nextern PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT;\nextern PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT;\nextern PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT;\nextern PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT;\nextern PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT;\nextern PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT;\nextern PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT;\nextern PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object))\nextern PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && 
(defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback))\nextern PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization))\nextern PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT;\nextern PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable))\nextern PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations))\nextern PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && 
defined(VK_EXT_blend_operation_advanced))\nextern PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex))\nextern PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization))\nextern PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT;\nextern PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control))\nextern PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling))\nextern PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */\n#if 
(defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle))\nextern PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color))\nextern PFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV;\nextern PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples))\nextern PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV;\nextern PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV;\nextern PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image))\nextern PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || 
(defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))\nextern PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */\n#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))\nextern PFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV;\n#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */\n#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))\nextern PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT;\n#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */\n#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))\nextern PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;\n#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */\n#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))\nextern PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;\n#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\nextern PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;\nextern 
PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;\nextern PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR;\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))\nextern PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;\n#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */\n/* VOLK_GENERATE_PROTOTYPES_H */\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n\n#ifdef VOLK_IMPLEMENTATION\n#undef VOLK_IMPLEMENTATION\n/* Prevent tools like dependency checkers from detecting a cyclic dependency */\n#define VOLK_SOURCE \"volk.c\"\n#include VOLK_SOURCE\n#endif\n\n/**\n * Copyright (c) 2018-2025 Arseny Kapoulkine\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to deal\n * in the Software without restriction, including without limitation the rights\n * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in all\n * copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n * SOFTWARE.\n*/\n/* clang-format on */\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_pipelayout.h", "#pragma once\n\n#include \n\n#include \n\n#include \"dxvk_hash.h\"\n\n#include \"util_math.h\"\n#include \"util_bit.h\"\n#include \"util_flags.h\"\n\nnamespace dxvk {\n\n class DxvkDevice;\n class DxvkPipelineManager;\n\n /**\n * \\brief Order-invariant atomic access operation\n *\n * Information used to optimize barriers when a resource\n * is accessed exlusively via order-invariant stores.\n */\n struct DxvkAccessOp {\n enum OpType : uint16_t {\n None = 0x0u,\n Or = 0x1u,\n And = 0x2u,\n Xor = 0x3u,\n Add = 0x4u,\n IMin = 0x5u,\n IMax = 0x6u,\n UMin = 0x7u,\n UMax = 0x8u,\n\n StoreF = 0xdu,\n StoreUi = 0xeu,\n StoreSi = 0xfu,\n };\n\n DxvkAccessOp() = default;\n DxvkAccessOp(OpType t)\n : op(uint16_t(t)) { }\n\n DxvkAccessOp(OpType t, uint16_t constant)\n : op(uint16_t(t) | (constant << 4u)) { }\n\n uint16_t op = 0u;\n\n bool operator == (const DxvkAccessOp& t) const { return op == t.op; }\n bool operator != (const DxvkAccessOp& t) const { return op != t.op; }\n\n template, bool> = true>\n explicit operator T() const { return op; }\n };\n\n static_assert(sizeof(DxvkAccessOp) == sizeof(uint16_t));\n\n /**\n * \\brief Binding info\n *\n * Stores metadata for a single binding in\n * a given shader, or for the whole pipeline.\n */\n struct DxvkBindingInfo {\n VkDescriptorType descriptorType = VK_DESCRIPTOR_TYPE_MAX_ENUM; ///< Vulkan descriptor type\n uint32_t resourceBinding = 0u; ///< API binding slot for the resource\n VkImageViewType viewType = VK_IMAGE_VIEW_TYPE_MAX_ENUM; ///< Image view type\n VkShaderStageFlagBits stage = VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM; ///< Shader stage\n VkAccessFlags access = 
0u; ///< Access mask for the resource\n DxvkAccessOp accessOp = DxvkAccessOp::None; ///< Order-invariant store type, if any\n bool uboSet = false; ///< Whether to include this in the UBO set\n bool isMultisampled = false; ///< Multisampled binding\n\n /**\n * \\brief Computes descriptor set index for the given binding\n *\n * This is determines based on the shader stages that use the binding.\n * \\returns Descriptor set index\n */\n uint32_t computeSetIndex() const;\n\n /**\n * \\brief Numeric value of the binding\n *\n * Used when sorting bindings.\n * \\returns Numeric value\n */\n uint32_t value() const;\n\n /**\n * \\brief Checks for equality\n *\n * \\param [in] other Binding to compare to\n * \\returns \\c true if both bindings are equal\n */\n bool eq(const DxvkBindingInfo& other) const;\n\n /**\n * \\brief Hashes binding info\n * \\returns Binding hash\n */\n size_t hash() const;\n\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_decoder.h", "class DxbcRegModifier {\n public:\n private:\n uint8_t m_mask = 0;\n public:\n private:\n uint8_t m_mask = 0;\n public:\n private:\n uint32_t m_bits;\n public:\n const uint32_t* ptrAt(uint32_t id) const;\n uint32_t at(uint32_t id) const {\n if (m_ptr + id >= m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return m_ptr[id];\n }\n uint32_t read() {\n if (m_ptr >= m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return *(m_ptr++);\n }\n DxbcCodeSlice take(uint32_t n) const {\n if (m_ptr + n > m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return DxbcCodeSlice(m_ptr, m_ptr + n);\n }\n DxbcCodeSlice skip(uint32_t n) const {\n if (m_ptr + n > m_end)\n throw DxvkError(\"DxbcCodeSlice: End of stream\");\n return DxbcCodeSlice(m_ptr + n, m_end);\n }\n private:\n const uint32_t* m_ptr = nullptr;\n const uint32_t* m_end = nullptr;\n public:\n void decodeInstruction(DxbcCodeSlice& code) {\n const uint32_t token0 = code.at(0);\n \n // Initialize the instruction structure. 
Some of these values\n // may not get written otherwise while decoding the instruction.\n m_instruction.op = static_cast(bit::extract(token0, 0, 10));\n m_instruction.opClass = DxbcInstClass::Undefined;\n m_instruction.sampleControls = { 0, 0, 0 };\n m_instruction.dstCount = 0;\n m_instruction.srcCount = 0;\n m_instruction.immCount = 0;\n m_instruction.dst = m_dstOperands.data();\n m_instruction.src = m_srcOperands.data();\n m_instruction.imm = m_immOperands.data();\n m_instruction.customDataType = DxbcCustomDataClass::Comment;\n m_instruction.customDataSize = 0;\n m_instruction.customData = nullptr;\n \n // Reset the index pointer, which may still contain\n // a non-zero value from the previous iteration\n m_indexId = 0;\n \n // Instruction length, in DWORDs. This includes the token\n // itself and any other prefix that an instruction may have.\n uint32_t length = 0;\n \n if (m_instruction.op == DxbcOpcode::CustomData) {\n length = code.at(1);\n this->decodeCustomData(code.take(length));\n } else {\n length = bit::extract(token0, 24, 30);\n this->decodeOperation(code.take(length));\n }\n \n // Advance the caller's slice to the next token so that\n // they can make consecutive calls to decodeInstruction()\n code = code.skip(length);\n }\n private:\n DxbcShaderInstruction m_instruction;\n std::array m_dstOperands;\n std::array m_srcOperands;\n std::array m_immOperands;\n std::array m_indices;\n uint32_t m_indexId = 0;\n void decodeCustomData(DxbcCodeSlice code) {\n const uint32_t blockLength = code.at(1);\n \n if (blockLength < 2) {\n Logger::err(\"DxbcDecodeContext: Invalid custom data block\");\n return;\n }\n \n // Custom data blocks have their own instruction class\n m_instruction.op = DxbcOpcode::CustomData;\n m_instruction.opClass = DxbcInstClass::CustomData;\n \n // We'll point into the code buffer rather than making a copy\n m_instruction.customDataType = static_cast(\n bit::extract(code.at(0), 11, 31));\n m_instruction.customDataSize = blockLength - 2;\n 
m_instruction.customData = code.ptrAt(2);\n }\n void decodeOperation(DxbcCodeSlice code) {\n uint32_t token = code.read();\n \n // Result modifiers, which are applied to common ALU ops\n m_instruction.modifiers.saturate = !!bit::extract(token, 13, 13);\n m_instruction.modifiers.precise = !!bit::extract(token, 19, 22);\n \n // Opcode controls. It will depend on the\n // opcode itself which ones are valid.\n m_instruction.controls = DxbcShaderOpcodeControls(token);\n \n // Process extended opcode tokens\n while (bit::extract(token, 31, 31)) {\n token = code.read();\n \n const DxbcExtOpcode extOpcode\n = static_cast(bit::extract(token, 0, 5));\n \n switch (extOpcode) {\n case DxbcExtOpcode::SampleControls: {\n struct {\n int u : 4;\n int v : 4;\n int w : 4;\n } aoffimmi;\n \n aoffimmi.u = bit::extract(token, 9, 12);\n aoffimmi.v = bit::extract(token, 13, 16);\n aoffimmi.w = bit::extract(token, 17, 20);\n \n // Four-bit signed numbers, sign-extend them\n m_instruction.sampleControls.u = aoffimmi.u;\n m_instruction.sampleControls.v = aoffimmi.v;\n m_instruction.sampleControls.w = aoffimmi.w;\n } break;\n \n case DxbcExtOpcode::ResourceDim:\n case DxbcExtOpcode::ResourceReturnType:\n break; // part of resource description\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled extended opcode: \",\n extOpcode));\n }\n }\n \n // Retrieve the instruction format in order to parse the\n // operands. Doing this mostly automatically means that\n // the compiler can rely on the operands being valid.\n const DxbcInstFormat format = dxbcInstructionFormat(m_instruction.op);\n m_instruction.opClass = format.instructionClass;\n \n for (uint32_t i = 0; i < format.operandCount; i++)\n this->decodeOperand(code, format.operands[i]);\n }\n void decodeComponentSelection(DxbcRegister& reg, uint32_t token) {\n // Pick the correct component selection mode based on the\n // component count. 
We'll simplify this here so that the\n // compiler can assume that everything is a 4D vector.\n reg.componentCount = static_cast(bit::extract(token, 0, 1));\n \n switch (reg.componentCount) {\n // No components - used for samplers etc.\n case DxbcComponentCount::Component0:\n reg.mask = DxbcRegMask(false, false, false, false);\n reg.swizzle = DxbcRegSwizzle(0, 0, 0, 0);\n break;\n \n // One component - used for immediates\n // and a few built-in registers.\n case DxbcComponentCount::Component1:\n reg.mask = DxbcRegMask(true, false, false, false);\n reg.swizzle = DxbcRegSwizzle(0, 0, 0, 0);\n break;\n \n // Four components - everything else. This requires us\n // to actually parse the component selection mode.\n case DxbcComponentCount::Component4: {\n const DxbcRegMode componentMode =\n static_cast(bit::extract(token, 2, 3));\n \n switch (componentMode) {\n // Write mask for destination operands\n case DxbcRegMode::Mask:\n reg.mask = bit::extract(token, 4, 7);\n reg.swizzle = DxbcRegSwizzle(0, 1, 2, 3);\n break;\n \n // Swizzle for source operands (including resources)\n case DxbcRegMode::Swizzle:\n reg.mask = DxbcRegMask(true, true, true, true);\n reg.swizzle = DxbcRegSwizzle(\n bit::extract(token, 4, 5),\n bit::extract(token, 6, 7),\n bit::extract(token, 8, 9),\n bit::extract(token, 10, 11));\n break;\n \n // Selection of one component. 
We can generate both a\n // mask and a swizzle for this so that the compiler\n // won't have to deal with this case specifically.\n case DxbcRegMode::Select1: {\n const uint32_t n = bit::extract(token, 4, 5);\n reg.mask = DxbcRegMask(n == 0, n == 1, n == 2, n == 3);\n reg.swizzle = DxbcRegSwizzle(n, n, n, n);\n } break;\n \n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component selection mode\");\n }\n } break;\n \n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component count\");\n }\n }\n void decodeOperandExtensions(DxbcCodeSlice& code, DxbcRegister& reg, uint32_t token) {\n while (bit::extract(token, 31, 31)) {\n token = code.read();\n \n // Type of the extended operand token\n const DxbcOperandExt extTokenType =\n static_cast(bit::extract(token, 0, 5));\n \n switch (extTokenType) {\n // Operand modifiers, which are used to manipulate the\n // value of a source operand during the load operation\n case DxbcOperandExt::OperandModifier:\n reg.modifiers = bit::extract(token, 6, 13);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled extended operand token: \",\n extTokenType));\n }\n }\n }\n void decodeOperandImmediates(DxbcCodeSlice& code, DxbcRegister& reg) {\n if (reg.type == DxbcOperandType::Imm32\n || reg.type == DxbcOperandType::Imm64) {\n switch (reg.componentCount) {\n // This is commonly used if only one vector\n // component is involved in an operation\n case DxbcComponentCount::Component1: {\n reg.imm.u32_1 = code.read();\n } break;\n \n // Typical four-component vector\n case DxbcComponentCount::Component4: {\n reg.imm.u32_4[0] = code.read();\n reg.imm.u32_4[1] = code.read();\n reg.imm.u32_4[2] = code.read();\n reg.imm.u32_4[3] = code.read();\n } break;\n\n default:\n Logger::warn(\"DxbcDecodeContext: Invalid component count for immediate operand\");\n }\n }\n }\n void decodeOperandIndex(DxbcCodeSlice& code, DxbcRegister& reg, uint32_t token) {\n reg.idxDim = bit::extract(token, 20, 21);\n \n for 
(uint32_t i = 0; i < reg.idxDim; i++) {\n // An index can be encoded in various different ways\n const DxbcOperandIndexRepresentation repr =\n static_cast(\n bit::extract(token, 22 + 3 * i, 24 + 3 * i));\n \n switch (repr) {\n case DxbcOperandIndexRepresentation::Imm32:\n reg.idx[i].offset = static_cast(code.read());\n reg.idx[i].relReg = nullptr;\n break;\n \n case DxbcOperandIndexRepresentation::Relative:\n reg.idx[i].offset = 0;\n reg.idx[i].relReg = &m_indices.at(m_indexId);\n \n this->decodeRegister(code,\n m_indices.at(m_indexId++),\n DxbcScalarType::Sint32);\n break;\n \n case DxbcOperandIndexRepresentation::Imm32Relative:\n reg.idx[i].offset = static_cast(code.read());\n reg.idx[i].relReg = &m_indices.at(m_indexId);\n \n this->decodeRegister(code,\n m_indices.at(m_indexId++),\n DxbcScalarType::Sint32);\n break;\n \n default:\n Logger::warn(str::format(\n \"DxbcDecodeContext: Unhandled index representation: \",\n repr));\n }\n }\n }\n void decodeRegister(DxbcCodeSlice& code, DxbcRegister& reg, DxbcScalarType type) {\n const uint32_t token = code.read();\n \n reg.type = static_cast(bit::extract(token, 12, 19));\n reg.dataType = type;\n reg.modifiers = 0;\n reg.idxDim = 0;\n \n for (uint32_t i = 0; i < DxbcMaxRegIndexDim; i++) {\n reg.idx[i].relReg = nullptr;\n reg.idx[i].offset = 0;\n }\n \n this->decodeComponentSelection(reg, token);\n this->decodeOperandExtensions(code, reg, token);\n this->decodeOperandImmediates(code, reg);\n this->decodeOperandIndex(code, reg, token);\n }\n void decodeImm32(DxbcCodeSlice& code, DxbcImmediate& imm, DxbcScalarType type) {\n imm.u32 = code.read();\n }\n void decodeOperand(DxbcCodeSlice& code, const DxbcInstOperandFormat& format) {\n switch (format.kind) {\n case DxbcOperandKind::DstReg: {\n const uint32_t operandId = m_instruction.dstCount++;\n this->decodeRegister(code, m_dstOperands.at(operandId), format.type);\n } break;\n \n case DxbcOperandKind::SrcReg: {\n const uint32_t operandId = m_instruction.srcCount++;\n 
this->decodeRegister(code, m_srcOperands.at(operandId), format.type);\n } break;\n \n case DxbcOperandKind::Imm32: {\n const uint32_t operandId = m_instruction.immCount++;\n this->decodeImm32(code, m_immOperands.at(operandId), format.type);\n } break;\n \n default:\n throw DxvkError(\"DxbcDecodeContext: Invalid operand format\");\n }\n }\n};"], ["/lsfg-vk/framegen/src/pool/shaderpool.cpp", "#include \"pool/shaderpool.hpp\"\n#include \"core/shadermodule.hpp\"\n#include \"core/device.hpp\"\n#include \"core/pipeline.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\nusing namespace LSFG::Pool;\n\nCore::ShaderModule ShaderPool::getShader(\n const Core::Device& device, const std::string& name,\n const std::vector>& types) {\n auto it = shaders.find(name);\n if (it != shaders.end())\n return it->second;\n\n // grab the shader\n auto bytecode = this->source(name);\n if (bytecode.empty())\n throw std::runtime_error(\"Shader code is empty: \" + name);\n\n // create the shader module\n Core::ShaderModule shader(device, bytecode, types);\n shaders[name] = shader;\n return shader;\n}\n\nCore::Pipeline ShaderPool::getPipeline(\n const Core::Device& device, const std::string& name) {\n auto it = pipelines.find(name);\n if (it != pipelines.end())\n return it->second;\n\n // grab the shader module\n auto shader = this->getShader(device, name, {});\n\n // create the pipeline\n Core::Pipeline pipeline(device, shader);\n pipelines[name] = pipeline;\n return pipeline;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_enums.h", "#pragma once\n\n#include \"dxbc_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Instruction code listing\n */\n enum class DxbcOpcode : uint32_t {\n Add = 0,\n And = 1,\n Break = 2,\n Breakc = 3,\n Call = 4,\n Callc = 5,\n Case = 6,\n Continue = 7,\n Continuec = 8,\n Cut = 9,\n Default = 10,\n DerivRtx = 11,\n DerivRty = 12,\n Discard = 13,\n Div = 14,\n Dp2 = 15,\n Dp3 = 16,\n Dp4 = 17,\n Else = 18,\n 
Emit = 19,\n EmitThenCut = 20,\n EndIf = 21,\n EndLoop = 22,\n EndSwitch = 23,\n Eq = 24,\n Exp = 25,\n Frc = 26,\n FtoI = 27,\n FtoU = 28,\n Ge = 29,\n IAdd = 30,\n If = 31,\n IEq = 32,\n IGe = 33,\n ILt = 34,\n IMad = 35,\n IMax = 36,\n IMin = 37,\n IMul = 38,\n INe = 39,\n INeg = 40,\n IShl = 41,\n IShr = 42,\n ItoF = 43,\n Label = 44,\n Ld = 45,\n LdMs = 46,\n Log = 47,\n Loop = 48,\n Lt = 49,\n Mad = 50,\n Min = 51,\n Max = 52,\n CustomData = 53,\n Mov = 54,\n Movc = 55,\n Mul = 56,\n Ne = 57,\n Nop = 58,\n Not = 59,\n Or = 60,\n ResInfo = 61,\n Ret = 62,\n Retc = 63,\n RoundNe = 64,\n RoundNi = 65,\n RoundPi = 66,\n RoundZ = 67,\n Rsq = 68,\n Sample = 69,\n SampleC = 70,\n SampleClz = 71,\n SampleL = 72,\n SampleD = 73,\n SampleB = 74,\n Sqrt = 75,\n Switch = 76,\n SinCos = 77,\n UDiv = 78,\n ULt = 79,\n UGe = 80,\n UMul = 81,\n UMad = 82,\n UMax = 83,\n UMin = 84,\n UShr = 85,\n UtoF = 86,\n Xor = 87,\n DclResource = 88,\n DclConstantBuffer = 89,\n DclSampler = 90,\n DclIndexRange = 91,\n DclGsOutputPrimitiveTopology = 92,\n DclGsInputPrimitive = 93,\n DclMaxOutputVertexCount = 94,\n DclInput = 95,\n DclInputSgv = 96,\n DclInputSiv = 97,\n DclInputPs = 98,\n DclInputPsSgv = 99,\n DclInputPsSiv = 100,\n DclOutput = 101,\n DclOutputSgv = 102,\n DclOutputSiv = 103,\n DclTemps = 104,\n DclIndexableTemp = 105,\n DclGlobalFlags = 106,\n Reserved0 = 107,\n Lod = 108,\n Gather4 = 109,\n SamplePos = 110,\n SampleInfo = 111,\n Reserved1 = 112,\n HsDecls = 113,\n HsControlPointPhase = 114,\n HsForkPhase = 115,\n HsJoinPhase = 116,\n EmitStream = 117,\n CutStream = 118,\n EmitThenCutStream = 119,\n InterfaceCall = 120,\n BufInfo = 121,\n DerivRtxCoarse = 122,\n DerivRtxFine = 123,\n DerivRtyCoarse = 124,\n DerivRtyFine = 125,\n Gather4C = 126,\n Gather4Po = 127,\n Gather4PoC = 128,\n Rcp = 129,\n F32toF16 = 130,\n F16toF32 = 131,\n UAddc = 132,\n USubb = 133,\n CountBits = 134,\n FirstBitHi = 135,\n FirstBitLo = 136,\n FirstBitShi = 137,\n UBfe = 138,\n IBfe = 139,\n 
Bfi = 140,\n BfRev = 141,\n Swapc = 142,\n DclStream = 143,\n DclFunctionBody = 144,\n DclFunctionTable = 145,\n DclInterface = 146,\n DclInputControlPointCount = 147,\n DclOutputControlPointCount = 148,\n DclTessDomain = 149,\n DclTessPartitioning = 150,\n DclTessOutputPrimitive = 151,\n DclHsMaxTessFactor = 152,\n DclHsForkPhaseInstanceCount = 153,\n DclHsJoinPhaseInstanceCount = 154,\n DclThreadGroup = 155,\n DclUavTyped = 156,\n DclUavRaw = 157,\n DclUavStructured = 158,\n DclThreadGroupSharedMemoryRaw = 159,\n DclThreadGroupSharedMemoryStructured = 160,\n DclResourceRaw = 161,\n DclResourceStructured = 162,\n LdUavTyped = 163,\n StoreUavTyped = 164,\n LdRaw = 165,\n StoreRaw = 166,\n LdStructured = 167,\n StoreStructured = 168,\n AtomicAnd = 169,\n AtomicOr = 170,\n AtomicXor = 171,\n AtomicCmpStore = 172,\n AtomicIAdd = 173,\n AtomicIMax = 174,\n AtomicIMin = 175,\n AtomicUMax = 176,\n AtomicUMin = 177,\n ImmAtomicAlloc = 178,\n ImmAtomicConsume = 179,\n ImmAtomicIAdd = 180,\n ImmAtomicAnd = 181,\n ImmAtomicOr = 182,\n ImmAtomicXor = 183,\n ImmAtomicExch = 184,\n ImmAtomicCmpExch = 185,\n ImmAtomicIMax = 186,\n ImmAtomicIMin = 187,\n ImmAtomicUMax = 188,\n ImmAtomicUMin = 189,\n Sync = 190,\n DAdd = 191,\n DMax = 192,\n DMin = 193,\n DMul = 194,\n DEq = 195,\n DGe = 196,\n DLt = 197,\n DNe = 198,\n DMov = 199,\n DMovc = 200,\n DtoF = 201,\n FtoD = 202,\n EvalSnapped = 203,\n EvalSampleIndex = 204,\n EvalCentroid = 205,\n DclGsInstanceCount = 206,\n Abort = 207,\n DebugBreak = 208,\n ReservedBegin11_1 = 209,\n DDiv = 210,\n DFma = 211,\n DRcp = 212,\n Msad = 213,\n DtoI = 214,\n DtoU = 215,\n ItoD = 216,\n UtoD = 217,\n ReservedBegin11_2 = 218,\n Gather4S = 219,\n Gather4CS = 220,\n Gather4PoS = 221,\n Gather4PoCS = 222,\n LdS = 223,\n LdMsS = 224,\n LdUavTypedS = 225,\n LdRawS = 226,\n LdStructuredS = 227,\n SampleLS = 228,\n SampleClzS = 229,\n SampleClampS = 230,\n SampleBClampS = 231,\n SampleDClampS = 232,\n SampleCClampS = 233,\n CheckAccessFullyMapped = 
234,\n };\n \n \n /**\n * \\brief Extended opcode\n */\n enum class DxbcExtOpcode : uint32_t {\n Empty = 0,\n SampleControls = 1,\n ResourceDim = 2,\n ResourceReturnType = 3,\n };\n \n \n /**\n * \\brief Operand type\n * \n * Selects the 'register file' from which\n * to retrieve an operand's value.\n */\n enum class DxbcOperandType : uint32_t {\n Temp = 0,\n Input = 1,\n Output = 2,\n IndexableTemp = 3,\n Imm32 = 4,\n Imm64 = 5,\n Sampler = 6,\n Resource = 7,\n ConstantBuffer = 8,\n ImmediateConstantBuffer = 9,\n Label = 10,\n InputPrimitiveId = 11,\n OutputDepth = 12,\n Null = 13,\n Rasterizer = 14,\n OutputCoverageMask = 15,\n Stream = 16,\n FunctionBody = 17,\n FunctionTable = 18,\n Interface = 19,\n FunctionInput = 20,\n FunctionOutput = 21,\n OutputControlPointId = 22,\n InputForkInstanceId = 23,\n InputJoinInstanceId = 24,\n InputControlPoint = 25,\n OutputControlPoint = 26,\n InputPatchConstant = 27,\n InputDomainPoint = 28,\n ThisPointer = 29,\n UnorderedAccessView = 30,\n ThreadGroupSharedMemory = 31,\n InputThreadId = 32,\n InputThreadGroupId = 33,\n InputThreadIdInGroup = 34,\n InputCoverageMask = 35,\n InputThreadIndexInGroup = 36,\n InputGsInstanceId = 37,\n OutputDepthGe = 38,\n OutputDepthLe = 39,\n CycleCounter = 40,\n OutputStencilRef = 41,\n InputInnerCoverage = 42,\n };\n \n \n /**\n * \\brief Number of components\n * \n * Used by operands to determine whether the\n * operand has one, four or zero components.\n */\n enum class DxbcComponentCount : uint32_t {\n Component0 = 0,\n Component1 = 1,\n Component4 = 2,\n };\n \n \n /**\n * \\brief Component selection mode\n * \n * When an operand has four components, the\n * component selection mode deterines which\n * components are used for the operation.\n */\n enum class DxbcRegMode : uint32_t {\n Mask = 0,\n Swizzle = 1,\n Select1 = 2,\n };\n \n \n /**\n * \\brief Index representation\n * \n * Determines how an operand\n * register index is stored.\n */\n enum class DxbcOperandIndexRepresentation : 
uint32_t {\n Imm32 = 0,\n Imm64 = 1,\n Relative = 2,\n Imm32Relative = 3,\n Imm64Relative = 4,\n };\n \n \n /**\n * \\brief Extended operand type\n */\n enum class DxbcOperandExt : uint32_t {\n OperandModifier = 1,\n };\n \n \n /**\n * \\brief Resource dimension\n * The type of a resource.\n */\n enum class DxbcResourceDim : uint32_t {\n Unknown = 0,\n Buffer = 1,\n Texture1D = 2,\n Texture2D = 3,\n Texture2DMs = 4,\n Texture3D = 5,\n TextureCube = 6,\n Texture1DArr = 7,\n Texture2DArr = 8,\n Texture2DMsArr = 9,\n TextureCubeArr = 10,\n RawBuffer = 11,\n StructuredBuffer = 12,\n };\n \n \n /**\n * \\brief Resource return type\n * Data type for resource read ops.\n */\n enum class DxbcResourceReturnType : uint32_t {\n Unorm = 1,\n Snorm = 2,\n Sint = 3,\n Uint = 4,\n Float = 5,\n Mixed = 6, /// ?\n Double = 7,\n Continued = 8, /// ?\n Unused = 9, /// ?\n };\n \n \n /**\n * \\brief Register component type\n * Data type of a register component.\n */\n enum class DxbcRegisterComponentType : uint32_t {\n Unknown = 0,\n Uint32 = 1,\n Sint32 = 2,\n Float32 = 3,\n };\n \n \n /**\n * \\brief Instruction return type\n */\n enum class DxbcInstructionReturnType : uint32_t {\n Float = 0,\n Uint = 1,\n };\n \n \n enum class DxbcSystemValue : uint32_t {\n None = 0,\n Position = 1,\n ClipDistance = 2,\n CullDistance = 3,\n RenderTargetId = 4,\n ViewportId = 5,\n VertexId = 6,\n PrimitiveId = 7,\n InstanceId = 8,\n IsFrontFace = 9,\n SampleIndex = 10,\n FinalQuadUeq0EdgeTessFactor = 11,\n FinalQuadVeq0EdgeTessFactor = 12,\n FinalQuadUeq1EdgeTessFactor = 13,\n FinalQuadVeq1EdgeTessFactor = 14,\n FinalQuadUInsideTessFactor = 15,\n FinalQuadVInsideTessFactor = 16,\n FinalTriUeq0EdgeTessFactor = 17,\n FinalTriVeq0EdgeTessFactor = 18,\n FinalTriWeq0EdgeTessFactor = 19,\n FinalTriInsideTessFactor = 20,\n FinalLineDetailTessFactor = 21,\n FinalLineDensityTessFactor = 22,\n Target = 64,\n Depth = 65,\n Coverage = 66,\n DepthGe = 67,\n DepthLe = 68\n };\n \n \n enum class 
DxbcInterpolationMode : uint32_t {\n Undefined = 0,\n Constant = 1,\n Linear = 2,\n LinearCentroid = 3,\n LinearNoPerspective = 4,\n LinearNoPerspectiveCentroid = 5,\n LinearSample = 6,\n LinearNoPerspectiveSample = 7,\n };\n \n \n enum class DxbcGlobalFlag : uint32_t {\n RefactoringAllowed = 0,\n DoublePrecision = 1,\n EarlyFragmentTests = 2,\n RawStructuredBuffers = 3,\n };\n \n using DxbcGlobalFlags = Flags;\n \n enum class DxbcZeroTest : uint32_t {\n TestZ = 0,\n TestNz = 1,\n };\n \n enum class DxbcResinfoType : uint32_t {\n Float = 0,\n RcpFloat = 1,\n Uint = 2,\n };\n \n enum class DxbcSyncFlag : uint32_t {\n ThreadsInGroup = 0,\n ThreadGroupSharedMemory = 1,\n UavMemoryGroup = 2,\n UavMemoryGlobal = 3,\n };\n \n using DxbcSyncFlags = Flags;\n \n \n /**\n * \\brief Geometry shader input primitive\n */\n enum class DxbcPrimitive : uint32_t {\n Undefined = 0,\n Point = 1,\n Line = 2,\n Triangle = 3,\n LineAdj = 6,\n TriangleAdj = 7,\n Patch1 = 8,\n Patch2 = 9,\n Patch3 = 10,\n Patch4 = 11,\n Patch5 = 12,\n Patch6 = 13,\n Patch7 = 14,\n Patch8 = 15,\n Patch9 = 16,\n Patch10 = 17,\n Patch11 = 18,\n Patch12 = 19,\n Patch13 = 20,\n Patch14 = 21,\n Patch15 = 22,\n Patch16 = 23,\n Patch17 = 24,\n Patch18 = 25,\n Patch19 = 26,\n Patch20 = 27,\n Patch21 = 28,\n Patch22 = 29,\n Patch23 = 30,\n Patch24 = 31,\n Patch25 = 32,\n Patch26 = 33,\n Patch27 = 34,\n Patch28 = 35,\n Patch29 = 36,\n Patch30 = 37,\n Patch31 = 38,\n Patch32 = 39,\n };\n \n \n /**\n * \\brief Geometry shader output topology\n */\n enum class DxbcPrimitiveTopology : uint32_t {\n Undefined = 0,\n PointList = 1,\n LineList = 2,\n LineStrip = 3,\n TriangleList = 4,\n TriangleStrip = 5,\n LineListAdj = 10,\n LineStripAdj = 11,\n TriangleListAdj = 12,\n TriangleStripAdj = 13,\n };\n \n \n /**\n * \\brief Sampler operation mode\n */\n enum class DxbcSamplerMode : uint32_t {\n Default = 0,\n Comparison = 1,\n Mono = 2,\n };\n \n \n /**\n * \\brief Scalar value type\n * \n * Enumerates possible register 
component\n * types. Scalar types are represented as\n * a one-component vector type.\n */\n enum class DxbcScalarType : uint32_t {\n Uint32 = 0,\n Uint64 = 1,\n Sint32 = 2,\n Sint64 = 3,\n Float32 = 4,\n Float64 = 5,\n Bool = 6,\n };\n \n \n /**\n * \\brief Tessellator domain\n */\n enum class DxbcTessDomain : uint32_t {\n Undefined = 0,\n Isolines = 1,\n Triangles = 2,\n Quads = 3,\n };\n \n /**\n * \\brief Tessellator partitioning\n */\n enum class DxbcTessPartitioning : uint32_t {\n Undefined = 0,\n Integer = 1,\n Pow2 = 2,\n FractOdd = 3,\n FractEven = 4,\n };\n \n /**\n * \\brief UAV definition flags\n */\n enum class DxbcUavFlag : uint32_t {\n GloballyCoherent = 0,\n RasterizerOrdered = 1,\n };\n \n using DxbcUavFlags = Flags;\n \n /**\n * \\brief Tessellator output primitive\n */\n enum class DxbcTessOutputPrimitive : uint32_t {\n Undefined = 0,\n Point = 1,\n Line = 2,\n TriangleCw = 3,\n TriangleCcw = 4,\n };\n \n /**\n * \\brief Custom data class\n * \n * Stores which type of custom data is\n * referenced by the instruction.\n */\n enum class DxbcCustomDataClass : uint32_t {\n Comment = 0,\n DebugInfo = 1,\n Opaque = 2,\n ImmConstBuf = 3,\n };\n \n \n enum class DxbcResourceType : uint32_t {\n Typed = 0,\n Raw = 1,\n Structured = 2,\n };\n\n\n enum class DxbcConstantBufferAccessType : uint32_t {\n StaticallyIndexed = 0,\n DynamicallyIndexed = 1,\n };\n \n}"], ["/lsfg-vk/thirdparty/pe-parse/pepy/pepy.cpp", "/*\n * Copyright (c) 2013, Wesley Shields . All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * 2. 
Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n */\n\n#include \n#include \n#include \n\nusing namespace peparse;\n\n/* NOTE(ww): These don't necessarily have to be the same, but currently are.\n */\n#define PEPY_VERSION PEPARSE_VERSION\n\n/* These are used to across multiple objects. 
*/\n#define PEPY_OBJECT_GET(OBJ, ATTR) \\\n static PyObject *pepy_##OBJ##_get_##ATTR(PyObject *self, void *closure) { \\\n Py_INCREF(((pepy_##OBJ *) self)->ATTR); \\\n return ((pepy_##OBJ *) self)->ATTR; \\\n }\n\n#define OBJECTGETTER(OBJ, ATTR, DOC) \\\n { \\\n (char *) #ATTR, (getter) pepy_##OBJ##_get_##ATTR, \\\n (setter) pepy_attr_not_writable, (char *) #DOC, NULL \\\n }\n\n/* 'OPTIONAL' references the fact that these are from the Optional Header */\n#define OBJECTGETTER_OPTIONAL(ATTR, DOC) \\\n { \\\n (char *) #ATTR, (getter) pepy_parsed_get_optional_##ATTR, \\\n (setter) pepy_attr_not_writable, (char *) #DOC, NULL \\\n }\n\nstatic PyObject *pepy_error;\n\nstruct pepy {\n PyObject_HEAD\n};\n\nstruct pepy_parsed {\n PyObject_HEAD parsed_pe *pe;\n};\n\nstruct pepy_section {\n PyObject_HEAD PyObject *name;\n PyObject *base;\n PyObject *length;\n PyObject *virtaddr;\n PyObject *virtsize;\n PyObject *numrelocs;\n PyObject *numlinenums;\n PyObject *characteristics;\n PyObject *data;\n};\n\nstruct pepy_resource {\n PyObject_HEAD PyObject *type_str;\n PyObject *name_str;\n PyObject *lang_str;\n PyObject *type;\n PyObject *name;\n PyObject *lang;\n PyObject *codepage;\n PyObject *RVA;\n PyObject *size;\n PyObject *data;\n};\n\nstruct pepy_import {\n PyObject_HEAD PyObject *name;\n PyObject *sym;\n PyObject *addr;\n};\n\nstruct pepy_export {\n PyObject_HEAD PyObject *mod;\n PyObject *func;\n PyObject *addr;\n};\n\nstruct pepy_relocation {\n PyObject_HEAD PyObject *type;\n PyObject *addr;\n};\n\n/* None of the attributes in these objects are writable. 
*/\nstatic int\npepy_attr_not_writable(PyObject *self, PyObject *value, void *closure) {\n PyErr_SetString(PyExc_TypeError, \"Attribute not writable.\");\n return -1;\n}\n\nstatic PyObject *\npepy_import_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_import *self;\n\n self = (pepy_import *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_import_init(pepy_import *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OOO:pepy_import_init\", &self->name, &self->sym, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_import_dealloc(pepy_import *self) {\n Py_XDECREF(self->name);\n Py_XDECREF(self->sym);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(import, name);\nPEPY_OBJECT_GET(import, sym);\nPEPY_OBJECT_GET(import, addr);\n\nstatic PyGetSetDef pepy_import_getseters[] = {\n OBJECTGETTER(import, name, \"Name\"),\n OBJECTGETTER(import, sym, \"Symbol\"),\n OBJECTGETTER(import, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_import_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.import\", /* tp_name */\n sizeof(pepy_import), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_import_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy import object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_import_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset 
*/\n (initproc) pepy_import_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_import_new /* tp_new */\n};\n\nstatic PyObject *\npepy_export_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_export *self;\n\n self = (pepy_export *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int pepy_export_init(pepy_export *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OOO:pepy_export_init\", &self->mod, &self->func, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_export_dealloc(pepy_export *self) {\n Py_XDECREF(self->mod);\n Py_XDECREF(self->func);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(export, mod);\nPEPY_OBJECT_GET(export, func);\nPEPY_OBJECT_GET(export, addr);\n\nstatic PyGetSetDef pepy_export_getseters[] = {\n OBJECTGETTER(export, mod, \"Module\"),\n OBJECTGETTER(export, func, \"Function\"),\n OBJECTGETTER(export, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_export_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.export\", /* tp_name */\n sizeof(pepy_export), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_export_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy export object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_export_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_export_init, /* tp_init */\n 0, /* tp_alloc 
*/\n pepy_export_new /* tp_new */\n};\n\nstatic PyObject *\npepy_relocation_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_relocation *self;\n\n self = (pepy_relocation *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_relocation_init(pepy_relocation *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(\n args, \"OO:pepy_relocation_init\", &self->type, &self->addr))\n return -1;\n return 0;\n}\n\nstatic void pepy_relocation_dealloc(pepy_relocation *self) {\n Py_XDECREF(self->type);\n Py_XDECREF(self->addr);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(relocation, type);\nPEPY_OBJECT_GET(relocation, addr);\n\nstatic PyGetSetDef pepy_relocation_getseters[] = {\n OBJECTGETTER(relocation, type, \"Type\"),\n OBJECTGETTER(relocation, addr, \"Address\"),\n {NULL}};\n\nstatic PyTypeObject pepy_relocation_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.relocation\", /* tp_name */\n sizeof(pepy_relocation), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_relocation_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy relocation object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_relocation_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_relocation_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_relocation_new /* tp_new */\n};\n\nstatic PyObject *\npepy_section_new(PyTypeObject 
*type, PyObject *args, PyObject *kwds) {\n pepy_section *self;\n\n self = (pepy_section *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_section_init(pepy_section *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(args,\n \"OOOOOOOOO:pepy_section_init\",\n &self->name,\n &self->base,\n &self->length,\n &self->virtaddr,\n &self->virtsize,\n &self->numrelocs,\n &self->numlinenums,\n &self->characteristics,\n &self->data))\n return -1;\n return 0;\n}\n\nstatic void pepy_section_dealloc(pepy_section *self) {\n Py_XDECREF(self->name);\n Py_XDECREF(self->base);\n Py_XDECREF(self->length);\n Py_XDECREF(self->virtaddr);\n Py_XDECREF(self->virtsize);\n Py_XDECREF(self->numrelocs);\n Py_XDECREF(self->numlinenums);\n Py_XDECREF(self->characteristics);\n Py_XDECREF(self->data);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(section, name);\nPEPY_OBJECT_GET(section, base);\nPEPY_OBJECT_GET(section, length);\nPEPY_OBJECT_GET(section, virtaddr);\nPEPY_OBJECT_GET(section, virtsize);\nPEPY_OBJECT_GET(section, numrelocs);\nPEPY_OBJECT_GET(section, numlinenums);\nPEPY_OBJECT_GET(section, characteristics);\nPEPY_OBJECT_GET(section, data);\n\nstatic PyGetSetDef pepy_section_getseters[] = {\n OBJECTGETTER(section, name, \"Name\"),\n OBJECTGETTER(section, base, \"Base address\"),\n OBJECTGETTER(section, length, \"Length\"),\n OBJECTGETTER(section, virtaddr, \"Virtual address\"),\n OBJECTGETTER(section, virtsize, \"Virtual size\"),\n OBJECTGETTER(section, numrelocs, \"Number of relocations\"),\n OBJECTGETTER(section, numlinenums, \"Number of line numbers\"),\n OBJECTGETTER(section, characteristics, \"Characteristics\"),\n OBJECTGETTER(section, data, \"Section data\"),\n {NULL}};\n\nstatic PyTypeObject pepy_section_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.section\", /* tp_name */\n sizeof(pepy_section), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_section_dealloc, /* tp_dealloc */\n 
0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy section object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n 0, /* tp_methods */\n 0, /* tp_members */\n pepy_section_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_section_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_section_new /* tp_new */\n};\n\nstatic PyObject *\npepy_resource_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_resource *self;\n\n self = (pepy_resource *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int\npepy_resource_init(pepy_resource *self, PyObject *args, PyObject *kwds) {\n if (!PyArg_ParseTuple(args,\n \"OOOOOOOOOO:pepy_resource_init\",\n &self->type_str,\n &self->name_str,\n &self->lang_str,\n &self->type,\n &self->name,\n &self->lang,\n &self->codepage,\n &self->RVA,\n &self->size,\n &self->data))\n return -1;\n\n return 0;\n}\n\nstatic void pepy_resource_dealloc(pepy_resource *self) {\n Py_XDECREF(self->type_str);\n Py_XDECREF(self->name_str);\n Py_XDECREF(self->lang_str);\n Py_XDECREF(self->type);\n Py_XDECREF(self->name);\n Py_XDECREF(self->lang);\n Py_XDECREF(self->codepage);\n Py_XDECREF(self->RVA);\n Py_XDECREF(self->size);\n Py_XDECREF(self->data);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nPEPY_OBJECT_GET(resource, type_str);\nPEPY_OBJECT_GET(resource, name_str);\nPEPY_OBJECT_GET(resource, lang_str);\nPEPY_OBJECT_GET(resource, type);\nPEPY_OBJECT_GET(resource, name);\nPEPY_OBJECT_GET(resource, 
lang);\nPEPY_OBJECT_GET(resource, codepage);\nPEPY_OBJECT_GET(resource, RVA);\nPEPY_OBJECT_GET(resource, size);\nPEPY_OBJECT_GET(resource, data);\n\nstatic PyObject *pepy_resource_type_as_str(PyObject *self, PyObject *args) {\n PyObject *ret;\n char *str;\n long type;\n\n type = PyLong_AsUnsignedLong(((pepy_resource *) self)->type);\n if (type == -1) {\n if (PyErr_Occurred()) {\n PyErr_PrintEx(0);\n return NULL;\n }\n }\n switch ((resource_type) type) {\n case (RT_CURSOR):\n str = (char *) \"CURSOR\";\n break;\n case (RT_BITMAP):\n str = (char *) \"BITMAP\";\n break;\n case (RT_ICON):\n str = (char *) \"ICON\";\n break;\n case (RT_MENU):\n str = (char *) \"MENU\";\n break;\n case (RT_DIALOG):\n str = (char *) \"DIALOG\";\n break;\n case (RT_STRING):\n str = (char *) \"STRING\";\n break;\n case (RT_FONTDIR):\n str = (char *) \"FONTDIR\";\n break;\n case (RT_FONT):\n str = (char *) \"FONT\";\n break;\n case (RT_ACCELERATOR):\n str = (char *) \"ACCELERATOR\";\n break;\n case (RT_RCDATA):\n str = (char *) \"RCDATA\";\n break;\n case (RT_MESSAGETABLE):\n str = (char *) \"MESSAGETABLE\";\n break;\n case (RT_GROUP_CURSOR):\n str = (char *) \"GROUP_CURSOR\";\n break;\n case (RT_GROUP_ICON):\n str = (char *) \"GROUP_ICON\";\n break;\n case (RT_VERSION):\n str = (char *) \"VERSION\";\n break;\n case (RT_DLGINCLUDE):\n str = (char *) \"DLGINCLUDE\";\n break;\n case (RT_PLUGPLAY):\n str = (char *) \"PLUGPLAY\";\n break;\n case (RT_VXD):\n str = (char *) \"VXD\";\n break;\n case (RT_ANICURSOR):\n str = (char *) \"ANICURSOR\";\n break;\n case (RT_ANIICON):\n str = (char *) \"ANIICON\";\n break;\n case (RT_HTML):\n str = (char *) \"HTML\";\n break;\n case (RT_MANIFEST):\n str = (char *) \"MANIFEST\";\n break;\n default:\n str = (char *) \"UNKNOWN\";\n break;\n }\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyMethodDef pepy_resource_methods[] = {\n 
{\"type_as_str\",\n pepy_resource_type_as_str,\n METH_NOARGS,\n \"Return the resource type as a string.\"},\n {NULL}};\n\nstatic PyGetSetDef pepy_resource_getseters[] = {\n OBJECTGETTER(resource, type_str, \"Type string\"),\n OBJECTGETTER(resource, name_str, \"Name string\"),\n OBJECTGETTER(resource, lang_str, \"Lang string\"),\n OBJECTGETTER(resource, type, \"Type\"),\n OBJECTGETTER(resource, name, \"Name\"),\n OBJECTGETTER(resource, lang, \"Language\"),\n OBJECTGETTER(resource, codepage, \"Codepage\"),\n OBJECTGETTER(resource, RVA, \"RVA\"),\n OBJECTGETTER(resource, size, \"Size (specified in RDAT)\"),\n OBJECTGETTER(resource, data, \"Resource data\"),\n {NULL}};\n\nstatic PyTypeObject pepy_resource_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.resource\", /* tp_name */\n sizeof(pepy_resource), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_resource_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT, /* tp_flags */\n \"pepy resource object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset */\n 0, /* tp_iter */\n 0, /* tp_iternext */\n pepy_resource_methods, /* tp_methods */\n 0, /* tp_members */\n pepy_resource_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_resource_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_resource_new /* tp_new */\n};\n\nstatic PyObject *\npepy_parsed_new(PyTypeObject *type, PyObject *args, PyObject *kwds) {\n pepy_parsed *self;\n\n self = (pepy_parsed *) type->tp_alloc(type, 0);\n\n return (PyObject *) self;\n}\n\nstatic int 
pepy_parsed_init(pepy_parsed *self, PyObject *args, PyObject *kwds) {\n char *pe_path;\n\n if (!PyArg_ParseTuple(args, \"s:pepy_parse\", &pe_path))\n return -1;\n\n if (!pe_path)\n return -1;\n\n self->pe = ParsePEFromFile(pe_path);\n if (!self->pe) {\n return -2;\n }\n\n return 0;\n}\n\nstatic void pepy_parsed_dealloc(pepy_parsed *self) {\n DestructParsedPE(self->pe);\n Py_TYPE(self)->tp_free((PyObject *) self);\n}\n\nstatic PyObject *pepy_parsed_get_entry_point(PyObject *self, PyObject *args) {\n VA entrypoint;\n PyObject *ret;\n\n if (!GetEntryPoint(((pepy_parsed *) self)->pe, entrypoint))\n Py_RETURN_NONE;\n\n ret = PyLong_FromUnsignedLongLong(entrypoint);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return object.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_machine_as_str(PyObject *self,\n PyObject *args) {\n PyObject *ret;\n const char *str;\n\n str = GetMachineAsString(((pepy_parsed *) self)->pe);\n if (!str)\n Py_RETURN_NONE;\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_subsystem_as_str(PyObject *self,\n PyObject *args) {\n PyObject *ret;\n const char *str;\n\n str = GetSubsystemAsString(((pepy_parsed *) self)->pe);\n if (!str)\n Py_RETURN_NONE;\n\n ret = PyUnicode_FromString(str);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create return string.\");\n return NULL;\n }\n\n return ret;\n}\n\nstatic PyObject *pepy_parsed_get_bytes(PyObject *self, PyObject *args) {\n uint64_t start;\n Py_ssize_t len, idx;\n PyObject *ret;\n\n if (!PyArg_ParseTuple(args, \"KK:pepy_parsed_get_bytes\", &start, &len))\n return NULL;\n\n /*\n * XXX: a new implementation read all bytes in char* and use\n * PybyteArray_FromStringAndSize\n */\n\n uint8_t *buf = new (std::nothrow) uint8_t[len];\n if (!buf) {\n /* in case allocation failed */\n PyErr_SetString(pepy_error,\n 
\"Unable to create initial buffer (allocation failure).\");\n return NULL;\n }\n\n for (idx = 0; idx < len; idx++) {\n if (!ReadByteAtVA(((pepy_parsed *) self)->pe, start + idx, buf[idx]))\n break;\n }\n\n /* use idx as content length, if we get less than asked for */\n ret = PyByteArray_FromStringAndSize(reinterpret_cast(buf), idx);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new byte array.\");\n return NULL;\n }\n\n delete[] buf;\n return ret;\n}\n\n/*\n * This is used to convert bounded buffers into python byte array objects.\n * In case the buffer is NULL, return an empty bytearray.\n */\nstatic PyObject *pepy_data_converter(bounded_buffer *data) {\n PyObject *ret;\n const char *str;\n Py_ssize_t len;\n\n if (!data || !data->buf) {\n str = \"\";\n len = 0;\n } else {\n str = (const char *) data->buf;\n len = data->bufLen;\n }\n\n ret = PyByteArray_FromStringAndSize(str, len);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to convert data to byte array.\");\n return NULL;\n }\n\n return ret;\n}\n\nint section_callback(void *cbd,\n const VA &base,\n const std::string &name,\n const image_section_header &s,\n const bounded_buffer *data) {\n uint32_t buflen;\n PyObject *sect;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * I've seen some interesting binaries with a section where the\n * PointerToRawData and SizeOfRawData are invalid. The parser library\n * handles this by setting sectionData to NULL as returned by splitBuffer().\n * The sectionData (passed in to us as *data) is converted using\n * pepy_data_converter() which will return an empty string object.\n * However, we need to address the fact that we pass an invalid length\n * via data->bufLen.\n */\n if (!data) {\n buflen = 0;\n } else {\n buflen = data->bufLen;\n }\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * section type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"sKKIIHHIO&\",\n name.c_str(),\n base,\n buflen,\n s.VirtualAddress,\n s.Misc.VirtualSize,\n s.NumberOfRelocations,\n s.NumberOfLinenumbers,\n s.Characteristics,\n pepy_data_converter,\n data);\n if (!tuple)\n return 1;\n\n sect = pepy_section_new(&pepy_section_type, NULL, NULL);\n if (!sect) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_section_init((pepy_section *) sect, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, sect) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(sect);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_sections(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterSec(((pepy_parsed *) self)->pe, section_callback, ret);\n\n return ret;\n}\n\nint resource_callback(void *cbd, const resource &r) {\n PyObject *rsrc;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * section type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"s#s#s#IIIIIIO&\",\n r.type_str.c_str(),\n r.type_str.length(),\n r.name_str.c_str(),\n r.name_str.length(),\n r.lang_str.c_str(),\n r.lang_str.length(),\n r.type,\n r.name,\n r.lang,\n r.codepage,\n r.RVA,\n r.size,\n pepy_data_converter,\n r.buf);\n if (!tuple)\n return 1;\n\n rsrc = pepy_resource_new(&pepy_resource_type, NULL, NULL);\n if (!rsrc) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_resource_init((pepy_resource *) rsrc, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new resource.\");\n return 1;\n }\n\n if (PyList_Append(list, rsrc) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(rsrc);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_resources(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterRsrc(((pepy_parsed *) self)->pe, resource_callback, ret);\n\n return ret;\n}\n\nint import_callback(void *cbd,\n const VA &addr,\n const std::string &name,\n const std::string &sym) {\n PyObject *imp;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * import type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"ssI\", name.c_str(), sym.c_str(), addr);\n if (!tuple)\n return 1;\n\n imp = pepy_import_new(&pepy_import_type, NULL, NULL);\n if (!imp) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_import_init((pepy_import *) imp, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, imp) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(imp);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_imports(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterImpVAString(((pepy_parsed *) self)->pe, import_callback, ret);\n\n return ret;\n}\n\nint export_callback(void *cbd,\n const VA &addr,\n const std::string &mod,\n const std::string &func) {\n PyObject *exp;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * export type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"ssI\", mod.c_str(), func.c_str(), addr);\n if (!tuple)\n return 1;\n\n exp = pepy_export_new(&pepy_export_type, NULL, NULL);\n if (!exp) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_export_init((pepy_export *) exp, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, exp) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(exp);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_exports(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n /*\n * This could use the same callback and object as imports but the names\n * of the attributes would be slightly off.\n */\n IterExpVA(((pepy_parsed *) self)->pe, export_callback, ret);\n\n return ret;\n}\n\nint reloc_callback(void *cbd, const VA &addr, const reloc_type &type) {\n PyObject *reloc;\n PyObject *tuple;\n PyObject *list = (PyObject *) cbd;\n\n /*\n * The tuple item order is important here. 
It is passed into the\n * relocation type initialization and parsed there.\n */\n tuple = Py_BuildValue(\"II\", type, addr);\n if (!tuple)\n return 1;\n\n reloc = pepy_relocation_new(&pepy_relocation_type, NULL, NULL);\n if (!reloc) {\n Py_DECREF(tuple);\n return 1;\n }\n\n if (pepy_relocation_init((pepy_relocation *) reloc, tuple, NULL) == -1) {\n PyErr_SetString(pepy_error, \"Unable to init new section.\");\n return 1;\n }\n\n if (PyList_Append(list, reloc) == -1) {\n Py_DECREF(tuple);\n Py_DECREF(reloc);\n return 1;\n }\n\n return 0;\n}\n\nstatic PyObject *pepy_parsed_get_relocations(PyObject *self, PyObject *args) {\n PyObject *ret = PyList_New(0);\n if (!ret) {\n PyErr_SetString(pepy_error, \"Unable to create new list.\");\n return NULL;\n }\n\n IterRelocs(((pepy_parsed *) self)->pe, reloc_callback, ret);\n\n return ret;\n}\n\n#define PEPY_PARSED_GET(ATTR, VAL) \\\n static PyObject *pepy_parsed_get_##ATTR(PyObject *self, void *closure) { \\\n PyObject *ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n return ret; \\\n }\n\nPEPY_PARSED_GET(signature, Signature);\nPEPY_PARSED_GET(machine, FileHeader.Machine);\nPEPY_PARSED_GET(numberofsections, FileHeader.NumberOfSections);\nPEPY_PARSED_GET(timedatestamp, FileHeader.TimeDateStamp);\nPEPY_PARSED_GET(numberofsymbols, FileHeader.NumberOfSymbols);\nPEPY_PARSED_GET(characteristics, FileHeader.Characteristics);\nPEPY_PARSED_GET(magic, OptionalMagic);\n\n/*\n * This is used to get things from the optional header, which can be either\n * the PE32 or PE32+ version, depending upon the magic value. 
Technically\n * the magic is stored in the OptionalHeader, but to make life easier pe-parse\n * stores the value in nt_header_32 along with the appropriate optional header.\n * This is why \"magic\" is handled above, and not here.\n */\n#define PEPY_PARSED_GET_OPTIONAL(ATTR, VAL) \\\n static PyObject *pepy_parsed_get_optional_##ATTR(PyObject *self, \\\n void *closure) { \\\n PyObject *ret = NULL; \\\n if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic == \\\n NT_OPTIONAL_32_MAGIC) { \\\n ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n } else if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic == \\\n NT_OPTIONAL_64_MAGIC) { \\\n ret = PyLong_FromUnsignedLongLong( \\\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader64.VAL); \\\n if (!ret) \\\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\"); \\\n } else { \\\n PyErr_SetString(pepy_error, \"Bad magic value.\"); \\\n } \\\n return ret; \\\n }\n\nPEPY_PARSED_GET_OPTIONAL(majorlinkerver, MajorLinkerVersion);\nPEPY_PARSED_GET_OPTIONAL(minorlinkerver, MinorLinkerVersion);\nPEPY_PARSED_GET_OPTIONAL(codesize, SizeOfCode);\nPEPY_PARSED_GET_OPTIONAL(initdatasize, SizeOfInitializedData);\nPEPY_PARSED_GET_OPTIONAL(uninitdatasize, SizeOfUninitializedData);\nPEPY_PARSED_GET_OPTIONAL(entrypointaddr, AddressOfEntryPoint);\nPEPY_PARSED_GET_OPTIONAL(baseofcode, BaseOfCode);\nPEPY_PARSED_GET_OPTIONAL(imagebase, ImageBase);\nPEPY_PARSED_GET_OPTIONAL(sectionalignement, SectionAlignment);\nPEPY_PARSED_GET_OPTIONAL(filealignment, FileAlignment);\nPEPY_PARSED_GET_OPTIONAL(majorosver, MajorOperatingSystemVersion);\nPEPY_PARSED_GET_OPTIONAL(minorosver, MinorOperatingSystemVersion);\nPEPY_PARSED_GET_OPTIONAL(win32ver, Win32VersionValue);\nPEPY_PARSED_GET_OPTIONAL(imagesize, SizeOfImage);\nPEPY_PARSED_GET_OPTIONAL(headersize, 
SizeOfHeaders);\nPEPY_PARSED_GET_OPTIONAL(checksum, CheckSum);\nPEPY_PARSED_GET_OPTIONAL(subsystem, Subsystem);\nPEPY_PARSED_GET_OPTIONAL(dllcharacteristics, DllCharacteristics);\nPEPY_PARSED_GET_OPTIONAL(stackreservesize, SizeOfStackReserve);\nPEPY_PARSED_GET_OPTIONAL(stackcommitsize, SizeOfStackCommit);\nPEPY_PARSED_GET_OPTIONAL(heapreservesize, SizeOfHeapReserve);\nPEPY_PARSED_GET_OPTIONAL(heapcommitsize, SizeOfHeapCommit);\nPEPY_PARSED_GET_OPTIONAL(loaderflags, LoaderFlags);\nPEPY_PARSED_GET_OPTIONAL(rvasandsize, NumberOfRvaAndSizes);\n\n/*\n * BaseOfData is only in PE32, not PE32+. Thus, it uses a non-standard\n * getter function compared to the other shared fields.\n */\nstatic PyObject *pepy_parsed_get_optional_baseofdata(PyObject *self,\n void *closure) {\n PyObject *ret = NULL;\n if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic ==\n NT_OPTIONAL_32_MAGIC) {\n ret = PyLong_FromUnsignedLong(\n ((pepy_parsed *) self)->pe->peHeader.nt.OptionalHeader.BaseOfData);\n if (!ret)\n PyErr_SetString(PyExc_AttributeError, \"Error getting attribute.\");\n } else if (((pepy_parsed *) self)->pe->peHeader.nt.OptionalMagic ==\n NT_OPTIONAL_64_MAGIC) {\n PyErr_SetString(PyExc_AttributeError, \"Not available on PE32+.\");\n } else {\n PyErr_SetString(pepy_error, \"Bad magic value.\");\n }\n return ret;\n}\n\nstatic PyGetSetDef pepy_parsed_getseters[] = {\n OBJECTGETTER(parsed, signature, \"PE Signature\"),\n OBJECTGETTER(parsed, machine, \"Machine\"),\n OBJECTGETTER(parsed, numberofsections, \"Number of sections\"),\n OBJECTGETTER(parsed, timedatestamp, \"Timedate stamp\"),\n OBJECTGETTER(parsed, numberofsymbols, \"Number of symbols\"),\n OBJECTGETTER(parsed, characteristics, \"Characteristics\"),\n OBJECTGETTER(parsed, magic, \"Magic\"),\n OBJECTGETTER_OPTIONAL(majorlinkerver, \"Major linker version\"),\n OBJECTGETTER_OPTIONAL(minorlinkerver, \"Minor linker version\"),\n OBJECTGETTER_OPTIONAL(codesize, \"Size of code\"),\n OBJECTGETTER_OPTIONAL(initdatasize, \"Size 
of initialized data\"),\n OBJECTGETTER_OPTIONAL(uninitdatasize, \"Size of uninitialized data\"),\n OBJECTGETTER_OPTIONAL(entrypointaddr, \"Address of entry point\"),\n OBJECTGETTER_OPTIONAL(baseofcode, \"Base address of code\"),\n OBJECTGETTER_OPTIONAL(imagebase, \"Image base address\"),\n OBJECTGETTER_OPTIONAL(sectionalignement, \"Section alignment\"),\n OBJECTGETTER_OPTIONAL(filealignment, \"File alignment\"),\n OBJECTGETTER_OPTIONAL(majorosver, \"Major OS version\"),\n OBJECTGETTER_OPTIONAL(minorosver, \"Minor OS version\"),\n OBJECTGETTER_OPTIONAL(win32ver, \"Win32 version\"),\n OBJECTGETTER_OPTIONAL(imagesize, \"Size of image\"),\n OBJECTGETTER_OPTIONAL(headersize, \"Size of headers\"),\n OBJECTGETTER_OPTIONAL(checksum, \"Checksum\"),\n OBJECTGETTER_OPTIONAL(subsystem, \"Subsystem\"),\n OBJECTGETTER_OPTIONAL(dllcharacteristics, \"DLL characteristics\"),\n OBJECTGETTER_OPTIONAL(stackreservesize, \"Size of stack reserve\"),\n OBJECTGETTER_OPTIONAL(stackcommitsize, \"Size of stack commit\"),\n OBJECTGETTER_OPTIONAL(heapreservesize, \"Size of heap reserve\"),\n OBJECTGETTER_OPTIONAL(heapcommitsize, \"Size of heap commit\"),\n OBJECTGETTER_OPTIONAL(loaderflags, \"Loader flags\"),\n OBJECTGETTER_OPTIONAL(rvasandsize, \"Number of RVA and sizes\"),\n /* Base of data is only available in PE32, not PE32+. 
*/\n {(char *) \"baseofdata\",\n (getter) pepy_parsed_get_optional_baseofdata,\n (setter) pepy_attr_not_writable,\n (char *) \"Base address of data\",\n NULL},\n {NULL}};\n\nstatic PyMethodDef pepy_parsed_methods[] = {\n {\"get_entry_point\",\n pepy_parsed_get_entry_point,\n METH_NOARGS,\n \"Return the entry point address.\"},\n {\"get_machine_as_str\",\n pepy_parsed_get_machine_as_str,\n METH_NOARGS,\n \"Return the machine as a human readable string.\"},\n {\"get_subsystem_as_str\",\n pepy_parsed_get_subsystem_as_str,\n METH_NOARGS,\n \"Return the subsystem as a human readable string.\"},\n {\"get_bytes\",\n pepy_parsed_get_bytes,\n METH_VARARGS,\n \"Return the first N bytes at a given address.\"},\n {\"get_sections\",\n pepy_parsed_get_sections,\n METH_NOARGS,\n \"Return a list of section objects.\"},\n {\"get_imports\",\n pepy_parsed_get_imports,\n METH_NOARGS,\n \"Return a list of import objects.\"},\n {\"get_exports\",\n pepy_parsed_get_exports,\n METH_NOARGS,\n \"Return a list of export objects.\"},\n {\"get_relocations\",\n pepy_parsed_get_relocations,\n METH_NOARGS,\n \"Return a list of relocation objects.\"},\n {\"get_resources\",\n pepy_parsed_get_resources,\n METH_NOARGS,\n \"Return a list of resource objects.\"},\n {NULL}};\n\nstatic PyTypeObject pepy_parsed_type = {\n PyVarObject_HEAD_INIT(NULL, 0) /* ob_size */\n \"pepy.parsed\", /* tp_name */\n sizeof(pepy_parsed), /* tp_basicsize */\n 0, /* tp_itemsize */\n (destructor) pepy_parsed_dealloc, /* tp_dealloc */\n 0, /* tp_print */\n 0, /* tp_getattr */\n 0, /* tp_setattr */\n 0, /* tp_compare */\n 0, /* tp_repr */\n 0, /* tp_as_number */\n 0, /* tp_as_sequence */\n 0, /* tp_as_mapping */\n 0, /* tp_hash */\n 0, /* tp_call */\n 0, /* tp_str */\n 0, /* tp_getattro */\n 0, /* tp_setattro */\n 0, /* tp_as_buffer */\n Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */\n \"pepy parsed object\", /* tp_doc */\n 0, /* tp_traverse */\n 0, /* tp_clear */\n 0, /* tp_richcompare */\n 0, /* tp_weaklistoffset 
*/\n 0, /* tp_iter */\n 0, /* tp_iternext */\n pepy_parsed_methods, /* tp_methods */\n 0, /* tp_members */\n pepy_parsed_getseters, /* tp_getset */\n 0, /* tp_base */\n 0, /* tp_dict */\n 0, /* tp_descr_get */\n 0, /* tp_descr_set */\n 0, /* tp_dictoffset */\n (initproc) pepy_parsed_init, /* tp_init */\n 0, /* tp_alloc */\n pepy_parsed_new /* tp_new */\n};\n\nstatic PyObject *pepy_parse(PyObject *self, PyObject *args) {\n PyObject *parsed;\n int ret;\n char *err_str = NULL;\n\n parsed = pepy_parsed_new(&pepy_parsed_type, NULL, NULL);\n if (!parsed) {\n PyErr_SetString(pepy_error, \"Unable to make new parsed object.\");\n return NULL;\n }\n\n ret = pepy_parsed_init((pepy_parsed *) parsed, args, NULL);\n if (ret < 0) {\n if (ret == -2) {\n // error (loc)\n size_t len = GetPEErrString().length() + GetPEErrLoc().length() + 4;\n err_str = (char *) malloc(len);\n if (!err_str)\n return PyErr_NoMemory();\n snprintf(err_str,\n len,\n \"%s (%s)\",\n GetPEErrString().c_str(),\n GetPEErrLoc().c_str());\n PyErr_SetString(pepy_error, err_str);\n } else\n PyErr_SetString(pepy_error, \"Unable to init new parsed object.\");\n return NULL;\n }\n\n return parsed;\n}\n\nstatic PyMethodDef pepy_methods[] = {\n {\"parse\", pepy_parse, METH_VARARGS, \"Parse PE from file.\"}, {NULL}};\n\nPyMODINIT_FUNC PyInit_pepy(void) {\n PyObject *m;\n\n if (PyType_Ready(&pepy_parsed_type) < 0 ||\n PyType_Ready(&pepy_section_type) < 0 ||\n PyType_Ready(&pepy_import_type) < 0 ||\n PyType_Ready(&pepy_export_type) < 0 ||\n PyType_Ready(&pepy_relocation_type) < 0 ||\n PyType_Ready(&pepy_resource_type) < 0)\n return NULL;\n\n static struct PyModuleDef moduledef = {\n PyModuleDef_HEAD_INIT,\n \"pepy\",\n \"Python interface to pe-parse.\",\n -1,\n pepy_methods,\n NULL,\n NULL,\n NULL,\n NULL,\n };\n\n m = PyModule_Create(&moduledef);\n if (!m)\n return NULL;\n\n pepy_error = PyErr_NewException((char *) \"pepy.error\", NULL, NULL);\n Py_INCREF(pepy_error);\n PyModule_AddObject(m, \"error\", pepy_error);\n\n 
Py_INCREF(&pepy_parsed_type);\n PyModule_AddObject(m, \"pepy_parsed\", (PyObject *) &pepy_parsed_type);\n\n Py_INCREF(&pepy_section_type);\n PyModule_AddObject(m, \"pepy_section\", (PyObject *) &pepy_section_type);\n\n Py_INCREF(&pepy_import_type);\n PyModule_AddObject(m, \"pepy_import\", (PyObject *) &pepy_import_type);\n\n Py_INCREF(&pepy_export_type);\n PyModule_AddObject(m, \"pepy_export\", (PyObject *) &pepy_export_type);\n\n Py_INCREF(&pepy_relocation_type);\n PyModule_AddObject(m, \"pepy_relocation\", (PyObject *) &pepy_relocation_type);\n\n Py_INCREF(&pepy_resource_type);\n PyModule_AddObject(m, \"pepy_resource\", (PyObject *) &pepy_resource_type);\n\n PyModule_AddStringMacro(m, PEPY_VERSION);\n PyModule_AddStringMacro(m, PEPARSE_VERSION);\n PyModule_AddStringConstant(m, \"__version__\", PEPY_VERSION);\n\n PyModule_AddIntMacro(m, MZ_MAGIC);\n PyModule_AddIntMacro(m, NT_MAGIC);\n PyModule_AddIntMacro(m, NUM_DIR_ENTRIES);\n PyModule_AddIntMacro(m, NT_OPTIONAL_32_MAGIC);\n PyModule_AddIntMacro(m, NT_SHORT_NAME_LEN);\n PyModule_AddIntMacro(m, DIR_EXPORT);\n PyModule_AddIntMacro(m, DIR_IMPORT);\n PyModule_AddIntMacro(m, DIR_RESOURCE);\n PyModule_AddIntMacro(m, DIR_EXCEPTION);\n PyModule_AddIntMacro(m, DIR_SECURITY);\n PyModule_AddIntMacro(m, DIR_BASERELOC);\n PyModule_AddIntMacro(m, DIR_DEBUG);\n PyModule_AddIntMacro(m, DIR_ARCHITECTURE);\n PyModule_AddIntMacro(m, DIR_GLOBALPTR);\n PyModule_AddIntMacro(m, DIR_TLS);\n PyModule_AddIntMacro(m, DIR_LOAD_CONFIG);\n PyModule_AddIntMacro(m, DIR_BOUND_IMPORT);\n PyModule_AddIntMacro(m, DIR_IAT);\n PyModule_AddIntMacro(m, DIR_DELAY_IMPORT);\n PyModule_AddIntMacro(m, DIR_COM_DESCRIPTOR);\n\n PyModule_AddIntMacro(m, IMAGE_SCN_TYPE_NO_PAD);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_CODE);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_INITIALIZED_DATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_CNT_UNINITIALIZED_DATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_OTHER);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_INFO);\n PyModule_AddIntMacro(m, 
IMAGE_SCN_LNK_REMOVE);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_COMDAT);\n PyModule_AddIntMacro(m, IMAGE_SCN_NO_DEFER_SPEC_EXC);\n PyModule_AddIntMacro(m, IMAGE_SCN_GPREL);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_FARDATA);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_PURGEABLE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_16BIT);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_LOCKED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_PRELOAD);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_1BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_2BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_4BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_8BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_16BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_32BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_64BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_128BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_256BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_512BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_1024BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_2048BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_4096BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_8192BYTES);\n PyModule_AddIntMacro(m, IMAGE_SCN_ALIGN_MASK);\n PyModule_AddIntMacro(m, IMAGE_SCN_LNK_NRELOC_OVFL);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_DISCARDABLE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_NOT_CACHED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_NOT_PAGED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_SHARED);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_EXECUTE);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_READ);\n PyModule_AddIntMacro(m, IMAGE_SCN_MEM_WRITE);\n\n return m;\n}\n"], ["/lsfg-vk/thirdparty/toml11/tools/expand/main.cpp", "#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nstd::optional\nis_include(const std::string& line, const std::filesystem::path& filepath)\n{\n // [ws] # [ws] include [ws] \\\".+\\\"\n auto iter = line.begin();\n while(iter < line.end())\n {\n 
if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != '#') {return std::nullopt;}\n\n assert(*iter == '#');\n ++iter;\n\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != 'i') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'n') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'c') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'l') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'u') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'd') {return std::nullopt;} else {++iter;}\n if(iter == line.end() || *iter != 'e') {return std::nullopt;} else {++iter;}\n\n while(iter < line.end())\n {\n if(*iter != ' ' && *iter != '\\t')\n {\n break;\n }\n ++iter;\n }\n if(iter == line.end() || *iter != '\"') {return std::nullopt;} else {++iter;}\n\n std::string filename;\n while(iter < line.end())\n {\n if(*iter == '\"') {break;}\n filename += *iter;\n ++iter;\n }\n if(iter == line.end() || *iter != '\"') {return std::nullopt;} else {++iter;}\n\n return std::filesystem::canonical(filepath.parent_path() / std::filesystem::path(filename));\n}\n\nstruct File\n{\n File() = default;\n\n explicit File(std::filesystem::path f)\n : filename(std::move(f))\n {\n std::ifstream ifs(filename);\n if( ! 
ifs.good())\n {\n throw std::runtime_error(\"file open error: \" + filename.string());\n }\n\n std::string line;\n while(std::getline(ifs, line))\n {\n if(const auto incl = is_include(line, filename))\n {\n includes.push_back(incl.value());\n }\n else\n {\n content.push_back(line);\n }\n }\n }\n\n File(std::filesystem::path f, std::vector c,\n std::vector i)\n : filename(std::move(f)), content(std::move(c)), includes(std::move(i))\n {}\n\n std::filesystem::path filename;\n std::vector content; // w/o include\n std::vector includes;\n};\n\nstruct Graph\n{\n struct Node\n {\n std::vector included;\n std::vector includes;\n };\n\n std::map nodes;\n};\n\nint main(int argc, char** argv)\n{\n using namespace std::literals::string_literals;\n if(argc != 2)\n {\n std::cerr << \"Usage: ./a.out path/to/toml.hpp > single_include/toml.hpp\" << std::endl;\n return 1;\n }\n\n const auto input_file = std::filesystem::path(std::string(argv[1]));\n assert(input_file.filename() == \"toml.hpp\");\n\n const auto include_path = input_file.parent_path();\n\n // -------------------------------------------------------------------------\n // load files and detect `include \"xxx.hpp\"`.\n // If the file has `_fwd` and `_impl`, expand those files first.\n\n std::set fwd_impl_files;\n for(const auto& entities : std::filesystem::directory_iterator(include_path/\"toml11\"/\"fwd\"))\n {\n if( ! entities.is_regular_file()) {continue;}\n std::string fname = entities.path().filename().string();\n if(fname.ends_with(\"_fwd.hpp\"))\n {\n for(const auto c : \"_fwd.hpp\"s) {fname.pop_back(); (void)c;}\n fwd_impl_files.insert(std::move(fname));\n }\n }\n for(const auto& entities : std::filesystem::directory_iterator(include_path/\"toml11\"/\"impl\"))\n {\n if( ! 
entities.is_regular_file()) {continue;}\n std::string fname = entities.path().filename().string();\n if(fname.ends_with(\"_impl.hpp\"))\n {\n for(const auto c : \"_impl.hpp\"s) {fname.pop_back(); (void)c;}\n // all impl files has fwd file\n assert(fwd_impl_files.contains(fname));\n }\n }\n\n const auto input = File(input_file);\n\n std::map files;\n files[input_file] = input;\n\n for(const auto& fname : input.includes)\n {\n if(fwd_impl_files.contains(fname.stem().string()))\n {\n std::cerr << \"expanding fwd/impl file of \" << fname.string() << std::endl;\n\n // expand the first include\n std::ifstream ifs(fname);\n\n std::vector content;\n std::vector includes;\n\n std::string line;\n while(std::getline(ifs, line))\n {\n // expand _fwd and _impl files first.\n const auto incl = is_include(line, fname);\n if(incl.has_value())\n {\n // if a file has _fwd/_impl files, it only includes fwd/impl files.\n assert(incl.value().string().ends_with(\"_impl.hpp\") ||\n incl.value().string().ends_with(\"_fwd.hpp\") );\n\n const File included(incl.value());\n for(const auto& l : included.content)\n {\n content.push_back(l);\n }\n for(const auto& i : included.includes)\n {\n includes.push_back(i);\n }\n }\n else\n {\n content.push_back(line);\n }\n }\n files[fname] = File(fname, std::move(content), std::move(includes));\n }\n else\n {\n files[fname] = File(fname);\n }\n std::cerr << \"file \" << fname << \" has \" << files.at(fname).content.size() << \" lines.\" << std::endl;\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"files have been read. 
next: constructing dependency graph...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // construct dependency graph\n\n Graph g;\n for(const auto& [k, v] : files)\n {\n g.nodes[k] = Graph::Node{};\n }\n\n for(const auto& [fname, file] : files)\n {\n for(auto incl : file.includes)\n {\n auto incl_stem = incl.stem().string();\n if(incl_stem.ends_with(\"_fwd\"))\n {\n for(const char c : \"_fwd\"s) {incl_stem.pop_back(); (void)c;}\n\n // include original file instaed\n incl = incl.parent_path() / \"..\" / std::filesystem::path(incl_stem + \".hpp\");\n }\n else if(incl_stem.ends_with(\"_impl\"))\n {\n for(const char c : \"_impl\"s) {incl_stem.pop_back(); (void)c;}\n\n // include original file instaed\n incl = incl.parent_path() / \"..\" / std::filesystem::path(incl_stem + \".hpp\");\n }\n incl = std::filesystem::canonical(incl);\n\n // avoid self include loop\n if(fname != incl)\n {\n std::cerr << fname << \" includes \" << incl << std::endl;\n\n g.nodes.at(fname).includes.push_back(incl);\n g.nodes.at(incl) .included.push_back(fname);\n }\n }\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"graph has been constructed. flattening...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // flatten graph by topological sort\n\n // collect files that does not include\n std::vector sources;\n for(const auto& [fname, node] : g.nodes)\n {\n if(node.includes.empty())\n {\n sources.push_back(fname);\n }\n }\n assert( ! sources.empty());\n\n std::vector sorted;\n while( ! 
sources.empty())\n {\n const auto file = sources.back();\n sorted.push_back(sources.back());\n sources.pop_back();\n\n for(const auto& included : g.nodes.at(file).included)\n {\n auto found = std::find(g.nodes.at(included).includes.begin(),\n g.nodes.at(included).includes.end(), file);\n g.nodes.at(included).includes.erase(found);\n\n if(g.nodes.at(included).includes.empty())\n {\n sources.push_back(included);\n }\n }\n }\n\n std::cerr << \"-------------------------------------------------------------\\n\";\n std::cerr << \"flattened. outputting...\\n\";\n std::cerr << \"-------------------------------------------------------------\\n\";\n\n // -------------------------------------------------------------------------\n // output all the file in the sorted order\n\n for(const auto& fname : sorted)\n {\n std::cerr << \"expanding: \" << fname << std::endl;\n for(const auto& line : files.at(fname).content)\n {\n std::cout << line << '\\n';\n }\n }\n\n return 0;\n}\n"], ["/lsfg-vk/src/extract/extract.cpp", "#include \"extract/extract.hpp\"\n#include \"config/config.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nusing namespace Extract;\n\nconst std::unordered_map nameIdxTable = {{\n { \"mipmaps\", 255 },\n { \"alpha[0]\", 267 },\n { \"alpha[1]\", 268 },\n { \"alpha[2]\", 269 },\n { \"alpha[3]\", 270 },\n { \"beta[0]\", 275 },\n { \"beta[1]\", 276 },\n { \"beta[2]\", 277 },\n { \"beta[3]\", 278 },\n { \"beta[4]\", 279 },\n { \"gamma[0]\", 257 },\n { \"gamma[1]\", 259 },\n { \"gamma[2]\", 260 },\n { \"gamma[3]\", 261 },\n { \"gamma[4]\", 262 },\n { \"delta[0]\", 257 },\n { \"delta[1]\", 263 },\n { \"delta[2]\", 264 },\n { \"delta[3]\", 265 },\n { \"delta[4]\", 266 },\n { \"delta[5]\", 258 },\n { \"delta[6]\", 271 },\n { \"delta[7]\", 272 },\n { \"delta[8]\", 273 },\n { \"delta[9]\", 274 },\n { \"generate\", 256 },\n { \"p_mipmaps\", 255 },\n { \"p_alpha[0]\", 290 },\n { \"p_alpha[1]\", 291 },\n { 
\"p_alpha[2]\", 292 },\n { \"p_alpha[3]\", 293 },\n { \"p_beta[0]\", 298 },\n { \"p_beta[1]\", 299 },\n { \"p_beta[2]\", 300 },\n { \"p_beta[3]\", 301 },\n { \"p_beta[4]\", 302 },\n { \"p_gamma[0]\", 280 },\n { \"p_gamma[1]\", 282 },\n { \"p_gamma[2]\", 283 },\n { \"p_gamma[3]\", 284 },\n { \"p_gamma[4]\", 285 },\n { \"p_delta[0]\", 280 },\n { \"p_delta[1]\", 286 },\n { \"p_delta[2]\", 287 },\n { \"p_delta[3]\", 288 },\n { \"p_delta[4]\", 289 },\n { \"p_delta[5]\", 281 },\n { \"p_delta[6]\", 294 },\n { \"p_delta[7]\", 295 },\n { \"p_delta[8]\", 296 },\n { \"p_delta[9]\", 297 },\n { \"p_generate\", 256 },\n}};\n\nnamespace {\n auto& shaders() {\n static std::unordered_map> shaderData;\n return shaderData;\n }\n\n int on_resource(void*, const peparse::resource& res) {\n if (res.type != peparse::RT_RCDATA || res.buf == nullptr || res.buf->bufLen <= 0)\n return 0;\n std::vector resource_data(res.buf->bufLen);\n std::copy_n(res.buf->buf, res.buf->bufLen, resource_data.data());\n shaders()[res.name] = resource_data;\n return 0;\n }\n\n const std::vector PATHS{{\n \".local/share/Steam/steamapps/common\",\n \".steam/steam/steamapps/common\",\n \".steam/debian-installation/steamapps/common\",\n \".var/app/com.valvesoftware.Steam/.local/share/Steam/steamapps/common\",\n \"snap/steam/common/.local/share/Steam/steamapps/common\"\n }};\n\n std::string getDllPath() {\n // overriden path\n std::string dllPath = Config::activeConf.dll;\n if (!dllPath.empty())\n return dllPath;\n // home based paths\n const char* home = getenv(\"HOME\");\n const std::string homeStr = home ? 
home : \"\";\n for (const auto& base : PATHS) {\n const std::filesystem::path path =\n std::filesystem::path(homeStr) / base / \"Lossless Scaling\" / \"Lossless.dll\";\n if (std::filesystem::exists(path))\n return path.string();\n }\n // xdg home\n const char* dataDir = getenv(\"XDG_DATA_HOME\");\n if (dataDir && *dataDir != '\\0')\n return std::string(dataDir) + \"/Steam/steamapps/common/Lossless Scaling/Lossless.dll\";\n // final fallback\n return \"Lossless.dll\";\n }\n}\n\nvoid Extract::extractShaders() {\n if (!shaders().empty())\n return;\n\n // parse the dll\n peparse::parsed_pe* dll = peparse::ParsePEFromFile(getDllPath().c_str());\n if (!dll)\n throw std::runtime_error(\"Unable to read Lossless.dll, is it installed?\");\n peparse::IterRsrc(dll, on_resource, nullptr);\n peparse::DestructParsedPE(dll);\n\n // ensure all shaders are present\n for (const auto& [name, idx] : nameIdxTable)\n if (shaders().find(idx) == shaders().end())\n throw std::runtime_error(\"Shader not found: \" + name + \".\\n- Is Lossless Scaling up to date?\");\n}\n\nstd::vector Extract::getShader(const std::string& name) {\n if (shaders().empty())\n throw std::runtime_error(\"Shaders are not loaded.\");\n\n auto hit = nameIdxTable.find(name);\n if (hit == nameIdxTable.end())\n throw std::runtime_error(\"Shader hash not found: \" + name);\n\n auto sit = shaders().find(hit->second);\n if (sit == shaders().end())\n throw std::runtime_error(\"Shader not found: \" + name);\n\n return sit->second;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_bit.h", "#pragma once\n\n#if (defined(__x86_64__) && !defined(__arm64ec__)) || (defined(_M_X64) && !defined(_M_ARM64EC)) \\\n || defined(__i386__) || defined(_M_IX86) || defined(__e2k__)\n #define DXVK_ARCH_X86\n #if defined(__x86_64__) || defined(_M_X64) || defined(__e2k__)\n #define DXVK_ARCH_X86_64\n #endif\n#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)\n #define DXVK_ARCH_ARM64\n#endif\n\n#ifdef DXVK_ARCH_X86\n #ifndef 
_MSC_VER\n #if defined(_WIN32) && (defined(__AVX__) || defined(__AVX2__))\n #error \"AVX-enabled builds not supported due to stack alignment issues.\"\n #endif\n #if defined(__WINE__) && defined(__clang__)\n #pragma push_macro(\"_WIN32\")\n #undef _WIN32\n #endif\n #include \n #if defined(__WINE__) && defined(__clang__)\n #pragma pop_macro(\"_WIN32\")\n #endif\n #else\n #include \n #endif\n#endif\n\n#include \"util_likely.h\"\n#include \"util_math.h\"\n\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace dxvk::bit {\n\n template\n T cast(const J& src) {\n static_assert(sizeof(T) == sizeof(J));\n static_assert(std::is_trivially_copyable::value && std::is_trivial::value);\n\n T dst;\n std::memcpy(&dst, &src, sizeof(T));\n return dst;\n }\n \n template\n T extract(T value, uint32_t fst, uint32_t lst) {\n return (value >> fst) & ~(~T(0) << (lst - fst + 1));\n }\n\n template\n T popcnt(T n) {\n n -= ((n >> 1u) & T(0x5555555555555555ull));\n n = (n & T(0x3333333333333333ull)) + ((n >> 2u) & T(0x3333333333333333ull));\n n = (n + (n >> 4u)) & T(0x0f0f0f0f0f0f0f0full);\n n *= T(0x0101010101010101ull);\n return n >> (8u * (sizeof(T) - 1u));\n }\n\n inline uint32_t tzcnt(uint32_t n) {\n #if defined(_MSC_VER) && !defined(__clang__)\n if(n == 0)\n return 32;\n return _tzcnt_u32(n);\n #elif defined(__BMI__)\n return __tzcnt_u32(n);\n #elif defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__))\n // tzcnt is encoded as rep bsf, so we can use it on all\n // processors, but the behaviour of zero inputs differs:\n // - bsf: zf = 1, cf = ?, result = ?\n // - tzcnt: zf = 0, cf = 1, result = 32\n // We'll have to handle this case manually.\n uint32_t res;\n uint32_t tmp;\n asm (\n \"tzcnt %2, %0;\"\n \"mov $32, %1;\"\n \"test %2, %2;\"\n \"cmovz %1, %0;\"\n : \"=&r\" (res), \"=&r\" (tmp)\n : \"r\" (n)\n : \"cc\");\n return res;\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? 
__builtin_ctz(n) : 32;\n #else\n uint32_t r = 31;\n n &= -n;\n r -= (n & 0x0000FFFF) ? 16 : 0;\n r -= (n & 0x00FF00FF) ? 8 : 0;\n r -= (n & 0x0F0F0F0F) ? 4 : 0;\n r -= (n & 0x33333333) ? 2 : 0;\n r -= (n & 0x55555555) ? 1 : 0;\n return n != 0 ? r : 32;\n #endif\n }\n\n inline uint32_t tzcnt(uint64_t n) {\n #if defined(DXVK_ARCH_X86_64) && defined(_MSC_VER) && !defined(__clang__)\n if(n == 0)\n return 64;\n return (uint32_t)_tzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && defined(__BMI__)\n return __tzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && (defined(__GNUC__) || defined(__clang__))\n uint64_t res;\n uint64_t tmp;\n asm (\n \"tzcnt %2, %0;\"\n \"mov $64, %1;\"\n \"test %2, %2;\"\n \"cmovz %1, %0;\"\n : \"=&r\" (res), \"=&r\" (tmp)\n : \"r\" (n)\n : \"cc\");\n return res;\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? __builtin_ctzll(n) : 64;\n #else\n uint32_t lo = uint32_t(n);\n if (lo) {\n return tzcnt(lo);\n } else {\n uint32_t hi = uint32_t(n >> 32);\n return tzcnt(hi) + 32;\n }\n #endif\n }\n\n inline uint32_t bsf(uint32_t n) {\n #if (defined(__GNUC__) || defined(__clang__)) && !defined(__BMI__) && defined(DXVK_ARCH_X86)\n uint32_t res;\n asm (\"tzcnt %1,%0\"\n : \"=r\" (res)\n : \"r\" (n)\n : \"cc\");\n return res;\n #else\n return tzcnt(n);\n #endif\n }\n\n inline uint32_t bsf(uint64_t n) {\n #if (defined(__GNUC__) || defined(__clang__)) && !defined(__BMI__) && defined(DXVK_ARCH_X86_64)\n uint64_t res;\n asm (\"tzcnt %1,%0\"\n : \"=r\" (res)\n : \"r\" (n)\n : \"cc\");\n return res;\n #else\n return tzcnt(n);\n #endif\n }\n\n inline uint32_t lzcnt(uint32_t n) {\n #if defined(_MSC_VER) && !defined(__clang__) && !defined(__LZCNT__)\n unsigned long bsr;\n if(n == 0)\n return 32;\n _BitScanReverse(&bsr, n);\n return 31-bsr;\n #elif (defined(_MSC_VER) && !defined(__clang__)) || defined(__LZCNT__)\n return _lzcnt_u32(n);\n #elif defined(__GNUC__) || defined(__clang__)\n return n != 0 ? 
__builtin_clz(n) : 32;\n #else\n uint32_t r = 0;\n\n if (n == 0)\treturn 32;\n\n if (n <= 0x0000FFFF) { r += 16; n <<= 16; }\n if (n <= 0x00FFFFFF) { r += 8; n <<= 8; }\n if (n <= 0x0FFFFFFF) { r += 4; n <<= 4; }\n if (n <= 0x3FFFFFFF) { r += 2; n <<= 2; }\n if (n <= 0x7FFFFFFF) { r += 1; n <<= 1; }\n\n return r;\n #endif\n }\n\n inline uint32_t lzcnt(uint64_t n) {\n #if defined(_MSC_VER) && !defined(__clang__) && !defined(__LZCNT__) && defined(DXVK_ARCH_X86_64)\n unsigned long bsr;\n if(n == 0)\n return 64;\n _BitScanReverse64(&bsr, n);\n return 63-bsr;\n #elif defined(DXVK_ARCH_X86_64) && ((defined(_MSC_VER) && !defined(__clang__)) && defined(__LZCNT__))\n return _lzcnt_u64(n);\n #elif defined(DXVK_ARCH_X86_64) && (defined(__GNUC__) || defined(__clang__))\n return n != 0 ? __builtin_clzll(n) : 64;\n #else\n uint32_t lo = uint32_t(n);\n uint32_t hi = uint32_t(n >> 32u);\n return hi ? lzcnt(hi) : lzcnt(lo) + 32u;\n #endif\n }\n\n template\n uint32_t pack(T& dst, uint32_t& shift, T src, uint32_t count) {\n constexpr uint32_t Bits = 8 * sizeof(T);\n if (likely(shift < Bits))\n dst |= src << shift;\n shift += count;\n return shift > Bits ? shift - Bits : 0;\n }\n\n template\n uint32_t unpack(T& dst, T src, uint32_t& shift, uint32_t count) {\n constexpr uint32_t Bits = 8 * sizeof(T);\n if (likely(shift < Bits))\n dst = (src >> shift) & ((T(1) << count) - 1);\n shift += count;\n return shift > Bits ? shift - Bits : 0;\n }\n\n\n /**\n * \\brief Clears cache lines of memory\n *\n * Uses non-temporal stores. 
The memory region offset\n * and size are assumed to be aligned to 64 bytes.\n * \\param [in] mem Memory region to clear\n * \\param [in] size Number of bytes to clear\n */\n inline void bclear(void* mem, size_t size) {\n #if defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))\n auto zero = _mm_setzero_si128();\n\n #if defined(__clang__)\n #pragma nounroll\n #elif defined(__GNUC__)\n #pragma GCC unroll 0\n #endif\n for (size_t i = 0; i < size; i += 64u) {\n auto* ptr = reinterpret_cast<__m128i*>(mem) + i / sizeof(zero);\n _mm_stream_si128(ptr + 0u, zero);\n _mm_stream_si128(ptr + 1u, zero);\n _mm_stream_si128(ptr + 2u, zero);\n _mm_stream_si128(ptr + 3u, zero);\n }\n #else\n std::memset(mem, 0, size);\n #endif\n }\n\n\n /**\n * \\brief Compares two aligned structs bit by bit\n *\n * \\param [in] a First struct\n * \\param [in] b Second struct\n * \\returns \\c true if the structs are equal\n */\n template\n bool bcmpeq(const T* a, const T* b) {\n static_assert(alignof(T) >= 16);\n #if defined(DXVK_ARCH_X86) && (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))\n auto ai = reinterpret_cast(a);\n auto bi = reinterpret_cast(b);\n\n size_t i = 0;\n\n #if defined(__clang__)\n #pragma nounroll\n #elif defined(__GNUC__)\n #pragma GCC unroll 0\n #endif\n\n for ( ; i < 2 * (sizeof(T) / 32); i += 2) {\n __m128i eq0 = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i),\n _mm_load_si128(bi + i));\n __m128i eq1 = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i + 1),\n _mm_load_si128(bi + i + 1));\n __m128i eq = _mm_and_si128(eq0, eq1);\n\n int mask = _mm_movemask_epi8(eq);\n if (mask != 0xFFFF)\n return false;\n }\n\n for ( ; i < sizeof(T) / 16; i++) {\n __m128i eq = _mm_cmpeq_epi8(\n _mm_load_si128(ai + i),\n _mm_load_si128(bi + i));\n\n int mask = _mm_movemask_epi8(eq);\n if (mask != 0xFFFF)\n return false;\n }\n\n return true;\n #else\n return !std::memcmp(a, b, sizeof(T));\n #endif\n }\n\n template \n class bitset {\n static constexpr size_t 
Dwords = align(Bits, 32) / 32;\n public:\n\n constexpr bitset()\n : m_dwords() {\n\n }\n\n constexpr bool get(uint32_t idx) const {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n return m_dwords[dword] & (1u << bit);\n }\n\n constexpr void set(uint32_t idx, bool value) {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n if (value)\n m_dwords[dword] |= 1u << bit;\n else\n m_dwords[dword] &= ~(1u << bit);\n }\n\n constexpr bool exchange(uint32_t idx, bool value) {\n bool oldValue = get(idx);\n set(idx, value);\n return oldValue;\n }\n\n constexpr void flip(uint32_t idx) {\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n // Compiler doesn't remove this otherwise.\n if constexpr (Dwords > 1) {\n dword = idx / 32;\n bit = idx % 32;\n }\n\n m_dwords[dword] ^= 1u << bit;\n }\n\n constexpr void setAll() {\n if constexpr (Bits % 32 == 0) {\n for (size_t i = 0; i < Dwords; i++)\n m_dwords[i] = std::numeric_limits::max();\n }\n else {\n for (size_t i = 0; i < Dwords - 1; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n m_dwords[Dwords - 1] = (1u << (Bits % 32)) - 1;\n }\n }\n\n constexpr void clearAll() {\n for (size_t i = 0; i < Dwords; i++)\n m_dwords[i] = 0;\n }\n\n constexpr bool any() const {\n for (size_t i = 0; i < Dwords; i++) {\n if (m_dwords[i] != 0)\n return true;\n }\n\n return false;\n }\n\n constexpr uint32_t& dword(uint32_t idx) {\n return m_dwords[idx];\n }\n\n constexpr size_t bitCount() {\n return Bits;\n }\n\n constexpr size_t dwordCount() {\n return Dwords;\n }\n\n constexpr bool operator [] (uint32_t idx) const {\n return get(idx);\n }\n\n constexpr void setN(uint32_t bits) {\n uint32_t fullDwords = bits / 32;\n uint32_t offset = bits % 32;\n\n for (size_t i = 0; i < fullDwords; i++)\n m_dwords[i] = 
std::numeric_limits::max();\n \n if (offset > 0)\n m_dwords[fullDwords] = (1u << offset) - 1;\n }\n\n private:\n\n uint32_t m_dwords[Dwords];\n\n };\n\n class bitvector {\n public:\n\n bool get(uint32_t idx) const {\n uint32_t dword = idx / 32;\n uint32_t bit = idx % 32;\n\n return m_dwords[dword] & (1u << bit);\n }\n\n void ensureSize(uint32_t bitCount) {\n uint32_t dword = bitCount / 32;\n if (unlikely(dword >= m_dwords.size())) {\n m_dwords.resize(dword + 1);\n }\n m_bitCount = std::max(m_bitCount, bitCount);\n }\n\n void set(uint32_t idx, bool value) {\n ensureSize(idx + 1);\n\n uint32_t dword = 0;\n uint32_t bit = idx;\n\n if (value)\n m_dwords[dword] |= 1u << bit;\n else\n m_dwords[dword] &= ~(1u << bit);\n }\n\n bool exchange(uint32_t idx, bool value) {\n ensureSize(idx + 1);\n\n bool oldValue = get(idx);\n set(idx, value);\n return oldValue;\n }\n\n void flip(uint32_t idx) {\n ensureSize(idx + 1);\n\n uint32_t dword = idx / 32;\n uint32_t bit = idx % 32;\n\n m_dwords[dword] ^= 1u << bit;\n }\n\n void setAll() {\n if (m_bitCount % 32 == 0) {\n for (size_t i = 0; i < m_dwords.size(); i++)\n m_dwords[i] = std::numeric_limits::max();\n }\n else {\n for (size_t i = 0; i < m_dwords.size() - 1; i++)\n m_dwords[i] = std::numeric_limits::max();\n\n m_dwords[m_dwords.size() - 1] = (1u << (m_bitCount % 32)) - 1;\n }\n }\n\n void clearAll() {\n for (size_t i = 0; i < m_dwords.size(); i++)\n m_dwords[i] = 0;\n }\n\n bool any() const {\n for (size_t i = 0; i < m_dwords.size(); i++) {\n if (m_dwords[i] != 0)\n return true;\n }\n\n return false;\n }\n\n uint32_t& dword(uint32_t idx) {\n return m_dwords[idx];\n }\n\n size_t bitCount() const {\n return m_bitCount;\n }\n\n size_t dwordCount() const {\n return m_dwords.size();\n }\n\n bool operator [] (uint32_t idx) const {\n return get(idx);\n }\n\n void setN(uint32_t bits) {\n ensureSize(bits);\n\n uint32_t fullDwords = bits / 32;\n uint32_t offset = bits % 32;\n\n for (size_t i = 0; i < fullDwords; i++)\n m_dwords[i] = 
std::numeric_limits::max();\n\n if (offset > 0)\n m_dwords[fullDwords] = (1u << offset) - 1;\n }\n\n private:\n\n std::vector m_dwords;\n uint32_t m_bitCount = 0;\n\n };\n\n template\n class BitMask {\n\n public:\n\n class iterator {\n public:\n using iterator_category = std::input_iterator_tag;\n using value_type = T;\n using difference_type = T;\n using pointer = const T*;\n using reference = T;\n\n explicit iterator(T flags)\n : m_mask(flags) { }\n\n iterator& operator ++ () {\n m_mask &= m_mask - 1;\n return *this;\n }\n\n iterator operator ++ (int) {\n iterator retval = *this;\n m_mask &= m_mask - 1;\n return retval;\n }\n\n T operator * () const {\n return bsf(m_mask);\n }\n\n bool operator == (iterator other) const { return m_mask == other.m_mask; }\n bool operator != (iterator other) const { return m_mask != other.m_mask; }\n\n private:\n\n T m_mask;\n\n };\n\n BitMask()\n : m_mask(0) { }\n\n explicit BitMask(T n)\n : m_mask(n) { }\n\n iterator begin() {\n return iterator(m_mask);\n }\n\n iterator end() {\n return iterator(0);\n }\n\n private:\n\n T m_mask;\n\n };\n\n\n /**\n * \\brief Encodes float as fixed point\n *\n * Rounds away from zero. If this is not suitable for\n * certain use cases, implement round to nearest even.\n * \\tparam T Integer type, may be signed\n * \\tparam I Integer bits\n * \\tparam F Fractional bits\n * \\param n Float to encode\n * \\returns Encoded fixed-point value\n */\n template\n T encodeFixed(float n) {\n if (n != n)\n return 0u;\n\n n *= float(1u << F);\n\n if constexpr (std::is_signed_v) {\n n = std::max(n, -float(1u << (I + F - 1u)));\n n = std::min(n, float(1u << (I + F - 1u)) - 1.0f);\n n += n < 0.0f ? 
-0.5f : 0.5f;\n } else {\n n = std::max(n, 0.0f);\n n = std::min(n, float(1u << (I + F)) - 1.0f);\n n += 0.5f;\n }\n\n T result = T(n);\n\n if constexpr (std::is_signed_v)\n result &= ((T(1u) << (I + F)) - 1u);\n\n return result;\n }\n\n\n /**\n * \\brief Decodes fixed-point integer to float\n *\n * \\tparam T Integer type, may be signed\n * \\tparam I Integer bits\n * \\tparam F Fractional bits\n * \\param n Number to decode\n * \\returns Decoded number\n */\n template\n float decodeFixed(T n) {\n // Sign-extend as necessary\n if constexpr (std::is_signed_v)\n n -= (n & (T(1u) << (I + F - 1u))) << 1u;\n\n return float(n) / float(1u << F);\n }\n\n\n /**\n * \\brief Inserts one null bit after each bit\n */\n inline uint32_t split2(uint32_t c) {\n c = (c ^ (c << 8u)) & 0x00ff00ffu;\n c = (c ^ (c << 4u)) & 0x0f0f0f0fu;\n c = (c ^ (c << 2u)) & 0x33333333u;\n c = (c ^ (c << 1u)) & 0x55555555u;\n return c;\n }\n\n\n /**\n * \\brief Inserts two null bits after each bit\n */\n inline uint64_t split3(uint64_t c) {\n c = (c | c << 32u) & 0x001f00000000ffffull;\n c = (c | c << 16u) & 0x001f0000ff0000ffull;\n c = (c | c << 8u) & 0x100f00f00f00f00full;\n c = (c | c << 4u) & 0x10c30c30c30c30c3ull;\n c = (c | c << 2u) & 0x1249249249249249ull;\n return c;\n }\n\n\n /**\n * \\brief Interleaves bits from two integers\n *\n * Both numbers must fit into 16 bits.\n * \\param [in] x X coordinate\n * \\param [in] y Y coordinate\n * \\returns Morton code of x and y\n */\n inline uint32_t interleave(uint16_t x, uint16_t y) {\n return split2(x) | (split2(y) << 1u);\n }\n\n\n /**\n * \\brief Interleaves bits from three integers\n *\n * All three numbers must fit into 16 bits.\n */\n inline uint64_t interleave(uint16_t x, uint16_t y, uint16_t z) {\n return split3(x) | (split3(y) << 1u) | (split3(z) << 2u);\n }\n\n\n /**\n * \\brief 48-bit integer storage type\n */\n struct uint48_t {\n explicit uint48_t(uint64_t n)\n : a(uint16_t(n)), b(uint16_t(n >> 16)), c(uint16_t(n >> 32)) { }\n\n 
uint16_t a;\n uint16_t b;\n uint16_t c;\n\n explicit operator uint64_t () const {\n // GCC generates worse code if we promote to uint64 directly\n uint32_t lo = uint32_t(a) | (uint32_t(b) << 16);\n return uint64_t(lo) | (uint64_t(c) << 32);\n }\n };\n\n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/buffer.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n\n// keep this header above \"windows.h\" because it contains many types\n#include \n\n#ifdef _WIN32\n\n#define WIN32_LEAN_AND_MEAN\n#define VC_EXTRALEAN\n\n#include \n#include \n#else\n#include \n#include \n#include \n#include \n#endif\n\nnamespace {\n\ninline std::uint16_t byteSwapUint16(std::uint16_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_ushort(val);\n#else\n return __builtin_bswap16(val);\n#endif\n}\n\ninline std::uint32_t byteSwapUint32(std::uint32_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_ulong(val);\n#else\n return __builtin_bswap32(val);\n#endif\n}\n\ninline uint64_t byteSwapUint64(std::uint64_t val) {\n#if defined(_MSC_VER) || defined(_MSC_FULL_VER)\n return _byteswap_uint64(val);\n#else\n return __builtin_bswap64(val);\n#endif\n}\n\n} // anonymous namespace\n\nnamespace peparse {\n\nextern std::uint32_t err;\nextern std::string err_loc;\n\nstruct buffer_detail {\n#ifdef _WIN32\n HANDLE file;\n HANDLE sec;\n#else\n int fd;\n#endif\n};\n\nbool readByte(bounded_buffer *b, std::uint32_t offset, std::uint8_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (offset >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint8_t *tmp = (b->buf + offset);\n out = *tmp;\n\n return true;\n}\n\nbool readWord(bounded_buffer *b, std::uint32_t offset, std::uint16_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 1 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint16_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint16_t));\n if (b->swapBytes) {\n out = byteSwapUint16(tmp);\n } else 
{\n out = tmp;\n }\n\n return true;\n}\n\nbool readDword(bounded_buffer *b, std::uint32_t offset, std::uint32_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 3 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint32_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint32_t));\n if (b->swapBytes) {\n out = byteSwapUint32(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readQword(bounded_buffer *b, std::uint32_t offset, std::uint64_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 7 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n std::uint64_t tmp;\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint64_t));\n if (b->swapBytes) {\n out = byteSwapUint64(tmp);\n } else {\n out = tmp;\n }\n\n return true;\n}\n\nbool readChar16(bounded_buffer *b, std::uint32_t offset, char16_t &out) {\n if (b == nullptr) {\n PE_ERR(PEERR_BUFFER);\n return false;\n }\n\n if (static_cast(offset) + 1 >= b->bufLen) {\n PE_ERR(PEERR_ADDRESS);\n return false;\n }\n\n char16_t tmp;\n if (b->swapBytes) {\n std::uint8_t tmpBuf[2];\n tmpBuf[0] = *(b->buf + offset + 1);\n tmpBuf[1] = *(b->buf + offset);\n memcpy(&tmp, tmpBuf, sizeof(std::uint16_t));\n } else {\n memcpy(&tmp, (b->buf + offset), sizeof(std::uint16_t));\n }\n out = tmp;\n\n return true;\n}\n\nbounded_buffer *readFileToFileBuffer(const char *filePath) {\n#ifdef _WIN32\n HANDLE h = CreateFileA(filePath,\n GENERIC_READ,\n FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,\n nullptr,\n OPEN_EXISTING,\n FILE_ATTRIBUTE_NORMAL,\n nullptr);\n if (h == INVALID_HANDLE_VALUE) {\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n\n DWORD fileSize = GetFileSize(h, nullptr);\n\n if (fileSize == INVALID_FILE_SIZE) {\n CloseHandle(h);\n PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n\n#else\n // only where we have mmap / open / etc\n int fd = open(filePath, O_RDONLY);\n\n if (fd == -1) {\n 
PE_ERR(PEERR_OPEN);\n return nullptr;\n }\n#endif\n\n // make a buffer object\n bounded_buffer *p = new (std::nothrow) bounded_buffer();\n if (p == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n memset(p, 0, sizeof(bounded_buffer));\n buffer_detail *d = new (std::nothrow) buffer_detail();\n\n if (d == nullptr) {\n delete p;\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n memset(d, 0, sizeof(buffer_detail));\n p->detail = d;\n\n// only where we have mmap / open / etc\n#ifdef _WIN32\n p->detail->file = h;\n\n HANDLE hMap = CreateFileMapping(h, nullptr, PAGE_READONLY, 0, 0, nullptr);\n\n if (hMap == nullptr) {\n CloseHandle(h);\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->detail->sec = hMap;\n\n LPVOID ptr = MapViewOfFile(hMap, FILE_MAP_READ, 0, 0, 0);\n\n if (ptr == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->buf = reinterpret_cast(ptr);\n p->bufLen = fileSize;\n#else\n p->detail->fd = fd;\n\n struct stat s;\n memset(&s, 0, sizeof(struct stat));\n\n if (fstat(fd, &s) != 0) {\n close(fd);\n delete d;\n delete p;\n PE_ERR(PEERR_STAT);\n return nullptr;\n }\n\n void *maddr = mmap(nullptr,\n static_cast(s.st_size),\n PROT_READ,\n MAP_SHARED,\n fd,\n 0);\n\n if (maddr == MAP_FAILED) {\n close(fd);\n delete d;\n delete p;\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->buf = reinterpret_cast(maddr);\n p->bufLen = static_cast(s.st_size);\n#endif\n p->copy = false;\n p->swapBytes = false;\n\n return p;\n}\n\nbounded_buffer *makeBufferFromPointer(std::uint8_t *data, std::uint32_t sz) {\n if (data == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n bounded_buffer *p = new (std::nothrow) bounded_buffer();\n\n if (p == nullptr) {\n PE_ERR(PEERR_MEM);\n return nullptr;\n }\n\n p->copy = true;\n p->detail = nullptr;\n p->buf = data;\n p->bufLen = sz;\n p->swapBytes = false;\n\n return p;\n}\n\n// split buffer inclusively from from to to by offset\nbounded_buffer *\nsplitBuffer(bounded_buffer *b, std::uint32_t from, std::uint32_t to) {\n if 
(b == nullptr) {\n return nullptr;\n }\n\n // safety checks\n if (to < from || to > b->bufLen) {\n return nullptr;\n }\n\n // make a new buffer\n auto newBuff = new (std::nothrow) bounded_buffer();\n if (newBuff == nullptr) {\n return nullptr;\n }\n\n newBuff->copy = true;\n newBuff->buf = b->buf + from;\n newBuff->bufLen = (to - from);\n\n return newBuff;\n}\n\nvoid deleteBuffer(bounded_buffer *b) {\n if (b == nullptr) {\n return;\n }\n\n if (!b->copy) {\n#ifdef _WIN32\n UnmapViewOfFile(b->buf);\n CloseHandle(b->detail->sec);\n CloseHandle(b->detail->file);\n#else\n munmap(b->buf, b->bufLen);\n close(b->detail->fd);\n#endif\n }\n\n delete b->detail;\n delete b;\n}\n\nstd::uint64_t bufLen(bounded_buffer *b) {\n return b->bufLen;\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_instruction.h", "#pragma once\n\n#include \"spirv_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief SPIR-V instruction\n * \n * Helps parsing a single instruction, providing\n * access to the op code, instruction length and\n * instruction arguments.\n */\n class SpirvInstruction {\n \n public:\n \n SpirvInstruction() { }\n SpirvInstruction(uint32_t* code, uint32_t offset, uint32_t length)\n : m_code(code), m_offset(offset), m_length(length) { }\n \n /**\n * \\brief SPIR-V Op code\n * \\returns The op code\n */\n spv::Op opCode() const {\n return static_cast(\n this->arg(0) & spv::OpCodeMask);\n }\n \n /**\n * \\brief Instruction length\n * \\returns Number of DWORDs\n */\n uint32_t length() const {\n return this->arg(0) >> spv::WordCountShift;\n }\n \n /**\n * \\brief Instruction offset\n * \\returns Offset in DWORDs\n */\n uint32_t offset() const {\n return m_offset;\n }\n \n /**\n * \\brief Argument value\n * \n * Retrieves an argument DWORD. Note that some instructions\n * take 64-bit arguments which require more than one DWORD.\n * Arguments start at index 1. 
Calling this method with an\n * argument ID of 0 will return the opcode token.\n * \\param [in] idx Argument index, starting at 1\n * \\returns The argument value\n */\n uint32_t arg(uint32_t idx) const {\n const uint32_t index = m_offset + idx;\n return index < m_length ? m_code[index] : 0;\n }\n\n /**\n * \\brief Argument string\n *\n * Retrieves a pointer to a UTF-8-encoded string.\n * \\param [in] idx Argument index, starting at 1\n * \\returns Pointer to the literal string\n */\n const char* chr(uint32_t idx) const {\n const uint32_t index = m_offset + idx;\n return index < m_length ? reinterpret_cast(&m_code[index]) : nullptr;\n }\n \n /**\n * \\brief Changes the value of an argument\n * \n * \\param [in] idx Argument index, starting at 1\n * \\param [in] word New argument word\n */\n void setArg(uint32_t idx, uint32_t word) const {\n if (m_offset + idx < m_length)\n m_code[m_offset + idx] = word;\n }\n \n private:\n \n uint32_t* m_code = nullptr;\n uint32_t m_offset = 0;\n uint32_t m_length = 0;\n \n };\n \n \n /**\n * \\brief SPIR-V instruction iterator\n * \n * Convenient iterator that can be used\n * to process raw SPIR-V shader code.\n */\n class SpirvInstructionIterator {\n \n public:\n \n SpirvInstructionIterator() { }\n SpirvInstructionIterator(uint32_t* code, uint32_t offset, uint32_t length)\n : m_code (length != 0 ? code : nullptr),\n m_offset(length != 0 ? 
offset : 0),\n m_length(length) {\n if ((length >= 5) && (offset == 0) && (m_code[0] == spv::MagicNumber))\n this->advance(5);\n }\n \n SpirvInstructionIterator& operator ++ () {\n this->advance(SpirvInstruction(m_code, m_offset, m_length).length());\n return *this;\n }\n \n SpirvInstructionIterator operator ++ (int) {\n SpirvInstructionIterator result = *this;\n this->advance(SpirvInstruction(m_code, m_offset, m_length).length());\n return result;\n }\n \n SpirvInstruction operator * () const {\n return SpirvInstruction(m_code, m_offset, m_length);\n }\n \n bool operator == (const SpirvInstructionIterator& other) const {\n return this->m_code == other.m_code\n && this->m_offset == other.m_offset\n && this->m_length == other.m_length;\n }\n \n bool operator != (const SpirvInstructionIterator& other) const {\n return this->m_code != other.m_code\n || this->m_offset != other.m_offset\n || this->m_length != other.m_length;\n }\n \n private:\n \n uint32_t* m_code = nullptr;\n uint32_t m_offset = 0;\n uint32_t m_length = 0;\n \n void advance(uint32_t n) {\n if (m_offset + n < m_length) {\n m_offset += n;\n } else {\n m_code = nullptr;\n m_offset = 0;\n m_length = 0;\n }\n }\n \n };\n \n}"], ["/lsfg-vk/src/main.cpp", "#include \"config/config.hpp\"\n#include \"extract/extract.hpp\"\n#include \"utils/benchmark.hpp\"\n#include \"utils/utils.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace {\n __attribute__((constructor)) void lsfgvk_init() {\n std::cerr << std::unitbuf;\n\n // read configuration\n const std::string file = Utils::getConfigFile();\n try {\n Config::updateConfig(file);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occured while trying to parse the configuration, IGNORING:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n return; // default configuration will unload\n }\n\n const auto name = Utils::getProcessName();\n try {\n Config::activeConf = 
Config::getConfig(name);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: The configuration for \" << name.second << \" is invalid, IGNORING:\\n\";\n std::cerr << e.what() << '\\n';\n return; // default configuration will unload\n }\n\n // exit silently if not enabled\n auto& conf = Config::activeConf;\n if (!conf.enable && name.second != \"benchmark\")\n return; // default configuration will unload\n\n // print config\n std::cerr << \"lsfg-vk: Loaded configuration for \" << name.second << \":\\n\";\n if (!conf.dll.empty()) std::cerr << \" Using DLL from: \" << conf.dll << '\\n';\n std::cerr << \" Multiplier: \" << conf.multiplier << '\\n';\n std::cerr << \" Flow Scale: \" << conf.flowScale << '\\n';\n std::cerr << \" Performance Mode: \" << (conf.performance ? \"Enabled\" : \"Disabled\") << '\\n';\n std::cerr << \" HDR Mode: \" << (conf.hdr ? \"Enabled\" : \"Disabled\") << '\\n';\n if (conf.e_present != 2) std::cerr << \" ! Present Mode: \" << conf.e_present << '\\n';\n\n // remove mesa var in favor of config\n unsetenv(\"MESA_VK_WSI_PRESENT_MODE\"); // NOLINT\n\n // write latest file\n try {\n std::ofstream latest(\"/tmp/lsfg-vk_last\", std::ios::trunc);\n if (!latest.is_open())\n throw std::runtime_error(\"Failed to open /tmp/lsfg-vk_last for writing\");\n latest << \"exe: \" << name.first << '\\n';\n latest << \"comm: \" << name.second << '\\n';\n latest << \"pid: \" << getpid() << '\\n';\n if (!latest.good())\n throw std::runtime_error(\"Failed to write to /tmp/lsfg-vk_last\");\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to write the latest file, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n\n // load shaders\n try {\n Extract::extractShaders();\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to extract the shaders, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n 
std::cerr << \"lsfg-vk: Shaders extracted successfully.\\n\";\n\n // run benchmark if requested\n const char* benchmark_flag = std::getenv(\"LSFG_BENCHMARK\");\n if (!benchmark_flag)\n return;\n\n const std::string resolution(benchmark_flag);\n uint32_t width{};\n uint32_t height{};\n try {\n const size_t x = resolution.find('x');\n if (x == std::string::npos)\n throw std::runtime_error(\"Unable to find 'x' in benchmark string\");\n\n const std::string width_str = resolution.substr(0, x);\n const std::string height_str = resolution.substr(x + 1);\n if (width_str.empty() || height_str.empty())\n throw std::runtime_error(\"Invalid resolution\");\n\n const int32_t w = std::stoi(width_str);\n const int32_t h = std::stoi(height_str);\n if (w < 0 || h < 0)\n throw std::runtime_error(\"Resolution cannot be negative\");\n\n width = static_cast(w);\n height = static_cast(h);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred while trying to parse the resolution, exiting:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n\n std::thread benchmark([width, height]() {\n try {\n Benchmark::run(width, height);\n } catch (const std::exception& e) {\n std::cerr << \"lsfg-vk: An error occurred during the benchmark:\\n\";\n std::cerr << \"- \" << e.what() << '\\n';\n exit(EXIT_FAILURE);\n }\n });\n benchmark.detach();\n conf.enable = false;\n }\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_chunk_isgn.h", "class DxbcIsgn {\n public:\n DxbcIsgn(DxbcReader reader, DxbcTag tag) {\n uint32_t elementCount = reader.readu32();\n reader.skip(sizeof(uint32_t));\n \n std::array componentTypes = {\n DxbcScalarType::Uint32, DxbcScalarType::Uint32,\n DxbcScalarType::Sint32, DxbcScalarType::Float32,\n };\n\n // https://github.com/DarkStarSword/3d-fixes/blob/master/dx11shaderanalyse.py#L101\n bool hasStream = (tag == \"ISG1\") || (tag == \"OSG1\") || (tag == \"PSG1\") || (tag == \"OSG5\");\n bool hasPrecision = (tag == \"ISG1\") || 
(tag == \"OSG1\") || (tag == \"PSG1\");\n \n for (uint32_t i = 0; i < elementCount; i++) {\n DxbcSgnEntry entry;\n entry.streamId = hasStream ? reader.readu32() : 0;\n entry.semanticName = reader.clone(reader.readu32()).readString();\n entry.semanticIndex = reader.readu32();\n entry.systemValue = static_cast(reader.readu32());\n entry.componentType = componentTypes.at(reader.readu32());\n entry.registerId = reader.readu32();\n\n uint32_t mask = reader.readu32();\n\n entry.componentMask = bit::extract(mask, 0, 3);\n entry.componentUsed = bit::extract(mask, 8, 11);\n\n if (hasPrecision)\n reader.readu32();\n\n m_entries.push_back(entry);\n }\n }\n ~DxbcIsgn() {\n \n }\n const DxbcSgnEntry* findByRegister(\n uint32_t registerId) const;\n const DxbcSgnEntry* find(\n const std::string& semanticName,\n uint32_t semanticIndex,\n uint32_t streamIndex) const;\n DxbcRegMask regMask(\n uint32_t registerId) const {\n DxbcRegMask mask;\n\n for (auto e = this->begin(); e != this->end(); e++) {\n if (e->registerId == registerId)\n mask |= e->componentMask;\n }\n\n return mask;\n }\n uint32_t maxRegisterCount() const {\n uint32_t result = 0;\n for (auto e = this->begin(); e != this->end(); e++)\n result = std::max(result, e->registerId + 1);\n return result;\n }\n static bool compareSemanticNames(\n const std::string& a,\n const std::string& b) {\n if (a.size() != b.size())\n return false;\n \n for (size_t i = 0; i < a.size(); i++) {\n char ac = a[i];\n char bc = b[i];\n\n if (ac != bc) {\n if (ac >= 'A' && ac <= 'Z') ac += 'a' - 'A';\n if (bc >= 'A' && bc <= 'Z') bc += 'a' - 'A';\n\n if (ac != bc)\n return false;\n }\n }\n \n return true;\n }\n private:\n std::vector m_entries;\n};"], ["/lsfg-vk/thirdparty/pe-parse/dump-pe/main.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without 
restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n#include \n#include \n#include \n\n#include \n\n#include \"vendor/argh.h\"\n\nusing namespace peparse;\n\nint printExps(void *N,\n const VA &funcAddr,\n std::uint16_t ordinal,\n const std::string &mod,\n const std::string &func,\n const std::string &fwd) {\n static_cast(N);\n\n auto address = static_cast(funcAddr);\n\n // save default formatting\n std::ios initial(nullptr);\n initial.copyfmt(std::cout);\n\n std::cout << \"EXP #\";\n std::cout << ordinal;\n std::cout << \": \";\n std::cout << mod;\n std::cout << \"!\";\n std::cout << func;\n std::cout << \": \";\n if (!fwd.empty()) {\n std::cout << fwd;\n } else {\n std::cout << std::showbase << std::hex << address;\n }\n std::cout << \"\\n\";\n\n // restore default formatting\n std::cout.copyfmt(initial);\n return 0;\n}\n\nint printImports(void *N,\n const VA &impAddr,\n const std::string &modName,\n const std::string &symName) {\n static_cast(N);\n\n auto address = static_cast(impAddr);\n\n std::cout << \"0x\" << std::hex << address << \" \" << modName << \"!\" << symName;\n std::cout << \"\\n\";\n return 0;\n}\n\nint printRelocs(void *N, 
const VA &relocAddr, const reloc_type &type) {\n static_cast(N);\n\n std::cout << \"TYPE: \";\n switch (type) {\n case RELOC_ABSOLUTE:\n std::cout << \"ABSOLUTE\";\n break;\n case RELOC_HIGH:\n std::cout << \"HIGH\";\n break;\n case RELOC_LOW:\n std::cout << \"LOW\";\n break;\n case RELOC_HIGHLOW:\n std::cout << \"HIGHLOW\";\n break;\n case RELOC_HIGHADJ:\n std::cout << \"HIGHADJ\";\n break;\n case RELOC_MIPS_JMPADDR:\n std::cout << \"MIPS_JMPADDR\";\n break;\n case RELOC_MIPS_JMPADDR16:\n std::cout << \"MIPS_JMPADD16\";\n break;\n case RELOC_DIR64:\n std::cout << \"DIR64\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n\n std::cout << \" VA: 0x\" << std::hex << relocAddr << \"\\n\";\n\n return 0;\n}\n\nint printDebugs(void *N,\n const std::uint32_t &type,\n const bounded_buffer *data) {\n static_cast(N);\n\n std::cout << \"Debug Directory Type: \";\n switch (type) {\n case 0:\n std::cout << \"IMAGE_DEBUG_TYPE_UNKNOWN\";\n break;\n case 1:\n std::cout << \"IMAGE_DEBUG_TYPE_COFF\";\n break;\n case 2:\n std::cout << \"IMAGE_DEBUG_TYPE_CODEVIEW\";\n break;\n case 3:\n std::cout << \"IMAGE_DEBUG_TYPE_FPO\";\n break;\n case 4:\n std::cout << \"IMAGE_DEBUG_TYPE_MISC\";\n break;\n case 5:\n std::cout << \"IMAGE_DEBUG_TYPE_EXCEPTION\";\n break;\n case 6:\n std::cout << \"IMAGE_DEBUG_TYPE_FIXUP\";\n break;\n case 7:\n std::cout << \"IMAGE_DEBUG_TYPE_OMAP_TO_SRC\";\n break;\n case 8:\n std::cout << \"IMAGE_DEBUG_TYPE_OMAP_FROM_SRC\";\n break;\n case 9:\n std::cout << \"IMAGE_DEBUG_TYPE_BORLAND\";\n break;\n case 10:\n std::cout << \"IMAGE_DEBUG_TYPE_RESERVED10\";\n break;\n case 11:\n std::cout << \"IMAGE_DEBUG_TYPE_CLSID\";\n break;\n case 12:\n std::cout << \"IMAGE_DEBUG_TYPE_VC_FEATURE\";\n break;\n case 13:\n std::cout << \"IMAGE_DEBUG_TYPE_POGO\";\n break;\n case 14:\n std::cout << \"IMAGE_DEBUG_TYPE_ILTCG\";\n break;\n case 15:\n std::cout << \"IMAGE_DEBUG_TYPE_MPX\";\n break;\n case 16:\n std::cout << \"IMAGE_DEBUG_TYPE_REPRO\";\n break;\n case 20:\n 
std::cout << \"IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS\";\n break;\n default:\n std::cout << \"INVALID\";\n break;\n }\n std::cout << \"\\n\";\n std::cout << \"Debug Directory Data: \";\n for (uint32_t i = 0; i < data->bufLen; i++) {\n std::cout << \" 0x\" << std::hex << static_cast(data->buf[i]);\n }\n std::cout << \"\\n\";\n\n return 0;\n}\n\nint printSymbols(void *N,\n const std::string &strName,\n const uint32_t &value,\n const int16_t §ionNumber,\n const uint16_t &type,\n const uint8_t &storageClass,\n const uint8_t &numberOfAuxSymbols) {\n static_cast(N);\n\n std::cout << \"Symbol Name: \" << strName << \"\\n\";\n std::cout << \"Symbol Value: 0x\" << std::hex << value << \"\\n\";\n\n std::cout << \"Symbol Section Number: \";\n switch (sectionNumber) {\n case IMAGE_SYM_UNDEFINED:\n std::cout << \"UNDEFINED\";\n break;\n case IMAGE_SYM_ABSOLUTE:\n std::cout << \"ABSOLUTE\";\n break;\n case IMAGE_SYM_DEBUG:\n std::cout << \"DEBUG\";\n break;\n default:\n std::cout << sectionNumber;\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Type: \";\n switch (type) {\n case IMAGE_SYM_TYPE_NULL:\n std::cout << \"NULL\";\n break;\n case IMAGE_SYM_TYPE_VOID:\n std::cout << \"VOID\";\n break;\n case IMAGE_SYM_TYPE_CHAR:\n std::cout << \"CHAR\";\n break;\n case IMAGE_SYM_TYPE_SHORT:\n std::cout << \"SHORT\";\n break;\n case IMAGE_SYM_TYPE_INT:\n std::cout << \"INT\";\n break;\n case IMAGE_SYM_TYPE_LONG:\n std::cout << \"LONG\";\n break;\n case IMAGE_SYM_TYPE_FLOAT:\n std::cout << \"FLOAT\";\n break;\n case IMAGE_SYM_TYPE_DOUBLE:\n std::cout << \"DOUBLE\";\n break;\n case IMAGE_SYM_TYPE_STRUCT:\n std::cout << \"STRUCT\";\n break;\n case IMAGE_SYM_TYPE_UNION:\n std::cout << \"UNION\";\n break;\n case IMAGE_SYM_TYPE_ENUM:\n std::cout << \"ENUM\";\n break;\n case IMAGE_SYM_TYPE_MOE:\n std::cout << \"IMAGE_SYM_TYPE_MOE\";\n break;\n case IMAGE_SYM_TYPE_BYTE:\n std::cout << \"BYTE\";\n break;\n case IMAGE_SYM_TYPE_WORD:\n std::cout << \"WORD\";\n break;\n case 
IMAGE_SYM_TYPE_UINT:\n std::cout << \"UINT\";\n break;\n case IMAGE_SYM_TYPE_DWORD:\n std::cout << \"DWORD\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Storage Class: \";\n switch (storageClass) {\n case IMAGE_SYM_CLASS_END_OF_FUNCTION:\n std::cout << \"FUNCTION\";\n break;\n case IMAGE_SYM_CLASS_NULL:\n std::cout << \"NULL\";\n break;\n case IMAGE_SYM_CLASS_AUTOMATIC:\n std::cout << \"AUTOMATIC\";\n break;\n case IMAGE_SYM_CLASS_EXTERNAL:\n std::cout << \"EXTERNAL\";\n break;\n case IMAGE_SYM_CLASS_STATIC:\n std::cout << \"STATIC\";\n break;\n case IMAGE_SYM_CLASS_REGISTER:\n std::cout << \"REGISTER\";\n break;\n case IMAGE_SYM_CLASS_EXTERNAL_DEF:\n std::cout << \"EXTERNAL DEF\";\n break;\n case IMAGE_SYM_CLASS_LABEL:\n std::cout << \"LABEL\";\n break;\n case IMAGE_SYM_CLASS_UNDEFINED_LABEL:\n std::cout << \"UNDEFINED LABEL\";\n break;\n case IMAGE_SYM_CLASS_MEMBER_OF_STRUCT:\n std::cout << \"MEMBER OF STRUCT\";\n break;\n default:\n std::cout << \"UNKNOWN\";\n break;\n }\n std::cout << \"\\n\";\n\n std::cout << \"Symbol Number of Aux Symbols: \"\n << static_cast(numberOfAuxSymbols) << \"\\n\";\n\n return 0;\n}\n\nint printRich(void *N, const rich_entry &r) {\n static_cast(N);\n std::cout << std::dec;\n std::cout << std::setw(10) << \"ProdId:\" << std::setw(7) << r.ProductId;\n std::cout << std::setw(10) << \"Build:\" << std::setw(7) << r.BuildNumber;\n std::cout << std::setw(10) << \"Name:\" << std::setw(40)\n << GetRichProductName(r.BuildNumber) << \" \"\n << GetRichObjectType(r.ProductId);\n std::cout << std::setw(10) << \"Count:\" << std::setw(7) << r.Count << \"\\n\";\n return 0;\n}\n\nint printRsrc(void *N, const resource &r) {\n static_cast(N);\n\n if (r.type_str.length())\n std::cout << \"Type (string): \" << r.type_str << \"\\n\";\n else\n std::cout << \"Type: 0x\" << std::hex << r.type << \"\\n\";\n\n if (r.name_str.length())\n std::cout << \"Name (string): \" << r.name_str << 
\"\\n\";\n else\n std::cout << \"Name: 0x\" << std::hex << r.name << \"\\n\";\n\n if (r.lang_str.length())\n std::cout << \"Lang (string): \" << r.lang_str << \"\\n\";\n else\n std::cout << \"Lang: 0x\" << std::hex << r.lang << \"\\n\";\n\n std::cout << \"Codepage: 0x\" << std::hex << r.codepage << \"\\n\";\n std::cout << \"RVA: \" << std::dec << r.RVA << \"\\n\";\n std::cout << \"Size: \" << std::dec << r.size << \"\\n\";\n return 0;\n}\n\nint printSecs(void *N,\n const VA &secBase,\n const std::string &secName,\n const image_section_header &s,\n const bounded_buffer *data) {\n static_cast(N);\n static_cast(s);\n\n std::cout << \"Sec Name: \" << secName << \"\\n\";\n std::cout << \"Sec Base: 0x\" << std::hex << secBase << \"\\n\";\n if (data)\n std::cout << \"Sec Size: \" << std::dec << data->bufLen << \"\\n\";\n else\n std::cout << \"Sec Size: 0\"\n << \"\\n\";\n return 0;\n}\n\n#define DUMP_FIELD(x) \\\n std::cout << \"\" #x << \": 0x\"; \\\n std::cout << std::hex << static_cast(p->peHeader.x) << \"\\n\";\n#define DUMP_DEC_FIELD(x) \\\n std::cout << \"\" #x << \": \"; \\\n std::cout << std::dec << static_cast(p->peHeader.x) << \"\\n\";\n#define DUMP_BOOL_FIELD(x) \\\n std::cout << \"\" #x << \": \"; \\\n std::cout << std::boolalpha << static_cast(p->peHeader.x) << \"\\n\";\n\nint main(int argc, char *argv[]) {\n\n argh::parser cmdl(argv);\n\n if (cmdl[{\"-h\", \"--help\"}] || argc <= 1) {\n std::cout << \"dump-pe utility from Trail of Bits\\n\";\n std::cout << \"Repository: https://github.com/trailofbits/pe-parse\\n\\n\";\n std::cout << \"Usage:\\n\\tdump-pe /path/to/executable.exe\\n\";\n return 0;\n } else if (cmdl[{\"-v\", \"--version\"}]) {\n std::cout << \"dump-pe (pe-parse) version \" << PEPARSE_VERSION << \"\\n\";\n return 0;\n }\n\n parsed_pe *p = ParsePEFromFile(cmdl[1].c_str());\n\n if (p == nullptr) {\n std::cout << \"Error: \" << GetPEErr() << \" (\" << GetPEErrString() << \")\"\n << \"\\n\";\n std::cout << \"Location: \" << GetPEErrLoc() << 
\"\\n\";\n return 1;\n }\n\n if (p != NULL) {\n // Print DOS header\n DUMP_FIELD(dos.e_magic);\n DUMP_FIELD(dos.e_cp);\n DUMP_FIELD(dos.e_crlc);\n DUMP_FIELD(dos.e_cparhdr);\n DUMP_FIELD(dos.e_minalloc);\n DUMP_FIELD(dos.e_maxalloc);\n DUMP_FIELD(dos.e_ss);\n DUMP_FIELD(dos.e_sp);\n DUMP_FIELD(dos.e_csum);\n DUMP_FIELD(dos.e_ip);\n DUMP_FIELD(dos.e_cs);\n DUMP_FIELD(dos.e_lfarlc);\n DUMP_FIELD(dos.e_ovno);\n DUMP_FIELD(dos.e_res[0]);\n DUMP_FIELD(dos.e_res[1]);\n DUMP_FIELD(dos.e_res[2]);\n DUMP_FIELD(dos.e_res[3]);\n DUMP_FIELD(dos.e_oemid);\n DUMP_FIELD(dos.e_oeminfo);\n DUMP_FIELD(dos.e_res2[0]);\n DUMP_FIELD(dos.e_res2[1]);\n DUMP_FIELD(dos.e_res2[2]);\n DUMP_FIELD(dos.e_res2[3]);\n DUMP_FIELD(dos.e_res2[4]);\n DUMP_FIELD(dos.e_res2[5]);\n DUMP_FIELD(dos.e_res2[6]);\n DUMP_FIELD(dos.e_res2[7]);\n DUMP_FIELD(dos.e_res2[8]);\n DUMP_FIELD(dos.e_res2[9]);\n DUMP_FIELD(dos.e_lfanew);\n // Print Rich header info\n DUMP_BOOL_FIELD(rich.isPresent);\n if (p->peHeader.rich.isPresent) {\n DUMP_FIELD(rich.DecryptionKey);\n DUMP_FIELD(rich.Checksum);\n DUMP_BOOL_FIELD(rich.isValid);\n IterRich(p, printRich, NULL);\n }\n // print out some things\n DUMP_FIELD(nt.Signature);\n DUMP_FIELD(nt.FileHeader.Machine);\n DUMP_FIELD(nt.FileHeader.NumberOfSections);\n DUMP_DEC_FIELD(nt.FileHeader.TimeDateStamp);\n DUMP_FIELD(nt.FileHeader.PointerToSymbolTable);\n DUMP_DEC_FIELD(nt.FileHeader.NumberOfSymbols);\n DUMP_FIELD(nt.FileHeader.SizeOfOptionalHeader);\n DUMP_FIELD(nt.FileHeader.Characteristics);\n if (p->peHeader.nt.OptionalMagic == NT_OPTIONAL_32_MAGIC) {\n DUMP_FIELD(nt.OptionalHeader.Magic);\n DUMP_DEC_FIELD(nt.OptionalHeader.MajorLinkerVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.MinorLinkerVersion);\n DUMP_FIELD(nt.OptionalHeader.SizeOfCode);\n DUMP_FIELD(nt.OptionalHeader.SizeOfInitializedData);\n DUMP_FIELD(nt.OptionalHeader.SizeOfUninitializedData);\n DUMP_FIELD(nt.OptionalHeader.AddressOfEntryPoint);\n DUMP_FIELD(nt.OptionalHeader.BaseOfCode);\n 
DUMP_FIELD(nt.OptionalHeader.BaseOfData);\n DUMP_FIELD(nt.OptionalHeader.ImageBase);\n DUMP_FIELD(nt.OptionalHeader.SectionAlignment);\n DUMP_FIELD(nt.OptionalHeader.FileAlignment);\n DUMP_DEC_FIELD(nt.OptionalHeader.MajorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.MinorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader.Win32VersionValue);\n DUMP_FIELD(nt.OptionalHeader.SizeOfImage);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeaders);\n DUMP_FIELD(nt.OptionalHeader.CheckSum);\n DUMP_FIELD(nt.OptionalHeader.Subsystem);\n DUMP_FIELD(nt.OptionalHeader.DllCharacteristics);\n DUMP_FIELD(nt.OptionalHeader.SizeOfStackReserve);\n DUMP_FIELD(nt.OptionalHeader.SizeOfStackCommit);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeapReserve);\n DUMP_FIELD(nt.OptionalHeader.SizeOfHeapCommit);\n DUMP_FIELD(nt.OptionalHeader.LoaderFlags);\n DUMP_DEC_FIELD(nt.OptionalHeader.NumberOfRvaAndSizes);\n } else {\n DUMP_FIELD(nt.OptionalHeader64.Magic);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MajorLinkerVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MinorLinkerVersion);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfCode);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfInitializedData);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfUninitializedData);\n DUMP_FIELD(nt.OptionalHeader64.AddressOfEntryPoint);\n DUMP_FIELD(nt.OptionalHeader64.BaseOfCode);\n DUMP_FIELD(nt.OptionalHeader64.ImageBase);\n DUMP_FIELD(nt.OptionalHeader64.SectionAlignment);\n DUMP_FIELD(nt.OptionalHeader64.FileAlignment);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MajorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.MinorOperatingSystemVersion);\n DUMP_DEC_FIELD(nt.OptionalHeader64.Win32VersionValue);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfImage);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeaders);\n DUMP_FIELD(nt.OptionalHeader64.CheckSum);\n DUMP_FIELD(nt.OptionalHeader64.Subsystem);\n DUMP_FIELD(nt.OptionalHeader64.DllCharacteristics);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfStackReserve);\n 
DUMP_FIELD(nt.OptionalHeader64.SizeOfStackCommit);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeapReserve);\n DUMP_FIELD(nt.OptionalHeader64.SizeOfHeapCommit);\n DUMP_FIELD(nt.OptionalHeader64.LoaderFlags);\n DUMP_DEC_FIELD(nt.OptionalHeader64.NumberOfRvaAndSizes);\n }\n\n#undef DUMP_FIELD\n#undef DUMP_DEC_FIELD\n\n std::cout << \"Imports: \"\n << \"\\n\";\n IterImpVAString(p, printImports, NULL);\n std::cout << \"Relocations: \"\n << \"\\n\";\n IterRelocs(p, printRelocs, NULL);\n std::cout << \"Debug Directories: \"\n << \"\\n\";\n IterDebugs(p, printDebugs, NULL);\n std::cout << \"Symbols (symbol table): \"\n << \"\\n\";\n IterSymbols(p, printSymbols, NULL);\n std::cout << \"Sections: \"\n << \"\\n\";\n IterSec(p, printSecs, NULL);\n std::cout << \"Exports: \"\n << \"\\n\";\n IterExpFull(p, printExps, NULL);\n\n // read the first 8 bytes from the entry point and print them\n VA entryPoint;\n if (GetEntryPoint(p, entryPoint)) {\n std::cout << \"First 8 bytes from entry point (0x\";\n std::cout << std::hex << entryPoint << \"):\"\n << \"\\n\";\n for (std::size_t i = 0; i < 8; i++) {\n std::uint8_t b;\n if (!ReadByteAtVA(p, i + entryPoint, b)) {\n std::cout << \" ERR\";\n } else {\n std::cout << \" 0x\" << std::hex << static_cast(b);\n }\n }\n\n std::cout << \"\\n\";\n }\n\n std::cout << \"Resources: \"\n << \"\\n\";\n IterRsrc(p, printRsrc, NULL);\n\n DestructParsedPE(p);\n\n return 0;\n }\n}\n"], ["/lsfg-vk/src/extract/trans.cpp", "#include \"extract/trans.hpp\"\n\n#include \n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing namespace Extract;\n\nstruct BindingOffsets {\n uint32_t bindingIndex{};\n uint32_t bindingOffset{};\n uint32_t setIndex{};\n uint32_t setOffset{};\n};\n\nstd::vector Extract::translateShader(std::vector bytecode) {\n // compile the shader\n dxvk::DxbcReader reader(reinterpret_cast(bytecode.data()), bytecode.size());\n dxvk::DxbcModule module(reader);\n const dxvk::DxbcModuleInfo info{};\n auto code = 
module.compile(info, \"CS\");\n\n // find all bindings\n std::vector bindingOffsets;\n std::vector varIds;\n for (auto ins : code) {\n if (ins.opCode() == spv::OpDecorate) {\n if (ins.arg(2) == spv::DecorationBinding) {\n const uint32_t varId = ins.arg(1);\n bindingOffsets.resize(std::max(bindingOffsets.size(), size_t(varId + 1)));\n bindingOffsets[varId].bindingIndex = ins.arg(3);\n bindingOffsets[varId].bindingOffset = ins.offset() + 3;\n varIds.push_back(varId);\n }\n\n if (ins.arg(2) == spv::DecorationDescriptorSet) {\n const uint32_t varId = ins.arg(1);\n bindingOffsets.resize(std::max(bindingOffsets.size(), size_t(varId + 1)));\n bindingOffsets[varId].setIndex = ins.arg(3);\n bindingOffsets[varId].setOffset = ins.offset() + 3;\n }\n }\n\n if (ins.opCode() == spv::OpFunction)\n break;\n }\n\n std::vector validBindings;\n for (const auto varId : varIds) {\n auto info = bindingOffsets[varId];\n\n if (info.bindingOffset)\n validBindings.push_back(info);\n }\n\n // patch binding offset\n #pragma clang diagnostic push\n #pragma clang diagnostic ignored \"-Wunsafe-buffer-usage\"\n for (size_t i = 0; i < validBindings.size(); i++)\n code.data()[validBindings.at(i).bindingOffset] // NOLINT\n = static_cast(i);\n #pragma clang diagnostic pop\n\n // return the new bytecode\n std::vector spirvBytecode(code.size());\n std::copy_n(reinterpret_cast(code.data()),\n code.size(), spirvBytecode.data());\n return spirvBytecode;\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_module.h", "class DxbcAnalyzer {\n public:\n DxbcModule(DxbcReader& reader) {\n for (uint32_t i = 0; i < m_header.numChunks(); i++) {\n \n // The chunk tag is stored at the beginning of each chunk\n auto chunkReader = reader.clone(m_header.chunkOffset(i));\n auto tag = chunkReader.readTag();\n \n // The chunk size follows right after the four-character\n // code. 
This does not include the eight bytes that are\n // consumed by the FourCC and chunk length entry.\n auto chunkLength = chunkReader.readu32();\n \n chunkReader = chunkReader.clone(8);\n chunkReader = chunkReader.resize(chunkLength);\n \n if ((tag == \"SHDR\") || (tag == \"SHEX\"))\n m_shexChunk = new DxbcShex(chunkReader);\n \n if ((tag == \"ISGN\") || (tag == \"ISG1\"))\n m_isgnChunk = new DxbcIsgn(chunkReader, tag);\n \n if ((tag == \"OSGN\") || (tag == \"OSG5\") || (tag == \"OSG1\"))\n m_osgnChunk = new DxbcIsgn(chunkReader, tag);\n \n if ((tag == \"PCSG\") || (tag == \"PSG1\"))\n m_psgnChunk = new DxbcIsgn(chunkReader, tag);\n }\n }\n ~DxbcModule() {\n \n }\n SpirvCodeBuffer compile(\n const DxbcModuleInfo& moduleInfo,\n const std::string& fileName) {\n if (m_shexChunk == nullptr)\n throw DxvkError(\"DxbcModule::compile: No SHDR/SHEX chunk\");\n \n DxbcAnalysisInfo analysisInfo;\n \n DxbcAnalyzer analyzer(moduleInfo,\n m_shexChunk->programInfo(),\n m_isgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n this->runAnalyzer(analyzer, m_shexChunk->slice());\n\n m_bindings = std::make_optional(analysisInfo.bindings);\n \n DxbcCompiler compiler(\n fileName, moduleInfo,\n m_shexChunk->programInfo(),\n m_isgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n this->runCompiler(compiler, m_shexChunk->slice());\n\n m_icb = compiler.getIcbData();\n\n return compiler.finalize();\n }\n SpirvCodeBuffer compilePassthroughShader(\n const DxbcModuleInfo& moduleInfo,\n const std::string& fileName) const {\n if (m_shexChunk == nullptr)\n throw DxvkError(\"DxbcModule::compile: No SHDR/SHEX chunk\");\n \n DxbcAnalysisInfo analysisInfo;\n\n DxbcCompiler compiler(\n fileName, moduleInfo,\n DxbcProgramType::GeometryShader,\n m_osgnChunk, m_osgnChunk,\n m_psgnChunk, analysisInfo);\n \n compiler.processXfbPassthrough();\n return compiler.finalize();\n }\n private:\n DxbcHeader m_header;\n Rc m_isgnChunk;\n Rc m_osgnChunk;\n Rc m_psgnChunk;\n Rc m_shexChunk;\n std::vector 
m_icb;\n std::optional m_bindings;\n void runAnalyzer(\n DxbcAnalyzer& analyzer,\n DxbcCodeSlice slice) const {\n DxbcDecodeContext decoder;\n \n while (!slice.atEnd()) {\n decoder.decodeInstruction(slice);\n \n analyzer.processInstruction(\n decoder.getInstruction());\n }\n }\n void runCompiler(\n DxbcCompiler& compiler,\n DxbcCodeSlice slice) const {\n DxbcDecodeContext decoder;\n \n while (!slice.atEnd()) {\n decoder.decodeInstruction(slice);\n \n compiler.processInstruction(\n decoder.getInstruction());\n }\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_code_buffer.h", "class for {\n public:\n SpirvCodeBuffer() {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n explicit SpirvCodeBuffer(uint32_t size) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n SpirvCodeBuffer(uint32_t size, const uint32_t* data) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * 
sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n SpirvCodeBuffer(std::istream& stream) {\n stream.ignore(std::numeric_limits::max());\n std::streamsize length = stream.gcount();\n stream.clear();\n stream.seekg(0, std::ios_base::beg);\n \n std::vector buffer(length);\n stream.read(buffer.data(), length);\n buffer.resize(stream.gcount());\n \n m_code.resize(buffer.size() / sizeof(uint32_t));\n std::memcpy(reinterpret_cast(m_code.data()),\n buffer.data(), m_code.size() * sizeof(uint32_t));\n \n m_ptr = m_code.size();\n }\n template\n SpirvCodeBuffer(const uint32_t (&data)[N])\n : SpirvCodeBuffer(N, data) { }\n ~SpirvCodeBuffer() { }\n uint32_t allocId() {\n constexpr size_t BoundIdsOffset = 3;\n\n if (m_code.size() <= BoundIdsOffset)\n return 0;\n\n return m_code[BoundIdsOffset]++;\n }\n void append(const SpirvInstruction& ins) {\n if (other.size() != 0) {\n const size_t size = m_code.size();\n m_code.resize(size + other.m_code.size());\n \n uint32_t* dst = this->m_code.data();\n const uint32_t* src = other.m_code.data();\n \n std::memcpy(dst + size, src, other.size());\n m_ptr += other.m_code.size();\n }\n }\n void append(const SpirvCodeBuffer& other) {\n if (other.size() != 0) {\n const size_t size = m_code.size();\n m_code.resize(size + other.m_code.size());\n \n uint32_t* dst = this->m_code.data();\n const uint32_t* src = other.m_code.data();\n \n std::memcpy(dst + size, src, other.size());\n m_ptr += other.m_code.size();\n }\n }\n void putWord(uint32_t word) {\n m_code.insert(m_code.begin() + m_ptr, word);\n m_ptr += 1;\n }\n void putIns(spv::Op opCode, uint16_t wordCount) {\n this->putWord(\n (static_cast(opCode) << 0)\n | (static_cast(wordCount) << 16));\n }\n void putInt32(uint32_t word) {\n this->putWord(word);\n }\n void putInt64(uint64_t value) {\n this->putWord(value >> 0);\n this->putWord(value >> 32);\n }\n void putFloat32(float value) {\n uint32_t tmp;\n static_assert(sizeof(tmp) == sizeof(value));\n std::memcpy(&tmp, &value, sizeof(value));\n 
this->putInt32(tmp);\n }\n void putFloat64(double value) {\n uint64_t tmp;\n static_assert(sizeof(tmp) == sizeof(value));\n std::memcpy(&tmp, &value, sizeof(value));\n this->putInt64(tmp);\n }\n void putStr(const char* str) {\n uint32_t word = 0;\n uint32_t nbit = 0;\n \n for (uint32_t i = 0; str[i] != '\\0'; str++) {\n word |= (static_cast(str[i]) & 0xFF) << nbit;\n \n if ((nbit += 8) == 32) {\n this->putWord(word);\n word = 0;\n nbit = 0;\n }\n }\n \n // Commit current word\n this->putWord(word);\n }\n void putHeader(uint32_t version, uint32_t boundIds) {\n this->putWord(spv::MagicNumber);\n this->putWord(version);\n this->putWord(0); // Generator\n this->putWord(boundIds);\n this->putWord(0); // Schema\n }\n void erase(size_t size) {\n m_code.erase(\n m_code.begin() + m_ptr,\n m_code.begin() + m_ptr + size);\n }\n uint32_t strLen(const char* str) {\n // Null-termination plus padding\n return (std::strlen(str) + 4) / 4;\n }\n void store(std::ostream& stream) const {\n stream.write(\n reinterpret_cast(m_code.data()),\n sizeof(uint32_t) * m_code.size());\n }\n private:\n std::vector m_code;\n size_t m_ptr = 0;\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/rc/util_rc_ptr.h", "#pragma once\n\n#include \n#include \n#include \n#include \n#include \n\nnamespace dxvk {\n\n /**\n * \\brief Pointer for reference-counted objects\n * \n * This only requires the given type to implement \\c incRef\n * and \\c decRef methods that adjust the reference count.\n * \\tparam T Object type\n */\n template\n class Rc {\n template\n friend class Rc;\n public:\n\n Rc() = default;\n Rc(std::nullptr_t) { }\n\n Rc(T* object)\n : m_object(object) {\n this->incRef();\n }\n\n Rc(const Rc& other)\n : m_object(other.m_object) {\n this->incRef();\n }\n\n template\n Rc(const Rc& other)\n : m_object(other.m_object) {\n this->incRef();\n }\n\n Rc(Rc&& other)\n : m_object(other.m_object) {\n other.m_object = nullptr;\n }\n\n template\n Rc(Rc&& other)\n : m_object(other.m_object) {\n other.m_object 
= nullptr;\n }\n\n Rc& operator = (std::nullptr_t) {\n this->decRef();\n m_object = nullptr;\n return *this;\n }\n\n Rc& operator = (const Rc& other) {\n other.incRef();\n this->decRef();\n m_object = other.m_object;\n return *this;\n }\n\n template\n Rc& operator = (const Rc& other) {\n other.incRef();\n this->decRef();\n m_object = other.m_object;\n return *this;\n }\n\n Rc& operator = (Rc&& other) {\n this->decRef();\n this->m_object = other.m_object;\n other.m_object = nullptr;\n return *this;\n }\n\n template\n Rc& operator = (Rc&& other) {\n this->decRef();\n this->m_object = other.m_object;\n other.m_object = nullptr;\n return *this;\n }\n\n ~Rc() {\n this->decRef();\n }\n\n T& operator * () const { return *m_object; }\n T* operator -> () const { return m_object; }\n T* ptr() const { return m_object; }\n\n template bool operator == (const Rc& other) const { return m_object == other.m_object; }\n template bool operator != (const Rc& other) const { return m_object != other.m_object; }\n\n template bool operator == (Tx* other) const { return m_object == other; }\n template bool operator != (Tx* other) const { return m_object != other; }\n\n bool operator == (std::nullptr_t) const { return m_object == nullptr; }\n bool operator != (std::nullptr_t) const { return m_object != nullptr; }\n \n explicit operator bool () const {\n return m_object != nullptr;\n }\n\n /**\n * \\brief Sets pointer without acquiring a reference\n *\n * Must only be use when a reference has been taken via\n * other means.\n * \\param [in] object Object pointer\n */\n void unsafeInsert(T* object) {\n this->decRef();\n m_object = object;\n }\n\n /**\n * \\brief Extracts raw pointer\n *\n * Sets the smart pointer to null without decrementing the\n * reference count. 
Must only be used when the reference\n * count is decremented in some other way.\n * \\returns Pointer to owned object\n */\n T* unsafeExtract() {\n return std::exchange(m_object, nullptr);\n }\n\n /**\n * \\brief Creates smart pointer without taking reference\n *\n * Must only be used when a refernece has been obtained via other means.\n * \\param [in] object Pointer to object to take ownership of\n */\n static Rc unsafeCreate(T* object) {\n return Rc(object, false);\n }\n\n private:\n\n T* m_object = nullptr;\n\n explicit Rc(T* object, bool)\n : m_object(object) { }\n\n force_inline void incRef() const {\n if (m_object != nullptr)\n m_object->incRef();\n }\n\n force_inline void decRef() const {\n if (m_object != nullptr) {\n if constexpr (std::is_void_vdecRef())>) {\n m_object->decRef();\n } else {\n // Deprecated, objects should manage themselves now.\n if (!m_object->decRef())\n delete m_object;\n }\n }\n }\n\n };\n\n template\n bool operator == (Tx* a, const Rc& b) { return b == a; }\n\n template\n bool operator != (Tx* a, const Rc& b) { return b != a; }\n\n struct RcHash {\n template\n size_t operator () (const Rc& rc) const {\n return reinterpret_cast(rc.ptr()) / sizeof(T);\n }\n };\n\n}\n\ntemplate\nstd::ostream& operator << (std::ostream& os, const dxvk::Rc& rc) {\n return os << rc.ptr();\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_small_vector.h", "#pragma once\n\n#include \n#include \n#include \n#include \n\n#include \"util_bit.h\"\n#include \"util_likely.h\"\n\nnamespace dxvk {\n\n template\n class small_vector {\n using storage = std::aligned_storage_t;\n public:\n\n constexpr static size_t MinCapacity = N;\n\n small_vector() { }\n\n small_vector(size_t size) {\n resize(size);\n }\n\n small_vector(const small_vector& other) {\n reserve(other.m_size);\n\n for (size_t i = 0; i < other.m_size; i++)\n *ptr(i) = *other.ptr(i);\n\n m_size = other.m_size;\n }\n\n small_vector& operator = (const small_vector& other) {\n for (size_t i = 0; i < m_size; 
i++)\n ptr(i)->~T();\n\n reserve(other.m_size);\n\n for (size_t i = 0; i < other.m_size; i++)\n *ptr(i) = *other.ptr(i);\n\n m_size = other.m_size;\n return *this;\n }\n\n small_vector(small_vector&& other) {\n if (other.m_size <= N) {\n for (size_t i = 0; i < other.m_size; i++)\n new (&u.m_data[i]) T(std::move(*other.ptr(i)));\n } else {\n u.m_ptr = other.u.m_ptr;\n m_capacity = other.m_capacity;\n\n other.u.m_ptr = nullptr;\n other.m_capacity = N;\n }\n\n m_size = other.m_size;\n other.m_size = 0;\n }\n\n small_vector& operator = (small_vector&& other) {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n\n if (other.m_size <= N) {\n m_capacity = N;\n\n for (size_t i = 0; i < other.m_size; i++)\n new (&u.m_data[i]) T(std::move(*other.ptr(i)));\n } else {\n u.m_ptr = other.u.m_ptr;\n m_capacity = other.m_capacity;\n\n other.u.m_ptr = nullptr;\n other.m_capacity = N;\n }\n\n m_size = other.m_size;\n other.m_size = 0;\n return *this;\n }\n\n ~small_vector() {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n }\n\n size_t size() const {\n return m_size;\n }\n\n void reserve(size_t n) {\n if (likely(n <= m_capacity))\n return;\n\n n = pick_capacity(n);\n\n storage* data = new storage[n];\n\n for (size_t i = 0; i < m_size; i++) {\n new (&data[i]) T(std::move(*ptr(i)));\n ptr(i)->~T();\n }\n\n if (m_capacity > N)\n delete[] u.m_ptr;\n\n m_capacity = n;\n u.m_ptr = data;\n }\n\n const T* data() const { return ptr(0); }\n T* data() { return ptr(0); }\n\n void resize(size_t n) {\n reserve(n);\n\n for (size_t i = n; i < m_size; i++)\n ptr(i)->~T();\n\n for (size_t i = m_size; i < n; i++)\n new (ptr(i)) T();\n\n m_size = n;\n }\n\n void push_back(const T& object) {\n reserve(m_size + 1);\n new (ptr(m_size++)) T(object);\n }\n\n void push_back(T&& object) {\n reserve(m_size + 1);\n new (ptr(m_size++)) T(std::move(object));\n }\n\n template\n T& emplace_back(Args... 
args) {\n reserve(m_size + 1);\n return *(new (ptr(m_size++)) T(std::forward(args)...));\n }\n\n void erase(size_t idx) {\n ptr(idx)->~T();\n\n for (size_t i = idx; i < m_size - 1; i++) {\n new (ptr(i)) T(std::move(*ptr(i + 1)));\n ptr(i + 1)->~T();\n }\n }\n\n void pop_back() {\n ptr(--m_size)->~T();\n }\n\n void clear() {\n for (size_t i = 0; i < m_size; i++)\n ptr(i)->~T();\n\n m_size = 0;\n }\n\n bool empty() const {\n return m_size == 0;\n }\n\n T& operator [] (size_t idx) { return *ptr(idx); }\n const T& operator [] (size_t idx) const { return *ptr(idx); }\n\n T& front() { return *ptr(0); }\n const T& front() const { return *ptr(0); }\n\n T& back() { return *ptr(m_size - 1); }\n const T& back() const { return *ptr(m_size - 1); }\n\n private:\n\n size_t m_capacity = N;\n size_t m_size = 0;\n\n union {\n storage* m_ptr;\n storage m_data[N];\n } u;\n\n size_t pick_capacity(size_t n) {\n // Pick next largest power of two for the new capacity\n return size_t(1u) << ((sizeof(n) * 8u) - bit::lzcnt(n - 1));\n }\n\n T* ptr(size_t idx) {\n return m_capacity == N\n ? reinterpret_cast(&u.m_data[idx])\n : reinterpret_cast(&u.m_ptr[idx]);\n }\n\n const T* ptr(size_t idx) const {\n return m_capacity == N\n ? 
reinterpret_cast(&u.m_data[idx])\n : reinterpret_cast(&u.m_ptr[idx]);\n }\n\n };\n\n}\n"], ["/lsfg-vk/framegen/src/common/exception.cpp", "#include \"common/exception.hpp\"\n\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nusing namespace LSFG;\n\nvulkan_error::vulkan_error(VkResult result, const std::string& message)\n : std::runtime_error(std::format(\"{} (error {})\", message, static_cast(result))),\n result(result) {}\n\nvulkan_error::~vulkan_error() noexcept = default;\n\nrethrowable_error::rethrowable_error(const std::string& message, const std::exception& exe)\n : std::runtime_error(message) {\n this->message = std::format(\"{}\\n- {}\", message, exe.what());\n}\n\nrethrowable_error::~rethrowable_error() noexcept = default;\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_options.h", "#pragma once\n\n#include \n\n#include \"util_flags.h\"\n\nnamespace dxvk {\n\n struct D3D11Options;\n\n enum class DxbcFloatControlFlag : uint32_t {\n DenormFlushToZero32,\n DenormPreserve64,\n PreserveNan32,\n PreserveNan64,\n };\n\n using DxbcFloatControlFlags = Flags;\n\n struct DxbcOptions {\n DxbcOptions() {}\n\n // Clamp oDepth in fragment shaders if the depth\n // clip device feature is not supported\n bool useDepthClipWorkaround = false;\n\n /// Determines whether format qualifiers\n /// on typed UAV loads are required\n bool supportsTypedUavLoadR32 = false;\n\n /// Determines whether raw access chains are supported\n bool supportsRawAccessChains = false;\n\n /// Clear thread-group shared memory to zero\n bool zeroInitWorkgroupMemory = false;\n\n /// Declare vertex positions as invariant\n bool invariantPosition = false;\n\n /// Insert memory barriers after TGSM stoes\n bool forceVolatileTgsmAccess = false;\n\n /// Try to detect hazards in UAV access and insert\n /// barriers when we know control flow is uniform.\n bool forceComputeUavBarriers = false;\n\n /// Replace ld_ms with ld\n bool disableMsaa = false;\n\n /// Force sample rate shading by 
using sample\n /// interpolation for fragment shader inputs\n bool forceSampleRateShading = false;\n\n // Enable per-sample interlock if supported\n bool enableSampleShadingInterlock = false;\n\n /// Use tightly packed arrays for immediate\n /// constant buffers if possible\n bool supportsTightIcbPacking = false;\n\n /// Whether exporting point size is required\n bool needsPointSizeExport = true;\n\n /// Whether to enable sincos emulation\n bool sincosEmulation = false;\n\n /// Float control flags\n DxbcFloatControlFlags floatControl;\n\n /// Minimum storage buffer alignment\n VkDeviceSize minSsboAlignment = 0;\n };\n \n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/nt-headers.h", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2013 Andrew Ruef\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#pragma once\n\n#include \n#include \n#include \n\n// need to pack these structure definitions\n\n// some constant definitions\n// clang-format off\nnamespace peparse {\nconstexpr std::uint32_t RICH_MAGIC_END = 0x68636952;\nconstexpr std::uint32_t RICH_MAGIC_START = 0x536e6144;\nconstexpr std::uint32_t RICH_OFFSET = 0x80;\nconstexpr std::uint16_t MZ_MAGIC = 0x5A4D;\nconstexpr std::uint32_t NT_MAGIC = 0x00004550;\nconstexpr std::uint16_t NUM_DIR_ENTRIES = 16;\nconstexpr std::uint16_t NT_OPTIONAL_32_MAGIC = 0x10B;\nconstexpr std::uint16_t NT_OPTIONAL_64_MAGIC = 0x20B;\nconstexpr std::uint16_t NT_SHORT_NAME_LEN = 8;\nconstexpr std::uint16_t SYMTAB_RECORD_LEN = 18;\n\n#ifndef _PEPARSE_WINDOWS_CONFLICTS\n// Machine Types\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_UNKNOWN = 0x0;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ALPHA = 0x184; // Alpha_AXP\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ALPHA64 = 0x284; // ALPHA64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AM33 = 0x1d3; // Matsushita AM33\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AMD64 = 0x8664; // x64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARM = 0x1c0; // ARM little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARM64 = 0xaa64; // ARM64 little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_ARMNT = 0x1c4; // ARM Thumb-2 little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_AXP64 = 0x284; // ALPHA64\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_CEE = 0xc0ee;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_CEF = 0xcef;\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_EBC = 0xebc; // EFI byte code\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_I386 = 0x14c; // Intel 386 or later processors and compatible processors\nconstexpr std::uint16_t 
IMAGE_FILE_MACHINE_IA64 = 0x200; // Intel Itanium processor family\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_LOONGARCH32 = 0x6232; // LoongArch 32-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_LOONGARCH64 = 0x6264; // LoongArch 64-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_M32R = 0x9041; // Mitsubishi M32R little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPS16 = 0x266; // MIPS16\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPSFPU = 0x366; // MIPS with FPU\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466; // MIPS16 with FPU\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPC = 0x1f0; // Power PC little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPCFP = 0x1f1; // Power PC with floating point support\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_POWERPCBE = 0x1f2; // Power PC big endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R3000 = 0x162; // MIPS little endian, 0x160 big-endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R4000 = 0x166; // MIPS little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_R10000 = 0x168; // MIPS little endian\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV32 = 0x5032; // RISC-V 32-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV64 = 0x5064; // RISC-V 64-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_RISCV128 = 0x5128; // RISC-V 128-bit address space\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3 = 0x1a2; // Hitachi SH3\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3DSP = 0x1a3; // Hitachi SH3 DSP\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH3E = 0x1a4; // Hitachi SH3E\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH4 = 0x1a6; // Hitachi SH4\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_SH5 = 0x1a8; // Hitachi SH5\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_THUMB = 0x1c2; // Thumb\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_TRICORE = 0x520; // Infineon\nconstexpr std::uint16_t IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169; // MIPS 
little-endian WCE v2\n\nconstexpr std::uint16_t IMAGE_FILE_RELOCS_STRIPPED = 0x0001;\nconstexpr std::uint16_t IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002;\nconstexpr std::uint16_t IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004;\nconstexpr std::uint16_t IMAGE_FILE_LOCAL_SYMS_STRIPPED = 0x0008;\nconstexpr std::uint16_t IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x0010;\nconstexpr std::uint16_t IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020;\nconstexpr std::uint16_t IMAGE_FILE_BYTES_REVERSED_LO = 0x0080;\nconstexpr std::uint16_t IMAGE_FILE_32BIT_MACHINE = 0x0100;\nconstexpr std::uint16_t IMAGE_FILE_DEBUG_STRIPPED = 0x0200;\nconstexpr std::uint16_t IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400;\nconstexpr std::uint16_t IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800;\nconstexpr std::uint16_t IMAGE_FILE_SYSTEM = 0x1000;\nconstexpr std::uint16_t IMAGE_FILE_DLL = 0x2000;\nconstexpr std::uint16_t IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000;\nconstexpr std::uint16_t IMAGE_FILE_BYTES_REVERSED_HI = 0x8000;\n\nconstexpr std::uint32_t IMAGE_SCN_TYPE_NO_PAD = 0x00000008;\nconstexpr std::uint32_t IMAGE_SCN_CNT_CODE = 0x00000020;\nconstexpr std::uint32_t IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040;\nconstexpr std::uint32_t IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080;\nconstexpr std::uint32_t IMAGE_SCN_LNK_OTHER = 0x00000100;\nconstexpr std::uint32_t IMAGE_SCN_LNK_INFO = 0x00000200;\nconstexpr std::uint32_t IMAGE_SCN_LNK_REMOVE = 0x00000800;\nconstexpr std::uint32_t IMAGE_SCN_LNK_COMDAT = 0x00001000;\nconstexpr std::uint32_t IMAGE_SCN_NO_DEFER_SPEC_EXC = 0x00004000;\nconstexpr std::uint32_t IMAGE_SCN_GPREL = 0x00008000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_FARDATA = 0x00008000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_PURGEABLE = 0x00020000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_16BIT = 0x00020000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_LOCKED = 0x00040000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_PRELOAD = 0x00080000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_1BYTES = 0x00100000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_2BYTES = 
0x00200000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_4BYTES = 0x00300000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_8BYTES = 0x00400000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_16BYTES = 0x00500000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_32BYTES = 0x00600000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_64BYTES = 0x00700000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_128BYTES = 0x00800000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_256BYTES = 0x00900000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_512BYTES = 0x00A00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000;\nconstexpr std::uint32_t IMAGE_SCN_ALIGN_MASK = 0x00F00000;\nconstexpr std::uint32_t IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_DISCARDABLE = 0x02000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_NOT_CACHED = 0x04000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_NOT_PAGED = 0x08000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_SHARED = 0x10000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_EXECUTE = 0x20000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_READ = 0x40000000;\nconstexpr std::uint32_t IMAGE_SCN_MEM_WRITE = 0x80000000;\n\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_UNKNOWN = 0;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_NATIVE = 1;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_GUI = 2;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_CUI = 3;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_OS2_CUI = 5;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_POSIX_CUI = 7;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_NATIVE_WINDOWS = 8;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_APPLICATION = 10;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12;\nconstexpr 
std::uint16_t IMAGE_SUBSYSTEM_EFI_ROM = 13;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_XBOX = 14;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16;\nconstexpr std::uint16_t IMAGE_SUBSYSTEM_XBOX_CODE_CATALOG = 17;\n\n// Symbol section number values\nconstexpr std::int16_t IMAGE_SYM_UNDEFINED = 0;\nconstexpr std::int16_t IMAGE_SYM_ABSOLUTE = -1;\nconstexpr std::int16_t IMAGE_SYM_DEBUG = -2;\n\n// Symbol table types\nconstexpr std::uint16_t IMAGE_SYM_TYPE_NULL = 0;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_VOID = 1;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_CHAR = 2;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_SHORT = 3;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_INT = 4;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_LONG = 5;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_FLOAT = 6;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_DOUBLE = 7;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_STRUCT = 8;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_UNION = 9;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_ENUM = 10;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_MOE = 11;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_BYTE = 12;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_WORD = 13;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_UINT = 14;\nconstexpr std::uint16_t IMAGE_SYM_TYPE_DWORD = 15;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_NULL = 0;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_POINTER = 1;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_FUNCTION = 2;\nconstexpr std::uint16_t IMAGE_SYM_DTYPE_ARRAY = 3;\n\n// Symbol table storage classes\nconstexpr std::uint8_t IMAGE_SYM_CLASS_END_OF_FUNCTION = static_cast(-1);\nconstexpr std::uint8_t IMAGE_SYM_CLASS_NULL = 0;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_AUTOMATIC = 1;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_EXTERNAL = 2;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_STATIC = 3;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_REGISTER = 4;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_EXTERNAL_DEF = 5;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_LABEL = 6;\nconstexpr std::uint8_t 
IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_ARGUMENT = 9;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_STRUCT_TAG = 10;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNION_TAG = 12;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_TYPE_DEFINITION = 13;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_ENUM_TAG = 15;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_REGISTER_PARAM = 17;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_BIT_FIELD = 18;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_BLOCK = 100;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_FUNCTION = 101;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_END_OF_STRUCT = 102;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_FILE = 103;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_SECTION = 104;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105;\nconstexpr std::uint8_t IMAGE_SYM_CLASS_CLR_TOKEN = 107;\n\n// Optional header DLL characteristics\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY = 0x0080;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_ISOLATION = 0x0200;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_SEH = 0x0400;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_NO_BIND = 0x0800;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_APPCONTAINER = 0x1000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 0x2000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_GUARD_CF = 0x4000;\nconstexpr std::uint16_t IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000;\n\n// Extended DLL characteristics\nconstexpr std::uint16_t 
IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT = 0x0001;\n#endif\n// clang-format on\n\nstruct dos_header {\n std::uint16_t e_magic;\n std::uint16_t e_cblp;\n std::uint16_t e_cp;\n std::uint16_t e_crlc;\n std::uint16_t e_cparhdr;\n std::uint16_t e_minalloc;\n std::uint16_t e_maxalloc;\n std::uint16_t e_ss;\n std::uint16_t e_sp;\n std::uint16_t e_csum;\n std::uint16_t e_ip;\n std::uint16_t e_cs;\n std::uint16_t e_lfarlc;\n std::uint16_t e_ovno;\n std::uint16_t e_res[4];\n std::uint16_t e_oemid;\n std::uint16_t e_oeminfo;\n std::uint16_t e_res2[10];\n std::uint32_t e_lfanew;\n};\n\nstruct file_header {\n std::uint16_t Machine;\n std::uint16_t NumberOfSections;\n std::uint32_t TimeDateStamp;\n std::uint32_t PointerToSymbolTable;\n std::uint32_t NumberOfSymbols;\n std::uint16_t SizeOfOptionalHeader;\n std::uint16_t Characteristics;\n};\n\nstruct data_directory {\n std::uint32_t VirtualAddress;\n std::uint32_t Size;\n};\n\nenum data_directory_kind {\n DIR_EXPORT = 0,\n DIR_IMPORT = 1,\n DIR_RESOURCE = 2,\n DIR_EXCEPTION = 3,\n DIR_SECURITY = 4,\n DIR_BASERELOC = 5,\n DIR_DEBUG = 6,\n DIR_ARCHITECTURE = 7,\n DIR_GLOBALPTR = 8,\n DIR_TLS = 9,\n DIR_LOAD_CONFIG = 10,\n DIR_BOUND_IMPORT = 11,\n DIR_IAT = 12,\n DIR_DELAY_IMPORT = 13,\n DIR_COM_DESCRIPTOR = 14,\n DIR_RESERVED = 15,\n};\n\nstruct optional_header_32 {\n std::uint16_t Magic;\n std::uint8_t MajorLinkerVersion;\n std::uint8_t MinorLinkerVersion;\n std::uint32_t SizeOfCode;\n std::uint32_t SizeOfInitializedData;\n std::uint32_t SizeOfUninitializedData;\n std::uint32_t AddressOfEntryPoint;\n std::uint32_t BaseOfCode;\n std::uint32_t BaseOfData;\n std::uint32_t ImageBase;\n std::uint32_t SectionAlignment;\n std::uint32_t FileAlignment;\n std::uint16_t MajorOperatingSystemVersion;\n std::uint16_t MinorOperatingSystemVersion;\n std::uint16_t MajorImageVersion;\n std::uint16_t MinorImageVersion;\n std::uint16_t MajorSubsystemVersion;\n std::uint16_t MinorSubsystemVersion;\n std::uint32_t Win32VersionValue;\n std::uint32_t 
SizeOfImage;\n std::uint32_t SizeOfHeaders;\n std::uint32_t CheckSum;\n std::uint16_t Subsystem;\n std::uint16_t DllCharacteristics;\n std::uint32_t SizeOfStackReserve;\n std::uint32_t SizeOfStackCommit;\n std::uint32_t SizeOfHeapReserve;\n std::uint32_t SizeOfHeapCommit;\n std::uint32_t LoaderFlags;\n std::uint32_t NumberOfRvaAndSizes;\n data_directory DataDirectory[NUM_DIR_ENTRIES];\n};\n\n/*\n * This is used for PE32+ binaries. It is similar to optional_header_32\n * except some fields don't exist here (BaseOfData), and others are bigger.\n */\nstruct optional_header_64 {\n std::uint16_t Magic;\n std::uint8_t MajorLinkerVersion;\n std::uint8_t MinorLinkerVersion;\n std::uint32_t SizeOfCode;\n std::uint32_t SizeOfInitializedData;\n std::uint32_t SizeOfUninitializedData;\n std::uint32_t AddressOfEntryPoint;\n std::uint32_t BaseOfCode;\n std::uint64_t ImageBase;\n std::uint32_t SectionAlignment;\n std::uint32_t FileAlignment;\n std::uint16_t MajorOperatingSystemVersion;\n std::uint16_t MinorOperatingSystemVersion;\n std::uint16_t MajorImageVersion;\n std::uint16_t MinorImageVersion;\n std::uint16_t MajorSubsystemVersion;\n std::uint16_t MinorSubsystemVersion;\n std::uint32_t Win32VersionValue;\n std::uint32_t SizeOfImage;\n std::uint32_t SizeOfHeaders;\n std::uint32_t CheckSum;\n std::uint16_t Subsystem;\n std::uint16_t DllCharacteristics;\n std::uint64_t SizeOfStackReserve;\n std::uint64_t SizeOfStackCommit;\n std::uint64_t SizeOfHeapReserve;\n std::uint64_t SizeOfHeapCommit;\n std::uint32_t LoaderFlags;\n std::uint32_t NumberOfRvaAndSizes;\n data_directory DataDirectory[NUM_DIR_ENTRIES];\n};\n\nstruct nt_header_32 {\n std::uint32_t Signature;\n file_header FileHeader;\n optional_header_32 OptionalHeader;\n optional_header_64 OptionalHeader64;\n std::uint16_t OptionalMagic;\n};\n\nstruct rich_entry {\n std::uint16_t ProductId;\n std::uint16_t BuildNumber;\n std::uint32_t Count;\n};\n\nstruct rich_header {\n std::uint32_t StartSignature;\n std::vector Entries;\n 
std::uint32_t EndSignature;\n std::uint32_t DecryptionKey;\n std::uint32_t Checksum;\n bool isPresent;\n bool isValid;\n};\n\n/*\n * This structure is only used to know how far to move the offset\n * when parsing resources. The data is stored in a resource_dir_entry\n * struct but that also has extra information used in the parsing which\n * causes the size to be inaccurate.\n */\nstruct resource_dir_entry_sz {\n std::uint32_t ID;\n std::uint32_t RVA;\n};\n\nstruct resource_dir_entry {\n inline resource_dir_entry(void) : ID(0), RVA(0), type(0), name(0), lang(0) {\n }\n\n std::uint32_t ID;\n std::uint32_t RVA;\n std::uint32_t type;\n std::uint32_t name;\n std::uint32_t lang;\n std::string type_str;\n std::string name_str;\n std::string lang_str;\n};\n\nstruct resource_dir_table {\n std::uint32_t Characteristics;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint16_t NameEntries;\n std::uint16_t IDEntries;\n};\n\nstruct resource_dat_entry {\n std::uint32_t RVA;\n std::uint32_t size;\n std::uint32_t codepage;\n std::uint32_t reserved;\n};\n\nstruct image_section_header {\n std::uint8_t Name[NT_SHORT_NAME_LEN];\n union {\n std::uint32_t PhysicalAddress;\n std::uint32_t VirtualSize;\n } Misc;\n std::uint32_t VirtualAddress;\n std::uint32_t SizeOfRawData;\n std::uint32_t PointerToRawData;\n std::uint32_t PointerToRelocations;\n std::uint32_t PointerToLinenumbers;\n std::uint16_t NumberOfRelocations;\n std::uint16_t NumberOfLinenumbers;\n std::uint32_t Characteristics;\n};\n\nstruct import_dir_entry {\n std::uint32_t LookupTableRVA;\n std::uint32_t TimeStamp;\n std::uint32_t ForwarderChain;\n std::uint32_t NameRVA;\n std::uint32_t AddressRVA;\n};\n\nstruct export_dir_table {\n std::uint32_t ExportFlags;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t NameRVA;\n std::uint32_t OrdinalBase;\n std::uint32_t AddressTableEntries;\n std::uint32_t NumberOfNamePointers;\n 
std::uint32_t ExportAddressTableRVA;\n std::uint32_t NamePointerRVA;\n std::uint32_t OrdinalTableRVA;\n};\n\nstruct debug_dir_entry {\n std::uint32_t Characteristics;\n std::uint32_t TimeStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t Type;\n std::uint32_t SizeOfData;\n std::uint32_t AddressOfRawData;\n std::uint32_t PointerToRawData;\n};\n\nenum reloc_type {\n RELOC_ABSOLUTE = 0,\n RELOC_HIGH = 1,\n RELOC_LOW = 2,\n RELOC_HIGHLOW = 3,\n RELOC_HIGHADJ = 4,\n RELOC_MIPS_JMPADDR = 5, // only valid on MIPS\n RELOC_ARM_MOV32 = 5, // only valid on ARM/Thumb\n RELOC_RISCV_HIGH20 = 5, // only valid on RISC-V\n RELOC_RESERVED = 6,\n RELOC_THUMB_MOV32 = 7, // only valid on Thumb\n RELOC_RISCV_LOW32I = 7, // only valid on RISC-V\n RELOC_RISCV_LOW12S = 8, // only valid on RISC-V\n RELOC_LOONGARCH32_MARK_LA = 8, // only valid on LoongArch 32\n RELOC_LOONGARCH64_MARK_LA = 8, // only valid on LoongArch 64\n RELOC_MIPS_JMPADDR16 = 9, // only valid on MIPS\n RELOC_IA64_IMM64 = 9,\n RELOC_DIR64 = 10\n};\n\nstruct reloc_block {\n std::uint32_t PageRVA;\n std::uint32_t BlockSize;\n};\n\nstruct image_load_config_code_integrity {\n std::uint16_t Flags;\n std::uint16_t Catalog;\n std::uint32_t CatalogOffset;\n std::uint32_t Reserved;\n};\n\nstruct image_load_config_32 {\n std::uint32_t Size;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t GlobalFlagsClear;\n std::uint32_t GlobalFlagsSet;\n std::uint32_t CriticalSectionDefaultTimeout;\n std::uint32_t DeCommitFreeBlockThreshold;\n std::uint32_t DeCommitTotalFreeThreshold;\n std::uint32_t LockPrefixTable;\n std::uint32_t MaximumAllocationSize;\n std::uint32_t VirtualMemoryThreshold;\n std::uint32_t ProcessHeapFlags;\n std::uint32_t ProcessAffinityMask;\n std::uint16_t CSDVersion;\n std::uint16_t DependentLoadFlags;\n std::uint32_t EditList;\n std::uint32_t SecurityCookie;\n std::uint32_t SEHandlerTable;\n std::uint32_t SEHandlerCount;\n 
std::uint32_t GuardCFCheckFunctionPointer;\n std::uint32_t GuardCFDispatchFunctionPointer;\n std::uint32_t GuardCFFunctionTable;\n std::uint32_t GuardCFFunctionCount;\n std::uint32_t GuardFlags;\n image_load_config_code_integrity CodeIntegrity;\n std::uint32_t GuardAddressTakenIatEntryTable;\n std::uint32_t GuardAddressTakenIatEntryCount;\n std::uint32_t GuardLongJumpTargetTable;\n std::uint32_t GuardLongJumpTargetCount;\n std::uint32_t DynamicValueRelocTable;\n std::uint32_t CHPEMetadataPointer;\n std::uint32_t GuardRFFailureRoutine;\n std::uint32_t GuardRFFailureRoutineFunctionPointer;\n std::uint32_t DynamicValueRelocTableOffset;\n std::uint16_t DynamicValueRelocTableSection;\n std::uint16_t Reserved2;\n std::uint32_t GuardRFVerifyStackPointerFunctionPointer;\n std::uint32_t HotPatchTableOffset;\n std::uint32_t Reserved3;\n std::uint32_t EnclaveConfigurationPointer;\n std::uint32_t VolatileMetadataPointer;\n};\n\nstruct image_load_config_64 {\n std::uint32_t Size;\n std::uint32_t TimeDateStamp;\n std::uint16_t MajorVersion;\n std::uint16_t MinorVersion;\n std::uint32_t GlobalFlagsClear;\n std::uint32_t GlobalFlagsSet;\n std::uint32_t CriticalSectionDefaultTimeout;\n std::uint64_t DeCommitFreeBlockThreshold;\n std::uint64_t DeCommitTotalFreeThreshold;\n std::uint64_t LockPrefixTable;\n std::uint64_t MaximumAllocationSize;\n std::uint64_t VirtualMemoryThreshold;\n std::uint64_t ProcessAffinityMask;\n std::uint32_t ProcessHeapFlags;\n std::uint16_t CSDVersion;\n std::uint16_t DependentLoadFlags;\n std::uint64_t EditList;\n std::uint64_t SecurityCookie;\n std::uint64_t SEHandlerTable;\n std::uint64_t SEHandlerCount;\n std::uint64_t GuardCFCheckFunctionPointer;\n std::uint64_t GuardCFDispatchFunctionPointer;\n std::uint64_t GuardCFFunctionTable;\n std::uint64_t GuardCFFunctionCount;\n std::uint32_t GuardFlags;\n image_load_config_code_integrity CodeIntegrity;\n std::uint64_t GuardAddressTakenIatEntryTable;\n std::uint64_t GuardAddressTakenIatEntryCount;\n 
std::uint64_t GuardLongJumpTargetTable;\n std::uint64_t GuardLongJumpTargetCount;\n std::uint64_t DynamicValueRelocTable;\n std::uint64_t CHPEMetadataPointer;\n std::uint64_t GuardRFFailureRoutine;\n std::uint64_t GuardRFFailureRoutineFunctionPointer;\n std::uint32_t DynamicValueRelocTableOffset;\n std::uint16_t DynamicValueRelocTableSection;\n std::uint16_t Reserved2;\n std::uint64_t GuardRFVerifyStackPointerFunctionPointer;\n std::uint32_t HotPatchTableOffset;\n std::uint32_t Reserved3;\n std::uint64_t EnclaveConfigurationPointer;\n std::uint64_t VolatileMetadataPointer;\n};\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_tag.h", "#pragma once\n\n#include \"dxbc_include.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Four-character tag\n * \n * Used to identify chunks in the\n * compiled DXBC file by name.\n */\n class DxbcTag {\n \n public:\n \n DxbcTag() {\n for (size_t i = 0; i < 4; i++)\n m_chars[i] = '\\0';\n }\n \n DxbcTag(const char* tag) {\n for (size_t i = 0; i < 4; i++)\n m_chars[i] = tag[i];\n }\n \n bool operator == (const DxbcTag& other) const {\n bool result = true;\n for (size_t i = 0; i < 4; i++)\n result &= m_chars[i] == other.m_chars[i];\n return result;\n }\n \n bool operator != (const DxbcTag& other) const {\n return !this->operator == (other);\n }\n \n const char* operator & () const { return m_chars; }\n char* operator & () { return m_chars; }\n \n private:\n \n char m_chars[4];\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_common.h", "class DxbcProgramType {\n public:\n VkShaderStageFlagBits shaderStage() const {\n switch (m_type) {\n case DxbcProgramType::PixelShader : return VK_SHADER_STAGE_FRAGMENT_BIT;\n case DxbcProgramType::VertexShader : return VK_SHADER_STAGE_VERTEX_BIT;\n case DxbcProgramType::GeometryShader : return VK_SHADER_STAGE_GEOMETRY_BIT;\n case DxbcProgramType::HullShader : return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;\n case DxbcProgramType::DomainShader : return 
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;\n case DxbcProgramType::ComputeShader : return VK_SHADER_STAGE_COMPUTE_BIT;\n default: throw DxvkError(\"DxbcProgramInfo::shaderStage: Unsupported program type\");\n }\n }\n spv::ExecutionModel executionModel() const {\n switch (m_type) {\n case DxbcProgramType::PixelShader : return spv::ExecutionModelFragment;\n case DxbcProgramType::VertexShader : return spv::ExecutionModelVertex;\n case DxbcProgramType::GeometryShader : return spv::ExecutionModelGeometry;\n case DxbcProgramType::HullShader : return spv::ExecutionModelTessellationControl;\n case DxbcProgramType::DomainShader : return spv::ExecutionModelTessellationEvaluation;\n case DxbcProgramType::ComputeShader : return spv::ExecutionModelGLCompute;\n default: throw DxvkError(\"DxbcProgramInfo::executionModel: Unsupported program type\");\n }\n }\n private:\n DxbcProgramType m_type = DxbcProgramType::PixelShader;\n};"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/unicode_winapi.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2020 Trail of Bits, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n\nnamespace peparse {\nstd::string from_utf16(const UCharString &u) {\n std::string result;\n std::size_t size = WideCharToMultiByte(CP_UTF8,\n 0,\n u.data(),\n static_cast(u.size()),\n nullptr,\n 0,\n nullptr,\n nullptr);\n\n if (size <= 0) {\n return result;\n }\n\n result.reserve(size);\n WideCharToMultiByte(CP_UTF8,\n 0,\n u.data(),\n static_cast(u.size()),\n &result[0],\n static_cast(result.capacity()),\n nullptr,\n nullptr);\n\n return result;\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_flags.h", "#pragma once\n\n#include \n\n#include \"util_bit.h\"\n\nnamespace dxvk {\n \n template\n class Flags {\n \n public:\n \n using IntType = std::underlying_type_t;\n \n Flags() { }\n \n Flags(IntType t)\n : m_bits(t) { }\n \n template\n Flags(T f, Tx... fx) {\n this->set(f, fx...);\n }\n \n template\n void set(Tx... fx) {\n m_bits |= bits(fx...);\n }\n \n void set(Flags flags) {\n m_bits |= flags.m_bits;\n }\n \n template\n void clr(Tx... fx) {\n m_bits &= ~bits(fx...);\n }\n \n void clr(Flags flags) {\n m_bits &= ~flags.m_bits;\n }\n \n template\n bool any(Tx... fx) const {\n return (m_bits & bits(fx...)) != 0;\n }\n \n template\n bool all(Tx... 
fx) const {\n const IntType mask = bits(fx...);\n return (m_bits & mask) == mask;\n }\n \n bool test(T f) const {\n return this->any(f);\n }\n \n bool isClear() const {\n return m_bits == 0;\n }\n \n void clrAll() {\n m_bits = 0;\n }\n \n IntType raw() const {\n return m_bits;\n }\n \n Flags operator & (const Flags& other) const {\n return Flags(m_bits & other.m_bits);\n }\n \n Flags operator | (const Flags& other) const {\n return Flags(m_bits | other.m_bits);\n }\n \n Flags operator ^ (const Flags& other) const {\n return Flags(m_bits ^ other.m_bits);\n }\n\n bool operator == (const Flags& other) const {\n return m_bits == other.m_bits;\n }\n \n bool operator != (const Flags& other) const {\n return m_bits != other.m_bits;\n }\n \n private:\n \n IntType m_bits = 0;\n \n static IntType bit(T f) {\n return IntType(1) << static_cast(f);\n }\n \n template\n static IntType bits(T f, Tx... fx) {\n return bit(f) | bits(fx...);\n }\n \n static IntType bits() {\n return 0;\n }\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/util/log/log.h", "#pragma once\n\n#include \n#include \n\nnamespace dxvk {\n \n enum class LogLevel : uint32_t {\n Trace = 0,\n Debug = 1,\n Info = 2,\n Warn = 3,\n Error = 4,\n None = 5,\n };\n\n /**\n * \\brief Logger\n * \n * Logger for one DLL. 
Creates a text file and\n * writes all log messages to that file.\n */\n class Logger {\n \n public:\n \n Logger() {}\n Logger(const std::string& file_name) {}\n ~Logger() {}\n \n static void trace(const std::string& message) {}\n static void debug(const std::string& message) {}\n static void info (const std::string& message) {}\n static void warn (const std::string& message) {}\n static void err (const std::string& message) {}\n static void log (LogLevel level, const std::string& message) {}\n \n static LogLevel logLevel() {\n return LogLevel::Warn;\n }\n\n };\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_limits.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n\n enum DxvkLimits : size_t {\n MaxNumRenderTargets = 8,\n MaxNumVertexAttributes = 32,\n MaxNumVertexBindings = 32,\n MaxNumXfbBuffers = 4,\n MaxNumXfbStreams = 4,\n MaxNumViewports = 16,\n MaxNumResourceSlots = 1216,\n MaxNumQueuedCommandBuffers = 32,\n MaxNumQueryCountPerPool = 128,\n MaxNumSpecConstants = 12,\n MaxUniformBufferSize = 65536,\n MaxVertexBindingStride = 2048,\n MaxPushConstantSize = 128,\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/thirdparty/GLSL.std.450.h", "/*\n** Copyright (c) 2014-2024 The Khronos Group Inc.\n**\n** Permission is hereby granted, free of charge, to any person obtaining a copy\n** of this software and/or associated documentation files (the \"Materials\"),\n** to deal in the Materials without restriction, including without limitation\n** the rights to use, copy, modify, merge, publish, distribute, sublicense,\n** and/or sell copies of the Materials, and to permit persons to whom the\n** Materials are furnished to do so, subject to the following conditions:\n**\n** The above copyright notice and this permission notice shall be included in\n** all copies or substantial portions of the Materials.\n**\n** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS\n** STANDARDS. 
THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND\n** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ \n**\n** THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS\n** IN THE MATERIALS.\n*/\n\n#ifndef GLSLstd450_H\n#define GLSLstd450_H\n\nstatic const int GLSLstd450Version = 100;\nstatic const int GLSLstd450Revision = 3;\n\nenum GLSLstd450 {\n GLSLstd450Bad = 0, // Don't use\n\n GLSLstd450Round = 1,\n GLSLstd450RoundEven = 2,\n GLSLstd450Trunc = 3,\n GLSLstd450FAbs = 4,\n GLSLstd450SAbs = 5,\n GLSLstd450FSign = 6,\n GLSLstd450SSign = 7,\n GLSLstd450Floor = 8,\n GLSLstd450Ceil = 9,\n GLSLstd450Fract = 10,\n\n GLSLstd450Radians = 11,\n GLSLstd450Degrees = 12,\n GLSLstd450Sin = 13,\n GLSLstd450Cos = 14,\n GLSLstd450Tan = 15,\n GLSLstd450Asin = 16,\n GLSLstd450Acos = 17,\n GLSLstd450Atan = 18,\n GLSLstd450Sinh = 19,\n GLSLstd450Cosh = 20,\n GLSLstd450Tanh = 21,\n GLSLstd450Asinh = 22,\n GLSLstd450Acosh = 23,\n GLSLstd450Atanh = 24,\n GLSLstd450Atan2 = 25,\n\n GLSLstd450Pow = 26,\n GLSLstd450Exp = 27,\n GLSLstd450Log = 28,\n GLSLstd450Exp2 = 29,\n GLSLstd450Log2 = 30,\n GLSLstd450Sqrt = 31,\n GLSLstd450InverseSqrt = 32,\n\n GLSLstd450Determinant = 33,\n GLSLstd450MatrixInverse = 34,\n\n GLSLstd450Modf = 35, // second operand needs an OpVariable to write to\n GLSLstd450ModfStruct = 36, // no OpVariable operand\n GLSLstd450FMin = 37,\n GLSLstd450UMin = 38,\n GLSLstd450SMin = 39,\n GLSLstd450FMax = 40,\n GLSLstd450UMax = 41,\n GLSLstd450SMax = 42,\n GLSLstd450FClamp = 43,\n GLSLstd450UClamp = 44,\n GLSLstd450SClamp = 45,\n 
GLSLstd450FMix = 46,\n GLSLstd450IMix = 47, // Reserved\n GLSLstd450Step = 48,\n GLSLstd450SmoothStep = 49,\n\n GLSLstd450Fma = 50,\n GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to\n GLSLstd450FrexpStruct = 52, // no OpVariable operand\n GLSLstd450Ldexp = 53,\n\n GLSLstd450PackSnorm4x8 = 54,\n GLSLstd450PackUnorm4x8 = 55,\n GLSLstd450PackSnorm2x16 = 56,\n GLSLstd450PackUnorm2x16 = 57,\n GLSLstd450PackHalf2x16 = 58,\n GLSLstd450PackDouble2x32 = 59,\n GLSLstd450UnpackSnorm2x16 = 60,\n GLSLstd450UnpackUnorm2x16 = 61,\n GLSLstd450UnpackHalf2x16 = 62,\n GLSLstd450UnpackSnorm4x8 = 63,\n GLSLstd450UnpackUnorm4x8 = 64,\n GLSLstd450UnpackDouble2x32 = 65,\n\n GLSLstd450Length = 66,\n GLSLstd450Distance = 67,\n GLSLstd450Cross = 68,\n GLSLstd450Normalize = 69,\n GLSLstd450FaceForward = 70,\n GLSLstd450Reflect = 71,\n GLSLstd450Refract = 72,\n\n GLSLstd450FindILsb = 73,\n GLSLstd450FindSMsb = 74,\n GLSLstd450FindUMsb = 75,\n\n GLSLstd450InterpolateAtCentroid = 76,\n GLSLstd450InterpolateAtSample = 77,\n GLSLstd450InterpolateAtOffset = 78,\n\n GLSLstd450NMin = 79,\n GLSLstd450NMax = 80,\n GLSLstd450NClamp = 81,\n\n GLSLstd450Count\n};\n\n#endif // #ifndef GLSLstd450_H\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_reader.h", "class DxbcReader {\n public:\n template\n auto readEnum() {\n using Tx = std::underlying_type_t;\n return static_cast(this->readNum());\n }\n DxbcTag readTag() {\n DxbcTag tag;\n this->read(&tag, 4);\n return tag;\n }\n std::string readString() {\n std::string result;\n \n while (m_data[m_pos] != '\\0')\n result.push_back(m_data[m_pos++]);\n \n m_pos++;\n return result;\n }\n void read(void* dst, size_t n) {\n if (m_pos + n > m_size)\n throw DxvkError(\"DxbcReader::read: Unexpected end of file\");\n std::memcpy(dst, m_data + m_pos, n);\n m_pos += n;\n }\n void skip(size_t n) {\n if (m_pos + n > m_size)\n throw DxvkError(\"DxbcReader::skip: Unexpected end of file\");\n m_pos += n;\n }\n DxbcReader clone(size_t pos) const {\n if 
(pos > m_size)\n throw DxvkError(\"DxbcReader::clone: Invalid offset\");\n return DxbcReader(m_data + pos, m_size - pos);\n }\n DxbcReader resize(size_t size) const {\n if (size > m_size)\n throw DxvkError(\"DxbcReader::resize: Invalid size\");\n return DxbcReader(m_data, size, m_pos);\n }\n void store(std::ostream&& stream) const {\n stream.write(m_data, m_size);\n }\n private:\n const char* m_data = nullptr;\n size_t m_size = 0;\n size_t m_pos = 0;\n template\n T readNum() {\n T result;\n this->read(&result, sizeof(result));\n return result;\n }\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_chunk_shex.h", "class DxbcShex {\n public:\n DxbcShex(DxbcReader reader) {\n // The shader version and type are stored in a 32-bit unit,\n // where the first byte contains the major and minor version\n // numbers, and the high word contains the program type.\n reader.skip(2);\n auto pType = reader.readEnum();\n m_programInfo = DxbcProgramInfo(pType);\n \n // Read the actual shader code as an array of DWORDs.\n auto codeLength = reader.readu32() - 2;\n m_code.resize(codeLength);\n reader.read(m_code.data(), codeLength * sizeof(uint32_t));\n }\n ~DxbcShex() {\n \n }\n private:\n DxbcProgramInfo m_programInfo;\n std::vector m_code;\n};"], ["/lsfg-vk/thirdparty/toml11/src/skip.cpp", "#include \n#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\n\nnamespace detail\n{\ntemplate bool skip_whitespace (location& loc, const context&);\ntemplate bool skip_empty_lines (location& loc, const context&);\ntemplate void skip_comment_block (location& loc, const context&);\ntemplate void skip_empty_or_comment_lines(location& loc, const context&);\ntemplate void skip_string_like (location& loc, const context&);\ntemplate void skip_array_like (location& loc, const context&);\ntemplate void skip_inline_table_like (location& loc, const context&);\ntemplate void skip_value (location& loc, const context&);\ntemplate void skip_key_value_pair (location& loc, const context&);\ntemplate void skip_until_next_table (location& loc, const context&);\n\ntemplate bool skip_whitespace (location& loc, const context&);\ntemplate bool skip_empty_lines (location& loc, const context&);\ntemplate void skip_comment_block (location& loc, const context&);\ntemplate void skip_empty_or_comment_lines(location& loc, const context&);\ntemplate void skip_string_like (location& loc, const context&);\ntemplate void skip_array_like (location& loc, const context&);\ntemplate void skip_inline_table_like (location& loc, const context&);\ntemplate void skip_value (location& loc, const context&);\ntemplate void skip_key_value_pair (location& loc, const context&);\ntemplate void skip_until_next_table (location& loc, const context&);\n\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_math.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n \n constexpr size_t CACHE_LINE_SIZE = 64;\n constexpr double pi = 3.14159265359;\n\n template\n constexpr T clamp(T n, T lo, T hi) {\n if (n < lo) return lo;\n if (n > hi) return hi;\n return n;\n }\n \n template\n constexpr T align(T what, U to) {\n return (what + to - 1) & ~(to - 1);\n }\n\n template\n constexpr T alignDown(T what, U to) {\n return (what / to) * 
to;\n }\n\n // Equivalent of std::clamp for use with floating point numbers\n // Handles (-){INFINITY,NAN} cases.\n // Will return min in cases of NAN, etc.\n inline float fclamp(float value, float min, float max) {\n return std::fmin(\n std::fmax(value, min), max);\n }\n\n template\n inline T divCeil(T dividend, T divisor) {\n return (dividend + divisor - 1) / divisor;\n }\n \n}\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/src/unicode_codecvt.cpp", "/*\nThe MIT License (MIT)\n\nCopyright (c) 2019 Trail of Bits, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*/\n\n#include \n#include \n#include \n\nnamespace peparse {\n// See\n// https://stackoverflow.com/questions/38688417/utf-conversion-functions-in-c11\nstd::string from_utf16(const UCharString &u) {\n std::wstring_convert, char16_t> convert;\n return convert.to_bytes(u);\n}\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/toml11/src/serializer.cpp", "#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\nstruct type_config;\nstruct ordered_type_config;\n\ntemplate typename basic_value::string_type\nformat(const basic_value&, const spec);\n\ntemplate typename basic_value::string_type\nformat(const typename basic_value::key_type& k,\n const basic_value& v, const spec);\n\ntemplate typename basic_value::string_type\nformat(const std::vector::key_type>& ks,\n const basic_value& v, const spec s);\n\ntemplate typename basic_value::string_type\nformat(const basic_value&, const spec);\n\ntemplate typename basic_value::string_type\nformat(const typename basic_value::key_type& k,\n const basic_value& v, const spec);\n\ntemplate typename basic_value::string_type\nformat(const std::vector::key_type>& ks,\n const basic_value& v, const spec s);\n\nnamespace detail\n{\ntemplate class serializer<::toml::type_config>;\ntemplate class serializer<::toml::ordered_type_config>;\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_header.h", "class DxbcHeader {\n public:\n DxbcHeader(DxbcReader& reader) {\n // FourCC at the start of the file, must be 'DXBC'\n DxbcTag fourcc = reader.readTag();\n \n if (fourcc != \"DXBC\")\n throw DxvkError(\"DxbcHeader::DxbcHeader: Invalid fourcc, expected 'DXBC'\");\n \n // Stuff we don't actually need to store\n reader.skip(4 * sizeof(uint32_t)); // Check sum\n reader.skip(1 * sizeof(uint32_t)); // Constant 1\n reader.skip(1 * sizeof(uint32_t)); // Bytecode length\n \n // Number of chunks in the file\n uint32_t chunkCount = reader.readu32();\n \n // Chunk offsets are stored immediately after\n for (uint32_t i = 0; i < chunkCount; i++)\n m_chunkOffsets.push_back(reader.readu32());\n }\n ~DxbcHeader() {\n \n }\n private:\n std::vector m_chunkOffsets;\n};"], ["/lsfg-vk/thirdparty/dxbc/src/util/util_log.cpp", "#include \"log/log_debug.h\"\n\nnamespace dxvk::debug {\n \n std::string methodName(const 
std::string& prettyName) {\n size_t end = prettyName.find(\"(\");\n size_t begin = prettyName.substr(0, end).rfind(\" \") + 1;\n return prettyName.substr(begin,end - begin);\n }\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/log/log_debug.h", "#pragma once\n\n#include \n\n#include \"log/log.h\"\n\n#ifdef _MSC_VER\n#define METHOD_NAME __FUNCSIG__\n#else\n#define METHOD_NAME __PRETTY_FUNCTION__\n#endif\n\n#define TRACE_ENABLED\n\n#ifdef TRACE_ENABLED\n#define TRACE(...) \\\n do { dxvk::debug::trace(METHOD_NAME, ##__VA_ARGS__); } while (0)\n#else\n#define TRACE(...) \\\n do { } while (0)\n#endif\n\nnamespace dxvk::debug {\n \n std::string methodName(const std::string& prettyName);\n \n inline void traceArgs(std::stringstream& stream) { }\n \n template\n void traceArgs(std::stringstream& stream, const Arg1& arg1) {\n stream << arg1;\n }\n \n template\n void traceArgs(std::stringstream& stream, const Arg1& arg1, const Arg2& arg2, const Args&... args) {\n stream << arg1 << \",\";\n traceArgs(stream, arg2, args...);\n }\n \n template\n void trace(const std::string& funcName, const Args&... 
args) {\n std::stringstream stream;\n stream << methodName(funcName) << \"(\";\n traceArgs(stream, args...);\n stream << \")\";\n Logger::trace(stream.str());\n }\n \n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_modinfo.h", "#pragma once\n\n#include \"dxbc_options.h\"\n\nnamespace dxvk {\n\n /**\n * \\brief Tessellation info\n * \n * Stores the maximum tessellation factor\n * to export from tessellation shaders.\n */\n struct DxbcTessInfo {\n float maxTessFactor;\n };\n\n /**\n * \\brief Xfb capture entry\n * \n * Stores an output variable to capture,\n * as well as the buffer to write it to.\n */\n struct DxbcXfbEntry {\n const char* semanticName;\n uint32_t semanticIndex;\n uint32_t componentIndex;\n uint32_t componentCount;\n uint32_t streamId;\n uint32_t bufferId;\n uint32_t offset;\n };\n\n /**\n * \\brief Xfb info\n * \n * Stores capture entries and output buffer\n * strides. This structure must only be\n * defined if \\c entryCount is non-zero.\n */\n struct DxbcXfbInfo {\n uint32_t entryCount;\n DxbcXfbEntry entries[128];\n uint32_t strides[4];\n int32_t rasterizedStream;\n };\n\n /**\n * \\brief Shader module info\n * \n * Stores information which may affect shader compilation.\n * This data can be supplied by the client API implementation.\n */\n struct DxbcModuleInfo {\n DxbcOptions options;\n DxbcTessInfo* tess;\n DxbcXfbInfo* xfb;\n };\n\n}"], ["/lsfg-vk/thirdparty/dxbc/include/util/rc/util_rc.h", "#pragma once\n\n#include \n\n#include \"../util_likely.h\"\n\nnamespace dxvk {\n \n /**\n * \\brief Reference-counted object\n */\n class RcObject {\n \n public:\n \n /**\n * \\brief Increments reference count\n * \\returns New reference count\n */\n force_inline uint32_t incRef() {\n return ++m_refCount;\n }\n \n /**\n * \\brief Decrements reference count\n * \\returns New reference count\n */\n force_inline uint32_t decRef() {\n return --m_refCount;\n }\n \n private:\n \n std::atomic m_refCount = { 0u };\n \n };\n \n}"], 
["/lsfg-vk/thirdparty/dxbc/include/dxvk/dxvk_hash.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n\n struct DxvkEq {\n template\n size_t operator () (const T& a, const T& b) const {\n return a.eq(b);\n }\n };\n\n struct DxvkHash {\n template\n size_t operator () (const T& object) const {\n return object.hash();\n }\n };\n\n class DxvkHashState {\n\n public:\n\n void add(size_t hash) {\n m_value ^= hash + 0x9e3779b9\n + (m_value << 6)\n + (m_value >> 2);\n }\n\n operator size_t () const {\n return m_value;\n }\n\n private:\n\n size_t m_value = 0;\n\n };\n\n}\n"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_include.h", "#pragma once\n\n#include \n#include \n\n#include \"dxvk_limits.h\"\n#include \"dxvk_pipelayout.h\"\n\n#include \"log/log.h\"\n#include \"log/log_debug.h\"\n\n#include \"rc/util_rc.h\"\n#include \"rc/util_rc_ptr.h\"\n\n#include \"util_bit.h\"\n#include \"util_enum.h\"\n#include \"util_error.h\"\n#include \"util_string.h\"\n#include \"util_flags.h\"\n#include \"util_small_vector.h\"\n"], ["/lsfg-vk/thirdparty/toml11/src/parser.cpp", "#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\n\ntemplate result, std::vector> try_parse(std::vector, std::string, spec);\ntemplate result, std::vector> try_parse(std::istream&, std::string, spec);\ntemplate result, std::vector> try_parse(std::string, spec);\ntemplate result, std::vector> try_parse(FILE*, std::string, spec);\ntemplate result, std::vector> try_parse_str(std::string, spec, cxx::source_location);\n\ntemplate basic_value parse(std::vector, std::string, spec);\ntemplate basic_value parse(std::istream&, std::string, spec);\ntemplate basic_value parse(std::string, spec);\ntemplate basic_value parse(FILE*, std::string, spec);\ntemplate basic_value parse_str(std::string, spec, cxx::source_location);\n\ntemplate result, std::vector> try_parse(std::vector, std::string, spec);\ntemplate result, std::vector> try_parse(std::istream&, std::string, spec);\ntemplate result, std::vector> try_parse(std::string, spec);\ntemplate result, std::vector> try_parse(FILE*, std::string, spec);\ntemplate result, std::vector> try_parse_str(std::string, spec, cxx::source_location);\n\ntemplate basic_value parse(std::vector, std::string, spec);\ntemplate basic_value parse(std::istream&, std::string, spec);\ntemplate basic_value parse(std::string, spec);\ntemplate basic_value parse(FILE*, std::string, spec);\ntemplate basic_value parse_str(std::string, spec, cxx::source_location);\n\n#if defined(TOML11_HAS_FILESYSTEM)\ntemplate cxx::enable_if_t::value, result, std::vector>> try_parse(const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, result, std::vector>> try_parse(const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, basic_value > parse (const std::filesystem::path&, spec);\ntemplate cxx::enable_if_t::value, basic_value > parse (const std::filesystem::path&, spec);\n#endif // filesystem\n\n} // toml\n"], 
["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/to_string.h", "#pragma once\n\n#include \n#include \n\n#if defined(_MSC_VER)\ntypedef std::basic_string UCharString;\n#else\ntypedef std::u16string UCharString;\n#endif\n\nnamespace peparse {\ntemplate \nstatic std::string to_string(T t, std::ios_base &(*f)(std::ios_base &) ) {\n std::ostringstream oss;\n oss << f << t;\n return oss.str();\n}\n\nstd::string from_utf16(const UCharString &u);\n} // namespace peparse\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_error.h", "#pragma once\n\n#include \n\nnamespace dxvk {\n \n /**\n * \\brief DXVK error\n * \n * A generic exception class that stores a\n * message. Exceptions should be logged.\n */\n class DxvkError {\n \n public:\n \n DxvkError() { }\n DxvkError(std::string&& message)\n : m_message(std::move(message)) { }\n \n const std::string& message() const {\n return m_message;\n }\n \n private:\n \n std::string m_message;\n \n };\n \n}"], ["/lsfg-vk/thirdparty/dxbc/include/spirv/spirv_include.h", "#pragma once\n\n#include \n#include \n\n#include \"log/log.h\"\n#include \"log/log_debug.h\"\n\n#include \"util_error.h\"\n#include \"util_flags.h\"\n#include \"util_likely.h\"\n#include \"util_string.h\"\n\n#include \"rc/util_rc.h\"\n#include \"rc/util_rc_ptr.h\"\n"], ["/lsfg-vk/thirdparty/toml11/src/context.cpp", "#include \n#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\nnamespace detail\n{\ntemplate class context<::toml::type_config>;\ntemplate class context<::toml::ordered_type_config>;\n} // detail\n} // toml\n"], ["/lsfg-vk/thirdparty/toml11/src/syntax.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/types.cpp", "#include \n#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n\nnamespace toml\n{\ntemplate class basic_value;\ntemplate class basic_value;\n} // toml\n"], ["/lsfg-vk/thirdparty/toml11/src/region.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/color.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/format.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/location.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/comments.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/scanner.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/datetime.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/value_t.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/literal.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/error_info.cpp", "#include \n\n#if ! 
defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/toml11/src/source_location.cpp", "#include \n\n#if ! defined(TOML11_COMPILE_SOURCES)\n#error \"Define `TOML11_COMPILE_SOURCES` before compiling source code!\"\n#endif\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_likely.h", "#pragma once\n\n#ifdef __GNUC__\n#define likely(x) __builtin_expect(bool(x),1)\n#define unlikely(x) __builtin_expect(bool(x),0)\n#define force_inline inline __attribute__((always_inline))\n#else\n#define likely(x) (x)\n#define unlikely(x) (x)\n#define force_inline inline\n#endif\n"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_enum.h", "#pragma once\n\n#define ENUM_NAME(name) \\\n case name: return os << #name\n\n#define ENUM_DEFAULT(name) \\\n default: return os << static_cast(e)\n"], ["/lsfg-vk/thirdparty/pe-parse/pe-parser-library/include/pe-parse/parse.h", "class parse {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/util/util_string.h", "class util_string {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_util.h", "class dxbc_util {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_names.h", "class dxbc_names {\n};"], ["/lsfg-vk/thirdparty/dxbc/include/dxbc/dxbc_defs.h", "class DxbcOperandKind {\n};"]], "task_instance_info": {"created_time": "2025-08-20 20:01:24", "created_task_model": "DeepSeek-R1", "function_skeleton": "Sampler::Sampler(const Core::Device& device,\n VkSamplerAddressMode mode,\n VkCompareOp compare,\n bool isWhite) {}", "code_description": "This code segment functions to: Configure and instantiate a graphics resource with specified filtering, addressing, and border handling parameters while managing lifecycle through smart resource ownership."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```cpp\n[TASK_BEGIN]\nSampler::Sampler(const Core::Device& device,\n VkSamplerAddressMode mode,\n VkCompareOp compare,\n bool isWhite) {\n VkSamplerCreateInfo 
createInfo{};\n createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;\n createInfo.magFilter = isWhite ? VK_FILTER_NEAREST : VK_FILTER_LINEAR;\n createInfo.minFilter = createInfo.magFilter;\n createInfo.addressModeU = mode;\n createInfo.addressModeV = mode;\n createInfo.addressModeW = mode;\n createInfo.anisotropyEnable = VK_FALSE;\n createInfo.maxAnisotropy = 1.0f;\n createInfo.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;\n createInfo.unnormalizedCoordinates = VK_FALSE;\n createInfo.compareEnable = compare != VK_COMPARE_OP_NEVER ? VK_TRUE : VK_FALSE;\n createInfo.compareOp = compare;\n createInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;\n createInfo.mipLodBias = 0.0f;\n createInfo.minLod = 0.0f;\n createInfo.maxLod = VK_LOD_CLAMP_NONE;\n\n VkResult result = vkCreateSampler(device.GetDevice(), &createInfo, nullptr, &_sampler);\n if (result != VK_SUCCESS) {\n throw LSFG::Common::VulkanException(\"Failed to create sampler!\", result);\n }\n}\n[TASK_END]\n```", "inference_time": "2025-08-20 20-02-49"}, "editdistance_info": {"edit_distance": 23.6301, "calculate_time": "2025-08-20 20:02:49", "true_code_clean": "Sampler::Sampler(const Core::Device& device,\n VkSamplerAddressMode mode,\n VkCompareOp compare,\n bool isWhite) {\n const VkSamplerCreateInfo desc{\n .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,\n .magFilter = VK_FILTER_LINEAR,\n .minFilter = VK_FILTER_LINEAR,\n .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,\n .addressModeU = mode,\n .addressModeV = mode,\n .addressModeW = mode,\n .compareOp = compare,\n .maxLod = VK_LOD_CLAMP_NONE,\n .borderColor =\n isWhite ? 
VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE\n : VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK\n };\n VkSampler samplerHandle{};\n auto res = vkCreateSampler(device.handle(), &desc, nullptr, &samplerHandle);\n if (res != VK_SUCCESS || samplerHandle == VK_NULL_HANDLE)\n throw LSFG::vulkan_error(res, \"Unable to create sampler\");\n this->sampler = std::shared_ptr(\n new VkSampler(samplerHandle),\n [dev = device.handle()](VkSampler* samplerHandle) {\n vkDestroySampler(dev, *samplerHandle, nullptr);\n }\n );\n}", "predict_code_clean": "Sampler::Sampler(const Core::Device& device,\n VkSamplerAddressMode mode,\n VkCompareOp compare,\n bool isWhite) {\n VkSamplerCreateInfo createInfo{};\n createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;\n createInfo.magFilter = isWhite ? VK_FILTER_NEAREST : VK_FILTER_LINEAR;\n createInfo.minFilter = createInfo.magFilter;\n createInfo.addressModeU = mode;\n createInfo.addressModeV = mode;\n createInfo.addressModeW = mode;\n createInfo.anisotropyEnable = VK_FALSE;\n createInfo.maxAnisotropy = 1.0f;\n createInfo.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;\n createInfo.unnormalizedCoordinates = VK_FALSE;\n createInfo.compareEnable = compare != VK_COMPARE_OP_NEVER ? VK_TRUE : VK_FALSE;\n createInfo.compareOp = compare;\n createInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;\n createInfo.mipLodBias = 0.0f;\n createInfo.minLod = 0.0f;\n createInfo.maxLod = VK_LOD_CLAMP_NONE;\n VkResult result = vkCreateSampler(device.GetDevice(), &createInfo, nullptr, &_sampler);\n if (result != VK_SUCCESS) {\n throw LSFG::Common::VulkanException(\"Failed to create sampler!\", result);\n }\n}"}}