text
stringlengths
5
1.04M
/*
 * (c) FFRI Security, Inc., 2021 / Author: FFRI Security, Inc.
 */
#include "pch.h"
#include "pe_utils.h"
#include "utils.h"

/// Resolves the load-config directory of the module mapped at imageBase.
/// Returns nullptr when the directory RVA cannot be resolved (previously a
/// zero RVA produced a bogus pointer equal to imageBase, which callers could
/// not distinguish from a valid directory).
PIMAGE_LOAD_CONFIG_DIRECTORY GetLoadConfigDirectory(
    uint64_t imageBase) {
    const uint32_t rva = GetImageDataDirectoryEntryRva(
        imageBase,
        IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG);
    if (rva == 0) {
        return nullptr;
    }
    return reinterpret_cast<PIMAGE_LOAD_CONFIG_DIRECTORY>(imageBase + rva);
}

/// Resolves the import descriptor table of the module mapped at imageBase.
/// Returns nullptr when the directory RVA cannot be resolved (see
/// GetLoadConfigDirectory for the rationale).
PIMAGE_IMPORT_DESCRIPTOR GetImportDescriptor(
    uint64_t imageBase) {
    const uint32_t rva = GetImageDataDirectoryEntryRva(
        imageBase,
        IMAGE_DIRECTORY_ENTRY_IMPORT);
    if (rva == 0) {
        return nullptr;
    }
    return reinterpret_cast<PIMAGE_IMPORT_DESCRIPTOR>(imageBase + rva);
}

/// Reads the DOS and NT headers of the module mapped at imageBase and returns
/// the virtual address of the requested data-directory entry.
/// Returns 0 on any failure: unreadable headers, bad DOS/NT signatures, or an
/// entry id beyond NumberOfRvaAndSizes (such entries are absent from the image).
uint32_t GetImageDataDirectoryEntryRva(
    uint64_t imageBase,
    uint32_t dataDirectoryEntryId) {
    IMAGE_DOS_HEADER dosHeader {};
    ULONG cb = 0;
    ReadMemory(imageBase, &dosHeader, sizeof(IMAGE_DOS_HEADER), &cb);
    if (cb != sizeof(IMAGE_DOS_HEADER)) {
        dprintf("Cannot read DOS header\n");
        return 0;
    }
    if (dosHeader.e_magic != IMAGE_DOS_SIGNATURE) {
        // Not an MZ image; e_lfanew would be garbage.
        dprintf("Invalid DOS signature\n");
        return 0;
    }

    IMAGE_NT_HEADERS ntHeaders {};
    ReadMemory(imageBase + dosHeader.e_lfanew, &ntHeaders, sizeof(IMAGE_NT_HEADERS), &cb);
    if (cb != sizeof(IMAGE_NT_HEADERS)) {
        dprintf("Cannot read NT header\n");
        return 0;
    }
    if (ntHeaders.Signature != IMAGE_NT_SIGNATURE) {
        dprintf("Invalid NT signature\n");
        return 0;
    }

    // Indexing DataDirectory past NumberOfRvaAndSizes reads undefined data.
    if (dataDirectoryEntryId >= ntHeaders.OptionalHeader.NumberOfRvaAndSizes) {
        return 0;
    }
    return ntHeaders.OptionalHeader.DataDirectory[dataDirectoryEntryId].VirtualAddress;
}

/// Scans the import descriptor table and returns the lowest FirstThunk
/// address (i.e. the base of the IAT), or std::nullopt when the table is
/// empty. imageImportDesc must point to a descriptor array terminated by an
/// entry whose Name is 0.
std::optional<ULONGLONG> GetIatBaseAddress(
    PIMAGE_IMPORT_DESCRIPTOR imageImportDesc,
    ULONGLONG imageBase) {
    // Track the minimum with an empty optional instead of the previous
    // 0xffffffffffffffff sentinel, which a (corrupt) thunk address could
    // legitimately collide with.
    std::optional<ULONGLONG> baseAddr;
    for (auto iter = imageImportDesc;; ++iter) {
        const auto name = ReadMemberFromStruct<IMAGE_IMPORT_DESCRIPTOR, DWORD>(
            iter, offsetof(IMAGE_IMPORT_DESCRIPTOR, Name));
        if (name == 0) {
            break;  // zero Name terminates the descriptor table
        }
        const ULONGLONG imageThunk =
            imageBase +
            ReadMemberFromStruct<IMAGE_IMPORT_DESCRIPTOR, DWORD>(
                iter, offsetof(IMAGE_IMPORT_DESCRIPTOR, FirstThunk));
        if (!baseAddr || imageThunk < *baseAddr) {
            baseAddr = imageThunk;
        }
    }
    return baseAddr;
}
/**
 * @author Alessandro Bianco
 */

/**
 * @addtogroup DFNsTest
 * @{
 */

#include <catch.hpp>
#include <PointCloudFiltering/StatisticalOutlierRemoval.hpp>
#include <memory>

using namespace CDFF::DFN::PointCloudFiltering;
using namespace PointCloudWrapper;
using namespace PoseWrapper;

namespace StatisticalOutlierRemovalTest
{

// Linear scan for a point within EPSILON of (x, y, z) on all three axes.
// Shared by RequireExist/RequireNotExist, which previously duplicated it.
bool ContainsPoint(const PointCloud& cloud, float x, float y, float z)
{
    const float EPSILON = 0.00001;
    int numberOfPoints = GetNumberOfPoints(cloud);
    for (int pointIndex = 0; pointIndex < numberOfPoints; pointIndex++)
    {
        float pointX = GetXCoordinate(cloud, pointIndex);
        float pointY = GetYCoordinate(cloud, pointIndex);
        float pointZ = GetZCoordinate(cloud, pointIndex);
        if (pointX > x - EPSILON && pointX < x + EPSILON &&
            pointY > y - EPSILON && pointY < y + EPSILON &&
            pointZ > z - EPSILON && pointZ < z + EPSILON)
        {
            return true;
        }
    }
    return false;
}

// Asserts that a point close to (x, y, z) is present in the cloud.
void RequireExist(const PointCloud& cloud, float x, float y, float z)
{
    REQUIRE( ContainsPoint(cloud, x, y, z) );
}

// Asserts that no point close to (x, y, z) is present in the cloud.
void RequireNotExist(const PointCloud& cloud, float x, float y, float z)
{
    REQUIRE( !ContainsPoint(cloud, x, y, z) );
}

// Steps value by resolution toward limit; once value reaches limit the
// result jumps past it (limit + resolution) so the caller's loop terminates
// even when floating-point accumulation would stall just below limit.
float Next(float value, float limit, float resolution)
{
    if (value + resolution < limit)
    {
        return value + resolution;
    }
    else if (value >= limit)
    {
        return limit + resolution;
    }
    else
    {
        return limit;
    }
}

}

using namespace StatisticalOutlierRemovalTest;

TEST_CASE( "DFN simple processing step succeeds (StatisticalOutlierRemoval)", "[SimpleProcess]" )
{
    const float squareSize = 0.1;
    const float squareResolution = 0.001;
    int numberOfValidPoints = 0;

    // Input: a dense plane of valid points plus two isolated outliers.
    PointCloudPtr cloud = NewPointCloud();
    for (float x = 0; x < squareSize; x = Next(x, squareSize, squareResolution))
    {
        for (float y = 0; y < squareSize; y = Next(y, squareSize, squareResolution))
        {
            AddPoint(*cloud, x, y, 0);
            numberOfValidPoints++;
        }
    }
    AddPoint(*cloud, 0, 0, 1);
    AddPoint(*cloud, 0, 1, -1);

    // unique_ptr releases the DFN even when a REQUIRE below fails;
    // the previous raw new/delete pair leaked it on assertion failure.
    std::unique_ptr<StatisticalOutlierRemoval> statisticalOutlierRemoval(new StatisticalOutlierRemoval);

    // Setup DFN
    statisticalOutlierRemoval->setConfigurationFile("../tests/ConfigurationFiles/DFNs/PointCloudFiltering/StatisticalOutlierRemoval_Conf1.yaml");
    statisticalOutlierRemoval->configure();
    statisticalOutlierRemoval->pointCloudInput(*cloud);
    statisticalOutlierRemoval->process();

    const PointCloud& output = statisticalOutlierRemoval->filteredPointCloudOutput();

    // The filter keeps the plane and removes the two outliers.
    REQUIRE( GetNumberOfPoints(output) == numberOfValidPoints);
    RequireNotExist( output, 0, 0, 1);
    RequireNotExist( output, 0, 1, -1);
}

TEST_CASE( "DFN negative processing step succeeds (StatisticalOutlierRemoval)", "[process]" )
{
    const float squareSize = 0.1;
    const float squareResolution = 0.001;

    // Input: a dense plane of valid points plus two isolated outliers.
    // (An unused numberOfValidPoints counter was removed; this test asserts
    // a fixed expected count of 2 below.)
    PointCloudPtr cloud = NewPointCloud();
    for (float x = 0; x < squareSize; x = Next(x, squareSize, squareResolution))
    {
        for (float y = 0; y < squareSize; y = Next(y, squareSize, squareResolution))
        {
            AddPoint(*cloud, x, y, 0);
        }
    }
    AddPoint(*cloud, 0, 0, 1);
    AddPoint(*cloud, 0, 1, -1);

    std::unique_ptr<StatisticalOutlierRemoval> statisticalOutlierRemoval(new StatisticalOutlierRemoval);

    // Setup DFN. Conf2 presumably configures the filter as "negative",
    // keeping only the outliers -- TODO confirm against the yaml file.
    statisticalOutlierRemoval->setConfigurationFile("../tests/ConfigurationFiles/DFNs/PointCloudFiltering/StatisticalOutlierRemoval_Conf2.yaml");
    statisticalOutlierRemoval->configure();
    statisticalOutlierRemoval->pointCloudInput(*cloud);
    statisticalOutlierRemoval->process();

    const PointCloud& output = statisticalOutlierRemoval->filteredPointCloudOutput();

    // Only the two injected outliers survive.
    REQUIRE( GetNumberOfPoints(output) == 2);
    RequireExist( output, 0, 0, 1);
    RequireExist( output, 0, 1, -1);
}
TEST_CASE( "DFN configuration succeeds (StatisticalOutlierRemoval)", "[configure]" )
{
    // Instantiate the DFN under test.
    StatisticalOutlierRemoval* filter = new StatisticalOutlierRemoval;

    // Point it at a known-good configuration file and configure it;
    // "succeeds" here simply means configure() completes without error.
    filter->setConfigurationFile("../tests/ConfigurationFiles/DFNs/PointCloudFiltering/StatisticalOutlierRemoval_Conf1.yaml");
    filter->configure();

    // Cleanup
    delete filter;
}

/** @} */
// Generated by Haxe 4.0.0-rc.2+77068e10c
// NOTE(review): machine-generated hxcpp output for the Haxe type
// lime.app.Event<lime.ui.GamepadButton->Void> (see lime/app/Event.hx and
// lime/_internal/macros/EventMacro.hx). Do not hand-edit; regenerate from
// the Haxe sources instead. Only comments have been added here.
#include <hxcpp.h>

#ifndef INCLUDED_Reflect
#include <Reflect.h>
#endif
#ifndef INCLUDED_lime_app__Event_lime_ui_GamepadButton_Void
#include <lime/app/_Event_lime_ui_GamepadButton_Void.h>
#endif

// Debug stack-frame descriptors mapping each method back to its Haxe source line.
HX_DEFINE_STACK_FRAME(_hx_pos_b2bdd2074bcac861_41_new,"lime.app._Event_lime_ui_GamepadButton_Void","new",0xbb4f6e91,"lime.app._Event_lime_ui_GamepadButton_Void.new","lime/app/Event.hx",41,0xbda45bec)
HX_LOCAL_STACK_FRAME(_hx_pos_b2bdd2074bcac861_57_add,"lime.app._Event_lime_ui_GamepadButton_Void","add",0xbb459052,"lime.app._Event_lime_ui_GamepadButton_Void.add","lime/app/Event.hx",57,0xbda45bec)
HX_LOCAL_STACK_FRAME(_hx_pos_b2bdd2074bcac861_81_cancel,"lime.app._Event_lime_ui_GamepadButton_Void","cancel",0x5c3283e9,"lime.app._Event_lime_ui_GamepadButton_Void.cancel","lime/app/Event.hx",81,0xbda45bec)
HX_LOCAL_STACK_FRAME(_hx_pos_b2bdd2074bcac861_126_has,"lime.app._Event_lime_ui_GamepadButton_Void","has",0xbb4add8b,"lime.app._Event_lime_ui_GamepadButton_Void.has","lime/app/Event.hx",126,0xbda45bec)
HX_LOCAL_STACK_FRAME(_hx_pos_b2bdd2074bcac861_142_remove,"lime.app._Event_lime_ui_GamepadButton_Void","remove",0xa88732b3,"lime.app._Event_lime_ui_GamepadButton_Void.remove","lime/app/Event.hx",142,0xbda45bec)
HX_LOCAL_STACK_FRAME(_hx_pos_63b28171bf8eb3ff_82_dispatch,"lime.app._Event_lime_ui_GamepadButton_Void","dispatch",0xb0081ee9,"lime.app._Event_lime_ui_GamepadButton_Void.dispatch","lime/_internal/macros/EventMacro.hx",82,0xc5a10671)

namespace lime{
namespace app{

// Haxe-side constructor (Event.hx:41): clears the canceled flag and creates
// the three parallel arrays (listener, priority, repeat) that back the event.
void _Event_lime_ui_GamepadButton_Void_obj::__construct(){
	HX_STACKFRAME(&_hx_pos_b2bdd2074bcac861_41_new)
	HXLINE( 43) this->canceled = false;
	HXLINE( 44) this->_hx___listeners = ::Array_obj< ::Dynamic>::__new();
	HXLINE( 45) this->_hx___priorities = ::Array_obj< int >::__new();
	HXLINE( 46) this->_hx___repeat = ::Array_obj< bool >::__new();
}

Dynamic _Event_lime_ui_GamepadButton_Void_obj::__CreateEmpty() { return new _Event_lime_ui_GamepadButton_Void_obj; }

void *_Event_lime_ui_GamepadButton_Void_obj::_hx_vtable = 0;

Dynamic _Event_lime_ui_GamepadButton_Void_obj::__Create(hx::DynamicArray inArgs) {
	hx::ObjectPtr< _Event_lime_ui_GamepadButton_Void_obj > _hx_result = new _Event_lime_ui_GamepadButton_Void_obj();
	_hx_result->__construct();
	return _hx_result;
}

bool _Event_lime_ui_GamepadButton_Void_obj::_hx_isInstanceOf(int inClassId) { return inClassId==(int)0x00000001 || inClassId==(int)0x23f48759; }

// Inserts listener keeping __priorities sorted in descending order; appends
// when no existing entry has a lower priority. once==true is stored negated
// in __repeat, which dispatch() uses to drop one-shot listeners after firing.
void _Event_lime_ui_GamepadButton_Void_obj::add( ::Dynamic listener,hx::Null< bool > __o_once,hx::Null< int > __o_priority){
	bool once = __o_once.Default(false);
	int priority = __o_priority.Default(0);
	HX_STACKFRAME(&_hx_pos_b2bdd2074bcac861_57_add)
	HXLINE( 59) {
		HXLINE( 59) int _g = 0;
		HXDLIN( 59) int _g1 = this->_hx___priorities->length;
		HXDLIN( 59) while((_g < _g1)){
			HXLINE( 59) _g = (_g + 1);
			HXDLIN( 59) int i = (_g - 1);
			HXLINE( 61) if ((priority > this->_hx___priorities->__get(i))) {
				HXLINE( 63) this->_hx___listeners->insert(i,listener);
				HXLINE( 64) this->_hx___priorities->insert(i,priority);
				HXLINE( 65) this->_hx___repeat->insert(i,!(once));
				HXLINE( 66) return;
			}
		}
	}
	HXLINE( 70) this->_hx___listeners->push(listener);
	HXLINE( 71) this->_hx___priorities->push(priority);
	HXLINE( 72) this->_hx___repeat->push(!(once));
}

HX_DEFINE_DYNAMIC_FUNC3(_Event_lime_ui_GamepadButton_Void_obj,add,(void))

// Sets the canceled flag; dispatch() checks it after each listener call.
void _Event_lime_ui_GamepadButton_Void_obj::cancel(){
	HX_STACKFRAME(&_hx_pos_b2bdd2074bcac861_81_cancel)
	HXDLIN( 81) this->canceled = true;
}

HX_DEFINE_DYNAMIC_FUNC0(_Event_lime_ui_GamepadButton_Void_obj,cancel,(void))

// Returns true if listener is registered (compared via Reflect.compareMethods).
bool _Event_lime_ui_GamepadButton_Void_obj::has( ::Dynamic listener){
	HX_STACKFRAME(&_hx_pos_b2bdd2074bcac861_126_has)
	HXLINE( 128) {
		HXLINE( 128) int _g = 0;
		HXDLIN( 128) ::Array< ::Dynamic> _g1 = this->_hx___listeners;
		HXDLIN( 128) while((_g < _g1->length)){
			HXLINE( 128) ::Dynamic l = _g1->__get(_g);
			HXDLIN( 128) _g = (_g + 1);
			HXLINE( 130) if (::Reflect_obj::compareMethods(l,listener)) {
				HXLINE( 130) return true;
			}
		}
	}
	HXLINE( 134) return false;
}

HX_DEFINE_DYNAMIC_FUNC1(_Event_lime_ui_GamepadButton_Void_obj,has,return )

// Removes every entry matching listener; iterates backwards so removeRange
// does not disturb the indices still to be visited.
void _Event_lime_ui_GamepadButton_Void_obj::remove( ::Dynamic listener){
	HX_STACKFRAME(&_hx_pos_b2bdd2074bcac861_142_remove)
	HXLINE( 144) int i = this->_hx___listeners->length;
	HXLINE( 146) while(true){
		HXLINE( 146) i = (i - 1);
		HXDLIN( 146) if (!((i >= 0))) {
			HXLINE( 146) goto _hx_goto_6;
		}
		HXLINE( 148) if (::Reflect_obj::compareMethods(this->_hx___listeners->__get(i),listener)) {
			HXLINE( 150) this->_hx___listeners->removeRange(i,1);
			HXLINE( 151) this->_hx___priorities->removeRange(i,1);
			HXLINE( 152) this->_hx___repeat->removeRange(i,1);
		}
	}
	_hx_goto_6:;
}

HX_DEFINE_DYNAMIC_FUNC1(_Event_lime_ui_GamepadButton_Void_obj,remove,(void))

// Calls each listener with argument a. One-shot listeners (__repeat false)
// are removed as they fire (the index is not advanced in that case); a
// listener calling cancel() stops the loop via the canceled flag.
void _Event_lime_ui_GamepadButton_Void_obj::dispatch(int a){
	HX_STACKFRAME(&_hx_pos_63b28171bf8eb3ff_82_dispatch)
	HXLINE( 83) this->canceled = false;
	HXLINE( 85) ::Array< ::Dynamic> listeners = this->_hx___listeners;
	HXLINE( 86) ::Array< bool > repeat = this->_hx___repeat;
	HXLINE( 87) int i = 0;
	HXLINE( 89) while((i < listeners->length)){
		HXLINE( 91) listeners->__get(i)(a);
		HXLINE( 93) if (!(repeat->__get(i))) {
			HXLINE( 95) this->remove(listeners->__get(i));
		}
		else {
			HXLINE( 99) i = (i + 1);
		}
		HXLINE( 102) if (this->canceled) {
			HXLINE( 104) goto _hx_goto_8;
		}
	}
	_hx_goto_8:;
}

HX_DEFINE_DYNAMIC_FUNC1(_Event_lime_ui_GamepadButton_Void_obj,dispatch,(void))

hx::ObjectPtr< _Event_lime_ui_GamepadButton_Void_obj > _Event_lime_ui_GamepadButton_Void_obj::__new() {
	hx::ObjectPtr< _Event_lime_ui_GamepadButton_Void_obj > __this = new _Event_lime_ui_GamepadButton_Void_obj();
	__this->__construct();
	return __this;
}

hx::ObjectPtr< _Event_lime_ui_GamepadButton_Void_obj > _Event_lime_ui_GamepadButton_Void_obj::__alloc(hx::Ctx *_hx_ctx) {
	_Event_lime_ui_GamepadButton_Void_obj *__this = (_Event_lime_ui_GamepadButton_Void_obj*)(hx::Ctx::alloc(_hx_ctx, sizeof(_Event_lime_ui_GamepadButton_Void_obj), true, "lime.app._Event_lime_ui_GamepadButton_Void"));
	*(void **)__this = _Event_lime_ui_GamepadButton_Void_obj::_hx_vtable;
	__this->__construct();
	return __this;
}

_Event_lime_ui_GamepadButton_Void_obj::_Event_lime_ui_GamepadButton_Void_obj() {
}

// GC support: mark/visit the four member references.
void _Event_lime_ui_GamepadButton_Void_obj::__Mark(HX_MARK_PARAMS) {
	HX_MARK_BEGIN_CLASS(_Event_lime_ui_GamepadButton_Void);
	HX_MARK_MEMBER_NAME(canceled,"canceled");
	HX_MARK_MEMBER_NAME(_hx___repeat,"__repeat");
	HX_MARK_MEMBER_NAME(_hx___priorities,"__priorities");
	HX_MARK_MEMBER_NAME(_hx___listeners,"__listeners");
	HX_MARK_END_CLASS();
}

void _Event_lime_ui_GamepadButton_Void_obj::__Visit(HX_VISIT_PARAMS) {
	HX_VISIT_MEMBER_NAME(canceled,"canceled");
	HX_VISIT_MEMBER_NAME(_hx___repeat,"__repeat");
	HX_VISIT_MEMBER_NAME(_hx___priorities,"__priorities");
	HX_VISIT_MEMBER_NAME(_hx___listeners,"__listeners");
}

// Reflection: field lookup switches on name length first, then compares names.
hx::Val _Event_lime_ui_GamepadButton_Void_obj::__Field(const ::String &inName,hx::PropertyAccess inCallProp) {
	switch(inName.length) {
	case 3:
		if (HX_FIELD_EQ(inName,"add") ) { return hx::Val( add_dyn() ); }
		if (HX_FIELD_EQ(inName,"has") ) { return hx::Val( has_dyn() ); }
		break;
	case 6:
		if (HX_FIELD_EQ(inName,"cancel") ) { return hx::Val( cancel_dyn() ); }
		if (HX_FIELD_EQ(inName,"remove") ) { return hx::Val( remove_dyn() ); }
		break;
	case 8:
		if (HX_FIELD_EQ(inName,"canceled") ) { return hx::Val( canceled ); }
		if (HX_FIELD_EQ(inName,"__repeat") ) { return hx::Val( _hx___repeat ); }
		if (HX_FIELD_EQ(inName,"dispatch") ) { return hx::Val( dispatch_dyn() ); }
		break;
	case 11:
		if (HX_FIELD_EQ(inName,"__listeners") ) { return hx::Val( _hx___listeners ); }
		break;
	case 12:
		if (HX_FIELD_EQ(inName,"__priorities") ) { return hx::Val( _hx___priorities ); }
	}
	return super::__Field(inName,inCallProp);
}

hx::Val _Event_lime_ui_GamepadButton_Void_obj::__SetField(const ::String &inName,const hx::Val &inValue,hx::PropertyAccess inCallProp) {
	switch(inName.length) {
	case 8:
		if (HX_FIELD_EQ(inName,"canceled") ) { canceled=inValue.Cast< bool >(); return inValue; }
		if (HX_FIELD_EQ(inName,"__repeat") ) { _hx___repeat=inValue.Cast< ::Array< bool > >(); return inValue; }
		break;
	case 11:
		if (HX_FIELD_EQ(inName,"__listeners") ) { _hx___listeners=inValue.Cast< ::Array< ::Dynamic> >(); return inValue; }
		break;
	case 12:
		if (HX_FIELD_EQ(inName,"__priorities") ) { _hx___priorities=inValue.Cast< ::Array< int > >(); return inValue; }
	}
	return super::__SetField(inName,inValue,inCallProp);
}

void _Event_lime_ui_GamepadButton_Void_obj::__GetFields(Array< ::String> &outFields) {
	outFields->push(HX_("canceled",59,18,26,1f));
	outFields->push(HX_("__repeat",7b,02,ac,ae));
	outFields->push(HX_("__priorities",e2,cb,e6,1c));
	outFields->push(HX_("__listeners",5f,ae,ba,21));
	super::__GetFields(outFields);
};

#ifdef HXCPP_SCRIPTABLE
static hx::StorageInfo _Event_lime_ui_GamepadButton_Void_obj_sMemberStorageInfo[] = {
	{hx::fsBool,(int)offsetof(_Event_lime_ui_GamepadButton_Void_obj,canceled),HX_("canceled",59,18,26,1f)},
	{hx::fsObject /* ::Array< bool > */ ,(int)offsetof(_Event_lime_ui_GamepadButton_Void_obj,_hx___repeat),HX_("__repeat",7b,02,ac,ae)},
	{hx::fsObject /* ::Array< int > */ ,(int)offsetof(_Event_lime_ui_GamepadButton_Void_obj,_hx___priorities),HX_("__priorities",e2,cb,e6,1c)},
	{hx::fsObject /* ::Array< ::Dynamic> */ ,(int)offsetof(_Event_lime_ui_GamepadButton_Void_obj,_hx___listeners),HX_("__listeners",5f,ae,ba,21)},
	{ hx::fsUnknown, 0, null()}
};
static hx::StaticInfo *_Event_lime_ui_GamepadButton_Void_obj_sStaticStorageInfo = 0;
#endif

static ::String _Event_lime_ui_GamepadButton_Void_obj_sMemberFields[] = {
	HX_("canceled",59,18,26,1f),
	HX_("__repeat",7b,02,ac,ae),
	HX_("__priorities",e2,cb,e6,1c),
	HX_("add",21,f2,49,00),
	HX_("cancel",7a,ed,33,b8),
	HX_("has",5a,3f,4f,00),
	HX_("remove",44,9c,88,04),
	HX_("__listeners",5f,ae,ba,21),
	HX_("dispatch",ba,ce,63,1e),
	::String(null()) };

hx::Class _Event_lime_ui_GamepadButton_Void_obj::__mClass;

// Registers the class metadata with the hxcpp runtime.
void _Event_lime_ui_GamepadButton_Void_obj::__register() {
	_Event_lime_ui_GamepadButton_Void_obj _hx_dummy;
	_Event_lime_ui_GamepadButton_Void_obj::_hx_vtable = *(void **)&_hx_dummy;
	hx::Static(__mClass) = new hx::Class_obj();
	__mClass->mName = HX_("lime.app._Event_lime_ui_GamepadButton_Void",1f,d2,56,ba);
	__mClass->mSuper = &super::__SGetClass();
	__mClass->mConstructEmpty = &__CreateEmpty;
	__mClass->mConstructArgs = &__Create;
	__mClass->mGetStaticField = &hx::Class_obj::GetNoStaticField;
	__mClass->mSetStaticField = &hx::Class_obj::SetNoStaticField;
	__mClass->mStatics = hx::Class_obj::dupFunctions(0 /* sStaticFields */);
	__mClass->mMembers = hx::Class_obj::dupFunctions(_Event_lime_ui_GamepadButton_Void_obj_sMemberFields);
	__mClass->mCanCast = hx::TCanCast< _Event_lime_ui_GamepadButton_Void_obj >;
#ifdef HXCPP_SCRIPTABLE
	__mClass->mMemberStorageInfo = _Event_lime_ui_GamepadButton_Void_obj_sMemberStorageInfo;
#endif
#ifdef HXCPP_SCRIPTABLE
	__mClass->mStaticStorageInfo = _Event_lime_ui_GamepadButton_Void_obj_sStaticStorageInfo;
#endif
	hx::_hx_RegisterClass(__mClass->mName, __mClass);
}

} // end namespace lime
} // end namespace app
//////////////////////////////////////////////////////////////////////////////// // /// Vookoo high level C++ Vulkan interface. // /// (C) Andy Thomason 2017 MIT License // /// This is a utility set alongside the vkcpp C++ interface to Vulkan which makes /// constructing Vulkan pipelines and resources very easy for beginners. // /// It is expected that once familar with the Vulkan C++ interface you may wish /// to "go it alone" but we hope that this will make the learning experience a joyful one. // /// You can use it with the demo framework, stand alone and mixed with C or C++ Vulkan objects. /// It should integrate with game engines nicely. // //////////////////////////////////////////////////////////////////////////////// #ifndef VKU_HPP #define VKU_HPP #include <array> #include <fstream> #include <iostream> #include <unordered_map> #include <vector> #include <thread> #include <chrono> #include <functional> #include <cstddef> #include <vulkan/spirv.hpp11> #include <vulkan/vulkan.hpp> namespace vku { /// Printf-style formatting function. template <class... Args> std::string format(const char* fmt, Args... args) { int n = snprintf(nullptr, 0, fmt, args...); std::string result(n, '\0'); snprintf(&*result.begin(), n + 1, fmt, args...); return result; } /// Utility function for finding memory types for uniforms and images. inline int findMemoryTypeIndex(const vk::PhysicalDeviceMemoryProperties& memprops, uint32_t memoryTypeBits, vk::MemoryPropertyFlags search) { for (int i = 0; i != memprops.memoryTypeCount; ++i, memoryTypeBits >>= 1) { if (memoryTypeBits & 1) { if ((memprops.memoryTypes[i].propertyFlags & search) == search) { return i; } } } return -1; } /// Execute commands immediately and wait for the device to finish. 
/// Run a one-off command buffer on the given queue and block until the
/// device is idle again. The caller's functor records the commands.
inline void executeImmediately(vk::Device device, vk::CommandPool commandPool, vk::Queue queue, const std::function<void(vk::CommandBuffer cb)>& func) {
  vk::CommandBufferAllocateInfo allocInfo{ commandPool, vk::CommandBufferLevel::ePrimary, 1 };
  auto commandBuffers = device.allocateCommandBuffers(allocInfo);

  // Record the caller's commands into the single primary buffer.
  commandBuffers[0].begin(vk::CommandBufferBeginInfo{});
  func(commandBuffers[0]);
  commandBuffers[0].end();

  // Submit with no fence and wait for the whole device instead.
  vk::SubmitInfo submitInfo;
  submitInfo.commandBufferCount = (uint32_t)commandBuffers.size();
  submitInfo.pCommandBuffers = commandBuffers.data();
  queue.submit(submitInfo, vk::Fence{});
  device.waitIdle();

  device.freeCommandBuffers(commandPool, commandBuffers);
}

/// Scale a value by mip level, but do not reduce to zero.
inline uint32_t mipScale(uint32_t value, uint32_t mipLevel) {
  uint32_t scaled = value >> mipLevel;
  return scaled > 1u ? scaled : 1u;
}

/// Load a binary file into a vector.
/// The vector will be zero-length if this fails.
inline std::vector<uint8_t> loadFile(const std::string& filename) {
  // Open at the end (ate) so tellg() immediately yields the file size.
  std::ifstream is(filename, std::ios::binary | std::ios::ate);
  std::vector<uint8_t> bytes;
  if (is.fail()) {
    return bytes;
  }
  size_t size = is.tellg();
  is.seekg(0);
  bytes.resize(size);
  is.read((char*)bytes.data(), size);
  return bytes;
}

/// Description of blocks for compressed formats.
struct BlockParams {
  uint8_t blockWidth;
  uint8_t blockHeight;
  uint8_t bytesPerBlock;
};

/// Get the details of vulkan texture formats.
inline BlockParams getBlockParams(vk::Format format) { switch (format) { case vk::Format::eR4G4UnormPack8: return BlockParams{ 1, 1, 1 }; case vk::Format::eR4G4B4A4UnormPack16: return BlockParams{ 1, 1, 2 }; case vk::Format::eB4G4R4A4UnormPack16: return BlockParams{ 1, 1, 2 }; case vk::Format::eR5G6B5UnormPack16: return BlockParams{ 1, 1, 2 }; case vk::Format::eB5G6R5UnormPack16: return BlockParams{ 1, 1, 2 }; case vk::Format::eR5G5B5A1UnormPack16: return BlockParams{ 1, 1, 2 }; case vk::Format::eB5G5R5A1UnormPack16: return BlockParams{ 1, 1, 2 }; case vk::Format::eA1R5G5B5UnormPack16: return BlockParams{ 1, 1, 2 }; case vk::Format::eR8Unorm: return BlockParams{ 1, 1, 1 }; case vk::Format::eR8Snorm: return BlockParams{ 1, 1, 1 }; case vk::Format::eR8Uscaled: return BlockParams{ 1, 1, 1 }; case vk::Format::eR8Sscaled: return BlockParams{ 1, 1, 1 }; case vk::Format::eR8Uint: return BlockParams{ 1, 1, 1 }; case vk::Format::eR8Sint: return BlockParams{ 1, 1, 1 }; case vk::Format::eR8Srgb: return BlockParams{ 1, 1, 1 }; case vk::Format::eR8G8Unorm: return BlockParams{ 1, 1, 2 }; case vk::Format::eR8G8Snorm: return BlockParams{ 1, 1, 2 }; case vk::Format::eR8G8Uscaled: return BlockParams{ 1, 1, 2 }; case vk::Format::eR8G8Sscaled: return BlockParams{ 1, 1, 2 }; case vk::Format::eR8G8Uint: return BlockParams{ 1, 1, 2 }; case vk::Format::eR8G8Sint: return BlockParams{ 1, 1, 2 }; case vk::Format::eR8G8Srgb: return BlockParams{ 1, 1, 2 }; case vk::Format::eR8G8B8Unorm: return BlockParams{ 1, 1, 3 }; case vk::Format::eR8G8B8Snorm: return BlockParams{ 1, 1, 3 }; case vk::Format::eR8G8B8Uscaled: return BlockParams{ 1, 1, 3 }; case vk::Format::eR8G8B8Sscaled: return BlockParams{ 1, 1, 3 }; case vk::Format::eR8G8B8Uint: return BlockParams{ 1, 1, 3 }; case vk::Format::eR8G8B8Sint: return BlockParams{ 1, 1, 3 }; case vk::Format::eR8G8B8Srgb: return BlockParams{ 1, 1, 3 }; case vk::Format::eB8G8R8Unorm: return BlockParams{ 1, 1, 3 }; case vk::Format::eB8G8R8Snorm: return BlockParams{ 
1, 1, 3 }; case vk::Format::eB8G8R8Uscaled: return BlockParams{ 1, 1, 3 }; case vk::Format::eB8G8R8Sscaled: return BlockParams{ 1, 1, 3 }; case vk::Format::eB8G8R8Uint: return BlockParams{ 1, 1, 3 }; case vk::Format::eB8G8R8Sint: return BlockParams{ 1, 1, 3 }; case vk::Format::eB8G8R8Srgb: return BlockParams{ 1, 1, 3 }; case vk::Format::eR8G8B8A8Unorm: return BlockParams{ 1, 1, 4 }; case vk::Format::eR8G8B8A8Snorm: return BlockParams{ 1, 1, 4 }; case vk::Format::eR8G8B8A8Uscaled: return BlockParams{ 1, 1, 4 }; case vk::Format::eR8G8B8A8Sscaled: return BlockParams{ 1, 1, 4 }; case vk::Format::eR8G8B8A8Uint: return BlockParams{ 1, 1, 4 }; case vk::Format::eR8G8B8A8Sint: return BlockParams{ 1, 1, 4 }; case vk::Format::eR8G8B8A8Srgb: return BlockParams{ 1, 1, 4 }; case vk::Format::eB8G8R8A8Unorm: return BlockParams{ 1, 1, 4 }; case vk::Format::eB8G8R8A8Snorm: return BlockParams{ 1, 1, 4 }; case vk::Format::eB8G8R8A8Uscaled: return BlockParams{ 1, 1, 4 }; case vk::Format::eB8G8R8A8Sscaled: return BlockParams{ 1, 1, 4 }; case vk::Format::eB8G8R8A8Uint: return BlockParams{ 1, 1, 4 }; case vk::Format::eB8G8R8A8Sint: return BlockParams{ 1, 1, 4 }; case vk::Format::eB8G8R8A8Srgb: return BlockParams{ 1, 1, 4 }; case vk::Format::eA8B8G8R8UnormPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA8B8G8R8SnormPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA8B8G8R8UscaledPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA8B8G8R8SscaledPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA8B8G8R8UintPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA8B8G8R8SintPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA8B8G8R8SrgbPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2R10G10B10UnormPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2R10G10B10SnormPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2R10G10B10UscaledPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2R10G10B10SscaledPack32: return BlockParams{ 1, 
1, 4 }; case vk::Format::eA2R10G10B10UintPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2R10G10B10SintPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2B10G10R10UnormPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2B10G10R10SnormPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2B10G10R10UscaledPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2B10G10R10SscaledPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2B10G10R10UintPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eA2B10G10R10SintPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eR16Unorm: return BlockParams{ 1, 1, 2 }; case vk::Format::eR16Snorm: return BlockParams{ 1, 1, 2 }; case vk::Format::eR16Uscaled: return BlockParams{ 1, 1, 2 }; case vk::Format::eR16Sscaled: return BlockParams{ 1, 1, 2 }; case vk::Format::eR16Uint: return BlockParams{ 1, 1, 2 }; case vk::Format::eR16Sint: return BlockParams{ 1, 1, 2 }; case vk::Format::eR16Sfloat: return BlockParams{ 1, 1, 2 }; case vk::Format::eR16G16Unorm: return BlockParams{ 1, 1, 4 }; case vk::Format::eR16G16Snorm: return BlockParams{ 1, 1, 4 }; case vk::Format::eR16G16Uscaled: return BlockParams{ 1, 1, 4 }; case vk::Format::eR16G16Sscaled: return BlockParams{ 1, 1, 4 }; case vk::Format::eR16G16Uint: return BlockParams{ 1, 1, 4 }; case vk::Format::eR16G16Sint: return BlockParams{ 1, 1, 4 }; case vk::Format::eR16G16Sfloat: return BlockParams{ 1, 1, 4 }; case vk::Format::eR16G16B16Unorm: return BlockParams{ 1, 1, 6 }; case vk::Format::eR16G16B16Snorm: return BlockParams{ 1, 1, 6 }; case vk::Format::eR16G16B16Uscaled: return BlockParams{ 1, 1, 6 }; case vk::Format::eR16G16B16Sscaled: return BlockParams{ 1, 1, 6 }; case vk::Format::eR16G16B16Uint: return BlockParams{ 1, 1, 6 }; case vk::Format::eR16G16B16Sint: return BlockParams{ 1, 1, 6 }; case vk::Format::eR16G16B16Sfloat: return BlockParams{ 1, 1, 6 }; case vk::Format::eR16G16B16A16Unorm: return BlockParams{ 1, 1, 8 }; case 
vk::Format::eR16G16B16A16Snorm: return BlockParams{ 1, 1, 8 }; case vk::Format::eR16G16B16A16Uscaled: return BlockParams{ 1, 1, 8 }; case vk::Format::eR16G16B16A16Sscaled: return BlockParams{ 1, 1, 8 }; case vk::Format::eR16G16B16A16Uint: return BlockParams{ 1, 1, 8 }; case vk::Format::eR16G16B16A16Sint: return BlockParams{ 1, 1, 8 }; case vk::Format::eR16G16B16A16Sfloat: return BlockParams{ 1, 1, 8 }; case vk::Format::eR32Uint: return BlockParams{ 1, 1, 4 }; case vk::Format::eR32Sint: return BlockParams{ 1, 1, 4 }; case vk::Format::eR32Sfloat: return BlockParams{ 1, 1, 4 }; case vk::Format::eR32G32Uint: return BlockParams{ 1, 1, 8 }; case vk::Format::eR32G32Sint: return BlockParams{ 1, 1, 8 }; case vk::Format::eR32G32Sfloat: return BlockParams{ 1, 1, 8 }; case vk::Format::eR32G32B32Uint: return BlockParams{ 1, 1, 12 }; case vk::Format::eR32G32B32Sint: return BlockParams{ 1, 1, 12 }; case vk::Format::eR32G32B32Sfloat: return BlockParams{ 1, 1, 12 }; case vk::Format::eR32G32B32A32Uint: return BlockParams{ 1, 1, 16 }; case vk::Format::eR32G32B32A32Sint: return BlockParams{ 1, 1, 16 }; case vk::Format::eR32G32B32A32Sfloat: return BlockParams{ 1, 1, 16 }; case vk::Format::eR64Uint: return BlockParams{ 1, 1, 8 }; case vk::Format::eR64Sint: return BlockParams{ 1, 1, 8 }; case vk::Format::eR64Sfloat: return BlockParams{ 1, 1, 8 }; case vk::Format::eR64G64Uint: return BlockParams{ 1, 1, 16 }; case vk::Format::eR64G64Sint: return BlockParams{ 1, 1, 16 }; case vk::Format::eR64G64Sfloat: return BlockParams{ 1, 1, 16 }; case vk::Format::eR64G64B64Uint: return BlockParams{ 1, 1, 24 }; case vk::Format::eR64G64B64Sint: return BlockParams{ 1, 1, 24 }; case vk::Format::eR64G64B64Sfloat: return BlockParams{ 1, 1, 24 }; case vk::Format::eR64G64B64A64Uint: return BlockParams{ 1, 1, 32 }; case vk::Format::eR64G64B64A64Sint: return BlockParams{ 1, 1, 32 }; case vk::Format::eR64G64B64A64Sfloat: return BlockParams{ 1, 1, 32 }; case vk::Format::eB10G11R11UfloatPack32: return BlockParams{ 
1, 1, 4 }; case vk::Format::eE5B9G9R9UfloatPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eD16Unorm: return BlockParams{ 1, 1, 4 }; case vk::Format::eX8D24UnormPack32: return BlockParams{ 1, 1, 4 }; case vk::Format::eD32Sfloat: return BlockParams{ 1, 1, 4 }; case vk::Format::eS8Uint: return BlockParams{ 1, 1, 1 }; case vk::Format::eD16UnormS8Uint: return BlockParams{ 1, 1, 3 }; case vk::Format::eD24UnormS8Uint: return BlockParams{ 1, 1, 4 }; case vk::Format::eD32SfloatS8Uint: return BlockParams{ 0, 0, 0 }; case vk::Format::eBc1RgbUnormBlock: return BlockParams{ 4, 4, 8 }; case vk::Format::eBc1RgbSrgbBlock: return BlockParams{ 4, 4, 8 }; case vk::Format::eBc1RgbaUnormBlock: return BlockParams{ 4, 4, 8 }; case vk::Format::eBc1RgbaSrgbBlock: return BlockParams{ 4, 4, 8 }; case vk::Format::eBc2UnormBlock: return BlockParams{ 4, 4, 16 }; case vk::Format::eBc2SrgbBlock: return BlockParams{ 4, 4, 16 }; case vk::Format::eBc3UnormBlock: return BlockParams{ 4, 4, 16 }; case vk::Format::eBc3SrgbBlock: return BlockParams{ 4, 4, 16 }; case vk::Format::eBc4UnormBlock: return BlockParams{ 4, 4, 16 }; case vk::Format::eBc4SnormBlock: return BlockParams{ 4, 4, 16 }; case vk::Format::eBc5UnormBlock: return BlockParams{ 4, 4, 16 }; case vk::Format::eBc5SnormBlock: return BlockParams{ 4, 4, 16 }; case vk::Format::eBc6HUfloatBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eBc6HSfloatBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eBc7UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eBc7SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eEtc2R8G8B8UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eEtc2R8G8B8SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eEtc2R8G8B8A1UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eEtc2R8G8B8A1SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eEtc2R8G8B8A8UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eEtc2R8G8B8A8SrgbBlock: return BlockParams{ 0, 0, 0 }; 
case vk::Format::eEacR11UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eEacR11SnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eEacR11G11UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eEacR11G11SnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc4x4UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc4x4SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc5x4UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc5x4SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc5x5UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc5x5SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc6x5UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc6x5SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc6x6UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc6x6SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc8x5UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc8x5SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc8x6UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc8x6SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc8x8UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc8x8SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc10x5UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc10x5SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc10x6UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc10x6SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc10x8UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc10x8SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc10x10UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc10x10SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc12x10UnormBlock: return BlockParams{ 0, 0, 0 }; case 
vk::Format::eAstc12x10SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc12x12UnormBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::eAstc12x12SrgbBlock: return BlockParams{ 0, 0, 0 }; case vk::Format::ePvrtc12BppUnormBlockIMG: return BlockParams{ 0, 0, 0 }; case vk::Format::ePvrtc14BppUnormBlockIMG: return BlockParams{ 0, 0, 0 }; case vk::Format::ePvrtc22BppUnormBlockIMG: return BlockParams{ 0, 0, 0 }; case vk::Format::ePvrtc24BppUnormBlockIMG: return BlockParams{ 0, 0, 0 }; case vk::Format::ePvrtc12BppSrgbBlockIMG: return BlockParams{ 0, 0, 0 }; case vk::Format::ePvrtc14BppSrgbBlockIMG: return BlockParams{ 0, 0, 0 }; case vk::Format::ePvrtc22BppSrgbBlockIMG: return BlockParams{ 0, 0, 0 }; case vk::Format::ePvrtc24BppSrgbBlockIMG: return BlockParams{ 0, 0, 0 }; } return BlockParams{ 0, 0, 0 }; } /// Factory for renderpasses. /// example: /// RenderpassMaker rpm; /// rpm.beginSubpass(vk::PipelineBindPoint::eGraphics); /// rpm.subpassColorAttachment(vk::ImageLayout::eColorAttachmentOptimal); /// /// rpm.attachmentDescription(attachmentDesc); /// rpm.subpassDependency(dependency); /// s.renderPass_ = rpm.createUnique(device); class RenderpassMaker { public: RenderpassMaker() {} /// Begin an attachment description. 
/// After this you can call attachment* many times void attachmentBegin(vk::Format format) { vk::AttachmentDescription desc{ {}, format }; s.attachmentDescriptions.push_back(desc); } void attachmentFlags(vk::AttachmentDescriptionFlags value) { s.attachmentDescriptions.back().flags = value; }; void attachmentFormat(vk::Format value) { s.attachmentDescriptions.back().format = value; }; void attachmentSamples(vk::SampleCountFlagBits value) { s.attachmentDescriptions.back().samples = value; }; void attachmentLoadOp(vk::AttachmentLoadOp value) { s.attachmentDescriptions.back().loadOp = value; }; void attachmentStoreOp(vk::AttachmentStoreOp value) { s.attachmentDescriptions.back().storeOp = value; }; void attachmentStencilLoadOp(vk::AttachmentLoadOp value) { s.attachmentDescriptions.back().stencilLoadOp = value; }; void attachmentStencilStoreOp(vk::AttachmentStoreOp value) { s.attachmentDescriptions.back().stencilStoreOp = value; }; void attachmentInitialLayout(vk::ImageLayout value) { s.attachmentDescriptions.back().initialLayout = value; }; void attachmentFinalLayout(vk::ImageLayout value) { s.attachmentDescriptions.back().finalLayout = value; }; /// Start a subpass description. /// After this you can can call subpassColorAttachment many times /// and subpassDepthStencilAttachment once. 
/// Begin a new subpass; subsequent subpass*Attachment calls attach to it.
void subpassBegin(vk::PipelineBindPoint bp) {
  vk::SubpassDescription desc{};
  desc.pipelineBindPoint = bp;
  s.subpassDescriptions.push_back(desc);
}

/// Add a colour attachment reference to the current subpass.
/// NOTE(review): references are handed out sequentially from a fixed pool, so a
/// subpass's colour attachments are only contiguous in memory if nothing else
/// (e.g. a depth/stencil reference) is requested in between — confirm callers
/// add all colours before the depth attachment. Also getAttachmentReference()
/// returns nullptr once max_refs is exhausted, which would be dereferenced here.
void subpassColorAttachment(vk::ImageLayout layout, uint32_t attachment) {
  vk::SubpassDescription& subpass = s.subpassDescriptions.back();
  auto* p = getAttachmentReference();
  p->layout = layout;
  p->attachment = attachment;
  // Point the subpass at the first colour reference; later ones are assumed
  // to follow it contiguously in the pool.
  if (subpass.colorAttachmentCount == 0) {
    subpass.pColorAttachments = p;
  }
  subpass.colorAttachmentCount++;
}

/// Set the single depth/stencil attachment reference of the current subpass.
void subpassDepthStencilAttachment(vk::ImageLayout layout, uint32_t attachment) {
  vk::SubpassDescription& subpass = s.subpassDescriptions.back();
  auto* p = getAttachmentReference();
  p->layout = layout;
  p->attachment = attachment;
  subpass.pDepthStencilAttachment = p;
}

/// Build a self-deleting render pass from the accumulated attachments,
/// subpasses and dependencies.
vk::UniqueRenderPass createUnique(const vk::Device& device) const {
  vk::RenderPassCreateInfo renderPassInfo{};
  renderPassInfo.attachmentCount = (uint32_t)s.attachmentDescriptions.size();
  renderPassInfo.pAttachments = s.attachmentDescriptions.data();
  renderPassInfo.subpassCount = (uint32_t)s.subpassDescriptions.size();
  renderPassInfo.pSubpasses = s.subpassDescriptions.data();
  renderPassInfo.dependencyCount = (uint32_t)s.subpassDependencies.size();
  renderPassInfo.pDependencies = s.subpassDependencies.data();
  return device.createRenderPassUnique(renderPassInfo);
}

/// Begin a subpass dependency; subsequent dependency*() calls modify it.
void dependencyBegin(uint32_t srcSubpass, uint32_t dstSubpass) {
  vk::SubpassDependency desc{};
  desc.srcSubpass = srcSubpass;
  desc.dstSubpass = dstSubpass;
  s.subpassDependencies.push_back(desc);
}

// Setters for the dependency most recently begun with dependencyBegin().
void dependencySrcSubpass(uint32_t value) { s.subpassDependencies.back().srcSubpass = value; };
void dependencyDstSubpass(uint32_t value) { s.subpassDependencies.back().dstSubpass = value; };
void dependencySrcStageMask(vk::PipelineStageFlags value) { s.subpassDependencies.back().srcStageMask = value; };
void dependencyDstStageMask(vk::PipelineStageFlags value) { s.subpassDependencies.back().dstStageMask = value; };
void dependencySrcAccessMask(vk::AccessFlags value) {
s.subpassDependencies.back().srcAccessMask = value; }; void dependencyDstAccessMask(vk::AccessFlags value) { s.subpassDependencies.back().dstAccessMask = value; }; void dependencyDependencyFlags(vk::DependencyFlags value) { s.subpassDependencies.back().dependencyFlags = value; }; private: constexpr static int max_refs = 64; vk::AttachmentReference* getAttachmentReference() { return (s.num_refs < max_refs) ? &s.attachmentReferences[s.num_refs++] : nullptr; } struct State { std::vector<vk::AttachmentDescription> attachmentDescriptions; std::vector<vk::SubpassDescription> subpassDescriptions; std::vector<vk::SubpassDependency> subpassDependencies; std::array<vk::AttachmentReference, max_refs> attachmentReferences; int num_refs = 0; bool ok_ = false; }; State s; }; /// Class for building shader modules and extracting metadata from shaders. class ShaderModule { public: ShaderModule() {} /// Construct a shader module from a file ShaderModule(const vk::Device& device, const std::string& filename) { auto file = std::ifstream(filename, std::ios::binary); if (file.bad()) { return; } file.seekg(0, std::ios::end); int length = (int)file.tellg(); s.opcodes_.resize((size_t)(length / 4)); file.seekg(0, std::ios::beg); file.read((char*)s.opcodes_.data(), s.opcodes_.size() * 4); vk::ShaderModuleCreateInfo ci; ci.codeSize = s.opcodes_.size() * 4; ci.pCode = s.opcodes_.data(); s.module_ = device.createShaderModuleUnique(ci); s.ok_ = true; } /// Construct a shader module from a memory template <class InIter> ShaderModule(const vk::Device& device, InIter begin, InIter end) { s.opcodes_.assign(begin, end); vk::ShaderModuleCreateInfo ci; ci.codeSize = s.opcodes_.size() * 4; ci.pCode = s.opcodes_.data(); s.module_ = device.createShaderModuleUnique(ci); s.ok_ = true; } /// A variable in a shader. struct Variable { // The name of the variable from the GLSL/HLSL std::string debugName; // The internal name (integer) of the variable int name; // The location in the binding. 
int location; // The binding in the descriptor set or I/O channel. int binding; // The descriptor set (for uniforms) int set; int instruction; // Storage class of the variable, eg. spv::StorageClass::Uniform spv::StorageClass storageClass; }; /// Get a list of variables from the shader. /// /// This exposes the Uniforms, inputs, outputs, push constants. /// See spv::StorageClass for more details. std::vector<Variable> getVariables() const { auto bound = s.opcodes_[3]; std::unordered_map<int, int> bindings; std::unordered_map<int, int> locations; std::unordered_map<int, int> sets; std::unordered_map<int, std::string> debugNames; for (int i = 5; i != s.opcodes_.size(); i += s.opcodes_[i] >> 16) { spv::Op op = spv::Op(s.opcodes_[i] & 0xffff); if (op == spv::Op::OpDecorate) { int name = s.opcodes_[i + 1]; auto decoration = spv::Decoration(s.opcodes_[i + 2]); if (decoration == spv::Decoration::Binding) { bindings[name] = s.opcodes_[i + 3]; } else if (decoration == spv::Decoration::Location) { locations[name] = s.opcodes_[i + 3]; } else if (decoration == spv::Decoration::DescriptorSet) { sets[name] = s.opcodes_[i + 3]; } } else if (op == spv::Op::OpName) { int name = s.opcodes_[i + 1]; debugNames[name] = (const char*)&s.opcodes_[i + 2]; } } std::vector<Variable> result; for (int i = 5; i != s.opcodes_.size(); i += s.opcodes_[i] >> 16) { spv::Op op = spv::Op(s.opcodes_[i] & 0xffff); if (op == spv::Op::OpVariable) { int name = s.opcodes_[i + 1]; auto sc = spv::StorageClass(s.opcodes_[i + 3]); Variable b; b.debugName = debugNames[name]; b.name = name; b.location = locations[name]; b.set = sets[name]; b.instruction = i; b.storageClass = sc; result.push_back(b); } } return std::move(result); } bool ok() const { return s.ok_; } VkShaderModule module() { return *s.module_; } /// Write a C++ consumable dump of the shader. /// Todo: make this more idiomatic. 
std::ostream& write(std::ostream& os) { os << "static const uint32_t shader[] = {\n"; char tmp[256]; auto p = s.opcodes_.begin(); snprintf(tmp, sizeof(tmp), " 0x%08x,0x%08x,0x%08x,0x%08x,0x%08x,\n", p[0], p[1], p[2], p[3], p[4]); os << tmp; for (int i = 5; i != s.opcodes_.size(); i += s.opcodes_[i] >> 16) { char *p = tmp + 2, *e = tmp + sizeof(tmp) - 2; for (int j = i; j != i + (s.opcodes_[i] >> 16); ++j) { p += snprintf(p, e - p, "0x%08x,", s.opcodes_[j]); if (p > e - 16) { *p++ = '\n'; *p = 0; os << tmp; p = tmp + 2; } } *p++ = '\n'; *p = 0; os << tmp; } os << "};\n\n"; return os; } private: struct State { std::vector<uint32_t> opcodes_; vk::UniqueShaderModule module_; bool ok_; }; State s; }; /// A class for building pipeline layouts. /// Pipeline layouts describe the descriptor sets and push constants used by the shaders. class PipelineLayoutMaker { public: PipelineLayoutMaker() {} /// Create a self-deleting pipeline layout object. vk::UniquePipelineLayout createUnique(const vk::Device& device) const { vk::PipelineLayoutCreateInfo pipelineLayoutInfo{ {}, (uint32_t)setLayouts_.size(), setLayouts_.data(), (uint32_t)pushConstantRanges_.size(), pushConstantRanges_.data() }; return std::move(device.createPipelineLayoutUnique(pipelineLayoutInfo)); } /// Add a descriptor set layout to the pipeline. void descriptorSetLayout(vk::DescriptorSetLayout layout) { setLayouts_.push_back(layout); } /// Add a push constant range to the pipeline. /// These describe the size and location of variables in the push constant area. void pushConstantRange(vk::ShaderStageFlags stageFlags_, uint32_t offset_, uint32_t size_) { pushConstantRanges_.emplace_back(stageFlags_, offset_, size_); } private: std::vector<vk::DescriptorSetLayout> setLayouts_; std::vector<vk::PushConstantRange> pushConstantRanges_; }; /// A class for building pipelines. /// All the state of the pipeline is exposed through individual calls. /// The pipeline encapsulates all the OpenGL state in a single object. 
/// This includes vertex buffer layouts, blend operations, shaders, line width etc.
/// This class exposes all the values as individuals so a pipeline can be customised.
/// The default is to generate a working pipeline.
class PipelineMaker {
public:
  /// Initialise with a fixed width x height viewport/scissor and defaults:
  /// triangle-list topology, line width 1.0, depth test configured but disabled.
  PipelineMaker(uint32_t width, uint32_t height) {
    inputAssemblyState_.topology = vk::PrimitiveTopology::eTriangleList;
    viewport_ = vk::Viewport{ 0.0f, 0.0f, (float)width, (float)height, 0.0f, 1.0f };
    scissor_ = vk::Rect2D{ { 0, 0 }, { width, height } };
    rasterizationState_.lineWidth = 1.0f;

    // Set up depth test, but do not enable it.
    depthStencilState_.depthTestEnable = VK_FALSE;
    depthStencilState_.depthWriteEnable = VK_TRUE;
    depthStencilState_.depthCompareOp = vk::CompareOp::eLessOrEqual;
    depthStencilState_.depthBoundsTestEnable = VK_FALSE;
    depthStencilState_.back.failOp = vk::StencilOp::eKeep;
    depthStencilState_.back.passOp = vk::StencilOp::eKeep;
    depthStencilState_.back.compareOp = vk::CompareOp::eAlways;
    depthStencilState_.stencilTestEnable = VK_FALSE;
    depthStencilState_.front = depthStencilState_.back;
  }

  /// Assemble all accumulated state into a graphics pipeline.
  /// If no blend attachment was specified and defaultBlend is true, a single
  /// non-blending write-through attachment is supplied.
  vk::UniquePipeline createUnique(const vk::Device& device,
      const vk::PipelineCache& pipelineCache,
      const vk::PipelineLayout& pipelineLayout,
      const vk::RenderPass& renderPass, bool defaultBlend = true) {
    // Add default colour blend attachment if necessary.
    if (colorBlendAttachments_.empty() && defaultBlend) {
      vk::PipelineColorBlendAttachmentState blend{};
      blend.blendEnable = 0;
      blend.srcColorBlendFactor = vk::BlendFactor::eOne;
      blend.dstColorBlendFactor = vk::BlendFactor::eZero;
      blend.colorBlendOp = vk::BlendOp::eAdd;
      blend.srcAlphaBlendFactor = vk::BlendFactor::eOne;
      blend.dstAlphaBlendFactor = vk::BlendFactor::eZero;
      blend.alphaBlendOp = vk::BlendOp::eAdd;
      typedef vk::ColorComponentFlagBits ccbf;
      blend.colorWriteMask = ccbf::eR | ccbf::eG | ccbf::eB | ccbf::eA;
      colorBlendAttachments_.push_back(blend);
    }
    auto count = (uint32_t)colorBlendAttachments_.size();
    colorBlendState_.attachmentCount = count;
    colorBlendState_.pAttachments = count ? colorBlendAttachments_.data() : nullptr;

    // NOTE: the *State structs below point into this object's members, so this
    // maker must stay alive until createGraphicsPipelineUnique returns.
    vk::PipelineViewportStateCreateInfo viewportState{
        {}, 1, &viewport_, 1, &scissor_ };

    vk::PipelineVertexInputStateCreateInfo vertexInputState;
    vertexInputState.vertexAttributeDescriptionCount = (uint32_t)vertexAttributeDescriptions_.size();
    vertexInputState.pVertexAttributeDescriptions = vertexAttributeDescriptions_.data();
    vertexInputState.vertexBindingDescriptionCount = (uint32_t)vertexBindingDescriptions_.size();
    vertexInputState.pVertexBindingDescriptions = vertexBindingDescriptions_.data();

    vk::PipelineDynamicStateCreateInfo dynState{
        {}, (uint32_t)dynamicState_.size(), dynamicState_.data() };

    vk::GraphicsPipelineCreateInfo pipelineInfo{};
    pipelineInfo.pVertexInputState = &vertexInputState;
    pipelineInfo.stageCount = (uint32_t)modules_.size();
    pipelineInfo.pStages = modules_.data();
    pipelineInfo.pInputAssemblyState = &inputAssemblyState_;
    pipelineInfo.pViewportState = &viewportState;
    pipelineInfo.pRasterizationState = &rasterizationState_;
    pipelineInfo.pMultisampleState = &multisampleState_;
    pipelineInfo.pColorBlendState = &colorBlendState_;
    pipelineInfo.pDepthStencilState = &depthStencilState_;
    pipelineInfo.layout = pipelineLayout;
    pipelineInfo.renderPass = renderPass;
    // Only point at dynState when there is dynamic state to describe.
    pipelineInfo.pDynamicState = dynamicState_.empty() ? nullptr : &dynState;
    pipelineInfo.subpass = subpass_;

    return device.createGraphicsPipelineUnique(pipelineCache, pipelineInfo);
  }

  /// Add a shader module to the pipeline.
  void shader(vk::ShaderStageFlagBits stage, vku::ShaderModule& shader,
      const char* entryPoint = "main") {
    vk::PipelineShaderStageCreateInfo info{};
    info.module = shader.module();
    info.pName = entryPoint;
    info.stage = stage;
    modules_.emplace_back(info);
  }

  /// Add a blend state to the pipeline for one colour attachment.
  /// If you don't do this, a default is used.
  void colorBlend(const vk::PipelineColorBlendAttachmentState& state) {
    colorBlendAttachments_.push_back(state);
  }

  /// Select which subpass of the render pass this pipeline is used in.
  void subPass(uint32_t subpass) {
    subpass_ = subpass;
  }

  /// Begin setting colour blend value
  /// If you don't do this, a default is used.
  /// Follow this with blendEnable() blendSrcColorBlendFactor() etc.
  /// Default is a regular alpha blend.
  void blendBegin(vk::Bool32 enable) {
    colorBlendAttachments_.emplace_back();
    auto& blend = colorBlendAttachments_.back();
    blend.blendEnable = enable;
    blend.srcColorBlendFactor = vk::BlendFactor::eSrcAlpha;
    blend.dstColorBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha;
    blend.colorBlendOp = vk::BlendOp::eAdd;
    blend.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha;
    blend.dstAlphaBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha;
    blend.alphaBlendOp = vk::BlendOp::eAdd;
    typedef vk::ColorComponentFlagBits ccbf;
    blend.colorWriteMask = ccbf::eR | ccbf::eG | ccbf::eB | ccbf::eA;
  }

  /// Enable or disable blending (called after blendBegin())
  void blendEnable(vk::Bool32 value) { colorBlendAttachments_.back().blendEnable = value; }

  /// Source colour blend factor (called after blendBegin())
  void blendSrcColorBlendFactor(vk::BlendFactor value) { colorBlendAttachments_.back().srcColorBlendFactor = value; }

  /// Destination colour blend factor (called after blendBegin())
  void blendDstColorBlendFactor(vk::BlendFactor value) { colorBlendAttachments_.back().dstColorBlendFactor = value; }

  /// Blend operation (called after blendBegin())
  void blendColorBlendOp(vk::BlendOp value) { colorBlendAttachments_.back().colorBlendOp = value; }

  /// Source alpha blend factor (called after blendBegin())
  void blendSrcAlphaBlendFactor(vk::BlendFactor value) { colorBlendAttachments_.back().srcAlphaBlendFactor = value; }

  /// Destination alpha blend factor (called after blendBegin())
  void blendDstAlphaBlendFactor(vk::BlendFactor value) { colorBlendAttachments_.back().dstAlphaBlendFactor = value; }

  /// Alpha operation (called after blendBegin())
  void blendAlphaBlendOp(vk::BlendOp value) { colorBlendAttachments_.back().alphaBlendOp = value; }

  /// Colour write mask (called after blendBegin())
  void blendColorWriteMask(vk::ColorComponentFlags value) { colorBlendAttachments_.back().colorWriteMask = value; }

  /// Add a vertex attribute to the pipeline.
  void vertexAttribute(uint32_t location_, uint32_t binding_, vk::Format format_, uint32_t offset_) {
    vertexAttributeDescriptions_.push_back({ location_, binding_, format_, offset_ });
  }

  /// Add a vertex attribute to the pipeline.
  void vertexAttribute(const vk::VertexInputAttributeDescription& desc) {
    vertexAttributeDescriptions_.push_back(desc);
  }

  /// Add a vertex binding to the pipeline.
  /// Usually only one of these is needed to specify the stride.
  /// Vertices can also be delivered one per instance.
  void vertexBinding(uint32_t binding_, uint32_t stride_, vk::VertexInputRate inputRate_ = vk::VertexInputRate::eVertex) {
    vertexBindingDescriptions_.push_back({ binding_, stride_, inputRate_ });
  }

  /// Add a vertex binding to the pipeline.
  /// Usually only one of these is needed to specify the stride.
  /// Vertices can also be delivered one per instance.
  void vertexBinding(const vk::VertexInputBindingDescription& desc) {
    vertexBindingDescriptions_.push_back(desc);
  }

  /// Specify the topology of the pipeline.
  /// Usually this is a triangle list, but points and lines are possible too.
PipelineMaker& topology(vk::PrimitiveTopology topology) {
  inputAssemblyState_.topology = topology;
  return *this;
}

/// Enable or disable primitive restart.
/// If using triangle strips, for example, this allows a special index value (0xffff or 0xffffffff) to start a new strip.
PipelineMaker& primitiveRestartEnable(vk::Bool32 primitiveRestartEnable) {
  inputAssemblyState_.primitiveRestartEnable = primitiveRestartEnable;
  return *this;
}

/// Set a whole new input assembly state.
/// Note you can set individual values with their own call
PipelineMaker& inputAssemblyState(const vk::PipelineInputAssemblyStateCreateInfo& value) {
  inputAssemblyState_ = value;
  return *this;
}

/// Set the viewport value.
/// Usually there is only one viewport, but you can have multiple viewports active for rendering cubemaps or VR stereo pair
PipelineMaker& viewport(const vk::Viewport& value) {
  viewport_ = value;
  return *this;
}

/// Set the scissor value.
/// This defines the area that the fragment shaders can write to. For example, if you are rendering a portal or a mirror.
PipelineMaker& scissor(const vk::Rect2D& value) {
  scissor_ = value;
  return *this;
}

// ---- rasterization state --------------------------------------------------
/// Set a whole rasterization state.
/// Note you can set individual values with their own call
PipelineMaker& rasterizationState(const vk::PipelineRasterizationStateCreateInfo& value) { rasterizationState_ = value; return *this; }
PipelineMaker& depthClampEnable(vk::Bool32 value) { rasterizationState_.depthClampEnable = value; return *this; }
PipelineMaker& rasterizerDiscardEnable(vk::Bool32 value) { rasterizationState_.rasterizerDiscardEnable = value; return *this; }
PipelineMaker& polygonMode(vk::PolygonMode value) { rasterizationState_.polygonMode = value; return *this; }
PipelineMaker& cullMode(vk::CullModeFlags value) { rasterizationState_.cullMode = value; return *this; }
PipelineMaker& frontFace(vk::FrontFace value) { rasterizationState_.frontFace = value; return *this; }
PipelineMaker& depthBiasEnable(vk::Bool32 value) { rasterizationState_.depthBiasEnable = value; return *this; }
PipelineMaker& depthBiasConstantFactor(float value) { rasterizationState_.depthBiasConstantFactor = value; return *this; }
PipelineMaker& depthBiasClamp(float value) { rasterizationState_.depthBiasClamp = value; return *this; }
PipelineMaker& depthBiasSlopeFactor(float value) { rasterizationState_.depthBiasSlopeFactor = value; return *this; }
PipelineMaker& lineWidth(float value) { rasterizationState_.lineWidth = value; return *this; }

// ---- multisample state ----------------------------------------------------
/// Set a whole multi sample state.
/// Note you can set individual values with their own call
PipelineMaker& multisampleState(const vk::PipelineMultisampleStateCreateInfo& value) { multisampleState_ = value; return *this; }
PipelineMaker& rasterizationSamples(vk::SampleCountFlagBits value) { multisampleState_.rasterizationSamples = value; return *this; }
PipelineMaker& sampleShadingEnable(vk::Bool32 value) { multisampleState_.sampleShadingEnable = value; return *this; }
PipelineMaker& minSampleShading(float value) { multisampleState_.minSampleShading = value; return *this; }
PipelineMaker& pSampleMask(const vk::SampleMask* value) { multisampleState_.pSampleMask = value; return *this; }
PipelineMaker& alphaToCoverageEnable(vk::Bool32 value) { multisampleState_.alphaToCoverageEnable = value; return *this; }
PipelineMaker& alphaToOneEnable(vk::Bool32 value) { multisampleState_.alphaToOneEnable = value; return *this; }

// ---- depth/stencil state --------------------------------------------------
/// Set a whole depth stencil state.
/// Note you can set individual values with their own call
PipelineMaker& depthStencilState(const vk::PipelineDepthStencilStateCreateInfo& value) { depthStencilState_ = value; return *this; }
PipelineMaker& depthTestEnable(vk::Bool32 value) { depthStencilState_.depthTestEnable = value; return *this; }
PipelineMaker& depthWriteEnable(vk::Bool32 value) { depthStencilState_.depthWriteEnable = value; return *this; }
PipelineMaker& depthCompareOp(vk::CompareOp value) { depthStencilState_.depthCompareOp = value; return *this; }
PipelineMaker& depthBoundsTestEnable(vk::Bool32 value) { depthStencilState_.depthBoundsTestEnable = value; return *this; }
PipelineMaker& stencilTestEnable(vk::Bool32 value) { depthStencilState_.stencilTestEnable = value; return *this; }
PipelineMaker& front(vk::StencilOpState value) { depthStencilState_.front = value; return *this; }
PipelineMaker& back(vk::StencilOpState value) { depthStencilState_.back = value; return *this; }
PipelineMaker& minDepthBounds(float value) { depthStencilState_.minDepthBounds = value; return *this; }
PipelineMaker& maxDepthBounds(float value) { depthStencilState_.maxDepthBounds = value; return *this; } /// Set a whole colour blend state. /// Note you can set individual values with their own call PipelineMaker& colorBlendState(const vk::PipelineColorBlendStateCreateInfo& value) { colorBlendState_ = value; return *this; } PipelineMaker& logicOpEnable(vk::Bool32 value) { colorBlendState_.logicOpEnable = value; return *this; } PipelineMaker& logicOp(vk::LogicOp value) { colorBlendState_.logicOp = value; return *this; } PipelineMaker& blendConstants(float r, float g, float b, float a) { float* bc = colorBlendState_.blendConstants; bc[0] = r; bc[1] = g; bc[2] = b; bc[3] = a; return *this; } PipelineMaker& dynamicState(vk::DynamicState value) { dynamicState_.push_back(value); } private: vk::PipelineInputAssemblyStateCreateInfo inputAssemblyState_; vk::Viewport viewport_; vk::Rect2D scissor_; vk::PipelineRasterizationStateCreateInfo rasterizationState_; vk::PipelineMultisampleStateCreateInfo multisampleState_; vk::PipelineDepthStencilStateCreateInfo depthStencilState_; vk::PipelineColorBlendStateCreateInfo colorBlendState_; std::vector<vk::PipelineColorBlendAttachmentState> colorBlendAttachments_; std::vector<vk::PipelineShaderStageCreateInfo> modules_; std::vector<vk::VertexInputAttributeDescription> vertexAttributeDescriptions_; std::vector<vk::VertexInputBindingDescription> vertexBindingDescriptions_; std::vector<vk::DynamicState> dynamicState_; uint32_t subpass_ = 0; }; /// A class for building compute pipelines. class ComputePipelineMaker { public: ComputePipelineMaker() {} /// Add a shader module to the pipeline. void shader(vk::ShaderStageFlagBits stage, vku::ShaderModule& shader, const char* entryPoint = "main") { stage_.module = shader.module(); stage_.pName = entryPoint; stage_.stage = stage; } /// Set the compute shader module. 
ComputePipelineMaker& module(const vk::PipelineShaderStageCreateInfo& value) { stage_ = value; } /// Create a managed handle to a compute shader. vk::UniquePipeline createUnique(vk::Device device, const vk::PipelineCache& pipelineCache, const vk::PipelineLayout& pipelineLayout) { vk::ComputePipelineCreateInfo pipelineInfo{}; pipelineInfo.stage = stage_; pipelineInfo.layout = pipelineLayout; return device.createComputePipelineUnique(pipelineCache, pipelineInfo); } private: vk::PipelineShaderStageCreateInfo stage_; }; /// A generic buffer that may be used as a vertex buffer, uniform buffer or other kinds of memory resident data. /// Buffers require memory objects which represent GPU and CPU resources. class GenericBuffer { public: GenericBuffer() {} GenericBuffer(vk::Device device, vk::PhysicalDeviceMemoryProperties memprops, vk::BufferUsageFlags usage, vk::DeviceSize size, vk::MemoryPropertyFlags memflags = vk::MemoryPropertyFlagBits::eDeviceLocal) { // Create the buffer object without memory. vk::BufferCreateInfo ci{}; ci.size = size_ = size; ci.usage = usage; ci.sharingMode = vk::SharingMode::eExclusive; buffer_ = device.createBufferUnique(ci); // Find out how much memory and which heap to allocate from. auto memreq = device.getBufferMemoryRequirements(*buffer_); // Create a memory object to bind to the buffer. vk::MemoryAllocateInfo mai{}; mai.allocationSize = memreq.size; mai.memoryTypeIndex = vku::findMemoryTypeIndex(memprops, memreq.memoryTypeBits, memflags); mem_ = device.allocateMemoryUnique(mai); device.bindBufferMemory(*buffer_, *mem_, 0); } /// For a host buffer, copy memory to the buffer object. void updateLocal(const vk::Device& device, const void* value, vk::DeviceSize size) const { void* ptr = device.mapMemory(*mem_, 0, size_, vk::MemoryMapFlags{}); memcpy(ptr, value, (size_t)size); flush(device); device.unmapMemory(*mem_); } /// For a device local buffer, copy memory to the buffer object immediately. /// Note that this will stall the pipeline! 
void upload(vk::Device device, const vk::PhysicalDeviceMemoryProperties& memprops, vk::CommandPool commandPool, vk::Queue queue, const void* value, vk::DeviceSize size) const { if (size == 0) return; using buf = vk::BufferUsageFlagBits; using pfb = vk::MemoryPropertyFlagBits; auto tmp = vku::GenericBuffer(device, memprops, buf::eTransferSrc, size, pfb::eHostVisible); tmp.updateLocal(device, value, size); vku::executeImmediately(device, commandPool, queue, [&](vk::CommandBuffer cb) { vk::BufferCopy bc{ 0, 0, size }; cb.copyBuffer(tmp.buffer(), *buffer_, bc); }); } template <typename T> void upload(vk::Device device, const vk::PhysicalDeviceMemoryProperties& memprops, vk::CommandPool commandPool, vk::Queue queue, const std::vector<T>& value) const { upload(device, memprops, commandPool, queue, value.data(), value.size() * sizeof(T)); } template <typename T> void upload(vk::Device device, const vk::PhysicalDeviceMemoryProperties& memprops, vk::CommandPool commandPool, vk::Queue queue, const T& value) const { upload(device, memprops, commandPool, queue, &value, sizeof(value)); } void barrier(vk::CommandBuffer cb, vk::PipelineStageFlags srcStageMask, vk::PipelineStageFlags dstStageMask, vk::DependencyFlags dependencyFlags, vk::AccessFlags srcAccessMask, vk::AccessFlags dstAccessMask, uint32_t srcQueueFamilyIndex, uint32_t dstQueueFamilyIndex) const { vk::BufferMemoryBarrier bmb{ srcAccessMask, dstAccessMask, srcQueueFamilyIndex, dstQueueFamilyIndex, *buffer_, 0, size_ }; cb.pipelineBarrier(srcStageMask, dstStageMask, dependencyFlags, nullptr, bmb, nullptr); } template <class Type, class Allocator> void updateLocal(const vk::Device& device, const std::vector<Type, Allocator>& value) const { updateLocal(device, (void*)value.data(), vk::DeviceSize(value.size() * sizeof(Type))); } template <class Type> void updateLocal(const vk::Device& device, const Type& value) const { updateLocal(device, (void*)&value, vk::DeviceSize(sizeof(Type))); } void* map(const vk::Device& device) 
const { return device.mapMemory(*mem_, 0, size_, vk::MemoryMapFlags{}); }; void unmap(const vk::Device& device) const { return device.unmapMemory(*mem_); }; void flush(const vk::Device& device) const { vk::MappedMemoryRange mr{ *mem_, 0, size_ }; return device.flushMappedMemoryRanges(mr); } void invalidate(const vk::Device& device) const { vk::MappedMemoryRange mr{ *mem_, 0, size_ }; return device.invalidateMappedMemoryRanges(mr); } vk::Buffer buffer() const { return *buffer_; } vk::DeviceMemory mem() const { return *mem_; } vk::DeviceSize size() const { return size_; } private: vk::UniqueBuffer buffer_; vk::UniqueDeviceMemory mem_; vk::DeviceSize size_; }; /// This class is a specialisation of GenericBuffer for high performance vertex buffers on the GPU. /// You must upload the contents before use. class VertexBuffer : public GenericBuffer { public: VertexBuffer() {} VertexBuffer(const vk::Device& device, const vk::PhysicalDeviceMemoryProperties& memprops, size_t size) : GenericBuffer(device, memprops, vk::BufferUsageFlagBits::eVertexBuffer, size, vk::MemoryPropertyFlagBits::eDeviceLocal) {} }; /// This class is a specialisation of GenericBuffer for low performance vertex buffers on the host. class HostVertexBuffer : public GenericBuffer { public: HostVertexBuffer() {} template <class Type, class Allocator> HostVertexBuffer(const vk::Device& device, const vk::PhysicalDeviceMemoryProperties& memprops, const std::vector<Type, Allocator>& value) : GenericBuffer(device, memprops, vk::BufferUsageFlagBits::eVertexBuffer, value.size() * sizeof(Type), vk::MemoryPropertyFlagBits::eHostVisible) { updateLocal(device, value); } }; /// This class is a specialisation of GenericBuffer for high performance index buffers. /// You must upload the contents before use. 
class IndexBuffer : public GenericBuffer { public: IndexBuffer() {} IndexBuffer(const vk::Device& device, const vk::PhysicalDeviceMemoryProperties& memprops, vk::DeviceSize size) : GenericBuffer(device, memprops, vk::BufferUsageFlagBits::eIndexBuffer, size, vk::MemoryPropertyFlagBits::eDeviceLocal) {} }; /// This class is a specialisation of GenericBuffer for low performance vertex buffers in CPU memory. class HostIndexBuffer : public GenericBuffer { public: HostIndexBuffer() {} template <class Type, class Allocator> HostIndexBuffer(const vk::Device& device, const vk::PhysicalDeviceMemoryProperties& memprops, const std::vector<Type, Allocator>& value) : GenericBuffer(device, memprops, vk::BufferUsageFlagBits::eIndexBuffer, value.size() * sizeof(Type), vk::MemoryPropertyFlagBits::eHostVisible) { updateLocal(device, value); } }; /// This class is a specialisation of GenericBuffer for uniform buffers. class UniformBuffer : public GenericBuffer { public: UniformBuffer() {} /// Device local uniform buffer. UniformBuffer(const vk::Device& device, const vk::PhysicalDeviceMemoryProperties& memprops, size_t size) : GenericBuffer(device, memprops, vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eTransferDst, (vk::DeviceSize)size, vk::MemoryPropertyFlagBits::eDeviceLocal) {} }; /// Convenience class for updating descriptor sets (uniforms) class DescriptorSetUpdater { public: DescriptorSetUpdater(int maxBuffers = 10, int maxImages = 10, int maxBufferViews = 0) { // we must pre-size these buffers as we take pointers to their members. bufferInfo_.resize(maxBuffers); imageInfo_.resize(maxImages); bufferViews_.resize(maxBufferViews); } /// Call this to begin a new descriptor set. void beginDescriptorSet(vk::DescriptorSet dstSet) { dstSet_ = dstSet; } /// Call this to begin a new set of images. 
void beginImages(uint32_t dstBinding, uint32_t dstArrayElement, vk::DescriptorType descriptorType) { vk::WriteDescriptorSet wdesc{}; wdesc.dstSet = dstSet_; wdesc.dstBinding = dstBinding; wdesc.dstArrayElement = dstArrayElement; wdesc.descriptorCount = 0; wdesc.descriptorType = descriptorType; wdesc.pImageInfo = imageInfo_.data() + numImages_; descriptorWrites_.push_back(wdesc); } /// Call this to add a combined image sampler. void image(vk::Sampler sampler, vk::ImageView imageView, vk::ImageLayout imageLayout) { if (!descriptorWrites_.empty() && numImages_ != imageInfo_.size() && descriptorWrites_.back().pImageInfo) { descriptorWrites_.back().descriptorCount++; imageInfo_[numImages_++] = vk::DescriptorImageInfo{ sampler, imageView, imageLayout }; } else { ok_ = false; } } /// Call this to start defining buffers. void beginBuffers(uint32_t dstBinding, uint32_t dstArrayElement, vk::DescriptorType descriptorType) { vk::WriteDescriptorSet wdesc{}; wdesc.dstSet = dstSet_; wdesc.dstBinding = dstBinding; wdesc.dstArrayElement = dstArrayElement; wdesc.descriptorCount = 0; wdesc.descriptorType = descriptorType; wdesc.pBufferInfo = bufferInfo_.data() + numBuffers_; descriptorWrites_.push_back(wdesc); } /// Call this to add a new buffer. void buffer(vk::Buffer buffer, vk::DeviceSize offset, vk::DeviceSize range) { if (!descriptorWrites_.empty() && numBuffers_ != bufferInfo_.size() && descriptorWrites_.back().pBufferInfo) { descriptorWrites_.back().descriptorCount++; bufferInfo_[numBuffers_++] = vk::DescriptorBufferInfo{ buffer, offset, range }; } else { ok_ = false; } } /// Call this to start adding buffer views. (for example, writable images). 
void beginBufferViews(uint32_t dstBinding, uint32_t dstArrayElement, vk::DescriptorType descriptorType) { vk::WriteDescriptorSet wdesc{}; wdesc.dstSet = dstSet_; wdesc.dstBinding = dstBinding; wdesc.dstArrayElement = dstArrayElement; wdesc.descriptorCount = 0; wdesc.descriptorType = descriptorType; wdesc.pTexelBufferView = bufferViews_.data() + numBufferViews_; descriptorWrites_.push_back(wdesc); } /// Call this to add a buffer view. (Texel images) void bufferView(vk::BufferView view) { if (!descriptorWrites_.empty() && numBufferViews_ != bufferViews_.size() && descriptorWrites_.back().pImageInfo) { descriptorWrites_.back().descriptorCount++; bufferViews_[numBufferViews_++] = view; } else { ok_ = false; } } /// Copy an existing descriptor. void copy(vk::DescriptorSet srcSet, uint32_t srcBinding, uint32_t srcArrayElement, vk::DescriptorSet dstSet, uint32_t dstBinding, uint32_t dstArrayElement, uint32_t descriptorCount) { descriptorCopies_.emplace_back(srcSet, srcBinding, srcArrayElement, dstSet, dstBinding, dstArrayElement, descriptorCount); } /// Call this to update the descriptor sets with their pointers (but not data). void update(const vk::Device& device) const { device.updateDescriptorSets(descriptorWrites_, descriptorCopies_); } /// Returns true if the updater is error free. bool ok() const { return ok_; } private: std::vector<vk::DescriptorBufferInfo> bufferInfo_; std::vector<vk::DescriptorImageInfo> imageInfo_; std::vector<vk::WriteDescriptorSet> descriptorWrites_; std::vector<vk::CopyDescriptorSet> descriptorCopies_; std::vector<vk::BufferView> bufferViews_; vk::DescriptorSet dstSet_; int numBuffers_ = 0; int numImages_ = 0; int numBufferViews_ = 0; bool ok_ = true; }; /// A factory class for descriptor set layouts. 
/// (An interface to the shaders)
class DescriptorSetLayoutMaker {
public:
  DescriptorSetLayoutMaker() {}

  /// Add a buffer binding (uniform / storage buffer) visible to `stageFlags`.
  void buffer(uint32_t binding, vk::DescriptorType descriptorType, vk::ShaderStageFlags stageFlags, uint32_t descriptorCount) {
    s.bindings.emplace_back(binding, descriptorType, descriptorCount, stageFlags, nullptr);
  }

  /// Add an image binding visible to `stageFlags`.
  void image(uint32_t binding, vk::DescriptorType descriptorType, vk::ShaderStageFlags stageFlags, uint32_t descriptorCount) {
    s.bindings.emplace_back(binding, descriptorType, descriptorCount, stageFlags, nullptr);
  }

  /// Add a binding with immutable samplers baked into the layout.
  void samplers(uint32_t binding, vk::DescriptorType descriptorType, vk::ShaderStageFlags stageFlags, const std::vector<vk::Sampler> immutableSamplers) {
    // Keep the sampler array alive in s.samplers; the binding stores a raw
    // pointer into it. (Outer-vector reallocation moves the inner vectors,
    // which keeps their .data() buffers stable.)
    s.samplers.push_back(immutableSamplers);
    auto pImmutableSamplers = s.samplers.back().data();
    s.bindings.emplace_back(binding, descriptorType, (uint32_t)immutableSamplers.size(), stageFlags, pImmutableSamplers);
  }

  /// Add a texel buffer view binding.
  void bufferView(uint32_t binding, vk::DescriptorType descriptorType, vk::ShaderStageFlags stageFlags, uint32_t descriptorCount) {
    s.bindings.emplace_back(binding, descriptorType, descriptorCount, stageFlags, nullptr);
  }

  /// Create a self-deleting descriptor set object.
  vk::UniqueDescriptorSetLayout createUnique(vk::Device device) const {
    vk::DescriptorSetLayoutCreateInfo dsci{};
    dsci.bindingCount = (uint32_t)s.bindings.size();
    dsci.pBindings = s.bindings.data();
    return device.createDescriptorSetLayoutUnique(dsci);
  }

private:
  struct State {
    std::vector<vk::DescriptorSetLayoutBinding> bindings;
    std::vector<std::vector<vk::Sampler>> samplers;
    int numSamplers = 0;  // NOTE(review): never read or written after init
  };
  State s;
};

/// A factory class for descriptor sets (A set of uniform bindings)
class DescriptorSetMaker {
public:
  // Construct a new, empty DescriptorSetMaker.
  DescriptorSetMaker() {}

  /// Add another layout describing a descriptor set.
  void layout(vk::DescriptorSetLayout layout) { s.layouts.push_back(layout); }

  /// Allocate a vector of non-self-deleting descriptor sets
  /// Note: descriptor sets get freed with the pool, so this is the better choice.
  std::vector<vk::DescriptorSet> create(vk::Device device, vk::DescriptorPool descriptorPool) const {
    vk::DescriptorSetAllocateInfo dsai{};
    dsai.descriptorPool = descriptorPool;
    dsai.descriptorSetCount = (uint32_t)s.layouts.size();
    dsai.pSetLayouts = s.layouts.data();
    return device.allocateDescriptorSets(dsai);
  }

  /// Allocate a vector of self-deleting descriptor sets.
  std::vector<vk::UniqueDescriptorSet> createUnique(vk::Device device, vk::DescriptorPool descriptorPool) const {
    vk::DescriptorSetAllocateInfo dsai{};
    dsai.descriptorPool = descriptorPool;
    dsai.descriptorSetCount = (uint32_t)s.layouts.size();
    dsai.pSetLayouts = s.layouts.data();
    return device.allocateDescriptorSetsUnique(dsai);
  }

private:
  struct State {
    std::vector<vk::DescriptorSetLayout> layouts;
  };
  State s;
};

/// Generic image with a view and memory object.
/// Vulkan images need a memory object to hold the data and a view object for the GPU to access the data.
class GenericImage {
public:
  GenericImage() {}

  GenericImage(vk::Device device, const vk::PhysicalDeviceMemoryProperties& memprops, const vk::ImageCreateInfo& info, vk::ImageViewType viewType, vk::ImageAspectFlags aspectMask, bool makeHostImage) {
    create(device, memprops, info, viewType, aspectMask, makeHostImage);
  }

  vk::Image image() const { return *s.image; }
  vk::ImageView imageView() const { return *s.imageView; }
  vk::DeviceMemory mem() const { return *s.mem; }

  /// Clear the colour of an image.
void clear(vk::CommandBuffer cb, const std::array<float, 4> colour = { 1, 1, 1, 1 }) { setLayout(cb, vk::ImageLayout::eTransferDstOptimal); vk::ClearColorValue ccv(colour); vk::ImageSubresourceRange range{ vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 }; cb.clearColorImage(*s.image, vk::ImageLayout::eTransferDstOptimal, ccv, range); } /// Update the image with an array of pixels. (Currently 2D only) void update(vk::Device device, const void* data, vk::DeviceSize bytesPerPixel) { const uint8_t* src = (const uint8_t*)data; for (uint32_t mipLevel = 0; mipLevel != info().mipLevels; ++mipLevel) { // Array images are layed out horizontally. eg. [left][front][right] etc. for (uint32_t arrayLayer = 0; arrayLayer != info().arrayLayers; ++arrayLayer) { vk::ImageSubresource subresource{ vk::ImageAspectFlagBits::eColor, mipLevel, arrayLayer }; auto srlayout = device.getImageSubresourceLayout(*s.image, subresource); uint8_t* dest = (uint8_t*)device.mapMemory(*s.mem, 0, s.size, vk::MemoryMapFlags{}) + srlayout.offset; size_t bytesPerLine = s.info.extent.width * bytesPerPixel; size_t srcStride = bytesPerLine * info().arrayLayers; for (int y = 0; y != s.info.extent.height; ++y) { memcpy(dest, src + arrayLayer * bytesPerLine, bytesPerLine); src += srcStride; dest += srlayout.rowPitch; } } } device.unmapMemory(*s.mem); } /// Copy another image to this one. This also changes the layout. 
/// Copy another image to this one. This also changes the layout.
/// Both images are transitioned (src -> TransferSrc, this -> TransferDst) and
/// every mip level is copied at the full base extent.
void copy(vk::CommandBuffer cb, vku::GenericImage& srcImage) {
  srcImage.setLayout(cb, vk::ImageLayout::eTransferSrcOptimal);
  setLayout(cb, vk::ImageLayout::eTransferDstOptimal);
  for (uint32_t mipLevel = 0; mipLevel != info().mipLevels; ++mipLevel) {
    vk::ImageCopy region{};
    region.srcSubresource = { vk::ImageAspectFlagBits::eColor, mipLevel, 0, 1 };
    region.dstSubresource = { vk::ImageAspectFlagBits::eColor, mipLevel, 0, 1 };
    // NOTE(review): uses the base extent for every mip; presumably callers only
    // use this on single-mip images — confirm before relying on deeper mips.
    region.extent = s.info.extent;
    cb.copyImage(srcImage.image(), vk::ImageLayout::eTransferSrcOptimal, *s.image, vk::ImageLayout::eTransferDstOptimal, region);
  }
}

/// Copy a subimage in a buffer to this image.
/// `offset` is the byte offset of the subimage within `buffer`.
void copy(vk::CommandBuffer cb, vk::Buffer buffer, uint32_t mipLevel, uint32_t arrayLayer, uint32_t width, uint32_t height, uint32_t depth, uint32_t offset) {
  setLayout(cb, vk::ImageLayout::eTransferDstOptimal);
  vk::BufferImageCopy region{};
  region.bufferOffset = offset;
  vk::Extent3D extent;
  extent.width = width;
  extent.height = height;
  extent.depth = depth;
  region.imageSubresource = { vk::ImageAspectFlagBits::eColor, mipLevel, arrayLayer, 1 };
  region.imageExtent = extent;
  cb.copyBufferToImage(buffer, *s.image, vk::ImageLayout::eTransferDstOptimal, region);
}

/// Upload raw pixel bytes (all mips, all layers, tightly packed) through a
/// host-visible staging buffer, then leave the image ShaderReadOnlyOptimal.
void upload(vk::Device device, std::vector<uint8_t>& bytes, vk::CommandPool commandPool, vk::PhysicalDeviceMemoryProperties memprops, vk::Queue queue) {
  vku::GenericBuffer stagingBuffer(device, memprops, (vk::BufferUsageFlags)vk::BufferUsageFlagBits::eTransferSrc, (vk::DeviceSize)bytes.size(), vk::MemoryPropertyFlagBits::eHostVisible);
  stagingBuffer.updateLocal(device, (const void*)bytes.data(), bytes.size());
  // Copy the staging buffer to the GPU texture and set the layout.
  vku::executeImmediately(device, commandPool, queue, [&](vk::CommandBuffer cb) {
    auto bp = getBlockParams(s.info.format);
    vk::Buffer buf = stagingBuffer.buffer();
    uint32_t offset = 0;
    for (uint32_t mipLevel = 0; mipLevel != s.info.mipLevels; ++mipLevel) {
      auto width = mipScale(s.info.extent.width, mipLevel);
      auto height = mipScale(s.info.extent.height, mipLevel);
      auto depth = mipScale(s.info.extent.depth, mipLevel);
      for (uint32_t face = 0; face != s.info.arrayLayers; ++face) {
        copy(cb, buf, mipLevel, face, width, height, depth, offset);
        // advance by the 4-byte-rounded block size times the pixel count
        offset += ((bp.bytesPerBlock + 3) & ~3) * (width * height);
      }
    }
    setLayout(cb, vk::ImageLayout::eShaderReadOnlyOptimal);
  });
}

/// Change the layout of this image using a memory barrier.
/// Tracks the current layout in s.currentLayout; a no-op if unchanged.
void setLayout(vk::CommandBuffer cb, vk::ImageLayout newLayout, vk::ImageAspectFlags aspectMask = vk::ImageAspectFlagBits::eColor) {
  if (newLayout == s.currentLayout) return;
  vk::ImageLayout oldLayout = s.currentLayout;
  s.currentLayout = newLayout;

  vk::ImageMemoryBarrier imageMemoryBarriers = {};
  imageMemoryBarriers.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  imageMemoryBarriers.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  imageMemoryBarriers.oldLayout = oldLayout;
  imageMemoryBarriers.newLayout = newLayout;
  imageMemoryBarriers.image = *s.image;
  imageMemoryBarriers.subresourceRange = { aspectMask, 0, s.info.mipLevels, 0, s.info.arrayLayers };

  // Put barrier on top
  vk::PipelineStageFlags srcStageMask{ vk::PipelineStageFlagBits::eTopOfPipe };
  vk::PipelineStageFlags dstStageMask{ vk::PipelineStageFlagBits::eTopOfPipe };
  vk::DependencyFlags dependencyFlags{};
  vk::AccessFlags srcMask{};
  vk::AccessFlags dstMask{};

  typedef vk::ImageLayout il;
  typedef vk::AccessFlagBits afb;

  // Pick the access masks that correspond to each layout transition.
  // (The two tables differ only for eGeneral/ePreinitialized source cases.)
  switch (oldLayout) {
    case il::eUndefined: break;
    case il::eGeneral: srcMask = afb::eTransferWrite; break;
    case il::eColorAttachmentOptimal: srcMask = afb::eColorAttachmentWrite; break;
    case il::eDepthStencilAttachmentOptimal: srcMask = afb::eDepthStencilAttachmentWrite; break;
    case il::eDepthStencilReadOnlyOptimal: srcMask = afb::eDepthStencilAttachmentRead; break;
    case il::eShaderReadOnlyOptimal: srcMask = afb::eShaderRead; break;
    case il::eTransferSrcOptimal: srcMask = afb::eTransferRead; break;
    case il::eTransferDstOptimal: srcMask = afb::eTransferWrite; break;
    case il::ePreinitialized: srcMask = afb::eTransferWrite | afb::eHostWrite; break;
    case il::ePresentSrcKHR: srcMask = afb::eMemoryRead; break;
  }

  switch (newLayout) {
    case il::eUndefined: break;
    case il::eGeneral: dstMask = afb::eTransferWrite; break;
    case il::eColorAttachmentOptimal: dstMask = afb::eColorAttachmentWrite; break;
    case il::eDepthStencilAttachmentOptimal: dstMask = afb::eDepthStencilAttachmentWrite; break;
    case il::eDepthStencilReadOnlyOptimal: dstMask = afb::eDepthStencilAttachmentRead; break;
    case il::eShaderReadOnlyOptimal: dstMask = afb::eShaderRead; break;
    case il::eTransferSrcOptimal: dstMask = afb::eTransferRead; break;
    case il::eTransferDstOptimal: dstMask = afb::eTransferWrite; break;
    case il::ePreinitialized: dstMask = afb::eTransferWrite; break;
    case il::ePresentSrcKHR: dstMask = afb::eMemoryRead; break;
  }

  imageMemoryBarriers.srcAccessMask = srcMask;
  imageMemoryBarriers.dstAccessMask = dstMask;
  auto memoryBarriers = nullptr;        // no global memory barriers
  auto bufferMemoryBarriers = nullptr;  // no buffer barriers
  cb.pipelineBarrier(srcStageMask, dstStageMask, dependencyFlags, memoryBarriers, bufferMemoryBarriers, imageMemoryBarriers);
}

/// Set what the image thinks is its current layout (ie. the old layout in an image barrier).
void setCurrentLayout(vk::ImageLayout oldLayout) { s.currentLayout = oldLayout; }

vk::Format format() const { return s.info.format; }
vk::Extent3D extent() const { return s.info.extent; }
const vk::ImageCreateInfo& info() const { return s.info; }

protected:
/// Create the image, allocate and bind its memory, and (for device images)
/// create a view. Host images get host-visible/coherent memory and no view.
void create(vk::Device device, const vk::PhysicalDeviceMemoryProperties& memprops, const vk::ImageCreateInfo& info, vk::ImageViewType viewType, vk::ImageAspectFlags aspectMask, bool hostImage) {
  s.currentLayout = info.initialLayout;
  s.info = info;
  s.image = device.createImageUnique(info);

  // Find out how much memory and which heap to allocate from.
  auto memreq = device.getImageMemoryRequirements(*s.image);
  vk::MemoryPropertyFlags search{};
  if (hostImage) search = vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostVisible;

  // Create a memory object to bind to the buffer.
  // Note: we don't expect to be able to map the buffer.
  vk::MemoryAllocateInfo mai{};
  mai.allocationSize = s.size = memreq.size;
  mai.memoryTypeIndex = vku::findMemoryTypeIndex(memprops, memreq.memoryTypeBits, search);
  s.mem = device.allocateMemoryUnique(mai);
  device.bindImageMemory(*s.image, *s.mem, 0);

  if (!hostImage) {
    vk::ImageViewCreateInfo viewInfo{};
    viewInfo.image = *s.image;
    viewInfo.viewType = viewType;
    viewInfo.format = info.format;
    viewInfo.components = { vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG, vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA };
    viewInfo.subresourceRange = vk::ImageSubresourceRange{ aspectMask, 0, info.mipLevels, 0, info.arrayLayers };
    s.imageView = device.createImageViewUnique(viewInfo);
  }
}

struct State {
  vk::UniqueImage image;
  vk::UniqueImageView imageView;  // empty for host images
  vk::UniqueDeviceMemory mem;
  vk::DeviceSize size;            // allocation size from memory requirements
  vk::ImageLayout currentLayout;  // layout tracking for setLayout()
  vk::ImageCreateInfo info;
};
State s;
};

/// A 2D texture image living on the GPU or a staging buffer visible to the CPU.
class TextureImage2D : public GenericImage {
public:
  TextureImage2D() {}

  /// Sampleable 2D image; hostImage selects linear tiling + Preinitialized layout.
  TextureImage2D(vk::Device device, const vk::PhysicalDeviceMemoryProperties& memprops, uint32_t width, uint32_t height, uint32_t mipLevels = 1, vk::Format format = vk::Format::eR8G8B8A8Unorm, bool hostImage = false) {
    vk::ImageCreateInfo info;
    info.flags = {};
    info.imageType = vk::ImageType::e2D;
    info.format = format;
    info.extent = vk::Extent3D{ width, height, 1U };
    info.mipLevels = mipLevels;
    info.arrayLayers = 1;
    info.samples = vk::SampleCountFlagBits::e1;
    info.tiling = hostImage ? vk::ImageTiling::eLinear : vk::ImageTiling::eOptimal;
    info.usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst;
    info.sharingMode = vk::SharingMode::eExclusive;
    info.queueFamilyIndexCount = 0;
    info.pQueueFamilyIndices = nullptr;
    info.initialLayout = hostImage ? vk::ImageLayout::ePreinitialized : vk::ImageLayout::eUndefined;
    create(device, memprops, info, vk::ImageViewType::e2D, vk::ImageAspectFlagBits::eColor, hostImage);
  }
private:
};

/// A cube map texture image living on the GPU or a staging buffer visible to the CPU.
class TextureImageCube : public GenericImage {
public:
  TextureImageCube() {}

  /// Cube map: a 2D image with 6 array layers and a Cube view.
  TextureImageCube(vk::Device device, const vk::PhysicalDeviceMemoryProperties& memprops, uint32_t width, uint32_t height, uint32_t mipLevels = 1, vk::Format format = vk::Format::eR8G8B8A8Unorm, bool hostImage = false) {
    vk::ImageCreateInfo info;
    info.flags = {};
    info.imageType = vk::ImageType::e2D;
    info.format = format;
    info.extent = vk::Extent3D{ width, height, 1U };
    info.mipLevels = mipLevels;
    info.arrayLayers = 6;
    info.samples = vk::SampleCountFlagBits::e1;
    info.tiling = hostImage ? vk::ImageTiling::eLinear : vk::ImageTiling::eOptimal;
    info.usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst;
    info.sharingMode = vk::SharingMode::eExclusive;
    info.queueFamilyIndexCount = 0;
    info.pQueueFamilyIndices = nullptr;
    // NOTE(review): unlike TextureImage2D, the layout is unconditionally
    // Preinitialized here (the conditional form is commented out) — confirm
    // whether device-local cube maps really want this.
    //info.initialLayout = hostImage ? vk::ImageLayout::ePreinitialized : vk::ImageLayout::eUndefined;
    info.initialLayout = vk::ImageLayout::ePreinitialized;
    create(device, memprops, info, vk::ImageViewType::eCube, vk::ImageAspectFlagBits::eColor, hostImage);
  }
private:
};

/// An image to use as a depth buffer on a renderpass.
class DepthStencilImage : public GenericImage {
public:
  DepthStencilImage() {}

  DepthStencilImage(vk::Device device, const vk::PhysicalDeviceMemoryProperties& memprops, uint32_t width, uint32_t height, vk::Format format = vk::Format::eD24UnormS8Uint) {
    vk::ImageCreateInfo info;
    info.flags = {};
    info.imageType = vk::ImageType::e2D;
    info.format = format;
    info.extent = vk::Extent3D{ width, height, 1U };
    info.mipLevels = 1;
    info.arrayLayers = 1;
    info.samples = vk::SampleCountFlagBits::e1;
    info.tiling = vk::ImageTiling::eOptimal;
    info.usage = vk::ImageUsageFlagBits::eDepthStencilAttachment | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eSampled;
    info.sharingMode = vk::SharingMode::eExclusive;
    info.queueFamilyIndexCount = 0;
    info.pQueueFamilyIndices = nullptr;
    info.initialLayout = vk::ImageLayout::eUndefined;
    typedef vk::ImageAspectFlagBits iafb;
    // View covers the depth aspect only.
    create(device, memprops, info, vk::ImageViewType::e2D, iafb::eDepth, false);
  }
private:
};

/// An image to use as a colour buffer on a renderpass.
class ColorAttachmentImage : public GenericImage {
public:
  ColorAttachmentImage() {}

  ColorAttachmentImage(vk::Device device, const vk::PhysicalDeviceMemoryProperties& memprops, uint32_t width, uint32_t height, vk::Format format = vk::Format::eR8G8B8A8Unorm) {
    vk::ImageCreateInfo info;
    info.flags = {};
    info.imageType = vk::ImageType::e2D;
    info.format = format;
    info.extent = vk::Extent3D{ width, height, 1U };
    info.mipLevels = 1;
    info.arrayLayers = 1;
    info.samples = vk::SampleCountFlagBits::e1;
    info.tiling = vk::ImageTiling::eOptimal;
    info.usage = vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eSampled;
    info.sharingMode = vk::SharingMode::eExclusive;
    info.queueFamilyIndexCount = 0;
    info.pQueueFamilyIndices = nullptr;
    info.initialLayout = vk::ImageLayout::eUndefined;
    typedef vk::ImageAspectFlagBits iafb;
    create(device, memprops, info, vk::ImageViewType::e2D, iafb::eColor, false);
  }
private:
};

/// A class to help build samplers.
/// Samplers tell the shader stages how to sample an image.
/// They are used in combination with an image to make a combined image sampler
/// used by texture() calls in shaders.
/// They can also be passed to shaders directly for use on multiple image sources.
class SamplerMaker {
public:
  /// Default to a very basic sampler.
  /// Nearest filtering, repeat addressing, no anisotropy, no LOD range.
  SamplerMaker() {
    s.info.magFilter = vk::Filter::eNearest;
    s.info.minFilter = vk::Filter::eNearest;
    s.info.mipmapMode = vk::SamplerMipmapMode::eNearest;
    s.info.addressModeU = vk::SamplerAddressMode::eRepeat;
    s.info.addressModeV = vk::SamplerAddressMode::eRepeat;
    s.info.addressModeW = vk::SamplerAddressMode::eRepeat;
    s.info.mipLodBias = 0.0f;
    s.info.anisotropyEnable = 0;
    s.info.maxAnisotropy = 0.0f;
    s.info.compareEnable = 0;
    s.info.compareOp = vk::CompareOp::eNever;
    s.info.minLod = 0;
    s.info.maxLod = 0;
    s.info.borderColor = vk::BorderColor{};
    s.info.unnormalizedCoordinates = 0;
  }

  ////////////////////
  //
  // Setters (each returns *this so calls can be chained fluently)
  //

  SamplerMaker& flags(vk::SamplerCreateFlags value) { s.info.flags = value; return *this; }

  /// Set the magnify filter value. (for close textures)
  /// Options are: vk::Filter::eLinear and vk::Filter::eNearest
  SamplerMaker& magFilter(vk::Filter value) { s.info.magFilter = value; return *this; }

  /// Set the minify filter value. (for far away textures)
  /// Options are: vk::Filter::eLinear and vk::Filter::eNearest
  SamplerMaker& minFilter(vk::Filter value) { s.info.minFilter = value; return *this; }

  /// Set the mipmap mode. (for far away textures)
  /// Options are: vk::SamplerMipmapMode::eLinear and vk::SamplerMipmapMode::eNearest
  SamplerMaker& mipmapMode(vk::SamplerMipmapMode value) { s.info.mipmapMode = value; return *this; }

  SamplerMaker& addressModeU(vk::SamplerAddressMode value) { s.info.addressModeU = value; return *this; }
  SamplerMaker& addressModeV(vk::SamplerAddressMode value) { s.info.addressModeV = value; return *this; }
  SamplerMaker& addressModeW(vk::SamplerAddressMode value) { s.info.addressModeW = value; return *this; }
  SamplerMaker& mipLodBias(float value) { s.info.mipLodBias = value; return *this; }
  SamplerMaker& anisotropyEnable(vk::Bool32 value) { s.info.anisotropyEnable = value; return *this; }
  SamplerMaker& maxAnisotropy(float value) { s.info.maxAnisotropy = value; return *this; }
  SamplerMaker& compareEnable(vk::Bool32 value) { s.info.compareEnable = value; return *this; }
  SamplerMaker& compareOp(vk::CompareOp value) { s.info.compareOp = value; return *this; }
  SamplerMaker& minLod(float value) { s.info.minLod = value; return *this; }
  SamplerMaker& maxLod(float value) { s.info.maxLod = value; return *this; }
  SamplerMaker& borderColor(vk::BorderColor value) { s.info.borderColor = value; return *this; }
  SamplerMaker& unnormalizedCoordinates(vk::Bool32 value) { s.info.unnormalizedCoordinates = value; return *this; }

  /// Allocate a self-deleting sampler.
  vk::UniqueSampler createUnique(vk::Device device) const {
    return device.createSamplerUnique(s.info);
  }

  /// Allocate a non self-deleting Sampler.
  vk::Sampler create(vk::Device device) const {
    return device.createSampler(s.info);
  }

private:
  struct State {
    vk::SamplerCreateInfo info;
  };
  State s;
};

/// KTX files use OpenGL format values. This converts some common ones to Vulkan equivalents.
inline vk::Format GLtoVKFormat(uint32_t glFormat) { switch (glFormat) { case 0x1907: return vk::Format::eR8G8B8Unorm; // GL_RGB case 0x1908: return vk::Format::eR8G8B8A8Unorm; // GL_RGBA case 0x83F0: return vk::Format::eBc1RgbUnormBlock; // GL_COMPRESSED_RGB_S3TC_DXT1_EXT case 0x83F1: return vk::Format::eBc1RgbaUnormBlock; // GL_COMPRESSED_RGBA_S3TC_DXT1_EXT case 0x83F2: return vk::Format::eBc3UnormBlock; // GL_COMPRESSED_RGBA_S3TC_DXT3_EXT case 0x83F3: return vk::Format::eBc5UnormBlock; // GL_COMPRESSED_RGBA_S3TC_DXT5_EXT } return vk::Format::eUndefined; } /// Layout of a KTX file in a buffer. class KTXFileLayout { public: KTXFileLayout() {} KTXFileLayout(uint8_t* begin, uint8_t* end) { uint8_t* p = begin; if (p + sizeof(Header) > end) return; header = *(Header*)p; static const uint8_t magic[] = { 0xAB, 0x4B, 0x54, 0x58, 0x20, 0x31, 0x31, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A }; if (memcmp(magic, header.identifier, sizeof(magic))) { return; } if (header.endianness != 0x04030201) { swap(header.glType); swap(header.glTypeSize); swap(header.glFormat); swap(header.glInternalFormat); swap(header.glBaseInternalFormat); swap(header.pixelWidth); swap(header.pixelHeight); swap(header.pixelDepth); swap(header.numberOfArrayElements); swap(header.numberOfFaces); swap(header.numberOfMipmapLevels); swap(header.bytesOfKeyValueData); } header.numberOfArrayElements = std::max(1U, header.numberOfArrayElements); header.numberOfFaces = std::max(1U, header.numberOfFaces); header.numberOfMipmapLevels = std::max(1U, header.numberOfMipmapLevels); header.pixelDepth = std::max(1U, header.pixelDepth); format_ = GLtoVKFormat(header.glFormat); if (format_ == vk::Format::eUndefined) return; p += sizeof(Header); if (p + header.bytesOfKeyValueData > end) return; for (uint32_t i = 0; i < header.bytesOfKeyValueData;) { uint32_t keyAndValueByteSize = *(uint32_t*)(p + i); if (header.endianness != 0x04030201) swap(keyAndValueByteSize); std::string kv(p + i + 4, p + i + 4 + keyAndValueByteSize); i += 
keyAndValueByteSize + 4; i = (i + 3) & ~3; } p += header.bytesOfKeyValueData; for (uint32_t mipLevel = 0; mipLevel != header.numberOfMipmapLevels; ++mipLevel) { uint32_t imageSize = *(uint32_t*)(p); imageSize = (imageSize + 3) & ~3; uint32_t incr = imageSize * header.numberOfFaces * header.numberOfArrayElements; incr = (incr + 3) & ~3; if (p + incr > end) { // see https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glPixelStore.xhtml // fix bugs... https://github.com/dariomanesku/cmft/issues/29 header.numberOfMipmapLevels = mipLevel; break; } if (header.endianness != 0x04030201) swap(imageSize); //printf("%08x: is=%08x / %08x\n", p-begin, imageSize, end - begin); p += 4; imageOffsets_.push_back((uint32_t)(p - begin)); imageSizes_.push_back(imageSize); p += incr; } ok_ = true; } uint32_t offset(uint32_t mipLevel, uint32_t arrayLayer, uint32_t face) { return imageOffsets_[mipLevel] + (arrayLayer * header.numberOfFaces + face) * imageSizes_[mipLevel]; } uint32_t size(uint32_t mipLevel) { return imageSizes_[mipLevel]; } bool ok() const { return ok_; } vk::Format format() const { return format_; } uint32_t mipLevels() const { return header.numberOfMipmapLevels; } uint32_t arrayLayers() const { return header.numberOfArrayElements; } uint32_t faces() const { return header.numberOfFaces; } uint32_t width(uint32_t mipLevel) const { return mipScale(header.pixelWidth, mipLevel); } uint32_t height(uint32_t mipLevel) const { return mipScale(header.pixelHeight, mipLevel); } uint32_t depth(uint32_t mipLevel) const { return mipScale(header.pixelDepth, mipLevel); } void upload(vk::Device device, vku::GenericImage& image, std::vector<uint8_t>& bytes, vk::CommandPool commandPool, vk::PhysicalDeviceMemoryProperties memprops, vk::Queue queue) { vku::GenericBuffer stagingBuffer(device, memprops, (vk::BufferUsageFlags)vk::BufferUsageFlagBits::eTransferSrc, (vk::DeviceSize)bytes.size(), vk::MemoryPropertyFlagBits::eHostVisible); stagingBuffer.updateLocal(device, (const 
void*)bytes.data(), bytes.size());

    // Copy the staging buffer to the GPU texture and set the layout.
    vku::executeImmediately(device, commandPool, queue, [&](vk::CommandBuffer cb) {
      vk::Buffer buf = stagingBuffer.buffer();
      for (uint32_t mipLevel = 0; mipLevel != mipLevels(); ++mipLevel) {
        auto width = this->width(mipLevel);
        auto height = this->height(mipLevel);
        auto depth = this->depth(mipLevel);
        for (uint32_t face = 0; face != faces(); ++face) {
          // NOTE(review): arrayLayer passed to offset() is fixed at 0 — only
          // the first array layer is uploaded; confirm for array textures.
          image.copy(cb, buf, mipLevel, face, width, height, depth, offset(mipLevel, 0, face));
        }
      }
      image.setLayout(cb, vk::ImageLayout::eShaderReadOnlyOptimal);
    });
  }

private:
  /// Reverse the byte order of a 32-bit value (endianness conversion).
  static void swap(uint32_t& value) {
    value = value >> 24 | (value & 0xff0000) >> 8 | (value & 0xff00) << 8 | value << 24;
  }

  /// On-disk KTX 1.1 header (13 u32 fields after the 12-byte magic).
  struct Header {
    uint8_t identifier[12];
    uint32_t endianness;
    uint32_t glType;
    uint32_t glTypeSize;
    uint32_t glFormat;
    uint32_t glInternalFormat;
    uint32_t glBaseInternalFormat;
    uint32_t pixelWidth;
    uint32_t pixelHeight;
    uint32_t pixelDepth;
    uint32_t numberOfArrayElements;
    uint32_t numberOfFaces;
    uint32_t numberOfMipmapLevels;
    uint32_t bytesOfKeyValueData;
  };

  Header header;
  vk::Format format_;
  bool ok_ = false;
  std::vector<uint32_t> imageOffsets_;  // per-mip byte offset of first image
  std::vector<uint32_t> imageSizes_;    // per-mip size of a single image
};

} // namespace vku

#endif // VKU_HPP
#include "AOC_Precompiled.h"

/// Advent-of-Code day 15 skeleton: demonstrates the three input-parsing styles
/// available via GC_File but does no puzzle work yet — always returns 0.
static uint locPart1(char const* aFile) {
  uint result = 0;

  // By line with parse function
  auto items = GC_File::Parse<int>(aFile, [](auto aLine, auto& anItem) {
    anItem = GC_Atoi(aLine);
    return true;  // keep every parsed line
  });
  for (auto item : items) {
    (void)item;  // placeholder: puzzle logic goes here
  }

  // By line parsing
  for (auto line : GC_File::ReadAllLines(aFile)) {
  }

  // By Block parsing (block of lines separate by two new lines)
  GC_String text;
  GC_File::ReadAllText(aFile, text);
  for (GC_StrSlice chunk; GC_Strtok(text, "\n\n", chunk);) {
  }

  return result;
}

// Expected values are 0 until the puzzle is implemented.
DEFINE_TEST_G(Part1, Day15) {
  TEST_EQ(locPart1("AOC_Day15Test.txt"), 0);
  TEST_EQ(locPart1("AOC_Day15Part1.txt"), 0);
}

DEFINE_TEST_G(Part2, Day15) {
  TEST_EQ(locPart1("AOC_Day15Part2.txt"), 0);
}
/*
 * Copyright (c) 2016, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 */
#include <folly/Conv.h>
#include <glog/logging.h>
#include <gtest/gtest.h>
#include <memory>
#include <proxygen/lib/http/codec/compress/HPACKContext.h>
#include <proxygen/lib/http/codec/compress/experimental/hpack9/HPACKDecoder.h>
#include <proxygen/lib/http/codec/compress/experimental/hpack9/HPACKEncoder.h>
#include <proxygen/lib/http/codec/compress/Logging.h>

using namespace folly;
using namespace proxygen;
using namespace std;
using namespace testing;

// Parameterised fixture; the bool parameter is used by the contextUpdate test
// to choose whether the decoder's max table size is raised to match.
class HPACKContextTests : public testing::TestWithParam<bool> {
};

// HPACKContext subclass that exposes table insertion and pins the HPACK-09
// static table for these tests.
class TestContext : public HPACKContext {
public:
  TestContext(HPACK::MessageType msgType, uint32_t tableSize)
    : HPACKContext(msgType, tableSize) {}

  void add(const HPACKHeader& header) {
    table_.add(header);
  }

  const HeaderTable& getStaticTable() const override {
    return HPACK09::getStaticTable();
  }
};

TEST_F(HPACKContextTests, get_index) {
  TestContext context(HPACK::MessageType::REQ, HPACK::kTableSize);
  HPACKHeader method(":method", "POST");
  // this will get it from the static table
  CHECK_EQ(context.getIndex(method), 3);
}

TEST_F(HPACKContextTests, is_static) {
  TestContext context(HPACK::MessageType::REQ, HPACK::kTableSize);
  // add 10 headers to the table
  for (int i = 1; i <= 10; i++) {
    HPACKHeader header("name" + folly::to<string>(i),
                       "value" + folly::to<string>(i));
    context.add(header);
  }
  EXPECT_EQ(context.getTable().size(), 10);
  // indices 1..10 hit the dynamic table; higher indices fall into the static table
  EXPECT_EQ(context.isStatic(1), false);
  EXPECT_EQ(context.isStatic(10), false);
  EXPECT_EQ(context.isStatic(40), true);
  EXPECT_EQ(context.isStatic(60), true);
  EXPECT_EQ(context.isStatic(69), true);
}

TEST_F(HPACKContextTests, static_table) {
  auto& table = StaticHeaderTable::get();
  // NOTE(review): first/methodPost/last are never used; the assertions below
  // re-index the table directly.
  const HPACKHeader& first = table[1];
  const HPACKHeader& methodPost = table[3];
  const HPACKHeader& last = table[table.size()];
  // there are 60 entries in the spec
  CHECK_EQ(table.size(), 60);
  CHECK_EQ(table[3], HPACKHeader(":method", "POST"));
  CHECK_EQ(table[1].name, ":authority");
  CHECK_EQ(table[table.size()].name, "www-authenticate");
}

TEST_F(HPACKContextTests, static_index) {
  TestContext context(HPACK::MessageType::REQ, HPACK::kTableSize);
  HPACKHeader authority(":authority", "");
  EXPECT_EQ(context.getHeader(1), authority);
  HPACKHeader post(":method", "POST");
  EXPECT_EQ(context.getHeader(3), post);
  HPACKHeader contentLength("content-length", "");
  EXPECT_EQ(context.getHeader(28), contentLength);
}

TEST_F(HPACKContextTests, encoder_multiple_values) {
  HPACKEncoder09 encoder(true);
  vector<HPACKHeader> req;
  req.push_back(HPACKHeader("accept-encoding", "gzip"));
  req.push_back(HPACKHeader("accept-encoding", "sdch,gzip"));
  // with the first encode both headers should be in the reference set
  unique_ptr<IOBuf> encoded = encoder.encode(req);
  EXPECT_TRUE(encoded->length() > 0);
  EXPECT_EQ(encoder.getTable().size(), 2);
  // sending the same request again should lead to a smaller but non
  // empty buffer
  unique_ptr<IOBuf> encoded2 = encoder.encode(req);
  EXPECT_LT(encoded2->computeChainDataLength(),
            encoded->computeChainDataLength());
  EXPECT_GT(encoded2->computeChainDataLength(), 0);
}

TEST_F(HPACKContextTests, decoder_large_header) {
  // with this size basically the table will not be able to store any entry
  uint32_t size = 32;
  HPACKHeader header;  // NOTE(review): unused
  HPACKEncoder09 encoder(true, size);
  HPACKDecoder09 decoder(size);
  vector<HPACKHeader> headers;
  headers.push_back(HPACKHeader(":path", "verylargeheader"));
  // add a static entry
  headers.push_back(HPACKHeader(":method", "GET"));
  auto buf = encoder.encode(headers);
  auto decoded = decoder.decode(buf.get());
  // nothing fits, so both dynamic tables and reference sets stay empty
  EXPECT_EQ(encoder.getTable().size(), 0);
  EXPECT_EQ(encoder.getTable().referenceSet().size(), 0);
  EXPECT_EQ(decoder.getTable().size(), 0);
  EXPECT_EQ(decoder.getTable().referenceSet().size(), 0);
}

/**
 * testing invalid memory access in the decoder; it has to always call peek()
 */
TEST_F(HPACKContextTests, decoder_invalid_peek) {
  HPACKEncoder09 encoder(true);
  HPACKDecoder09 decoder;
  vector<HPACKHeader> headers;
  headers.push_back(HPACKHeader("x-fb-debug", "test"));

  unique_ptr<IOBuf> encoded = encoder.encode(headers);
  unique_ptr<IOBuf> first = IOBuf::create(128);
  // set a trap for indexed header and don't call append
  first->writableData()[0] = HPACK::HeaderEncoding::INDEXED;
  first->appendChain(std::move(encoded));
  auto decoded = decoder.decode(first.get());

  EXPECT_FALSE(decoder.hasError());
  EXPECT_EQ(*decoded, headers);
}

/**
 * similar with the one above, but slightly different code paths
 */
TEST_F(HPACKContextTests, decoder_invalid_literal_peek) {
  HPACKEncoder09 encoder(true);
  HPACKDecoder09 decoder;
  vector<HPACKHeader> headers;
  headers.push_back(HPACKHeader("x-fb-random", "bla"));
  unique_ptr<IOBuf> encoded = encoder.encode(headers);

  unique_ptr<IOBuf> first = IOBuf::create(128);
  first->writableData()[0] = 0x3F;  // trap byte in the zero-length head buffer
  first->appendChain(std::move(encoded));
  auto decoded = decoder.decode(first.get());

  EXPECT_FALSE(decoder.hasError());
  EXPECT_EQ(*decoded, headers);
}

/**
 * testing various error cases in HPACKDecoder::decodeLiterHeader()
 */
// Decode `buf` with a fresh decoder and expect exactly `err`.
void checkError(const IOBuf* buf, const HPACK::DecodeError err) {
  HPACKDecoder09 decoder;
  auto decoded = decoder.decode(buf);
  EXPECT_TRUE(decoder.hasError());
  EXPECT_EQ(decoder.getError(), err);
}

TEST_F(HPACKContextTests, decode_errors) {
  unique_ptr<IOBuf> buf = IOBuf::create(128);

  // 1. simulate an error decoding the index for an indexed header name
  // we try to encode index 65
  buf->writableData()[0] = 0x3F;
  buf->append(1);  // intentionally omit the second byte
  checkError(buf.get(), HPACK::DecodeError::BUFFER_UNDERFLOW);

  // 2. invalid index for indexed header name
  buf->writableData()[0] = 0x7F;
  buf->writableData()[1] = 0xFF;
  buf->writableData()[2] = 0x7F;
  buf->append(2);
  checkError(buf.get(), HPACK::DecodeError::INVALID_INDEX);

  // 3. buffer overflow when decoding literal header name
  buf->writableData()[0] = 0x00;  // this will activate the non-indexed branch
  checkError(buf.get(), HPACK::DecodeError::BUFFER_UNDERFLOW);

  // 4. buffer overflow when decoding a header value
  // size for header name size and the actual header name
  buf->writableData()[1] = 0x01;
  buf->writableData()[2] = 'h';
  checkError(buf.get(), HPACK::DecodeError::BUFFER_UNDERFLOW);

  // 5. buffer overflow decoding the index of an indexed header
  buf->writableData()[0] = 0xFF;  // first bit is 1 to mark indexed header
  buf->writableData()[1] = 0x80;  // first bit is 1 to continue the
                                  // variable-length encoding
  buf->writableData()[2] = 0x80;
  checkError(buf.get(), HPACK::DecodeError::BUFFER_UNDERFLOW);

  // 6. Increase the table size
  buf->writableData()[0] = 0x3F;
  buf->writableData()[1] = 0xFF;
  buf->writableData()[2] = 0x7F;
  checkError(buf.get(), HPACK::DecodeError::INVALID_TABLE_SIZE);

  // 7. integer overflow decoding the index of an indexed header
  buf->writableData()[0] = 0xFF;  // first bit is 1 to mark indexed header
  buf->writableData()[1] = 0xFF;
  buf->writableData()[2] = 0xFF;
  buf->writableData()[3] = 0xFF;
  buf->writableData()[4] = 0xFF;
  buf->writableData()[5] = 0x7F;
  buf->append(3);
  checkError(buf.get(), HPACK::DecodeError::INTEGER_OVERFLOW);
}

// Encoder announces a larger table; decode succeeds only if the decoder's max
// size was raised to match (the bool test parameter).
TEST_P(HPACKContextTests, contextUpdate) {
  HPACKEncoder09 encoder(true);
  HPACKDecoder09 decoder;
  vector<HPACKHeader> headers;
  bool setDecoderSize = GetParam();
  encoder.setHeaderTableSize(8192);
  if (setDecoderSize) {
    decoder.setHeaderTableMaxSize(8192);
  }
  headers.push_back(HPACKHeader("x-fb-random", "bla"));
  unique_ptr<IOBuf> encoded = encoder.encode(headers);

  unique_ptr<IOBuf> first = IOBuf::create(128);
  first->appendChain(std::move(encoded));
  auto decoded = decoder.decode(first.get());

  EXPECT_EQ(decoder.hasError(), !setDecoderSize);
  if (setDecoderSize) {
    EXPECT_EQ(*decoded, headers);
  } else {
    EXPECT_EQ(decoder.getError(), HPACK::DecodeError::INVALID_TABLE_SIZE);
  }
}

INSTANTIATE_TEST_CASE_P(Context,
                        HPACKContextTests,
                        ::testing::Values(true, false));
#include "RecoEgamma/EgammaPhotonAlgos/interface/PhotonEnergyCorrector.h"
#include "RecoEcal/EgammaCoreTools/interface/EcalClusterFunctionFactory.h"
#include "Geometry/Records/interface/CaloGeometryRecord.h"
#include "Geometry/CaloGeometry/interface/CaloSubdetectorGeometry.h"
#include "Geometry/CaloTopology/interface/CaloTopology.h"
#include "Geometry/CaloEventSetup/interface/CaloTopologyRecord.h"
#include "DataFormats/EcalDetId/interface/EEDetId.h"
#include "DataFormats/EcalDetId/interface/EBDetId.h"
#include "RecoEgamma/EgammaPhotonAlgos/interface/EnergyUncertaintyPhotonSpecific.h"

// Reads the photon-energy-correction configuration and books every helper
// this corrector needs: the f(eta)/crack/error cluster-correction functions,
// the photon-specific uncertainty calculator, the (optional) GED regression
// modifier and the EGEnergyCorrector used for the "regression1" energy.
// The ConsumesCollector registers the EB/EE rechit collections read later
// by EcalClusterLazyTools in calculate().
PhotonEnergyCorrector::PhotonEnergyCorrector(const edm::ParameterSet& config, edm::ConsumesCollector&& iC)
    : ecalClusterToolsESGetTokens_{std::move(iC)} {
  minR9Barrel_ = config.getParameter<double>("minR9Barrel");
  minR9Endcap_ = config.getParameter<double>("minR9Endcap");

  // Rechit collections (kept both as raw InputTags and as consume tokens).
  barrelEcalHits_ = config.getParameter<edm::InputTag>("barrelEcalHits");
  endcapEcalHits_ = config.getParameter<edm::InputTag>("endcapEcalHits");
  barrelEcalHitsToken_ = iC.consumes<EcalRecHitCollection>(config.getParameter<edm::InputTag>("barrelEcalHits"));
  endcapEcalHitsToken_ = iC.consumes<EcalRecHitCollection>(config.getParameter<edm::InputTag>("endcapEcalHits"));

  // candidateP4type_ = config.getParameter<std::string>("candidateP4type") ;

  // function to extract f(eta) correction
  std::string superClusterFunctionName = config.getParameter<std::string>("superClusterEnergyCorrFunction");
  scEnergyFunction_ = EcalClusterFunctionFactory::get()->create(superClusterFunctionName, config);

  // function to extract corrections to cracks
  std::string superClusterCrackFunctionName = config.getParameter<std::string>("superClusterCrackEnergyCorrFunction");
  scCrackEnergyFunction_ = EcalClusterFunctionFactory::get()->create(superClusterCrackFunctionName, config);

  // function to extract the error on the sc ecal correction
  std::string superClusterErrorFunctionName = config.getParameter<std::string>("superClusterEnergyErrorFunction");
  scEnergyErrorFunction_ = EcalClusterFunctionFactory::get()->create(superClusterErrorFunctionName, config);

  // function to extract the error on the photon ecal correction
  std::string photonEnergyFunctionName = config.getParameter<std::string>("photonEcalEnergyCorrFunction");
  photonEcalEnergyCorrFunction_ = EcalClusterFunctionFactory::get()->create(photonEnergyFunctionName, config);

  // ingredient for photon uncertainty
  photonUncertaintyCalculator_ = std::make_unique<EnergyUncertaintyPhotonSpecific>(config);

  // Optional GED regression-based modifier (fills the regression2 slot).
  if (config.existsAs<edm::ParameterSet>("regressionConfig")) {
    const edm::ParameterSet& regr_conf = config.getParameterSet("regressionConfig");
    const std::string& mname = regr_conf.getParameter<std::string>("modifierName");
    gedRegression_ = ModifyObjectValueFactory::get()->create(mname, regr_conf, iC);
  }

  // ingredient for energy regression: weights come either from the DB or
  // from a local file; "none" disables the file-based path.
  weightsfromDB_ = config.getParameter<bool>("regressionWeightsFromDB");
  w_file_ = config.getParameter<std::string>("energyRegressionWeightsFileLocation");
  if (weightsfromDB_)
    w_db_ = config.getParameter<std::string>("energyRegressionWeightsDBLocation");
  else
    w_db_ = "none";
  regressionCorrector_ = std::make_unique<EGEnergyCorrector>();
}

// Per-run/IOV initialization: caches the calo geometry, initializes all
// cluster-correction functions and the uncertainty calculator, and (lazily)
// loads the regression weights from the DB or from the configured file.
void PhotonEnergyCorrector::init(const edm::EventSetup& theEventSetup) {
  theEventSetup.get<CaloGeometryRecord>().get(theCaloGeom_);

  scEnergyFunction_->init(theEventSetup);
  scCrackEnergyFunction_->init(theEventSetup);
  scEnergyErrorFunction_->init(theEventSetup);
  photonEcalEnergyCorrFunction_->init(theEventSetup);

  if (weightsfromDB_) {
    if (!regressionCorrector_->IsInitialized())
      regressionCorrector_->Initialize(theEventSetup, w_db_, weightsfromDB_);
  }
  if (!weightsfromDB_ && !(w_file_ == "none")) {
    if (!regressionCorrector_->IsInitialized())
      regressionCorrector_->Initialize(theEventSetup, w_file_, weightsfromDB_);
  }

  photonUncertaintyCalculator_->init(theEventSetup);
}

// Computes and stores up to three corrected energies on the photon:
//   - ecal_standard: default electron-style ECAL correction,
//   - ecal_photons:  photon-specific correction (crack + uncertainty),
//   - regression1:   regression-based energy (mirrored from regression2
//                    when the GED modifier is configured).
// High-R9 photons (r9 > minR9 for the given subdetector) use the f(eta)
// corrected e5x5 + preshower energy; low-R9 photons use dedicated
// correction functions.
void PhotonEnergyCorrector::calculate(edm::Event& evt,
                                      reco::Photon& thePhoton,
                                      int subdet,
                                      const reco::VertexCollection& vtxcol,
                                      const edm::EventSetup& iSetup) {
  double phoEcalEnergy = -9999.;
  double phoEcalEnergyError = -9999.;
  double phoRegr1Energy = -9999.;
  double phoRegr1EnergyError = -9999.;

  // NOTE(review): the return value of this call is discarded, so it has no
  // visible effect here — looks like leftover code; confirm before removing.
  theCaloGeom_->getSubdetectorGeometry(DetId::Ecal, subdet);

  // R9 threshold depends on the subdetector (barrel vs endcap).
  double minR9 = 0;
  if (subdet == EcalBarrel) {
    minR9 = minR9Barrel_;
  } else if (subdet == EcalEndcap) {
    minR9 = minR9Endcap_;
  }

  EcalClusterLazyTools lazyTools(
      evt, ecalClusterToolsESGetTokens_.get(iSetup), barrelEcalHitsToken_, endcapEcalHitsToken_);

  ////////////// Here default Ecal corrections based on electrons ////////////////////////
  if (thePhoton.r9() > minR9) {
    // f(eta) correction to e5x5 (applied in the barrel only)
    double deltaE = scEnergyFunction_->getValue(*(thePhoton.superCluster()), 1);
    float e5x5 = thePhoton.e5x5();
    if (subdet == EcalBarrel)
      e5x5 = e5x5 * (1.0 + deltaE / thePhoton.superCluster()->rawEnergy());
    phoEcalEnergy = e5x5 + thePhoton.superCluster()->preshowerEnergy();
  } else {
    phoEcalEnergy = thePhoton.superCluster()->energy();
  }
  // store the value in the Photon.h
  thePhoton.setCorrectedEnergy(reco::Photon::ecal_standard, phoEcalEnergy, phoEcalEnergyError, false);

  ////////////// Here Ecal corrections specific for photons ////////////////////////
  if (thePhoton.r9() > minR9) {
    // f(eta) correction to e5x5 (applied in the barrel only)
    double deltaE = scEnergyFunction_->getValue(*(thePhoton.superCluster()), 1);
    float e5x5 = thePhoton.e5x5();
    if (subdet == EcalBarrel)
      e5x5 = e5x5 * (1.0 + deltaE / thePhoton.superCluster()->rawEnergy());
    phoEcalEnergy = e5x5 + thePhoton.superCluster()->preshowerEnergy();
    // add correction for cracks
    phoEcalEnergy *= scCrackEnergyFunction_->getValue(*(thePhoton.superCluster()));
    phoEcalEnergyError = photonUncertaintyCalculator_->computePhotonEnergyUncertainty_highR9(
        thePhoton.superCluster()->eta(),
        thePhoton.superCluster()->phiWidth() / thePhoton.superCluster()->etaWidth(),
        phoEcalEnergy);
  } else {
    // correction for low r9: dedicated energy function plus per-basic-cluster
    // crack correction.
    phoEcalEnergy = photonEcalEnergyCorrFunction_->getValue(*(thePhoton.superCluster()), 1);
    phoEcalEnergy *= applyCrackCorrection(*(thePhoton.superCluster()), scCrackEnergyFunction_.get());
    phoEcalEnergyError = photonUncertaintyCalculator_->computePhotonEnergyUncertainty_lowR9(
        thePhoton.superCluster()->eta(),
        thePhoton.superCluster()->phiWidth() / thePhoton.superCluster()->etaWidth(),
        phoEcalEnergy);
  }
  // store the value in the Photon.h
  thePhoton.setCorrectedEnergy(reco::Photon::ecal_photons, phoEcalEnergy, phoEcalEnergyError, false);

  ////////// Energy Regression //////////////////////
  //
  if ((weightsfromDB_ && !gedRegression_) || (!weightsfromDB_ && !(w_file_ == "none"))) {
    std::pair<double, double> cor = regressionCorrector_->CorrectedEnergyWithError(thePhoton, vtxcol, lazyTools, iSetup);
    phoRegr1Energy = cor.first;
    phoRegr1EnergyError = cor.second;
    // store the value in the Photon.h
    thePhoton.setCorrectedEnergy(reco::Photon::regression1, phoRegr1Energy, phoRegr1EnergyError, false);
  }

  if (gedRegression_) {
    gedRegression_->modifyObject(thePhoton);  // uses regression2 slot
    // force regresions1 and 2 to be the same (no reason to be different)
    thePhoton.setCorrectedEnergy(reco::Photon::regression1,
                                 thePhoton.getCorrectedEnergy(reco::Photon::regression2),
                                 thePhoton.getCorrectedEnergyError(reco::Photon::regression2),
                                 false);
  }
  // (Verbose per-photon debug printout removed; re-add locally if needed.)
}

// Multiplicative crack correction for low-R9 photons: folds each basic
// cluster's crack-correction factor into a single supercluster-level factor,
// weighting every cluster by its energy fraction of the raw SC energy.
double PhotonEnergyCorrector::applyCrackCorrection(const reco::SuperCluster& cl,
                                                   EcalClusterFunctionBaseClass* crackCorrectionFunction) {
  double crackcor = 1.;
  for (reco::CaloCluster_iterator cIt = cl.clustersBegin(); cIt != cl.clustersEnd(); ++cIt) {
    const reco::CaloClusterPtr cc = *cIt;
    crackcor *= ((cl.rawEnergy() + cc->energy() * (crackCorrectionFunction->getValue(*cc) - 1.)) / cl.rawEnergy());
  }  // loop on BCs
  return crackcor;
}
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/profiler/utils/xplane_schema.h"

#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/utils/tf_op_utils.h"

namespace tensorflow {
namespace profiler {

// Well-known XPlane names (one plane per host/device trace source).
const absl::string_view kHostThreadsPlaneName = "/host:CPU";
const absl::string_view kGpuPlanePrefix = "/device:GPU:";
const absl::string_view kTpuPlanePrefix = "/device:TPU:";
// TODO(b/195582092): change it to /device:custom once all literals are
// migrated.
const absl::string_view kCustomPlanePrefix = "/device:CUSTOM:";
const absl::string_view kTpuRuntimePlaneName = "/host:TPU-runtime";
const absl::string_view kCuptiDriverApiPlaneName = "/host:CUPTI";
const absl::string_view kRoctracerApiPlaneName = "/host:ROCTRACER";
const absl::string_view kMetadataPlaneName = "/host:metadata";
const absl::string_view kTFStreamzPlaneName = "/host:tfstreamz";
const absl::string_view kPythonTracerPlaneName = "/host:python-tracer";

// Well-known XLine (timeline row) display names.
const absl::string_view kStepLineName = "Steps";
const absl::string_view kTensorFlowNameScopeLineName = "TensorFlow Name Scope";
const absl::string_view kTensorFlowOpLineName = "TensorFlow Ops";
const absl::string_view kXlaModuleLineName = "XLA Modules";
const absl::string_view kXlaOpLineName = "XLA Ops";
const absl::string_view kKernelLaunchLineName = "Launch Stats";
const absl::string_view kSourceLineName = "Source code";

const absl::string_view kDeviceVendorNvidia = "Nvidia";
const absl::string_view kDeviceVendorAMD = "AMD";

namespace {

// Expected table sizes, derived from the enum ranges in xplane_schema.h;
// used below to DCHECK that every enum value has exactly one string entry.
constexpr int kNumHostEventTypes =
    HostEventType::kLastHostEventType - HostEventType::kFirstHostEventType + 1;

constexpr int kNumStatTypes =
    StatType::kLastStatType - StatType::kFirstStatType + 1;

using HostEventTypeMap = absl::flat_hash_map<absl::string_view, HostEventType>;
using HostEventTypeStrMap =
    absl::flat_hash_map<HostEventType, absl::string_view>;
using StatTypeMap = absl::flat_hash_map<absl::string_view, StatType>;
using StatTypeStrMap = absl::flat_hash_map<StatType, absl::string_view>;

// Maps host-event display names to HostEventType enum values.  Built once on
// first use and intentionally leaked (heap-allocated static) so there is no
// destruction-order problem at shutdown.
const HostEventTypeMap& GetHostEventTypeMap() {
  static auto* host_event_type_map = new HostEventTypeMap({
      {"UnknownHostEventType", kUnknownHostEventType},
      {"TraceContext", kTraceContext},
      {"SessionRun", kSessionRun},
      {"FunctionRun", kFunctionRun},
      {"RunGraph", kRunGraph},
      {"RunGraphDone", kRunGraphDone},
      {"TfOpRun", kTfOpRun},
      {"EagerExecute", kEagerKernelExecute},
      {"ExecutorState::Process", kExecutorStateProcess},
      {"ExecutorDoneCallback", kExecutorDoneCallback},
      {"MemoryAllocation", kMemoryAllocation},
      {"MemoryDeallocation", kMemoryDeallocation},
      // Performance counter related.
      {"RemotePerfCounter", kRemotePerf},
      // tf data captured function events.
      {"InstantiatedCapturedFunction::Run", kTfDataCapturedFunctionRun},
      {"InstantiatedCapturedFunction::RunWithBorrowedArgs",
       kTfDataCapturedFunctionRunWithBorrowedArgs},
      {"InstantiatedCapturedFunction::RunInstantiated",
       kTfDataCapturedFunctionRunInstantiated},
      {"InstantiatedCapturedFunction::RunAsync",
       kTfDataCapturedFunctionRunAsync},
      // Loop ops.
      {"ParallelForOp", kParallelForOp},
      {"ForeverOp", kForeverOp},
      {"WhileOp-EvalCond", kWhileOpEvalCond},
      {"WhileOp-StartBody", kWhileOpStartBody},
      {"ForOp", kForOp},
      // tf.data related.
      {"IteratorGetNextOp::DoCompute", kIteratorGetNextOp},
      {"IteratorGetNextAsOptionalOp::DoCompute", kIteratorGetNextAsOptionalOp},
      {"Iterator", kIterator},
      {"Iterator::Prefetch::Generator", kDeviceInputPipelineSecondIterator},
      {"PrefetchProduce", kPrefetchProduce},
      {"PrefetchConsume", kPrefetchConsume},
      {"ParallelInterleaveProduce", kParallelInterleaveProduce},
      {"ParallelInterleaveConsume", kParallelInterleaveConsume},
      {"ParallelInterleaveInitializeInput",
       kParallelInterleaveInitializedInput},
      {"ParallelMapProduce", kParallelMapProduce},
      {"ParallelMapConsume", kParallelMapConsume},
      {"MapAndBatchProduce", kMapAndBatchProduce},
      {"MapAndBatchConsume", kMapAndBatchConsume},
      {"ParseExampleProduce", kParseExampleProduce},
      {"ParseExampleConsume", kParseExampleConsume},
      {"ParallelBatchProduce", kParallelBatchProduce},
      {"ParallelBatchConsume", kParallelBatchConsume},
      // Batching related.
      {"BatchingSessionRun", kBatchingSessionRun},
      {"ProcessBatch", kProcessBatch},
      {"ConcatInputTensors", kConcatInputTensors},
      {"MergeInputTensors", kMergeInputTensors},
      {"ScheduleWithoutSplit", kScheduleWithoutSplit},
      {"ScheduleWithSplit", kScheduleWithSplit},
      {"ScheduleWithEagerSplit", kScheduleWithEagerSplit},
      {"ASBSQueue::Schedule", kASBSQueueSchedule},
      // TFRT related.
      {"TfrtModelRun", kTfrtModelRun},
      // JAX related.
      {"LocalExecutable::ExecuteOnLocalDevices", kExecuteOnLocalDevices},
      // GPU related.
      {"KernelLaunch", kKernelLaunch},
      {"KernelExecute", kKernelExecute},
      // TPU related.
      {"EnqueueRequestLocked", kEnqueueRequestLocked},
      {"RunProgramRequest", kRunProgramRequest},
      {"HostCallbackRequest", kHostCallbackRequest},
      {"TransferH2DRequest", kTransferH2DRequest},
      {"TransferPreprocessedH2DRequest", kTransferPreprocessedH2DRequest},
      {"TransferD2HRequest", kTransferD2HRequest},
      {"OnDeviceSendRequest", kOnDeviceSendRequest},
      {"OnDeviceRecvRequest", kOnDeviceRecvRequest},
      {"OnDeviceSendRecvLocalRequest", kOnDeviceSendRecvLocalRequest},
      {"CustomWait", kCustomWait},
      {"OnDeviceSendRequestMulti", kOnDeviceSendRequestMulti},
      {"OnDeviceRecvRequestMulti", kOnDeviceRecvRequestMulti},
      {"PjrtAsyncWait", kPjrtAsyncWait},
      {"DoEnqueueProgram", kDoEnqueueProgram},
      {"DoEnqueueContinuationProgram", kDoEnqueueContinuationProgram},
      {"WriteHbm", kWriteHbm},
      {"ReadHbm", kReadHbm},
      {"TpuExecuteOp", kTpuExecuteOp},
      {"CompleteCallbacks", kCompleteCallbacks},
      {"TPUPartitionedCallOp-InitializeVarOnTPU",
       kTpuPartitionedCallOpInitializeVarOnTpu},
      {"TPUPartitionedCallOp-ExecuteRemote",
       kTpuPartitionedCallOpExecuteRemote},
      {"TPUPartitionedCallOp-ExecuteLocal",
       kTpuPartitionedCallOpExecuteLocal},
      {"Linearize", kLinearize},
      {"Delinearize", kDelinearize},
      {"TransferBufferFromDevice-FastPath", kTransferBufferFromDeviceFastPath},
      {"tpu::System::TransferToDevice=>IssueEvent",
       kTransferToDeviceIssueEvent},
      {"tpu::System::TransferToDevice=>IssueEvent=>Done",
       kTransferToDeviceDone},
      {"tpu::System::TransferFromDevice=>IssueEvent",
       kTransferFromDeviceIssueEvent},
      {"tpu::System::TransferFromDevice=>IssueEvent=>Done",
       kTransferFromDeviceDone},
      {"tpu::System::Execute", kTpuSystemExecute},
  });
  // Every HostEventType enum value must appear exactly once above.
  DCHECK_EQ(host_event_type_map->size(), kNumHostEventTypes);
  return *host_event_type_map;
}

// Maps stat display names to StatType enum values.  Same once-built,
// intentionally-leaked pattern as GetHostEventTypeMap().
const StatTypeMap& GetStatTypeMap() {
  static auto* stat_type_map = new StatTypeMap({
      {"UnknownStatType", kUnknownStatType},
      // TraceMe arguments.
      {"id", kStepId},
      {"device_ordinal", kDeviceOrdinal},
      {"chip_ordinal", kChipOrdinal},
      {"node_ordinal", kNodeOrdinal},
      {"model_id", kModelId},
      {"queue_addr", kQueueAddr},
      {"queue_id", kQueueId},
      {"request_id", kRequestId},
      {"run_id", kRunId},
      {"replica_id", kReplicaId},
      {"graph_type", kGraphType},
      {"step_num", kStepNum},
      {"iter_num", kIterNum},
      {"index_on_host", kIndexOnHost},
      {"allocator_name", kAllocatorName},
      {"bytes_reserved", kBytesReserved},
      {"bytes_allocated", kBytesAllocated},
      {"bytes_available", kBytesAvailable},
      {"fragmentation", kFragmentation},
      {"peak_bytes_in_use", kPeakBytesInUse},
      {"requested_bytes", kRequestedBytes},
      {"allocation_bytes", kAllocationBytes},
      {"addr", kAddress},
      {"region_type", kRegionType},
      {"data_type", kDataType},
      {"shape", kTensorShapes},
      {"layout", kTensorLayout},
      {"kpi_name", kKpiName},
      {"kpi_value", kKpiValue},
      {"element_id", kElementId},
      {"parent_id", kParentId},
      // XPlane semantics related.
      {"_pt", kProducerType},
      {"_ct", kConsumerType},
      {"_p", kProducerId},
      {"_c", kConsumerId},
      {"_r", kIsRoot},
      {"_a", kIsAsync},
      // Device trace arguments.
      {"device_id", kDeviceId},
      {"context_id", kContextId},
      {"correlation_id", kCorrelationId},
      {"memcpy_details", kMemcpyDetails},
      {"memalloc_details", kMemallocDetails},
      {"MemFree_details", kMemFreeDetails},
      {"Memset_details", kMemsetDetails},
      {"MemoryResidency_details", kMemoryResidencyDetails},
      {"kernel_details", kKernelDetails},
      {"nvtx_range", kNVTXRange},
      {"stream", kStream},
      // Stats added when processing traces.
      {"group_id", kGroupId},
      {"flow", kFlow},
      {"step_name", kStepName},
      {"tf_op", kTfOp},
      {"hlo_op", kHloOp},
      {"hlo_category", kHloCategory},
      {"hlo_module", kHloModule},
      {"program_id", kProgramId},
      {"equation", kEquation},
      {"is_eager", kIsEager},
      {"is_func", kIsFunc},
      {"tf_function_call", kTfFunctionCall},
      {"tracing_count", kTfFunctionTracingCount},
      {"flops", kFlops},
      {"bytes_accessed", kBytesAccessed},
      {"source", kSourceInfo},
      {"model_name", kModelName},
      {"model_version", kModelVersion},
      {"bytes_transferred", kBytesTransferred},
      {"queue", kDmaQueue},
      // Performance counter related.
      {"Raw Value", kRawValue},
      {"Scaled Value", kScaledValue},
      {"Thread Id", kThreadId},
      {"matrix_unit_utilization_percent", kMatrixUnitUtilizationPercent},
      // XLA metadata map related.
      {"Hlo Proto", kHloProto},
      // Device capability related.
      {"clock_rate", kDevCapClockRateKHz},
      {"core_count", kDevCapCoreCount},
      {"memory_bandwidth", kDevCapMemoryBandwidth},
      {"memory_size", kDevCapMemorySize},
      {"compute_cap_major", kDevCapComputeCapMajor},
      {"compute_cap_minor", kDevCapComputeCapMinor},
      {"peak_teraflops_per_second", kDevCapPeakTeraflopsPerSecond},
      {"peak_hbm_bw_gigabytes_per_second", kDevCapPeakHbmBwGigabytesPerSecond},
      {"device_vendor", kDevVendor},
      // Batching related.
      {"batch_size_after_padding", kBatchSizeAfterPadding},
      {"padding_amount", kPaddingAmount},
      {"batching_input_task_size", kBatchingInputTaskSize},
      // GPU related metrics.
      {"theoretical_occupancy_pct", kTheoreticalOccupancyPct},
      {"occupancy_min_grid_size", kOccupancyMinGridSize},
      {"occupancy_suggested_block_size", kOccupancySuggestedBlockSize},
  });
  // Every StatType enum value must appear exactly once above.
  DCHECK_EQ(stat_type_map->size(), kNumStatTypes);
  return *stat_type_map;
}

// Inverse of GetHostEventTypeMap(): enum value -> display name.
const HostEventTypeStrMap& GetHostEventTypeStrMap() {
  static auto* host_event_type_str_map = new HostEventTypeStrMap(
      gtl::ReverseMap<HostEventTypeStrMap>(GetHostEventTypeMap()));
  return *host_event_type_str_map;
}

// Inverse of GetStatTypeMap(): enum value -> display name.
const StatTypeStrMap& GetStatTypeStrMap() {
  static auto* stat_type_str_map =
      new StatTypeStrMap(gtl::ReverseMap<StatTypeStrMap>(GetStatTypeMap()));
  return *stat_type_str_map;
}

}  // namespace

// Returns the display name for a host event type; crashes (map .at) on an
// enum value missing from the table.
absl::string_view GetHostEventTypeStr(HostEventType event_type) {
  return GetHostEventTypeStrMap().at(event_type);
}

// Looks up a host event type by display name; nullopt if unknown.
absl::optional<int64_t> FindHostEventType(absl::string_view event_name) {
  if (auto event_type = gtl::FindOrNull(GetHostEventTypeMap(), event_name)) {
    return *event_type;
  }
  return absl::nullopt;
}

// Classifies a full TF op name into the generic event type for its category
// (TensorFlow op vs tf.data iterator); nullopt for other categories.
absl::optional<int64_t> FindTfOpEventType(absl::string_view event_name) {
  // TF op names.
  Category category = ParseTfOpFullname(event_name).category;
  switch (category) {
    case Category::kTensorFlow:
      return HostEventType::kTfOpRun;
    case Category::kTfData:
      return HostEventType::kIterator;
    default:
      return absl::nullopt;
  }
}

// Returns the display name for a stat type; crashes (map .at) on an enum
// value missing from the table.
absl::string_view GetStatTypeStr(StatType stat_type) {
  return GetStatTypeStrMap().at(stat_type);
}

// Looks up a stat type by display name; nullopt if unknown.
absl::optional<int64_t> FindStatType(absl::string_view stat_name) {
  if (auto stat_type = gtl::FindOrNull(GetStatTypeMap(), stat_name)) {
    return *stat_type;
  }
  return absl::nullopt;
}

// True for event types that are profiler bookkeeping rather than user-visible
// activity (memory events, tf.data produce/consume pairs).
bool IsInternalEvent(absl::optional<int64_t> event_type) {
  // TODO(b/162102421): Introduce a prefix for internal event names.
  if (!event_type.has_value()) return false;
  switch (*event_type) {
    case HostEventType::kMemoryAllocation:
    case HostEventType::kMemoryDeallocation:
    case HostEventType::kPrefetchProduce:
    case HostEventType::kPrefetchConsume:
    case HostEventType::kParallelInterleaveProduce:
    case HostEventType::kParallelInterleaveConsume:
    case HostEventType::kParallelInterleaveInitializedInput:
    case HostEventType::kParallelMapProduce:
    case HostEventType::kParallelMapConsume:
    case HostEventType::kMapAndBatchProduce:
    case HostEventType::kMapAndBatchConsume:
    case HostEventType::kParseExampleProduce:
    case HostEventType::kParseExampleConsume:
      return true;
    default:
      return false;
  }
}

// True for stat types used internally for grouping/flow analysis that should
// not be surfaced to users.
bool IsInternalStat(absl::optional<int64_t> stat_type) {
  // TODO(b/162102421): Introduce a prefix for internal stat names.
  if (!stat_type.has_value()) return false;
  switch (*stat_type) {
    case StatType::kKernelDetails:
    case StatType::kProducerType:
    case StatType::kProducerId:
    case StatType::kConsumerType:
    case StatType::kConsumerId:
    case StatType::kIsRoot:
    case StatType::kFlops:
    case StatType::kBytesAccessed:
      return true;
    default:
      return false;
  }
}

// Process-wide monotonically increasing id for XFlow connections.
/*static*/ std::atomic<uint64_t> XFlow::next_flow_id_(0);

}  // namespace profiler
}  // namespace tensorflow
#include "ofApp.h"
#include "ofMain.h"

//========================================================================
// Application entry point: creates the GL context at a fixed portrait
// resolution and hands control to the openFrameworks run loop.
int main() {
  // Window dimensions (could also launch with OF_FULLSCREEN instead
  // of OF_WINDOW).
  constexpr int kWindowWidth = 768;
  constexpr int kWindowHeight = 1024;

  ofSetupOpenGL(kWindowWidth, kWindowHeight, OF_WINDOW);

  // Start the app; openFrameworks takes ownership of the instance and
  // runs setup/update/draw until the window closes.
  ofRunApp(new ofApp());
}
// -*- C++ -*- // // $Id: Msg_WFMO_Reactor.inl 80826 2008-03-04 14:51:23Z wotte $ #if defined (ACE_WIN32) && !defined (ACE_LACKS_MSG_WFMO) ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_INLINE int ACE_Msg_WFMO_Reactor::handle_events (ACE_Time_Value &how_long) { return this->event_handling (&how_long, 0); } ACE_INLINE int ACE_Msg_WFMO_Reactor::alertable_handle_events (ACE_Time_Value &how_long) { return this->event_handling (&how_long, MWMO_ALERTABLE); } ACE_INLINE int ACE_Msg_WFMO_Reactor::handle_events (ACE_Time_Value *how_long) { return this->event_handling (how_long, 0); } ACE_INLINE int ACE_Msg_WFMO_Reactor::alertable_handle_events (ACE_Time_Value *how_long) { return this->event_handling (how_long, MWMO_ALERTABLE); } ACE_END_VERSIONED_NAMESPACE_DECL #endif /* ACE_WIN32 && !ACE_LACKS_MSG_WFMO */
// Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2012 The Bitcoin developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "irc.h" #include "db.h" #include "net.h" #include "init.h" #include "addrman.h" #include "ui_interface.h" #ifdef WIN32 #include <string.h> #endif #ifdef USE_UPNP #include <miniupnpc/miniwget.h> #include <miniupnpc/miniupnpc.h> #include <miniupnpc/upnpcommands.h> #include <miniupnpc/upnperrors.h> #endif #if !defined(HAVE_MSG_NOSIGNAL) #define MSG_NOSIGNAL 0 #endif using namespace std; using namespace boost; static const int MAX_OUTBOUND_CONNECTIONS = 25; void ThreadMessageHandler2(void* parg); void ThreadSocketHandler2(void* parg); void ThreadOpenConnections2(void* parg); void ThreadOpenAddedConnections2(void* parg); #ifdef USE_UPNP void ThreadMapPort2(void* parg); #endif void ThreadDNSAddressSeed2(void* parg); bool OpenNetworkConnection(const CAddress& addrConnect, CSemaphoreGrant *grantOutbound = NULL, const char *strDest = NULL, bool fOneShot = false); struct LocalServiceInfo { int nScore; int nPort; }; // // Global state variables // bool fClient = false; bool fDiscover = true; bool fUseUPnP = false; uint64 nLocalServices = (fClient ? 
0 : NODE_NETWORK); static CCriticalSection cs_mapLocalHost; static map<CNetAddr, LocalServiceInfo> mapLocalHost; static bool vfReachable[NET_MAX] = {}; static bool vfLimited[NET_MAX] = {}; static CNode* pnodeLocalHost = NULL; CAddress addrSeenByPeer(CService("0.0.0.0", 0), nLocalServices); uint64 nLocalHostNonce = 0; boost::array<int, THREAD_MAX> vnThreadsRunning; static std::vector<SOCKET> vhListenSocket; CAddrMan addrman; vector<CNode*> vNodes; CCriticalSection cs_vNodes; map<CInv, CDataStream> mapRelay; deque<pair<int64, CInv> > vRelayExpiration; CCriticalSection cs_mapRelay; map<CInv, int64> mapAlreadyAskedFor; static deque<string> vOneShots; CCriticalSection cs_vOneShots; set<CNetAddr> setservAddNodeAddresses; CCriticalSection cs_setservAddNodeAddresses; vector<std::string> vAddedNodes; CCriticalSection cs_vAddedNodes; static CSemaphore *semOutbound = NULL; void AddOneShot(string strDest) { LOCK(cs_vOneShots); vOneShots.push_back(strDest); } unsigned short GetListenPort() { return (unsigned short)(GetArg("-port", GetDefaultPort())); } void CNode::PushGetBlocks(CBlockIndex* pindexBegin, uint256 hashEnd) { // Filter out duplicate requests if (pindexBegin == pindexLastGetBlocksBegin && hashEnd == hashLastGetBlocksEnd) return; pindexLastGetBlocksBegin = pindexBegin; hashLastGetBlocksEnd = hashEnd; PushMessage("getblocks", CBlockLocator(pindexBegin), hashEnd); } // find 'best' local address for a particular peer bool GetLocal(CService& addr, const CNetAddr *paddrPeer) { if (fNoListen) return false; int nBestScore = -1; int nBestReachability = -1; { LOCK(cs_mapLocalHost); for (map<CNetAddr, LocalServiceInfo>::iterator it = mapLocalHost.begin(); it != mapLocalHost.end(); it++) { int nScore = (*it).second.nScore; int nReachability = (*it).first.GetReachabilityFrom(paddrPeer); if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) { addr = CService((*it).first, (*it).second.nPort); nBestReachability = nReachability; 
nBestScore = nScore; } } } return nBestScore >= 0; } // get best local address for a particular peer as a CAddress CAddress GetLocalAddress(const CNetAddr *paddrPeer) { CAddress ret(CService("0.0.0.0",0),0); CService addr; if (GetLocal(addr, paddrPeer)) { ret = CAddress(addr); ret.nServices = nLocalServices; ret.nTime = GetAdjustedTime(); } return ret; } bool RecvLine(SOCKET hSocket, string& strLine) { strLine = ""; while (true) { char c; int nBytes = recv(hSocket, &c, 1, 0); if (nBytes > 0) { if (c == '\n') continue; if (c == '\r') return true; strLine += c; if (strLine.size() >= 9000) return true; } else if (nBytes <= 0) { if (fShutdown) return false; if (nBytes < 0) { int nErr = WSAGetLastError(); if (nErr == WSAEMSGSIZE) continue; if (nErr == WSAEWOULDBLOCK || nErr == WSAEINTR || nErr == WSAEINPROGRESS) { Sleep(10); continue; } } if (!strLine.empty()) return true; if (nBytes == 0) { // socket closed printf("socket closed\n"); return false; } else { // socket error int nErr = WSAGetLastError(); printf("recv failed: %d\n", nErr); return false; } } } } // used when scores of local addresses may have changed // pushes better local address to peers void static AdvertizeLocal() { LOCK(cs_vNodes); BOOST_FOREACH(CNode* pnode, vNodes) { if (pnode->fSuccessfullyConnected) { CAddress addrLocal = GetLocalAddress(&pnode->addr); if (addrLocal.IsRoutable() && (CService)addrLocal != (CService)pnode->addrLocal) { pnode->PushAddress(addrLocal); pnode->addrLocal = addrLocal; } } } } void SetReachable(enum Network net, bool fFlag) { LOCK(cs_mapLocalHost); vfReachable[net] = fFlag; if (net == NET_IPV6 && fFlag) vfReachable[NET_IPV4] = true; } // learn a new local address bool AddLocal(const CService& addr, int nScore) { if (!addr.IsRoutable()) return false; if (!fDiscover && nScore < LOCAL_MANUAL) return false; if (IsLimited(addr)) return false; printf("AddLocal(%s,%i)\n", addr.ToString().c_str(), nScore); { LOCK(cs_mapLocalHost); bool fAlready = mapLocalHost.count(addr) > 0; 
LocalServiceInfo &info = mapLocalHost[addr];
        // Raise the score if this address is new, or re-announced with an
        // equal/better score; a tie gets +1 so repeated sightings accumulate.
        if (!fAlready || nScore >= info.nScore) {
            info.nScore = nScore + (fAlready ? 1 : 0);
            info.nPort = addr.GetPort();
        }
        SetReachable(addr.GetNetwork());
    }

    AdvertizeLocal();

    return true;
}

// Convenience overload: pair the bare network address with our listen port.
bool AddLocal(const CNetAddr &addr, int nScore)
{
    return AddLocal(CService(addr, GetListenPort()), nScore);
}

/** Make a particular network entirely off-limits (no automatic connects to it) */
void SetLimited(enum Network net, bool fLimited)
{
    if (net == NET_UNROUTABLE)
        return;
    LOCK(cs_mapLocalHost);
    vfLimited[net] = fLimited;
}

bool IsLimited(enum Network net)
{
    LOCK(cs_mapLocalHost);
    return vfLimited[net];
}

bool IsLimited(const CNetAddr &addr)
{
    return IsLimited(addr.GetNetwork());
}

/** vote for a local address */
bool SeenLocal(const CService& addr)
{
    {
        LOCK(cs_mapLocalHost);
        if (mapLocalHost.count(addr) == 0)
            return false;
        mapLocalHost[addr].nScore++;
    }

    AdvertizeLocal();

    return true;
}

/** check whether a given address is potentially local */
bool IsLocal(const CService& addr)
{
    LOCK(cs_mapLocalHost);
    return mapLocalHost.count(addr) > 0;
}

/** check whether a given address is in a network we can probably connect to */
bool IsReachable(const CNetAddr& addr)
{
    LOCK(cs_mapLocalHost);
    enum Network net = addr.GetNetwork();
    return vfReachable[net] && !vfLimited[net];
}

// Issue the HTTP request pszGet to addrConnect and scan the response body
// for pszKeyword (or take the body verbatim when pszKeyword is NULL).
// On success fills ipRet with the parsed address and returns true.
bool GetMyExternalIP2(const CService& addrConnect, const char* pszGet, const char* pszKeyword, CNetAddr& ipRet)
{
    SOCKET hSocket;
    if (!ConnectSocket(addrConnect, hSocket))
        return error("GetMyExternalIP() : connection to %s failed", addrConnect.ToString().c_str());

    send(hSocket, pszGet, strlen(pszGet), MSG_NOSIGNAL);

    string strLine;
    while (RecvLine(hSocket, strLine))
    {
        if (strLine.empty()) // HTTP response is separated from headers by blank line
        {
            while (true)
            {
                if (!RecvLine(hSocket, strLine))
                {
                    closesocket(hSocket);
                    return false;
                }
                if (pszKeyword == NULL)
                    break;
                if (strLine.find(pszKeyword) != string::npos)
                {
                    strLine = strLine.substr(strLine.find(pszKeyword) + strlen(pszKeyword));
                    break;
                }
            }
            closesocket(hSocket);
            // Strip a trailing HTML tag, then leading/trailing whitespace,
            // before handing the remainder to CService for parsing.
            if (strLine.find("<") != string::npos)
                strLine = strLine.substr(0, strLine.find("<"));
            strLine = strLine.substr(strspn(strLine.c_str(), " \t\n\r"));
            while (strLine.size() > 0 && isspace(strLine[strLine.size()-1]))
                strLine.resize(strLine.size()-1);
            CService addr(strLine,0,true);
            printf("GetMyExternalIP() received [%s] %s\n", strLine.c_str(), addr.ToString().c_str());
            if (!addr.IsValid() || !addr.IsRoutable())
                return false;
            ipRet.SetIP(addr);
            return true;
        }
    }
    closesocket(hSocket);
    return error("GetMyExternalIP() : connection closed");
}

// We now get our external IP from the IRC server first and only use this as a backup
bool GetMyExternalIP(CNetAddr& ipRet)
{
    CService addrConnect;
    const char* pszGet;
    const char* pszKeyword;

    // First pass (nLookup==0) uses hard-coded IPs; second pass resolves the
    // host names in case the hard-coded addresses have gone stale.
    for (int nLookup = 0; nLookup <= 1; nLookup++)
    for (int nHost = 1; nHost <= 2; nHost++)
    {
        // We should be phasing out our use of sites like these. If we need
        // replacements, we should ask for volunteers to put this simple
        // php file on their web server that prints the client IP:
        //  <?php echo $_SERVER["REMOTE_ADDR"]; ?>
        if (nHost == 1)
        {
            addrConnect = CService("91.198.22.70",80); // checkip.dyndns.org

            if (nLookup == 1)
            {
                CService addrIP("checkip.dyndns.org", 80, true);
                if (addrIP.IsValid())
                    addrConnect = addrIP;
            }

            pszGet = "GET / HTTP/1.1\r\n"
                     "Host: checkip.dyndns.org\r\n"
                     "User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)\r\n"
                     "Connection: close\r\n"
                     "\r\n";

            pszKeyword = "Address:";
        }
        else if (nHost == 2)
        {
            addrConnect = CService("74.208.43.192", 80); // www.showmyip.com

            if (nLookup == 1)
            {
                CService addrIP("www.showmyip.com", 80, true);
                if (addrIP.IsValid())
                    addrConnect = addrIP;
            }

            pszGet = "GET /simple/ HTTP/1.1\r\n"
                     "Host: www.showmyip.com\r\n"
                     "User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)\r\n"
                     "Connection: close\r\n"
                     "\r\n";

            pszKeyword = NULL; // Returns just IP address
        }

        if (GetMyExternalIP2(addrConnect, pszGet, pszKeyword, ipRet))
            return true;
    }

    return false;
}

void ThreadGetMyExternalIP(void* parg)
{
    // Make this thread recognisable as the external IP detection thread
    RenameThread("bitcoin-ext-ip");

    CNetAddr addrLocalHost;
    if (GetMyExternalIP(addrLocalHost))
    {
        printf("GetMyExternalIP() returned %s\n", addrLocalHost.ToStringIP().c_str());
        AddLocal(addrLocalHost, LOCAL_HTTP);
    }
}

// Tell addrman a connection to addr is currently live (refreshes its timestamp).
void AddressCurrentlyConnected(const CService& addr)
{
    addrman.Connected(addr);
}

// Find a connected node by bare network address (ignores port).
CNode* FindNode(const CNetAddr& ip)
{
    {
        LOCK(cs_vNodes);
        BOOST_FOREACH(CNode* pnode, vNodes)
            if ((CNetAddr)pnode->addr == ip)
                return (pnode);
    }
    return NULL;
}

// Find a connected node by the name it was connected under.
CNode* FindNode(std::string addrName)
{
    LOCK(cs_vNodes);
    BOOST_FOREACH(CNode* pnode, vNodes)
        if (pnode->addrName == addrName)
            return (pnode);
    return NULL;
}

// Find a connected node by address and port.
CNode* FindNode(const CService& addr)
{
    {
        LOCK(cs_vNodes);
        BOOST_FOREACH(CNode* pnode, vNodes)
            if ((CService)pnode->addr == addr)
                return (pnode);
    }
    return NULL;
}

// Open (or reuse) a connection to addrConnect / pszDest. Returns the node
// with a reference added, or NULL on failure or a would-be self-connection.
CNode* ConnectNode(CAddress addrConnect, const char *pszDest, int64 nTimeout)
{
    if (pszDest == NULL) {
        if (IsLocal(addrConnect))
            return NULL;

        // Look for an existing connection
        CNode* pnode = FindNode((CService)addrConnect);
        if (pnode)
        {
            if (nTimeout != 0)
                pnode->AddRef(nTimeout);
            else
                pnode->AddRef();
            return pnode;
        }
    }

    /// debug print
    printf("trying connection %s lastseen=%.1fhrs\n",
        pszDest ? pszDest : addrConnect.ToString().c_str(),
        pszDest ? 0 : (double)(GetAdjustedTime() - addrConnect.nTime)/3600.0);

    // Connect
    SOCKET hSocket;
    if (pszDest ? ConnectSocketByName(addrConnect, hSocket, pszDest, GetDefaultPort()) : ConnectSocket(addrConnect, hSocket))
    {
        addrman.Attempt(addrConnect);

        /// debug print
        printf("connected %s\n", pszDest ?
pszDest : addrConnect.ToString().c_str());

        // Set to non-blocking
#ifdef WIN32
        u_long nOne = 1;
        if (ioctlsocket(hSocket, FIONBIO, &nOne) == SOCKET_ERROR)
            printf("ConnectSocket() : ioctlsocket non-blocking setting failed, error %d\n", WSAGetLastError());
#else
        if (fcntl(hSocket, F_SETFL, O_NONBLOCK) == SOCKET_ERROR)
            printf("ConnectSocket() : fcntl non-blocking setting failed, error %d\n", errno);
#endif

        // Add node
        CNode* pnode = new CNode(hSocket, addrConnect, pszDest ? pszDest : "", false);
        if (nTimeout != 0)
            pnode->AddRef(nTimeout);
        else
            pnode->AddRef();

        {
            LOCK(cs_vNodes);
            vNodes.push_back(pnode);
        }

        pnode->nTimeConnected = GetTime();
        return pnode;
    }
    else
    {
        return NULL;
    }
}

// Mark the node for disconnection and close its socket immediately.
void CNode::CloseSocketDisconnect()
{
    fDisconnect = true;
    if (hSocket != INVALID_SOCKET)
    {
        printf("disconnecting node %s\n", addrName.c_str());
        closesocket(hSocket);
        hSocket = INVALID_SOCKET;

        // in case this fails, we'll empty the recv buffer when the CNode is deleted
        TRY_LOCK(cs_vRecvMsg, lockRecv);
        if (lockRecv)
            vRecvMsg.clear();
    }
}

void CNode::Cleanup()
{
}

// Send our "version" handshake message to this peer.
void CNode::PushVersion()
{
    /// when NTP implemented, change to just nTime = GetAdjustedTime()
    int64 nTime = (fInbound ? GetAdjustedTime() : GetTime());
    // Don't echo a proxied peer's apparent address back to it.
    CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService("0.0.0.0",0)));
    CAddress addrMe = GetLocalAddress(&addr);
    // Random nonce lets us detect a connection back to ourselves.
    RAND_bytes((unsigned char*)&nLocalHostNonce, sizeof(nLocalHostNonce));
    printf("send version message: version %d, blocks=%d, us=%s, them=%s, peer=%s\n", PROTOCOL_VERSION, nBestHeight, addrMe.ToString().c_str(), addrYou.ToString().c_str(), addr.ToString().c_str());
    PushMessage("version", PROTOCOL_VERSION, nLocalServices, nTime, addrYou, addrMe,
                nLocalHostNonce, FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, std::vector<string>()), nBestHeight);
}

// Ban list shared by all nodes: address -> time the ban expires.
std::map<CNetAddr, int64> CNode::setBanned;
CCriticalSection CNode::cs_setBanned;

void CNode::ClearBanned()
{
    setBanned.clear();
}

// True if ip has an unexpired entry in the ban list.
bool CNode::IsBanned(CNetAddr ip)
{
    bool fResult = false;
    {
        LOCK(cs_setBanned);
        std::map<CNetAddr, int64>::iterator i = setBanned.find(ip);
        if (i != setBanned.end())
        {
            int64 t = (*i).second;
            if (GetTime() < t)
                fResult = true;
        }
    }
    return fResult;
}

extern CMedianFilter<int> cPeerBlockCounts;

// Add howmuch to this peer's misbehaviour score. Once the score reaches
// -banscore the peer is banned for -bantime seconds and disconnected;
// returns true in that case. Local peers are never banned.
bool CNode::Misbehaving(int howmuch)
{
    if (addr.IsLocal())
    {
        printf("Warning: Local node %s misbehaving (delta: %d)!\n", addrName.c_str(), howmuch);
        return false;
    }

    nMisbehavior += howmuch;
    if (nMisbehavior >= GetArg("-banscore", 100))
    {
        int64 banTime = GetTime()+GetArg("-bantime", 60*60*24);  // Default 24-hour ban
        printf("Misbehaving: %s (%d -> %d) DISCONNECTING\n", addr.ToString().c_str(), nMisbehavior-howmuch, nMisbehavior);
        {
            LOCK(cs_setBanned);
            // Only extend, never shorten, an existing ban.
            if (setBanned[addr] < banTime)
                setBanned[addr] = banTime;
        }
        CloseSocketDisconnect();
        cPeerBlockCounts.removeLast(nStartingHeight); // remove this node's reported number of blocks
        return true;
    } else
        printf("Misbehaving: %s (%d -> %d)\n", addr.ToString().c_str(), nMisbehavior-howmuch, nMisbehavior);
    return false;
}

#undef X
#define X(name) stats.name = name
// Copy a snapshot of this node's statistics into stats (field-by-field via X).
void CNode::copyStats(CNodeStats &stats)
{
    X(nServices);
    X(nLastSend);
    X(nLastRecv);
    X(nTimeConnected);
    X(addrName);
    X(nVersion);
    X(strSubVer);
    X(fInbound);
    X(nReleaseTime);
    X(nStartingHeight);
    X(nMisbehavior);
}
#undef X

//
// requires LOCK(cs_vRecvMsg)
// Append nBytes of raw network data to the queue of partially-received
// messages, splitting the stream into CNetMessages. Returns false on a
// malformed stream (bad or oversized header), signalling disconnect.
bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes)
{
    while (nBytes > 0) {

        // get current incomplete message, or create a new one
        if (vRecvMsg.empty() ||
            vRecvMsg.back().complete())
            vRecvMsg.push_back(CNetMessage(SER_NETWORK, nRecvVersion));

        CNetMessage& msg = vRecvMsg.back();

        // absorb network data
        int handled;
        if (!msg.in_data)
            handled = msg.readHeader(pch, nBytes);
        else
            handled = msg.readData(pch, nBytes);

        if (handled < 0)
            return false;

        pch += handled;
        nBytes -= handled;
    }

    return true;
}

// Consume up to nBytes of header data; returns bytes consumed, or -1 on a
// header that fails to deserialize or announces a payload above MAX_SIZE.
// NOTE(review): 24 is presumably the serialized size of CMessageHeader —
// confirm against the header definition.
int CNetMessage::readHeader(const char *pch, unsigned int nBytes)
{
    // copy data to temporary parsing buffer
    unsigned int nRemaining = 24 - nHdrPos;
    unsigned int nCopy = std::min(nRemaining, nBytes);

    memcpy(&hdrbuf[nHdrPos], pch, nCopy);
    nHdrPos += nCopy;

    // if header incomplete, exit
    if (nHdrPos < 24)
        return nCopy;

    // deserialize to CMessageHeader
    try {
        hdrbuf >> hdr;
    }
    catch (std::exception &e) {
        return -1;
    }

    // reject messages larger than MAX_SIZE
    if (hdr.nMessageSize > MAX_SIZE)
        return -1;

    // switch state to reading message data
    in_data = true;
    vRecv.resize(hdr.nMessageSize);

    return nCopy;
}

// Consume up to nBytes of message payload; returns bytes consumed.
int CNetMessage::readData(const char *pch, unsigned int nBytes)
{
    unsigned int nRemaining = hdr.nMessageSize - nDataPos;
    unsigned int nCopy = std::min(nRemaining, nBytes);

    memcpy(&vRecv[nDataPos], pch, nCopy);
    nDataPos += nCopy;

    return nCopy;
}

// Thin wrapper: run ThreadSocketHandler2 and keep the vnThreadsRunning
// bookkeeping consistent whether it returns or throws.
void ThreadSocketHandler(void* parg)
{
    // Make this thread recognisable as the networking thread
    RenameThread("bitcoin-net");

    try
    {
        vnThreadsRunning[THREAD_SOCKETHANDLER]++;
        ThreadSocketHandler2(parg);
        vnThreadsRunning[THREAD_SOCKETHANDLER]--;
    }
    catch (std::exception& e) {
        vnThreadsRunning[THREAD_SOCKETHANDLER]--;
        PrintException(&e, "ThreadSocketHandler()");
    }
    catch (...)
    {
        vnThreadsRunning[THREAD_SOCKETHANDLER]--;
        throw; // support pthread_cancel()
    }
    printf("ThreadSocketHandler exited\n");
}

// Core socket loop: disconnect dead nodes, select() on all sockets,
// accept inbound connections, service recv/send, enforce inactivity limits.
void ThreadSocketHandler2(void* parg)
{
    printf("ThreadSocketHandler started\n");
    list<CNode*> vNodesDisconnected;
    unsigned int nPrevNodeCount = 0;

    while (true)
    {
        //
        // Disconnect nodes
        //
        {
            LOCK(cs_vNodes);
            // Disconnect unused nodes
            vector<CNode*> vNodesCopy = vNodes;
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
            {
                if (pnode->fDisconnect ||
                    (pnode->GetRefCount() <= 0 && pnode->vRecvMsg.empty() && pnode->vSend.empty()))
                {
                    // remove from vNodes
                    vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end());

                    // release outbound grant (if any)
                    pnode->grantOutbound.Release();

                    // close socket and cleanup
                    pnode->CloseSocketDisconnect();
                    pnode->Cleanup();

                    // hold in disconnected pool until all refs are released
                    pnode->nReleaseTime = max(pnode->nReleaseTime, GetTime() + 15 * 60);
                    if (pnode->fNetworkNode || pnode->fInbound)
                        pnode->Release();
                    vNodesDisconnected.push_back(pnode);
                }
            }

            // Delete disconnected nodes
            list<CNode*> vNodesDisconnectedCopy = vNodesDisconnected;
            BOOST_FOREACH(CNode* pnode, vNodesDisconnectedCopy)
            {
                // wait until threads are done using it
                if (pnode->GetRefCount() <= 0)
                {
                    bool fDelete = false;
                    {
                        // Only delete once no other thread holds any of the
                        // node's locks.
                        TRY_LOCK(pnode->cs_vSend, lockSend);
                        if (lockSend)
                        {
                            TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                            if (lockRecv)
                            {
                                TRY_LOCK(pnode->cs_mapRequests, lockReq);
                                if (lockReq)
                                {
                                    TRY_LOCK(pnode->cs_inventory, lockInv);
                                    if (lockInv)
                                        fDelete = true;
                                }
                            }
                        }
                    }
                    if (fDelete)
                    {
                        vNodesDisconnected.remove(pnode);
                        delete pnode;
                    }
                }
            }
        }
        if (vNodes.size() != nPrevNodeCount)
        {
            nPrevNodeCount = vNodes.size();
            uiInterface.NotifyNumConnectionsChanged(vNodes.size());
        }

        //
        // Find which sockets have data to receive
        //
        struct timeval timeout;
        timeout.tv_sec  = 0;
        timeout.tv_usec = 50000; // frequency to poll pnode->vSend

        fd_set fdsetRecv;
        fd_set fdsetSend;
        fd_set fdsetError;
        FD_ZERO(&fdsetRecv);
        FD_ZERO(&fdsetSend);
        FD_ZERO(&fdsetError);
        SOCKET hSocketMax = 0;
        bool
        have_fds = false;

        BOOST_FOREACH(SOCKET hListenSocket, vhListenSocket) {
            FD_SET(hListenSocket, &fdsetRecv);
            hSocketMax = max(hSocketMax, hListenSocket);
            have_fds = true;
        }
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodes)
            {
                if (pnode->hSocket == INVALID_SOCKET)
                    continue;
                FD_SET(pnode->hSocket, &fdsetRecv);
                FD_SET(pnode->hSocket, &fdsetError);
                hSocketMax = max(hSocketMax, pnode->hSocket);
                have_fds = true;
                {
                    // Only watch for writability if there is queued outbound data.
                    TRY_LOCK(pnode->cs_vSend, lockSend);
                    if (lockSend && !pnode->vSend.empty())
                        FD_SET(pnode->hSocket, &fdsetSend);
                }
            }
        }

        // Drop the running count around the blocking select() so StopNode
        // can see this thread as idle while we wait.
        vnThreadsRunning[THREAD_SOCKETHANDLER]--;
        int nSelect = select(have_fds ? hSocketMax + 1 : 0,
                             &fdsetRecv, &fdsetSend, &fdsetError, &timeout);
        vnThreadsRunning[THREAD_SOCKETHANDLER]++;
        if (fShutdown)
            return;
        if (nSelect == SOCKET_ERROR)
        {
            if (have_fds)
            {
                int nErr = WSAGetLastError();
                printf("socket select error %d\n", nErr);
                // On select failure, pretend everything is readable so the
                // per-socket recv path can surface the real error.
                for (unsigned int i = 0; i <= hSocketMax; i++)
                    FD_SET(i, &fdsetRecv);
            }
            FD_ZERO(&fdsetSend);
            FD_ZERO(&fdsetError);
            Sleep(timeout.tv_usec/1000);
        }

        //
        // Accept new connections
        //
        BOOST_FOREACH(SOCKET hListenSocket, vhListenSocket)
        if (hListenSocket != INVALID_SOCKET && FD_ISSET(hListenSocket, &fdsetRecv))
        {
#ifdef USE_IPV6
            struct sockaddr_storage sockaddr;
#else
            struct sockaddr sockaddr;
#endif
            socklen_t len = sizeof(sockaddr);
            SOCKET hSocket = accept(hListenSocket, (struct sockaddr*)&sockaddr, &len);
            CAddress addr;
            int nInbound = 0;

            if (hSocket != INVALID_SOCKET)
                if (!addr.SetSockAddr((const struct sockaddr*)&sockaddr))
                    printf("Warning: Unknown socket family\n");

            {
                LOCK(cs_vNodes);
                BOOST_FOREACH(CNode* pnode, vNodes)
                    if (pnode->fInbound)
                        nInbound++;
            }

            if (hSocket == INVALID_SOCKET)
            {
                int nErr = WSAGetLastError();
                if (nErr != WSAEWOULDBLOCK)
                    printf("socket error accept failed: %d\n", nErr);
            }
            else if (nInbound >= GetArg("-maxconnections", 125) - MAX_OUTBOUND_CONNECTIONS)
            {
                {
                    // At the inbound limit: still accept -addnode peers,
                    // close everyone else.
                    LOCK(cs_setservAddNodeAddresses);
                    if (!setservAddNodeAddresses.count(addr))
                        closesocket(hSocket);
                }
            }
            else if (CNode::IsBanned(addr))
            {
                printf("connection from %s dropped (banned)\n", addr.ToString().c_str());
                closesocket(hSocket);
            }
            else
            {
                printf("accepted connection %s\n", addr.ToString().c_str());
                CNode* pnode = new CNode(hSocket, addr, "", true);
                pnode->AddRef();
                {
                    LOCK(cs_vNodes);
                    vNodes.push_back(pnode);
                }
            }
        }

        //
        // Service each socket
        //
        vector<CNode*> vNodesCopy;
        {
            LOCK(cs_vNodes);
            vNodesCopy = vNodes;
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->AddRef();
        }
        BOOST_FOREACH(CNode* pnode, vNodesCopy)
        {
            if (fShutdown)
                return;

            //
            // Receive
            //
            if (pnode->hSocket == INVALID_SOCKET)
                continue;
            if (FD_ISSET(pnode->hSocket, &fdsetRecv) || FD_ISSET(pnode->hSocket, &fdsetError))
            {
                TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                if (lockRecv)
                {
                    if (pnode->GetTotalRecvSize() > ReceiveFloodSize()) {
                        if (!pnode->fDisconnect)
                            printf("socket recv flood control disconnect (%u bytes)\n", pnode->GetTotalRecvSize());
                        pnode->CloseSocketDisconnect();
                    }
                    else {
                        // typical socket buffer is 8K-64K
                        char pchBuf[0x10000];
                        int nBytes = recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT);
                        if (nBytes > 0)
                        {
                            if (!pnode->ReceiveMsgBytes(pchBuf, nBytes))
                                pnode->CloseSocketDisconnect();
                            pnode->nLastRecv = GetTime();
                        }
                        else if (nBytes == 0)
                        {
                            // socket closed gracefully
                            if (!pnode->fDisconnect)
                                printf("socket closed\n");
                            pnode->CloseSocketDisconnect();
                        }
                        else if (nBytes < 0)
                        {
                            // error
                            int nErr = WSAGetLastError();
                            if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS)
                            {
                                if (!pnode->fDisconnect)
                                    printf("socket recv error %d\n", nErr);
                                pnode->CloseSocketDisconnect();
                            }
                        }
                    }
                }
            }

            //
            // Send
            //
            if (pnode->hSocket == INVALID_SOCKET)
                continue;
            if (FD_ISSET(pnode->hSocket, &fdsetSend))
            {
                TRY_LOCK(pnode->cs_vSend, lockSend);
                if (lockSend)
                {
                    CDataStream& vSend = pnode->vSend;
                    if (!vSend.empty())
                    {
                        int nBytes = send(pnode->hSocket, &vSend[0], vSend.size(), MSG_NOSIGNAL | MSG_DONTWAIT);
                        if (nBytes > 0)
                        {
                            vSend.erase(vSend.begin(), vSend.begin() + nBytes);
                            pnode->nLastSend = GetTime();
                        }
                        else if (nBytes < 0)
                        {
                            // error
                            int nErr = WSAGetLastError();
                            if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS)
                            {
                                printf("socket send error %d\n", nErr);
                                pnode->CloseSocketDisconnect();
                            }
                        }
                    }
                }
            }

            //
            // Inactivity checking
            //
            if (pnode->vSend.empty())
                pnode->nLastSendEmpty = GetTime();
            if (GetTime() - pnode->nTimeConnected > 60)
            {
                if (pnode->nLastRecv == 0 || pnode->nLastSend == 0)
                {
                    printf("socket no message in first 60 seconds, %d %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0);
                    pnode->fDisconnect = true;
                }
                else if (GetTime() - pnode->nLastSend > 90*60 && GetTime() - pnode->nLastSendEmpty > 90*60)
                {
                    printf("socket not sending\n");
                    pnode->fDisconnect = true;
                }
                else if (GetTime() - pnode->nLastRecv > 90*60)
                {
                    printf("socket inactivity timeout\n");
                    pnode->fDisconnect = true;
                }
            }
        }
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->Release();
        }

        Sleep(10);
    }
}

#ifdef USE_UPNP
// Thin wrapper: run ThreadMapPort2 with thread-count bookkeeping.
void ThreadMapPort(void* parg)
{
    // Make this thread recognisable as the UPnP thread
    RenameThread("bitcoin-UPnP");

    try
    {
        vnThreadsRunning[THREAD_UPNP]++;
        ThreadMapPort2(parg);
        vnThreadsRunning[THREAD_UPNP]--;
    }
    catch (std::exception& e) {
        vnThreadsRunning[THREAD_UPNP]--;
        PrintException(&e, "ThreadMapPort()");
    }
    catch (...)
    {
        vnThreadsRunning[THREAD_UPNP]--;
        PrintException(NULL, "ThreadMapPort()");
    }
    printf("ThreadMapPort exited\n");
}

// Discover a UPnP-capable gateway, publish our external address, and keep
// the TCP port mapping alive (refreshed every 20 minutes) until shutdown
// or until -upnp is turned off.
void ThreadMapPort2(void* parg)
{
    printf("ThreadMapPort started\n");

    std::string port = strprintf("%u", GetListenPort());
    const char * multicastif = 0;
    const char * minissdpdpath = 0;
    struct UPNPDev * devlist = 0;
    char lanaddr[64];

#ifndef UPNPDISCOVER_SUCCESS
    /* miniupnpc 1.5 */
    devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0);
#else
    /* miniupnpc 1.6 */
    int error = 0;
    devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error);
#endif

    struct UPNPUrls urls;
    struct IGDdatas data;
    int r;

    r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr));
    if (r == 1)
    {
        if (fDiscover) {
            char externalIPAddress[40];
            r = UPNP_GetExternalIPAddress(urls.controlURL, data.first.servicetype, externalIPAddress);
            if(r != UPNPCOMMAND_SUCCESS)
                printf("UPnP: GetExternalIPAddress() returned %d\n", r);
            else
            {
                if(externalIPAddress[0])
                {
                    printf("UPnP: ExternalIPAddress = %s\n", externalIPAddress);
                    AddLocal(CNetAddr(externalIPAddress), LOCAL_UPNP);
                }
                else
                    printf("UPnP: GetExternalIPAddress failed.\n");
            }
        }

        string strDesc = "HyperStake " + FormatFullVersion();
#ifndef UPNPDISCOVER_SUCCESS
        /* miniupnpc 1.5 */
        r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
                                port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0);
#else
        /* miniupnpc 1.6 */
        r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
                                port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0, "0");
#endif

        if(r!=UPNPCOMMAND_SUCCESS)
            printf("AddPortMapping(%s, %s, %s) failed with code %d (%s)\n",
                port.c_str(), port.c_str(), lanaddr, r, strupnperror(r));
        else
            printf("UPnP Port Mapping successful.\n");
        int i = 1;
        while (true)
        {
            if (fShutdown || !fUseUPnP)
            {
                // Tear the mapping down before exiting.
                r = UPNP_DeletePortMapping(urls.controlURL, data.first.servicetype, port.c_str(), "TCP", 0);
                printf("UPNP_DeletePortMapping() returned : %d\n", r);
                freeUPNPDevlist(devlist); devlist = 0;
                FreeUPNPUrls(&urls);
                return;
            }
            if (i % 600 == 0) // Refresh every 20 minutes
            {
#ifndef UPNPDISCOVER_SUCCESS
                /* miniupnpc 1.5 */
                r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
                                        port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0);
#else
                /* miniupnpc 1.6 */
                r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype,
                                        port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0, "0");
#endif

                if(r!=UPNPCOMMAND_SUCCESS)
                    printf("AddPortMapping(%s, %s, %s) failed with code %d (%s)\n",
                        port.c_str(), port.c_str(), lanaddr, r, strupnperror(r));
                else
                    printf("UPnP Port Mapping successful.\n");;
            }
            Sleep(2000);
            i++;
        }
    } else {
        printf("No valid UPnP IGDs found\n");
        freeUPNPDevlist(devlist); devlist = 0;
        if (r != 0)
            FreeUPNPUrls(&urls);
        // No gateway found: idle until shutdown so the thread bookkeeping
        // in the wrapper stays balanced.
        while (true)
        {
            if (fShutdown || !fUseUPnP)
                return;
            Sleep(2000);
        }
    }
}

// Start the UPnP thread if enabled and not already running.
void MapPort()
{
    if (fUseUPnP && vnThreadsRunning[THREAD_UPNP] < 1)
    {
        if (!NewThread(ThreadMapPort, NULL))
            printf("Error: ThreadMapPort(ThreadMapPort) failed\n");
    }
}
#else
void MapPort()
{
    // Intentionally left blank.
}
#endif

// DNS seeds
// Each pair gives a source name and a seed name.
// The first name is used as information source for addrman.
// The second name should resolve to a list of seed addresses.
static const char *strDNSSeed[][2] = {
    {"temp seed", "hyp.bottlecaps.org"},
    {"CCE block explorer", "hyp.altcointech.net"},
    {"chainworks seed", "hyp.chainworks.info"},
};

// Thin wrapper: run ThreadDNSAddressSeed2 with thread-count bookkeeping.
void ThreadDNSAddressSeed(void* parg)
{
    // Make this thread recognisable as the DNS seeding thread
    RenameThread("bitcoin-dnsseed");

    try
    {
        vnThreadsRunning[THREAD_DNSSEED]++;
        ThreadDNSAddressSeed2(parg);
        vnThreadsRunning[THREAD_DNSSEED]--;
    }
    catch (std::exception& e) {
        vnThreadsRunning[THREAD_DNSSEED]--;
        PrintException(&e, "ThreadDNSAddressSeed()");
    }
    catch (...)
    {
        vnThreadsRunning[THREAD_DNSSEED]--;
        throw; // support pthread_cancel()
    }
    printf("ThreadDNSAddressSeed exited\n");
}

// Resolve each DNS seed (skipped on testnet) and feed the results into
// addrman with a randomized 3-7 day old "last seen" timestamp.
void ThreadDNSAddressSeed2(void* parg)
{
    printf("ThreadDNSAddressSeed started\n");
    int found = 0;

    if (!fTestNet)
    {
        printf("Loading addresses from DNS seeds (could take a while)\n");

        for (unsigned int seed_idx = 0; seed_idx < ARRAYLEN(strDNSSeed); seed_idx++) {
            if (HaveNameProxy()) {
                // With a name proxy we cannot resolve locally; queue a
                // one-shot connection to the seed name instead.
                AddOneShot(strDNSSeed[seed_idx][1]);
            } else {
                vector<CNetAddr> vaddr;
                vector<CAddress> vAdd;
                if (LookupHost(strDNSSeed[seed_idx][1], vaddr))
                {
                    BOOST_FOREACH(CNetAddr& ip, vaddr)
                    {
                        int nOneDay = 24*3600;
                        CAddress addr = CAddress(CService(ip, GetDefaultPort()));
                        addr.nTime = GetTime() - 3*nOneDay - GetRand(4*nOneDay); // use a random age between 3 and 7 days old
                        vAdd.push_back(addr);
                        found++;
                    }
                }
                addrman.Add(vAdd, CNetAddr(strDNSSeed[seed_idx][0], true));
            }
        }
    }

    printf("%d addresses found from DNS seeds\n", found);
}

// Hard-coded fallback seed IPs (currently empty).
unsigned int pnSeed[] =
{
    //0x8785f050,
};

// Persist addrman to peers.dat.
void DumpAddresses()
{
    int64 nStart = GetTimeMillis();

    CAddrDB adb;
    adb.Write(addrman);

    printf("Flushed %d addresses to peers.dat %"PRI64d"ms\n", addrman.size(), GetTimeMillis() - nStart);
}

// Flush addresses every 100 seconds until shutdown, marking this thread
// idle while it sleeps so StopNode can exit.
void ThreadDumpAddress2(void* parg)
{
    vnThreadsRunning[THREAD_DUMPADDRESS]++;
    while (!fShutdown)
    {
        DumpAddresses();
        vnThreadsRunning[THREAD_DUMPADDRESS]--;
        Sleep(100000);
        vnThreadsRunning[THREAD_DUMPADDRESS]++;
    }
    vnThreadsRunning[THREAD_DUMPADDRESS]--;
}

void ThreadDumpAddress(void* parg)
{
    // Make this thread recognisable as the address dumping thread
    RenameThread("bitcoin-adrdump");

    try
    {
        ThreadDumpAddress2(parg);
    }
    catch (std::exception& e) {
        PrintException(&e, "ThreadDumpAddress()");
    }
    printf("ThreadDumpAddress exited\n");
}

// Thin wrapper: run ThreadOpenConnections2 with thread-count bookkeeping.
void ThreadOpenConnections(void* parg)
{
    // Make this thread recognisable as the connection opening thread
    RenameThread("bitcoin-opencon");

    try
    {
        vnThreadsRunning[THREAD_OPENCONNECTIONS]++;
        ThreadOpenConnections2(parg);
        vnThreadsRunning[THREAD_OPENCONNECTIONS]--;
    }
    catch (std::exception& e) {
        vnThreadsRunning[THREAD_OPENCONNECTIONS]--;
        PrintException(&e, "ThreadOpenConnections()");
    }
    catch (...)
    {
        vnThreadsRunning[THREAD_OPENCONNECTIONS]--;
        PrintException(NULL, "ThreadOpenConnections()");
    }
    printf("ThreadOpenConnections exited\n");
}

// Pop one queued one-shot destination and try to connect; requeue on failure.
void static ProcessOneShot()
{
    string strDest;
    {
        LOCK(cs_vOneShots);
        if (vOneShots.empty())
            return;
        strDest = vOneShots.front();
        vOneShots.pop_front();
    }
    CAddress addr;
    CSemaphoreGrant grant(*semOutbound, true);
    if (grant) {
        if (!OpenNetworkConnection(addr, &grant, strDest.c_str(), true))
            AddOneShot(strDest);
    }
}

// ppcoin: stake minter thread
void static ThreadStakeMinter(void* parg)
{
    printf("ThreadStakeMinter started\n");
    CWallet* pwallet = (CWallet*)parg;
    try
    {
        vnThreadsRunning[THREAD_MINTER]++;
        BitcoinMiner(pwallet, true);
        vnThreadsRunning[THREAD_MINTER]--;
    }
    catch (std::exception& e) {
        vnThreadsRunning[THREAD_MINTER]--;
        PrintException(&e, "ThreadStakeMinter()");
    }
    catch (...) {
        vnThreadsRunning[THREAD_MINTER]--;
        PrintException(NULL, "ThreadStakeMinter()");
    }
    printf("ThreadStakeMinter exiting, %d threads remaining\n", vnThreadsRunning[THREAD_MINTER]);
}

// Main outbound connection loop: honour -connect if given, otherwise pick
// candidate peers from addrman (at most one per /16 network group).
void ThreadOpenConnections2(void* parg)
{
    printf("ThreadOpenConnections started\n");

    // Connect to specific addresses
    if (mapArgs.count("-connect") && mapMultiArgs["-connect"].size() > 0)
    {
        for (int64 nLoop = 0;; nLoop++)
        {
            ProcessOneShot();
            BOOST_FOREACH(string strAddr, mapMultiArgs["-connect"])
            {
                CAddress addr;
                OpenNetworkConnection(addr, NULL, strAddr.c_str());
                for (int i = 0; i < 10 && i < nLoop; i++)
                {
                    Sleep(500);
                    if (fShutdown)
                        return;
                }
            }
            Sleep(500);
        }
    }

    // Initiate network connections
    int64 nStart = GetTime();
    while (true)
    {
        ProcessOneShot();

        // Mark the thread idle across the sleep and the (possibly blocking)
        // semaphore wait so StopNode can make progress.
        vnThreadsRunning[THREAD_OPENCONNECTIONS]--;
        Sleep(500);
        vnThreadsRunning[THREAD_OPENCONNECTIONS]++;
        if (fShutdown)
            return;

        vnThreadsRunning[THREAD_OPENCONNECTIONS]--;
        CSemaphoreGrant grant(*semOutbound);
        vnThreadsRunning[THREAD_OPENCONNECTIONS]++;
        if (fShutdown)
            return;

        // Add seed nodes if IRC isn't working
        if
        (addrman.size()==0 && (GetTime() - nStart > 60) && !fTestNet)
        {
            std::vector<CAddress> vAdd;
            for (unsigned int i = 0; i < ARRAYLEN(pnSeed); i++)
            {
                // It'll only connect to one or two seed nodes because once it connects,
                // it'll get a pile of addresses with newer timestamps.
                // Seed nodes are given a random 'last seen time' of between one and two
                // weeks ago.
                const int64 nOneWeek = 7*24*60*60;
                struct in_addr ip;
                memcpy(&ip, &pnSeed[i], sizeof(ip));
                CAddress addr(CService(ip, GetDefaultPort()));
                addr.nTime = GetTime()-GetRand(nOneWeek)-nOneWeek;
                vAdd.push_back(addr);
            }
            addrman.Add(vAdd, CNetAddr("127.0.0.1"));
        }

        //
        // Choose an address to connect to based on most recently seen
        //
        CAddress addrConnect;

        // Only connect out to one peer per network group (/16 for IPv4).
        // Do this here so we don't have to critsect vNodes inside mapAddresses critsect.
        int nOutbound = 0;
        set<vector<unsigned char> > setConnected;
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodes) {
                if (!pnode->fInbound) {
                    setConnected.insert(pnode->addr.GetGroup());
                    nOutbound++;
                }
            }
        }

        int64 nANow = GetAdjustedTime();

        int nTries = 0;
        while (true)
        {
            // use an nUnkBias between 10 (no outgoing connections) and 90 (8 outgoing connections)
            CAddress addr = addrman.Select(10 + min(nOutbound,8)*10);

            // if we selected an invalid address, restart
            if (!addr.IsValid() || setConnected.count(addr.GetGroup()) || IsLocal(addr))
                break;

            // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman,
            // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates
            // already-connected network ranges, ...) before trying new addrman addresses.
            nTries++;
            if (nTries > 100)
                break;

            if (IsLimited(addr))
                continue;

            // only consider very recently tried nodes after 30 failed attempts
            if (nANow - addr.nLastTry < 600 && nTries < 30)
                continue;

            // do not allow non-default ports, unless after 50 invalid addresses selected already
            if (addr.GetPort() != GetDefaultPort() && nTries < 50)
                continue;

            addrConnect = addr;
            break;
        }

        if (addrConnect.IsValid())
            OpenNetworkConnection(addrConnect, &grant);
    }
}

// Thin wrapper: run ThreadOpenAddedConnections2 with thread-count bookkeeping.
void ThreadOpenAddedConnections(void* parg)
{
    // Make this thread recognisable as the connection opening thread
    RenameThread("bitcoin-opencon");

    try
    {
        vnThreadsRunning[THREAD_ADDEDCONNECTIONS]++;
        ThreadOpenAddedConnections2(parg);
        vnThreadsRunning[THREAD_ADDEDCONNECTIONS]--;
    }
    catch (std::exception& e) {
        vnThreadsRunning[THREAD_ADDEDCONNECTIONS]--;
        PrintException(&e, "ThreadOpenAddedConnections()");
    }
    catch (...) {
        vnThreadsRunning[THREAD_ADDEDCONNECTIONS]--;
        PrintException(NULL, "ThreadOpenAddedConnections()");
    }
    printf("ThreadOpenAddedConnections exited\n");
}

// Maintain a connection to every -addnode entry, retrying every 2 minutes.
void ThreadOpenAddedConnections2(void* parg)
{
    {
        LOCK(cs_vAddedNodes);
        vAddedNodes = mapMultiArgs["-addnode"];
    }

    if (HaveNameProxy()) {
        // Name-proxy mode: connect by name only; never resolve locally.
        while(true) {
            list<string> lAddresses(0);
            {
                LOCK(cs_vAddedNodes);
                BOOST_FOREACH(string& strAddNode, vAddedNodes)
                    lAddresses.push_back(strAddNode);
            }
            BOOST_FOREACH(string& strAddNode, lAddresses) {
                CAddress addr;
                CSemaphoreGrant grant(*semOutbound);
                OpenNetworkConnection(addr, &grant, strAddNode.c_str());
                Sleep(500);
            }
            vnThreadsRunning[THREAD_ADDEDCONNECTIONS]--;
            Sleep(120000); // Retry every 2 minutes
            vnThreadsRunning[THREAD_ADDEDCONNECTIONS]++;
        }
    }

    for (unsigned int i = 0; true; i++)
    {
        list<string> lAddresses(0);
        {
            LOCK(cs_vAddedNodes);
            BOOST_FOREACH(string& strAddNode, vAddedNodes)
                lAddresses.push_back(strAddNode);
        }

        list<vector<CService> > lservAddressesToAdd(0);
        BOOST_FOREACH(string& strAddNode, lAddresses)
        {
            vector<CService> vservNode(0);
            if(Lookup(strAddNode.c_str(), vservNode, GetDefaultPort(), fNameLookup, 0))
            {
                lservAddressesToAdd.push_back(vservNode);
                {
                    LOCK(cs_setservAddNodeAddresses);
                    BOOST_FOREACH(CService& serv, vservNode)
                        setservAddNodeAddresses.insert(serv);
                }
            }
        }
        // Attempt to connect to each IP for each addnode entry until at least one is successful per addnode entry
        // (keeping in mind that addnode entries can have many IPs if fNameLookup)
        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodes)
                for (list<vector<CService> >::iterator it = lservAddressesToAdd.begin(); it != lservAddressesToAdd.end(); it++)
                    BOOST_FOREACH(CService& addrNode, *(it))
                        if (pnode->addr == addrNode)
                        {
                            // Already connected to this entry; drop it from
                            // the to-connect list.
                            it = lservAddressesToAdd.erase(it);
                            it--;
                            break;
                        }
        }
        BOOST_FOREACH(vector<CService>& vserv, lservAddressesToAdd)
        {
            CSemaphoreGrant grant(*semOutbound);
            OpenNetworkConnection(CAddress(vserv[i % vserv.size()]), &grant);
            Sleep(500);
        }
        vnThreadsRunning[THREAD_ADDEDCONNECTIONS]--;
        Sleep(120000); // Retry every 2 minutes
        vnThreadsRunning[THREAD_ADDEDCONNECTIONS]++;
    }
}

// if successful, this moves the passed grant to the constructed node
bool OpenNetworkConnection(const CAddress& addrConnect, CSemaphoreGrant *grantOutbound, const char *strDest, bool fOneShot)
{
    //
    // Initiate outbound network connection
    //
    if (fShutdown)
        return false;
    if (!strDest)
        if (IsLocal(addrConnect) ||
            FindNode((CNetAddr)addrConnect) || CNode::IsBanned(addrConnect) ||
            FindNode(addrConnect.ToStringIPPort().c_str()))
            return false;
    if (strDest && FindNode(strDest))
        return false;

    vnThreadsRunning[THREAD_OPENCONNECTIONS]--;
    CNode* pnode = ConnectNode(addrConnect, strDest);
    vnThreadsRunning[THREAD_OPENCONNECTIONS]++;
    if (fShutdown)
        return false;
    if (!pnode)
        return false;
    if (grantOutbound)
        grantOutbound->MoveTo(pnode->grantOutbound);
    pnode->fNetworkNode = true;
    if (fOneShot)
        pnode->fOneShot = true;

    return true;
}

// Thin wrapper: run ThreadMessageHandler2 with thread-count bookkeeping.
void ThreadMessageHandler(void* parg)
{
    // Make this thread recognisable as the message handling thread
    RenameThread("bitcoin-msghand");

    try
    {
        vnThreadsRunning[THREAD_MESSAGEHANDLER]++;
        ThreadMessageHandler2(parg);
        vnThreadsRunning[THREAD_MESSAGEHANDLER]--;
    }
    catch (std::exception& e) {
        vnThreadsRunning[THREAD_MESSAGEHANDLER]--;
        PrintException(&e, "ThreadMessageHandler()");
    }
    catch (...) {
        vnThreadsRunning[THREAD_MESSAGEHANDLER]--;
        PrintException(NULL, "ThreadMessageHandler()");
    }
    printf("ThreadMessageHandler exited\n");
}

// Pump ProcessMessages/SendMessages for every connected node until shutdown.
void ThreadMessageHandler2(void* parg)
{
    printf("ThreadMessageHandler started\n");
    SetThreadPriority(THREAD_PRIORITY_BELOW_NORMAL);
    while (!fShutdown)
    {
        vector<CNode*> vNodesCopy;
        {
            LOCK(cs_vNodes);
            vNodesCopy = vNodes;
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->AddRef();
        }

        // Poll the connected nodes for messages
        CNode* pnodeTrickle = NULL;
        if (!vNodesCopy.empty())
            pnodeTrickle = vNodesCopy[GetRand(vNodesCopy.size())];
        BOOST_FOREACH(CNode* pnode, vNodesCopy)
        {
            if(pnode->fDisconnect)
                continue;

            // Receive messages
            {
                TRY_LOCK(pnode->cs_vRecvMsg, lockRecv);
                if (lockRecv)
                    if(!ProcessMessages(pnode))
                        pnode->CloseSocketDisconnect();
            }
            if (fShutdown)
                return;

            // Send messages
            {
                TRY_LOCK(pnode->cs_vSend, lockSend);
                if (lockSend)
                    SendMessages(pnode, pnode == pnodeTrickle);
            }
            if (fShutdown)
                return;
        }

        {
            LOCK(cs_vNodes);
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->Release();
        }

        // Wait and allow messages to bunch up.
        // Reduce vnThreadsRunning so StopNode has permission to exit while
        // we're sleeping, but we must always check fShutdown after doing this.
        vnThreadsRunning[THREAD_MESSAGEHANDLER]--;
        Sleep(100);
        if (fRequestShutdown)
            StartShutdown();
        vnThreadsRunning[THREAD_MESSAGEHANDLER]++;
        if (fShutdown)
            return;
    }
}

// Create, configure, bind and listen on a socket for addrBind.
// On failure fills strError with a printable reason and returns false.
bool BindListenPort(const CService &addrBind, string& strError)
{
    strError = "";
    int nOne = 1;

#ifdef WIN32
    // Initialize Windows Sockets
    WSADATA wsadata;
    int ret = WSAStartup(MAKEWORD(2,2), &wsadata);
    if (ret != NO_ERROR)
    {
        strError = strprintf("Error: TCP/IP socket library failed to start (WSAStartup returned error %d)", ret);
        printf("%s\n", strError.c_str());
        return false;
    }
#endif

    // Create socket for listening for incoming connections
#ifdef USE_IPV6
    struct sockaddr_storage sockaddr;
#else
    struct sockaddr sockaddr;
#endif
    socklen_t len = sizeof(sockaddr);
    if (!addrBind.GetSockAddr((struct sockaddr*)&sockaddr, &len))
    {
        strError = strprintf("Error: bind address family for %s not supported", addrBind.ToString().c_str());
        printf("%s\n", strError.c_str());
        return false;
    }

    SOCKET hListenSocket = socket(((struct sockaddr*)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP);
    if (hListenSocket == INVALID_SOCKET)
    {
        strError = strprintf("Error: Couldn't open socket for incoming connections (socket returned error %d)", WSAGetLastError());
        printf("%s\n", strError.c_str());
        return false;
    }

#ifdef SO_NOSIGPIPE
    // Different way of disabling SIGPIPE on BSD
    setsockopt(hListenSocket, SOL_SOCKET, SO_NOSIGPIPE, (void*)&nOne, sizeof(int));
#endif

#ifndef WIN32
    // Allow binding if the port is still in TIME_WAIT state after
    // the program was closed and restarted.  Not an issue on windows.
setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (void*)&nOne, sizeof(int)); #endif #ifdef WIN32 // Set to non-blocking, incoming connections will also inherit this if (ioctlsocket(hListenSocket, FIONBIO, (u_long*)&nOne) == SOCKET_ERROR) #else if (fcntl(hListenSocket, F_SETFL, O_NONBLOCK) == SOCKET_ERROR) #endif { strError = strprintf("Error: Couldn't set properties on socket for incoming connections (error %d)", WSAGetLastError()); printf("%s\n", strError.c_str()); return false; } #ifdef USE_IPV6 // some systems don't have IPV6_V6ONLY but are always v6only; others do have the option // and enable it by default or not. Try to enable it, if possible. if (addrBind.IsIPv6()) { #ifdef IPV6_V6ONLY #ifdef WIN32 setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)&nOne, sizeof(int)); #else setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&nOne, sizeof(int)); #endif #endif #ifdef WIN32 int nProtLevel = 10 /* PROTECTION_LEVEL_UNRESTRICTED */; int nParameterId = 23 /* IPV6_PROTECTION_LEVEl */; // this call is allowed to fail setsockopt(hListenSocket, IPPROTO_IPV6, nParameterId, (const char*)&nProtLevel, sizeof(int)); #endif } #endif if (::bind(hListenSocket, (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR) { int nErr = WSAGetLastError(); if (nErr == WSAEADDRINUSE) strError = strprintf(_("Unable to bind to %s on this computer. 
HyperStake is probably already running."), addrBind.ToString().c_str()); else strError = strprintf(_("Unable to bind to %s on this computer (bind returned error %d, %s)"), addrBind.ToString().c_str(), nErr, strerror(nErr)); printf("%s\n", strError.c_str()); return false; } printf("Bound to %s\n", addrBind.ToString().c_str()); // Listen for incoming connections if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR) { strError = strprintf("Error: Listening for incoming connections failed (listen returned error %d)", WSAGetLastError()); printf("%s\n", strError.c_str()); return false; } vhListenSocket.push_back(hListenSocket); if (addrBind.IsRoutable() && fDiscover) AddLocal(addrBind, LOCAL_BIND); return true; } void static Discover() { if (!fDiscover) return; #ifdef WIN32 // Get local host IP char pszHostName[1000] = ""; if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR) { vector<CNetAddr> vaddr; if (LookupHost(pszHostName, vaddr)) { BOOST_FOREACH (const CNetAddr &addr, vaddr) { AddLocal(addr, LOCAL_IF); } } } #else // Get local host ip struct ifaddrs* myaddrs; if (getifaddrs(&myaddrs) == 0) { for (struct ifaddrs* ifa = myaddrs; ifa != NULL; ifa = ifa->ifa_next) { if (ifa->ifa_addr == NULL) continue; if ((ifa->ifa_flags & IFF_UP) == 0) continue; if (strcmp(ifa->ifa_name, "lo") == 0) continue; if (strcmp(ifa->ifa_name, "lo0") == 0) continue; if (ifa->ifa_addr->sa_family == AF_INET) { struct sockaddr_in* s4 = (struct sockaddr_in*)(ifa->ifa_addr); CNetAddr addr(s4->sin_addr); if (AddLocal(addr, LOCAL_IF)) printf("IPv4 %s: %s\n", ifa->ifa_name, addr.ToString().c_str()); } #ifdef USE_IPV6 else if (ifa->ifa_addr->sa_family == AF_INET6) { struct sockaddr_in6* s6 = (struct sockaddr_in6*)(ifa->ifa_addr); CNetAddr addr(s6->sin6_addr); if (AddLocal(addr, LOCAL_IF)) printf("IPv6 %s: %s\n", ifa->ifa_name, addr.ToString().c_str()); } #endif } freeifaddrs(myaddrs); } #endif // Don't use external IPv4 discovery, when -onlynet="IPv6" if (!IsLimited(NET_IPV4)) 
NewThread(ThreadGetMyExternalIP, NULL); } void StartNode(void* parg) { // Make this thread recognisable as the startup thread RenameThread("bitcoin-start"); if (semOutbound == NULL) { // initialize semaphore int nMaxOutbound = min(MAX_OUTBOUND_CONNECTIONS, (int)GetArg("-maxconnections", 125)); semOutbound = new CSemaphore(nMaxOutbound); } if (pnodeLocalHost == NULL) pnodeLocalHost = new CNode(INVALID_SOCKET, CAddress(CService("127.0.0.1", 0), nLocalServices)); Discover(); // // Start threads // if (!GetBoolArg("-dnsseed", true)) printf("DNS seeding disabled\n"); else if (!NewThread(ThreadDNSAddressSeed, NULL)) printf("Error: NewThread(ThreadDNSAddressSeed) failed\n"); // Map ports with UPnP if (fUseUPnP) MapPort(); // Get addresses from IRC and advertise ours if (!NewThread(ThreadIRCSeed, NULL)) printf("Error: NewThread(ThreadIRCSeed) failed\n"); // Send and receive from sockets, accept connections if (!NewThread(ThreadSocketHandler, NULL)) printf("Error: NewThread(ThreadSocketHandler) failed\n"); // Initiate outbound connections from -addnode if (!NewThread(ThreadOpenAddedConnections, NULL)) printf("Error: NewThread(ThreadOpenAddedConnections) failed\n"); // Initiate outbound connections if (!NewThread(ThreadOpenConnections, NULL)) printf("Error: NewThread(ThreadOpenConnections) failed\n"); // Process messages if (!NewThread(ThreadMessageHandler, NULL)) printf("Error: NewThread(ThreadMessageHandler) failed\n"); // Dump network addresses if (!NewThread(ThreadDumpAddress, NULL)) printf("Error; NewThread(ThreadDumpAddress) failed\n"); // ppcoin: mint proof-of-stake blocks in the background if (!NewThread(ThreadStakeMinter, pwalletMain)) printf("Error: NewThread(ThreadStakeMinter) failed\n"); // Generate coins in the background GenerateBitcoins(GetBoolArg("-gen", false), pwalletMain); } bool StopNode() { printf("StopNode()\n"); fShutdown = true; nTransactionsUpdated++; int64 nStart = GetTime(); if (semOutbound) for (int i=0; i<MAX_OUTBOUND_CONNECTIONS; i++) 
            semOutbound->post();
    // Poll until every registered worker thread has exited, but give up
    // after 20 seconds so shutdown cannot hang forever.
    do
    {
        int nThreadsRunning = 0;
        for (int n = 0; n < THREAD_MAX; n++)
            nThreadsRunning += vnThreadsRunning[n];
        if (nThreadsRunning == 0)
            break;
        if (GetTime() - nStart > 20)
            break;
        Sleep(20);
    } while(true);
    // Report any stragglers that outlived the grace period.
    if (vnThreadsRunning[THREAD_SOCKETHANDLER] > 0)
        printf("ThreadSocketHandler still running\n");
    if (vnThreadsRunning[THREAD_OPENCONNECTIONS] > 0)
        printf("ThreadOpenConnections still running\n");
    if (vnThreadsRunning[THREAD_MESSAGEHANDLER] > 0)
        printf("ThreadMessageHandler still running\n");
    if (vnThreadsRunning[THREAD_MINER] > 0)
        printf("ThreadBitcoinMiner still running\n");
    if (vnThreadsRunning[THREAD_RPCLISTENER] > 0)
        printf("ThreadRPCListener still running\n");
    if (vnThreadsRunning[THREAD_RPCHANDLER] > 0)
        printf("ThreadsRPCServer still running\n");
#ifdef USE_UPNP
    if (vnThreadsRunning[THREAD_UPNP] > 0)
        printf("ThreadMapPort still running\n");
#endif
    if (vnThreadsRunning[THREAD_DNSSEED] > 0)
        printf("ThreadDNSAddressSeed still running\n");
    if (vnThreadsRunning[THREAD_ADDEDCONNECTIONS] > 0)
        printf("ThreadOpenAddedConnections still running\n");
    if (vnThreadsRunning[THREAD_DUMPADDRESS] > 0)
        printf("ThreadDumpAddresses still running\n");
    if (vnThreadsRunning[THREAD_MINTER] > 0)
        printf("ThreadStakeMinter still running\n");
    // Message and RPC handler threads are blocked on last: wait (unbounded)
    // until they are really gone before the final address dump.
    while (vnThreadsRunning[THREAD_MESSAGEHANDLER] > 0 || vnThreadsRunning[THREAD_RPCHANDLER] > 0)
        Sleep(20);
    Sleep(50);
    DumpAddresses();
    return true;
}

// Process-exit cleanup via a static object's destructor: closes all peer and
// listening sockets, and tears down Winsock on Windows.
class CNetCleanup
{
public:
    CNetCleanup()
    {
    }
    ~CNetCleanup()
    {
        // Close sockets
        BOOST_FOREACH(CNode* pnode, vNodes)
            if (pnode->hSocket != INVALID_SOCKET)
                closesocket(pnode->hSocket);
        BOOST_FOREACH(SOCKET hListenSocket, vhListenSocket)
            if (hListenSocket != INVALID_SOCKET)
                if (closesocket(hListenSocket) == SOCKET_ERROR)
                    printf("closesocket(hListenSocket) failed with error %d\n", WSAGetLastError());

#ifdef WIN32
        // Shutdown Windows Sockets
        WSACleanup();
#endif
    }
}
instance_of_cnetcleanup;
/* * Copyright 2011 Christoph Bumiller * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "nv50/codegen/nv50_ir.h" #include "nv50/codegen/nv50_ir_target.h" namespace nv50_ir { const uint8_t Target::operationSrcNr[OP_LAST + 1] = { 0, 0, // NOP, PHI 0, 0, 0, 0, // UNION, SPLIT, MERGE, CONSTRAINT 1, 1, 2, // MOV, LOAD, STORE 2, 2, 2, 2, 2, 3, 3, 3, // ADD, SUB, MUL, DIV, MOD, MAD, FMA, SAD 1, 1, 1, // ABS, NEG, NOT 2, 2, 2, 2, 2, // AND, OR, XOR, SHL, SHR 2, 2, 1, // MAX, MIN, SAT 1, 1, 1, 1, // CEIL, FLOOR, TRUNC, CVT 3, 3, 3, 2, 3, 3, // SET_AND,OR,XOR, SET, SELP, SLCT 1, 1, 1, 1, 1, 1, // RCP, RSQ, LG2, SIN, COS, EX2 1, 1, 1, 1, 1, 2, // EXP, LOG, PRESIN, PREEX2, SQRT, POW 0, 0, 0, 0, 0, // BRA, CALL, RET, CONT, BREAK, 0, 0, 0, // PRERET,CONT,BREAK 0, 0, 0, 0, 0, 0, // BRKPT, JOINAT, JOIN, DISCARD, EXIT, MEMBAR 1, 1, 2, 1, 2, // VFETCH, PFETCH, EXPORT, LINTERP, PINTERP 1, 1, // EMIT, RESTART 1, 1, 1, // TEX, TXB, TXL, 1, 1, 1, 1, 1, // TXF, TXQ, TXD, TXG, TEXCSAA 1, 2, // SULD, SUST 1, 1, // DFDX, DFDY 1, 2, 2, 2, 0, 0, // RDSV, WRSV, PIXLD, QUADOP, QUADON, QUADPOP 2, 3, 2, 0, // POPCNT, INSBF, EXTBF, TEXBAR 0 }; const OpClass Target::operationClass[OP_LAST + 1] = { // NOP; PHI; UNION, SPLIT, MERGE, CONSTRAINT OPCLASS_OTHER, OPCLASS_PSEUDO, OPCLASS_PSEUDO, OPCLASS_PSEUDO, OPCLASS_PSEUDO, OPCLASS_PSEUDO, // MOV; LOAD; STORE OPCLASS_MOVE, OPCLASS_LOAD, OPCLASS_STORE, // ADD, SUB, MUL; DIV, MOD; MAD, FMA, SAD OPCLASS_ARITH, OPCLASS_ARITH, OPCLASS_ARITH, OPCLASS_ARITH, OPCLASS_ARITH, OPCLASS_ARITH, OPCLASS_ARITH, OPCLASS_ARITH, // ABS, NEG; NOT, AND, OR, XOR; SHL, SHR OPCLASS_CONVERT, OPCLASS_CONVERT, OPCLASS_LOGIC, OPCLASS_LOGIC, OPCLASS_LOGIC, OPCLASS_LOGIC, OPCLASS_SHIFT, OPCLASS_SHIFT, // MAX, MIN OPCLASS_COMPARE, OPCLASS_COMPARE, // SAT, CEIL, FLOOR, TRUNC; CVT OPCLASS_CONVERT, OPCLASS_CONVERT, OPCLASS_CONVERT, OPCLASS_CONVERT, OPCLASS_CONVERT, // SET(AND,OR,XOR); SELP, SLCT OPCLASS_COMPARE, OPCLASS_COMPARE, OPCLASS_COMPARE, OPCLASS_COMPARE, OPCLASS_COMPARE, OPCLASS_COMPARE, // RCP, RSQ, LG2, SIN, COS; EX2, EXP, LOG, PRESIN, PREEX2; SQRT, 
POW OPCLASS_SFU, OPCLASS_SFU, OPCLASS_SFU, OPCLASS_SFU, OPCLASS_SFU, OPCLASS_SFU, OPCLASS_SFU, OPCLASS_SFU, OPCLASS_SFU, OPCLASS_SFU, OPCLASS_SFU, OPCLASS_SFU, // BRA, CALL, RET; CONT, BREAK, PRE(RET,CONT,BREAK); BRKPT, JOINAT, JOIN OPCLASS_FLOW, OPCLASS_FLOW, OPCLASS_FLOW, OPCLASS_FLOW, OPCLASS_FLOW, OPCLASS_FLOW, OPCLASS_FLOW, OPCLASS_FLOW, OPCLASS_FLOW, OPCLASS_FLOW, OPCLASS_FLOW, // DISCARD, EXIT OPCLASS_FLOW, OPCLASS_FLOW, // MEMBAR OPCLASS_OTHER, // VFETCH, PFETCH, EXPORT OPCLASS_LOAD, OPCLASS_OTHER, OPCLASS_STORE, // LINTERP, PINTERP OPCLASS_SFU, OPCLASS_SFU, // EMIT, RESTART OPCLASS_OTHER, OPCLASS_OTHER, // TEX, TXB, TXL, TXF; TXQ, TXD, TXG, TEXCSAA OPCLASS_TEXTURE, OPCLASS_TEXTURE, OPCLASS_TEXTURE, OPCLASS_TEXTURE, OPCLASS_TEXTURE, OPCLASS_TEXTURE, OPCLASS_TEXTURE, OPCLASS_TEXTURE, // SULD, SUST OPCLASS_SURFACE, OPCLASS_SURFACE, // DFDX, DFDY, RDSV, WRSV; PIXLD, QUADOP, QUADON, QUADPOP OPCLASS_OTHER, OPCLASS_OTHER, OPCLASS_OTHER, OPCLASS_OTHER, OPCLASS_OTHER, OPCLASS_OTHER, OPCLASS_OTHER, OPCLASS_OTHER, // POPCNT, INSBF, EXTBF OPCLASS_OTHER, OPCLASS_OTHER, OPCLASS_OTHER, // TEXBAR OPCLASS_OTHER, OPCLASS_PSEUDO // LAST }; extern Target *getTargetNVC0(unsigned int chipset); extern Target *getTargetNV50(unsigned int chipset); Target *Target::create(unsigned int chipset) { switch (chipset & 0xf0) { case 0xc0: case 0xd0: case 0xe0: return getTargetNVC0(chipset); case 0x50: case 0x80: case 0x90: case 0xa0: return getTargetNV50(chipset); default: ERROR("unsupported target: NV%x\n", chipset); return 0; } } void Target::destroy(Target *targ) { delete targ; } CodeEmitter::CodeEmitter(const Target *target) : targ(target) { } void CodeEmitter::setCodeLocation(void *ptr, uint32_t size) { code = reinterpret_cast<uint32_t *>(ptr); codeSize = 0; codeSizeLimit = size; } void CodeEmitter::printBinary() const { uint32_t *bin = code - codeSize / 4; INFO("program binary (%u bytes)", codeSize); for (unsigned int pos = 0; pos < codeSize / 4; ++pos) { if ((pos % 8) == 0) 
INFO("\n"); INFO("%08x ", bin[pos]); } INFO("\n"); } static inline uint32_t sizeToBundlesNVE4(uint32_t size) { return (size + 55) / 56; } void CodeEmitter::prepareEmission(Program *prog) { for (ArrayList::Iterator fi = prog->allFuncs.iterator(); !fi.end(); fi.next()) { Function *func = reinterpret_cast<Function *>(fi.get()); func->binPos = prog->binSize; prepareEmission(func); // adjust sizes & positions for schedulding info: if (prog->getTarget()->hasSWSched) { BasicBlock *bb = NULL; for (int i = 0; i < func->bbCount; ++i) { bb = func->bbArray[i]; const uint32_t oldPos = bb->binPos; const uint32_t oldEnd = bb->binPos + bb->binSize; uint32_t adjPos = oldPos + sizeToBundlesNVE4(oldPos) * 8; uint32_t adjEnd = oldEnd + sizeToBundlesNVE4(oldEnd) * 8; bb->binPos = adjPos; bb->binSize = adjEnd - adjPos; } if (bb) func->binSize = bb->binPos + bb->binSize; } prog->binSize += func->binSize; } } void CodeEmitter::prepareEmission(Function *func) { func->bbCount = 0; func->bbArray = new BasicBlock * [func->cfg.getSize()]; BasicBlock::get(func->cfg.getRoot())->binPos = func->binPos; for (IteratorRef it = func->cfg.iteratorCFG(); !it->end(); it->next()) prepareEmission(BasicBlock::get(*it)); } void CodeEmitter::prepareEmission(BasicBlock *bb) { Instruction *i, *next; Function *func = bb->getFunction(); int j; unsigned int nShort; for (j = func->bbCount - 1; j >= 0 && !func->bbArray[j]->binSize; --j); for (; j >= 0; --j) { BasicBlock *in = func->bbArray[j]; Instruction *exit = in->getExit(); if (exit && exit->op == OP_BRA && exit->asFlow()->target.bb == bb) { in->binSize -= 8; func->binSize -= 8; for (++j; j < func->bbCount; ++j) func->bbArray[j]->binPos -= 8; in->remove(exit); } bb->binPos = in->binPos + in->binSize; if (in->binSize) // no more no-op branches to bb break; } func->bbArray[func->bbCount++] = bb; if (!bb->getExit()) return; // determine encoding size, try to group short instructions nShort = 0; for (i = bb->getEntry(); i; i = next) { next = i->next; i->encSize = 
getMinEncodingSize(i); if (next && i->encSize < 8) ++nShort; else if ((nShort & 1) && next && getMinEncodingSize(next) == 4) { if (i->isCommutationLegal(i->next)) { bb->permuteAdjacent(i, next); next->encSize = 4; next = i; i = i->prev; ++nShort; } else if (i->isCommutationLegal(i->prev) && next->next) { bb->permuteAdjacent(i->prev, i); next->encSize = 4; next = next->next; bb->binSize += 4; ++nShort; } else { i->encSize = 8; i->prev->encSize = 8; bb->binSize += 4; nShort = 0; } } else { i->encSize = 8; if (nShort & 1) { i->prev->encSize = 8; bb->binSize += 4; } nShort = 0; } bb->binSize += i->encSize; } if (bb->getExit()->encSize == 4) { assert(nShort); bb->getExit()->encSize = 8; bb->binSize += 4; if ((bb->getExit()->prev->encSize == 4) && !(nShort & 1)) { bb->binSize += 8; bb->getExit()->prev->encSize = 8; } } assert(!bb->getEntry() || (bb->getExit() && bb->getExit()->encSize == 8)); func->binSize += bb->binSize; } void Program::emitSymbolTable(struct nv50_ir_prog_info *info) { unsigned int n = 0, nMax = allFuncs.getSize(); info->bin.syms = (struct nv50_ir_prog_symbol *)MALLOC(nMax * sizeof(*info->bin.syms)); for (ArrayList::Iterator fi = allFuncs.iterator(); !fi.end(); fi.next(), ++n) { Function *f = (Function *)fi.get(); assert(n < nMax); info->bin.syms[n].label = f->getLabel(); info->bin.syms[n].offset = f->binPos; } info->bin.numSyms = n; } bool Program::emitBinary(struct nv50_ir_prog_info *info) { CodeEmitter *emit = target->getCodeEmitter(progType); emit->prepareEmission(this); if (dbgFlags & NV50_IR_DEBUG_BASIC) this->print(); if (!binSize) { code = NULL; return false; } code = reinterpret_cast<uint32_t *>(MALLOC(binSize)); if (!code) return false; emit->setCodeLocation(code, binSize); for (ArrayList::Iterator fi = allFuncs.iterator(); !fi.end(); fi.next()) { Function *fn = reinterpret_cast<Function *>(fi.get()); assert(emit->getCodeSize() == fn->binPos); for (int b = 0; b < fn->bbCount; ++b) for (Instruction *i = fn->bbArray[b]->getEntry(); i; i = 
i->next) emit->emitInstruction(i); } info->bin.relocData = emit->getRelocInfo(); emitSymbolTable(info); // the nvc0 driver will print the binary iself together with the header if ((dbgFlags & NV50_IR_DEBUG_BASIC) && getTarget()->getChipset() < 0xc0) emit->printBinary(); delete emit; return true; } #define RELOC_ALLOC_INCREMENT 8 bool CodeEmitter::addReloc(RelocEntry::Type ty, int w, uint32_t data, uint32_t m, int s) { unsigned int n = relocInfo ? relocInfo->count : 0; if (!(n % RELOC_ALLOC_INCREMENT)) { size_t size = sizeof(RelocInfo) + n * sizeof(RelocEntry); relocInfo = reinterpret_cast<RelocInfo *>( REALLOC(relocInfo, n ? size : 0, size + RELOC_ALLOC_INCREMENT * sizeof(RelocEntry))); if (!relocInfo) return false; if (n == 0) memset(relocInfo, 0, sizeof(RelocInfo)); } ++relocInfo->count; relocInfo->entry[n].data = data; relocInfo->entry[n].mask = m; relocInfo->entry[n].offset = codeSize + w * 4; relocInfo->entry[n].bitPos = s; relocInfo->entry[n].type = ty; return true; } void RelocEntry::apply(uint32_t *binary, const RelocInfo *info) const { uint32_t value = 0; switch (type) { case TYPE_CODE: value = info->codePos; break; case TYPE_BUILTIN: value = info->libPos; break; case TYPE_DATA: value = info->dataPos; break; default: assert(0); break; } value += data; value = (bitPos < 0) ? 
      (value >> -bitPos) : (value << bitPos);
   // Patch the instruction word in place: clear the field, then OR in the
   // (shifted) relocated value restricted to the field mask.
   binary[offset / 4] &= ~mask;
   binary[offset / 4] |= value & mask;
}

} // namespace nv50_ir

#include "nv50/codegen/nv50_ir_driver.h"

extern "C" {

// Apply all recorded relocation entries to an emitted code buffer, given the
// final load positions of the code, built-in library and data segments.
void
nv50_ir_relocate_code(void *relocData, uint32_t *code,
                      uint32_t codePos,
                      uint32_t libPos,
                      uint32_t dataPos)
{
   nv50_ir::RelocInfo *info = reinterpret_cast<nv50_ir::RelocInfo *>(relocData);

   info->codePos = codePos;
   info->libPos = libPos;
   info->dataPos = dataPos;

   for (unsigned int i = 0; i < info->count; ++i)
      info->entry[i].apply(code, info);
}

// Fetch the built-in code library for the given chipset via a temporary
// Target instance.
// NOTE(review): Target::create() returns 0 for unsupported chipsets, which
// would be dereferenced here — confirm callers only pass supported chipsets.
void
nv50_ir_get_target_library(uint32_t chipset,
                           const uint32_t **code, uint32_t *size)
{
   nv50_ir::Target *targ = nv50_ir::Target::create(chipset);
   targ->getBuiltinCode(code, size);
   nv50_ir::Target::destroy(targ);
}

}
// (C) Copyright 2008 CodeRage, LLC (turkanis at coderage dot com) // (C) Copyright 2003-2007 Jonathan Turkanis // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) // See http://www.boost.org/libs/iostreams for documentation. #ifndef BOOST_IOSTREAMS_BACK_INSERTER_HPP_INCLUDED #define BOOST_IOSTREAMS_BACK_INSERTER_HPP_INCLUDED #if defined(_MSC_VER) # pragma once #endif #include <boost/iostreams/detail/ios.hpp> // streamsize. #include <boost/iostreams/categories.hpp> namespace boost { namespace iostreams { template<typename Container> class back_insert_device { public: typedef typename Container::value_type char_type; typedef sink_tag category; back_insert_device(Container& cnt) : container(&cnt) { } std::streamsize write(const char_type* s, std::streamsize n) { container->insert(container->end(), s, s + n); return n; } protected: Container* container; }; template<typename Container> back_insert_device<Container> back_inserter(Container& cnt) { return back_insert_device<Container>(cnt); } } } // End namespaces iostreams, boost. #endif // #ifndef BOOST_IOSTREAMS_BACK_INSERTER_HPP_INCLUDED
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/*
 Copyright (C) 2008 Paul Farrington

 This file is part of QuantLib, a free-software/open-source library
 for financial quantitative analysts and developers - http://quantlib.org/

 QuantLib is free software: you can redistribute it and/or modify it
 under the terms of the QuantLib license.  You should have received a
 copy of the license along with this program; if not, please email
 <quantlib-dev@lists.sf.net>. The license is also available online at
 <http://quantlib.org/license.shtml>.

 This program is distributed in the hope that it will be useful, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the license for more details.
*/

#include <ql/instruments/quantobarrieroption.hpp>

namespace QuantLib {

    // Barrier option with quanto features: on top of the plain barrier-option
    // results it exposes the quanto sensitivities qvega, qrho and qlambda,
    // which the pricing engine is expected to populate.
    QuantoBarrierOption::QuantoBarrierOption(
                        Barrier::Type barrierType,
                        Real barrier,
                        Real rebate,
                        const ext::shared_ptr<StrikedTypePayoff>& payoff,
                        const ext::shared_ptr<Exercise>& exercise)
    : BarrierOption(barrierType, barrier, rebate, payoff, exercise) {}

    // Sensitivity to the exchange-rate volatility. Triggers (lazy)
    // recalculation; throws if the engine left the result unset.
    Real QuantoBarrierOption::qvega() const {
        calculate();
        QL_REQUIRE(qvega_ != Null<Real>(),
                   "exchange rate vega calculation failed");
        return qvega_;
    }

    // Sensitivity to the foreign interest rate. Triggers (lazy)
    // recalculation; throws if the engine left the result unset.
    Real QuantoBarrierOption::qrho() const {
        calculate();
        QL_REQUIRE(qrho_ != Null<Real>(),
                   "foreign interest rate rho calculation failed");
        return qrho_;
    }

    // Sensitivity to the quanto correlation. Triggers (lazy) recalculation;
    // throws if the engine left the result unset.
    Real QuantoBarrierOption::qlambda() const {
        calculate();
        QL_REQUIRE(qlambda_ != Null<Real>(),
                   "quanto correlation sensitivity calculation failed");
        return qlambda_;
    }

    // On expiry, zero out the quanto greeks in addition to the base-class
    // results.
    void QuantoBarrierOption::setupExpired() const {
        BarrierOption::setupExpired();
        qvega_ = qrho_ = qlambda_ = 0.0;
    }

    // Copy results back from the engine. The engine must provide the
    // quanto-specific results struct; a plain barrier-option results object
    // is rejected here.
    void QuantoBarrierOption::fetchResults(
                                      const PricingEngine::results* r) const {
        BarrierOption::fetchResults(r);
        const auto* quantoResults =
            dynamic_cast<const QuantoBarrierOption::results*>(r);
        QL_ENSURE(quantoResults != nullptr,
                  "no quanto results returned from pricing engine");
        qrho_    = quantoResults->qrho;
        qvega_   = quantoResults->qvega;
        qlambda_ = quantoResults->qlambda;
    }

}
/// @file /// @author David Pilger <dpilger26@gmail.com> /// [GitHub Repository](https://github.com/dpilger26/NumCpp) /// @version 1.3 /// /// @section License /// Copyright 2020 David Pilger /// /// Permission is hereby granted, free of charge, to any person obtaining a copy of this /// software and associated documentation files(the "Software"), to deal in the Software /// without restriction, including without limitation the rights to use, copy, modify, /// merge, publish, distribute, sublicense, and/or sell copies of the Software, and to /// permit persons to whom the Software is furnished to do so, subject to the following /// conditions : /// /// The above copyright notice and this permission notice shall be included in all copies /// or substantial portions of the Software. /// /// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, /// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR /// PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE /// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR /// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER /// DEALINGS IN THE SOFTWARE. /// /// @section Description /// Functions for working with NdArrays /// #pragma once #include "NumCpp/NdArray.hpp" #include "NumCpp/Core/Types.hpp" namespace nc { //============================================================================ // Method Description: /// Return specified diagonals. 
/// /// NumPy Reference: https://www.numpy.org/devdocs/reference/generated/numpy.diagonal.html /// /// @param inArray /// @param inOffset (Defaults to 0) /// @param inAxis (Optional, default ROW) axis the offset is applied to /// @return /// NdArray /// template<typename dtype> NdArray<dtype> diagonal(const NdArray<dtype>& inArray, int32 inOffset = 0, Axis inAxis = Axis::ROW) noexcept { return inArray.diagonal(inOffset, inAxis); } }
#define BOOST_TEST_MODULE "test_read_excluded_volume_potential" #ifdef BOOST_TEST_DYN_LINK #include <boost/test/unit_test.hpp> #else #include <boost/test/included/unit_test.hpp> #endif #include <mjolnir/input/read_global_potential.hpp> #include <mjolnir/core/SimulatorTraits.hpp> #include <mjolnir/core/BoundaryCondition.hpp> #include <tuple> using test_types = std::tuple<double, float>; constexpr inline float tolerance_value(float) noexcept {return 1e-4;} constexpr inline double tolerance_value(double) noexcept {return 1e-8;} template<typename Real> decltype(boost::test_tools::tolerance(std::declval<Real>())) tolerance() {return boost::test_tools::tolerance(tolerance_value(Real()));} BOOST_AUTO_TEST_CASE_TEMPLATE(read_excluded_volume_noenv, T, test_types) { mjolnir::LoggerManager::set_default_logger("test_read_excluded_volume.log"); using real_type = T; using traits_type = mjolnir::SimulatorTraits<real_type, mjolnir::UnlimitedBoundary>; { using namespace toml::literals; const toml::value v = u8R"( epsilon = 3.14 ignore.molecule = "Nothing" ignore.particles_within.bond = 3 ignore.particles_within.contact = 1 parameters = [ {index = 0, radius = 2.0}, {index = 1, radius = 2.0}, {index = 3, radius = 3.0}, {index = 5, radius = 5.0}, {index = 7, radius = 7.0}, {index = 100, radius = 100.0}, ] )"_toml; const auto pot_para = mjolnir::read_excluded_volume_potential<traits_type>(v); const auto& pot = pot_para.first; const auto& para = dynamic_cast<mjolnir::ExcludedVolumeParameterList<traits_type> const&>(pot_para.second.cref()); const auto ignore_within = para.exclusion_list().ignore_topology(); const std::map<std::string, std::size_t> within( ignore_within.begin(), ignore_within.end()); BOOST_TEST(within.size() == 2u); BOOST_TEST(within.at("bond") == 3ul); BOOST_TEST(within.at("contact") == 1ul); BOOST_TEST(para.participants().size() == 6u); BOOST_TEST(para.participants().at(0) == 0u); BOOST_TEST(para.participants().at(1) == 1u); BOOST_TEST(para.participants().at(2) == 3u); 
BOOST_TEST(para.participants().at(3) == 5u); BOOST_TEST(para.participants().at(4) == 7u); BOOST_TEST(para.participants().at(5) == 100u); BOOST_TEST(para.parameters().at( 0).radius == real_type( 2.0), tolerance<real_type>()); BOOST_TEST(para.parameters().at( 1).radius == real_type( 2.0), tolerance<real_type>()); BOOST_TEST(para.parameters().at( 3).radius == real_type( 3.0), tolerance<real_type>()); BOOST_TEST(para.parameters().at( 5).radius == real_type( 5.0), tolerance<real_type>()); BOOST_TEST(para.parameters().at( 7).radius == real_type( 7.0), tolerance<real_type>()); BOOST_TEST(para.parameters().at(100).radius == real_type(100.0), tolerance<real_type>()); BOOST_TEST(pot.epsilon() == real_type(3.14), tolerance<real_type>()); } } BOOST_AUTO_TEST_CASE_TEMPLATE(read_excluded_volume_env, T, test_types) { mjolnir::LoggerManager::set_default_logger("test_read_excluded_volume.log"); using real_type = T; using traits_type = mjolnir::SimulatorTraits<real_type, mjolnir::UnlimitedBoundary>; { using namespace toml::literals; const toml::value v = u8R"( epsilon = 3.14 ignore.molecule = "Nothing" ignore.particles_within.bond = 3 ignore.particles_within.contact = 1 env.five = 5.0 env.seven = 7.0 env.toolarge = 100.0 parameters = [ {index = 0, radius = 2.0}, {index = 1, radius = 2.0}, {index = 3, radius = 3.0}, {index = 5, radius = "five"}, {index = 7, radius = "seven"}, {index = 100, radius = "toolarge"}, ] )"_toml; const auto pot_para = mjolnir::read_excluded_volume_potential<traits_type>(v); const auto& pot = pot_para.first; const auto& para = dynamic_cast<mjolnir::ExcludedVolumeParameterList<traits_type> const&>(pot_para.second.cref()); const auto ignore_within = para.exclusion_list().ignore_topology(); const std::map<std::string, std::size_t> within( ignore_within.begin(), ignore_within.end()); BOOST_TEST(within.size() == 2u); BOOST_TEST(within.at("bond") == 3ul); BOOST_TEST(within.at("contact") == 1ul); BOOST_TEST(para.participants().size() == 6u); 
BOOST_TEST(para.participants().at(0) == 0u); BOOST_TEST(para.participants().at(1) == 1u); BOOST_TEST(para.participants().at(2) == 3u); BOOST_TEST(para.participants().at(3) == 5u); BOOST_TEST(para.participants().at(4) == 7u); BOOST_TEST(para.participants().at(5) == 100u); BOOST_TEST(para.parameters().at( 0).radius == real_type( 2.0), tolerance<real_type>()); BOOST_TEST(para.parameters().at( 1).radius == real_type( 2.0), tolerance<real_type>()); BOOST_TEST(para.parameters().at( 3).radius == real_type( 3.0), tolerance<real_type>()); BOOST_TEST(para.parameters().at( 5).radius == real_type( 5.0), tolerance<real_type>()); BOOST_TEST(para.parameters().at( 7).radius == real_type( 7.0), tolerance<real_type>()); BOOST_TEST(para.parameters().at(100).radius == real_type(100.0), tolerance<real_type>()); BOOST_TEST(pot.epsilon() == real_type(3.14), tolerance<real_type>()); } } BOOST_AUTO_TEST_CASE_TEMPLATE(read_excluded_volume_ignore_self, T, test_types) { mjolnir::LoggerManager::set_default_logger("test_read_excluded_volume.log"); using real_type = T; using traits_type = mjolnir::SimulatorTraits<real_type, mjolnir::UnlimitedBoundary>; { using namespace toml::literals; const toml::value v = u8R"( epsilon = 3.14 ignore.molecule = "Self" ignore.particles_within.bond = 3 ignore.particles_within.contact = 1 parameters = [ {index = 0, radius = 2.0}, ] )"_toml; const auto pot_para = mjolnir::read_excluded_volume_potential<traits_type>(v); const auto& para = dynamic_cast<mjolnir::ExcludedVolumeParameterList<traits_type> const&>(pot_para.second.cref()); const auto ignore_within = para.exclusion_list().ignore_topology(); const std::map<std::string, std::size_t> within( ignore_within.begin(), ignore_within.end()); BOOST_TEST(within.size() == 2u); BOOST_TEST(within.at("bond") == 3ul); BOOST_TEST(within.at("contact") == 1ul); BOOST_TEST( para.exclusion_list().is_ignored_molecule(0, 0)); BOOST_TEST(!para.exclusion_list().is_ignored_molecule(0, 1)); BOOST_TEST( 
para.exclusion_list().is_ignored_molecule(1, 1)); // by default, no group is ignored BOOST_TEST(!para.exclusion_list().is_ignored_group("protein1", "protein1")); BOOST_TEST(!para.exclusion_list().is_ignored_group("protein1", "protein2")); BOOST_TEST(!para.exclusion_list().is_ignored_group("protein2", "protein2")); } } BOOST_AUTO_TEST_CASE_TEMPLATE(read_excluded_volume_ignore_others, T, test_types) { mjolnir::LoggerManager::set_default_logger("test_read_excluded_volume.log"); using real_type = T; using traits_type = mjolnir::SimulatorTraits<real_type, mjolnir::UnlimitedBoundary>; { using namespace toml::literals; const toml::value v = u8R"( epsilon = 3.14 ignore.molecule = "Others" ignore.particles_within.bond = 3 ignore.particles_within.contact = 1 parameters = [ {index = 0, radius = 2.0}, ] )"_toml; const auto pot_para = mjolnir::read_excluded_volume_potential<traits_type>(v); const auto& para = dynamic_cast<mjolnir::ExcludedVolumeParameterList<traits_type> const&>(pot_para.second.cref()); const auto ignore_within = para.exclusion_list().ignore_topology(); const std::map<std::string, std::size_t> within( ignore_within.begin(), ignore_within.end()); BOOST_TEST(within.size() == 2u); BOOST_TEST(within.at("bond") == 3ul); BOOST_TEST(within.at("contact") == 1ul); BOOST_TEST(!para.exclusion_list().is_ignored_molecule(0, 0)); BOOST_TEST( para.exclusion_list().is_ignored_molecule(0, 1)); BOOST_TEST(!para.exclusion_list().is_ignored_molecule(1, 1)); // by default, no group is ignored BOOST_TEST(!para.exclusion_list().is_ignored_group("protein1", "protein1")); BOOST_TEST(!para.exclusion_list().is_ignored_group("protein1", "protein2")); BOOST_TEST(!para.exclusion_list().is_ignored_group("protein2", "protein2")); } } BOOST_AUTO_TEST_CASE_TEMPLATE(read_excluded_volume_ignore_group, T, test_types) { mjolnir::LoggerManager::set_default_logger("test_read_excluded_volume.log"); using real_type = T; using traits_type = mjolnir::SimulatorTraits<real_type, 
mjolnir::UnlimitedBoundary>; { using namespace toml::literals; const toml::value v = u8R"( epsilon = 3.14 ignore.molecule = "Nothing" ignore.particles_within.bond = 3 ignore.particles_within.contact = 1 ignore.group.inter = [ ["protein1", "protein2"], # between these ["protein1", "protein3"], ] parameters = [ {index = 0, radius = 2.0}, ] )"_toml; const auto pot_para = mjolnir::read_excluded_volume_potential<traits_type>(v); const auto& para = dynamic_cast<mjolnir::ExcludedVolumeParameterList<traits_type> const&>(pot_para.second.cref()); const auto ignore_within = para.exclusion_list().ignore_topology(); const std::map<std::string, std::size_t> within( ignore_within.begin(), ignore_within.end()); BOOST_TEST(within.size() == 2u); BOOST_TEST(within.at("bond") == 3ul); BOOST_TEST(within.at("contact") == 1ul); BOOST_TEST(!para.exclusion_list().is_ignored_molecule(0, 0)); BOOST_TEST(!para.exclusion_list().is_ignored_molecule(0, 1)); BOOST_TEST(!para.exclusion_list().is_ignored_molecule(1, 1)); BOOST_TEST(!para.exclusion_list().is_ignored_group("protein1", "protein1")); BOOST_TEST( para.exclusion_list().is_ignored_group("protein1", "protein2")); BOOST_TEST( para.exclusion_list().is_ignored_group("protein1", "protein3")); BOOST_TEST( para.exclusion_list().is_ignored_group("protein2", "protein1")); BOOST_TEST(!para.exclusion_list().is_ignored_group("protein2", "protein2")); BOOST_TEST(!para.exclusion_list().is_ignored_group("protein2", "protein3")); BOOST_TEST( para.exclusion_list().is_ignored_group("protein3", "protein1")); BOOST_TEST(!para.exclusion_list().is_ignored_group("protein3", "protein2")); BOOST_TEST(!para.exclusion_list().is_ignored_group("protein3", "protein3")); } }
/* -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* * Copyright 2013 Couchbase, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "config.h" #include <queue> #include <sstream> #include "common.h" #include "executorpool.h" #include "executorthread.h" #include "taskqueue.h" #define LOG(...) extern "C" { static void *launch_executor_thread(void *arg) { ExecutorThread *executor = (ExecutorThread*) arg; executor->run(); return NULL; } } void ExecutorThread::start() { thread_create(&thread, launch_executor_thread, (void*)this); } void ExecutorThread::stop(bool wait) { if (!wait && (state == EXECUTOR_SHUTDOWN || state == EXECUTOR_DEAD)) { return; } state = EXECUTOR_SHUTDOWN; if (!wait) { LOG(EXTENSION_LOG_WARNING, "%s: Stopping", name.c_str()); return; } void *ret; thread_join(thread, &ret); LOG(EXTENSION_LOG_WARNING, "%s: Stopped", name.c_str()); } void ExecutorThread::run() { LOG(EXTENSION_LOG_DEBUG, "Thread %s running..", getName().c_str()); for (uint8_t tick = 1;; tick++) { { LockHolder lh(currentTaskMutex); currentTask.reset(); } if (state != EXECUTOR_RUNNING) { break; } if (TaskQueue *q = manager->nextTask(*this, tick)) { if (currentTask->isdead()) { // release capacity back to TaskQueue manager->doneWork(curTaskType); manager->cancel(currentTask->taskId, true); continue; } // Measure scheduling overhead as difference between the time // that the task wanted to wake up and the current time now = gethrtime(); hrtime_t woketime = 
                                  currentTask->getWaketime();
            // Scheduling delay = now - intended wake time (microseconds);
            // clamp to 0 if the task was picked up early.
            currentTask->getTaskable().logQTime(currentTask->getTypeId(),
                                                now > woketime ?
                                                (now - woketime) / 1000 : 0);
            taskStart.store(now);
            LOG(EXTENSION_LOG_DEBUG,
                "%s: Run task \"%s\" id %" PRIu64,
                getName().c_str(), currentTask->getDescription().c_str(),
                uint64_t(currentTask->getId()));

            // Now Run the Task ....
            currentTask->setState(TASK_RUNNING, TASK_SNOOZED);
            bool again = currentTask->run();

            // Task done, log it ... (runtime in microseconds)
            hrtime_t runtime((gethrtime() - taskStart) / 1000);
            currentTask->getTaskable().logRunTime(currentTask->getTypeId(),
                                                  runtime);

            // Check if task is run once or needs to be rescheduled..
            if (!again || currentTask->isdead()) {
                // release capacity back to TaskQueue
                manager->doneWork(curTaskType);
                manager->cancel(currentTask->taskId, true);
            } else {
                hrtime_t new_waketime;
                // if a task has not set snooze, update its waketime to now
                // before rescheduling for more accurate timing histograms
                currentTask->updateWaketimeIfLessThan(now);
                // release capacity back to TaskQueue ..
                manager->doneWork(curTaskType);
                new_waketime = q->reschedule(currentTask, curTaskType);
                // record min waketime ...
                if (new_waketime < waketime) {
                    waketime = new_waketime;
                }
                LOG(EXTENSION_LOG_DEBUG,
                    "%s: Reschedule a task"
                    " \"%s\" id %" PRIu64 "[%" PRIu64 " %" PRIu64 " |%" PRIu64 "]",
                    name.c_str(), currentTask->getDescription().c_str(),
                    uint64_t(currentTask->getId()),
                    uint64_t(new_waketime),
                    uint64_t(currentTask->getWaketime()),
                    uint64_t(waketime));
            }
        }
    }
    // Loop exited (state left EXECUTOR_RUNNING): mark the thread dead.
    state = EXECUTOR_DEAD;
}

// Record the task this thread is about to execute, under the mutex that
// guards currentTask against concurrent readers.
void ExecutorThread::setCurrentTask(ExTask newTask) {
    LockHolder lh(currentTaskMutex);
    currentTask = newTask;
}

// Human-readable lifecycle state of this executor thread; any state other
// than the four named ones reports as "dead".
const std::string ExecutorThread::getStateName() {
    switch (state.load()) {
    case EXECUTOR_RUNNING:
        return std::string("running");
    case EXECUTOR_WAITING:
        return std::string("waiting");
    case EXECUTOR_SLEEPING:
        return std::string("sleeping");
    case EXECUTOR_SHUTDOWN:
        return std::string("shutdown");
    default:
        return std::string("dead");
    }
}
// Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Author: kenton@google.com (Kenton Varda) // Based on original Protocol Buffers design by // Sanjay Ghemawat, Jeff Dean, and others. 
#include <google/protobuf/compiler/cpp/cpp_file.h>

#include <map>
#include <memory>
#ifndef _SHARED_PTR_H
#include <google/protobuf/stubs/shared_ptr.h>
#endif
#include <set>

#include <google/protobuf/compiler/cpp/cpp_enum.h>
#include <google/protobuf/compiler/cpp/cpp_service.h>
#include <google/protobuf/compiler/cpp/cpp_extension.h>
#include <google/protobuf/compiler/cpp/cpp_helpers.h>
#include <google/protobuf/compiler/cpp/cpp_message.h>
#include <google/protobuf/compiler/cpp/cpp_field.h>
#include <google/protobuf/io/printer.h>
#include <google/protobuf/descriptor.pb.h>
#include <google/protobuf/stubs/strutil.h>

namespace google {
namespace protobuf {
namespace compiler {
namespace cpp {

// ===================================================================

// Builds one sub-generator per message, enum, service and extension declared
// in |file|; the Generate* methods below delegate all per-entity emission to
// these. Also caches the dot-split package for namespace open/close.
FileGenerator::FileGenerator(const FileDescriptor* file, const Options& options)
    : file_(file),
      options_(options),
      message_generators_(
          new google::protobuf::scoped_ptr<MessageGenerator>[file->message_type_count()]),
      enum_generators_(
          new google::protobuf::scoped_ptr<EnumGenerator>[file->enum_type_count()]),
      service_generators_(
          new google::protobuf::scoped_ptr<ServiceGenerator>[file->service_count()]),
      extension_generators_(
          new google::protobuf::scoped_ptr<ExtensionGenerator>[file->extension_count()]) {

  for (int i = 0; i < file->message_type_count(); i++) {
    message_generators_[i].reset(
        new MessageGenerator(file->message_type(i), options));
  }
  for (int i = 0; i < file->enum_type_count(); i++) {
    enum_generators_[i].reset(
        new EnumGenerator(file->enum_type(i), options));
  }
  for (int i = 0; i < file->service_count(); i++) {
    service_generators_[i].reset(
        new ServiceGenerator(file->service(i), options));
  }
  for (int i = 0; i < file->extension_count(); i++) {
    extension_generators_[i].reset(
        new ExtensionGenerator(file->extension(i), options));
  }

  SplitStringUsing(file_->package(), ".", &package_parts_);
}

FileGenerator::~FileGenerator() {}

// Emits the .proto.h header (only generated in proto_h mode): header guard,
// library includes, public-dependency includes, forward declarations, and all
// type/service/extension definitions including inline method bodies.
void FileGenerator::GenerateProtoHeader(io::Printer* printer,
                                        const string& info_path) {
  if (!options_.proto_h) {
    return;
  }

  string filename_identifier = FilenameIdentifier(file_->name());
  GenerateTopHeaderGuard(printer, filename_identifier);

  GenerateLibraryIncludes(printer);

  // Re-export public dependencies so users of this header transitively see
  // them (IWYU pragma keeps include-what-you-use quiet).
  for (int i = 0; i < file_->public_dependency_count(); i++) {
    const FileDescriptor* dep = file_->public_dependency(i);
    const char* extension = ".proto.h";
    string dependency = StripProto(dep->name()) + extension;
    printer->Print(
      "#include \"$dependency$\" // IWYU pragma: export\n",
      "dependency", dependency);
  }

  GenerateMetadataPragma(printer, info_path);

  printer->Print(
    "// @@protoc_insertion_point(includes)\n");

  GenerateForwardDeclarations(printer);

  // Open namespace.
  GenerateNamespaceOpeners(printer);

  GenerateGlobalStateFunctionDeclarations(printer);

  printer->Print("\n");

  GenerateEnumDefinitions(printer);

  printer->Print(kThickSeparator);
  printer->Print("\n");

  GenerateMessageDefinitions(printer);

  printer->Print("\n");
  printer->Print(kThickSeparator);
  printer->Print("\n");

  GenerateServiceDefinitions(printer);

  GenerateExtensionIdentifiers(printer);

  printer->Print("\n");
  printer->Print(kThickSeparator);
  printer->Print("\n");

  GenerateInlineFunctionDefinitions(printer);

  printer->Print(
    "\n"
    "// @@protoc_insertion_point(namespace_scope)\n"
    "\n");

  // Close up namespace.
  GenerateNamespaceClosers(printer);

  // We need to specialize some templates in the ::google::protobuf namespace:
  GenerateProto2NamespaceEnumSpecializations(printer);

  printer->Print(
    "\n"
    "// @@protoc_insertion_point(global_scope)\n"
    "\n");

  GenerateBottomHeaderGuard(printer, filename_identifier);
}

// Emits the .pb.h header. In proto_h mode this is just a thin wrapper that
// includes the .proto.h; otherwise it contains the full definitions.
void FileGenerator::GeneratePBHeader(io::Printer* printer,
                                     const string& info_path) {
  string filename_identifier =
      FilenameIdentifier(file_->name() + (options_.proto_h ? ".pb.h" : ""));
  GenerateTopHeaderGuard(printer, filename_identifier);

  if (options_.proto_h) {
    printer->Print("#include \"$basename$.proto.h\" // IWYU pragma: export\n",
                   "basename", StripProto(file_->name()));
  } else {
    GenerateLibraryIncludes(printer);
  }
  GenerateDependencyIncludes(printer);
  GenerateMetadataPragma(printer, info_path);

  printer->Print(
    "// @@protoc_insertion_point(includes)\n");

  // Open namespace.
  GenerateNamespaceOpeners(printer);

  if (!options_.proto_h) {
    GenerateGlobalStateFunctionDeclarations(printer);
    GenerateMessageForwardDeclarations(printer);

    printer->Print("\n");

    GenerateEnumDefinitions(printer);

    printer->Print(kThickSeparator);
    printer->Print("\n");

    GenerateMessageDefinitions(printer);

    printer->Print("\n");
    printer->Print(kThickSeparator);
    printer->Print("\n");

    GenerateServiceDefinitions(printer);

    GenerateExtensionIdentifiers(printer);

    printer->Print("\n");
    printer->Print(kThickSeparator);
    printer->Print("\n");

    GenerateInlineFunctionDefinitions(printer);
  }

  printer->Print(
    "\n"
    "// @@protoc_insertion_point(namespace_scope)\n");

  // Close up namespace.
  GenerateNamespaceClosers(printer);

  if (!options_.proto_h) {
    // We need to specialize some templates in the ::google::protobuf namespace:
    GenerateProto2NamespaceEnumSpecializations(printer);
  }

  printer->Print(
    "\n"
    "// @@protoc_insertion_point(global_scope)\n"
    "\n");

  GenerateBottomHeaderGuard(printer, filename_identifier);
}

// Emits the .pb.cc implementation file: includes, descriptor machinery
// (namespace-scope descriptor pointers + BuildDescriptors), then all enum,
// message, service and extension method bodies.
void FileGenerator::GenerateSource(io::Printer* printer) {
  const bool use_system_include = IsWellKnownMessage(file_);
  string header =
      StripProto(file_->name()) + (options_.proto_h ? ".proto.h" : ".pb.h");
  printer->Print(
    "// Generated by the protocol buffer compiler. DO NOT EDIT!\n"
    "// source: $filename$\n"
    "\n"
    // The generated code calls accessors that might be deprecated. We don't
    // want the compiler to warn in generated code.
    "#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION\n"
    "#include $left$$header$$right$\n"
    "\n"
    "#include <algorithm>\n"  // for swap()
    "\n"
    "#include <google/protobuf/stubs/common.h>\n"
    "#include <google/protobuf/stubs/port.h>\n"
    "#include <google/protobuf/stubs/once.h>\n"
    "#include <google/protobuf/io/coded_stream.h>\n"
    "#include <google/protobuf/wire_format_lite_inl.h>\n",
    "filename", file_->name(),
    "header", header,
    "left", use_system_include ? "<" : "\"",
    "right", use_system_include ? ">" : "\"");

  // Unknown fields implementation in lite mode uses StringOutputStream
  if (!UseUnknownFieldSet(file_, options_) && file_->message_type_count() > 0) {
    printer->Print(
      "#include <google/protobuf/io/zero_copy_stream_impl_lite.h>\n");
  }

  if (HasDescriptorMethods(file_, options_)) {
    printer->Print(
      "#include <google/protobuf/descriptor.h>\n"
      "#include <google/protobuf/generated_message_reflection.h>\n"
      "#include <google/protobuf/reflection_ops.h>\n"
      "#include <google/protobuf/wire_format.h>\n");
  }

  if (options_.proto_h) {
    // Use the smaller .proto.h files.
    for (int i = 0; i < file_->dependency_count(); i++) {
      const FileDescriptor* dep = file_->dependency(i);
      const char* extension = ".proto.h";
      string dependency = StripProto(dep->name()) + extension;
      printer->Print(
        "#include \"$dependency$\"\n",
        "dependency", dependency);
    }
  }

  printer->Print(
    "// @@protoc_insertion_point(includes)\n");

  GenerateNamespaceOpeners(printer);

  if (HasDescriptorMethods(file_, options_)) {
    // File-local descriptor pointer variables, filled in lazily by
    // AssignDescriptors() below.
    printer->Print(
      "\n"
      "namespace {\n"
      "\n");
    for (int i = 0; i < file_->message_type_count(); i++) {
      message_generators_[i]->GenerateDescriptorDeclarations(printer);
    }
    for (int i = 0; i < file_->enum_type_count(); i++) {
      printer->Print(
        "const ::google::protobuf::EnumDescriptor* $name$_descriptor_ = NULL;\n",
        "name", ClassName(file_->enum_type(i), false));
    }

    if (HasGenericServices(file_, options_)) {
      for (int i = 0; i < file_->service_count(); i++) {
        printer->Print(
          "const ::google::protobuf::ServiceDescriptor* $name$_descriptor_ = NULL;\n",
          "name", file_->service(i)->name());
      }
    }

    printer->Print(
      "\n"
      "} // namespace\n"
      "\n");
  }

  // Define our externally-visible BuildDescriptors() function. (For the lite
  // library, all this does is initialize default instances.)
  GenerateBuildDescriptors(printer);

  // Generate enums.
  for (int i = 0; i < file_->enum_type_count(); i++) {
    enum_generators_[i]->GenerateMethods(printer);
  }

  // Generate classes.
  for (int i = 0; i < file_->message_type_count(); i++) {
    printer->Print("\n");
    printer->Print(kThickSeparator);
    printer->Print("\n");
    message_generators_[i]->GenerateClassMethods(printer);

    printer->Print("#if PROTOBUF_INLINE_NOT_IN_HEADERS\n");
    // Generate class inline methods.
    message_generators_[i]->GenerateInlineMethods(printer,
                                                  /* is_inline = */ false);
    printer->Print("#endif // PROTOBUF_INLINE_NOT_IN_HEADERS\n");
  }

  if (HasGenericServices(file_, options_)) {
    // Generate services.
    for (int i = 0; i < file_->service_count(); i++) {
      if (i == 0) printer->Print("\n");
      printer->Print(kThickSeparator);
      printer->Print("\n");
      service_generators_[i]->GenerateImplementation(printer);
    }
  }

  // Define extensions.
  for (int i = 0; i < file_->extension_count(); i++) {
    extension_generators_[i]->GenerateDefinition(printer);
  }

  printer->Print(
    "\n"
    "// @@protoc_insertion_point(namespace_scope)\n");

  GenerateNamespaceClosers(printer);

  printer->Print(
    "\n"
    "// @@protoc_insertion_point(global_scope)\n");
}

// Recursive container of forward declarations, keyed by nested namespace.
// Print() emits "namespace a { ... }" blocks with class/enum forward decls.
class FileGenerator::ForwardDeclarations {
 public:
  ~ForwardDeclarations() {
    // Owns its child namespaces (raw pointers created in AddOrGetNamespace).
    for (map<string, ForwardDeclarations *>::iterator
             it = namespaces_.begin(),
             end = namespaces_.end();
         it != end; ++it) {
      delete it->second;
    }
    namespaces_.clear();
  }

  // Returns the (possibly newly created) child scope for |ns_name|.
  ForwardDeclarations* AddOrGetNamespace(const string& ns_name) {
    ForwardDeclarations*& ns = namespaces_[ns_name];
    if (ns == NULL) {
      ns = new ForwardDeclarations;
    }
    return ns;
  }

  map<string, const Descriptor*>& classes() { return classes_; }
  map<string, const EnumDescriptor*>& enums() { return enums_; }

  void Print(io::Printer* printer) const {
    for (map<string, const EnumDescriptor *>::const_iterator
             it = enums_.begin(),
             end = enums_.end();
         it != end; ++it) {
      printer->Print("enum $enumname$ : int;\n", "enumname", it->first);
      printer->Annotate("enumname", it->second);
      printer->Print("bool $enumname$_IsValid(int value);\n",
                     "enumname", it->first);
    }
    for (map<string, const Descriptor *>::const_iterator
             it = classes_.begin(),
             end = classes_.end();
         it != end; ++it) {
      printer->Print("class $classname$;\n", "classname", it->first);
      printer->Annotate("classname", it->second);
    }
    for (map<string, ForwardDeclarations *>::const_iterator
             it = namespaces_.begin(),
             end = namespaces_.end();
         it != end; ++it) {
      printer->Print("namespace $nsname$ {\n",
                     "nsname", it->first);
      it->second->Print(printer);
      printer->Print("} // namespace $nsname$\n",
                     "nsname", it->first);
    }
  }

 private:
  map<string, ForwardDeclarations*> namespaces_;
  map<string, const Descriptor*> classes_;
  map<string, const EnumDescriptor*> enums_;
};

void FileGenerator::GenerateBuildDescriptors(io::Printer* printer) {
  // AddDescriptors() is a file-level procedure which adds the encoded
  // FileDescriptorProto for this .proto file to the global DescriptorPool for
  // generated files (DescriptorPool::generated_pool()). It either runs at
  // static initialization time (by default) or when default_instance() is
  // called for the first time (in LITE_RUNTIME mode with
  // GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER flag enabled). This procedure also
  // constructs default instances and registers extensions.
  //
  // Its sibling, AssignDescriptors(), actually pulls the compiled
  // FileDescriptor from the DescriptorPool and uses it to populate all of
  // the global variables which store pointers to the descriptor objects.
  // It also constructs the reflection objects. It is called the first time
  // anyone calls descriptor() or GetReflection() on one of the types defined
  // in the file. In optimize_for = LITE_RUNTIME mode, we don't generate
  // AssignDescriptors() and we only use AddDescriptors() to allocate default
  // instances.
  if (HasDescriptorMethods(file_, options_)) {
    printer->Print(
      "\n"
      "void $assigndescriptorsname$() GOOGLE_ATTRIBUTE_COLD;\n"
      "void $assigndescriptorsname$() {\n",
      "assigndescriptorsname", GlobalAssignDescriptorsName(file_->name()));
    printer->Indent();

    // Make sure the file has found its way into the pool. If a descriptor
    // is requested *during* static init then AddDescriptors() may not have
    // been called yet, so we call it manually. Note that it's fine if
    // AddDescriptors() is called multiple times.
    printer->Print(
      "$adddescriptorsname$();\n",
      "adddescriptorsname", GlobalAddDescriptorsName(file_->name()));

    // Get the file's descriptor from the pool.
    printer->Print(
      "const ::google::protobuf::FileDescriptor* file =\n"
      " ::google::protobuf::DescriptorPool::generated_pool()->FindFileByName(\n"
      " \"$filename$\");\n"
      // Note that this GOOGLE_CHECK is necessary to prevent a warning about
      // "file" being unused when compiling an empty .proto file.
      "GOOGLE_CHECK(file != NULL);\n",
      "filename", file_->name());

    // Go through all the stuff defined in this file and generated code to
    // assign the global descriptor pointers based on the file descriptor.
    for (int i = 0; i < file_->message_type_count(); i++) {
      message_generators_[i]->GenerateDescriptorInitializer(printer, i);
    }
    for (int i = 0; i < file_->enum_type_count(); i++) {
      enum_generators_[i]->GenerateDescriptorInitializer(printer, i);
    }
    if (HasGenericServices(file_, options_)) {
      for (int i = 0; i < file_->service_count(); i++) {
        service_generators_[i]->GenerateDescriptorInitializer(printer, i);
      }
    }

    printer->Outdent();
    printer->Print(
      "}\n"
      "\n");

    // ---------------------------------------------------------------

    // protobuf_AssignDescriptorsOnce(): The first time it is called, calls
    // AssignDescriptors(). All later times, waits for the first call to
    // complete and then returns.
    printer->Print(
      "namespace {\n"
      "\n"
      "GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_);\n"
      "inline void protobuf_AssignDescriptorsOnce() {\n"
      " ::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_,\n"
      " &$assigndescriptorsname$);\n"
      "}\n"
      "\n",
      "assigndescriptorsname", GlobalAssignDescriptorsName(file_->name()));

    // protobuf_RegisterTypes(): Calls
    // MessageFactory::InternalRegisterGeneratedType() for each message type.
    printer->Print(
      "void protobuf_RegisterTypes(const ::std::string&) GOOGLE_ATTRIBUTE_COLD;\n"
      "void protobuf_RegisterTypes(const ::std::string&) {\n"
      " protobuf_AssignDescriptorsOnce();\n");
    printer->Indent();

    for (int i = 0; i < file_->message_type_count(); i++) {
      message_generators_[i]->GenerateTypeRegistrations(printer);
    }

    printer->Outdent();
    printer->Print(
      "}\n"
      "\n"
      "} // namespace\n");
  }

  // -----------------------------------------------------------------

  // ShutdownFile(): Deletes descriptors, default instances, etc. on shutdown.
  printer->Print(
    "\n"
    "void $shutdownfilename$() {\n",
    "shutdownfilename", GlobalShutdownFileName(file_->name()));
  printer->Indent();

  for (int i = 0; i < file_->message_type_count(); i++) {
    message_generators_[i]->GenerateShutdownCode(printer);
  }

  printer->Outdent();
  printer->Print(
    "}\n\n");

  // -----------------------------------------------------------------

  // Now generate the AddDescriptors() function.
  PrintHandlingOptionalStaticInitializers(
    file_, options_, printer,
    // With static initializers.
    // Note that we don't need any special synchronization in the following
    // code
    // because it is called at static init time before any threads exist.
    "void $adddescriptorsname$() GOOGLE_ATTRIBUTE_COLD;\n"
    "void $adddescriptorsname$() {\n"
    " static bool already_here = false;\n"
    " if (already_here) return;\n"
    " already_here = true;\n"
    " GOOGLE_PROTOBUF_VERIFY_VERSION;\n"
    "\n",
    // Without.
    "void $adddescriptorsname$_impl() {\n"
    " GOOGLE_PROTOBUF_VERIFY_VERSION;\n"
    "\n",
    // Vars.
    "adddescriptorsname", GlobalAddDescriptorsName(file_->name()));
  printer->Indent();

  // Call the AddDescriptors() methods for all of our dependencies, to make
  // sure they get added first.
  for (int i = 0; i < file_->dependency_count(); i++) {
    const FileDescriptor* dependency = file_->dependency(i);
    // Print the namespace prefix for the dependency.
    string add_desc_name = QualifiedFileLevelSymbol(
        dependency->package(), GlobalAddDescriptorsName(dependency->name()));
    // Call its AddDescriptors function.
    printer->Print(
      "$name$();\n",
      "name", add_desc_name);
  }

  if (HasDescriptorMethods(file_, options_)) {
    // Embed the descriptor. We simply serialize the entire FileDescriptorProto
    // and embed it as a string literal, which is parsed and built into real
    // descriptors at initialization time.
    FileDescriptorProto file_proto;
    file_->CopyTo(&file_proto);
    string file_data;
    file_proto.SerializeToString(&file_data);

#ifdef _MSC_VER
    bool breakdown_large_file = true;
#else
    bool breakdown_large_file = false;
#endif
    // Workaround for MSVC: "Error C1091: compiler limit: string exceeds 65535
    // bytes in length". Declare a static array of characters rather than use a
    // string literal.
    if (breakdown_large_file && file_data.size() > 65535) {
      // This has to be explicitly marked as a signed char because the generated
      // code puts negative values in the array, and sometimes plain char is
      // unsigned. That implicit narrowing conversion is not allowed in C++11.
      // <http://stackoverflow.com/questions/4434140/narrowing-conversions-in-c0x-is-it-just-me-or-does-this-sound-like-a-breakin>
      // has details on why.
      printer->Print(
        "static const signed char descriptor[] = {\n");
      printer->Indent();

      // Only write 25 bytes per line.
      static const int kBytesPerLine = 25;
      for (int i = 0; i < file_data.size();) {
        for (int j = 0; j < kBytesPerLine && i < file_data.size(); ++i, ++j) {
          printer->Print(
            "$char$, ",
            "char", SimpleItoa(file_data[i]));
        }
        printer->Print(
          "\n");
      }

      printer->Outdent();
      printer->Print(
        "};\n");

      printer->Print(
        "::google::protobuf::DescriptorPool::InternalAddGeneratedFile(descriptor, $size$);\n",
        "size", SimpleItoa(file_data.size()));
    } else {
      printer->Print(
        "::google::protobuf::DescriptorPool::InternalAddGeneratedFile(");

      // Only write 40 bytes per line.
      static const int kBytesPerLine = 40;
      for (int i = 0; i < file_data.size(); i += kBytesPerLine) {
        printer->Print("\n \"$data$\"",
                       "data",
                       EscapeTrigraphs(
                           CEscape(file_data.substr(i, kBytesPerLine))));
      }
      printer->Print(
        ", $size$);\n",
        "size", SimpleItoa(file_data.size()));
    }

    // Call MessageFactory::InternalRegisterGeneratedFile().
    printer->Print(
      "::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(\n"
      " \"$filename$\", &protobuf_RegisterTypes);\n",
      "filename", file_->name());
  }

  // Allocate and initialize default instances. This can't be done lazily
  // since default instances are returned by simple accessors and are used with
  // extensions. Speaking of which, we also register extensions at this time.
  for (int i = 0; i < file_->message_type_count(); i++) {
    message_generators_[i]->GenerateDefaultInstanceAllocator(printer);
  }
  for (int i = 0; i < file_->extension_count(); i++) {
    extension_generators_[i]->GenerateRegistration(printer);
  }
  for (int i = 0; i < file_->message_type_count(); i++) {
    message_generators_[i]->GenerateDefaultInstanceInitializer(printer);
  }

  printer->Print(
    "::google::protobuf::internal::OnShutdown(&$shutdownfilename$);\n",
    "shutdownfilename", GlobalShutdownFileName(file_->name()));

  printer->Outdent();
  printer->Print(
    "}\n"
    "\n");

  PrintHandlingOptionalStaticInitializers(
    file_, options_, printer,
    // With static initializers.
    "// Force AddDescriptors() to be called at static initialization time.\n"
    "struct StaticDescriptorInitializer_$filename$ {\n"
    " StaticDescriptorInitializer_$filename$() {\n"
    " $adddescriptorsname$();\n"
    " }\n"
    "} static_descriptor_initializer_$filename$_;\n",
    // Without.
    "GOOGLE_PROTOBUF_DECLARE_ONCE($adddescriptorsname$_once_);\n"
    "void $adddescriptorsname$() {\n"
    " ::google::protobuf::GoogleOnceInit(&$adddescriptorsname$_once_,\n"
    " &$adddescriptorsname$_impl);\n"
    "}\n",
    // Vars.
    "adddescriptorsname", GlobalAddDescriptorsName(file_->name()),
    "filename", FilenameIdentifier(file_->name()));
}

// Opens "namespace a { namespace b {" for the file's package.
void FileGenerator::GenerateNamespaceOpeners(io::Printer* printer) {
  if (package_parts_.size() > 0) printer->Print("\n");

  for (int i = 0; i < package_parts_.size(); i++) {
    printer->Print("namespace $part$ {\n",
                   "part", package_parts_[i]);
  }
}

// Closes the package namespaces opened above, innermost first.
void FileGenerator::GenerateNamespaceClosers(io::Printer* printer) {
  if (package_parts_.size() > 0) printer->Print("\n");

  for (int i = package_parts_.size() - 1; i >= 0; i--) {
    printer->Print("} // namespace $part$\n",
                   "part", package_parts_[i]);
  }
}

// Collects and prints forward declarations for this file and everything it
// depends on (proto_h mode only).
void FileGenerator::GenerateForwardDeclarations(io::Printer* printer) {
  ForwardDeclarations decls;
  for (int i = 0; i < file_->dependency_count(); i++) {
    FileGenerator dependency(file_->dependency(i), options_);
    dependency.FillForwardDeclarations(&decls);
  }
  FillForwardDeclarations(&decls);
  decls.Print(printer);
}

// Adds this file's classes/enums (and those of its public dependencies,
// recursively) into |decls| under the file's package namespace.
void FileGenerator::FillForwardDeclarations(ForwardDeclarations* decls) {
  for (int i = 0; i < file_->public_dependency_count(); i++) {
    FileGenerator dependency(file_->public_dependency(i), options_);
    dependency.FillForwardDeclarations(decls);
  }
  // Walk down to the namespace scope matching the package; decls is
  // re-pointed at the innermost scope.
  for (int i = 0; i < package_parts_.size(); i++) {
    decls = decls->AddOrGetNamespace(package_parts_[i]);
  }
  // Generate enum definitions.
  for (int i = 0; i < file_->message_type_count(); i++) {
    message_generators_[i]->FillEnumForwardDeclarations(&decls->enums());
  }
  for (int i = 0; i < file_->enum_type_count(); i++) {
    enum_generators_[i]->FillForwardDeclaration(&decls->enums());
  }
  // Generate forward declarations of classes.
  for (int i = 0; i < file_->message_type_count(); i++) {
    message_generators_[i]->FillMessageForwardDeclarations(
        &decls->classes());
  }
}

void FileGenerator::GenerateTopHeaderGuard(io::Printer* printer,
                                           const string& filename_identifier) {
  // Generate top of header.
  printer->Print(
    "// Generated by the protocol buffer compiler. DO NOT EDIT!\n"
    "// source: $filename$\n"
    "\n"
    "#ifndef PROTOBUF_$filename_identifier$__INCLUDED\n"
    "#define PROTOBUF_$filename_identifier$__INCLUDED\n"
    "\n"
    "#include <string>\n",
    "filename", file_->name(),
    "filename_identifier", filename_identifier);
  printer->Print("\n");
}

void FileGenerator::GenerateBottomHeaderGuard(
    io::Printer* printer, const string& filename_identifier) {
  printer->Print(
    "#endif // PROTOBUF_$filename_identifier$__INCLUDED\n",
    "filename_identifier", filename_identifier);
}

// Emits the protobuf-runtime includes needed by a generated header, guarded
// by a version-compatibility check between the runtime and this protoc.
void FileGenerator::GenerateLibraryIncludes(io::Printer* printer) {
  printer->Print(
    "#include <google/protobuf/stubs/common.h>\n"
    "\n");

  // Verify the protobuf library header version is compatible with the protoc
  // version before going any further.
  printer->Print(
    "#if GOOGLE_PROTOBUF_VERSION < $min_header_version$\n"
    "#error This file was generated by a newer version of protoc which is\n"
    "#error incompatible with your Protocol Buffer headers. Please update\n"
    "#error your headers.\n"
    "#endif\n"
    "#if $protoc_version$ < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n"
    "#error This file was generated by an older version of protoc which is\n"
    "#error incompatible with your Protocol Buffer headers. Please\n"
    "#error regenerate this file with a newer version of protoc.\n"
    "#endif\n"
    "\n",
    "min_header_version",
    SimpleItoa(protobuf::internal::kMinHeaderVersionForProtoc),
    "protoc_version", SimpleItoa(GOOGLE_PROTOBUF_VERSION));

  // OK, it's now safe to #include other files.
  printer->Print(
    "#include <google/protobuf/arena.h>\n"
    "#include <google/protobuf/arenastring.h>\n"
    "#include <google/protobuf/generated_message_util.h>\n");
  if (UseUnknownFieldSet(file_, options_)) {
    printer->Print(
      "#include <google/protobuf/metadata.h>\n");
  }
  if (file_->message_type_count() > 0) {
    if (HasDescriptorMethods(file_, options_)) {
      printer->Print(
        "#include <google/protobuf/message.h>\n");
    } else {
      printer->Print(
        "#include <google/protobuf/message_lite.h>\n");
    }
  }
  printer->Print(
    "#include <google/protobuf/repeated_field.h>\n"
    "#include <google/protobuf/extension_set.h>\n");
  if (HasMapFields(file_)) {
    printer->Print(
      "#include <google/protobuf/map.h>\n");
    if (HasDescriptorMethods(file_, options_)) {
      printer->Print(
        "#include <google/protobuf/map_field_inl.h>\n");
    } else {
      printer->Print(
        "#include <google/protobuf/map_field_lite.h>\n");
    }
  }
  if (HasEnumDefinitions(file_)) {
    if (HasDescriptorMethods(file_, options_)) {
      printer->Print(
        "#include <google/protobuf/generated_enum_reflection.h>\n");
    } else {
      printer->Print(
        "#include <google/protobuf/generated_enum_util.h>\n");
    }
  }
  if (HasGenericServices(file_, options_)) {
    printer->Print(
      "#include <google/protobuf/service.h>\n");
  }
  if (UseUnknownFieldSet(file_, options_) && file_->message_type_count() > 0) {
    printer->Print(
      "#include <google/protobuf/unknown_field_set.h>\n");
  }
  if (IsAnyMessage(file_)) {
    printer->Print(
      "#include <google/protobuf/any.h>\n");
  }
}

// Emits the #pragma that points IDE tooling at the metadata (.meta) file,
// only when both the pragma name and its guard macro are configured.
void FileGenerator::GenerateMetadataPragma(io::Printer* printer,
                                           const string& info_path) {
  if (!info_path.empty() && !options_.annotation_pragma_name.empty() &&
      !options_.annotation_guard_name.empty()) {
    printer->Print(
      "#ifdef $guard$\n"
      "#pragma $pragma$ \"$info_path$\"\n"
      "#endif // $guard$\n",
      "guard", options_.annotation_guard_name,
      "pragma", options_.annotation_pragma_name,
      "info_path", info_path);
  }
}

// Emits one #include per imported .proto, marking public imports for IWYU.
void FileGenerator::GenerateDependencyIncludes(io::Printer* printer) {
  set<string> public_import_names;
  for (int i = 0; i < file_->public_dependency_count(); i++) {
    public_import_names.insert(file_->public_dependency(i)->name());
  }

  for (int i = 0; i < file_->dependency_count(); i++) {
    const bool use_system_include = IsWellKnownMessage(file_->dependency(i));
    const string& name = file_->dependency(i)->name();
    bool public_import = (public_import_names.count(name) != 0);

    printer->Print(
      "#include $left$$dependency$.pb.h$right$$iwyu$\n",
      "dependency", StripProto(name),
      "iwyu", (public_import) ? " // IWYU pragma: export" : "",
      "left", use_system_include ? "<" : "\"",
      "right", use_system_include ? ">" : "\"");
  }
}

void FileGenerator::GenerateGlobalStateFunctionDeclarations(
    io::Printer* printer) {
  // Forward-declare the AddDescriptors, AssignDescriptors, and ShutdownFile
  // functions, so that we can declare them to be friends of each class.
  printer->Print(
    "\n"
    "// Internal implementation detail -- do not call these.\n"
    "void $dllexport_decl$$adddescriptorsname$();\n",
    "adddescriptorsname", GlobalAddDescriptorsName(file_->name()),
    "dllexport_decl",
    options_.dllexport_decl.empty() ? "" : options_.dllexport_decl + " ");
  printer->Print(
    // Note that we don't put dllexport_decl on these because they are only
    // called by the .pb.cc file in which they are defined.
    "void $assigndescriptorsname$();\n"
    "void $shutdownfilename$();\n"
    "\n",
    "assigndescriptorsname", GlobalAssignDescriptorsName(file_->name()),
    "shutdownfilename", GlobalShutdownFileName(file_->name()));
}

// Forward-declares every message class in this file (pb.h, non-proto_h mode).
void FileGenerator::GenerateMessageForwardDeclarations(io::Printer* printer) {
  map<string, const Descriptor*> classes;
  for (int i = 0; i < file_->message_type_count(); i++) {
    message_generators_[i]->FillMessageForwardDeclarations(&classes);
  }
  for (map<string, const Descriptor *>::const_iterator
           it = classes.begin(),
           end = classes.end();
       it != end; ++it) {
    printer->Print("class $classname$;\n", "classname", it->first);
    printer->Annotate("classname", it->second);
  }
}

void FileGenerator::GenerateMessageDefinitions(io::Printer* printer) {
  // Generate class definitions.
  for (int i = 0; i < file_->message_type_count(); i++) {
    if (i > 0) {
      printer->Print("\n");
      printer->Print(kThinSeparator);
      printer->Print("\n");
    }
    message_generators_[i]->GenerateClassDefinition(printer);
  }
}

void FileGenerator::GenerateEnumDefinitions(io::Printer* printer) {
  // Generate enum definitions (nested enums first, then file-level enums).
  for (int i = 0; i < file_->message_type_count(); i++) {
    message_generators_[i]->GenerateEnumDefinitions(printer);
  }
  for (int i = 0; i < file_->enum_type_count(); i++) {
    enum_generators_[i]->GenerateDefinition(printer);
  }
}

void FileGenerator::GenerateServiceDefinitions(io::Printer* printer) {
  if (HasGenericServices(file_, options_)) {
    // Generate service definitions.
    for (int i = 0; i < file_->service_count(); i++) {
      if (i > 0) {
        printer->Print("\n");
        printer->Print(kThinSeparator);
        printer->Print("\n");
      }
      service_generators_[i]->GenerateDeclarations(printer);
    }

    printer->Print("\n");
    printer->Print(kThickSeparator);
    printer->Print("\n");
  }
}

void FileGenerator::GenerateExtensionIdentifiers(io::Printer* printer) {
  // Declare extension identifiers.
  for (int i = 0; i < file_->extension_count(); i++) {
    extension_generators_[i]->GenerateDeclaration(printer);
  }
}

void FileGenerator::GenerateInlineFunctionDefinitions(io::Printer* printer) {
  // An aside about inline functions in .proto.h mode:
  //
  // The PROTOBUF_INLINE_NOT_IN_HEADERS symbol controls conditionally
  // moving much of the inline functions to the .pb.cc file, which can be a
  // significant performance benefit for compilation time, at the expense
  // of non-inline function calls.
  //
  // However, in .proto.h mode, the definition of the internal dependent
  // base class must remain in the header, and can never be out-lined. The
  // dependent base class also needs access to has-bit manipuation
  // functions, so the has-bit functions must be unconditionally inlined in
  // proto_h mode.
  //
  // This gives us three flavors of functions:
  //
  //  1. Functions on the message not used by the internal dependent base
  //     class: in .proto.h mode, only some functions are defined on the
  //     message class; others are defined on the dependent base class.
  //     These are guarded and can be out-lined. These are generated by
  //     GenerateInlineMethods, and include has_* bit functions in
  //     non-proto_h mode.
  //
  //  2. Functions on the internal dependent base class: these functions
  //     are dependent on a template parameter, so they always need to
  //     remain in the header.
  //
  //  3. Functions on the message that are used by the dependent base: the
  //     dependent base class down casts itself to the message
  //     implementation class to access these functions (the has_* bit
  //     manipulation functions). Unlike #1, these functions must
  //     unconditionally remain in the header. These are emitted by
  //     GenerateDependentInlineMethods, even though they are not actually
  //     dependent.

  printer->Print("#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n");
  // Generate class inline methods.
  for (int i = 0; i < file_->message_type_count(); i++) {
    if (i > 0) {
      printer->Print(kThinSeparator);
      printer->Print("\n");
    }
    message_generators_[i]->GenerateInlineMethods(printer,
                                                  /* is_inline = */ true);
  }
  printer->Print("#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS\n");

  for (int i = 0; i < file_->message_type_count(); i++) {
    if (i > 0) {
      printer->Print(kThinSeparator);
      printer->Print("\n");
    }
    // Methods of the dependent base class must always be inline in the header.
    message_generators_[i]->GenerateDependentInlineMethods(printer);
  }
}

void FileGenerator::GenerateProto2NamespaceEnumSpecializations(
    io::Printer* printer) {
  // Emit GetEnumDescriptor specializations into google::protobuf namespace:
  if (HasEnumDefinitions(file_)) {
    // The SWIG conditional is to avoid a null-pointer dereference
    // (bug 1984964) in swig-1.3.21 resulting from the following syntax:
    //   namespace X { void Y<Z::W>(); }
    // which appears in GetEnumDescriptor() specializations.
    printer->Print(
      "\n"
      "#ifndef SWIG\n"
      "namespace google {\nnamespace protobuf {\n"
      "\n");
    for (int i = 0; i < file_->message_type_count(); i++) {
      message_generators_[i]->GenerateGetEnumDescriptorSpecializations(printer);
    }
    for (int i = 0; i < file_->enum_type_count(); i++) {
      enum_generators_[i]->GenerateGetEnumDescriptorSpecializations(printer);
    }
    printer->Print(
      "\n"
      "} // namespace protobuf\n} // namespace google\n"
      "#endif // SWIG\n");
  }
}

} // namespace cpp
} // namespace compiler
} // namespace protobuf
} // namespace google
#ifndef CSLIBS_NDT_MATCHING_CERES_SCAN_MATCH_COST_FUNCTOR_3D_RPY_HPP #define CSLIBS_NDT_MATCHING_CERES_SCAN_MATCH_COST_FUNCTOR_3D_RPY_HPP #include <cslibs_ndt/matching/ceres/map/scan_match_cost_functor_creator.hpp> #include <cslibs_ndt/matching/ceres/map/scan_match_cost_functor.hpp> #include <cslibs_math_3d/linear/point.hpp> #include <cslibs_math_3d/linear/quaternion.hpp> namespace cslibs_ndt { namespace matching { namespace ceres { template <typename base_t, typename points_t> class ScanMatchCostFunctor3dRPY : public base_t { static constexpr int N0 = 3; static constexpr int N1 = 3; template <template <typename,typename> class, typename> friend class ceres::ScanMatchCostFunctorCreator; public: template <typename T> inline Eigen::Quaternion<T> toEigen(const T* const raw_rotation_rpy) const { const T roll_2 = raw_rotation_rpy[0] * 0.5; const T pitch_2 = raw_rotation_rpy[1] * 0.5; const T yaw_2 = raw_rotation_rpy[2] * 0.5; const T cos_roll = ::ceres::cos(roll_2); const T sin_roll = ::ceres::sin(roll_2); const T cos_pitch = ::ceres::cos(pitch_2); const T sin_pitch = ::ceres::sin(pitch_2); const T cos_yaw = ::ceres::cos(yaw_2); const T sin_yaw = ::ceres::sin(yaw_2); const T data_x = sin_roll * cos_pitch * cos_yaw - cos_roll * sin_pitch * sin_yaw; const T data_y = cos_roll * sin_pitch * cos_yaw + sin_roll * cos_pitch * sin_yaw; const T data_z = cos_roll * cos_pitch * sin_yaw - sin_roll * sin_pitch * cos_yaw; const T data_w = cos_roll * cos_pitch * cos_yaw + sin_roll * sin_pitch * sin_yaw; return Eigen::Quaternion<T>{ data_w, data_x, data_y, data_z }; } template <typename T> inline bool operator()(const T* const raw_translation, const T* const raw_rotation_rpy, T* residual) const { const Eigen::Matrix<T, 3, 1> translation(raw_translation[0], raw_translation[1], raw_translation[2]); const Eigen::Quaternion<T> rotation = toEigen(raw_rotation_rpy); std::size_t i = 0; //const double size = static_cast<double>(points_.size()); for (const auto& point : points_) { const 
Eigen::Matrix<T, 3, 1> local(T(point(0)), T(point(1)), T(point(2))); const Eigen::Matrix<T, 3, 1> in_world = rotation * local + translation; this->Evaluate(in_world, &residual[i]); if (residual[i] == -residual[i]) // only nan test that works residual[i] = T(0.); residual[i] = weight_ * residual[i]; //::ceres::sqrt(weight_) * residual[i] / size; ++i; } return true; } private: template <typename ... args_t> explicit inline ScanMatchCostFunctor3dRPY(const double& weight, points_t&& points, const args_t &...args) : base_t(args...), weight_(weight), points_(points) { } const double weight_; const points_t points_; }; template <typename ndt_t, Flag flag_t> using ScanMatchCostFunctor3dRPYCreator = ScanMatchCostFunctorCreator<ScanMatchCostFunctor3dRPY, ScanMatchCostFunctor<ndt_t, flag_t>>; } } } #endif // CSLIBS_NDT_MATCHING_CERES_SCAN_MATCH_COST_FUNCTOR_3D_RPY_HPP
#include "bench.h" #include <contrib/libs/re2/re2/re2.h> #include <library/cpp/colorizer/output.h> #include <library/getopt/small/last_getopt.h> #include <library/json/json_value.h> #include <library/linear_regression/linear_regression.h> #include <library/threading/poor_man_openmp/thread_helper.h> #include <util/system/hp_timer.h> #include <util/system/info.h> #include <util/stream/output.h> #include <util/datetime/base.h> #include <util/random/random.h> #include <util/string/cast.h> #include <util/generic/xrange.h> #include <util/generic/algorithm.h> #include <util/generic/singleton.h> #include <util/system/spinlock.h> #include <util/generic/function.h> #include <util/generic/maybe.h> #include <util/generic/strbuf.h> #include <util/generic/intrlist.h> #include <util/stream/format.h> #include <util/system/yield.h> using re2::RE2; using namespace NBench; using namespace NColorizer; using namespace NLastGetopt; namespace { struct TOptions { double TimeBudget; }; struct TResult { TStringBuf TestName; ui64 Samples; ui64 Iterations; TMaybe<double> CyclesPerIteration; TMaybe<double> SecondsPerIteration; double RunTime; size_t TestId; // Sequential test id (zero-based) }; struct ITestRunner: public TIntrusiveListItem<ITestRunner> { virtual ~ITestRunner() = default; void Register(); virtual TStringBuf Name() const noexcept = 0; virtual TResult Run(const TOptions& opts) = 0; size_t SequentialId = 0; }; struct TCpuBenchmark: public ITestRunner { inline TCpuBenchmark(const char* name, NCpu::TUserFunc func) : F(func) , N(name) { Register(); } TResult Run(const TOptions& opts) override; TStringBuf Name() const noexcept override { return N; } std::function<NCpu::TUserFunc> F; const TStringBuf N; }; static inline TString DoFmtTime(double t) { if (t > 0.1) { return ToString(t) + " seconds"; } t *= 1000.0; if (t > 0.1) { return ToString(t) + " milliseconds"; } t *= 1000.0; if (t > 0.1) { return ToString(t) + " microseconds"; } t *= 1000.0; if (t < 0.05) { t = 0.0; } return 
ToString(t) + " nanoseconds"; } struct THiPerfTimer: public THPTimer { static inline TString FmtTime(double t) { return DoFmtTime(t); } }; struct TSimpleTimer { inline double Passed() const noexcept { return (TInstant::Now() - N).MicroSeconds() / 1000000.0; } static inline TString FmtTime(double t) { return DoFmtTime(t); } const TInstant N = TInstant::Now(); }; struct TCycleTimer { inline ui64 Passed() const noexcept { return GetCycleCount() - N; } static inline TString FmtTime(double t) { if (t < 0.5) { t = 0.0; } TString hr; if (t > 10 * 1000) { hr = " (" + ToString(HumanReadableSize(t, ESizeFormat::SF_QUANTITY)) + ")"; } return ToString(t) + hr + " cycles"; } const ui64 N = GetCycleCount(); }; template <class TMyTimer, class T> inline double Measure(T&& t, size_t n) { TMyTimer timer; t(n); return timer.Passed(); } struct TSampleIterator { inline size_t Next() noexcept { return M++; N *= 1.02; M += 1; return Max<double>(N, M); } double N = 1.0; size_t M = 1; }; using TSample = std::pair<size_t, double>; using TSamples = TVector<TSample>; struct TLinFunc { double A; double B; inline double operator()(double x) const noexcept { return A * x + B; } }; static TLinFunc CalcModel(const TSamples& s) { TKahanSLRSolver solver; for (const auto& p : s) { solver.Add(p.first, p.second); } double c = 0; double i = 0; solver.Solve(c, i); return TLinFunc{c, i}; } static inline TSamples RemoveOutliers(const TSamples& s, double fraction) { if (s.size() < 20) { return s; } const auto predictor = CalcModel(s); const auto errfunc = [&predictor](const TSample& p) -> double { //return (1.0 + fabs(predictor(p.first) - p.second)) / (1.0 + fabs(p.second)); //return fabs((predictor(p.first) - p.second)) / (1.0 + fabs(p.second)); //return fabs((predictor(p.first) - p.second)) / (1.0 + p.first); return fabs((predictor(p.first) - p.second)); }; using TSampleWithError = std::pair<const TSample*, double>; TVector<TSampleWithError> v; v.reserve(s.size()); for (const auto& p : s) { 
v.emplace_back(&p, errfunc(p)); } Sort(v.begin(), v.end(), [](const TSampleWithError& l, const TSampleWithError& r) -> bool { return (l.second < r.second) || ((l.second == r.second) && (l.first < r.first)); }); if (0) { for (const auto& x : v) { Cout << x.first->first << ", " << x.first->second << " -> " << x.second << Endl; } } TSamples ret; ret.reserve(v.size()); for (const auto i : xrange<size_t>(0, fraction * v.size())) { ret.push_back(*v[i].first); } return ret; } template <class TMyTimer, class T> static inline TResult RunTest(T&& func, double budget, ITestRunner& test) { THPTimer start; start.Passed(); TSampleIterator sample; TSamples samples; ui64 iters = 0; //warm up func(1); while (start.Passed() < budget) { if (start.Passed() < ((budget * samples.size()) / 2000000.0)) { ThreadYield(); } else { const size_t n = sample.Next(); iters += (ui64)n; samples.emplace_back(n, Measure<TMyTimer>(func, n)); } } auto filtered = RemoveOutliers(samples, 0.9); return {test.Name(), filtered.size(), iters, CalcModel(filtered).A, Nothing(), start.Passed(), test.SequentialId}; } using TTests = TIntrusiveListWithAutoDelete<ITestRunner, TDestructor>; static inline TTests& Tests() { return *Singleton<TTests>(); } void ITestRunner::Register() { Tests().PushBack(this); } TResult TCpuBenchmark::Run(const TOptions& opts) { return RunTest<TCycleTimer>([this](size_t n) { NCpu::TParams params{n}; F(params); }, opts.TimeBudget, *this); } enum EOutFormat { F_CONSOLE = 0 /* "console" */, F_CSV /* "csv" */, F_JSON /* "json" */ }; static TAdaptiveLock STDOUT_LOCK; struct IReporter { virtual void Report(TResult&& result) = 0; virtual void Finish() { } virtual ~IReporter() { } }; class TConsoleReporter: public IReporter { public: ~TConsoleReporter() override { } void Report(TResult&& r) override { with_lock (STDOUT_LOCK) { Cout << r; } } }; class TCSVReporter: public IReporter { public: TCSVReporter() { Cout << "Name\tSamples\tIterations\tRun_time\tPer_iteration_sec\tPer_iteration_cycles" << 
Endl;
    }

    ~TCSVReporter() override {
    }

    // Emits one result as a tab-separated row; '-' marks a missing metric.
    // Serialized via STDOUT_LOCK so parallel test runners don't interleave rows.
    void Report(TResult&& r) override {
        with_lock (STDOUT_LOCK) {
            Cout << r.TestName
                 << '\t' << r.Samples
                 << '\t' << r.Iterations
                 << '\t' << r.RunTime;

            Cout << '\t';

            if (r.CyclesPerIteration) {
                Cout << TCycleTimer::FmtTime(*r.CyclesPerIteration);
            } else {
                Cout << '-';
            }

            Cout << '\t';

            if (r.SecondsPerIteration) {
                Cout << DoFmtTime(*r.SecondsPerIteration);
            } else {
                Cout << '-';
            }

            Cout << Endl;
        }
    }
};

// Collects all results under a lock and dumps a single JSON document in
// Finish(); nothing is printed until the whole run completes.
class TJSONReporter: public IReporter {
public:
    ~TJSONReporter() override {
    }

    void Report(TResult&& r) override {
        with_lock (ResultsLock_) {
            Results_.emplace_back(std::move(r));
        }
    }

    void Finish() override {
        NJson::TJsonValue report;
        auto& bench = report["benchmark"];

        bench.SetType(NJson::JSON_ARRAY);

        NJson::TJsonValue benchReport;

        for (const auto& result : Results_) {
            // Reset the scratch value by swapping with a fresh one.
            NJson::TJsonValue{}.Swap(benchReport);
            benchReport["name"] = result.TestName;
            benchReport["samples"] = result.Samples;
            benchReport["run_time"] = result.RunTime;

            if (result.CyclesPerIteration) {
                benchReport["per_iteration_cycles"] = *result.CyclesPerIteration;
            }

            if (result.SecondsPerIteration) {
                // NOTE(review): key is misspelled ("per_iteration_secons",
                // presumably "per_iteration_seconds"); renaming it would
                // change the emitted JSON schema — confirm no downstream
                // consumer depends on the typo before fixing.
                benchReport["per_iteration_secons"] = *result.SecondsPerIteration;
            }

            bench.AppendValue(benchReport);
        }

        Cout << report << Endl;
    }

private:
    TAdaptiveLock ResultsLock_;
    TVector<TResult> Results_;
};

// Decorator that buffers out-of-order results from parallel runners and
// forwards them to the wrapped reporter in SequentialId (declaration) order.
class TOrderedReporter: public IReporter {
public:
    TOrderedReporter(THolder<IReporter> slave)
        : Slave_(std::move(slave))
    {
    }

    void Report(TResult&& result) override {
        with_lock (ResultsLock_) {
            OrderedResultQueue_.emplace(result.TestId, std::move(result));
            // Flush every result whose turn has come.
            while (!OrderedResultQueue_.empty() && OrderedResultQueue_.begin()->first <= ExpectedTestId_) {
                Slave_->Report(std::move(OrderedResultQueue_.begin()->second));
                OrderedResultQueue_.erase(OrderedResultQueue_.begin());
                ++ExpectedTestId_;
            }
        }
    }

    void Finish() override {
        // Drain any leftovers (TMap iterates in key order, so order is kept).
        for (auto& it : OrderedResultQueue_) {
            Slave_->Report(std::move(it.second));
        }
        OrderedResultQueue_.clear();
        Slave_->Finish();
    }

private:
    THolder<IReporter> Slave_;
    size_t
ExpectedTestId_ = 0; TMap<size_t, TResult> OrderedResultQueue_; TAdaptiveLock ResultsLock_; }; static THolder<IReporter> MakeReporter(const EOutFormat type) { switch (type) { case F_CONSOLE: return MakeHolder<TConsoleReporter>(); case F_CSV: return MakeHolder<TCSVReporter>(); case F_JSON: return MakeHolder<TJSONReporter>(); default: break; } return MakeHolder<TConsoleReporter>(); // make compiler happy } static THolder<IReporter> MakeOrderedReporter(const EOutFormat type) { return MakeHolder<TOrderedReporter>(MakeReporter(type)); } static void EnumerateTests(TVector<ITestRunner*>& tests) { for (size_t id : xrange(tests.size())) { tests[id]->SequentialId = id; } } } template <> EOutFormat FromStringImpl<EOutFormat>(const char* data, size_t len) { const auto s = TStringBuf{data, len}; if (AsStringBuf("console") == s) { return F_CONSOLE; } else if (AsStringBuf("csv") == s) { return F_CSV; } else if (AsStringBuf("json") == s) { return F_JSON; } ythrow TFromStringException{} << "failed to convert '" << s << '\''; } template <> void Out<TResult>(IOutputStream& out, const TResult& r) { out << "----------- " << LightRed() << r.TestName << Old() << " ---------------" << Endl << " samples: " << White() << r.Samples << Old() << Endl << " iterations: " << White() << r.Iterations << Old() << Endl << " iterations hr: " << White() << HumanReadableSize(r.Iterations, SF_QUANTITY) << Old() << Endl << " run time: " << White() << r.RunTime << Old() << Endl; if (r.CyclesPerIteration) { out << " per iteration: " << White() << TCycleTimer::FmtTime(*r.CyclesPerIteration) << Old() << Endl; } if (r.SecondsPerIteration) { out << " per iteration: " << White() << DoFmtTime(*r.SecondsPerIteration) << Old() << Endl; } } NCpu::TRegistar::TRegistar(const char* name, TUserFunc func) { static_assert(sizeof(TCpuBenchmark) + alignof(TCpuBenchmark) < sizeof(Buf), "fix Buf size"); new (AlignUp(Buf, alignof(TCpuBenchmark))) TCpuBenchmark(name, func); } namespace { struct TProgOpts { TProgOpts(int argc, 
char** argv) { TOpts opts = TOpts::Default(); opts.AddHelpOption(); opts.AddLongOption('b', "budget") .StoreResult(&TimeBudget) .RequiredArgument("SEC") .Optional() .Help("overall time budget"); opts.AddLongOption('l', "list") .NoArgument() .StoreValue(&ListTests, true) .Help("list all tests"); opts.AddLongOption('t', "threads") .StoreResult(&Threads) .OptionalValue(ToString((NSystemInfo::CachedNumberOfCpus() + 1) / 2), "JOBS") .DefaultValue("1") .Help("run benchmarks in parallel"); opts.AddLongOption('f', "format") .StoreResult(&OutFormat) .RequiredArgument("FORMAT") .DefaultValue("console") .Help("output format (console|csv|json)"); opts.SetFreeArgDefaultTitle("REGEXP", "RE2 regular expression to filter tests"); const TOptsParseResult parseResult{&opts, argc, argv}; for (const auto& regexp : parseResult.GetFreeArgs()) { Filters.push_back(MakeHolder<RE2>(regexp.data(), RE2::Quiet)); Y_ENSURE(Filters.back()->ok(), "incorrect RE2 expression '" << regexp << "'"); } } bool MatchFilters(const TStringBuf& name) const { if (!Filters) { return true; } for (auto&& re : Filters) { if (RE2::FullMatchN({name.data(), name.size()}, *re, nullptr, 0)) { return true; } } return false; } bool ListTests = false; double TimeBudget = -1.0; TVector<THolder<RE2>> Filters; size_t Threads = 0; EOutFormat OutFormat; }; } int NBench::Main(int argc, char** argv) { const TProgOpts opts(argc, argv); TVector<ITestRunner*> tests; for (auto&& it : Tests()) { if (opts.MatchFilters(it.Name())) { tests.push_back(&it); } } EnumerateTests(tests); if (opts.ListTests) { for (const auto* const it : tests) { Cout << it->Name() << Endl; } return 0; } if (!tests) { return 0; } double timeBudget = opts.TimeBudget; if (timeBudget < 0) { timeBudget = 5.0 * tests.size(); } const TOptions testOpts = {timeBudget / tests.size()}; const auto reporter = MakeOrderedReporter(opts.OutFormat); std::function<void(ITestRunner**)> func = [&](ITestRunner** it) { auto&& res = (*it)->Run(testOpts); 
reporter->Report(std::move(res)); }; if (opts.Threads > 1) { NYmp::SetThreadCount(opts.Threads); NYmp::ParallelForStaticChunk(tests.data(), tests.data() + tests.size(), 1, func); } else { for (auto it : tests) { func(&it); } } reporter->Finish(); return 0; }
/* * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /** * @file ClientState.hpp * @brief * */ #pragma once #include <atomic> #include "util/Utf8String.hpp" #include "util/memory/stl/Map.hpp" #include "Action.hpp" #include "ClientCore.hpp" #include "mqtt/Common.hpp" namespace awsiotsdk { namespace mqtt { class ClientState : public ClientCoreState { protected: bool is_session_present_; std::atomic_bool is_connected_; std::atomic_bool is_auto_reconnect_enabled_; std::atomic_bool is_auto_reconnect_required_; std::atomic_bool is_pingreq_pending_; uint16_t last_sent_packet_id_; std::chrono::seconds keep_alive_timeout_; std::chrono::seconds min_reconnect_backoff_timeout_; std::chrono::seconds max_reconnect_backoff_timeout_; std::chrono::milliseconds mqtt_command_timeout_; std::shared_ptr<ActionData> p_connect_data_; std::atomic_bool trigger_disconnect_callback_; public: util::Map<util::String, std::shared_ptr<Subscription>> subscription_map_; // Rule of 5 stuff // Disable copying and moving because class contains std::atomic<> types used for thread synchronization ClientState() = delete; // Default constructor ClientState(const ClientState &) = delete; // Delete Copy constructor ClientState(ClientState &&) = delete; // Move constructor ClientState &operator=(const ClientState &) & = delete; // Delete Copy assignment operator ClientState &operator=(ClientState &&) & = delete; // Move assignment operator ~ClientState() = default; // Default destructor 
ClientState(std::chrono::milliseconds mqtt_command_timeout); static std::shared_ptr<ClientState> Create(std::chrono::milliseconds mqtt_command_timeout); bool IsSessionPresent() { return is_session_present_; } void SetSessionPresent(bool value) { is_session_present_ = value; } bool IsConnected() { return is_connected_; } void SetConnected(bool value) { is_connected_ = value; if (value) { is_auto_reconnect_required_ = false; } SetProcessQueuedActions(value); } bool IsAutoReconnectEnabled() { return is_auto_reconnect_enabled_; } void SetAutoReconnectEnabled(bool value) { is_auto_reconnect_enabled_ = value; } bool IsAutoReconnectRequired() { return is_auto_reconnect_required_; } void SetAutoReconnectRequired(bool value) { is_auto_reconnect_required_ = value; } bool IsPingreqPending() { return is_pingreq_pending_; } void SetPingreqPending(bool value) { is_pingreq_pending_ = value; } bool isDisconnectCallbackPending() { return trigger_disconnect_callback_; } void setDisconnectCallbackPending(bool value) { trigger_disconnect_callback_ = value; } virtual uint16_t GetNextPacketId(); virtual uint16_t GetNextActionId() { return GetNextPacketId(); } /** * @brief Get duration of Keep alive interval in seconds * @return std::chrono::seconds Keep alive interval duration */ std::chrono::seconds GetKeepAliveTimeout() { return keep_alive_timeout_; } void SetKeepAliveTimeout(std::chrono::seconds keep_alive_timeout) { keep_alive_timeout_ = keep_alive_timeout; } std::chrono::milliseconds GetMqttCommandTimeout() { return mqtt_command_timeout_; } void SetMqttCommandTimeout(std::chrono::milliseconds mqtt_command_timeout) { mqtt_command_timeout_ = mqtt_command_timeout; } std::chrono::seconds GetMinReconnectBackoffTimeout() { return min_reconnect_backoff_timeout_; } void SetMinReconnectBackoffTimeout(std::chrono::seconds min_reconnect_backoff_timeout) { min_reconnect_backoff_timeout_ = min_reconnect_backoff_timeout; } std::chrono::seconds GetMaxReconnectBackoffTimeout() { return 
max_reconnect_backoff_timeout_; } void SetMaxReconnectBackoffTimeout(std::chrono::seconds max_reconnect_backoff_timeout) { max_reconnect_backoff_timeout_ = max_reconnect_backoff_timeout; } std::shared_ptr<ActionData> GetAutoReconnectData() { return p_connect_data_; } void SetAutoReconnectData(std::shared_ptr<ActionData> p_connect_data) { p_connect_data_ = p_connect_data; } std::shared_ptr<Subscription> GetSubscription(util::String p_topic_name); std::shared_ptr<Subscription> SetSubscriptionPacketInfo(util::String p_topic_name, uint16_t packet_id, uint8_t index_in_packet); ResponseCode SetSubscriptionActive(uint16_t packet_id, uint8_t index_in_sub_packet, mqtt::QoS max_qos); ResponseCode RemoveSubscription(uint16_t packet_id, uint8_t index_in_sub_packet); ResponseCode RemoveAllSubscriptionsForPacketId(uint16_t packet_id); ResponseCode RemoveSubscription(util::String p_topic_name); }; } }
// Autogenerated from CppHeaderCreator // Created by Sc2ad // ========================================================================= #pragma once // Begin includes #include "extern/beatsaber-hook/shared/utils/typedefs.h" #include "extern/beatsaber-hook/shared/utils/byref.hpp" // Completed includes // Begin forward declares // Forward declaring namespace: Org::BouncyCastle::Asn1 namespace Org::BouncyCastle::Asn1 { // Forward declaring type: Asn1Object class Asn1Object; // Forward declaring type: DerApplicationSpecific class DerApplicationSpecific; // Forward declaring type: Asn1Encodable class Asn1Encodable; } // Forward declaring namespace: System::Text namespace System::Text { // Forward declaring type: StringBuilder class StringBuilder; } // Completed forward declares // Type namespace: Org.BouncyCastle.Asn1.Utilities namespace Org::BouncyCastle::Asn1::Utilities { // Size: 0x10 #pragma pack(push, 1) // Autogenerated type: Org.BouncyCastle.Asn1.Utilities.Asn1Dump // [TokenAttribute] Offset: FFFFFFFF class Asn1Dump : public ::Il2CppObject { public: // Creating value type constructor for type: Asn1Dump Asn1Dump() noexcept {} // Get static field: static private readonly System.String NewLine static ::Il2CppString* _get_NewLine(); // Set static field: static private readonly System.String NewLine static void _set_NewLine(::Il2CppString* value); // static private System.Void .cctor() // Offset: 0x1477208 static void _cctor(); // static private System.Void AsString(System.String indent, System.Boolean verbose, Org.BouncyCastle.Asn1.Asn1Object obj, System.Text.StringBuilder buf) // Offset: 0x1474008 static void AsString(::Il2CppString* indent, bool verbose, Org::BouncyCastle::Asn1::Asn1Object* obj, System::Text::StringBuilder* buf); // static private System.String outputApplicationSpecific(System.String type, System.String indent, System.Boolean verbose, Org.BouncyCastle.Asn1.DerApplicationSpecific app) // Offset: 0x14767BC static ::Il2CppString* 
outputApplicationSpecific(::Il2CppString* type, ::Il2CppString* indent, bool verbose, Org::BouncyCastle::Asn1::DerApplicationSpecific* app); // static public System.String DumpAsString(Org.BouncyCastle.Asn1.Asn1Encodable obj) // Offset: 0x1477004 static ::Il2CppString* DumpAsString(Org::BouncyCastle::Asn1::Asn1Encodable* obj); // static public System.String DumpAsString(Org.BouncyCastle.Asn1.Asn1Encodable obj, System.Boolean verbose) // Offset: 0x147706C static ::Il2CppString* DumpAsString(Org::BouncyCastle::Asn1::Asn1Encodable* obj, bool verbose); // static private System.String dumpBinaryDataAsString(System.String indent, System.Byte[] bytes) // Offset: 0x147654C static ::Il2CppString* dumpBinaryDataAsString(::Il2CppString* indent, ::Array<uint8_t>* bytes); // static private System.String calculateAscString(System.Byte[] bytes, System.Int32 off, System.Int32 len) // Offset: 0x147713C static ::Il2CppString* calculateAscString(::Array<uint8_t>* bytes, int off, int len); }; // Org.BouncyCastle.Asn1.Utilities.Asn1Dump #pragma pack(pop) } #include "extern/beatsaber-hook/shared/utils/il2cpp-type-check.hpp" DEFINE_IL2CPP_ARG_TYPE(Org::BouncyCastle::Asn1::Utilities::Asn1Dump*, "Org.BouncyCastle.Asn1.Utilities", "Asn1Dump"); #include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp" // Writing MetadataGetter for method: Org::BouncyCastle::Asn1::Utilities::Asn1Dump::_cctor // Il2CppName: .cctor template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (*)()>(&Org::BouncyCastle::Asn1::Utilities::Asn1Dump::_cctor)> { static const MethodInfo* get() { return ::il2cpp_utils::FindMethod(classof(Org::BouncyCastle::Asn1::Utilities::Asn1Dump*), ".cctor", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{}); } }; // Writing MetadataGetter for method: Org::BouncyCastle::Asn1::Utilities::Asn1Dump::AsString // Il2CppName: AsString template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (*)(::Il2CppString*, 
bool, Org::BouncyCastle::Asn1::Asn1Object*, System::Text::StringBuilder*)>(&Org::BouncyCastle::Asn1::Utilities::Asn1Dump::AsString)> { static const MethodInfo* get() { static auto* indent = &::il2cpp_utils::GetClassFromName("System", "String")->byval_arg; static auto* verbose = &::il2cpp_utils::GetClassFromName("System", "Boolean")->byval_arg; static auto* obj = &::il2cpp_utils::GetClassFromName("Org.BouncyCastle.Asn1", "Asn1Object")->byval_arg; static auto* buf = &::il2cpp_utils::GetClassFromName("System.Text", "StringBuilder")->byval_arg; return ::il2cpp_utils::FindMethod(classof(Org::BouncyCastle::Asn1::Utilities::Asn1Dump*), "AsString", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{indent, verbose, obj, buf}); } }; // Writing MetadataGetter for method: Org::BouncyCastle::Asn1::Utilities::Asn1Dump::outputApplicationSpecific // Il2CppName: outputApplicationSpecific template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* (*)(::Il2CppString*, ::Il2CppString*, bool, Org::BouncyCastle::Asn1::DerApplicationSpecific*)>(&Org::BouncyCastle::Asn1::Utilities::Asn1Dump::outputApplicationSpecific)> { static const MethodInfo* get() { static auto* type = &::il2cpp_utils::GetClassFromName("System", "String")->byval_arg; static auto* indent = &::il2cpp_utils::GetClassFromName("System", "String")->byval_arg; static auto* verbose = &::il2cpp_utils::GetClassFromName("System", "Boolean")->byval_arg; static auto* app = &::il2cpp_utils::GetClassFromName("Org.BouncyCastle.Asn1", "DerApplicationSpecific")->byval_arg; return ::il2cpp_utils::FindMethod(classof(Org::BouncyCastle::Asn1::Utilities::Asn1Dump*), "outputApplicationSpecific", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{type, indent, verbose, app}); } }; // Writing MetadataGetter for method: Org::BouncyCastle::Asn1::Utilities::Asn1Dump::DumpAsString // Il2CppName: DumpAsString template<> struct 
::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* (*)(Org::BouncyCastle::Asn1::Asn1Encodable*)>(&Org::BouncyCastle::Asn1::Utilities::Asn1Dump::DumpAsString)> { static const MethodInfo* get() { static auto* obj = &::il2cpp_utils::GetClassFromName("Org.BouncyCastle.Asn1", "Asn1Encodable")->byval_arg; return ::il2cpp_utils::FindMethod(classof(Org::BouncyCastle::Asn1::Utilities::Asn1Dump*), "DumpAsString", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{obj}); } }; // Writing MetadataGetter for method: Org::BouncyCastle::Asn1::Utilities::Asn1Dump::DumpAsString // Il2CppName: DumpAsString template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* (*)(Org::BouncyCastle::Asn1::Asn1Encodable*, bool)>(&Org::BouncyCastle::Asn1::Utilities::Asn1Dump::DumpAsString)> { static const MethodInfo* get() { static auto* obj = &::il2cpp_utils::GetClassFromName("Org.BouncyCastle.Asn1", "Asn1Encodable")->byval_arg; static auto* verbose = &::il2cpp_utils::GetClassFromName("System", "Boolean")->byval_arg; return ::il2cpp_utils::FindMethod(classof(Org::BouncyCastle::Asn1::Utilities::Asn1Dump*), "DumpAsString", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{obj, verbose}); } }; // Writing MetadataGetter for method: Org::BouncyCastle::Asn1::Utilities::Asn1Dump::dumpBinaryDataAsString // Il2CppName: dumpBinaryDataAsString template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* (*)(::Il2CppString*, ::Array<uint8_t>*)>(&Org::BouncyCastle::Asn1::Utilities::Asn1Dump::dumpBinaryDataAsString)> { static const MethodInfo* get() { static auto* indent = &::il2cpp_utils::GetClassFromName("System", "String")->byval_arg; static auto* bytes = &il2cpp_functions::array_class_get(::il2cpp_utils::GetClassFromName("System", "Byte"), 1)->byval_arg; return ::il2cpp_utils::FindMethod(classof(Org::BouncyCastle::Asn1::Utilities::Asn1Dump*), "dumpBinaryDataAsString", 
std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{indent, bytes}); } }; // Writing MetadataGetter for method: Org::BouncyCastle::Asn1::Utilities::Asn1Dump::calculateAscString // Il2CppName: calculateAscString template<> struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppString* (*)(::Array<uint8_t>*, int, int)>(&Org::BouncyCastle::Asn1::Utilities::Asn1Dump::calculateAscString)> { static const MethodInfo* get() { static auto* bytes = &il2cpp_functions::array_class_get(::il2cpp_utils::GetClassFromName("System", "Byte"), 1)->byval_arg; static auto* off = &::il2cpp_utils::GetClassFromName("System", "Int32")->byval_arg; static auto* len = &::il2cpp_utils::GetClassFromName("System", "Int32")->byval_arg; return ::il2cpp_utils::FindMethod(classof(Org::BouncyCastle::Asn1::Utilities::Asn1Dump*), "calculateAscString", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{bytes, off, len}); } };
#include "MainConstants.h"
#include "SceneGame.h"
#include "LangMgr.h"
#include "Debug.h"
#include <iostream>

// Game scene: owns the hero, HUD font and camera, and wires them together.
SceneGame::SceneGame()
{
    // Initialize the localization module.
    lang_mgr = LangMgr::Init();

    // Set the starting number of lives and build the HUD caption from the
    // localized phrase.
    lives = 3;
    lives_caption = lang_mgr->getPhrase("scene_lives") + ": " + std::to_string(lives);

    // Configure the main character (world position, sprite, movement step).
    hero = new MainHeroMgr();
    hero->setRealPosition(10, 350);
    hero->setWidthHeight(56, 71);
    hero->setSpriteFilePath("assets/sprites/main_hero/Grue.png");
    hero->setMoveStep(15);

    // Configure the HUD text font.
    font_color = {0, 0, 255};
    font_game_info = new ui::FontMgr();
    font_game_info->setFontName("assets/fonts/XoloniumBold.ttf");
    font_game_info->setFontSize(26);
    font_game_info->setFontColor(font_color);
    font_game_info->setLetterSizeInPX(20);

    // Configure the camera: full-screen viewport that follows the hero at a
    // fixed relative offset.
    cam_main = new Camera();
    cam_main->setPosition(0, 0);
    cam_main->setWidthHeight(SCREEN_WIDTH, SCREEN_HEIGHT);
    cam_main->setGameObjectForWatch(hero);
    cam_main->setGameObjectRelativePos(50, 350);
    cam_main->setGameObjectWatchingMode(true);
    cam_main->updatePosition();

    // Place the hero at its on-screen position as seen by the camera.
    SDL_Rect hero_pos = cam_main->getGameObjectAreaInCam(hero);
    hero->setPosition(hero_pos.x, hero_pos.y);
}

// Releases all heap-allocated scene members (raw owning pointers).
SceneGame::~SceneGame()
{
    delete font_game_info;
    delete cam_main;
    delete hero;
    debug() << "SceneGame end" << std::endl;
}

// Renders one frame: lazily initializes the hero sprite on the first frame,
// clears to white, draws the hero and the lives caption. Skipped while paused.
void SceneGame::render(SDL_Renderer *renderer)
{
    if(!b_paused)
    {
        if(b_first_render)
        {
            setFirstRenderState(false);
            if(!hero->init(renderer))
                std::cout << "Error: " << hero->getErrorText() << std::endl;
        }
        SDL_SetRenderDrawColor( renderer, 255, 255, 255, 255 );
        SDL_RenderClear( renderer );
        SDL_SetRenderDrawColor( renderer, 255, 255, 255, 255 );
        hero->draw(renderer);
        // NOTE(review): the caption x-coordinate is SCREEN_HEIGHT - 30 — for a
        // right-aligned HUD label SCREEN_WIDTH looks intended; confirm.
        font_game_info->paintText(renderer, lives_caption, SCREEN_HEIGHT - 30, 30, ui::fontAlign::right);
        SDL_RenderPresent(renderer);
    }
}

// Intended to erase the current scene objects; currently a no-op.
void SceneGame::render_clean(SDL_Renderer *renderer)
{
    // Erase the current scene objects.
}

// Mouse motion handler; currently ignores all motion (placeholder body).
gameReaction SceneGame::process_mouse_motion(Sint32 x, Sint32 y)
{
    if(!b_paused)
    {
    }
    return gameReaction::gr_ignore;
}

// Mouse button handler; the scene does not react to clicks.
gameReaction SceneGame::process_mouse_button_event(SDL_MouseButtonEvent m_btn_event)
{
    return gameReaction::gr_ignore;
}

// Keyboard handler: forwards the key to the hero, refreshes the camera, and
// re-derives the hero's on-screen position from the camera view.
gameReaction SceneGame::process_keyboard_keydown(SDL_Keycode keycode)
{
    if(!b_paused)
    {
        hero->process_keyboard_keydown(keycode);
        cam_main->updatePosition();
        SDL_Rect hero_pos = cam_main->getGameObjectAreaInCam(hero);
        hero->setPosition(hero_pos.x, hero_pos.y);
        debug() << "hero_x: " << hero_pos.x << " hero_y: " << hero_pos.y << "\n";
        debug() << "cam_x: " << cam_main->getPositionBeginX() << " cam_y: " << cam_main->getPositionBeginY() << "\n\n";
    }
    return gameReaction::gr_ignore;
}
#include <iostream>
#include <map>
#include <vector>
#include <algorithm>
#include <sstream>
using namespace std;

// Generic keyed store. Errors are reported by throwing std::string messages
// that embed _typename (the entity name shown to the user).
template<typename K, typename V>
class Repository{
    map<K, V> rep;
    string _typename;  // entity name used in error messages
public:
    Repository(string _typename = ""){
        this->_typename = _typename;
    }

    // Inserts (k, v); throws if the key already exists.
    void add(K k, V v){
        if(rep.count(k) != 0)
            throw "fail: " + _typename + " ja existe";
        rep[k] = v;
    }

    // Returns a mutable reference to the stored value; throws if absent.
    V& get(K k){
        auto it = rep.find(k);
        if(it == rep.end())
            throw "fail: " + _typename + " nao existe";
        return it->second;
    }

    // True if the key is present.
    bool existe(K k){
        return rep.count(k) != 0;
    }

    // Removes the entry for k; throws if absent.
    void remove(K k){
        auto it = rep.find(k);
        if(it == rep.end())
            throw "fail: " + _typename + " nao existe";
        rep.erase(it);
    }

    // All keys in ascending order.
    vector<K> getKeys(){
        vector<K> keys;
        for(auto& par : rep)
            keys.push_back(par.first);
        return keys;
    }

    // All (key, value) pairs in ascending key order.
    // BUGFIX: the accumulator was declared vector<K>, which cannot hold
    // pair<K, V>; it only compiled because this member was never instantiated.
    vector<pair<K, V>> getPairs(){
        vector<pair<K, V>> pairs;
        for(auto& par : rep)
            pairs.push_back(par);
        return pairs;
    }

    // All values in ascending key order.
    vector<V> getValues(){
        vector<V> values;
        for(auto& par : rep)
            values.push_back(par.second);
        return values;
    }
};

// A client of the loan shark: short name (key), full name and current debt.
class Cliente{
public:
    string name;
    string fullname;
    float saldo;  // amount currently owed by this client

    Cliente(string name = "", string fullname = ""){
        this->name = name;
        this->fullname = fullname;
        this->saldo = 0;
    }

    friend ostream& operator<<(ostream &os, Cliente cliente);
};

ostream& operator<<(ostream &os, Cliente cliente){
    os << cliente.name << " : " << cliente.fullname << " : " << cliente.saldo;
    return os;
}

// A single money movement: positive value = loan, negative = repayment.
class Transacao{
public:
    int id;
    string idCli;
    float value;

    Transacao(int id = 0, string idCli = "", float value = 0.0){
        this->id = id;
        this->idCli = idCli;
        this->value = value;
    }
};

ostream& operator<<(ostream& os, Transacao tr){
    os << "id:" << tr.id << " [" << tr.idCli << " " << tr.value << "]";
    return os;
}

// Extracts one whitespace-delimited value of type T from the stream.
template<typename T>
T read(stringstream& ss){
    T t;
    ss >> t;
    return t;
}

// The loan shark: owns the client and transaction repositories and his cash.
class Agiota{
public:
    Repository<string, Cliente> repCli;
    Repository<int, Transacao> repTr;
    int nextTrId = 0;  // monotonically increasing transaction id
    float saldo;       // cash on hand

    Agiota(float saldo = 0.0): repCli("cliente"), repTr("transacao"){
        this->saldo = saldo;
    }

    // Records a transaction for idCli and advances the id counter.
    void pushTransacao(string idCli, float value){
        repTr.add(nextTrId, Transacao(nextTrId, idCli, value));
        nextTrId++;
    }

    // Client pays back `value`; throws if he would overpay his debt.
    void receber(string idCli, float value){
        Cliente& cliente = repCli.get(idCli);
        if(value > cliente.saldo)
            throw string("fail: dinheiro demais");
        cliente.saldo -= value;
        this->saldo += value;
        pushTransacao(idCli, -value);
    }

    // Lends `value` to a client; throws if the shark lacks the funds.
    void emprestar(string idCli, float value){
        if(value > this->saldo)
            throw string("fail: fundos insuficientes");
        Cliente& cliente = repCli.get(idCli);
        cliente.saldo += value;
        this->saldo -= value;
        pushTransacao(idCli, value);
    }

    // Removes a client and every transaction that references him.
    void matar(string idCli){
        repCli.remove(idCli);
        for(auto chave : repTr.getKeys()){
            if(repTr.get(chave).idCli == idCli)
                repTr.remove(chave);
        }
    }
};

// Convenience: string + int concatenation.
string operator+(string s, int i){
    return s + to_string(i);
}

// Parses and dispatches one command line of the text protocol.
class Controller{
public:
    Agiota agiota;

    void shell(string line){
        stringstream ss(line);
        string op;
        ss >> op;
        if(op == "init"){
            agiota = Agiota(read<float>(ss));
        }else if(op == "addCli"){
            string nome, fullname;
            ss >> nome;
            getline(ss, fullname);
            // substr(1) drops the separating space left by getline
            agiota.repCli.add(nome, Cliente(nome, fullname.substr(1)));
        }else if(op == "resumo"){
            for(auto& cli : agiota.repCli.getValues())
                cout << cli << endl;
            cout << "saldo : " << agiota.saldo << endl;
        }else if(op == "matar"){
            agiota.matar(read<string>(ss));
        }else if(op == "emprestar"){
            string nome;
            float value;
            ss >> nome >> value;
            agiota.emprestar(nome, value);
        }else if(op == "filtrar"){
            auto idCli = read<string>(ss);
            for(auto& tr : agiota.repTr.getValues())
                if(tr.idCli == idCli)
                    cout << tr << endl;
            cout << "saldo : " << agiota.repCli.get(idCli).saldo << endl;
        }else if(op == "receber"){
            string nome;
            float value;
            ss >> nome >> value;
            agiota.receber(nome, value);
        }else if(op == "historico"){
            for(auto& tr : agiota.repTr.getValues())
                cout << tr << endl;
        }else
            cout << "fail: comando invalido" << endl;
    }

    // Read-eval loop: echoes each line, stops on "end", prints thrown errors.
    void exec(){
        string line;
        while(true){
            getline(cin, line);
            cout << "$" << line << endl;
            if(line == "end")
                break;
            try{
                shell(line);
            }catch(string s){
                cout << s << endl;
            }
        }
    }
};

int main(){
    Controller controller;
    controller.exec();
}
/*************************************************************************/ /* timer.cpp */ /*************************************************************************/ /* This file is part of: */ /* GODOT ENGINE */ /* http://www.godotengine.org */ /*************************************************************************/ /* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */ /* */ /* Permission is hereby granted, free of charge, to any person obtaining */ /* a copy of this software and associated documentation files (the */ /* "Software"), to deal in the Software without restriction, including */ /* without limitation the rights to use, copy, modify, merge, publish, */ /* distribute, sublicense, and/or sell copies of the Software, and to */ /* permit persons to whom the Software is furnished to do so, subject to */ /* the following conditions: */ /* */ /* The above copyright notice and this permission notice shall be */ /* included in all copies or substantial portions of the Software. */ /* */ /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/
/*************************************************************************/
#include "timer.h"

// Scene-tree notification handler: starts the timer on READY (when
// autostart is set) and counts down during PROCESS, emitting "timeout"
// each time the countdown crosses zero.
void Timer::_notification(int p_what) {

	switch(p_what) {

		case NOTIFICATION_READY: {

			if (autostart) {
#ifdef TOOLS_ENABLED
				// In the editor, don't autostart timers that belong to the
				// scene currently being edited.
				if (get_tree()->is_editor_hint() && get_tree()->get_edited_scene_root() && (get_tree()->get_edited_scene_root()==this || get_tree()->get_edited_scene_root()->is_a_parent_of(this)))
					break;
#endif
				start();
			}
		} break;
		case NOTIFICATION_PROCESS: {

			time_left -= get_process_delta_time();

			if (time_left<0) {
				if (!one_shot)
					// Carry the overshoot into the next cycle so repeating
					// timers don't drift.
					time_left=wait_time+time_left;
				else
					stop();

				emit_signal("timeout");
			}

		} break;
	}
}

// Sets the countdown duration in seconds; rejects non-positive values.
void Timer::set_wait_time(float p_time) {

	ERR_EXPLAIN("time should be greater than zero.");
	ERR_FAIL_COND(p_time<=0);
	wait_time=p_time;
}

// Returns the configured countdown duration in seconds.
float Timer::get_wait_time() const {

	return wait_time;
}

// When one-shot is enabled the timer stops after the first timeout.
void Timer::set_one_shot(bool p_one_shot) {

	one_shot=p_one_shot;
}

bool Timer::is_one_shot() const {

	return one_shot;
}

// When autostart is enabled the timer starts as soon as it enters the tree.
void Timer::set_autostart(bool p_start) {

	autostart=p_start;
}

bool Timer::has_autostart() const {

	return autostart;
}

// Resets the countdown to wait_time and enables per-frame processing.
void Timer::start() {

	time_left=wait_time;
	set_process(true);
}

// Halts the countdown, disables processing and clears autostart.
void Timer::stop() {

	time_left=-1;
	set_process(false);
	autostart=false;
}

// Remaining time, clamped to zero when the timer is stopped/expired.
float Timer::get_time_left() const {

	return time_left >0 ? time_left : 0;
}

// Registers script-visible methods, the "timeout" signal and the
// editor-exposed properties.
void Timer::_bind_methods() {

	ObjectTypeDB::bind_method(_MD("set_wait_time","time_sec"),&Timer::set_wait_time);
	ObjectTypeDB::bind_method(_MD("get_wait_time"),&Timer::get_wait_time);

	ObjectTypeDB::bind_method(_MD("set_one_shot","enable"),&Timer::set_one_shot);
	ObjectTypeDB::bind_method(_MD("is_one_shot"),&Timer::is_one_shot);

	ObjectTypeDB::bind_method(_MD("set_autostart","enable"),&Timer::set_autostart);
	ObjectTypeDB::bind_method(_MD("has_autostart"),&Timer::has_autostart);

	ObjectTypeDB::bind_method(_MD("start"),&Timer::start);
	ObjectTypeDB::bind_method(_MD("stop"),&Timer::stop);

	ObjectTypeDB::bind_method(_MD("get_time_left"),&Timer::get_time_left);

	ADD_SIGNAL( MethodInfo("timeout") );

	ADD_PROPERTY( PropertyInfo(Variant::REAL, "wait_time", PROPERTY_HINT_EXP_RANGE, "0.01,4096,0.01" ), _SCS("set_wait_time"), _SCS("get_wait_time") );
	ADD_PROPERTY( PropertyInfo(Variant::BOOL, "one_shot" ), _SCS("set_one_shot"), _SCS("is_one_shot") );
	ADD_PROPERTY( PropertyInfo(Variant::BOOL, "autostart" ), _SCS("set_autostart"), _SCS("has_autostart") );
}

// Default state: stopped (time_left = -1), 1-second wait, not one-shot.
Timer::Timer() {

	autostart=false;
	wait_time=1;
	one_shot=false;
	time_left=-1;
}
#include <csignal> #include "fmt/format.h" #include "utils.hpp" #include "vm.hpp" auto main(int argc, const char* argv[]) -> int { // image-file must be passed as argument. if (argc < 2) { fmt::print(stderr, "{}\n", "Error! Usage: vm.exe [image-file] ..."); return -1; } auto vm = vm::Virtual_Machine(); // load the image-file for (auto i = 1; i < argc; ++i) { if (!vm.read_file(argv[i])) { fmt::print(stderr, "{} {}\n", "Failed to load image:", argv[i]); return -1; } } signal(SIGINT, handle_interrupt); disable_input_buffering(); vm.run(); restore_input_buffering(); return 0; }
//-------------------------------------------------------------- // Timer.cpp // Pinball // Created by David Haylock on 14/01/2018. //! Pinball. /** This is a Timer Object. */ //-------------------------------------------------------------- #include "Timer.h" //-------------------------------------------------------------- Timer::Timer() { bTimerReached = true; bInvalidate = false; } //-------------------------------------------------------------- Timer::~Timer() { } //-------------------------------------------------------------- void Timer::setup(float timerLength,string timerName,bool bLoop) { bTimerReached = true; bInvalidate = false; this->bLoop = bLoop; this->timerLength = timerLength; this->timerName = timerName; cout << this->timerName << endl; ofAddListener(ofEvents().update, this, &Timer::update); } //-------------------------------------------------------------- void Timer::update(ofEventArgs &event) { float timer = ofGetElapsedTimeMillis() - startTime; if (!bTimerReached) { timeLeft = timerLength - timer; } if (timer >= timerLength && !bTimerReached) { bTimerReached = true; if (!bInvalidate) { string evt = timerName + " Finished"; ofNotifyEvent(timerFinished, evt, this); } if (bLoop) { start(); } } } //-------------------------------------------------------------- float Timer::getTimeLeft() { return timeLeft; } //-------------------------------------------------------------- float Timer::getTimerLength() { return timerLength; } //-------------------------------------------------------------- bool Timer::hasTimerFinished() { return bTimerReached; } //-------------------------------------------------------------- void Timer::setNewTimerLength(int timerLength) { this->timerLength = timerLength; } //-------------------------------------------------------------- void Timer::start() { bInvalidate = false; if (bTimerReached) { string evt = timerName + " Started"; ofNotifyEvent(timerStarted, evt, this); bTimerReached = false; startTime = 
ofGetElapsedTimeMillis(); } } //-------------------------------------------------------------- void Timer::stop() { bTimerReached = true; bLoop = false; } //-------------------------------------------------------------- void Timer::invalidate() { bInvalidate = true; string evt = timerName + " Invalidated"; ofNotifyEvent(timerInvalidated, evt, this); }
/*
 * Copyright © 2018 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger, Rod Sheeter, Behdad Esfahbod
 */

#include "hb-subset.hh"
#include "hb-set.hh"

/**
 * hb_subset_input_create_or_fail:
 *
 * Allocates a new subset input object with empty unicode and glyph sets;
 * layout tables are dropped by default (drop_layout = true).
 *
 * Return value: New subset input, or %NULL on allocation failure.
 *
 * Since: 1.8.0
 **/
hb_subset_input_t *
hb_subset_input_create_or_fail ()
{
  hb_subset_input_t *input = hb_object_create<hb_subset_input_t>();

  if (unlikely (!input))
    return nullptr;

  input->unicodes = hb_set_create ();
  input->glyphs = hb_set_create ();
  input->drop_layout = true;

  return input;
}

/**
 * hb_subset_input_reference: (skip)
 * @subset_input: a subset_input.
 *
 * Increases the reference count on @subset_input.
 *
 * Return value: @subset_input.
 *
 * Since: 1.8.0
 **/
hb_subset_input_t *
hb_subset_input_reference (hb_subset_input_t *subset_input)
{
  return hb_object_reference (subset_input);
}

/**
 * hb_subset_input_destroy:
 * @subset_input: a subset_input.
 *
 * Decreases the reference count; when it reaches zero, releases the owned
 * unicode/glyph sets and frees the object.
 *
 * Since: 1.8.0
 **/
void
hb_subset_input_destroy (hb_subset_input_t *subset_input)
{
  if (!hb_object_destroy (subset_input)) return;

  hb_set_destroy (subset_input->unicodes);
  hb_set_destroy (subset_input->glyphs);

  free (subset_input);
}

/**
 * hb_subset_input_unicode_set:
 * @subset_input: a subset_input.
 *
 * Returns the set of Unicode codepoints to retain; callers mutate this set
 * directly. Ownership stays with @subset_input.
 *
 * Since: 1.8.0
 **/
HB_EXTERN hb_set_t *
hb_subset_input_unicode_set (hb_subset_input_t *subset_input)
{
  return subset_input->unicodes;
}

/**
 * hb_subset_input_glyph_set:
 * @subset_input: a subset_input.
 *
 * Returns the set of glyph ids to retain; callers mutate this set directly.
 * Ownership stays with @subset_input.
 *
 * Since: 1.8.0
 **/
HB_EXTERN hb_set_t *
hb_subset_input_glyph_set (hb_subset_input_t *subset_input)
{
  return subset_input->glyphs;
}

// Enable/disable dropping of hinting instructions in the subset output.
HB_EXTERN void
hb_subset_input_set_drop_hints (hb_subset_input_t *subset_input,
				hb_bool_t drop_hints)
{
  subset_input->drop_hints = drop_hints;
}

HB_EXTERN hb_bool_t
hb_subset_input_get_drop_hints (hb_subset_input_t *subset_input)
{
  return subset_input->drop_hints;
}

// Enable/disable dropping of layout tables in the subset output.
HB_EXTERN void
hb_subset_input_set_drop_layout (hb_subset_input_t *subset_input,
				 hb_bool_t drop_layout)
{
  subset_input->drop_layout = drop_layout;
}

HB_EXTERN hb_bool_t
hb_subset_input_get_drop_layout (hb_subset_input_t *subset_input)
{
  return subset_input->drop_layout;
}

// Enable/disable desubroutinization of CFF charstrings in the subset output.
HB_EXTERN void
hb_subset_input_set_desubroutinize (hb_subset_input_t *subset_input,
				    hb_bool_t desubroutinize)
{
  subset_input->desubroutinize = desubroutinize;
}

HB_EXTERN hb_bool_t
hb_subset_input_get_desubroutinize (hb_subset_input_t *subset_input)
{
  return subset_input->desubroutinize;
}
/* Copyright (c) 2010-present Advanced Micro Devices, Inc.

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:

 The above copyright notice and this permission notice shall be included in
 all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE. */

#ifndef MEMORY_H_
#define MEMORY_H_

#include "top.hpp"
#include "utils/flags.hpp"
#include "thread/monitor.hpp"
#include "platform/context.hpp"
#include "platform/object.hpp"
#include "platform/interop.hpp"
#include "device/device.hpp"

#include <atomic>
#include <utility>
#include <vector>
#include <list>
#include <map>
#include <unordered_map>
#include <memory>

#define CL_MEM_FOLLOW_USER_NUMA_POLICY (1u << 31)
#define ROCCLR_MEM_HSA_SIGNAL_MEMORY (1u << 30)

namespace device {
class Memory;
class VirtualDevice;
}  // namespace device

namespace amd {

// Forward declaration of the amd::Image and amd::Buffer classes.
class Image;
class Buffer;
class Pipe;

//! Describes a 3D rectangular region within a buffer, precomputing the
//! pitches and start/end offsets needed for rect copy operations.
struct BufferRect : public amd::EmbeddedObject {
  //! Default constructor
  BufferRect() : rowPitch_(0), slicePitch_(0), start_(0), end_(0) {}

  //! Creates BufferRect object
  bool create(const size_t* bufferOrigin,  //!< Start location in the buffer
              const size_t* region,        //!< Copy region
              size_t bufferRowPitch,       //!< Provided buffer's row pitch
              size_t bufferSlicePitch      //!< Provided buffer's slice pitch
              );

  //! Returns the plain offset for the (X, Y, Z) location
  size_t offset(size_t x,  //!< Coordinate in X dimension
                size_t y,  //!< Coordinate in Y dimension
                size_t z   //!< Coordinate in Z dimension
                ) const {
    return start_ + x + y * rowPitch_ + z * slicePitch_;
  }

  size_t rowPitch_;    //!< Calculated row pitch for the buffer rect
  size_t slicePitch_;  //!< Calculated slice pitch for the buffer rect
  size_t start_;       //!< Start offset for the copy region
  size_t end_;         //!< Relative end offset from start for the copy region
};

//! Tracks a host-side backing pointer and whether this object allocated it
//! (and therefore must deallocate it before destruction).
class HostMemoryReference {
 public:
  //! Default constructor
  HostMemoryReference(void* hostMem = NULL) : alloced_(false), hostMem_(hostMem), size_(0) {}

  //! Default destructor
  ~HostMemoryReference() { assert(!alloced_ && "Host buffer not deallocated"); }

  //! Creates host memory reference object
  bool allocateMemory(size_t size, const Context& context);

  // Frees system memory if it was allocated
  void deallocateMemory(const Context& context);

  //! Get the host memory pointer
  void* hostMem() const { return hostMem_; }

  //! Get the host memory size
  size_t size() const { return size_; }

  //! Set the host memory pointer (releases any memory this object allocated)
  void setHostMem(void* hostMem, const Context& context) {
    deallocateMemory(context);
    hostMem_ = hostMem;
  }

  //! Returns true if the host memory has been allocated by this object, false
  // if it has been allocated elsewhere.
  bool alloced() const { return alloced_; }

 private:
  //! Disable copy constructor
  HostMemoryReference(const HostMemoryReference&);

  //! Disable operator=
  HostMemoryReference& operator=(const HostMemoryReference&);

  bool alloced_;   //!< TRUE if memory was allocated
  void* hostMem_;  //!< Host memory pointer
  size_t size_;    //!< The host memory size
};

//! Abstract base for all runtime memory objects (buffers, images, pipes).
//! Holds the per-device memory map, the optional host backing store, and
//! the bookkeeping (versioning, sub-buffers, destructor callbacks) shared
//! by all memory kinds. Concrete allocation is delegated to device::Memory.
class Memory : public amd::RuntimeObject {
  typedef void(CL_CALLBACK* DestructorCallBackFunction)(cl_mem memobj, void* user_data);

  //! Per-device allocation lifecycle states
  enum AllocState { AllocInit = 0, AllocCreate = 1, AllocComplete = 2, AllocRealloced = 3 };

  //! Node of the singly-linked destructor-callback list
  struct DestructorCallBackEntry {
    struct DestructorCallBackEntry* next_;
    DestructorCallBackFunction callback_;
    void* data_;

    DestructorCallBackEntry(DestructorCallBackFunction callback, void* data)
        : callback_(callback), data_(data) {}
  };

 protected:
  typedef cl_mem_object_type Type;
  typedef cl_mem_flags Flags;
  typedef DeviceMap<const Device*, device::Memory*> DeviceMemory;

  //! Returns the number of devices this memory object is associated, including P2P access
  uint32_t NumDevicesWithP2P();

  size_t numDevices_;  //!< Number of devices

  //! The device memory objects included in this memory
  DeviceMemory* deviceMemories_;

  //! The device alloced state
  std::unordered_map<const Device*, std::atomic<AllocState>> deviceAlloced_;

  //! Linked list of destructor callbacks.
  std::atomic<DestructorCallBackEntry*> destructorCallbacks_;

  SharedReference<Context> context_;  //!< Owning context
  Memory* parent_;                    //!< Parent memory object (sub-buffer/view), or NULL
  const Type type_;                   //!< Object type (Buffer, Image2D, Image3D)
  HostMemoryReference hostMemRef_;    //!< Host-side memory reference(or NULL if none)
  size_t origin_;                     //!< Offset into the parent object
  size_t size_;                       //!< Size in bytes
  Flags flags_;                       //!< Construction flags
  size_t version_;                    //!< Update count, used for coherency
  const Device* lastWriter_;          //!< Which device wrote most recently (NULL if host)
  InteropObject* interopObj_;         //!< Interop object
  device::VirtualDevice* vDev_;       //!< Memory object belongs to a virtual device only
  std::atomic_uint mapCount_;         //!< Keep track of number of mappings for a memory object
  void* svmHostAddress_;              //!< svm host address;
  union {
    struct {
      uint32_t isParent_ : 1;          //!< This object is a parent
      uint32_t forceSysMemAlloc_ : 1;  //!< Forces system memory allocation
      uint32_t svmPtrCommited_ : 1;    //!< svm host address committed flag
      uint32_t canBeCached_ : 1;       //!< flag to if the object can be cached
      uint32_t p2pAccess_ : 1;         //!< Memory object allows P2P access
    };
    uint32_t flagsEx_;  //!< All of the above bit-flags as one word
  };

 private:
  //! Disable default assignment operator
  Memory& operator=(const Memory&);

  //! Disable default copy operator
  Memory(const Memory&);

  Monitor lockMemoryOps_;           //!< Lock to serialize memory operations
  std::list<Memory*> subBuffers_;   //!< List of all subbuffers for this memory object
  device::Memory* svmBase_;         //!< svmBase allocation for MGPU case

 protected:
  //! The constructor creates a memory object but does not allocate either host memory
  //! or device memory. Default parameters are appropriate for Buffer creation.
  Memory(Context& context,  //!< Context object
         Type type,         //!< Memory type
         Flags flags,       //!< Object's flags
         size_t size,       //!< Memory size
         void* svmPtr = NULL  //!< svm host memory address, NULL if no SVM mem object
         );

  Memory(Memory& parent,  //!< Parent memory object
         Flags flags,     //!< Object's flags
         size_t offset,   //!< Memory offset
         size_t size,     //!< Memory size
         Type type = 0    //!< Memory type
         );

  //! Memory object destructor
  virtual ~Memory();

  //! Copies initialization data to the backing store
  virtual void copyToBackingStore(void* initFrom  //!< Pointer to the initialization memory
                                  );

  //! Initializes the device memory array
  virtual void initDeviceMemory();

  void setSize(size_t size) { size_ = size; }

  void setInteropObj(InteropObject* obj) { interopObj_ = obj; }

  void resetAllocationState();

 public:
  //! Placement new operator.
  void* operator new(size_t size,            //!< Original allocation size
                     const Context& context  //!< Context this memory object is allocated in.
                     );

  // Provide a "matching" placement delete operator.
  void operator delete(void*,                  //!< Pointer to deallocate
                       const Context& context  //!< Context this memory object is allocated in.
                       );

  // and a regular delete operator to satisfy synthesized methods.
  void operator delete(void*  //!< Pointer to deallocate
                       );

  //! Returns the memory lock object
  amd::Monitor& lockMemoryOps() { return lockMemoryOps_; }

  //! Adds a view into the list
  void addSubBuffer(Memory* item);

  //! virtual function used to distinguish memory objects from other CL objects
  virtual ObjectType objectType() const { return ObjectTypeMemory; }

  //! Removes a subbuffer from the list
  void removeSubBuffer(Memory* item);

  //! Returns the list of all subbuffers
  std::list<Memory*>& subBuffers() { return subBuffers_; }

  //! Returns the number of devices
  size_t numDevices() const { return numDevices_; }

  //! static_cast to Buffer with sanity check
  virtual Buffer* asBuffer() { return NULL; }

  //! static_cast to Image with sanity check
  virtual Image* asImage() { return NULL; }

  //! static_cast to Pipe with sanity check
  virtual Pipe* asPipe() { return NULL; }

  //! Creates and initializes device (cache) memory for all devices
  virtual bool create(void* initFrom = NULL,    //!< Pointer to the initialization data
                      bool sysMemAlloc = false, //!< Allocate device memory in system memory
                      bool skipAlloc = false,   //!< Skip device memory allocation
                      bool forceAlloc = false   //!< Force device memory allocation
                      );

  //! Allocates device (cache) memory for a specific device
  bool addDeviceMemory(const Device* dev  //!< Device object
                       );

  //! Replaces device (cache) memory for a specific device
  void replaceDeviceMemory(const Device* dev,  //!< Device object
                           device::Memory* dm  //!< New device memory object for replacement
                           );

  //! Find the section for the given device. Return NULL if not found.
  device::Memory* getDeviceMemory(const Device& dev,  //!< Device object
                                  bool alloc = true   //!< Allocates memory
                                  );

  //! Allocate host memory (as required)
  bool allocHostMemory(void* initFrom,        //!< Host memory provided by the application
                       bool allocHostMem,     //!< Force system memory allocation
                       bool forceCopy = false //!< Force system memory allocation
                       );

  // Accessors
  Memory* parent() const { return parent_; }
  bool isParent() const { return isParent_; }
  size_t getOrigin() const { return origin_; }
  size_t getSize() const { return size_; }
  Flags getMemFlags() const { return flags_; }
  Type getType() const { return type_; }
  const Device* getLastWriter() { return lastWriter_; }
  const HostMemoryReference* getHostMemRef() const { return &hostMemRef_; }
  void* getHostMem() const { return hostMemRef_.hostMem(); }
  void setHostMem(void* mem) { hostMemRef_.setHostMem(mem, context_()); }
  size_t getVersion() const { return version_; }
  Context& getContext() const { return context_(); }
  bool isInterop() const { return (getInteropObj() != NULL) ? true : false; }
  InteropObject* getInteropObj() const { return interopObj_; }

  //! Registers a callback to run when this memory object is destroyed
  bool setDestructorCallback(DestructorCallBackFunction callback, void* data);

  //! Signal that a write has occurred to a cached version
  void signalWrite(const Device* writer);

  //! Force an asynchronous writeback from the most-recent dirty cache to host
  void cacheWriteBack(void);

  //! Returns true if the specified area covers memory intirely
  virtual bool isEntirelyCovered(const Coord3D& origin,  //!< Origin location of the covered region
                                 const Coord3D& region   //!< Covered region dimensions
                                 ) const = 0;

  //! Returns true if the specified area is not degenerate and is inside of allocated memory
  virtual bool validateRegion(const Coord3D& origin,  //!< Origin location of the covered region
                              const Coord3D& region   //!< Covered region dimensions
                              ) const = 0;

  void setVirtualDevice(device::VirtualDevice* vDev) { vDev_ = vDev; }
  device::VirtualDevice* getVirtualDevice() const { return vDev_; }

  bool forceSysMemAlloc() const { return forceSysMemAlloc_; }

  void incMapCount() { ++mapCount_; }
  void decMapCount() { --mapCount_; }
  uint mapCount() const { return mapCount_; }

  bool usesSvmPointer() const;

  void* getSvmPtr() const { return svmHostAddress_; }  //!< svm pointer accessor;
  void setSvmPtr(void* ptr) { svmHostAddress_ = ptr; }  //!< svm pointer setter;
  bool isSvmPtrCommited() const { return svmPtrCommited_; }  //!< svm host address committed accessor;
  void commitSvmMemory();  //!< svm host address committed accessor;
  void uncommitSvmMemory();

  void setCacheStatus(bool canBeCached) { canBeCached_ = canBeCached; }  //!< set the memobject cached status
  bool canBeCached() const { return canBeCached_; }  //!< get the memobject cached status

  //! Check if this objects allows P2P access
  bool P2PAccess() const { return p2pAccess_; }

  //! Returns the base device memory object for possible P2P access
  device::Memory* BaseP2PMemory() const { return deviceMemories_[0].value_; }

  device::Memory* svmBase() const { return svmBase_; }  //!< Returns SVM base for MGPU case
};

//! Buffers are a specialization of memory. Just a wrapper, really,
//! but this gives us flexibility for later changes.
class Buffer : public Memory {
 protected:
  cl_bus_address_amd busAddress_;  //!< Bus address for bus-addressable buffers

  //! Initializes the device memory array which is nested
  // after'Image1DD3D10' object in memory layout.
  virtual void initDeviceMemory();

  Buffer(Context& context, Type type, Flags flags, size_t size)
      : Memory(context, type, flags, size) {}

 public:
  Buffer(Context& context, Flags flags, size_t size, void* svmPtr = NULL)
      : Memory(context, CL_MEM_OBJECT_BUFFER, flags, size, svmPtr) {}

  Buffer(Memory& parent, Flags flags, size_t origin, size_t size)
      : Memory(parent, flags, origin, size) {}

  bool create(void* initFrom = NULL,    //!< Pointer to the initialization data
              bool sysMemAlloc = false, //!< Allocate device memory in system memory
              bool skipAlloc = false,   //!< Skip device memory allocation
              bool forceAlloc = false   //!< Force device memory allocation
              );

  //! static_cast to Buffer with sanity check
  virtual Buffer* asBuffer() { return this; }

  //! Returns true if the specified area covers buffer entirely
  bool isEntirelyCovered(const Coord3D& origin,  //!< Origin location of the covered region
                         const Coord3D& region   //!< Covered region dimensions
                         ) const;

  //! Returns true if the specified area is not degenerate and is inside of allocated memory
  bool validateRegion(const Coord3D& origin,  //!< Origin location of the covered region
                      const Coord3D& region   //!< Covered region dimensions
                      ) const;

  cl_bus_address_amd busAddress() const { return busAddress_; }
};

//! Pipes are a specialization of Buffers.
class Pipe : public Buffer {
 protected:
  size_t packetSize_;  //!< Size in bytes of pipe packet
  size_t maxPackets_;  //!< Number of max pipe packets
  bool initialized_;   //!< Mark if the pipe is initialized

  virtual void initDeviceMemory();

 public:
  Pipe(Context& context, Flags flags, size_t size, size_t pipe_packet_size,
       size_t pipe_max_packets)
      : Buffer(context, CL_MEM_OBJECT_PIPE, flags, size), initialized_(false) {
    packetSize_ = pipe_packet_size;
    maxPackets_ = pipe_max_packets;
  }

  //! static_cast to Pipe with sanity check
  virtual Pipe* asPipe() { return this; }

  //! Returns the pipe packet size in bytes
  size_t getPacketSize() const { return packetSize_; }

  //! Returns the maximum number of pipe packets
  size_t getMaxNumPackets() const { return maxPackets_; }
};

//! Images are a specialization of memory
class Image : public Memory {
 public:
  // declaration of list of supported formats
  static const cl_image_format supportedFormats[];
  static const cl_image_format supportedFormatsRA[];
  static const cl_image_format supportedDepthStencilFormats[];

  static uint32_t numSupportedFormats(const Context& context, cl_mem_object_type image_type,
                                      cl_mem_flags flags = 0);
  static uint32_t getSupportedFormats(const Context& context, cl_mem_object_type image_type,
                                      const uint32_t num_entries, cl_image_format* image_formats,
                                      cl_mem_flags flags = 0);

  //! Helper struct to manipulate image formats.
  struct Format : public cl_image_format {
    //! Construct a new ImageFormat wrapper.
    Format(const cl_image_format& format) {
      image_channel_order = format.image_channel_order;
      image_channel_data_type = format.image_channel_data_type;
    }

    //! Return true if this is a valid image format, false otherwise.
    bool isValid() const;

    //! Returns true if this format is supported by runtime, false otherwise
    bool isSupported(const Context& context, cl_mem_object_type image_type = 0,
                     cl_mem_flags flags = 0) const;

    //! Compare 2 image formats for equality (order and data type).
    bool operator==(const Format& rhs) const {
      return image_channel_order == rhs.image_channel_order &&
             image_channel_data_type == rhs.image_channel_data_type;
    }
    bool operator!=(const Format& rhs) const { return !(*this == rhs); }

    //! Return the number of channels.
    size_t getNumChannels() const;

    //! Return the element size in bytes.
    size_t getElementSize() const;

    //! Get the channel order by indices. R = 0, G = 1, B = 2, A = 3.
    void getChannelOrder(uint8_t* channelOrder) const;

    //! Adjust colorRGBA according to format, and set it in colorFormat.
    void formatColor(const void* colorRGBA, void* colorFormat) const;
  };

  //! Geometry/pitch descriptor shared by all image variants.
  struct Impl {
    amd::Coord3D region_;  //!< Image size
    size_t rp_;            //!< Image row pitch
    size_t sp_;            //!< Image slice pitch
    const Format format_;  //!< Image format
    void* reserved_;
    size_t bp_;            //!< Image byte pitch (set by HWL via setBytePitch)

    Impl(const Format& format, Coord3D region, size_t rp, size_t sp = 0, size_t bp = 0)
        : region_(region), rp_(rp), sp_(sp), format_(format), bp_(bp) {
      DEBUG_ONLY(reserved_ = NULL);
    }
  };

 private:
  Impl impl_;          //!< Image object description
  size_t dim_;         //!< Image dimension
  uint mipLevels_;     //!< The number of mip levels
  uint baseMipLevel_;  //!< The base mip level for a view

 protected:
  //! View constructor: shares storage with parent starting at baseMipLevel.
  Image(const Format& format, Image& parent, uint baseMipLevel = 0, cl_mem_flags flags = 0);

  ///! Initializes the device memory array which is nested
  // after 'Image' object in memory layout.
  virtual void initDeviceMemory();

  //! Copies initialization data to the backing store
  virtual void copyToBackingStore(void* initFrom  //!< Pointer to the initialization memory
  );

  void initDimension();

 public:
  Image(Context& context, Type type, Flags flags, const Format& format, size_t width,
        size_t height, size_t depth, size_t rowPitch, size_t slicePitch, uint mipLevels = 1);

  //! Buffer-backed image constructor.
  Image(Buffer& buffer, Type type, Flags flags, const Format& format, size_t width,
        size_t height, size_t depth, size_t rowPitch, size_t slicePitch);

  //! Validate image dimensions with supported sizes
  static bool validateDimensions(
      const std::vector<amd::Device*>& devices,  //!< List of devices for validation
      cl_mem_object_type type,                   //!< Image type
      size_t width,                              //!< Image width
      size_t height,                             //!< Image height
      size_t depth,                              //!< Image depth
      size_t arraySize                           //!< Image array size
  );

  const Format& getImageFormat() const { return impl_.format_; }

  //! static_cast to Image with sanity check
  virtual Image* asImage() { return this; }

  //! Returns true if specified area covers image entirely
  bool isEntirelyCovered(const Coord3D& origin,  //!< Origin location of the covered region
                         const Coord3D& region   //!< Covered region dimensions
  ) const;

  //! Returns true if the specified area is not degenerate and is inside of allocated memory
  bool validateRegion(const Coord3D& origin,  //!< Origin location of the covered region
                      const Coord3D& region   //!< Covered region dimensions
  ) const;

  //! Returns true if the slice value for the image is valid
  bool isRowSliceValid(size_t rowPitch,   //!< The row pitch value
                       size_t slicePitch, //!< The slice pitch value
                       size_t width,      //!< The width of the copy region
                       size_t height      //!< The height of the copy region
  ) const;

  //! Creates a view memory object
  virtual Image* createView(const Context& context,      //!< Context for a view creation
                            const Format& format,        //!< The new format for a view
                            device::VirtualDevice* vDev, //!< Virtual device object
                            uint baseMipLevel = 0,       //!< Base mip level for a view
                            cl_mem_flags flags = 0       //!< Memory allocation flags
  );

  //! Returns the impl for this image.
  Impl& getImpl() { return impl_; }

  //! Returns the number of dimensions.
  size_t getDims() const { return dim_; }

  //! Base virtual methods to be overridden in derived image classes
  //!
  //! Returns width of image in pixels
  size_t getWidth() const { return impl_.region_[0]; }

  //! Returns height of image in pixels
  size_t getHeight() const { return impl_.region_[1]; }

  //! Returns image's row pitch in bytes
  size_t getRowPitch() const { return impl_.rp_; }

  //! Returns image's byte pitch
  size_t getBytePitch() const { return impl_.bp_; }

  //! Returns depth of the image in pixels/slices
  size_t getDepth() const { return impl_.region_[2]; }

  //! Returns image's slice pitch in bytes
  size_t getSlicePitch() const { return impl_.sp_; }

  //! Returns the number of mip levels
  uint getMipLevels() const { return mipLevels_; }

  //! Returns the base mip level of this view
  uint getBaseMipLevel() const { return baseMipLevel_; }

  //! Get the image covered region
  const Coord3D& getRegion() const { return impl_.region_; }

  //! Sets the byte pitch obtained from HWL
  void setBytePitch(size_t bytePitch) { impl_.bp_ = bytePitch; }
};

//! SVM-related functionality.
class SvmBuffer : AllStatic {
 public:
  //! Allocate a shared buffer that is accessible by all devices in the context
  static void* malloc(Context& context, cl_svm_mem_flags flags, size_t size, size_t alignment,
                      const amd::Device* curDev = nullptr);

  //! Release shared buffer
  static void free(const Context& context, void* ptr);

  //! Fill the destination buffer \a dst with the contents of the source
  //! buffer \a src \times times.
  static void memFill(void* dst, const void* src, size_t srcSize, size_t times);

  //! Return true if \a ptr is a pointer allocated using SvmBuffer::malloc
  //! that has not been deallocated afterwards
  static bool malloced(const void* ptr);

 private:
  static void Add(uintptr_t k, uintptr_t v);
  static void Remove(uintptr_t k);
  static bool Contains(uintptr_t ptr);

  static std::map<uintptr_t, uintptr_t> Allocated_;  //!< Allocated buffers
  static Monitor AllocatedLock_;                     //!< Lock used with Allocated_
};

//! Liquid flash extension
class LiquidFlashFile : public RuntimeObject {
 private:
  std::wstring name_;          //!< File name
  cl_file_flags_amd flags_;    //!< Open flags
  void* handle_;               //!< OS file handle; NULL until open()
  uint32_t blockSize_;         //!< Transfer block size; 0 until open()
  uint64_t fileSize_;          //!< Total file size; 0 until open()

 public:
  LiquidFlashFile(const wchar_t* name, cl_file_flags_amd flags)
      : name_(name), flags_(flags), handle_(NULL), blockSize_(0), fileSize_(0) {}

  ~LiquidFlashFile();

  bool open();
  void close();

  uint32_t blockSize() const { return blockSize_; };
  uint64_t fileSize() const { return fileSize_; };

  //! Transfers one block between the file and a buffer; read selects direction.
  bool transferBlock(bool read, void* dst, uint64_t bufferSize, uint64_t fileOffset,
                     uint64_t bufferOffset, uint64_t size) const;

  virtual ObjectType objectType() const { return ObjectTypeLiquidFlashFile; }
};

}  // namespace amd

#endif  // MEMORY_H_
/** \file interrupt_link.cpp
 * \brief Interrupt handler callback implementation.
 *
 * The functions in this file wrap the methods of the module class in C-friendly callbacks.
 * This was necessary because the interrupt handler functions called from the system NVIC
 * needed to be defined in C code.
 */

#include "interrupt_link.hpp"
#include "calib.hpp"

/// Assign the callback function pointers to the wrapper functions and store the pointer
/// to the module class in the opaque modulePointer that the handlers receive back.
/// \param voidPointer the ViaCalib instance whose methods the callbacks will invoke
void linkInterrupts(ViaCalib * voidPointer) {

	modulePointer = static_cast<void *>(voidPointer);

	// Take each handler's address uniformly with & (the original mixed
	// `*uiTimerHandler` with `&handler`, which is legal — the dereferenced
	// function decays back to a pointer — but inconsistent and confusing).
	uiTimerCallback = &uiTimerHandler;

	mainRisingEdgeCallback = &mainRisingEdgeHandler;
	mainFallingEdgeCallback = &mainFallingEdgeHandler;

	auxRisingEdgeCallback = &auxRisingEdgeHandler;
	auxFallingEdgeCallback = &auxFallingEdgeHandler;

	buttonPressedCallback = &buttonPressedHandler;
	buttonReleasedCallback = &buttonReleasedHandler;

	dacHalfTransferCallback = &dacHalfTransferHandler;
	dacTransferCompleteCallback = &dacTransferCompleteHandler;
	dacTimerCallback = &dacTimerHandler;

	sdaadcConversionCompleteCallback = &sdaadcConversionCompleteHandler;
	adcConversionCompleteCallback = &adcConversionCompleteHandler;

	auxTimer1InterruptCallback = &auxTimer1InterruptHandler;
	auxTimer2InterruptCallback = &auxTimer2InterruptHandler;

}

//@{
/// Cast the void pointer used to reference the module class to an actual pointer to the class.
/// Use that to call the appropriate handler method.
void uiTimerHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->ui_dispatch(TIMEOUT_SIG); } void mainRisingEdgeHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->mainRisingEdgeCallback(); } void mainFallingEdgeHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->mainFallingEdgeCallback(); } void auxRisingEdgeHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->auxRisingEdgeCallback(); } void auxFallingEdgeHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->auxFallingEdgeCallback(); } void buttonPressedHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->buttonPressedCallback(); } void buttonReleasedHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->buttonReleasedCallback(); } void adcConversionCompleteHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->slowConversionCallback(); } void dacHalfTransferHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->halfTransferCallback(); } void dacTransferCompleteHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->transferCompleteCallback(); } void dacTimerHandler(void *) { ; } void sdaadcConversionCompleteHandler(void *) { ; } void auxTimer1InterruptHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->auxTimer1InterruptCallback(); } void auxTimer2InterruptHandler(void * voidPointer) { ViaCalib * modulePointer = (ViaCalib *) voidPointer; modulePointer->auxTimer2InterruptCallback(); } //@}
#pragma once
#include "../Graphics/ColorStructs.hpp"

namespace ln {

// A single renderable vertex. All vector members are stored as Vector4
// (w components carry fixed values set by the constructors below).
struct Vertex
{
	static const Vertex Default;

	Vector4 position;
	Vector4 normal;
	Vector4 uv;
	Color color;
	Vector4 tangent;	// Tangent vector; w = sign of the binormal

	// Default: origin position, +Z normal, uv (0,1), opaque black color, +X tangent.
	Vertex()
		: position(0.0f, 0.0f, 0.0f, 1.0f)
		, normal(0.0f, 0.0f, 1.0f, 0.0f)
		, uv(0.0f, 1.0f, 0.0f, 0.0f)
		, color(0.0f, 0.0f, 0.0f, 1.0f)
		, tangent(1.0f, 0.0f, 0.0f, 1.0f)
	{}

	// Widens 3D/2D inputs into the Vector4 members (position.w=1, normal.w=0, tangent.w=1).
	Vertex(const Vector3& position_, const Vector3& normal_, const Vector2& uv_, const Color& color_, const Vector3& tangent_ = Vector3::UnitX)
		: position(position_.x, position_.y, position_.z, 1.0f)
		, normal(normal_.x, normal_.y, normal_.z, 0.0f)
		, uv(uv_.x, uv_.y, 0.0f, 0.0f)
		, color(color_.r, color_.g, color_.b, color_.a)
		, tangent(tangent_.x, tangent_.y, tangent_.z, 1.0f)
	{}

	// Assignment counterpart of the converting constructor above.
	void set(const Vector3& position_, const Vector3& normal_, const Vector2& uv_, const Color& color_, const Vector3& tangent_ = Vector3::UnitX)
	{
		position.set(position_.x, position_.y, position_.z, 1.0f);
		normal.set(normal_.x, normal_.y, normal_.z, 0.0f);
		uv.set(uv_.x, uv_.y, 0.0f, 0.0f);
		color.set(color_.r, color_.g, color_.b, color_.a);
		tangent.set(tangent_.x, tangent_.y, tangent_.z, 1.0f);
	}

	// The setters below update only xyz (or xy for uv) and preserve the stored w.
	void setPosition(const Vector3& value) { position.set(value.x, value.y, value.z, position.w); }
	void setPosition(float x, float y, float z) { position.set(x, y, z, position.w); }
	void setNormal(const Vector3& value) { normal.set(value.x, value.y, value.z, normal.w); }
	void setUV(float x, float y) { uv.set(x, y, uv.z, uv.w); }
	void setUV(const Vector2& value) { uv.set(value.x, value.y, uv.z, uv.w); }

	// Applies m to position as a point (w forced back to 1).
	void transformPosition(const Matrix& m)
	{
		position = Vector4(Vector3::transformCoord(position.xyz(), m), 1.0f);
	}

	// Applies m to normal as a direction (no translation; w forced back to 0).
	void transformNormalDirection(const Matrix& m)
	{
		Vector3 t = normal.xyz();
		t.transformDirection(m);
		normal = Vector4(t, 0.0f);
	}
};

// Per-vertex skinning data: 4 bone indices and 4 weights.
struct VertexBlendWeight
{
	static const VertexBlendWeight Default;
	float indices[4];
	float weights[4];
};

// Per-vertex morph-target delta attributes.
struct VertexMorphTarget
{
	Vector3 position;
	Vector3 normal;
	Vector3 tangent;
};

// TEXCOORD_1,TEXCOORD_2,TEXCOORD_3
struct VertexAdditionalUV
{
	Vector4 uv[3];
};

// MMD spherical-deform (SDEF) parameters.
struct VertexSdefInfo
{
	Vector4 sdefC;
	Vector3 sdefR0;
	Vector3 sdefR1;
};

// Extra MMD per-vertex data (outline edge weight and vertex index).
struct VertexMmdExtra
{
	float edgeWeight;
	float index;
};

} // namespace ln
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Runtime entry points backing BigInt comparison, conversion, and arithmetic
// operations invoked from generated code / interpreter bytecode.

#include "src/arguments-inl.h"
#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/objects/bigint.h"
#include "src/runtime/runtime-utils.h"

namespace v8 {
namespace internal {

// args: comparison mode (Smi-encoded Operation), lhs BigInt, rhs BigInt.
RUNTIME_FUNCTION(Runtime_BigIntCompareToBigInt) {
  SealHandleScope shs(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Smi, mode, 0);
  CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 1);
  CONVERT_ARG_HANDLE_CHECKED(BigInt, rhs, 2);
  bool result = ComparisonResultToBool(static_cast<Operation>(mode->value()),
                                       BigInt::CompareToBigInt(lhs, rhs));
  return *isolate->factory()->ToBoolean(result);
}

RUNTIME_FUNCTION(Runtime_BigIntCompareToNumber) {
  SealHandleScope shs(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Smi, mode, 0);
  CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 2);
  bool result = ComparisonResultToBool(static_cast<Operation>(mode->value()),
                                       BigInt::CompareToNumber(lhs, rhs));
  return *isolate->factory()->ToBoolean(result);
}

// Uses a full HandleScope (not Seal) because CompareToString can allocate.
RUNTIME_FUNCTION(Runtime_BigIntCompareToString) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Smi, mode, 0);
  CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 1);
  CONVERT_ARG_HANDLE_CHECKED(String, rhs, 2);
  bool result = ComparisonResultToBool(static_cast<Operation>(mode->value()),
                                       BigInt::CompareToString(isolate, lhs, rhs));
  return *isolate->factory()->ToBoolean(result);
}

RUNTIME_FUNCTION(Runtime_BigIntEqualToBigInt) {
  SealHandleScope shs(isolate);
  DCHECK_EQ(2, args.length());
  CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
  CONVERT_ARG_HANDLE_CHECKED(BigInt, rhs, 1);
  bool result = BigInt::EqualToBigInt(*lhs, *rhs);
  return *isolate->factory()->ToBoolean(result);
}

RUNTIME_FUNCTION(Runtime_BigIntEqualToNumber) {
  SealHandleScope shs(isolate);
  DCHECK_EQ(2, args.length());
  CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
  bool result = BigInt::EqualToNumber(lhs, rhs);
  return *isolate->factory()->ToBoolean(result);
}

RUNTIME_FUNCTION(Runtime_BigIntEqualToString) {
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  CONVERT_ARG_HANDLE_CHECKED(BigInt, lhs, 0);
  CONVERT_ARG_HANDLE_CHECKED(String, rhs, 1);
  bool result = BigInt::EqualToString(isolate, lhs, rhs);
  return *isolate->factory()->ToBoolean(result);
}

RUNTIME_FUNCTION(Runtime_BigIntToBoolean) {
  SealHandleScope shs(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(BigInt, bigint, 0);
  return *isolate->factory()->ToBoolean(bigint->ToBoolean());
}

RUNTIME_FUNCTION(Runtime_BigIntToNumber) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(BigInt, x, 0);
  return *BigInt::ToNumber(isolate, x);
}

RUNTIME_FUNCTION(Runtime_ToBigInt) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
  RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, x));
}

// Dispatches a binary BigInt operation selected by the Smi opcode argument.
// Throws a TypeError if either operand is not a BigInt (mixed-type arithmetic
// with BigInts is a TypeError per spec).
RUNTIME_FUNCTION(Runtime_BigIntBinaryOp) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Object, left_obj, 0);
  CONVERT_ARG_HANDLE_CHECKED(Object, right_obj, 1);
  CONVERT_SMI_ARG_CHECKED(opcode, 2);
  Operation op = static_cast<Operation>(opcode);

  if (!left_obj->IsBigInt() || !right_obj->IsBigInt()) {
    THROW_NEW_ERROR_RETURN_FAILURE(
        isolate, NewTypeError(MessageTemplate::kBigIntMixedTypes));
  }
  Handle<BigInt> left(Handle<BigInt>::cast(left_obj));
  Handle<BigInt> right(Handle<BigInt>::cast(right_obj));
  MaybeHandle<BigInt> result;
  switch (op) {
    case Operation::kAdd:
      result = BigInt::Add(isolate, left, right);
      break;
    case Operation::kSubtract:
      result = BigInt::Subtract(isolate, left, right);
      break;
    case Operation::kMultiply:
      result = BigInt::Multiply(isolate, left, right);
      break;
    case Operation::kDivide:
      result = BigInt::Divide(isolate, left, right);
      break;
    case Operation::kModulus:
      result = BigInt::Remainder(isolate, left, right);
      break;
    case Operation::kExponentiate:
      result = BigInt::Exponentiate(isolate, left, right);
      break;
    case Operation::kBitwiseAnd:
      result = BigInt::BitwiseAnd(isolate, left, right);
      break;
    case Operation::kBitwiseOr:
      result = BigInt::BitwiseOr(isolate, left, right);
      break;
    case Operation::kBitwiseXor:
      result = BigInt::BitwiseXor(isolate, left, right);
      break;
    case Operation::kShiftLeft:
      result = BigInt::LeftShift(isolate, left, right);
      break;
    case Operation::kShiftRight:
      result = BigInt::SignedRightShift(isolate, left, right);
      break;
    case Operation::kShiftRightLogical:
      result = BigInt::UnsignedRightShift(isolate, left, right);
      break;
    default:
      UNREACHABLE();
  }
  RETURN_RESULT_OR_FAILURE(isolate, result);
}

// Dispatches a unary BigInt operation selected by the Smi opcode argument.
RUNTIME_FUNCTION(Runtime_BigIntUnaryOp) {
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  CONVERT_ARG_HANDLE_CHECKED(BigInt, x, 0);
  CONVERT_SMI_ARG_CHECKED(opcode, 1);
  Operation op = static_cast<Operation>(opcode);
  MaybeHandle<BigInt> result;
  switch (op) {
    case Operation::kBitwiseNot:
      result = BigInt::BitwiseNot(isolate, x);
      break;
    case Operation::kNegate:
      result = BigInt::UnaryMinus(isolate, x);
      break;
    case Operation::kIncrement:
      result = BigInt::Increment(isolate, x);
      break;
    case Operation::kDecrement:
      result = BigInt::Decrement(isolate, x);
      break;
    default:
      UNREACHABLE();
  }
  RETURN_RESULT_OR_FAILURE(isolate, result);
}

}  // namespace internal
}  // namespace v8
// -*- C++ -*- Copyright (c) Microsoft Corporation; see license.txt
#include "Spatial.h"

namespace hh {

// Given 10000 random data points uniformly sampled over the unit cube,
//  find the closest 10 neighbors:
//  spatialtest -gn ? -pn 10000
//     average time
// ?      10000    1000
// 1      1.1745   .14967
// 2      .25267   .05333
// 3      .11967   .03883
// 4      .07450   .03217
// 5      .05583   .02917
// 10     .03266   .02517
// 20     .02733   .02433
// 30     .02500   .02833
// 40     .02500   .03550
// 50     .02550   .04733
// 70     .03067   .08950
// 100    .04383   .20933
// 200    .18033   1.4403
// optimal #cells/point:
//   .1-30   .06-40
//
// for 100'000 data points, gn=40, time is .03100 (still very good)
//                          gn=50, time is .02583

// *** BPointSpatial

// Remove all points, logging per-cell occupancy statistics.
void BPointSpatial::clear() {
    for (auto& cell : _map.values()) { HH_SSTAT(Spspcelln, cell.num()); }
    _map.clear();
}

// Insert point *pp (not copied; caller keeps ownership) under identifier id.
void BPointSpatial::enter(Univ id, const Point* pp) {
    Ind ci = point_to_indices(*pp);
    assertx(indices_inbounds(ci));
    int en = encode(ci);
    _map[en].push(Node(id, pp));  // first creates empty Array<Node> if not present
}

// Remove the unique entry with identifier id in the cell containing *pp;
// asserts that exactly one such entry exists.
void BPointSpatial::remove(Univ id, const Point* pp) {
    Ind ci = point_to_indices(*pp);
    assertx(indices_inbounds(ci));
    int en = encode(ci);
    Array<Node>& ar = _map.get(en);
    int ind = -1;
    for_int(i, ar.num()) {
        if (ar[i].id==id) { assertx(ind<0); ind = i; }
    }
    assertx(ind>=0);
    ar.erase(ind, 1);
    if (!ar.num()) _map.remove(en);
}

void BPointSpatial::shrink_to_fit() {
    for (auto& cell : _map.values()) { cell.shrink_to_fit(); }
}

// Push all points in cell ci into pq keyed by squared distance to pcenter.
void BPointSpatial::add_cell(const Ind& ci, Pqueue<Univ>& pq, const Point& pcenter, Set<Univ>&) const {
    // SHOW("add_cell", ci);
    int en = encode(ci);
    bool present; auto& cell = _map.retrieve(en, present);
    if (!present) return;
    for (const Node& e : cell) {
        // SHOW("enter", *e.p, dist2(pcenter, *e.p));
        pq.enter(Conv<const Node*>::e(&e), dist2(pcenter, *e.p));
    }
}

// Map a queue entry (encoded Node*) back to the user-supplied identifier.
Univ BPointSpatial::pq_id(Univ id) const {
    const Node* e = Conv<const Node*>::d(id);
    return e->id;
}

// *** IPointSpatial

// Index-based variant: stores indices into the caller-owned point array arp,
// which must outlive this object (_pp aliases its storage).
IPointSpatial::IPointSpatial(int gn, CArrayView<Point> arp) : Spatial(gn), _pp(arp.data()) {
    for_int(i, arp.num()) {
        Ind ci = point_to_indices(arp[i]);
        assertx(indices_inbounds(ci));
        int en = encode(ci);
        _map[en].push(i);  // first creates empty Array<int> if not present
    }
    if (0) for (auto& cell : _map.values()) { cell.shrink_to_fit(); }
}

void IPointSpatial::clear() {
    for (auto& cell : _map.values()) { HH_SSTAT(Spspcelln, cell.num()); }
    _map.clear();
}

void IPointSpatial::add_cell(const Ind& ci, Pqueue<Univ>& pq, const Point& pcenter, Set<Univ>&) const {
    int en = encode(ci);
    bool present; auto& cell = _map.retrieve(en, present);
    if (!present) return;
    for (int i : cell) {
        pq.enter(Conv<int>::e(i), dist2(pcenter, _pp[i]));
    }
}

// Queue entries already are the point indices, so the identity suffices.
Univ IPointSpatial::pq_id(Univ id) const {
    return id;
}

// *** SpatialSearch

// Incremental nearest-neighbor search outward from p, up to distance maxdis.
// _ssi tracks the inclusive index box of cells already considered.
BSpatialSearch::BSpatialSearch(const Spatial& sp, const Point& p, float maxdis)
    : _sp(sp), _pcenter(p), _maxdis(maxdis) {
    // SHOW("search", p, maxdis);
    Ind ci = _sp.point_to_indices(_pcenter);
    assertx(_sp.indices_inbounds(ci));
    for_int(i, 2) for_int(c, 3) { _ssi[i][c] = ci[c]; }
    consider(ci);
    get_closest_next_cell();
}

BSpatialSearch::~BSpatialSearch() {
    HH_SSTAT(Sssncellsv, _ncellsv);
    HH_SSTAT(Sssnelemsv, _nelemsv);
}

// True when no candidate remains and the searched box already covers maxdis.
bool BSpatialSearch::done() {
    for (;;) {
        if (!_pq.empty()) return false;
        if (_disbv2>=square(_maxdis)) return true;
        expand_search_space();
    }
}

// Pop the next closest element; expands the cell box whenever the current
// queue minimum could be beaten by an element in an unvisited cell.
Univ BSpatialSearch::next(float* pdis2) {
    Univ u;
    for (;;) {
        if (_pq.empty()) assertx(!done());  // refill _pq
        float dis2 = _pq.min_priority();
        if (dis2>_disbv2) { expand_search_space(); continue; }
        u = _pq.min();
        _sp.pq_refine(_pq, _pcenter);
        if (_pq.min()!=u || _pq.min_priority()!=dis2) continue;
        if (pdis2) *pdis2 = _pq.min_priority();
        u = _pq.remove_min();
        break;
    }
    return _sp.pq_id(u);
}

// Add the contents of cell ci to the candidate queue (with statistics).
void BSpatialSearch::consider(const Ind& ci) {
    // SHOW("consider", ci);
    _ncellsv++;
    int n = _pq.num();
    _sp.add_cell(ci, _pq, _pcenter, _setevis);
    _nelemsv += _pq.num()-n;
}

// Find the axis/direction whose adjacent unvisited cell layer is closest to
// _pcenter; records the squared bound distance in _disbv2.
void BSpatialSearch::get_closest_next_cell() {
    float mindis = 1e10f;
    for_int(c, 3) {
        if (_ssi[0][c]>0) {
            float a = _pcenter[c]-_sp.index_to_float(_ssi[0][c]);
            if (a<mindis) { mindis = a; _axis = c; _dir = 0; }
        }
        if (_ssi[1][c]<_sp._gn-1) {
            float a = _sp.index_to_float(_ssi[1][c]+1)-_pcenter[c];
            if (a<mindis) { mindis = a; _axis = c; _dir = 1; }
        }
    }
    // mindis may be big if all of space has been searched
    _disbv2 = square(mindis);
}

// Grow the visited box by one cell layer along (_axis, _dir) and consider
// every cell in the newly exposed layer.
void BSpatialSearch::expand_search_space() {
    ASSERTX(_axis>=0 && _axis<3 && _dir>=0 && _dir<=1);
    // SHOW("expand", _axis, _dir, _ssi);
    Vec2<Ind> bi = _ssi;
    _ssi[_dir][_axis] += _dir ? 1 : -1;
    bi[0][_axis] = bi[1][_axis] = _ssi[_dir][_axis];
    // consider the layer whose axis's value is _ssi[_dir][_axis]
    for (const Ind& cit : coordsL(bi[0], bi[1]+1)) { consider(cit); }
    get_closest_next_cell();
}

} // namespace hh
#include "databaseadminitemedit.h"
#include "ui_databaseadminitemedit.h"
#include <QSpinBox>
#include <QCheckBox>
#include <QDateTimeEdit>
#include <QLineEdit>

/// Generic add/edit dialog: one editor widget row per database column.
DatabaseAdminItemEdit::DatabaseAdminItemEdit(QWidget *parent) :
    QDialog(parent),
    ui(new Ui::DatabaseAdminItemEdit)
{
    ui->setupUi(this);
}

/// Builds one labelled editor row per column, choosing the widget type from
/// the column type, and records each editor in Widgets keyed by column id.
void DatabaseAdminItemEdit::SetColumns(QList<DatabaseColumn> Columns)
{
    this->Columns = Columns;
    foreach(DatabaseColumn Column, Columns)
    {
        QLabel *Label = new QLabel(this);
        Label->setText(Column.Description);

        // Initialize to null and provide a default case: the original switch
        // left DataItem uninitialized (undefined behavior on use) if a new
        // enum value were ever added to DatabaseColumn::Type.
        QWidget *DataItem = nullptr;
        switch(Column.Type)
        {
        case DatabaseColumn::String:
            DataItem = new QLineEdit(this);
            break;
        case DatabaseColumn::Int:
        {
            QSpinBox *SpinBox = new QSpinBox(this);
            SpinBox->setMinimum(-1000000);
            SpinBox->setMaximum(1000000);
            DataItem = SpinBox;
            break;
        }
        case DatabaseColumn::Bool:
            DataItem = new QCheckBox(this);
            break;
        case DatabaseColumn::Date:
            DataItem = new QDateTimeEdit(this);
            break;
        default:
            // Unknown column type: fall back to a plain text editor so the
            // row is still usable and DataItem is never null below.
            DataItem = new QLineEdit(this);
            break;
        }

        Widgets[Column.Id] = DataItem;
        ui->MainLayout->addRow(Label, DataItem);
    }
}

/// Hides the group selector combo box (for tables without groups).
void DatabaseAdminItemEdit::HideGroupBox()
{
    ui->comboBox->setVisible(false);
}

/// Populates the group selector combo box.
void DatabaseAdminItemEdit::SetGroupList(QStringList Groups)
{
    ui->comboBox->addItems(Groups);
}

/// Selects the group at the given combo-box index.
void DatabaseAdminItemEdit::SetSelectedGroup(int index)
{
    ui->comboBox->setCurrentIndex(index);
}

/// Returns the currently selected group index.
int DatabaseAdminItemEdit::GetSelectedIndex()
{
    return ui->comboBox->currentIndex();
}

/// Reads every editor widget back into a DatabaseItem keyed by column id.
DatabaseItem DatabaseAdminItemEdit::GetDatabaseItem()
{
    DatabaseItem res;
    foreach (DatabaseColumn Column, Columns)
    {
        QVariant val;
        QWidget *DataItem = Widgets[Column.Id];
        switch(Column.Type)
        {
        case DatabaseColumn::String:
            val = qobject_cast<QLineEdit*>(DataItem)->text();
            break;
        case DatabaseColumn::Int:
            val = qobject_cast<QSpinBox*>(DataItem)->value();
            break;
        case DatabaseColumn::Bool:
            val = qobject_cast<QCheckBox*>(DataItem)->isChecked();
            break;
        case DatabaseColumn::Date:
            val = qobject_cast<QDateTimeEdit*>(DataItem)->dateTime();
            break;
        }
        res.Data[Column.Id] = val;
    }
    return res;
}

/// Loads an existing item's values into the editor widgets. Columns missing
/// from Item.Data are left at their widget defaults.
void DatabaseAdminItemEdit::SetDatabaseItem(DatabaseItem Item)
{
    foreach (DatabaseColumn Column, Columns)
    {
        // Direct hash lookup; the original scanned the whole hash with a
        // QHashIterator to find the key (O(n) per column for no benefit).
        if(!Item.Data.contains(Column.Id))
            continue;
        QVariant val = Item.Data.value(Column.Id);

        QWidget *DataItem = Widgets[Column.Id];
        switch(Column.Type)
        {
        case DatabaseColumn::String:
            qobject_cast<QLineEdit*>(DataItem)->setText(val.toString());
            break;
        case DatabaseColumn::Int:
            qobject_cast<QSpinBox*>(DataItem)->setValue(val.toInt());
            break;
        case DatabaseColumn::Bool:
            qobject_cast<QCheckBox*>(DataItem)->setChecked(val.toBool());
            break;
        case DatabaseColumn::Date:
            qobject_cast<QDateTimeEdit*>(DataItem)->setDateTime(val.toDateTime());
            break;
        }
    }
}

DatabaseAdminItemEdit::~DatabaseAdminItemEdit()
{
    delete ui;
}
// We check that the Finalizer is run using FileCheck, and that the expected // files are generated by deleting them (leaving things tidy) // RUN: %cmc -mCM_printfargs -mdump_asm -Qxcm_jit_target=skl /Qxcm_asm_output=asm_output_slash %w 2>&1 \ // RUN: | FileCheck %w // RUN: rm %W.isa asm_output_slash_0.visaasm asm_output_slash_0.asm asm_output_slash_0.dat #include <cm/cm.h> _GENX_MAIN_ void test() { } #ifdef CM_GENX #warning CM_GENX defined #endif #ifdef CM_GEN7_5 #warning CM_GEN7_5 defined #endif #ifdef CM_GEN8 #warning CM_GEN8 defined #endif #ifdef CM_GEN8_5 #warning CM_GEN8_5 defined #endif #ifdef CM_GEN9 #warning CM_GEN9 defined #endif #ifdef CM_GEN9_5 #warning CM_GEN9_5 defined #endif #ifdef CM_GEN10 #warning CM_GEN10 defined #endif // CHECK: cm_asm_output_slash.cpp(14,2): warning: CM_GENX defined [-W#warnings] // CHECK: cm_asm_output_slash.cpp(30,2): warning: CM_GEN9 defined [-W#warnings] // CHECK: 2 warnings generated. // CHECK: -platform SKL
// Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
// All rights reserved. Use of this source code is governed by
// a BSD-style license that can be found in the LICENSE file.

// Unit tests for the quaternion-array (qarray) math helpers.

#include <toast_test.hpp>

#include <cmath>
#include <limits>

// Shared fixture data: reference quaternions, vectors, and expected results.
void TOASTqarrayTest::SetUp() {
    q1 = {0.50487417, 0.61426059, 0.60118994, 0.07972857};
    q1inv = {-0.50487417, -0.61426059, -0.60118994, 0.07972857};
    q2 = {0.43561544, 0.33647027, 0.40417115, 0.73052901};
    qtonormalize = {1.0, 2.0, 3.0, 4.0, 2.0, 3.0, 4.0, 5.0};
    qnormalized = {0.18257419, 0.36514837, 0.54772256, 0.73029674,
                   0.27216553, 0.40824829, 0.54433105, 0.68041382};
    vec = {0.57734543, 0.30271255, 0.75831218};
    vec2 = {0.57734543, 8.30271255, 5.75831218,
            1.57734543, 3.30271255, 0.75831218};
    qeasy = {0.3, 0.3, 0.1, 0.9, 0.3, 0.3, 0.1, 0.9};
    mult_result = {0.44954009, 0.53339352, 0.37370443, -0.61135101};
    rot_by_q1 = {0.4176698, 0.84203849, 0.34135482};
    rot_by_q2 = {0.8077876, 0.3227185, 0.49328689};
    return;
}

// Dot product of one 3-vector pair.
TEST_F(TOASTqarrayTest, arraylist_dot1) {
    double check;
    double result;
    toast::AlignedVector <double> pone(3);
    check = 0.0;
    for (size_t i = 0; i < 3; ++i) {
        pone[i] = vec[i] + 1.0;
        check += vec[i] * pone[i];
    }
    toast::qa_list_dot(1, 3, 3, vec.data(), pone.data(), &result);
    EXPECT_DOUBLE_EQ(check, result);
}

// Dot products of two 3-vector pairs in one call.
TEST_F(TOASTqarrayTest, arraylist_dot2) {
    double check[2];
    toast::AlignedVector <double> result(2);
    toast::AlignedVector <double> pone(6);
    for (size_t i = 0; i < 2; ++i) {
        check[i] = 0.0;
        for (size_t j = 0; j < 3; ++j) {
            pone[3 * i + j] = vec2[3 * i + j] + 1.0;
            check[i] += vec2[3 * i + j] * pone[3 * i + j];
        }
    }
    toast::qa_list_dot(2, 3, 3, vec2.data(), pone.data(), result.data());
    for (size_t i = 0; i < 2; ++i) {
        EXPECT_DOUBLE_EQ(check[i], result[i]);
    }
}

// Quaternion inverse (conjugate of a unit quaternion), in place.
TEST_F(TOASTqarrayTest, inv) {
    toast::AlignedVector <double> result(4);
    for (size_t i = 0; i < 4; ++i) {
        result[i] = q1[i];
    }
    toast::qa_inv(1, result.data());
    for (size_t i = 0; i < 4; ++i) {
        EXPECT_FLOAT_EQ(q1inv[i], result[i]);
    }
}

// Normalization of a pair of quaternions against precomputed values.
TEST_F(TOASTqarrayTest, norm) {
    toast::AlignedVector <double> result(4);
    toast::qa_normalize(1, 4, 4, qtonormalize.data(), result.data());
    for (size_t i = 0; i < 4; ++i) {
        EXPECT_FLOAT_EQ(qnormalized[i], result[i]);
    }
}

// Single quaternion multiplication against a precomputed product.
TEST_F(TOASTqarrayTest, mult) {
    toast::AlignedVector <double> result(4);
    toast::qa_mult(1, q1.data(), 1, q2.data(), result.data());
    for (size_t i = 0; i < 4; ++i) {
        EXPECT_FLOAT_EQ(mult_result[i], result[i]);
    }
}

// Array multiplication, including broadcasting one identity quaternion
// against an array (multiplying by identity must return the inputs).
TEST_F(TOASTqarrayTest, multarray) {
    size_t n = 3;
    toast::AlignedVector <double> in1(4 * n);
    toast::AlignedVector <double> in2(4 * n);
    toast::AlignedVector <double> result(4 * n);
    toast::AlignedVector <double> null(4 * n);
    null[0] = 0.0;
    null[1] = 0.0;
    null[2] = 0.0;
    null[3] = 1.0;
    for (size_t i = 0; i < n; ++i) {
        for (size_t j = 0; j < 4; ++j) {
            in1[4 * i + j] = q1[j];
            in2[4 * i + j] = q2[j];
        }
    }
    toast::qa_mult(n, in1.data(), n, in2.data(), result.data());
    for (size_t i = 0; i < n; ++i) {
        for (size_t j = 0; j < 4; ++j) {
            EXPECT_FLOAT_EQ(mult_result[j], result[4 * i + j]);
        }
    }
    toast::qa_mult(n, in1.data(), 1, null.data(), result.data());
    for (size_t i = 0; i < n; ++i) {
        for (size_t j = 0; j < 4; ++j) {
            EXPECT_FLOAT_EQ(in1[j], result[4 * i + j]);
        }
    }
}

// Rotation of one vector by one quaternion.
TEST_F(TOASTqarrayTest, rot1) {
    toast::AlignedVector <double> result(3);
    toast::qa_rotate(1, q1.data(), 1, vec.data(), result.data());
    for (size_t i = 0; i < 3; ++i) {
        EXPECT_FLOAT_EQ(rot_by_q1[i], result[i]);
    }
}

// Element-wise rotation of an array of vectors by an array of quaternions.
TEST_F(TOASTqarrayTest, rotarray) {
    size_t n = 2;
    toast::AlignedVector <double> qin(4 * n);
    toast::AlignedVector <double> vin(3 * n);
    toast::AlignedVector <double> result(3 * n);
    for (size_t i = 0; i < 4; ++i) {
        qin[i] = q1[i];
        qin[4 + i] = q2[i];
    }
    for (size_t i = 0; i < n; ++i) {
        for (size_t j = 0; j < 3; ++j) {
            vin[3 * i + j] = vec[j];
        }
    }
    toast::qa_rotate(n, qin.data(), n, vin.data(), result.data());
    for (size_t i = 0; i < 3; ++i) {
        EXPECT_FLOAT_EQ(rot_by_q1[i], result[i]);
        EXPECT_FLOAT_EQ(rot_by_q2[i], result[3 + i]);
    }
}

// Spherical linear interpolation: endpoints must be exact; interior samples
// are compared against normalized linear interpolation (valid for nearby
// quaternions, hence the looser 1e-4 tolerance).
TEST_F(TOASTqarrayTest, slerp) {
    size_t n = 2;
    size_t ninterp = 4;
    toast::AlignedVector <double> q = {2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    toast::AlignedVector <double> qinterp(16);
    toast::AlignedVector <double> time = {0.0, 9.0};
    toast::AlignedVector <double> targettime = {0.0, 3.0, 4.5, 9.0};
    toast::AlignedVector <double> qcheck1(4);
    toast::AlignedVector <double> qcheck2(4);

    toast::qa_normalize_inplace(n, 4, 4, q.data());

    toast::qa_slerp(n, ninterp, time.data(), targettime.data(), q.data(),
                    qinterp.data());

    for (size_t i = 0; i < 4; ++i) {
        qcheck1[i] = (2.0 / 3.0) * q[i] + (1.0 / 3.0) * q[4 + i];
        qcheck2[i] = 0.5 * (q[i] + q[4 + i]);
    }

    toast::qa_normalize_inplace(1, 4, 4, qcheck1.data());
    toast::qa_normalize_inplace(1, 4, 4, qcheck2.data());

    for (size_t i = 0; i < 4; ++i) {
        EXPECT_FLOAT_EQ(q[i], qinterp[i]);
        EXPECT_FLOAT_EQ(q[4 + i], qinterp[12 + i]);
        ASSERT_NEAR(qcheck1[i], qinterp[4 + i], 1.0e-4);
        ASSERT_NEAR(qcheck2[i], qinterp[8 + i], 1.0e-4);
    }
}

// Axis/angle to quaternion: a 30-degree rotation about Z must produce
// components sin(15deg) and cos(15deg) (half-angle form).
TEST_F(TOASTqarrayTest, rotation) {
    toast::AlignedVector <double> result(4);
    toast::AlignedVector <double> axis = {0.0, 0.0, 1.0};
    double ang = toast::PI * 30.0 / 180.0;

    toast::qa_from_axisangle(1, axis.data(), 1, &ang, result.data());

    EXPECT_FLOAT_EQ(0.0, result[0]);
    EXPECT_FLOAT_EQ(0.0, result[1]);
    EXPECT_FLOAT_EQ(::sin(15.0 * toast::PI / 180.0), result[2]);
    EXPECT_FLOAT_EQ(::cos(15.0 * toast::PI / 180.0), result[3]);
}

// Quaternion back to axis/angle (inverse of the previous test).
TEST_F(TOASTqarrayTest, toaxisangle) {
    double in[4] = {0.0, 0.0, ::sin(15.0 * toast::PI / 180.0),
                    ::cos(15.0 * toast::PI / 180.0)};
    double axis[3];
    double ang;
    double checkaxis[3] = {0.0, 0.0, 1.0};
    double checkang = 30.0 * toast::PI / 180.0;

    toast::qa_to_axisangle(1, in, axis, &ang);

    EXPECT_FLOAT_EQ(checkang, ang);
    for (size_t i = 0; i < 3; ++i) {
        EXPECT_FLOAT_EQ(checkaxis[i], axis[i]);
    }
}

// Quaternion exponential against precomputed values.
TEST_F(TOASTqarrayTest, exp) {
    toast::AlignedVector <double> result(8);
    toast::AlignedVector <double> check = {0.71473568, 0.71473568, 0.23824523,
                                           2.22961712, 0.71473568, 0.71473568,
                                           0.23824523, 2.22961712};

    toast::qa_exp(2, qeasy.data(), result.data());

    for (size_t i = 0; i < 8; ++i) {
        EXPECT_FLOAT_EQ(check[i], result[i]);
    }
}

// Quaternion logarithm against precomputed values.
TEST_F(TOASTqarrayTest, ln) {
    toast::AlignedVector <double> result(8);
    toast::AlignedVector <double> check = {0.31041794, 0.31041794, 0.10347265,
                                           0.0, 0.31041794, 0.31041794,
                                           0.10347265, 0.0};

    toast::qa_ln(2, qeasy.data(), result.data());

    for (size_t i = 0; i < 8; ++i) {
        EXPECT_FLOAT_EQ(check[i], result[i]);
    }
}

// Quaternion power for an integer (3.0) and fractional (0.1) exponent.
TEST_F(TOASTqarrayTest, pow) {
    toast::AlignedVector <double> p(2);
    toast::AlignedVector <double> result(8);
    toast::AlignedVector <double> check1 = {0.672, 0.672, 0.224, 0.216,
                                            0.672, 0.672, 0.224, 0.216};
    toast::AlignedVector <double> check2 = {0.03103127, 0.03103127, 0.01034376,
                                            0.99898305, 0.03103127, 0.03103127,
                                            0.01034376, 0.99898305};

    p[0] = 3.0;
    p[1] = 3.0;
    toast::qa_pow(2, 2, p.data(), qeasy.data(), result.data());

    for (size_t i = 0; i < 8; ++i) {
        EXPECT_FLOAT_EQ(check1[i], result[i]);
    }

    p[0] = 0.1;
    p[1] = 0.1;
    toast::qa_pow(2, 2, p.data(), qeasy.data(), result.data());

    for (size_t i = 0; i < 8; ++i) {
        EXPECT_FLOAT_EQ(check2[i], result[i]);
    }
}

// Quaternion to 3x3 rotation matrix; near-zero entries are skipped because
// EXPECT_FLOAT_EQ is meaningless at that magnitude.
TEST_F(TOASTqarrayTest, torotmat) {
    toast::AlignedVector <double> result(9);
    toast::AlignedVector <double> check = {8.00000000e-01, -2.77555756e-17,
                                           6.00000000e-01, 3.60000000e-01,
                                           8.00000000e-01, -4.80000000e-01,
                                           -4.80000000e-01, 6.00000000e-01,
                                           6.40000000e-01};

    toast::qa_to_rotmat(qeasy.data(), result.data());

    for (size_t i = 0; i < 9; ++i) {
        if (::fabs(check[i]) > 1.0e-12) {
            EXPECT_FLOAT_EQ(check[i], result[i]);
        }
    }
}

// Round trip quaternion -> rotation matrix -> quaternion.
TEST_F(TOASTqarrayTest, fromrotmat) {
    toast::AlignedVector <double> result(9);
    toast::AlignedVector <double> qresult(4);

    toast::qa_to_rotmat(qeasy.data(), result.data());
    toast::qa_from_rotmat(result.data(), qresult.data());

    for (size_t i = 0; i < 4; ++i) {
        EXPECT_FLOAT_EQ(qeasy[i], qresult[i]);
    }
}

// Quaternion from a pair of vectors: rotating X by 30 degrees in the XY
// plane must give the known Z-axis rotation quaternion.
TEST_F(TOASTqarrayTest, fromvectors) {
    toast::AlignedVector <double> result(4);
    toast::AlignedVector <double> check = {0.0, 0.0,
                                           ::sin(15.0 * toast::PI / 180.0),
                                           ::cos(15.0 * toast::PI / 180.0)};
    double ang = 30.0 * toast::PI / 180.0;
    toast::AlignedVector <double> v1 = {1.0, 0.0, 0.0};
    toast::AlignedVector <double> v2 = {::cos(ang), ::sin(ang), 0.0};

    toast::qa_from_vectors(1, v1.data(), v2.data(), result.data());

    for (size_t i = 0; i < 4; ++i) {
        EXPECT_FLOAT_EQ(check[i], result[i]);
    }
}

// Round trip (theta, phi, pa) pointing angles through quaternions, in both
// Healpix and IAU conventions.
TEST_F(TOASTqarrayTest, thetaphipa) {
    size_t n_theta = 5;
    size_t n_phi = 5;
    size_t n = n_theta * n_phi;

    double xaxis[3] = {1.0, 0.0, 0.0};
    double zaxis[3] = {0.0, 0.0, 1.0};

    toast::AlignedVector <double> theta(n);
    toast::AlignedVector <double> phi(n);
    toast::AlignedVector <double> pa(n);
    toast::AlignedVector <double> check_theta(n);
    toast::AlignedVector <double> check_phi(n);
    toast::AlignedVector <double> check_pa(n);
    toast::AlignedVector <double> quat(4 * n);

    // First run tests in Healpix convention...

    for (size_t i = 0; i < n_theta; ++i) {
        for (size_t j = 0; j < n_phi; ++j) {
            theta[i * n_phi + j] = (0.5 + (double)i) * toast::PI /
                                   (double)n_theta;
            phi[i * n_phi + j] = (double)j * toast::TWOPI / (double)n_phi;
            pa[i * n_phi + j] = (double)j * toast::TWOPI / (double)n_phi -
                                toast::PI;
        }
    }

    // convert to quaternions

    toast::qa_from_angles(n, theta.data(), phi.data(), pa.data(), quat.data(),
                          false);

    // check that the resulting quaternions rotate the Z and X
    // axes to the correct place.

    double dir[3];
    double orient[3];
    double check;

    for (size_t i = 0; i < n; ++i) {
        toast::qa_rotate(1, &(quat[4 * i]), 1, zaxis, dir);
        toast::qa_rotate(1, &(quat[4 * i]), 1, xaxis, orient);

        ASSERT_NEAR(toast::PI_2 - ::asin(dir[2]), theta[i], 1.0e-6);

        // Wrap the recovered azimuth into [0, 2*pi) and snap values within
        // float epsilon of the boundary to zero before comparing.
        check = ::atan2(dir[1], dir[0]);
        if (check < 0.0) {
            check += toast::TWOPI;
        }
        if (check >= toast::TWOPI) {
            check -= toast::TWOPI;
        }
        if (::fabs(check) < 2.0 * std::numeric_limits <float>::epsilon()) {
            check = 0.0;
        }
        if (::fabs(check - toast::TWOPI) <
            2.0 * std::numeric_limits <float>::epsilon()) {
            check = 0.0;
        }

        ASSERT_NEAR(check, phi[i], 1.0e-6);

        check = ::atan2(
            orient[0] * dir[1] - orient[1] * dir[0],
            -(orient[0] * dir[2] * dir[0]) - (orient[1] * dir[2] * dir[1]) +
            (orient[2] * (dir[0] * dir[0] + dir[1] * dir[1])));

        ASSERT_NEAR(check, pa[i], 1.0e-6);
    }

    toast::qa_to_angles(n, quat.data(), check_theta.data(), check_phi.data(),
                        check_pa.data(), false);

    for (size_t i = 0; i < n; ++i) {
        ASSERT_NEAR(theta[i], check_theta[i], 1.0e-6);
        check = check_phi[i];
        if (check < 0.0) {
            check += toast::TWOPI;
        }
        if (check >= toast::TWOPI) {
            check -= toast::TWOPI;
        }
        if (::fabs(check) < 2.0 * std::numeric_limits <float>::epsilon()) {
            check = 0.0;
        }
        if (::fabs(check - toast::TWOPI) <
            2.0 * std::numeric_limits <float>::epsilon()) {
            check = 0.0;
        }
        ASSERT_NEAR(phi[i], check, 1.0e-6);
        ASSERT_NEAR(pa[i], check_pa[i], 1.0e-6);
    }

    toast::qa_to_position(n, quat.data(), check_theta.data(),
                          check_phi.data());

    for (size_t i = 0; i < n; ++i) {
        ASSERT_NEAR(theta[i], check_theta[i], 1.0e-6);
        check = check_phi[i];
        if (check < 0.0) {
            check += toast::TWOPI;
        }
        if (check >= toast::TWOPI) {
            check -= toast::TWOPI;
        }
        if (::fabs(check) < 2.0 * std::numeric_limits <float>::epsilon()) {
            check = 0.0;
        }
        if (::fabs(check - toast::TWOPI) <
            2.0 * std::numeric_limits <float>::epsilon()) {
            check = 0.0;
        }
        ASSERT_NEAR(phi[i], check, 1.0e-6);
    }

    // Now run tests in IAU convention...

    for (size_t i = 0; i < n_theta; ++i) {
        for (size_t j = 0; j < n_phi; ++j) {
            theta[i * n_phi + j] = (0.5 + (double)i) * toast::PI /
                                   (double)n_theta;
            phi[i * n_phi + j] = (double)j * toast::TWOPI / (double)n_phi;
            pa[i * n_phi + j] = -(double)j * toast::TWOPI / (double)n_phi +
                                toast::PI;
        }
    }

    // convert to quaternions

    toast::qa_from_angles(n, theta.data(), phi.data(), pa.data(), quat.data(),
                          true);

    // check that the resulting quaternions rotate the Z and X
    // axes to the correct place.

    for (size_t i = 0; i < n; ++i) {
        toast::qa_rotate(1, &(quat[4 * i]), 1, zaxis, dir);
        toast::qa_rotate(1, &(quat[4 * i]), 1, xaxis, orient);

        ASSERT_NEAR(toast::PI_2 - ::asin(dir[2]), theta[i], 1.0e-6);

        check = ::atan2(dir[1], dir[0]);
        if (check < 0.0) {
            check += toast::TWOPI;
        }
        if (check >= toast::TWOPI) {
            check -= toast::TWOPI;
        }
        if (::fabs(check) < 2.0 * std::numeric_limits <float>::epsilon()) {
            check = 0.0;
        }
        if (::fabs(check - toast::TWOPI) <
            2.0 * std::numeric_limits <float>::epsilon()) {
            check = 0.0;
        }

        ASSERT_NEAR(check, phi[i], 1.0e-6);

        // IAU position angle has the opposite sign convention.
        check = -::atan2(
            orient[0] * dir[1] - orient[1] * dir[0],
            -(orient[0] * dir[2] * dir[0]) - (orient[1] * dir[2] * dir[1]) +
            (orient[2] * (dir[0] * dir[0] + dir[1] * dir[1])));

        if (::fabs(::fabs(check - pa[i]) - toast::TWOPI) <
            std::numeric_limits <float>::epsilon()) {
            // we are at the same angle, just with 2PI rotation.
        } else if (::fabs(::fabs(pa[i] - check) - toast::TWOPI) <
                   std::numeric_limits <float>::epsilon()) {
            // we are at the same angle, just with 2PI rotation.
} else { ASSERT_NEAR(check, pa[i], 1.0e-6); } } toast::qa_to_angles(n, quat.data(), check_theta.data(), check_phi.data(), check_pa.data(), true); for (size_t i = 0; i < n; ++i) { ASSERT_NEAR(theta[i], check_theta[i], 1.0e-6); check = check_phi[i]; if (check < 0.0) { check += toast::TWOPI; } if (check >= toast::TWOPI) { check -= toast::TWOPI; } if (::fabs(check) < 2.0 * std::numeric_limits <float>::epsilon()) { check = 0.0; } if (::fabs(check - toast::TWOPI) < 2.0 * std::numeric_limits <float>::epsilon()) { check = 0.0; } ASSERT_NEAR(phi[i], check, 1.0e-6); ASSERT_NEAR(pa[i], check_pa[i], 1.0e-6); } }
// Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2020 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <txmempool.h> #include <consensus/consensus.h> #include <consensus/tx_verify.h> #include <consensus/validation.h> #include <optional.h> #include <validation.h> #include <policy/policy.h> #include <policy/fees.h> #include <policy/settings.h> #include <reverse_iterator.h> #include <util/system.h> #include <util/moneystr.h> #include <util/time.h> #include <validationinterface.h> #include <insight/insight.h> #include <anon.h> #include <chainparams.h> CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& _tx, const CAmount& _nFee, int64_t _nTime, unsigned int _entryHeight, bool _spendsCoinbase, int64_t _sigOpsCost, LockPoints lp) : tx(_tx), nFee(_nFee), nTxWeight(GetTransactionWeight(*tx)), nUsageSize(RecursiveDynamicUsage(tx)), nTime(_nTime), entryHeight(_entryHeight), spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp), m_epoch(0) { nCountWithDescendants = 1; nSizeWithDescendants = GetTxSize(); nModFeesWithDescendants = nFee; feeDelta = 0; nCountWithAncestors = 1; nSizeWithAncestors = GetTxSize(); nModFeesWithAncestors = nFee; nSigOpCostWithAncestors = sigOpCost; } void CTxMemPoolEntry::UpdateFeeDelta(int64_t newFeeDelta) { nModFeesWithDescendants += newFeeDelta - feeDelta; nModFeesWithAncestors += newFeeDelta - feeDelta; feeDelta = newFeeDelta; } void CTxMemPoolEntry::UpdateLockPoints(const LockPoints& lp) { lockPoints = lp; } size_t CTxMemPoolEntry::GetTxSize() const { return GetVirtualTransactionSize(nTxWeight, sigOpCost); } // Update the given tx for any in-mempool descendants. // Assumes that CTxMemPool::m_children is correct for the given tx and all // descendants. 
/**
 * Recompute the descendant statistics of updateIt and the ancestor
 * statistics of each of its in-mempool descendants.
 *
 * @param updateIt           entry whose descendant set is being (re)built
 * @param cachedDescendants  memoized descendant sets from earlier calls in the
 *                           same batch; read to short-circuit traversal and
 *                           extended with updateIt's own set
 * @param setExclude         txids whose contribution is already accounted for
 *                           (the re-added block transactions) and must not be
 *                           counted again
 */
void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set<uint256> &setExclude)
{
    // Breadth-style worklist traversal: stageEntries holds children still to
    // visit, descendants accumulates everything reachable.
    CTxMemPoolEntry::Children stageEntries, descendants;
    stageEntries = updateIt->GetMemPoolChildrenConst();

    while (!stageEntries.empty()) {
        const CTxMemPoolEntry& descendant = *stageEntries.begin();
        descendants.insert(descendant);
        stageEntries.erase(descendant);
        const CTxMemPoolEntry::Children& children = descendant.GetMemPoolChildrenConst();
        for (const CTxMemPoolEntry& childEntry : children) {
            cacheMap::iterator cacheIt = cachedDescendants.find(mapTx.iterator_to(childEntry));
            if (cacheIt != cachedDescendants.end()) {
                // We've already calculated this one, just add the entries for this set
                // but don't traverse again.
                for (txiter cacheEntry : cacheIt->second) {
                    descendants.insert(*cacheEntry);
                }
            } else if (!descendants.count(childEntry)) {
                // Schedule for later processing
                stageEntries.insert(childEntry);
            }
        }
    }
    // descendants now contains all in-mempool descendants of updateIt.
    // Update and add to cached descendant map
    int64_t modifySize = 0;
    CAmount modifyFee = 0;
    int64_t modifyCount = 0;
    for (const CTxMemPoolEntry& descendant : descendants) {
        if (!setExclude.count(descendant.GetTx().GetHash())) {
            modifySize += descendant.GetTxSize();
            modifyFee += descendant.GetModifiedFee();
            modifyCount++;
            cachedDescendants[updateIt].insert(mapTx.iterator_to(descendant));
            // Update ancestor state for each descendant: each one gains
            // updateIt (size/fee/sigops, count of 1) as a new ancestor.
            mapTx.modify(mapTx.iterator_to(descendant),
                         update_ancestor_state(updateIt->GetTxSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCost()));
        }
    }
    // Finally fold the accumulated (non-excluded) totals into updateIt's own
    // descendant statistics.
    mapTx.modify(updateIt, update_descendant_state(modifySize, modifyFee, modifyCount));
}

// vHashesToUpdate is the set of transaction hashes from a disconnected block
// which has been re-added to the mempool.
// for each entry, look for descendants that are outside vHashesToUpdate, and
// add fee/size information for such descendants to the parent.
// for each such descendant, also update the ancestor state to include the parent.
/**
 * Re-link and re-account transactions that were returned to the mempool by a
 * block disconnect. Caller must hold cs (asserted below).
 */
void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate)
{
    AssertLockHeld(cs);
    // For each entry in vHashesToUpdate, store the set of in-mempool, but not
    // in-vHashesToUpdate transactions, so that we don't have to recalculate
    // descendants when we come across a previously seen entry.
    cacheMap mapMemPoolDescendantsToUpdate;

    // Use a set for lookups into vHashesToUpdate (these entries are already
    // accounted for in the state of their ancestors)
    std::set<uint256> setAlreadyIncluded(vHashesToUpdate.begin(), vHashesToUpdate.end());

    // Iterate in reverse, so that whenever we are looking at a transaction
    // we are sure that all in-mempool descendants have already been processed.
    // This maximizes the benefit of the descendant cache and guarantees that
    // CTxMemPool::m_children will be updated, an assumption made in
    // UpdateForDescendants.
    for (const uint256 &hash : reverse_iterate(vHashesToUpdate)) {
        // calculate children from mapNextTx
        txiter it = mapTx.find(hash);
        if (it == mapTx.end()) {
            continue;
        }
        // mapNextTx is keyed by spent outpoint; all spends of this tx's
        // outputs are contiguous starting at (hash, 0).
        auto iter = mapNextTx.lower_bound(COutPoint(hash, 0));
        // First calculate the children, and update CTxMemPool::m_children to
        // include them, and update their CTxMemPoolEntry::m_parents to include this tx.
        // we cache the in-mempool children to avoid duplicate updates
        {
            const auto epoch = GetFreshEpoch();
            for (; iter != mapNextTx.end() && iter->first->hash == hash; ++iter) {
                const uint256 &childHash = iter->second->GetHash();
                txiter childIter = mapTx.find(childHash);
                assert(childIter != mapTx.end());
                // We can skip updating entries we've encountered before or that
                // are in the block (which are already accounted for).
                if (!visited(childIter) && !setAlreadyIncluded.count(childHash)) {
                    UpdateChild(it, childIter, true);
                    UpdateParent(childIter, it, true);
                }
            }
        } // release epoch guard for UpdateForDescendants
        UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded);
    }
}

/**
 * Collect all in-mempool ancestors of entry into setAncestors, enforcing the
 * package count/size limits. Returns false (with errString set) when a limit
 * is exceeded. With fSearchForParents=true, parents are discovered by looking
 * up each input's prevout; otherwise entry must already be in the mempool and
 * its cached parent links are used.
 */
bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntries &setAncestors,
                                           uint64_t limitAncestorCount, uint64_t limitAncestorSize,
                                           uint64_t limitDescendantCount, uint64_t limitDescendantSize,
                                           std::string &errString, bool fSearchForParents /* = true */) const
{
    CTxMemPoolEntry::Parents staged_ancestors;
    const CTransaction &tx = entry.GetTx();

    if (fSearchForParents) {
        // Get parents of this transaction that are in the mempool
        // GetMemPoolParents() is only valid for entries in the mempool, so we
        // iterate mapTx to find parents.
        for (unsigned int i = 0; i < tx.vin.size(); i++) {
            // Anon (RingCT) inputs have no identifiable prevout to look up.
            if (tx.vin[i].IsAnonInput()) {
                continue;
            }
            Optional<txiter> piter = GetIter(tx.vin[i].prevout.hash);
            if (piter) {
                staged_ancestors.insert(**piter);
                if (staged_ancestors.size() + 1 > limitAncestorCount) {
                    errString = strprintf("too many unconfirmed parents [limit: %u]", limitAncestorCount);
                    return false;
                }
            }
        }
    } else {
        // If we're not searching for parents, we require this to be an
        // entry in the mempool already.
txiter it = mapTx.iterator_to(entry); staged_ancestors = it->GetMemPoolParentsConst(); } size_t totalSizeWithAncestors = entry.GetTxSize(); while (!staged_ancestors.empty()) { const CTxMemPoolEntry& stage = staged_ancestors.begin()->get(); txiter stageit = mapTx.iterator_to(stage); setAncestors.insert(stageit); staged_ancestors.erase(stage); totalSizeWithAncestors += stageit->GetTxSize(); if (stageit->GetSizeWithDescendants() + entry.GetTxSize() > limitDescendantSize) { errString = strprintf("exceeds descendant size limit for tx %s [limit: %u]", stageit->GetTx().GetHash().ToString(), limitDescendantSize); return false; } else if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) { errString = strprintf("too many descendants for tx %s [limit: %u]", stageit->GetTx().GetHash().ToString(), limitDescendantCount); return false; } else if (totalSizeWithAncestors > limitAncestorSize) { errString = strprintf("exceeds ancestor size limit [limit: %u]", limitAncestorSize); return false; } const CTxMemPoolEntry::Parents& parents = stageit->GetMemPoolParentsConst(); for (const CTxMemPoolEntry& parent : parents) { txiter parent_it = mapTx.iterator_to(parent); // If this is a new ancestor, add it. if (setAncestors.count(parent_it) == 0) { staged_ancestors.insert(parent); } if (staged_ancestors.size() + setAncestors.size() + 1 > limitAncestorCount) { errString = strprintf("too many unconfirmed ancestors [limit: %u]", limitAncestorCount); return false; } } } return true; } void CTxMemPool::UpdateAncestorsOf(bool add, txiter it, setEntries &setAncestors) { CTxMemPoolEntry::Parents parents = it->GetMemPoolParents(); // add or remove this tx as a child of each parent for (const CTxMemPoolEntry& parent : parents) { UpdateChild(mapTx.iterator_to(parent), it, add); } const int64_t updateCount = (add ? 
1 : -1);
    // (continuation of UpdateAncestorsOf) Apply +/- this entry's size and
    // modified fee to the descendant statistics of every ancestor.
    const int64_t updateSize = updateCount * it->GetTxSize();
    const CAmount updateFee = updateCount * it->GetModifiedFee();
    for (txiter ancestorIt : setAncestors) {
        mapTx.modify(ancestorIt, update_descendant_state(updateSize, updateFee, updateCount));
    }
}

/**
 * Fold the totals of setAncestors (assumed to be the complete in-mempool
 * ancestor set) into the ancestor statistics of entry `it`.
 */
void CTxMemPool::UpdateEntryForAncestors(txiter it, const setEntries &setAncestors)
{
    int64_t updateCount = setAncestors.size();
    int64_t updateSize = 0;
    CAmount updateFee = 0;
    int64_t updateSigOpsCost = 0;
    for (txiter ancestorIt : setAncestors) {
        updateSize += ancestorIt->GetTxSize();
        updateFee += ancestorIt->GetModifiedFee();
        updateSigOpsCost += ancestorIt->GetSigOpCost();
    }
    mapTx.modify(it, update_ancestor_state(updateSize, updateFee, updateCount, updateSigOpsCost));
}

/**
 * Remove `it` from the parent links of each of its direct in-mempool
 * children (the child->parent edge only; statistics are handled elsewhere).
 */
void CTxMemPool::UpdateChildrenForRemoval(txiter it)
{
    const CTxMemPoolEntry::Children& children = it->GetMemPoolChildrenConst();
    for (const CTxMemPoolEntry& updateIt : children) {
        UpdateParent(mapTx.iterator_to(updateIt), it, false);
    }
}

/**
 * Update ancestor/descendant bookkeeping for a batch of entries about to be
 * removed. When updateDescendants is true (e.g. block confirmation), the
 * ancestor statistics of all remaining descendants are decremented too.
 */
void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove, bool updateDescendants)
{
    // For each entry, walk back all ancestors and decrement size associated with this
    // transaction
    const uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
    if (updateDescendants) {
        // updateDescendants should be true whenever we're not recursively
        // removing a tx and all its descendants, eg when a transaction is
        // confirmed in a block.
        // Here we only update statistics and not data in CTxMemPool::Parents
        // and CTxMemPoolEntry::Children (which we need to preserve until we're
        // finished with all operations that need to traverse the mempool).
for (txiter removeIt : entriesToRemove) { setEntries setDescendants; CalculateDescendants(removeIt, setDescendants); setDescendants.erase(removeIt); // don't update state for self int64_t modifySize = -((int64_t)removeIt->GetTxSize()); CAmount modifyFee = -removeIt->GetModifiedFee(); int modifySigOps = -removeIt->GetSigOpCost(); for (txiter dit : setDescendants) { mapTx.modify(dit, update_ancestor_state(modifySize, modifyFee, -1, modifySigOps)); } } } for (txiter removeIt : entriesToRemove) { setEntries setAncestors; const CTxMemPoolEntry &entry = *removeIt; std::string dummy; // Since this is a tx that is already in the mempool, we can call CMPA // with fSearchForParents = false. If the mempool is in a consistent // state, then using true or false should both be correct, though false // should be a bit faster. // However, if we happen to be in the middle of processing a reorg, then // the mempool can be in an inconsistent state. In this case, the set // of ancestors reachable via GetMemPoolParents()/GetMemPoolChildren() // will be the same as the set of ancestors whose packages include this // transaction, because when we add a new transaction to the mempool in // addUnchecked(), we assume it has no children, and in the case of a // reorg where that assumption is false, the in-mempool children aren't // linked to the in-block tx's until UpdateTransactionsFromBlock() is // called. // So if we're being called during a reorg, ie before // UpdateTransactionsFromBlock() has been called, then // GetMemPoolParents()/GetMemPoolChildren() will differ from the set of // mempool parents we'd calculate by searching, and it's important that // we use the cached notion of ancestor transactions as the set of // things to update for removal. CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); // Note that UpdateAncestorsOf severs the child links that point to // removeIt in the entries for the parents of removeIt. 
UpdateAncestorsOf(false, removeIt, setAncestors);
    }
    // After updating all the ancestor sizes, we can now sever the link between each
    // transaction being removed and any mempool children (ie, update CTxMemPoolEntry::m_parents
    // for each direct child of a transaction being removed).
    for (txiter removeIt : entriesToRemove) {
        UpdateChildrenForRemoval(removeIt);
    }
}

/**
 * Apply deltas to this entry's with-descendant aggregates. The asserts
 * enforce that the running totals never go non-positive (each entry always
 * counts itself).
 */
void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount)
{
    nSizeWithDescendants += modifySize;
    assert(int64_t(nSizeWithDescendants) > 0);
    nModFeesWithDescendants += modifyFee;
    nCountWithDescendants += modifyCount;
    assert(int64_t(nCountWithDescendants) > 0);
}

/**
 * Apply deltas to this entry's with-ancestor aggregates, including sig-op
 * cost. Same invariant as UpdateDescendantState for size/count.
 */
void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, CAmount modifyFee, int64_t modifyCount, int64_t modifySigOps)
{
    nSizeWithAncestors += modifySize;
    assert(int64_t(nSizeWithAncestors) > 0);
    nModFeesWithAncestors += modifyFee;
    nCountWithAncestors += modifyCount;
    assert(int64_t(nCountWithAncestors) > 0);
    nSigOpCostWithAncestors += modifySigOps;
    // NOTE(review): cast is to int, not int64_t as in the asserts above —
    // truncates if the total ever exceeds INT_MAX; confirm against upstream.
    assert(int(nSigOpCostWithAncestors) >= 0);
}

CTxMemPool::CTxMemPool(CBlockPolicyEstimator* estimator)
    : nTransactionsUpdated(0), minerPolicyEstimator(estimator), m_epoch(0), m_has_epoch_guard(false)
{
    _clear(); //lock free clear

    // Sanity checks off by default for performance, because otherwise
    // accepting transactions becomes O(N^2) where N is the number
    // of transactions in the pool
    nCheckFrequency = 0;
}

// True if some mempool transaction already spends this outpoint.
bool CTxMemPool::isSpent(const COutPoint& outpoint) const
{
    LOCK(cs);
    return mapNextTx.count(outpoint);
}

unsigned int CTxMemPool::GetTransactionsUpdated() const
{
    return nTransactionsUpdated;
}

void CTxMemPool::AddTransactionsUpdated(unsigned int n)
{
    nTransactionsUpdated += n;
}

void CTxMemPool::addUnchecked(const CTxMemPoolEntry &entry, setEntries &setAncestors, bool validFeeEstimate)
{
    // Add to memory pool without checking anything.
    // Used by AcceptToMemoryPool(), which DOES do
    // all the appropriate checks.
indexed_transaction_set::iterator newit = mapTx.insert(entry).first; // Update transaction for any feeDelta created by PrioritiseTransaction // TODO: refactor so that the fee delta is calculated before inserting // into mapTx. CAmount delta{0}; ApplyDelta(entry.GetTx().GetHash(), delta); if (delta) { mapTx.modify(newit, update_fee_delta(delta)); } // Update cachedInnerUsage to include contained transaction's usage. // (When we update the entry for in-mempool parents, memory usage will be // further updated.) cachedInnerUsage += entry.DynamicMemoryUsage(); const CTransaction& tx = newit->GetTx(); std::set<uint256> setParentTransactions; for (unsigned int i = 0; i < tx.vin.size(); i++) { if (tx.vin[i].IsAnonInput()) continue; mapNextTx.insert(std::make_pair(&tx.vin[i].prevout, &tx)); setParentTransactions.insert(tx.vin[i].prevout.hash); } // Don't bother worrying about child transactions of this one. // Normal case of a new transaction arriving is that there can't be any // children, because such children would be orphans. // An exception to that is if a transaction enters that used to be in a block. // In that case, our disconnect block logic will call UpdateTransactionsFromBlock // to clean up the mess we're leaving here. 
// Update ancestors with information about this tx for (const auto& pit : GetIterSet(setParentTransactions)) { UpdateParent(newit, pit, true); } UpdateAncestorsOf(true, newit, setAncestors); UpdateEntryForAncestors(newit, setAncestors); nTransactionsUpdated++; totalTxSize += entry.GetTxSize(); if (minerPolicyEstimator) {minerPolicyEstimator->processTransaction(entry, validFeeEstimate);} vTxHashes.emplace_back(tx.GetWitnessHash(), newit); newit->vTxHashesIdx = vTxHashes.size() - 1; } void CTxMemPool::addAddressIndex(const CTxMemPoolEntry &entry, const CCoinsViewCache &view) { LOCK(cs); const CTransaction& tx = entry.GetTx(); if (!tx.IsParticlVersion()) return; std::vector<CMempoolAddressDeltaKey> inserted; uint256 txhash = tx.GetHash(); for (unsigned int j = 0; j < tx.vin.size(); j++) { const CTxIn input = tx.vin[j]; if (input.IsAnonInput()) { continue; } const Coin &coin = view.AccessCoin(input.prevout); // prevout should only ever be OUTPUT_STANDARD or OUTPUT_CT assert(coin.nType == OUTPUT_STANDARD || coin.nType == OUTPUT_CT); std::vector<uint8_t> hashBytes; const CScript *pScript = &coin.out.scriptPubKey; int scriptType = 0; CAmount nValue = coin.nType == OUTPUT_CT ? 0 : coin.out.nValue; if (!ExtractIndexInfo(pScript, scriptType, hashBytes) || scriptType == 0) { continue; } CMempoolAddressDeltaKey key(scriptType, uint256(hashBytes.data(), hashBytes.size()), txhash, j, 1); CMempoolAddressDelta delta(count_seconds(entry.GetTime()), nValue * -1, input.prevout.hash, input.prevout.n); mapAddress.insert(std::make_pair(key, delta)); inserted.push_back(key); } for (unsigned int k = 0; k < tx.vpout.size(); k++) { const CTxOutBase *out = tx.vpout[k].get(); if (!out->IsType(OUTPUT_STANDARD) && !out->IsType(OUTPUT_CT)) { continue; } const CScript *pScript; if (!(pScript = out->GetPScriptPubKey())) { LogPrintf("ERROR: %s - expected script pointer.\n", __func__); continue; } CAmount nValue = out->IsType(OUTPUT_STANDARD) ? 
out->GetValue() : 0; std::vector<uint8_t> hashBytes; int scriptType = 0; if (!ExtractIndexInfo(pScript, scriptType, hashBytes) || scriptType == 0) continue; CMempoolAddressDeltaKey key(scriptType, uint256(hashBytes.data(), hashBytes.size()), txhash, k, 0); mapAddress.insert(std::make_pair(key, CMempoolAddressDelta(count_seconds(entry.GetTime()), nValue))); inserted.push_back(key); } mapAddressInserted.insert(std::make_pair(txhash, inserted)); } bool CTxMemPool::getAddressIndex(std::vector<std::pair<uint256, int> > &addresses, std::vector<std::pair<CMempoolAddressDeltaKey, CMempoolAddressDelta> > &results) const { LOCK(cs); for (std::vector<std::pair<uint256, int> >::iterator it = addresses.begin(); it != addresses.end(); it++) { addressDeltaMap::const_iterator ait = mapAddress.lower_bound(CMempoolAddressDeltaKey((*it).second, (*it).first)); while (ait != mapAddress.end() && (*ait).first.addressBytes == (*it).first && (*ait).first.type == (*it).second) { results.push_back(*ait); ait++; } } return true; } bool CTxMemPool::removeAddressIndex(const uint256 txhash) { LOCK(cs); addressDeltaMapInserted::iterator it = mapAddressInserted.find(txhash); if (it != mapAddressInserted.end()) { std::vector<CMempoolAddressDeltaKey> keys = (*it).second; for (std::vector<CMempoolAddressDeltaKey>::iterator mit = keys.begin(); mit != keys.end(); mit++) { mapAddress.erase(*mit); } mapAddressInserted.erase(it); } return true; } void CTxMemPool::addSpentIndex(const CTxMemPoolEntry &entry, const CCoinsViewCache &view) { LOCK(cs); const CTransaction& tx = entry.GetTx(); if (!tx.IsParticlVersion()) return; std::vector<CSpentIndexKey> inserted; uint256 txhash = tx.GetHash(); for (unsigned int j = 0; j < tx.vin.size(); j++) { const CTxIn input = tx.vin[j]; if (input.IsAnonInput()) { continue; } const Coin &coin = view.AccessCoin(input.prevout); assert(coin.nType == OUTPUT_STANDARD || coin.nType == OUTPUT_CT); std::vector<uint8_t> hashBytes; const CScript *pScript = &coin.out.scriptPubKey; int 
scriptType = 0; CAmount nValue = coin.nType == OUTPUT_CT ? -1 : coin.out.nValue; if (!ExtractIndexInfo(pScript, scriptType, hashBytes)) { continue; } uint256 addressHash; if (scriptType != 0) { addressHash = uint256(hashBytes.data(), hashBytes.size()); } CSpentIndexKey key = CSpentIndexKey(input.prevout.hash, input.prevout.n); CSpentIndexValue value = CSpentIndexValue(txhash, j, -1, nValue, scriptType, addressHash); mapSpent.insert(std::make_pair(key, value)); inserted.push_back(key); } mapSpentInserted.insert(std::make_pair(txhash, inserted)); } bool CTxMemPool::getSpentIndex(const CSpentIndexKey &key, CSpentIndexValue &value) const { LOCK(cs); mapSpentIndex::const_iterator it = mapSpent.find(key); if (it != mapSpent.end()) { value = it->second; return true; } return false; } bool CTxMemPool::removeSpentIndex(const uint256 &txhash) { LOCK(cs); mapSpentIndexInserted::iterator it = mapSpentInserted.find(txhash); if (it != mapSpentInserted.end()) { std::vector<CSpentIndexKey> keys = (*it).second; for (std::vector<CSpentIndexKey>::iterator mit = keys.begin(); mit != keys.end(); mit++) { mapSpent.erase(*mit); } mapSpentInserted.erase(it); } return true; } void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) { // We increment mempool sequence value no matter removal reason // even if not directly reported below. uint64_t mempool_sequence = GetAndIncrementSequence(); if (reason != MemPoolRemovalReason::BLOCK) { // Notify clients that a transaction has been removed from the mempool // for any reason except being included in a block. Clients interested // in transactions included in blocks can subscribe to the BlockConnected // notification. 
GetMainSignals().TransactionRemovedFromMempool(it->GetSharedTx(), reason, mempool_sequence); } const uint256 hash = it->GetTx().GetHash(); for (const CTxIn& txin : it->GetTx().vin) { if (txin.IsAnonInput()) { RemoveKeyImagesFromMempool(hash, txin, *this); continue; } mapNextTx.erase(txin.prevout); } RemoveUnbroadcastTx(hash, true /* add logging because unchecked */ ); if (vTxHashes.size() > 1) { vTxHashes[it->vTxHashesIdx] = std::move(vTxHashes.back()); vTxHashes[it->vTxHashesIdx].second->vTxHashesIdx = it->vTxHashesIdx; vTxHashes.pop_back(); if (vTxHashes.size() * 2 < vTxHashes.capacity()) vTxHashes.shrink_to_fit(); } else vTxHashes.clear(); totalTxSize -= it->GetTxSize(); cachedInnerUsage -= it->DynamicMemoryUsage(); cachedInnerUsage -= memusage::DynamicUsage(it->GetMemPoolParentsConst()) + memusage::DynamicUsage(it->GetMemPoolChildrenConst()); mapTx.erase(it); nTransactionsUpdated++; if (minerPolicyEstimator) {minerPolicyEstimator->removeTx(hash, false);} removeAddressIndex(hash); removeSpentIndex(hash); } // Calculates descendants of entry that are not already in setDescendants, and adds to // setDescendants. Assumes entryit is already a tx in the mempool and CTxMemPoolEntry::m_children // is correct for tx and all descendants. // Also assumes that if an entry is in setDescendants already, then all // in-mempool descendants of it are already in setDescendants as well, so that we // can save time by not iterating over those entries. void CTxMemPool::CalculateDescendants(txiter entryit, setEntries& setDescendants) const { setEntries stage; if (setDescendants.count(entryit) == 0) { stage.insert(entryit); } // Traverse down the children of entry, only adding children that are not // accounted for in setDescendants already (because those children have either // already been walked, or will be walked in this iteration). 
while (!stage.empty()) {
        // (continuation of CalculateDescendants) Worklist traversal: pop an
        // entry, record it, and stage any children not yet seen.
        txiter it = *stage.begin();
        setDescendants.insert(it);
        stage.erase(it);

        const CTxMemPoolEntry::Children& children = it->GetMemPoolChildrenConst();
        for (const CTxMemPoolEntry& child : children) {
            txiter childiter = mapTx.iterator_to(child);
            if (!setDescendants.count(childiter)) {
                stage.insert(childiter);
            }
        }
    }
}

/**
 * Remove origTx and all of its in-mempool descendants. Works even when
 * origTx itself is no longer in the pool (reorg case): its in-pool children
 * are found via mapNextTx and removed instead.
 */
void CTxMemPool::removeRecursive(const CTransaction &origTx, MemPoolRemovalReason reason)
{
    // Remove transaction from memory pool
    AssertLockHeld(cs);
    setEntries txToRemove;
    txiter origit = mapTx.find(origTx.GetHash());
    if (origit != mapTx.end()) {
        txToRemove.insert(origit);
    } else {
        // When recursively removing but origTx isn't in the mempool
        // be sure to remove any children that are in the pool. This can
        // happen during chain re-orgs if origTx isn't re-accepted into
        // the mempool for any reason.
        for (unsigned int i = 0; i < origTx.GetNumVOuts(); i++) {
            auto it = mapNextTx.find(COutPoint(origTx.GetHash(), i));
            if (it == mapNextTx.end())
                continue;
            txiter nextit = mapTx.find(it->second->GetHash());
            assert(nextit != mapTx.end());
            txToRemove.insert(nextit);
        }
    }
    setEntries setAllRemoves;
    for (txiter it : txToRemove) {
        CalculateDescendants(it, setAllRemoves);
    }
    RemoveStaged(setAllRemoves, false, reason);
}

/**
 * After a reorg, drop entries that are no longer valid at the new tip:
 * non-final transactions, transactions failing BIP68 sequence locks, and
 * spenders of coinbase outputs that are now immature.
 */
void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags)
{
    // Remove transactions spending a coinbase which are now immature and no-longer-final transactions
    AssertLockHeld(cs);
    setEntries txToRemove;
    for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) {
        const CTransaction& tx = it->GetTx();
        LockPoints lp = it->GetLockPoints();
        bool validLP = TestLockPointValidity(&lp);
        if (!CheckFinalTx(tx, flags) || !CheckSequenceLocks(*this, tx, flags, &lp, validLP)) {
            // Note if CheckSequenceLocks fails the LockPoints may still be invalid
            // So it's critical that we remove the tx and not depend on the LockPoints.
txToRemove.insert(it); } else if (it->GetSpendsCoinbase()) { for (const CTxIn& txin : tx.vin) { if (txin.IsAnonInput()) continue; indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.hash); if (it2 != mapTx.end()) continue; const Coin &coin = pcoins->AccessCoin(txin.prevout); if (nCheckFrequency != 0) assert(!coin.IsSpent()); if (coin.IsSpent() || (coin.IsCoinBase() && ((signed long)nMemPoolHeight) - coin.nHeight < COINBASE_MATURITY)) { txToRemove.insert(it); break; } } } if (!validLP) { mapTx.modify(it, update_lock_points(lp)); } } setEntries setAllRemoves; for (txiter it : txToRemove) { CalculateDescendants(it, setAllRemoves); } RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG); } void CTxMemPool::removeConflicts(const CTransaction &tx) { // Remove transactions which depend on inputs of tx, recursively AssertLockHeld(cs); for (const auto &txin : tx.vin) { if (txin.IsAnonInput()) { uint32_t nInputs, nRingSize; txin.GetAnonInfo(nInputs, nRingSize); const std::vector<uint8_t> &vKeyImages = txin.scriptData.stack[0]; for (size_t k = 0; k < nInputs; ++k) { const CCmpPubKey &ki = *((CCmpPubKey*)&vKeyImages[k*33]); uint256 txhashKI; if (HaveKeyImage(ki, txhashKI)) { txiter origit = mapTx.find(txhashKI); if (origit != mapTx.end()) { const CTransaction& txConflict = origit->GetTx(); if (txConflict != tx) { if (LogAcceptCategory(BCLog::RINGCT)) LogPrintf("Clearing conflicting anon tx from mempool, removed:%s, tx:%s\n", txhashKI.ToString(), tx.GetHash().ToString()); ClearPrioritisation(txConflict.GetHash()); removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT); }; }; }; }; continue; }; auto it = mapNextTx.find(txin.prevout); if (it != mapNextTx.end()) { const CTransaction &txConflict = *it->second; if (txConflict != tx) { ClearPrioritisation(txConflict.GetHash()); removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT); } } } } /** * Called when a block is connected. Removes from mempool and updates the miner fee estimator. 
*/ void CTxMemPool::removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight) { AssertLockHeld(cs); std::vector<const CTxMemPoolEntry*> entries; for (const auto& tx : vtx) { uint256 hash = tx->GetHash(); indexed_transaction_set::iterator i = mapTx.find(hash); if (i != mapTx.end()) entries.push_back(&*i); } // Before the txs in the new block have been removed from the mempool, update policy estimates if (minerPolicyEstimator) {minerPolicyEstimator->processBlock(nBlockHeight, entries);} for (const auto& tx : vtx) { txiter it = mapTx.find(tx->GetHash()); if (it != mapTx.end()) { setEntries stage; stage.insert(it); RemoveStaged(stage, true, MemPoolRemovalReason::BLOCK); } removeConflicts(*tx); ClearPrioritisation(tx->GetHash()); } lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = true; } void CTxMemPool::_clear() { mapTx.clear(); mapNextTx.clear(); totalTxSize = 0; cachedInnerUsage = 0; lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = false; rollingMinimumFeeRate = 0; ++nTransactionsUpdated; } void CTxMemPool::clear() { LOCK(cs); _clear(); } static void CheckInputsAndUpdateCoins(const CTransaction& tx, CCoinsViewCache& mempoolDuplicate, const int64_t spendheight) { TxValidationState dummy_state; // Not used. CheckTxInputs() should always pass dummy_state.SetStateInfo(GetTime(), spendheight, Params().GetConsensus(), fParticlMode, false); CAmount txfee = 0; bool fCheckResult = tx.IsCoinBase() || Consensus::CheckTxInputs(tx, dummy_state, mempoolDuplicate, spendheight, txfee); //assert(fCheckResult) if (!fCheckResult) { // It's possible for CheckTxInputs to fail if block was disconnected at the same time as a hardfork LogPrintf("ERROR: %s CheckTxInputs failed! 
Reason: %s.\n", __func__, dummy_state.GetRejectReason()); return; } UpdateCoins(tx, mempoolDuplicate, std::numeric_limits<int>::max()); } void CTxMemPool::check(const CCoinsViewCache *pcoins) const { LOCK(cs); if (nCheckFrequency == 0) return; if (GetRand(std::numeric_limits<uint32_t>::max()) >= nCheckFrequency) return; LogPrint(BCLog::MEMPOOL, "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size()); uint64_t checkTotal = 0; uint64_t innerUsage = 0; CCoinsViewCache mempoolDuplicate(const_cast<CCoinsViewCache*>(pcoins)); const int64_t spendheight = GetSpendHeight(mempoolDuplicate); std::list<const CTxMemPoolEntry*> waitingOnDependants; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { unsigned int i = 0; checkTotal += it->GetTxSize(); innerUsage += it->DynamicMemoryUsage(); const CTransaction& tx = it->GetTx(); innerUsage += memusage::DynamicUsage(it->GetMemPoolParentsConst()) + memusage::DynamicUsage(it->GetMemPoolChildrenConst()); bool fDependsWait = false; CTxMemPoolEntry::Parents setParentCheck; for (const CTxIn &txin : tx.vin) { if (txin.IsAnonInput()) continue; // Check that every mempool transaction's inputs refer to available coins, or other mempool tx's. indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.hash); if (it2 != mapTx.end()) { const CTransaction& tx2 = it2->GetTx(); if (fParticlMode) assert(tx2.vpout.size() > txin.prevout.n && tx2.vpout[txin.prevout.n] != nullptr && (tx2.vpout[txin.prevout.n]->IsStandardOutput() || tx2.vpout[txin.prevout.n]->IsType(OUTPUT_CT))); else assert(tx2.vout.size() > txin.prevout.n && !tx2.vout[txin.prevout.n].IsNull()); fDependsWait = true; setParentCheck.insert(*it2); } else { assert(pcoins->HaveCoin(txin.prevout)); } // Check whether its inputs are marked in mapNextTx. 
auto it3 = mapNextTx.find(txin.prevout); assert(it3 != mapNextTx.end()); assert(it3->first == &txin.prevout); assert(it3->second == &tx); i++; } auto comp = [](const CTxMemPoolEntry& a, const CTxMemPoolEntry& b) -> bool { return a.GetTx().GetHash() == b.GetTx().GetHash(); }; assert(setParentCheck.size() == it->GetMemPoolParentsConst().size()); assert(std::equal(setParentCheck.begin(), setParentCheck.end(), it->GetMemPoolParentsConst().begin(), comp)); // Verify ancestor state is correct. setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits<uint64_t>::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); uint64_t nCountCheck = setAncestors.size() + 1; uint64_t nSizeCheck = it->GetTxSize(); CAmount nFeesCheck = it->GetModifiedFee(); int64_t nSigOpCheck = it->GetSigOpCost(); for (txiter ancestorIt : setAncestors) { nSizeCheck += ancestorIt->GetTxSize(); nFeesCheck += ancestorIt->GetModifiedFee(); nSigOpCheck += ancestorIt->GetSigOpCost(); } assert(it->GetCountWithAncestors() == nCountCheck); assert(it->GetSizeWithAncestors() == nSizeCheck); assert(it->GetSigOpCostWithAncestors() == nSigOpCheck); assert(it->GetModFeesWithAncestors() == nFeesCheck); // Check children against mapNextTx CTxMemPoolEntry::Children setChildrenCheck; auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetHash(), 0)); uint64_t child_sizes = 0; for (; iter != mapNextTx.end() && iter->first->hash == it->GetTx().GetHash(); ++iter) { txiter childit = mapTx.find(iter->second->GetHash()); assert(childit != mapTx.end()); // mapNextTx points to in-mempool transactions if (setChildrenCheck.insert(*childit).second) { child_sizes += childit->GetTxSize(); } } assert(setChildrenCheck.size() == it->GetMemPoolChildrenConst().size()); assert(std::equal(setChildrenCheck.begin(), setChildrenCheck.end(), it->GetMemPoolChildrenConst().begin(), comp)); // Also check to make sure size is greater than sum with immediate children. 
// just a sanity check, not definitive that this calc is correct... assert(it->GetSizeWithDescendants() >= child_sizes + it->GetTxSize()); if (fDependsWait) waitingOnDependants.push_back(&(*it)); else { CheckInputsAndUpdateCoins(tx, mempoolDuplicate, spendheight); } } unsigned int stepsSinceLastRemove = 0; while (!waitingOnDependants.empty()) { const CTxMemPoolEntry* entry = waitingOnDependants.front(); waitingOnDependants.pop_front(); if (!mempoolDuplicate.HaveInputs(entry->GetTx())) { waitingOnDependants.push_back(entry); stepsSinceLastRemove++; assert(stepsSinceLastRemove < waitingOnDependants.size()); } else { CheckInputsAndUpdateCoins(entry->GetTx(), mempoolDuplicate, spendheight); stepsSinceLastRemove = 0; } } for (auto it = mapNextTx.cbegin(); it != mapNextTx.cend(); it++) { uint256 hash = it->second->GetHash(); indexed_transaction_set::const_iterator it2 = mapTx.find(hash); const CTransaction& tx = it2->GetTx(); assert(it2 != mapTx.end()); assert(&tx == it->second); } assert(totalTxSize == checkTotal); assert(innerUsage == cachedInnerUsage); } bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb, bool wtxid) { LOCK(cs); indexed_transaction_set::const_iterator i = wtxid ? get_iter_from_wtxid(hasha) : mapTx.find(hasha); if (i == mapTx.end()) return false; indexed_transaction_set::const_iterator j = wtxid ? 
get_iter_from_wtxid(hashb) : mapTx.find(hashb); if (j == mapTx.end()) return true; uint64_t counta = i->GetCountWithAncestors(); uint64_t countb = j->GetCountWithAncestors(); if (counta == countb) { return CompareTxMemPoolEntryByScore()(*i, *j); } return counta < countb; } namespace { class DepthAndScoreComparator { public: bool operator()(const CTxMemPool::indexed_transaction_set::const_iterator& a, const CTxMemPool::indexed_transaction_set::const_iterator& b) { uint64_t counta = a->GetCountWithAncestors(); uint64_t countb = b->GetCountWithAncestors(); if (counta == countb) { return CompareTxMemPoolEntryByScore()(*a, *b); } return counta < countb; } }; } // namespace std::vector<CTxMemPool::indexed_transaction_set::const_iterator> CTxMemPool::GetSortedDepthAndScore() const { std::vector<indexed_transaction_set::const_iterator> iters; AssertLockHeld(cs); iters.reserve(mapTx.size()); for (indexed_transaction_set::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi) { iters.push_back(mi); } std::sort(iters.begin(), iters.end(), DepthAndScoreComparator()); return iters; } void CTxMemPool::queryHashes(std::vector<uint256>& vtxid) const { LOCK(cs); auto iters = GetSortedDepthAndScore(); vtxid.clear(); vtxid.reserve(mapTx.size()); for (auto it : iters) { vtxid.push_back(it->GetTx().GetHash()); } } static TxMempoolInfo GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) { return TxMempoolInfo{it->GetSharedTx(), it->GetTime(), it->GetFee(), it->GetTxSize(), it->GetModifiedFee() - it->GetFee()}; } std::vector<TxMempoolInfo> CTxMemPool::infoAll() const { LOCK(cs); auto iters = GetSortedDepthAndScore(); std::vector<TxMempoolInfo> ret; ret.reserve(mapTx.size()); for (auto it : iters) { ret.push_back(GetInfo(it)); } return ret; } CTransactionRef CTxMemPool::get(const uint256& hash) const { LOCK(cs); indexed_transaction_set::const_iterator i = mapTx.find(hash); if (i == mapTx.end()) return nullptr; return i->GetSharedTx(); } TxMempoolInfo CTxMemPool::info(const 
GenTxid& gtxid) const { LOCK(cs); indexed_transaction_set::const_iterator i = (gtxid.IsWtxid() ? get_iter_from_wtxid(gtxid.GetHash()) : mapTx.find(gtxid.GetHash())); if (i == mapTx.end()) return TxMempoolInfo(); return GetInfo(i); } TxMempoolInfo CTxMemPool::info(const uint256& txid) const { return info(GenTxid{false, txid}); } void CTxMemPool::PrioritiseTransaction(const uint256& hash, const CAmount& nFeeDelta) { { LOCK(cs); CAmount &delta = mapDeltas[hash]; delta += nFeeDelta; txiter it = mapTx.find(hash); if (it != mapTx.end()) { mapTx.modify(it, update_fee_delta(delta)); // Now update all ancestors' modified fees with descendants setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits<uint64_t>::max(); std::string dummy; CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false); for (txiter ancestorIt : setAncestors) { mapTx.modify(ancestorIt, update_descendant_state(0, nFeeDelta, 0)); } // Now update all descendants' modified fees with ancestors setEntries setDescendants; CalculateDescendants(it, setDescendants); setDescendants.erase(it); for (txiter descendantIt : setDescendants) { mapTx.modify(descendantIt, update_ancestor_state(0, nFeeDelta, 0, 0)); } ++nTransactionsUpdated; } } LogPrintf("PrioritiseTransaction: %s feerate += %s\n", hash.ToString(), FormatMoney(nFeeDelta)); } void CTxMemPool::ApplyDelta(const uint256& hash, CAmount &nFeeDelta) const { AssertLockHeld(cs); std::map<uint256, CAmount>::const_iterator pos = mapDeltas.find(hash); if (pos == mapDeltas.end()) return; const CAmount &delta = pos->second; nFeeDelta += delta; } void CTxMemPool::ClearPrioritisation(const uint256& hash) { AssertLockHeld(cs); mapDeltas.erase(hash); } bool CTxMemPool::HaveKeyImage(const CCmpPubKey &ki, uint256 &hash) const { LOCK(cs); std::map<CCmpPubKey, uint256>::const_iterator mi; mi = mapKeyImages.find(ki); if (mi != mapKeyImages.end()) { hash = mi->second; return true; }; return false; }; const CTransaction* 
CTxMemPool::GetConflictTx(const COutPoint& prevout) const { const auto it = mapNextTx.find(prevout); return it == mapNextTx.end() ? nullptr : it->second; } Optional<CTxMemPool::txiter> CTxMemPool::GetIter(const uint256& txid) const { auto it = mapTx.find(txid); if (it != mapTx.end()) return it; return Optional<txiter>{}; } CTxMemPool::setEntries CTxMemPool::GetIterSet(const std::set<uint256>& hashes) const { CTxMemPool::setEntries ret; for (const auto& h : hashes) { const auto mi = GetIter(h); if (mi) ret.insert(*mi); } return ret; } bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const { for (unsigned int i = 0; i < tx.vin.size(); i++) { if (tx.vin[i].IsAnonInput()) { continue; } if (exists(tx.vin[i].prevout.hash)) return false; } return true; } CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView* baseIn, const CTxMemPool& mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) { } bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const { // If an entry in the mempool exists, always return that one, as it's guaranteed to never // conflict with the underlying cache, and it cannot have pruned entries (as it contains full) // transactions. First checking the underlying cache risks returning a pruned entry instead. 
CTransactionRef ptx = mempool.get(outpoint.hash); if (ptx) { if (ptx->IsParticlVersion()) { if (outpoint.n < ptx->vpout.size()) { const CTxOutBase *out = ptx->vpout[outpoint.n].get(); const CScript *ps = out->GetPScriptPubKey(); if (!ps) // Data / anon output return false; CTxOut txout(0, *ps); if (out->IsType(OUTPUT_STANDARD)) txout.nValue = out->GetValue(); coin = Coin(txout, MEMPOOL_HEIGHT, false); if (out->IsType(OUTPUT_CT)) { coin.nType = OUTPUT_CT; coin.commitment = ((CTxOutCT*)out)->commitment; }; return true; }; return false; }; if (outpoint.n < ptx->vout.size()) { coin = Coin(ptx->vout[outpoint.n], MEMPOOL_HEIGHT, false); return true; } else { return false; } } return base->GetCoin(outpoint, coin); } size_t CTxMemPool::DynamicMemoryUsage() const { LOCK(cs); // Estimate the overhead of mapTx to be 15 pointers + an allocation, as no exact formula for boost::multi_index_contained is implemented. return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 15 * sizeof(void*)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + memusage::DynamicUsage(vTxHashes) + cachedInnerUsage; } void CTxMemPool::RemoveUnbroadcastTx(const uint256& txid, const bool unchecked) { LOCK(cs); if (m_unbroadcast_txids.erase(txid)) { LogPrint(BCLog::MEMPOOL, "Removed %i from set of unbroadcast txns%s\n", txid.GetHex(), (unchecked ? 
" before confirmation that txn was sent out" : "")); } } void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants, MemPoolRemovalReason reason) { AssertLockHeld(cs); UpdateForRemoveFromMempool(stage, updateDescendants); for (txiter it : stage) { removeUnchecked(it, reason); } } int CTxMemPool::Expire(std::chrono::seconds time) { AssertLockHeld(cs); indexed_transaction_set::index<entry_time>::type::iterator it = mapTx.get<entry_time>().begin(); setEntries toremove; while (it != mapTx.get<entry_time>().end() && it->GetTime() < time) { toremove.insert(mapTx.project<0>(it)); it++; } setEntries stage; for (txiter removeit : toremove) { CalculateDescendants(removeit, stage); } RemoveStaged(stage, false, MemPoolRemovalReason::EXPIRY); return stage.size(); } void CTxMemPool::addUnchecked(const CTxMemPoolEntry &entry, bool validFeeEstimate) { setEntries setAncestors; uint64_t nNoLimit = std::numeric_limits<uint64_t>::max(); std::string dummy; CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy); return addUnchecked(entry, setAncestors, validFeeEstimate); } void CTxMemPool::UpdateChild(txiter entry, txiter child, bool add) { AssertLockHeld(cs); CTxMemPoolEntry::Children s; if (add && entry->GetMemPoolChildren().insert(*child).second) { cachedInnerUsage += memusage::IncrementalDynamicUsage(s); } else if (!add && entry->GetMemPoolChildren().erase(*child)) { cachedInnerUsage -= memusage::IncrementalDynamicUsage(s); } } void CTxMemPool::UpdateParent(txiter entry, txiter parent, bool add) { AssertLockHeld(cs); CTxMemPoolEntry::Parents s; if (add && entry->GetMemPoolParents().insert(*parent).second) { cachedInnerUsage += memusage::IncrementalDynamicUsage(s); } else if (!add && entry->GetMemPoolParents().erase(*parent)) { cachedInnerUsage -= memusage::IncrementalDynamicUsage(s); } } CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const { LOCK(cs); if (!blockSinceLastRollingFeeBump || rollingMinimumFeeRate == 0) return 
CFeeRate(llround(rollingMinimumFeeRate)); int64_t time = GetTime(); if (time > lastRollingFeeUpdate + 10) { double halflife = ROLLING_FEE_HALFLIFE; if (DynamicMemoryUsage() < sizelimit / 4) halflife /= 4; else if (DynamicMemoryUsage() < sizelimit / 2) halflife /= 2; rollingMinimumFeeRate = rollingMinimumFeeRate / pow(2.0, (time - lastRollingFeeUpdate) / halflife); lastRollingFeeUpdate = time; if (rollingMinimumFeeRate < (double)incrementalRelayFee.GetFeePerK() / 2) { rollingMinimumFeeRate = 0; return CFeeRate(0); } } return std::max(CFeeRate(llround(rollingMinimumFeeRate)), incrementalRelayFee); } void CTxMemPool::trackPackageRemoved(const CFeeRate& rate) { AssertLockHeld(cs); if (rate.GetFeePerK() > rollingMinimumFeeRate) { rollingMinimumFeeRate = rate.GetFeePerK(); blockSinceLastRollingFeeBump = false; } } void CTxMemPool::TrimToSize(size_t sizelimit, std::vector<COutPoint>* pvNoSpendsRemaining) { AssertLockHeld(cs); unsigned nTxnRemoved = 0; CFeeRate maxFeeRateRemoved(0); while (!mapTx.empty() && DynamicMemoryUsage() > sizelimit) { indexed_transaction_set::index<descendant_score>::type::iterator it = mapTx.get<descendant_score>().begin(); // We set the new mempool min fee to the feerate of the removed set, plus the // "minimum reasonable fee rate" (ie some value under which we consider txn // to have 0 fee). This way, we don't allow txn to enter mempool with feerate // equal to txn which were removed with no block in between. 
CFeeRate removed(it->GetModFeesWithDescendants(), it->GetSizeWithDescendants()); removed += incrementalRelayFee; trackPackageRemoved(removed); maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed); setEntries stage; CalculateDescendants(mapTx.project<0>(it), stage); nTxnRemoved += stage.size(); std::vector<CTransaction> txn; if (pvNoSpendsRemaining) { txn.reserve(stage.size()); for (txiter iter : stage) txn.push_back(iter->GetTx()); } RemoveStaged(stage, false, MemPoolRemovalReason::SIZELIMIT); if (pvNoSpendsRemaining) { for (const CTransaction& tx : txn) { for (const CTxIn& txin : tx.vin) { if (txin.IsAnonInput()) continue; if (exists(txin.prevout.hash)) continue; pvNoSpendsRemaining->push_back(txin.prevout); } } } } if (maxFeeRateRemoved > CFeeRate(0)) { LogPrint(BCLog::MEMPOOL, "Removed %u txn, rolling minimum fee bumped to %s\n", nTxnRemoved, maxFeeRateRemoved.ToString()); } } uint64_t CTxMemPool::CalculateDescendantMaximum(txiter entry) const { // find parent with highest descendant count std::vector<txiter> candidates; setEntries counted; candidates.push_back(entry); uint64_t maximum = 0; while (candidates.size()) { txiter candidate = candidates.back(); candidates.pop_back(); if (!counted.insert(candidate).second) continue; const CTxMemPoolEntry::Parents& parents = candidate->GetMemPoolParentsConst(); if (parents.size() == 0) { maximum = std::max(maximum, candidate->GetCountWithDescendants()); } else { for (const CTxMemPoolEntry& i : parents) { candidates.push_back(mapTx.iterator_to(i)); } } } return maximum; } void CTxMemPool::GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) const { LOCK(cs); auto it = mapTx.find(txid); ancestors = descendants = 0; if (it != mapTx.end()) { ancestors = it->GetCountWithAncestors(); descendants = CalculateDescendantMaximum(it); } } bool CTxMemPool::IsLoaded() const { LOCK(cs); return m_is_loaded; } void CTxMemPool::SetIsLoaded(bool loaded) { LOCK(cs); m_is_loaded = loaded; } 
// Hand out a fresh epoch for visited-marking traversals over the mempool.
// The RAII guard bumps the epoch counter both on construction and on
// destruction so markings from this traversal can never leak into the next.
CTxMemPool::EpochGuard CTxMemPool::GetFreshEpoch() const
{
    return EpochGuard(*this);
}

CTxMemPool::EpochGuard::EpochGuard(const CTxMemPool& in) : pool(in)
{
    // Epoch guards must not nest: only one traversal may be live at a time.
    assert(!pool.m_has_epoch_guard);
    ++pool.m_epoch;
    pool.m_has_epoch_guard = true;
}

CTxMemPool::EpochGuard::~EpochGuard()
{
    // prevents stale results being used
    ++pool.m_epoch;
    pool.m_has_epoch_guard = false;
}

// Seed the txid hasher with two random 64-bit keys so the hash is not
// predictable across runs (hardens the hash maps against collision attacks).
SaltedTxidHasher::SaltedTxidHasher() :
    k0(GetRand(std::numeric_limits<uint64_t>::max())),
    k1(GetRand(std::numeric_limits<uint64_t>::max())) {}
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file ctc_loss.cc * \brief CPU Implementation of CTC Loss op */ #include "./ctc_loss-inl.h" #include "../../../3rdparty/ctc_include/detail/cpu_ctc.h" namespace mshadow { template <typename DType> ctcStatus_t compute_ctc_cost(const Tensor<cpu, 3, DType> activations, DType *costs, DType *grads, int *labels, int *label_lengths, int *data_lengths, void *workspace, bool isTraining, int blank_label) { int minibatch = static_cast<int>(activations.size(1)); int alphabet_size = static_cast<int>(activations.size(2)); mxnet_warpctc::CpuCTC<DType> ctc(alphabet_size, minibatch, workspace, blank_label); if (isTraining) { return ctc.cost_and_grad(activations.dptr_, grads, costs, labels, label_lengths, data_lengths); } else { return ctc.score_forward(activations.dptr_, costs, labels, label_lengths, data_lengths); } } } // namespace mshadow namespace mxnet { namespace op { DMLC_REGISTER_PARAMETER(CTCLossOpParam); NNVM_REGISTER_OP(CTCLoss) .add_alias("ctc_loss") .add_alias("_contrib_CTCLoss") .add_alias("_contrib_ctc_loss") .describe(R"code(Connectionist Temporal Classification Loss. .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. 
The shapes of the inputs and outputs: - **data**: `(sequence_length, batch_size, alphabet_size)` - **label**: `(batch_size, label_sequence_length)` - **out**: `(batch_size)` The `data` tensor consists of sequences of activation vectors (without applying softmax), with i-th channel in the last dimension corresponding to i-th label for i between 0 and alphabet_size-1 (i.e always 0-indexed). Alphabet size should include one additional value reserved for blank label. When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be reserved for blank label. ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. If a sequence of labels is shorter than *label_sequence_length*, use the special padding value at the end of the sequence to conform it to the correct length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. The resulting `label` tensor should be padded to be:: [[2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3]] When `blank_label` is ``"last"``, we can index the labels as `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. The resulting `label` tensor should be padded to be:: [[1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2]] ``out`` is a list of CTC loss values, one per example in the batch. See *Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks*, A. 
Graves *et al*. for more information on the definition and the algorithm. )code" ADD_FILELINE) .set_attr_parser(ParamParser<CTCLossOpParam>) .set_num_inputs(CTCLossOpNumInputs) .set_num_outputs(2) .set_attr<nnvm::FListInputNames>("FListInputNames", CTCLossOpListInputNames) .set_attr<nnvm::FListOutputNames>("FListOutputNAmes", [](const NodeAttrs& attrs) { return std::vector<std::string>{"out", "grad"}; }) .set_attr<nnvm::FNumVisibleOutputs>("FNumVisibleOutputs", [](const NodeAttrs& attrs) { return 1; }) .set_attr<mxnet::FInferShape>("FInferShape", CTCLossOpShape) .set_attr<nnvm::FInferType>("FInferType", CTCLossOpType) .set_attr<FInferStorageType>("FInferStorageType", CTCLossOpStorageType) .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs) { return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; }) .set_attr<FCompute>("FCompute<cpu>", CTCLossOpForward<cpu>) .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseOut{"_backward_ctc_loss"}) .add_argument("data", "NDArray-or-Symbol", "Input ndarray") .add_argument("label", "NDArray-or-Symbol", "Ground-truth labels for the loss.") .add_argument("data_lengths", "NDArray-or-Symbol", "Lengths of data for each of the samples. Only required " "when use_data_lengths is true.") .add_argument("label_lengths", "NDArray-or-Symbol", "Lengths of labels for each of the samples. Only required " "when use_label_lengths is true.") .add_arguments(CTCLossOpParam::__FIELDS__()); NNVM_REGISTER_OP(_backward_ctc_loss) .set_attr_parser(ParamParser<CTCLossOpParam>) .set_num_inputs(1) .set_num_outputs(CTCLossOpNumInputs) .set_attr<nnvm::TIsBackward>("TIsBackward", true) .set_attr<FCompute>("FCompute<cpu>", CTCLossOpBackward<cpu>); } // namespace op } // namespace mxnet
/**
@copyright Evgeny Sidorov 2020

Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)

*/

/****************************************************************************/
/** @file validator/validate.hpp
*
* Defines validate() helper.
*
*/
/****************************************************************************/

#ifndef DRACOSHA_VALIDATOR_VALIDATE_HPP
#define DRACOSHA_VALIDATOR_VALIDATE_HPP

#include <stdexcept>

#include <dracosha/validator/error.hpp>
#include <dracosha/validator/validators.hpp>
#include <dracosha/validator/adapters/default_adapter.hpp>
#include <dracosha/validator/adapters/reporting_adapter.hpp>
#include <dracosha/validator/adapters/prevalidation_adapter.hpp>

DRACOSHA_VALIDATOR_NAMESPACE_BEGIN

/**
 * @brief Implementation of a helper to invoke validate() as a single callable.
 */
struct validate_t
{
    /**
     * @brief Validate object with validator and put result to the last argument.
     * @param obj Object to validate.
     * @param validator Validator.
     * @param err Error to put validation result to.
     */
    template <typename ObjectT, typename ValidatorT>
    void operator() (
            ObjectT&& obj,
            ValidatorT&& validator,
            error& err
        ) const
    {
        err.set_value(validator.apply(std::forward<ObjectT>(obj)));
    }

    /**
     * @brief Validate object with validator and put validation result with error description to the last argument.
     * @param obj Object to validate.
     * @param validator Validator.
     * @param err Error to put validation result with textual description to.
     */
    template <typename ObjectT, typename ValidatorT>
    void operator() (
            ObjectT&& obj,
            ValidatorT&& validator,
            error_report& err
        ) const
    {
        err.reset();
        err.set_value(validator.apply(
                          make_reporting_adapter(
                              std::forward<ObjectT>(obj),
                              err._message
                          )
                      ));
    }

    /**
     * @brief Validate object with validator and throw validation_error if operation fails.
     * @param obj Object to validate.
     * @param validator Validator.
     *
     * @throws validation_error if validation fails.
     */
    template <typename ObjectT, typename ValidatorT>
    void operator() (
            ObjectT&& obj,
            ValidatorT&& validator
        ) const
    {
        error_report err;
        (*this)(std::forward<ObjectT>(obj),std::forward<ValidatorT>(validator),err);
        if (err)
        {
            throw validation_error(err);
        }
    }

    /**
     * @brief Pre-validate object's member with validator and put validation result with error description to the last argument.
     * @param member Path of the member to validate.
     * @param val Value the member is to be set to.
     * @param validator Validator.
     * @param err Error to put validation result with textual description to.
     */
    template <typename MemberT, typename ValueT, typename ValidatorT>
    void operator() (
            MemberT&& member,
            ValueT&& val,
            ValidatorT&& validator,
            error_report& err
        ) const
    {
        err.reset();
        err.set_value(validator.apply(
                          make_prevalidation_adapter(
                              std::forward<MemberT>(member),
                              std::forward<ValueT>(val),
                              err._message
                          )
                      ));
    }

    /**
     * @brief Pre-validate object's member with validator and throw validation_error if operation fails.
     * @param member Path of the member to validate.
     * @param val Value the member is to be set to.
     * @param validator Validator.
     *
     * @throws validation_error if validation fails.
     */
    template <typename MemberT, typename ValueT, typename ValidatorT>
    void operator() (
            MemberT&& member,
            ValueT&& val,
            ValidatorT&& validator
        ) const
    {
        error_report err;
        (*this)(std::forward<MemberT>(member),std::forward<ValueT>(val),std::forward<ValidatorT>(validator),err);
        if (err)
        {
            throw validation_error(err);
        }
    }
};

/**
 * @brief Helper to invoke validate() as a single callable.
 */
constexpr validate_t validate{};

DRACOSHA_VALIDATOR_NAMESPACE_END

#endif // DRACOSHA_VALIDATOR_VALIDATE_HPP
#include "gpuerrorlog.hpp"

#include <mutex>

#include <signal.h>

#include <windowregistry.hpp>

REGISTER_WINDOW_CLASS(GpuErrorLog, Menu::Tools, "gpu-error-log", "OpenGL Log")

// One captured OpenGL debug-callback message.
struct logentry
{
    GLenum source;
    GLenum type;
    GLuint id;
    GLenum severity;
    std::string message;
};

// Guards `entries`: the GL debug callback may be invoked from a driver thread.
static std::mutex mutex;
// Newest message first; capped at 1000 entries.
static std::vector<logentry> entries;

void APIENTRY GpuErrorLog::LogMessage(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam)
{
    (void)userParam;
    std::lock_guard<std::mutex> lock(mutex);

    logentry log;
    log.source = source;
    log.type = type;
    log.id = id;
    log.severity = severity;
    // Per GL_KHR_debug, a negative `length` means `message` is
    // null-terminated. The previous code passed the raw value to
    // std::string(ptr, count), where a negative GLsizei converts to an
    // enormous size_t and reads far past the buffer.
    if (length < 0)
        log.message = std::string(message);
    else
        log.message = std::string(message, static_cast<size_t>(length));

    fprintf(stderr, "[GL] %s\n", log.message.c_str());

    entries.insert(entries.begin(), std::move(log));
    if (entries.size() > 1000)
        entries.pop_back();

    /*
    if(breakOnNextError && severity == GL_DEBUG_SEVERITY_HIGH)
    {
#ifdef _MSC_VER
#define DEBUG_BREAK __debugbreak()
#else
        raise(SIGTRAP);
#endif
        breakOnNextError = false;
    }
    */
}

GpuErrorLog::GpuErrorLog() : Window("OpenGL Error Log")
{
}

GpuErrorLog::~GpuErrorLog()
{
}

void GpuErrorLog::OnUpdate()
{
    bool copyToClipboard = false;
    bool scrollToEnd = false;
    bool logEnabled = glIsEnabled(GL_DEBUG_OUTPUT);

    if(ImGui::BeginMenuBar())
    {
        if(ImGui::BeginMenu("Log"))
        {
            // if(ImGui::MenuItem("Copy to clipboard")) copyToClipboard = true;
            // if(ImGui::MenuItem("Scroll to end")) scrollToEnd = true;
            ImGui::MenuItem("Enabled", nullptr, &logEnabled);
            ImGui::Separator();
            if(ImGui::MenuItem("Close"))
                this->Close();
            ImGui::EndMenu();
        }
        ImGui::EndMenuBar();
    }

    // Only touch GL state when the menu toggle actually changed.
    if(logEnabled != glIsEnabled(GL_DEBUG_OUTPUT))
    {
        if(logEnabled)
            glEnable(GL_DEBUG_OUTPUT);
        else
            glDisable(GL_DEBUG_OUTPUT);
    }

    ImGui::BeginChild("ScrollingRegion", ImVec2(0,0), false, ImGuiWindowFlags_HorizontalScrollbar);
    // Each entry is drawn separately so its colour can depend on severity.
    // If the log ever grows large, see the ImGui demo console for
    // ImGuiListClipper-based clipping of the visible range.
    ImGui::PushStyleVar(ImGuiStyleVar_ItemSpacing, ImVec2(4,1)); // Tighten spacing
    if (copyToClipboard)
        ImGui::LogToClipboard();

    std::lock_guard<std::mutex> lock(mutex);
    for(auto const & log : entries)
    {
        ImVec4 col = ImVec4(1.0f,1.0f,1.0f,1.0f);
        // A better implementation may store a type per-item. For the sample let's just parse the text.
        // Colour the entry by GL severity:
        // red = high, yellow = medium, green = low, grey = notification.
        switch(log.severity)
        {
            case GL_DEBUG_SEVERITY_HIGH: col = ImColor(1.0f,0.4f,0.4f,1.0f); break;
            case GL_DEBUG_SEVERITY_MEDIUM: col = ImColor(0.8f,1.0f,0.0f,1.0f); break;
            case GL_DEBUG_SEVERITY_LOW: col = ImColor(0.4f,1.0f,0.0f,1.0f); break;
            case GL_DEBUG_SEVERITY_NOTIFICATION: col = ImColor(0.8f,0.8f,0.8f,1.0f); break;
        }
        // else if (strncmp(item, "# ", 2) == 0) col = ImColor(1.0f,0.78f,0.58f,1.0f);
        ImGui::PushStyleColor(ImGuiCol_Text, col);
        ImGui::TextWrapped("%s", log.message.c_str());
        ImGui::PopStyleColor();
    }
    if (copyToClipboard)
        ImGui::LogFinish();
    if (scrollToEnd)
        ImGui::SetScrollHereY();
    ImGui::PopStyleVar();
    ImGui::EndChild();
}
#pragma once #include <string> #include <memory> #define GLFW_INCLUDE_GLU #include <GL/glew.h> #include <GLFW/glfw3.h> #include <glm/glm.hpp> #include <glm/gtc/matrix_transform.hpp> #include <glm/gtc/noise.hpp> #include "MACGrid.hpp" #include "Camera.hpp" class Volume { public: Volume(std::shared_ptr<MACGrid> grids); ~Volume(); void initialize(); void update(); void draw() const; GLuint getProgramID() const; GLuint getVaoID() const; GLuint getCamPosID() const; GLuint getLightPosID() const; GLuint getLightIntensityID() const; GLuint getMatrixID() const; private: void initVAO(); void initShaders(); std::string ReadFile(const std::string &filename); std::shared_ptr<MACGrid> m_grids; // ID GLuint programID; GLuint vaoID; GLuint vboID; GLuint indexID; GLuint volumeTexID; GLuint cameraPosID; GLuint LightPosID; GLuint LightIntensityID; GLuint MatrixID; GLuint absorptionID; GLuint numID; GLuint ratioID; };
// Copyright (C) 2011 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.

// Implementation of the imglab metadata editor window: a dlib GUI that lets
// the user page through an image dataset, draw/edit labeled rectangles, and
// save the results back to the metadata XML file.

#include "metadata_editor.h"
#include <dlib/array.h>
#include <dlib/queue.h>
#include <dlib/static_set.h>
#include <dlib/misc_api.h>
#include <dlib/image_io.h>
#include <dlib/array2d.h>
#include <dlib/pixel.h>
#include <dlib/image_transforms.h>
#include <dlib/image_processing.h>
#include <sstream>
#include <ctime>

using namespace std;
using namespace dlib;

extern const char* VERSION;

// ----------------------------------------------------------------------------------------

// Constructor: loads the metadata file, populates the image list box, wires up
// all widget event handlers and menus, then sizes and centers the window.
metadata_editor::
metadata_editor(
    const std::string& filename_
) :
    mbar(*this),
    lb_images(*this),
    image_pos(0),
    display(*this),
    overlay_label_name(*this),
    overlay_label(*this),
    keyboard_jump_pos(0),
    last_keyboard_jump_pos_update(0)
{
    file metadata_file(filename_);
    filename = metadata_file.full_name();
    // Make our current directory be the one that contains the metadata file.  We
    // do this because that file might contain relative paths to the image files
    // we are supposed to be loading.
    set_current_dir(get_parent_directory(metadata_file).full_name());

    load_image_dataset_metadata(metadata, filename);

    // Fill the list box with one entry per image file name.
    dlib::array<std::string>::expand_1a files;
    files.resize(metadata.images.size());
    for (unsigned long i = 0; i < metadata.images.size(); ++i)
    {
        files[i] = metadata.images[i].filename;
    }
    lb_images.load(files);
    lb_images.enable_multiple_select();

    lb_images.set_click_handler(*this, &metadata_editor::on_lb_images_clicked);

    overlay_label_name.set_text("Next Label: ");
    overlay_label.set_width(200);

    // Hook the image display and label text field events into this object.
    display.set_image_clicked_handler(*this, &metadata_editor::on_image_clicked);
    display.set_overlay_rects_changed_handler(*this, &metadata_editor::on_overlay_rects_changed);
    display.set_overlay_rect_selected_handler(*this, &metadata_editor::on_overlay_rect_selected);
    overlay_label.set_text_modified_handler(*this, &metadata_editor::on_overlay_label_changed);

    // Build the File and Help menus.
    mbar.set_number_of_menus(2);
    mbar.set_menu_name(0,"File",'F');
    mbar.set_menu_name(1,"Help",'H');

    mbar.menu(0).add_menu_item(menu_item_text("Save",*this,&metadata_editor::file_save,'S'));
    mbar.menu(0).add_menu_item(menu_item_text("Save As",*this,&metadata_editor::file_save_as,'A'));
    mbar.menu(0).add_menu_item(menu_item_separator());
    mbar.menu(0).add_menu_item(menu_item_text("Remove Selected Images",*this,&metadata_editor::remove_selected_images,'R'));
    mbar.menu(0).add_menu_item(menu_item_separator());
    mbar.menu(0).add_menu_item(menu_item_text("Exit",static_cast<base_window&>(*this),&drawable_window::close_window,'x'));
    mbar.menu(1).add_menu_item(menu_item_text("About",*this,&metadata_editor::display_about,'A'));

    // set the size of this window.
    on_window_resized();
    load_image_and_set_size(0);
    on_window_resized();
    if (image_pos < lb_images.size() )
        lb_images.select(image_pos);

    // make sure the window is centered on the screen.
    unsigned long width, height;
    get_size(width, height);
    unsigned long screen_width, screen_height;
    get_display_size(screen_width, screen_height);
    set_pos((screen_width-width)/2, (screen_height-height)/2);

    show();
}

// ----------------------------------------------------------------------------------------

metadata_editor::
~metadata_editor(
)
{
    close_window();
}

// ----------------------------------------------------------------------------------------

// Registers a part name the user may assign via the rectangle context menu.
void metadata_editor::
add_labelable_part_name (
    const std::string& name
)
{
    display.add_labelable_part_name(name);
}

// ----------------------------------------------------------------------------------------

// File > Save: write the metadata back to the file it was loaded from.
void metadata_editor::
file_save()
{
    save_metadata_to_file(filename);
}

// ----------------------------------------------------------------------------------------

// Serializes the current dataset to the given path, reporting failures in a
// message box rather than propagating the exception to the GUI event loop.
void metadata_editor::
save_metadata_to_file (
    const std::string& file
)
{
    try
    {
        save_image_dataset_metadata(metadata, file);
    }
    catch (dlib::error& e)
    {
        message_box("Error saving file", e.what());
    }
}

// ----------------------------------------------------------------------------------------

// File > Save As: pop a file chooser, then save via save_metadata_to_file().
void metadata_editor::
file_save_as()
{
    save_file_box(*this, &metadata_editor::save_metadata_to_file);
}

// ----------------------------------------------------------------------------------------

// Removes every image currently selected in the list box from the dataset
// (in memory only; the file changes when the user saves), then reselects a
// nearby image.
void metadata_editor::
remove_selected_images()
{
    dlib::queue<unsigned long>::kernel_1a list;
    lb_images.get_selected(list);
    list.reset();
    unsigned long min_idx = lb_images.size();
    while (list.move_next())
    {
        lb_images.unselect(list.element());
        min_idx = std::min(min_idx, list.element());
    }

    // remove all the selected items from metadata.images
    dlib::static_set<unsigned long>::kernel_1a to_remove;
    to_remove.load(list);
    std::vector<dlib::image_dataset_metadata::image> images;
    for (unsigned long i = 0; i < metadata.images.size(); ++i)
    {
        if (to_remove.is_member(i) == false)
        {
            images.push_back(metadata.images[i]);
        }
    }
    images.swap(metadata.images);

    // reload metadata into lb_images
    dlib::array<std::string>::expand_1a files;
    files.resize(metadata.images.size());
    for (unsigned long i = 0; i < metadata.images.size(); ++i)
    {
        files[i] = metadata.images[i].filename;
    }
    lb_images.load(files);

    // Move the selection to the image just before the lowest removed index.
    if (min_idx != 0)
        min_idx--;
    select_image(min_idx);
}

// ----------------------------------------------------------------------------------------

// Lays out the child widgets: image list on the left, label field on top,
// image display filling the remainder.
void metadata_editor::
on_window_resized(
)
{
    drawable_window::on_window_resized();
    unsigned long width, height;
    get_size(width, height);

    lb_images.set_pos(0,mbar.bottom()+1);
    lb_images.set_size(180, height - mbar.height());

    overlay_label_name.set_pos(lb_images.right()+10, mbar.bottom() + (overlay_label.height()-overlay_label_name.height())/2+1);
    overlay_label.set_pos(overlay_label_name.right(), mbar.bottom()+1);
    display.set_pos(lb_images.right(), overlay_label.bottom()+3);

    display.set_size(width - display.left(), height - display.top());
}

// ----------------------------------------------------------------------------------------

// Copies every box from image `prev` to image `next`, using a correlation
// tracker to adjust each rectangle's position to where the object appears to
// have moved.  Loads both image files from disk.
void propagate_boxes(
    dlib::image_dataset_metadata::dataset& data,
    unsigned long prev,
    unsigned long next
)
{
    if (prev == next || next >= data.images.size())
        return;

    array2d<rgb_pixel> img1, img2;
    dlib::load_image(img1, data.images[prev].filename);
    dlib::load_image(img2, data.images[next].filename);
    for (unsigned long i = 0; i < data.images[prev].boxes.size(); ++i)
    {
        correlation_tracker tracker;
        tracker.start_track(img1, data.images[prev].boxes[i].rect);
        tracker.update(img2);
        // Keep all metadata (label, parts, ignore flag) but move the rect to
        // the tracked position.
        dlib::image_dataset_metadata::box box = data.images[prev].boxes[i];
        box.rect = tracker.get_position();
        data.images[next].boxes.push_back(box);
    }
}

// ----------------------------------------------------------------------------------------

// For each box in image `prev` carrying `label`, finds the best-overlapping
// unlabeled box in image `next` and copies the label onto it.  Overlap is
// intersection area over union area; a match requires > 0.5.
void propagate_labels(
    const std::string& label,
    dlib::image_dataset_metadata::dataset& data,
    unsigned long prev,
    unsigned long next
)
{
    if (prev == next || next >= data.images.size())
        return;

    for (unsigned long i = 0; i < data.images[prev].boxes.size(); ++i)
    {
        if (data.images[prev].boxes[i].label != label)
            continue;

        // figure out which box in the next image matches the current one the best
        const rectangle cur = data.images[prev].boxes[i].rect;
        double best_overlap = 0;
        unsigned long best_idx = 0;
        for (unsigned long j = 0; j < data.images[next].boxes.size(); ++j)
        {
            const rectangle next_box = data.images[next].boxes[j].rect;
            // intersection over union of the two rectangles
            const double overlap = cur.intersect(next_box).area()/(double)(cur+next_box).area();
            if (overlap > best_overlap)
            {
                best_overlap = overlap;
                best_idx = j;
            }
        }

        // If we found a matching rectangle in the next image and the best match doesn't
        // already have a label.
        if (best_overlap > 0.5 && data.images[next].boxes[best_idx].label == "")
        {
            data.images[next].boxes[best_idx].label = label;
        }
    }
}

// ----------------------------------------------------------------------------------------

// Returns true if `img` contains a box with `label`, or if every box in the
// image already has some label (or if `label` is empty).  Used to decide
// whether ctrl+arrow label propagation may advance past this image.
bool has_label_or_all_boxes_labeled (
    const std::string& label,
    const dlib::image_dataset_metadata::image& img
)
{
    if (label.size() == 0)
        return true;

    bool all_boxes_labeled = true;
    for (unsigned long i = 0; i < img.boxes.size(); ++i)
    {
        if (img.boxes[i].label == label)
            return true;
        if (img.boxes[i].label.size() == 0)
            all_boxes_labeled = false;
    }

    return all_boxes_labeled;
}

// ----------------------------------------------------------------------------------------

// Global keyboard handling: tab focuses the label field, digits jump to an
// image index, alt+d removes selected images, e toggles equalization, and
// up/down (optionally with ctrl / ctrl+shift) navigate and propagate
// labels or whole boxes between adjacent images.
void metadata_editor::
on_keydown (
    unsigned long key,
    bool is_printable,
    unsigned long state
)
{
    drawable_window::on_keydown(key, is_printable, state);

    if (is_printable)
    {
        if (key == '\t')
        {
            overlay_label.give_input_focus();
            overlay_label.select_all_text();
        }

        // If the user types a number then jump to that image.
        if ('0' <= key && key <= '9' && metadata.images.size() != 0 && !overlay_label.has_input_focus())
        {
            time_t curtime = time(0);
            // If it's been a while since the user typed numbers then forget the last jump
            // position and start accumulating numbers over again.
            if (curtime-last_keyboard_jump_pos_update >= 2)
                keyboard_jump_pos = 0;
            last_keyboard_jump_pos_update = curtime;

            // Accumulate multi-digit indices, clamped to the last image.
            keyboard_jump_pos *= 10;
            keyboard_jump_pos += key-'0';
            if (keyboard_jump_pos >= metadata.images.size())
                keyboard_jump_pos = metadata.images.size()-1;

            image_pos = keyboard_jump_pos;
            select_image(image_pos);
        }
        else
        {
            last_keyboard_jump_pos_update = 0;
        }

        if (key == 'd' && (state&base_window::KBD_MOD_ALT))
        {
            remove_selected_images();
        }

        if (key == 'e' && !overlay_label.has_input_focus())
        {
            display_equialized_image = !display_equialized_image;
            select_image(image_pos);
        }

        return;
    }

    if (key == base_window::KEY_UP)
    {
        if ((state&KBD_MOD_CONTROL) && (state&KBD_MOD_SHIFT))
        {
            // ctrl+shift+up: propagate whole boxes to the previous image via
            // visual tracking.
            // Don't do anything if there are no boxes in the current image.
            if (metadata.images[image_pos].boxes.size() == 0)
                return;
            // Also don't do anything if there *are* boxes in the next image.
            // NOTE(review): the guard uses image_pos > 1, which means image
            // index 1 never checks/propagates into index 0 — looks like it was
            // meant to be image_pos > 0; confirm before changing.
            if (image_pos > 1 && metadata.images[image_pos-1].boxes.size() != 0)
                return;

            propagate_boxes(metadata, image_pos, image_pos-1);
        }
        else if (state&base_window::KBD_MOD_CONTROL)
        {
            // ctrl+up: propagate the current label to the previous non-empty image.
            // If the label we are supposed to propagate doesn't exist in the current image
            // then don't advance.
            if (!has_label_or_all_boxes_labeled(display.get_default_overlay_rect_label(),metadata.images[image_pos]))
                return;

            // if the next image is going to be empty then fast forward to the next one
            while (image_pos > 1 && metadata.images[image_pos-1].boxes.size() == 0)
                --image_pos;

            propagate_labels(display.get_default_overlay_rect_label(), metadata, image_pos, image_pos-1);
        }
        select_image(image_pos-1);
    }
    else if (key == base_window::KEY_DOWN)
    {
        if ((state&KBD_MOD_CONTROL) && (state&KBD_MOD_SHIFT))
        {
            // ctrl+shift+down: propagate whole boxes to the next image via
            // visual tracking.
            // Don't do anything if there are no boxes in the current image.
            if (metadata.images[image_pos].boxes.size() == 0)
                return;
            // Also don't do anything if there *are* boxes in the next image.
            if (image_pos+1 < metadata.images.size() && metadata.images[image_pos+1].boxes.size() != 0)
                return;

            propagate_boxes(metadata, image_pos, image_pos+1);
        }
        else if (state&base_window::KBD_MOD_CONTROL)
        {
            // ctrl+down: propagate the current label to the next non-empty image.
            // If the label we are supposed to propagate doesn't exist in the current image
            // then don't advance.
            if (!has_label_or_all_boxes_labeled(display.get_default_overlay_rect_label(),metadata.images[image_pos]))
                return;

            // if the next image is going to be empty then fast forward to the next one
            while (image_pos+1 < metadata.images.size() && metadata.images[image_pos+1].boxes.size() == 0)
                ++image_pos;

            propagate_labels(display.get_default_overlay_rect_label(), metadata, image_pos, image_pos+1);
        }
        select_image(image_pos+1);
    }
}

// ----------------------------------------------------------------------------------------

// Makes `idx` the single selected list entry and displays that image.  If the
// dataset is empty, blanks the display instead; an out-of-range idx on a
// non-empty dataset is ignored.
void metadata_editor::
select_image(
    unsigned long idx
)
{
    if (idx < lb_images.size())
    {
        // unselect all currently selected images
        dlib::queue<unsigned long>::kernel_1a list;
        lb_images.get_selected(list);
        list.reset();
        while (list.move_next())
        {
            lb_images.unselect(list.element());
        }

        lb_images.select(idx);
        load_image(idx);
    }
    else if (lb_images.size() == 0)
    {
        display.clear_overlay();
        array2d<unsigned char> empty_img;
        display.set_image(empty_img);
    }
}

// ----------------------------------------------------------------------------------------

// List-box click handler: show the clicked image.
void metadata_editor::
on_lb_images_clicked(
    unsigned long idx
)
{
    load_image(idx);
}

// ----------------------------------------------------------------------------------------

// Converts an image's metadata boxes into display overlay rectangles, mapping
// each label to a color via string_to_color and the ignore flag to a
// crossed-out rendering.
std::vector<dlib::image_display::overlay_rect> get_overlays (
    const dlib::image_dataset_metadata::image& data,
    color_mapper& string_to_color
)
{
    std::vector<dlib::image_display::overlay_rect> temp(data.boxes.size());
    for (unsigned long i = 0; i < temp.size(); ++i)
    {
        temp[i].rect = data.boxes[i].rect;
        temp[i].label = data.boxes[i].label;
        temp[i].parts = data.boxes[i].parts;
        temp[i].crossed_out = data.boxes[i].ignore;
        temp[i].color = string_to_color(data.boxes[i].label);
    }
    return temp;
}

// ----------------------------------------------------------------------------------------

// Loads image `idx` from disk into the display (with its overlays) and
// updates the window title.  Load failures are reported in a message box and
// leave an empty image shown.
void metadata_editor::
load_image(
    unsigned long idx
)
{
    if (idx >= metadata.images.size())
        return;

    image_pos = idx;

    array2d<rgb_pixel> img;
    display.clear_overlay();
    try
    {
        dlib::load_image(img, metadata.images[idx].filename);
        set_title(metadata.name + " #"+cast_to_string(idx)+": " +metadata.images[idx].filename);
    }
    catch (exception& e)
    {
        message_box("Error loading image", e.what());
    }

    if (display_equialized_image)
        equalize_histogram(img);
    display.set_image(img);
    display.add_overlay(get_overlays(metadata.images[idx], string_to_color));
}

// ----------------------------------------------------------------------------------------

// Like load_image(), but also resizes the whole window to fit the image
// (clamped between 300 pixels and roughly the screen size).
void metadata_editor::
load_image_and_set_size(
    unsigned long idx
)
{
    if (idx >= metadata.images.size())
        return;

    image_pos = idx;

    array2d<rgb_pixel> img;
    display.clear_overlay();
    try
    {
        dlib::load_image(img, metadata.images[idx].filename);
        set_title(metadata.name + " #"+cast_to_string(idx)+": " +metadata.images[idx].filename);
    }
    catch (exception& e)
    {
        message_box("Error loading image", e.what());
    }

    unsigned long screen_width, screen_height;
    get_display_size(screen_width, screen_height);

    unsigned long needed_width = display.left() + img.nc() + 4;
    unsigned long needed_height = display.top() + img.nr() + 4;
    if (needed_width < 300) needed_width = 300;
    if (needed_height < 300) needed_height = 300;

    // NOTE(review): the window is only shrunk once it exceeds the screen size
    // by more than 100 pixels, and is then set to screen size minus 100 —
    // presumably deliberate slack for window decorations; confirm.
    if (needed_width > 100 + screen_width)
        needed_width = screen_width - 100;
    if (needed_height > 100 + screen_height)
        needed_height = screen_height - 100;

    set_size(needed_width, needed_height);

    if (display_equialized_image)
        equalize_histogram(img);
    display.set_image(img);
    display.add_overlay(get_overlays(metadata.images[idx], string_to_color));
}

// ----------------------------------------------------------------------------------------

// Display callback: the user edited the overlay rectangles, so rebuild the
// current image's metadata boxes from the display's state.
void metadata_editor::
on_overlay_rects_changed(
)
{
    using namespace dlib::image_dataset_metadata;
    if (image_pos < metadata.images.size())
    {
        const std::vector<image_display::overlay_rect>& rects = display.get_overlay_rects();

        std::vector<box>& boxes = metadata.images[image_pos].boxes;

        boxes.clear();
        for (unsigned long i = 0; i < rects.size(); ++i)
        {
            box temp;
            temp.label = rects[i].label;
            temp.rect = rects[i].rect;
            temp.parts = rects[i].parts;
            temp.ignore = rects[i].crossed_out;
            boxes.push_back(temp);
        }
    }
}

// ----------------------------------------------------------------------------------------

// Image click handler: refresh the default rectangle color from the current
// "Next Label" text.
void metadata_editor::
on_image_clicked(
    const point& /*p*/, bool /*is_double_click*/, unsigned long /*btn*/
)
{
    display.set_default_overlay_rect_color(string_to_color(trim(overlay_label.text())));
}

// ----------------------------------------------------------------------------------------

// "Next Label" text changed: new rectangles will get the updated label.
void metadata_editor::
on_overlay_label_changed(
)
{
    display.set_default_overlay_rect_label(trim(overlay_label.text()));
}

// ----------------------------------------------------------------------------------------

// A rectangle was selected: mirror its label into the "Next Label" field and
// make it the default for subsequently drawn rectangles.
void metadata_editor::
on_overlay_rect_selected(
    const image_display::overlay_rect& orect
)
{
    overlay_label.set_text(orect.label);
    display.set_default_overlay_rect_label(orect.label);
    display.set_default_overlay_rect_color(string_to_color(orect.label));
}

// ----------------------------------------------------------------------------------------

// Help > About: shows usage instructions for the tool.
void metadata_editor::
display_about(
)
{
    std::ostringstream sout;
    sout << wrap_string("Image Labeler v" + string(VERSION) + "." ,0,0) << endl << endl;
    sout << wrap_string("This program is a tool for labeling images with rectangles. " ,0,0) << endl << endl;

    sout << wrap_string("You can add a new rectangle by holding the shift key, left clicking "
                        "the mouse, and dragging it. New rectangles are given the label from the \"Next Label\" "
                        "field at the top of the application. You can quickly edit the contents of the Next Label field "
                        "by hitting the tab key. Double clicking "
                        "a rectangle selects it and the delete key removes it. You can also mark "
                        "a rectangle as ignored by hitting the i or END keys when it is selected. Ignored "
                        "rectangles are visually displayed with an X through them. You can remove an image "
                        "entirely by selecting it in the list on the left and pressing alt+d."
                        ,0,0) << endl << endl;

    sout << wrap_string("It is also possible to label object parts by selecting a rectangle and "
                        "then right clicking. A popup menu will appear and you can select a part label. "
                        "Note that you must define the allowable part labels by giving --parts on the "
                        "command line. An example would be '--parts \"leye reye nose mouth\"'."
                        ,0,0) << endl << endl;

    sout << wrap_string("Additionally, you can hold ctrl and then scroll the mouse wheel to zoom. A normal left click "
                        "and drag allows you to navigate around the image. Holding ctrl and "
                        "left clicking a rectangle will give it the label from the Next Label field. "
                        "Holding shift + right click and then dragging allows you to move things around. "
                        "Holding ctrl and pressing the up or down keyboard keys will propagate "
                        "rectangle labels from one image to the next and also skip empty images. "
                        "Similarly, holding ctrl+shift will propagate entire boxes via a visual tracking "
                        "algorithm from one image to the next. "
                        "Finally, typing a number on the keyboard will jump you to a specific image.",0,0) << endl << endl;

    sout << wrap_string("You can also toggle image histogram equalization by pressing the e key."
                        ,0,0) << endl;

    message_box("About Image Labeler",sout.str());
}

// ----------------------------------------------------------------------------------------
// Copyright (c) 2011-2016 The Cryptonote developers
// Copyright (c) 2014-2017 XDN-project developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

// CryptoNight-style memory-hard slow hash.  This translation unit is built in
// two variants: with AESNI defined the hot loops use the hardware AES round
// instruction (_mm_aesenc_si128), otherwise they fall back to the software
// oaes / aesb_* implementations.  Three phases:
//   1) absorb the input into ctx->state and expand it into the large
//      ctx->long_state scratchpad with repeated AES rounds;
//   2) run 0x80000 (524288) data-dependent read-modify-write iterations over
//      the scratchpad (the memory-hard core);
//   3) fold the scratchpad back into the state and finish with one of four
//      extra hash functions selected by the state's first byte.
static void
#if defined(AESNI)
cn_slow_hash_aesni
#else
cn_slow_hash_noaesni
#endif
(void *restrict context, const void *restrict data, size_t length, void *restrict hash)
{
// Shorthand for the caller-supplied working context.
// NOTE(review): this macro is never #undef'd at the end of the function.
#define ctx ((struct cn_ctx *) context)
  ALIGNED_DECL(uint8_t ExpandedKey[256], 16);
  size_t i;
  __m128i *longoutput, *expkey, *xmminput, b_x;
  ALIGNED_DECL(uint64_t a[2], 16);

  // Phase 0: hash the input into the state, then seed the working text
  // buffer from state.init.
  hash_process(&ctx->state.hs, (const uint8_t*) data, length);
  memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);

  // Expand the first AES key from the leading AES_KEY_SIZE bytes of the state.
#if defined(AESNI)
  memcpy(ExpandedKey, ctx->state.hs.b, AES_KEY_SIZE);
  ExpandAESKey256(ExpandedKey);
#else
  ctx->aes_ctx = oaes_alloc();
  oaes_key_import_data(ctx->aes_ctx, ctx->state.hs.b, AES_KEY_SIZE);
  memcpy(ExpandedKey, ctx->aes_ctx->key->exp_data, ctx->aes_ctx->key->exp_data_len);
#endif

  // Reinterpret the byte buffers as arrays of 128-bit lanes.
  longoutput = (__m128i *) ctx->long_state;
  expkey = (__m128i *) ExpandedKey;
  xmminput = (__m128i *) ctx->text;

  //for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE)
  // aesni_parallel_noxor(&ctx->long_state[i], ctx->text, ExpandedKey);

  // Phase 1: fill the scratchpad.  Each pass pushes the eight 16-byte blocks
  // of ctx->text through 10 AES rounds and stores them to the next
  // INIT_SIZE_BYTE slice of long_state.
  for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE)
  {
#if defined(AESNI)
    for(size_t j = 0; j < 10; j++)
    {
      xmminput[0] = _mm_aesenc_si128(xmminput[0], expkey[j]);
      xmminput[1] = _mm_aesenc_si128(xmminput[1], expkey[j]);
      xmminput[2] = _mm_aesenc_si128(xmminput[2], expkey[j]);
      xmminput[3] = _mm_aesenc_si128(xmminput[3], expkey[j]);
      xmminput[4] = _mm_aesenc_si128(xmminput[4], expkey[j]);
      xmminput[5] = _mm_aesenc_si128(xmminput[5], expkey[j]);
      xmminput[6] = _mm_aesenc_si128(xmminput[6], expkey[j]);
      xmminput[7] = _mm_aesenc_si128(xmminput[7], expkey[j]);
    }
#else
    aesb_pseudo_round((uint8_t *) &xmminput[0], (uint8_t *) &xmminput[0], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[1], (uint8_t *) &xmminput[1], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[2], (uint8_t *) &xmminput[2], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[3], (uint8_t *) &xmminput[3], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[4], (uint8_t *) &xmminput[4], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[5], (uint8_t *) &xmminput[5], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[6], (uint8_t *) &xmminput[6], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[7], (uint8_t *) &xmminput[7], (uint8_t *) expkey);
#endif
    // i >> 4 converts the byte offset into a 128-bit lane index.
    _mm_store_si128(&(longoutput[(i >> 4)]), xmminput[0]);
    _mm_store_si128(&(longoutput[(i >> 4) + 1]), xmminput[1]);
    _mm_store_si128(&(longoutput[(i >> 4) + 2]), xmminput[2]);
    _mm_store_si128(&(longoutput[(i >> 4) + 3]), xmminput[3]);
    _mm_store_si128(&(longoutput[(i >> 4) + 4]), xmminput[4]);
    _mm_store_si128(&(longoutput[(i >> 4) + 5]), xmminput[5]);
    _mm_store_si128(&(longoutput[(i >> 4) + 6]), xmminput[6]);
    _mm_store_si128(&(longoutput[(i >> 4) + 7]), xmminput[7]);
  }

  // Derive the two 128-bit mixing registers a and b by XORing halves of
  // state.k together.
  for (i = 0; i < 2; i++)
  {
    ctx->a[i] = ((uint64_t *)ctx->state.k)[i] ^ ((uint64_t *)ctx->state.k)[i+4];
    ctx->b[i] = ((uint64_t *)ctx->state.k)[i+2] ^ ((uint64_t *)ctx->state.k)[i+6];
  }

  b_x = _mm_load_si128((__m128i *)ctx->b);
  a[0] = ctx->a[0];
  a[1] = ctx->a[1];

  // Phase 2: the memory-hard core.  Each iteration reads a scratchpad block
  // addressed by the low bits of a[0], AES-encrypts it with a as the round
  // key, writes back b XOR the result, then uses the result to address a
  // second block which is combined via a 64x64->128-bit multiply-and-add.
  // The mask 0x1FFFF0 keeps the offset 16-byte aligned inside the scratchpad
  // (the mask implies a 0x200000-byte / 2 MiB long_state — confirm against
  // the MEMORY constant).
  for(i = 0; likely(i < 0x80000); i++)
  {
    __m128i c_x = _mm_load_si128((__m128i *)&ctx->long_state[a[0] & 0x1FFFF0]);
    __m128i a_x = _mm_load_si128((__m128i *)a);
    ALIGNED_DECL(uint64_t c[2], 16);
    ALIGNED_DECL(uint64_t b[2], 16);
    uint64_t *nextblock, *dst;

#if defined(AESNI)
    c_x = _mm_aesenc_si128(c_x, a_x);
#else
    aesb_single_round((uint8_t *) &c_x, (uint8_t *) &c_x, (uint8_t *) &a_x);
#endif

    _mm_store_si128((__m128i *)c, c_x);
    //__builtin_prefetch(&ctx->long_state[c[0] & 0x1FFFF0], 0, 1);

    b_x = _mm_xor_si128(b_x, c_x);
    _mm_store_si128((__m128i *)&ctx->long_state[a[0] & 0x1FFFF0], b_x);

    // Second, data-dependent scratchpad read addressed by c[0].
    nextblock = (uint64_t *)&ctx->long_state[c[0] & 0x1FFFF0];
    b[0] = nextblock[0];
    b[1] = nextblock[1];

    {
      uint64_t hi, lo;
      // hi,lo = 64bit x 64bit multiply of c[0] and b[0]
#if defined(__GNUC__) && defined(__x86_64__)
      // mulq gives the full 128-bit product: high half in rdx, low in rax.
      __asm__("mulq %3\n\t"
        : "=d" (hi), "=a" (lo)
        : "%a" (c[0]), "rm" (b[0])
        : "cc" );
#else
      lo = mul128(c[0], b[0], &hi);
#endif

      a[0] += hi;
      a[1] += lo;
    }
    // Write the accumulator back over the second block, then mix.
    dst = (uint64_t *) &ctx->long_state[c[0] & 0x1FFFF0];
    dst[0] = a[0];
    dst[1] = a[1];

    a[0] ^= b[0];
    a[1] ^= b[1];
    b_x = c_x;
    //__builtin_prefetch(&ctx->long_state[a[0] & 0x1FFFF0], 0, 3);
  }

  // Phase 3: re-read state.init and re-key from the second 32 bytes of the
  // state, then XOR-and-encrypt the whole scratchpad back into the text.
  memcpy(ctx->text, ctx->state.init, INIT_SIZE_BYTE);
#if defined(AESNI)
  memcpy(ExpandedKey, &ctx->state.hs.b[32], AES_KEY_SIZE);
  ExpandAESKey256(ExpandedKey);
#else
  oaes_key_import_data(ctx->aes_ctx, &ctx->state.hs.b[32], AES_KEY_SIZE);
  memcpy(ExpandedKey, ctx->aes_ctx->key->exp_data, ctx->aes_ctx->key->exp_data_len);
#endif

  //for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE)
  // aesni_parallel_xor(&ctx->text, ExpandedKey, &ctx->long_state[i]);

  for (i = 0; likely(i < MEMORY); i += INIT_SIZE_BYTE)
  {
    xmminput[0] = _mm_xor_si128(longoutput[(i >> 4)], xmminput[0]);
    xmminput[1] = _mm_xor_si128(longoutput[(i >> 4) + 1], xmminput[1]);
    xmminput[2] = _mm_xor_si128(longoutput[(i >> 4) + 2], xmminput[2]);
    xmminput[3] = _mm_xor_si128(longoutput[(i >> 4) + 3], xmminput[3]);
    xmminput[4] = _mm_xor_si128(longoutput[(i >> 4) + 4], xmminput[4]);
    xmminput[5] = _mm_xor_si128(longoutput[(i >> 4) + 5], xmminput[5]);
    xmminput[6] = _mm_xor_si128(longoutput[(i >> 4) + 6], xmminput[6]);
    xmminput[7] = _mm_xor_si128(longoutput[(i >> 4) + 7], xmminput[7]);

#if defined(AESNI)
    for(size_t j = 0; j < 10; j++)
    {
      xmminput[0] = _mm_aesenc_si128(xmminput[0], expkey[j]);
      xmminput[1] = _mm_aesenc_si128(xmminput[1], expkey[j]);
      xmminput[2] = _mm_aesenc_si128(xmminput[2], expkey[j]);
      xmminput[3] = _mm_aesenc_si128(xmminput[3], expkey[j]);
      xmminput[4] = _mm_aesenc_si128(xmminput[4], expkey[j]);
      xmminput[5] = _mm_aesenc_si128(xmminput[5], expkey[j]);
      xmminput[6] = _mm_aesenc_si128(xmminput[6], expkey[j]);
      xmminput[7] = _mm_aesenc_si128(xmminput[7], expkey[j]);
    }
#else
    aesb_pseudo_round((uint8_t *) &xmminput[0], (uint8_t *) &xmminput[0], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[1], (uint8_t *) &xmminput[1], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[2], (uint8_t *) &xmminput[2], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[3], (uint8_t *) &xmminput[3], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[4], (uint8_t *) &xmminput[4], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[5], (uint8_t *) &xmminput[5], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[6], (uint8_t *) &xmminput[6], (uint8_t *) expkey);
    aesb_pseudo_round((uint8_t *) &xmminput[7], (uint8_t *) &xmminput[7], (uint8_t *) expkey);
#endif
  }

  // The software-AES context was allocated at the top of this function.
#if !defined(AESNI)
  oaes_free((OAES_CTX **) &ctx->aes_ctx);
#endif

  // Fold the final text back into the state, permute, and pick one of the
  // four finalisation hashes from the low two bits of the first state byte.
  memcpy(ctx->state.init, ctx->text, INIT_SIZE_BYTE);
  hash_permutation(&ctx->state.hs);
  extra_hashes[ctx->state.hs.b[0] & 3](&ctx->state, 200, hash);
}
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>
#include <GLES3/gl3.h>

#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "gpu/command_buffer/tests/gl_manager.h"
#include "gpu/command_buffer/tests/gl_test_utils.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_switches.h"

// Prefixes the stringized shader body with an ESSL 3.00 version directive.
#define SHADER_VERSION_300(Src) "#version 300 es\n" #Src

namespace gpu {

// Fixture that stands up an OpenGL ES 3 context via GLManager so tests can
// exercise ES3-only entry points.
class OpenGLES3FunctionTest : public testing::Test {
 protected:
  void SetUp() override {
    GLManager::Options options;
    options.context_type = CONTEXT_TYPE_OPENGLES3;
    gl_.Initialize(options);
  }
  void TearDown() override { gl_.Destroy(); }
  // ES3 contexts are not available on every bot; tests return early (rather
  // than fail) when initialization did not succeed.
  bool IsApplicable() const { return gl_.IsInitialized(); }
  GLManager gl_;
};

#if defined(OS_ANDROID)
// Test is failing for Lollipop 64 bit Tester.
// See crbug/550292.
#define MAYBE_GetFragDataLocationInvalid DISABLED_GetFragDataLocationInvalid
#else
#define MAYBE_GetFragDataLocationInvalid GetFragDataLocationInvalid
#endif
// glGetFragDataLocation must set GL_INVALID_OPERATION and return -1 before
// the program is linked; after linking it must return the fragment output's
// location for a known name and -1 (with no error) for an unknown one.
TEST_F(OpenGLES3FunctionTest, MAYBE_GetFragDataLocationInvalid) {
  if (!IsApplicable()) {
    return;
  }
  // clang-format off
  static const char* kVertexShader =
      SHADER_VERSION_300(
          in vec4 position;
          void main() {
            gl_Position = position;
          });
  static const char* kFragColorShader =
      SHADER_VERSION_300(
          precision mediump float;
          uniform vec4 src;
          out vec4 FragColor;
          void main() {
            FragColor = src;
          });
  // clang-format on

  GLuint vsid = GLTestHelper::LoadShader(GL_VERTEX_SHADER, kVertexShader);
  GLuint fsid = GLTestHelper::LoadShader(GL_FRAGMENT_SHADER, kFragColorShader);
  GLuint program = glCreateProgram();
  glAttachShader(program, vsid);
  glAttachShader(program, fsid);
  // Shaders can be flagged for deletion once attached to the program.
  glDeleteShader(vsid);
  glDeleteShader(fsid);

  // Program not linked yet: querying any name must be an error.
  GLint location = glGetFragDataLocation(program, "FragColor");
  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), glGetError());
  EXPECT_EQ(-1, location);
  location = glGetFragDataLocation(program, "Unknown");
  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_OPERATION), glGetError());
  EXPECT_EQ(-1, location);

  glLinkProgram(program);

  location = glGetFragDataLocation(program, "FragColor");
  EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
  EXPECT_EQ(0, location);
  location = glGetFragDataLocation(program, "Unknown");
  EXPECT_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
  EXPECT_EQ(-1, location);

  glDeleteProgram(program);
}

// The indexed glGetStringi(GL_EXTENSIONS, i) results must agree with the
// aggregate glGetString(GL_EXTENSIONS) string, both in count
// (GL_NUM_EXTENSIONS) and in content.
TEST_F(OpenGLES3FunctionTest, GetStringiTest) {
  if (!IsApplicable()) {
    return;
  }
  std::string extensionString =
      reinterpret_cast<const char*>(glGetString(GL_EXTENSIONS));
  std::vector<std::string> extensions =
      base::SplitString(extensionString, base::kWhitespaceASCII,
                        base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
  int num_extensions = 0;
  glGetIntegerv(GL_NUM_EXTENSIONS, &num_extensions);
  EXPECT_EQ(extensions.size(), static_cast<size_t>(num_extensions));
  // Compare as sets so ordering differences between the two query styles do
  // not matter.
  std::set<std::string> extensions_from_string(extensions.begin(),
                                               extensions.end());
  std::set<std::string> extensions_from_stringi;
  for (int i = 0; i < num_extensions; ++i) {
    extensions_from_stringi.insert(
        reinterpret_cast<const char*>(glGetStringi(GL_EXTENSIONS, i)));
  }
  EXPECT_EQ(extensions_from_string, extensions_from_stringi);
}

}  // namespace gpu
// Copyright © 2017-2020 Trust Wallet. // // This file is part of Trust. The full Trust copyright notice, including // terms governing use, modification, and redistribution, is contained in the // file LICENSE at the root of the source code distribution tree. #include "Bitcoin/OutPoint.h" #include "Bitcoin/Script.h" #include "Bitcoin/Transaction.h" #include "Bitcoin/TransactionBuilder.h" #include "Bitcoin/TransactionSigner.h" #include "Hash.h" #include "HexCoding.h" #include "PrivateKey.h" #include "proto/Bitcoin.pb.h" #include "../interface/TWTestUtilities.h" #include <TrustWalletCore/TWBitcoinScript.h> #include <TrustWalletCore/TWAnySigner.h> #include <TrustWalletCore/TWHash.h> #include <TrustWalletCore/TWPrivateKey.h> #include <gtest/gtest.h> using namespace TW; using namespace TW::Bitcoin; TEST(BitcoinSigning, SignP2PKH) { auto hash0 = parse_hex("fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f"); auto hash1 = parse_hex("ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a"); // Setup input Proto::SigningInput input; input.set_hash_type(TWBitcoinSigHashTypeAll); input.set_amount(335'790'000); input.set_byte_fee(1); input.set_to_address("1Bp9U1ogV3A14FMvKbRJms7ctyso4Z4Tcx"); input.set_change_address("1FQc5LdgGHMHEN9nwkjmz6tWkxhPpxBvBU"); auto utxoKey0 = PrivateKey(parse_hex("bbc27228ddcb9209d7fd6f36b02f7dfa6252af40bb2f1cbc7a557da8027ff866")); auto pubKey0 = utxoKey0.getPublicKey(TWPublicKeyTypeSECP256k1); auto utxoPubkeyHash = Hash::ripemd(Hash::sha256(pubKey0.bytes)); ASSERT_EQ(hex(utxoPubkeyHash.begin(), utxoPubkeyHash.end()), "b7cd046b6d522a3d61dbcb5235c0e9cc97265457"); input.add_private_key(utxoKey0.bytes.data(), utxoKey0.bytes.size()); auto utxoKey1 = parse_hex("619c335025c7f4012e556c2a58b2506e30b8511b53ade95ea316fd8c3286feb9"); input.add_private_key(utxoKey1.data(), utxoKey1.size()); auto utxo0Script = Script::buildPayToPublicKeyHash(utxoPubkeyHash); Data scriptHash; utxo0Script.matchPayToPublicKeyHash(scriptHash); 
ASSERT_EQ(hex(scriptHash), "b7cd046b6d522a3d61dbcb5235c0e9cc97265457"); auto utxo0 = input.add_utxo(); utxo0->set_script(utxo0Script.bytes.data(), utxo0Script.bytes.size()); utxo0->set_amount(625'000'000); utxo0->mutable_out_point()->set_hash(hash0.data(), hash0.size()); utxo0->mutable_out_point()->set_index(0); utxo0->mutable_out_point()->set_sequence(UINT32_MAX); auto utxo1 = input.add_utxo(); auto utxo1Script = parse_hex("00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1"); utxo1->set_script(utxo1Script.data(), utxo1Script.size()); utxo1->set_amount(600'000'000); utxo1->mutable_out_point()->set_hash(hash1.data(), hash1.size()); utxo1->mutable_out_point()->set_index(1); utxo1->mutable_out_point()->set_sequence(UINT32_MAX); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_TRUE(result) << result.error(); auto signedTx = result.payload(); Data serialized; signedTx.encode(true, serialized); ASSERT_EQ(hex(serialized), "01000000" "0001" "01" "fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f" "00000000" "6a47304402202819d70d4bec472113a1392cadc0860a7a1b34ea0869abb4bdce3290c3aba086022023eff75f410ad19cdbe6c6a017362bd554ce5fb906c13534ddc306be117ad30a012103c9f4836b9a4f77fc0d81f7bcb01b7f1b35916864b9476c241ce9fc198bd25432" "ffffffff" "02" "b0bf031400000000" "1976a914769bdff96a02f9135a1d19b749db6a78fe07dc9088ac" "aefd3c1100000000" "1976a9149e089b6889e032d46e3b915a3392edfd616fb1c488ac" "0000000000" ); } TEST(BitcoinSigning, EncodeP2WPKH) { auto unsignedTx = Transaction(1, 0x11); auto hash0 = parse_hex("fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f"); auto outpoint0 = TW::Bitcoin::OutPoint(hash0, 0); unsignedTx.inputs.emplace_back(outpoint0, Script(), 0xffffffee); auto hash1 = parse_hex("ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a"); auto outpoint1 = TW::Bitcoin::OutPoint(hash1, 1); unsignedTx.inputs.emplace_back(outpoint1, Script(), UINT32_MAX); auto outScript0 = 
Script(parse_hex("76a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac")); unsignedTx.outputs.emplace_back(112340000, outScript0); auto outScript1 = Script(parse_hex("76a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac")); unsignedTx.outputs.emplace_back(223450000, outScript1); auto unsignedData = std::vector<uint8_t>(); unsignedTx.encode(false, unsignedData); ASSERT_EQ(hex(unsignedData.begin(), unsignedData.end()), "" "01000000" "02" "fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f0000000000eeffffff" "ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff" "02" "202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac" "9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac" "11000000"); } TEST(BitcoinSigning, SignP2WPKH) { auto hash0 = parse_hex("fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f"); auto hash1 = parse_hex("ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a"); // Setup input Proto::SigningInput input; input.set_hash_type(TWBitcoinSigHashTypeAll); input.set_amount(335'790'000); input.set_byte_fee(1); input.set_to_address("1Bp9U1ogV3A14FMvKbRJms7ctyso4Z4Tcx"); input.set_change_address("1FQc5LdgGHMHEN9nwkjmz6tWkxhPpxBvBU"); auto utxoKey0 = parse_hex("bbc27228ddcb9209d7fd6f36b02f7dfa6252af40bb2f1cbc7a557da8027ff866"); input.add_private_key(utxoKey0.data(), utxoKey0.size()); auto utxoKey1 = parse_hex("619c335025c7f4012e556c2a58b2506e30b8511b53ade95ea316fd8c3286feb9"); input.add_private_key(utxoKey1.data(), utxoKey1.size()); auto scriptPub1 = Script(parse_hex("00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1")); auto scriptHash = std::vector<uint8_t>(); scriptPub1.matchPayToWitnessPublicKeyHash(scriptHash); auto scriptHashHex = hex(scriptHash.begin(), scriptHash.end()); ASSERT_EQ(scriptHashHex, "1d0f172a0ecb48aee1be1f2687d2963ae33f71a1"); auto redeemScript = Script::buildPayToPublicKeyHash(scriptHash); auto scriptString = std::string(redeemScript.bytes.begin(), 
redeemScript.bytes.end()); (*input.mutable_scripts())[scriptHashHex] = scriptString; auto utxo0 = input.add_utxo(); auto utxo0Script = parse_hex("2103c9f4836b9a4f77fc0d81f7bcb01b7f1b35916864b9476c241ce9fc198bd25432ac"); utxo0->set_script(utxo0Script.data(), utxo0Script.size()); utxo0->set_amount(625'000'000); utxo0->mutable_out_point()->set_hash(hash0.data(), hash0.size()); utxo0->mutable_out_point()->set_index(0); utxo0->mutable_out_point()->set_sequence(UINT32_MAX); auto utxo1 = input.add_utxo(); auto utxo1Script = parse_hex("00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1"); utxo1->set_script(utxo1Script.data(), utxo1Script.size()); utxo1->set_amount(600'000'000); utxo1->mutable_out_point()->set_hash(hash1.data(), hash1.size()); utxo1->mutable_out_point()->set_index(1); utxo1->mutable_out_point()->set_sequence(UINT32_MAX); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_TRUE(result) << result.error(); auto signedTx = result.payload(); // txid = "03b30d55430f08365d19a62d3bd32e459ab50984fbcf22921ecc85f1e09dc6ed" // witid = "20bc58d07d91a3bae9e6f4d617d8f6271723d1a7673e486cc0ecaf9e758e2c22" Data serialized; signedTx.encode(true, serialized); ASSERT_EQ(hex(serialized), "01000000" "0001" "01" "fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f" "00000000" "49483045022100b6006eb0fe2da8cbbd204f702b1ffdb1e29c49f3de51c4983d420bf9f9125635022032a195b153ccb2c4978333b4aad72aaa7e6a0b334a14621d5d817a42489cb0d301" "ffffffff" "02" "b0bf031400000000" "1976a914769bdff96a02f9135a1d19b749db6a78fe07dc9088ac" "aefd3c1100000000" "1976a9149e089b6889e032d46e3b915a3392edfd616fb1c488ac" "0000000000" ); } TEST(BitcoinSigning, SignP2WPKH_HashSingle_TwoInput) { auto hash0 = parse_hex("fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f"); auto hash1 = parse_hex("ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a"); // Setup input Proto::SigningInput input; 
input.set_hash_type(TWBitcoinSigHashTypeSingle); input.set_amount(335'790'000); input.set_byte_fee(1); input.set_to_address("1Bp9U1ogV3A14FMvKbRJms7ctyso4Z4Tcx"); input.set_change_address("1FQc5LdgGHMHEN9nwkjmz6tWkxhPpxBvBU"); auto utxoKey0 = parse_hex("bbc27228ddcb9209d7fd6f36b02f7dfa6252af40bb2f1cbc7a557da8027ff866"); input.add_private_key(utxoKey0.data(), utxoKey0.size()); auto utxoKey1 = parse_hex("619c335025c7f4012e556c2a58b2506e30b8511b53ade95ea316fd8c3286feb9"); input.add_private_key(utxoKey1.data(), utxoKey1.size()); auto scriptPub1 = Script(parse_hex("00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1")); auto scriptHash = std::vector<uint8_t>(); scriptPub1.matchPayToWitnessPublicKeyHash(scriptHash); auto scriptHashHex = hex(scriptHash.begin(), scriptHash.end()); ASSERT_EQ(scriptHashHex, "1d0f172a0ecb48aee1be1f2687d2963ae33f71a1"); auto redeemScript = Script::buildPayToPublicKeyHash(scriptHash); auto scriptString = std::string(redeemScript.bytes.begin(), redeemScript.bytes.end()); (*input.mutable_scripts())[scriptHashHex] = scriptString; auto utxo0 = input.add_utxo(); auto utxo0Script = parse_hex("2103c9f4836b9a4f77fc0d81f7bcb01b7f1b35916864b9476c241ce9fc198bd25432ac"); utxo0->set_script(utxo0Script.data(), utxo0Script.size()); utxo0->set_amount(210'000'000); utxo0->mutable_out_point()->set_hash(hash0.data(), hash0.size()); utxo0->mutable_out_point()->set_index(0); utxo0->mutable_out_point()->set_sequence(UINT32_MAX); auto utxo1 = input.add_utxo(); auto utxo1Script = parse_hex("00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1"); utxo1->set_script(utxo1Script.data(), utxo1Script.size()); utxo1->set_amount(210'000'000); utxo1->mutable_out_point()->set_hash(hash1.data(), hash1.size()); utxo1->mutable_out_point()->set_index(1); utxo1->mutable_out_point()->set_sequence(UINT32_MAX); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_TRUE(result) << result.error(); auto signedTx = result.payload(); Data serialized; 
signedTx.encode(true, serialized); ASSERT_EQ(hex(serialized), "01000000" "0001" "02" "fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f" "00000000" "49483045022100fd8591c3611a07b55f509ec850534c7a9c49713c9b8fa0e844ea06c2e65e19d702205e3806676192e790bc93dd4c28e937c4bf97b15f189158ba1a30d7ecff5ee75503" "ffffffff" "ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a" "0100000000" "ffffffff" "02" "b0bf031400000000" "1976a914769bdff96a02f9135a1d19b749db6a78fe07dc9088ac" "daef040500000000" "1976a9149e089b6889e032d46e3b915a3392edfd616fb1c488ac" "0002" "47304402206b91d2c69022a54652731b4302eabe59c87949cf62f4c5674c7d4c0d1fbf898102200cee8eeb6ef9542426788c06ed51004799b730083ae3d4daf3c3d5fdc2275d1d0321025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357" "00000000" ); } TEST(BitcoinSigning, SignP2WPKH_HashAnyoneCanPay_TwoInput) { auto hash0 = parse_hex("fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f"); auto hash1 = parse_hex("ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a"); // Setup input Proto::SigningInput input; input.set_hash_type(TWBitcoinSigHashTypeAnyoneCanPay); input.set_amount(335'790'000); input.set_byte_fee(1); input.set_to_address("1Bp9U1ogV3A14FMvKbRJms7ctyso4Z4Tcx"); input.set_change_address("1FQc5LdgGHMHEN9nwkjmz6tWkxhPpxBvBU"); auto utxoKey0 = parse_hex("bbc27228ddcb9209d7fd6f36b02f7dfa6252af40bb2f1cbc7a557da8027ff866"); input.add_private_key(utxoKey0.data(), utxoKey0.size()); auto utxoKey1 = parse_hex("619c335025c7f4012e556c2a58b2506e30b8511b53ade95ea316fd8c3286feb9"); input.add_private_key(utxoKey1.data(), utxoKey1.size()); auto scriptPub1 = Script(parse_hex("00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1")); auto scriptHash = std::vector<uint8_t>(); scriptPub1.matchPayToWitnessPublicKeyHash(scriptHash); auto scriptHashHex = hex(scriptHash.begin(), scriptHash.end()); ASSERT_EQ(scriptHashHex, "1d0f172a0ecb48aee1be1f2687d2963ae33f71a1"); auto redeemScript = 
Script::buildPayToPublicKeyHash(scriptHash); auto scriptString = std::string(redeemScript.bytes.begin(), redeemScript.bytes.end()); (*input.mutable_scripts())[scriptHashHex] = scriptString; auto utxo0 = input.add_utxo(); auto utxo0Script = parse_hex("2103c9f4836b9a4f77fc0d81f7bcb01b7f1b35916864b9476c241ce9fc198bd25432ac"); utxo0->set_script(utxo0Script.data(), utxo0Script.size()); utxo0->set_amount(210'000'000); utxo0->mutable_out_point()->set_hash(hash0.data(), hash0.size()); utxo0->mutable_out_point()->set_index(0); utxo0->mutable_out_point()->set_sequence(UINT32_MAX); auto utxo1 = input.add_utxo(); auto utxo1Script = parse_hex("00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1"); utxo1->set_script(utxo1Script.data(), utxo1Script.size()); utxo1->set_amount(210'000'000); utxo1->mutable_out_point()->set_hash(hash1.data(), hash1.size()); utxo1->mutable_out_point()->set_index(1); utxo1->mutable_out_point()->set_sequence(UINT32_MAX); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_TRUE(result) << result.error(); auto signedTx = result.payload(); Data serialized; signedTx.encode(true, serialized); ASSERT_EQ(hex(serialized), "01000000" "0001" "02" "fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f" "00000000" "4847304402206ed3e388d440cb845eef2fce0740b83bdd77764ad0e7dd815a20760718291a5302203f78d743350d80aa2508e90d5a984636c5503d02c1e8656442f0f0275db95baa80" "ffffffff" "ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a" "01000000" "00" "ffffffff" "02" "b0bf031400000000" "1976a914769bdff96a02f9135a1d19b749db6a78fe07dc9088ac" "daef040500000000" "1976a9149e089b6889e032d46e3b915a3392edfd616fb1c488ac" "0002" "483045022100a5eedab7da09317141e35730256ef9b76da0c2442995a1c2b5458ee7d8834ba302201dc10b47cd4e2e53c7253770cd6907c94c828317d217e3065db009345acf41ac8021025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357" "00000000" ); } TEST(BitcoinSigning, EncodeP2WSH) { auto unsignedTx = 
Transaction(1, 0); auto outpoint0 = OutPoint(parse_hex("0001000000000000000000000000000000000000000000000000000000000000"), 0); unsignedTx.inputs.emplace_back(outpoint0, Script(), UINT32_MAX); auto outScript0 = Script(parse_hex("76a9144c9c3dfac4207d5d8cb89df5722cb3d712385e3f88ac")); unsignedTx.outputs.emplace_back(1000, outScript0); auto unsignedData = std::vector<uint8_t>(); unsignedTx.encode(false, unsignedData); ASSERT_EQ(hex(unsignedData.begin(), unsignedData.end()), "" "01000000" "01" "00010000000000000000000000000000000000000000000000000000000000000000000000ffffffff" "01" "e8030000000000001976a9144c9c3dfac4207d5d8cb89df5722cb3d712385e3f88ac" "00000000"); } Proto::SigningInput buildInputP2WSH(enum TWBitcoinSigHashType hashType, bool omitScript = false) { Proto::SigningInput input; input.set_hash_type(hashType); input.set_amount(1000); input.set_byte_fee(1); input.set_to_address("1Bp9U1ogV3A14FMvKbRJms7ctyso4Z4Tcx"); input.set_change_address("1FQc5LdgGHMHEN9nwkjmz6tWkxhPpxBvBU"); auto utxoKey0 = parse_hex("ed00a0841cd53aedf89b0c616742d1d2a930f8ae2b0fb514765a17bb62c7521a"); input.add_private_key(utxoKey0.data(), utxoKey0.size()); auto utxoKey1 = parse_hex("619c335025c7f4012e556c2a58b2506e30b8511b53ade95ea316fd8c3286feb9"); input.add_private_key(utxoKey1.data(), utxoKey1.size()); if (!omitScript) { auto redeemScript = Script(parse_hex("2103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ac")); auto scriptHash = "593128f9f90e38b706c18623151e37d2da05c229"; auto scriptString = std::string(redeemScript.bytes.begin(), redeemScript.bytes.end()); (*input.mutable_scripts())[scriptHash] = scriptString; } auto utxo0 = input.add_utxo(); auto p2wsh = Script::buildPayToWitnessScriptHash(parse_hex("ff25429251b5a84f452230a3c75fd886b7fc5a7865ce4a7bb7a9d7c5be6da3db")); utxo0->set_script(p2wsh.bytes.data(), p2wsh.bytes.size()); utxo0->set_amount(1226); auto hash0 = parse_hex("0001000000000000000000000000000000000000000000000000000000000000"); 
utxo0->mutable_out_point()->set_hash(hash0.data(), hash0.size()); utxo0->mutable_out_point()->set_index(0); utxo0->mutable_out_point()->set_sequence(UINT32_MAX); return input; } TEST(BitcoinSigning, SignP2WSH) { // Setup input const auto input = buildInputP2WSH(TWBitcoinSigHashTypeAll); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_TRUE(result) << result.error(); auto signedTx = result.payload(); // txid = "b588f910d7ff03d5fbc3da91f62e48bab47153229c8d1b114b43cb31b9c4d0dd" // witid = "16a17dd8f6e507220010c56c07a8479e3f909f87791683577d4e6aad61ab113a" Data serialized; signedTx.encode(true, serialized); ASSERT_EQ(hex(serialized), "01000000" "0001" "01" "0001000000000000000000000000000000000000000000000000000000000000" "00000000" "00" "ffffffff" "01" "e803000000000000" "1976a914769bdff96a02f9135a1d19b749db6a78fe07dc9088ac" "02" "4730440220252e92b8757f1e5577c54ce5deb8072914c1f03333128777dee96ebceeb6a99b02202b7298789316779d0aa7595abeedc03054405c42ab9859e67d9253d2c9a0cdfa01232103596d3451025c" "19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ac" "00000000" ); } TEST(BitcoinSigning, SignP2WSH_HashNone) { // Setup input const auto input = buildInputP2WSH(TWBitcoinSigHashTypeNone); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_TRUE(result) << result.error(); auto signedTx = result.payload(); Data serialized; signedTx.encode(true, serialized); ASSERT_EQ(hex(serialized), "01000000" "0001" "01" "0001000000000000000000000000000000000000000000000000000000000000" "00000000" "00" "ffffffff" "01" "e803000000000000" "1976a914769bdff96a02f9135a1d19b749db6a78fe07dc9088ac" "02" "483045022100caa585732cfc50226a90834a306d23d5d2ab1e94af2c66136a637e3d9bad3688022069028750908e53a663bb1f434fd655bcc0cf8d394c6fa1fd5a4983790135722e02232103596d3451025c" "19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ac" "00000000" ); } TEST(BitcoinSigning, SignP2WSH_HashSingle) { // Setup 
input const auto input = buildInputP2WSH(TWBitcoinSigHashTypeSingle); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_TRUE(result) << result.error(); auto signedTx = result.payload(); Data serialized; signedTx.encode(true, serialized); ASSERT_EQ(hex(serialized), "01000000" "0001" "01" "0001000000000000000000000000000000000000000000000000000000000000" "00000000" "00" "ffffffff" "01" "e803000000000000" "1976a914769bdff96a02f9135a1d19b749db6a78fe07dc9088ac" "02" "47304402201ba80b2c48fe82915297dc9782ae2141e40263001fafd21b02c04a092503f01e0220666d6c63475c6c52abd09371c200ac319bcf4a7c72eb3782e95790f5c847f0b903232103596d3451025c" "19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ac" "00000000" ); } TEST(BitcoinSigning, SignP2WSH_HashAnyoneCanPay) { // Setup input const auto input = buildInputP2WSH(TWBitcoinSigHashTypeAnyoneCanPay); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_TRUE(result) << result.error(); auto signedTx = result.payload(); Data serialized; signedTx.encode(true, serialized); ASSERT_EQ(hex(serialized), "01000000" "0001" "01" "0001000000000000000000000000000000000000000000000000000000000000" "00000000" "00" "ffffffff" "01" "e803000000000000" "1976a914769bdff96a02f9135a1d19b749db6a78fe07dc9088ac" "02" "47304402206fc6f499c9b0080dd444b410ca0599b59321e7891fc8e59ab215f6d2995b2e5f0220182466b434e91d14c9d247d3726d3c7f22a2a1cbf6c172314e1155b307f467b080232103596d3451025c" "19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc71ac" "00000000" ); } TEST(BitcoinSigning, SignP2WSH_NegativeMissingScript) { // Setup input, with omitted script const auto input = buildInputP2WSH(TWBitcoinSigHashTypeAll, true); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_FALSE(result) << result.error(); } TEST(BitcoinSigning, EncodeP2SH_P2WPKH) { auto unsignedTx = Transaction(1, 0x492); auto outpoint0 = 
OutPoint(parse_hex("db6b1b20aa0fd7b23880be2ecbd4a98130974cf4748fb66092ac4d3ceb1a5477"), 1); unsignedTx.inputs.emplace_back(outpoint0, Script(), 0xfffffffe); auto outScript0 = Script(parse_hex("76a914a457b684d7f0d539a46a45bbc043f35b59d0d96388ac")); unsignedTx.outputs.emplace_back(199'996'600, outScript0); auto outScript1 = Script(parse_hex("76a914fd270b1ee6abcaea97fea7ad0402e8bd8ad6d77c88ac")); unsignedTx.outputs.emplace_back(800'000'000, outScript1); auto unsignedData = std::vector<uint8_t>(); unsignedTx.encode(false, unsignedData); ASSERT_EQ(hex(unsignedData.begin(), unsignedData.end()), "" "01000000" "01" "db6b1b20aa0fd7b23880be2ecbd4a98130974cf4748fb66092ac4d3ceb1a54770100000000feffffff" "02" "b8b4eb0b000000001976a914a457b684d7f0d539a46a45bbc043f35b59d0d96388ac" "0008af2f000000001976a914fd270b1ee6abcaea97fea7ad0402e8bd8ad6d77c88ac" "92040000"); } TEST(BitcoinSigning, SignP2SH_P2WPKH) { // Setup input Proto::SigningInput input; input.set_hash_type(TWBitcoinSigHashTypeAll); input.set_amount(200'000'000); input.set_byte_fee(1); input.set_to_address("1Bp9U1ogV3A14FMvKbRJms7ctyso4Z4Tcx"); input.set_change_address("1FQc5LdgGHMHEN9nwkjmz6tWkxhPpxBvBU"); auto utxoKey0 = PrivateKey(parse_hex("eb696a065ef48a2192da5b28b694f87544b30fae8327c4510137a922f32c6dcf")); auto pubKey0 = utxoKey0.getPublicKey(TWPublicKeyTypeSECP256k1); auto utxoPubkeyHash = Hash::ripemd(Hash::sha256(pubKey0.bytes)); ASSERT_EQ(hex(utxoPubkeyHash.begin(), utxoPubkeyHash.end()), "79091972186c449eb1ded22b78e40d009bdf0089"); input.add_private_key(utxoKey0.bytes.data(), utxoKey0.bytes.size()); auto redeemScript = Script::buildPayToWitnessPublicKeyHash(utxoPubkeyHash); auto scriptHash = Hash::ripemd(Hash::sha256(redeemScript.bytes)); ASSERT_EQ(hex(scriptHash.begin(), scriptHash.end()), "4733f37cf4db86fbc2efed2500b4f4e49f312023"); auto scriptString = std::string(redeemScript.bytes.begin(), redeemScript.bytes.end()); (*input.mutable_scripts())[hex(scriptHash.begin(), scriptHash.end())] = scriptString; auto 
utxo0 = input.add_utxo(); auto utxo0Script = Script(parse_hex("a9144733f37cf4db86fbc2efed2500b4f4e49f31202387")); utxo0->set_script(utxo0Script.bytes.data(), utxo0Script.bytes.size()); utxo0->set_amount(1000'000'000); auto hash0 = DATA("db6b1b20aa0fd7b23880be2ecbd4a98130974cf4748fb66092ac4d3ceb1a5477"); utxo0->mutable_out_point()->set_hash(TWDataBytes(hash0.get()), TWDataSize(hash0.get())); utxo0->mutable_out_point()->set_index(1); utxo0->mutable_out_point()->set_sequence(UINT32_MAX); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_TRUE(result) << result.error(); auto signedTx = result.payload(); // txid = "060046204220fd00b81fd6426e391acb9670d1e61e8f0224f37276cc34f49e8c" // witid = "3911b16643972437d27a759b5647a552c7a2e433364b531374f3761967bf8fd7" Data serialized; signedTx.encode(true, serialized); ASSERT_EQ(hex(serialized), "01000000000101db6b1b20aa0fd7b23880be2ecbd4a98130974cf4748fb66092ac4d3ceb1a5477010000001716001479091972186c449eb1ded22b78e40d009bdf0089ffffffff0200c2eb0b000000001976a914769bdff96a02f9135a1d19b749db6a78fe07dc9088ac1e07af2f000000001976a9149e089b6889e032d46e3b915a3392edfd616fb1c488ac02473044022009195d870ecc40f54130008e392904e77d32b738c1add19d1d8ebba4edf812e602204f49de6dc60d9a3c3703e1e642942f8834f3a2cd81a6562a34b293942ce42f40012103ad1d8e89212f0b92c74d23bb710c00662ad1470198ac48c43f7d6f93a2a2687300000000"); } TEST(BitcoinSigning, EncodeP2SH_P2WSH) { auto unsignedTx = Transaction(1, 0); auto hash0 = parse_hex("36641869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e"); auto outpoint0 = OutPoint(hash0, 1); unsignedTx.inputs.emplace_back(outpoint0, Script(), 0xffffffff); auto outScript0 = Script(parse_hex("76a914389ffce9cd9ae88dcc0631e88a821ffdbe9bfe2688ac")); unsignedTx.outputs.emplace_back(0x0000000035a4e900, outScript0); auto outScript1 = Script(parse_hex("76a9147480a33f950689af511e6e84c138dbbd3c3ee41588ac")); unsignedTx.outputs.emplace_back(0x00000000052f83c0, outScript1); auto 
unsignedData = std::vector<uint8_t>(); unsignedTx.encode(false, unsignedData); ASSERT_EQ(hex(unsignedData.begin(), unsignedData.end()), "" "01000000" "01" "36641869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e0100000000ffffffff" "02" "00e9a435000000001976a914389ffce9cd9ae88dcc0631e88a821ffdbe9bfe2688ac" "c0832f05000000001976a9147480a33f950689af511e6e84c138dbbd3c3ee41588ac" "00000000"); } TEST(BitcoinSigning, SignP2SH_P2WSH) { auto emptyScript = Script(); auto unsignedTx = Transaction(1, 0); auto outpoint0 = OutPoint(parse_hex("36641869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e"), 1); unsignedTx.inputs.emplace_back(outpoint0, emptyScript, 0xffffffff); auto outScript0 = Script(parse_hex("76a914389ffce9cd9ae88dcc0631e88a821ffdbe9bfe2688ac")); unsignedTx.outputs.emplace_back(0x0000000035a4e900, outScript0); auto outScript1 = Script(parse_hex("76a9147480a33f950689af511e6e84c138dbbd3c3ee41588ac")); unsignedTx.outputs.emplace_back(0x00000000052f83c0, outScript1); // Setup signing input auto input = Proto::SigningInput(); auto key0 = parse_hex("730fff80e1413068a05b57d6a58261f07551163369787f349438ea38ca80fac6"); input.add_private_key(key0.data(), key0.size()); auto key1 = parse_hex("11fa3d25a17cbc22b29c44a484ba552b5a53149d106d3d853e22fdd05a2d8bb3"); input.add_private_key(key1.data(), key1.size()); auto key2 = parse_hex("77bf4141a87d55bdd7f3cd0bdccf6e9e642935fec45f2f30047be7b799120661"); input.add_private_key(key2.data(), key2.size()); auto key3 = parse_hex("14af36970f5025ea3e8b5542c0f8ebe7763e674838d08808896b63c3351ffe49"); input.add_private_key(key3.data(), key3.size()); auto key4 = parse_hex("fe9a95c19eef81dde2b95c1284ef39be497d128e2aa46916fb02d552485e0323"); input.add_private_key(key4.data(), key4.size()); auto key5 = parse_hex("428a7aee9f0c2af0cd19af3cf1c78149951ea528726989b2e83e4778d2c3f890"); input.add_private_key(key5.data(), key5.size()); auto redeemScript = 
Script::buildPayToWitnessScriptHash(parse_hex("a16b5755f7f6f96dbd65f5f0d6ab9418b89af4b1f14a1bb8a09062c35f0dcb54")); auto scriptHash = Hash::ripemd(Hash::sha256(redeemScript.bytes)); auto scriptString = std::string(redeemScript.bytes.begin(), redeemScript.bytes.end()); (*input.mutable_scripts())[hex(scriptHash.begin(), scriptHash.end())] = scriptString; auto witnessScript = Script(parse_hex("" "56" "210307b8ae49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba3" "2103b28f0c28bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b" "21034b8113d703413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a" "21033400f6afecb833092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f4" "2103a6d48b1131e94ba04d9737d61acdaa1322008af9602b3b14862c07a1789aac16" "2102d8b661b0b3302ee2f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b" "56ae" )); auto witnessScriptHash = Hash::ripemd(Hash::sha256(witnessScript.bytes)); auto witnessScriptString = std::string(witnessScript.bytes.begin(), witnessScript.bytes.end()); (*input.mutable_scripts())[hex(witnessScriptHash.begin(), witnessScriptHash.end())] = witnessScriptString; auto utxo0Script = Script(parse_hex("a9149993a429037b5d912407a71c252019287b8d27a587")); auto utxo = input.add_utxo(); utxo->mutable_out_point()->set_hash(outpoint0.hash.data(), outpoint0.hash.size()); utxo->mutable_out_point()->set_index(outpoint0.index); utxo->mutable_out_point()->set_sequence(UINT32_MAX); utxo->set_script(utxo0Script.bytes.data(), utxo0Script.bytes.size()); utxo->set_amount(987654321); // Sign auto signer = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)); signer.transaction = unsignedTx; signer.plan.utxos = {*utxo}; auto result = signer.sign(); ASSERT_TRUE(result) << result.error(); auto signedTx = result.payload(); auto expected = "" "01000000" "0001" "01" "36641869ca081e70f394c6948e8af409e18b619df2ed74aa106c1ca29787b96e0100000023220020a16b5755f7f6f96dbd65f5f0d6ab9418b89af4b1f14a1bb8a09062c35f0dcb54ffffffff" "02" "00e9a43500000000" 
"1976a914389ffce9cd9ae88dcc0631e88a821ffdbe9bfe2688ac" "c0832f0500000000" "1976a9147480a33f950689af511e6e84c138dbbd3c3ee41588ac" "08" "00" "47304402201992f5426ae0bab04cf206d7640b7e00410297bfe5487637f6c2427ee8496be002204ad4e64dc2d269f593cc4820db1fc1e8dc34774f602945115ce485940e05c64200" "47304402201e412363fa554b994528fd44149f3985b18bb901289ef6b71105b27c7d0e336c0220595e4a1e67154337757562ed5869127533e3e5084c3c2e128518f5f0b85b721800" "473044022003b0a20ccf545b3f12c5ade10db8717e97b44da2e800387adfd82c95caf529d902206aee3a2395530d52f476d0ddd9d20ba062820ae6f4e1be4921c3630395743ad900" "483045022100ed7a0eeaf72b84351bceac474b0c0510f67065b1b334f77e6843ed102f968afe022004d97d0cfc4bf5651e46487d6f87bd4af6aef894459f9778f2293b0b2c5b7bc700" "483045022100934a0c364820588154aed2d519cbcc61969d837b91960f4abbf0e374f03aa39d022036b5c58b754bd44cb5c7d34806c89d9778ea1a1c900618a841e9fbfbe805ff9b00" "473044022044e3b59b06931d46f857c82fa1d53d89b116a40a581527eac35c5eb5b7f0785302207d0f8b5d063ffc6749fb4e133db7916162b540c70dee40ec0b21e142d8843b3a00" "cf56" "210307b8ae49ac90a048e9b53357a2354b3334e9c8bee813ecb98e99a7e07e8c3ba3" "2103b28f0c28bfab54554ae8c658ac5c3e0ce6e79ad336331f78c428dd43eea8449b" "21034b8113d703413d57761b8b9781957b8c0ac1dfe69f492580ca4195f50376ba4a" "21033400f6afecb833092a9a21cfdf1ed1376e58c5d1f47de74683123987e967a8f4" "2103a6d48b1131e94ba04d9737d61acdaa1322008af9602b3b14862c07a1789aac16" "2102d8b661b0b3302ee2f162b09e07a55ad5dfbe673a9f01d9f0c19617681024306b" "56ae" "00000000"; auto serialized = std::vector<uint8_t>(); signedTx.encode(true, serialized); ASSERT_EQ(hex(serialized.begin(), serialized.end()), expected); } TEST(BitcoinSigning, Sign_NegativeNoUtxos) { auto hash0 = parse_hex("fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f"); auto hash1 = parse_hex("ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a"); // Setup input Proto::SigningInput input; input.set_hash_type(TWBitcoinSigHashTypeAll); input.set_amount(335'790'000); input.set_byte_fee(1); 
input.set_to_address("1Bp9U1ogV3A14FMvKbRJms7ctyso4Z4Tcx"); input.set_change_address("1FQc5LdgGHMHEN9nwkjmz6tWkxhPpxBvBU"); auto scriptPub1 = Script(parse_hex("00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1")); auto scriptHash = std::vector<uint8_t>(); scriptPub1.matchPayToWitnessPublicKeyHash(scriptHash); auto scriptHashHex = hex(scriptHash.begin(), scriptHash.end()); ASSERT_EQ(scriptHashHex, "1d0f172a0ecb48aee1be1f2687d2963ae33f71a1"); auto redeemScript = Script::buildPayToPublicKeyHash(scriptHash); auto scriptString = std::string(redeemScript.bytes.begin(), redeemScript.bytes.end()); (*input.mutable_scripts())[scriptHashHex] = scriptString; // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); // Fails as there are 0 utxos ASSERT_FALSE(result) << result.error(); } TEST(BitcoinSigning, Sign_NegativeInvalidAddress) { auto hash0 = parse_hex("fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f"); auto hash1 = parse_hex("ef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a"); // Setup input Proto::SigningInput input; input.set_hash_type(TWBitcoinSigHashTypeAll); input.set_amount(335'790'000); input.set_byte_fee(1); input.set_to_address("THIS-IS-NOT-A-BITCOIN-ADDRESS"); input.set_change_address("THIS-IS-NOT-A-BITCOIN-ADDRESS-EITHER"); auto utxoKey0 = parse_hex("bbc27228ddcb9209d7fd6f36b02f7dfa6252af40bb2f1cbc7a557da8027ff866"); input.add_private_key(utxoKey0.data(), utxoKey0.size()); auto utxoKey1 = parse_hex("619c335025c7f4012e556c2a58b2506e30b8511b53ade95ea316fd8c3286feb9"); input.add_private_key(utxoKey1.data(), utxoKey1.size()); auto scriptPub1 = Script(parse_hex("00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1")); auto scriptHash = std::vector<uint8_t>(); scriptPub1.matchPayToWitnessPublicKeyHash(scriptHash); auto scriptHashHex = hex(scriptHash.begin(), scriptHash.end()); ASSERT_EQ(scriptHashHex, "1d0f172a0ecb48aee1be1f2687d2963ae33f71a1"); auto redeemScript = 
Script::buildPayToPublicKeyHash(scriptHash); auto scriptString = std::string(redeemScript.bytes.begin(), redeemScript.bytes.end()); (*input.mutable_scripts())[scriptHashHex] = scriptString; auto utxo0 = input.add_utxo(); auto utxo0Script = parse_hex("2103c9f4836b9a4f77fc0d81f7bcb01b7f1b35916864b9476c241ce9fc198bd25432ac"); utxo0->set_script(utxo0Script.data(), utxo0Script.size()); utxo0->set_amount(625'000'000); utxo0->mutable_out_point()->set_hash(hash0.data(), hash0.size()); utxo0->mutable_out_point()->set_index(0); utxo0->mutable_out_point()->set_sequence(UINT32_MAX); auto utxo1 = input.add_utxo(); auto utxo1Script = parse_hex("00141d0f172a0ecb48aee1be1f2687d2963ae33f71a1"); utxo1->set_script(utxo1Script.data(), utxo1Script.size()); utxo1->set_amount(600'000'000); utxo1->mutable_out_point()->set_hash(hash1.data(), hash1.size()); utxo1->mutable_out_point()->set_index(1); utxo1->mutable_out_point()->set_sequence(UINT32_MAX); // Sign auto result = TransactionSigner<Transaction, TransactionBuilder>(std::move(input)).sign(); ASSERT_FALSE(result) << result.error(); }
#include <MBE/Core/PixelMask.h>

#include <cmath>
#include <cstddef>
#include <memory>
#include <stdexcept>

using namespace mbe;

// Builds the mask straight from the texture's pixel data.
PixelMask::PixelMask(const sf::Texture & texture) :
	pixelMaskSize(texture.getSize())
{
	this->CreateMaskFromTexture(texture);
}

// Builds the mask from the sprite's underlying texture.
// NOTE(review): assumes the sprite has a texture assigned; sf::Sprite::getTexture()
// can return nullptr for a texture-less sprite — confirm callers guarantee this.
PixelMask::PixelMask(const sf::Sprite & sprite) :
	pixelMaskSize(sprite.getTexture()->getSize())
{
	this->CreateMaskFromTexture(*sprite.getTexture());
}

// Builds the mask for a rectangle shape by rendering it to an off-screen texture first.
PixelMask::PixelMask(const sf::RectangleShape & rectangle) :
	pixelMaskSize(rectangle.getSize())
{
	// Convert the rectangle to a texture by drawing it into a render texture
	sf::RenderTexture renderTexture;
	if (!renderTexture.create(static_cast<unsigned int>(rectangle.getSize().x), static_cast<unsigned int>(rectangle.getSize().y)))
		throw std::runtime_error("PixelMask: Failed to create the render texture");

	// Draw a copy positioned at the origin so the shape fills the texture exactly
	sf::RectangleShape tempRectangle(rectangle);
	tempRectangle.setPosition(0.0f, 0.0f);
	renderTexture.draw(tempRectangle);
	renderTexture.display();

	CreateMaskFromTexture(renderTexture.getTexture());
}

// Returns true when the mask has an opaque pixel at the given (rounded) position.
bool PixelMask::Contains(sf::Vector2f point) const
{
	// Round via a signed type first: casting a negative float directly to an
	// unsigned integer (as the previous version did) is undefined behaviour,
	// and the old `x >= 0` test on an unsigned value was always true.
	const long long x = std::llround(point.x);
	const long long y = std::llround(point.y);

	// Reject points outside the mask
	if (x < 0 || y < 0 || x >= static_cast<long long>(pixelMaskSize.x) || y >= static_cast<long long>(pixelMaskSize.y))
		return false;

	// Get the pixel using the formula: y * width + x
	return pixelMask[static_cast<std::size_t>(y) * pixelMaskSize.x + static_cast<std::size_t>(x)];
}

// Returns true when the mask has an opaque pixel at the given position,
// interpreted relative to the given subrect of the mask.
// Throws std::runtime_error when the subrect does not lie fully inside the mask.
bool PixelMask::Contains(sf::Vector2f point, const sf::IntRect & subRect) const
{
	// Signed rounding avoids the unsigned-cast UB on negative coordinates
	// (the previous version cast to unsigned and stored the result in an int)
	const long long x = std::llround(point.x);
	const long long y = std::llround(point.y);

	// Check whether the subrect lies fully within the pixel mask
	// (now also rejects a negative origin, which would otherwise index out of bounds)
	if (subRect.left < 0 || subRect.top < 0
		|| subRect.left + subRect.width > static_cast<int>(pixelMaskSize.x)
		|| subRect.top + subRect.height > static_cast<int>(pixelMaskSize.y))
		throw std::runtime_error("PixelMask: The subrect does not lie fully within the pixel mask");

	// Check whether the point lies in the subrect. Valid local indices are
	// 0..width-1 / 0..height-1; the previous `x > subRect.width` test was off
	// by one and could read one column/row past the subrect.
	if (x < 0 || y < 0 || x >= subRect.width || y >= subRect.height)
		return false;

	// pixelMask[x + subRect.left][y + subRect.top] using the formula: y * width + x
	return pixelMask[(static_cast<std::size_t>(y) + subRect.top) * pixelMaskSize.x + (static_cast<std::size_t>(x) + subRect.left)];
}

// Fills pixelMask with one boolean per texel of the texture:
// true where the texel has any opacity, false where it is fully transparent.
void PixelMask::CreateMaskFromTexture(const sf::Texture & texture)
{
	// Download the texture's pixel data into CPU-accessible memory
	const sf::Image image(texture.copyToImage());

	// Allocate one flag per pixel, stored row-major (y * width + x)
	pixelMask = std::make_unique<bool[]>(static_cast<std::size_t>(pixelMaskSize.x) * pixelMaskSize.y);

	// Loop through every pixel of the texture
	for (unsigned int y = 0; y < pixelMaskSize.y; y++)
	{
		for (unsigned int x = 0; x < pixelMaskSize.x; x++)
		{
			// A pixel counts as "solid" when it is not fully transparent
			pixelMask[static_cast<std::size_t>(y) * pixelMaskSize.x + x] = image.getPixel(x, y).a > 0;
		}
	}
}
// Copyright (c) 2011-2015 The Bitcoin Core developers
// Copyright (c) 2014-2017 The Dash Core developers
// Copyright (c) 2018-2018 The Essence Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "sendcoinsdialog.h"
#include "ui_sendcoinsdialog.h"

#include "addresstablemodel.h"
#include "bitcoinunits.h"
#include "clientmodel.h"
#include "coincontroldialog.h"
#include "guiutil.h"
#include "optionsmodel.h"
#include "platformstyle.h"
#include "sendcoinsentry.h"
#include "walletmodel.h"

#include "base58.h"
#include "coincontrol.h"
#include "main.h" // mempool and minRelayTxFee
#include "ui_interface.h"
#include "txmempool.h"
#include "wallet/wallet.h"

#include "darksend.h"

#include <QMessageBox>
#include <QScrollBar>
#include <QSettings>
#include <QTextDocument>

// Constructs the dialog: builds the UI, wires up all signal/slot connections,
// restores the persisted PrivateSend/InstantSend checkboxes and the transaction
// fee settings from QSettings, and adds the first (empty) recipient entry.
SendCoinsDialog::SendCoinsDialog(const PlatformStyle *platformStyle, QWidget *parent) :
    QDialog(parent),
    ui(new Ui::SendCoinsDialog),
    clientModel(0),
    model(0),
    fNewRecipientAllowed(true),
    fFeeMinimized(true),
    platformStyle(platformStyle)
{
    ui->setupUi(this);
    QString theme = GUIUtil::getThemeName();

    if (!platformStyle->getImagesOnButtons()) {
        ui->addButton->setIcon(QIcon());
        ui->clearButton->setIcon(QIcon());
        ui->sendButton->setIcon(QIcon());
    } else {
        ui->addButton->setIcon(QIcon(":/icons/" + theme + "/add"));
        ui->clearButton->setIcon(QIcon(":/icons/" + theme + "/remove"));
        ui->sendButton->setIcon(QIcon(":/icons/" + theme + "/send"));
    }

    GUIUtil::setupAddressWidget(ui->lineEditCoinControlChange, this);

    addEntry();

    connect(ui->addButton, SIGNAL(clicked()), this, SLOT(addEntry()));
    connect(ui->clearButton, SIGNAL(clicked()), this, SLOT(clear()));

    // Coin Control
    connect(ui->pushButtonCoinControl, SIGNAL(clicked()), this, SLOT(coinControlButtonClicked()));
    connect(ui->checkBoxCoinControlChange, SIGNAL(stateChanged(int)), this, SLOT(coinControlChangeChecked(int)));
    connect(ui->lineEditCoinControlChange, SIGNAL(textEdited(const QString &)), this, SLOT(coinControlChangeEdited(const QString &)));

    // Essence specific
    // Seed the PrivateSend/InstantSend settings with defaults on first run
    QSettings settings;
    if (!settings.contains("bUseDarkSend"))
        settings.setValue("bUseDarkSend", false);
    if (!settings.contains("bUseInstantX"))
        settings.setValue("bUseInstantX", false);

    bool fUsePrivateSend = settings.value("bUseDarkSend").toBool();
    bool fUseInstantSend = settings.value("bUseInstantX").toBool();
    // In lite mode the mixing features are unavailable, so force them off and hide the UI
    if(fLiteMode) {
        ui->checkUsePrivateSend->setChecked(false);
        ui->checkUsePrivateSend->setVisible(false);
        ui->checkUseInstantSend->setVisible(false);
        CoinControlDialog::coinControl->fUsePrivateSend = false;
        CoinControlDialog::coinControl->fUseInstantSend = false;
    } else{
        ui->checkUsePrivateSend->setChecked(fUsePrivateSend);
        ui->checkUseInstantSend->setChecked(fUseInstantSend);
        CoinControlDialog::coinControl->fUsePrivateSend = fUsePrivateSend;
        CoinControlDialog::coinControl->fUseInstantSend = fUseInstantSend;
    }

    connect(ui->checkUsePrivateSend, SIGNAL(stateChanged ( int )), this, SLOT(updateDisplayUnit()));
    connect(ui->checkUseInstantSend, SIGNAL(stateChanged ( int )), this, SLOT(updateInstantSend()));

    // Coin Control: clipboard actions
    QAction *clipboardQuantityAction = new QAction(tr("Copy quantity"), this);
    QAction *clipboardAmountAction = new QAction(tr("Copy amount"), this);
    QAction *clipboardFeeAction = new QAction(tr("Copy fee"), this);
    QAction *clipboardAfterFeeAction = new QAction(tr("Copy after fee"), this);
    QAction *clipboardBytesAction = new QAction(tr("Copy bytes"), this);
    QAction *clipboardPriorityAction = new QAction(tr("Copy priority"), this);
    QAction *clipboardLowOutputAction = new QAction(tr("Copy dust"), this);
    QAction *clipboardChangeAction = new QAction(tr("Copy change"), this);
    connect(clipboardQuantityAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardQuantity()));
    connect(clipboardAmountAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardAmount()));
    connect(clipboardFeeAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardFee()));
    connect(clipboardAfterFeeAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardAfterFee()));
    connect(clipboardBytesAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardBytes()));
    connect(clipboardPriorityAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardPriority()));
    connect(clipboardLowOutputAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardLowOutput()));
    connect(clipboardChangeAction, SIGNAL(triggered()), this, SLOT(coinControlClipboardChange()));
    ui->labelCoinControlQuantity->addAction(clipboardQuantityAction);
    ui->labelCoinControlAmount->addAction(clipboardAmountAction);
    ui->labelCoinControlFee->addAction(clipboardFeeAction);
    ui->labelCoinControlAfterFee->addAction(clipboardAfterFeeAction);
    ui->labelCoinControlBytes->addAction(clipboardBytesAction);
    ui->labelCoinControlPriority->addAction(clipboardPriorityAction);
    ui->labelCoinControlLowOutput->addAction(clipboardLowOutputAction);
    ui->labelCoinControlChange->addAction(clipboardChangeAction);

    // init transaction fee section
    // (defaults are seeded on first run; the "compatibility" branches migrate a
    // pre-existing custom nTransactionFee into the newer radio-button settings)
    if (!settings.contains("fFeeSectionMinimized"))
        settings.setValue("fFeeSectionMinimized", true);
    if (!settings.contains("nFeeRadio") && settings.contains("nTransactionFee") && settings.value("nTransactionFee").toLongLong() > 0) // compatibility
        settings.setValue("nFeeRadio", 1); // custom
    if (!settings.contains("nFeeRadio"))
        settings.setValue("nFeeRadio", 0); // recommended
    if (!settings.contains("nCustomFeeRadio") && settings.contains("nTransactionFee") && settings.value("nTransactionFee").toLongLong() > 0) // compatibility
        settings.setValue("nCustomFeeRadio", 1); // total at least
    if (!settings.contains("nCustomFeeRadio"))
        settings.setValue("nCustomFeeRadio", 0); // per kilobyte
    if (!settings.contains("nSmartFeeSliderPosition"))
        settings.setValue("nSmartFeeSliderPosition", 0);
    if (!settings.contains("nTransactionFee"))
        settings.setValue("nTransactionFee", (qint64)DEFAULT_TRANSACTION_FEE);
    if (!settings.contains("fPayOnlyMinFee"))
        settings.setValue("fPayOnlyMinFee", false);
    if (!settings.contains("fSendFreeTransactions"))
        settings.setValue("fSendFreeTransactions", false);
    ui->groupFee->setId(ui->radioSmartFee, 0);
    ui->groupFee->setId(ui->radioCustomFee, 1);
    // clamp persisted radio indices to the valid range {0, 1}
    ui->groupFee->button((int)std::max(0, std::min(1, settings.value("nFeeRadio").toInt())))->setChecked(true);
    ui->groupCustomFee->setId(ui->radioCustomPerKilobyte, 0);
    ui->groupCustomFee->setId(ui->radioCustomAtLeast, 1);
    ui->groupCustomFee->button((int)std::max(0, std::min(1, settings.value("nCustomFeeRadio").toInt())))->setChecked(true);
    ui->sliderSmartFee->setValue(settings.value("nSmartFeeSliderPosition").toInt());
    ui->customFee->setValue(settings.value("nTransactionFee").toLongLong());
    ui->checkBoxMinimumFee->setChecked(settings.value("fPayOnlyMinFee").toBool());
    ui->checkBoxFreeTx->setChecked(settings.value("fSendFreeTransactions").toBool());
    minimizeFeeSection(settings.value("fFeeSectionMinimized").toBool());
}

// Attaches the client model; used to refresh the smart fee label as new blocks arrive.
void SendCoinsDialog::setClientModel(ClientModel *clientModel)
{
    this->clientModel = clientModel;

    if (clientModel) {
        connect(clientModel, SIGNAL(numBlocksChanged(int,QDateTime,double)), this, SLOT(updateSmartFeeLabel()));
    }
}

// Attaches the wallet model: propagates it to every recipient entry, shows the
// current balance, and wires up balance/display-unit/coin-control/fee signals.
void SendCoinsDialog::setModel(WalletModel *model)
{
    this->model = model;

    if(model && model->getOptionsModel())
    {
        for(int i = 0; i < ui->entries->count(); ++i)
        {
            SendCoinsEntry *entry = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget());
            if(entry)
            {
                entry->setModel(model);
            }
        }

        setBalance(model->getBalance(), model->getUnconfirmedBalance(), model->getImmatureBalance(),
                   model->getAnonymizedBalance(), model->getWatchBalance(), model->getWatchUnconfirmedBalance(),
                   model->getWatchImmatureBalance());
        connect(model, SIGNAL(balanceChanged(CAmount,CAmount,CAmount,CAmount,CAmount,CAmount,CAmount)), this, SLOT(setBalance(CAmount,CAmount,CAmount,CAmount,CAmount,CAmount,CAmount)));
        connect(model->getOptionsModel(), SIGNAL(displayUnitChanged(int)), this, SLOT(updateDisplayUnit()));
        updateDisplayUnit();

        // Coin Control
        connect(model->getOptionsModel(), SIGNAL(displayUnitChanged(int)), this, SLOT(coinControlUpdateLabels()));
        connect(model->getOptionsModel(), SIGNAL(coinControlFeaturesChanged(bool)), this, SLOT(coinControlFeatureChanged(bool)));
        ui->frameCoinControl->setVisible(model->getOptionsModel()->getCoinControlFeatures());
        coinControlUpdateLabels();

        // fee section
        connect(ui->sliderSmartFee, SIGNAL(valueChanged(int)), this, SLOT(updateSmartFeeLabel()));
        connect(ui->sliderSmartFee, SIGNAL(valueChanged(int)), this, SLOT(updateGlobalFeeVariables()));
        connect(ui->sliderSmartFee, SIGNAL(valueChanged(int)), this, SLOT(coinControlUpdateLabels()));
        connect(ui->groupFee, SIGNAL(buttonClicked(int)), this, SLOT(updateFeeSectionControls()));
        connect(ui->groupFee, SIGNAL(buttonClicked(int)), this, SLOT(updateGlobalFeeVariables()));
        connect(ui->groupFee, SIGNAL(buttonClicked(int)), this, SLOT(coinControlUpdateLabels()));
        connect(ui->groupCustomFee, SIGNAL(buttonClicked(int)), this, SLOT(updateGlobalFeeVariables()));
        connect(ui->groupCustomFee, SIGNAL(buttonClicked(int)), this, SLOT(coinControlUpdateLabels()));
        connect(ui->customFee, SIGNAL(valueChanged()), this, SLOT(updateGlobalFeeVariables()));
        connect(ui->customFee, SIGNAL(valueChanged()), this, SLOT(coinControlUpdateLabels()));
        connect(ui->checkBoxMinimumFee, SIGNAL(stateChanged(int)), this, SLOT(setMinimumFee()));
        connect(ui->checkBoxMinimumFee, SIGNAL(stateChanged(int)), this, SLOT(updateFeeSectionControls()));
        connect(ui->checkBoxMinimumFee, SIGNAL(stateChanged(int)), this, SLOT(updateGlobalFeeVariables()));
        connect(ui->checkBoxMinimumFee, SIGNAL(stateChanged(int)), this, SLOT(coinControlUpdateLabels()));
        connect(ui->checkBoxFreeTx, SIGNAL(stateChanged(int)), this, SLOT(updateGlobalFeeVariables()));
        connect(ui->checkBoxFreeTx, SIGNAL(stateChanged(int)), this, SLOT(coinControlUpdateLabels()));
        ui->customFee->setSingleStep(CWallet::GetRequiredFee(1000));
        updateFeeSectionControls();
        updateMinFeeLabel();
        updateSmartFeeLabel();
        updateGlobalFeeVariables();
    }
}

// Persists the current fee-section UI state to QSettings and frees the UI.
SendCoinsDialog::~SendCoinsDialog()
{
    QSettings settings;
    settings.setValue("fFeeSectionMinimized", fFeeMinimized);
    settings.setValue("nFeeRadio", ui->groupFee->checkedId());
    settings.setValue("nCustomFeeRadio", ui->groupCustomFee->checkedId());
    settings.setValue("nSmartFeeSliderPosition", ui->sliderSmartFee->value());
    settings.setValue("nTransactionFee", (qint64)ui->customFee->value());
    settings.setValue("fPayOnlyMinFee", ui->checkBoxMinimumFee->isChecked());
    settings.setValue("fSendFreeTransactions", ui->checkBoxFreeTx->isChecked());

    delete ui;
}

// Validates all recipient entries, applies the PrivateSend/InstantSend options
// to the request, unlocks the wallet if necessary, and hands off to send().
void SendCoinsDialog::on_sendButton_clicked()
{
    if(!model || !model->getOptionsModel())
        return;

    QList<SendCoinsRecipient> recipients;
    bool valid = true;

    for(int i = 0; i < ui->entries->count(); ++i)
    {
        SendCoinsEntry *entry = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget());
        if(entry)
        {
            if(entry->validate())
            {
                recipients.append(entry->getValue());
            }
            else
            {
                valid = false;
            }
        }
    }

    if(!valid || recipients.isEmpty())
    {
        return;
    }

    // Input type / InstantSend flag are carried on the first recipient only
    QString strFunds = tr("using") + " <b>" + tr("anonymous funds") + "</b>";
    QString strFee = "";
    recipients[0].inputType = ONLY_DENOMINATED;

    if(ui->checkUsePrivateSend->isChecked()) {
        recipients[0].inputType = ONLY_DENOMINATED;
        strFunds = tr("using") + " <b>" + tr("anonymous funds") + "</b>";
        QString strNearestAmount(
            BitcoinUnits::formatWithUnit(
                model->getOptionsModel()->getDisplayUnit(), vecPrivateSendDenominations.back()));
        strFee = QString(tr(
            "(privatesend requires this amount to be rounded up to the nearest %1)."
        ).arg(strNearestAmount));
    } else {
        recipients[0].inputType = ALL_COINS;
        strFunds = tr("using") + " <b>" + tr("any available funds (not anonymous)") + "</b>";
    }

    if(ui->checkUseInstantSend->isChecked()) {
        recipients[0].fUseInstantSend = true;
        strFunds += " ";
        strFunds += tr("and InstantSend");
    } else {
        recipients[0].fUseInstantSend = false;
    }

    fNewRecipientAllowed = false;
    // request unlock only if was locked or unlocked for mixing:
    // this way we let users unlock by walletpassphrase or by menu
    // and make many transactions while unlocking through this dialog
    // will call relock
    WalletModel::EncryptionStatus encStatus = model->getEncryptionStatus();
    if(encStatus == model->Locked || encStatus == model->UnlockedForMixingOnly)
    {
        WalletModel::UnlockContext ctx(model->requestUnlock());
        if(!ctx.isValid())
        {
            // Unlock wallet was cancelled
            fNewRecipientAllowed = true;
            return;
        }
        // keep ctx alive across send() so the wallet stays unlocked
        send(recipients, strFee, strFunds);
        return;
    }
    // already unlocked or not encrypted at all
    send(recipients, strFee, strFunds);
}

// Prepares the transaction, shows a detailed confirmation dialog (recipients,
// fee, totals in all units), and commits it through the wallet model on Yes.
void SendCoinsDialog::send(QList<SendCoinsRecipient> recipients, QString strFee, QString strFunds)
{
    // prepare transaction for getting txFee earlier
    WalletModelTransaction currentTransaction(recipients);
    WalletModel::SendCoinsReturn prepareStatus;
    if (model->getOptionsModel()->getCoinControlFeatures()) // coin control enabled
        prepareStatus = model->prepareTransaction(currentTransaction, CoinControlDialog::coinControl);
    else
        prepareStatus = model->prepareTransaction(currentTransaction);

    // process prepareStatus and on error generate message shown to user
    processSendCoinsReturn(prepareStatus,
        BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), currentTransaction.getTransactionFee()));

    if(prepareStatus.status != WalletModel::OK) {
        fNewRecipientAllowed = true;
        return;
    }

    CAmount txFee = currentTransaction.getTransactionFee();

    // Format confirmation message
    QStringList formatted;
    Q_FOREACH(const SendCoinsRecipient &rcp, currentTransaction.getRecipients())
    {
        // generate bold amount string
        QString amount = "<b>" + BitcoinUnits::formatHtmlWithUnit(model->getOptionsModel()->getDisplayUnit(), rcp.amount);
        amount.append("</b> ").append(strFunds);

        // generate monospace address string
        QString address = "<span style='font-family: monospace;'>" + rcp.address;
        address.append("</span>");

        QString recipientElement;

        if (!rcp.paymentRequest.IsInitialized()) // normal payment
        {
            if(rcp.label.length() > 0) // label with address
            {
                recipientElement = tr("%1 to %2").arg(amount, GUIUtil::HtmlEscape(rcp.label));
                recipientElement.append(QString(" (%1)").arg(address));
            }
            else // just address
            {
                recipientElement = tr("%1 to %2").arg(amount, address);
            }
        }
        else if(!rcp.authenticatedMerchant.isEmpty()) // authenticated payment request
        {
            recipientElement = tr("%1 to %2").arg(amount, GUIUtil::HtmlEscape(rcp.authenticatedMerchant));
        }
        else // unauthenticated payment request
        {
            recipientElement = tr("%1 to %2").arg(amount, address);
        }

        formatted.append(recipientElement);
    }

    QString questionString = tr("Are you sure you want to send?");
    questionString.append("<br /><br />%1");

    if(txFee > 0)
    {
        // append fee string if a fee is required
        questionString.append("<hr /><span style='color:#aa0000;'>");
        questionString.append(BitcoinUnits::formatHtmlWithUnit(model->getOptionsModel()->getDisplayUnit(), txFee));
        questionString.append("</span> ");
        questionString.append(tr("are added as transaction fee"));
        questionString.append(" ");
        questionString.append(strFee);

        // append transaction size
        questionString.append(" (" + QString::number((double)currentTransaction.getTransactionSize() / 1000) + " kB)");
    }

    // add total amount in all subdivision units
    questionString.append("<hr />");
    CAmount totalAmount = currentTransaction.getTotalTransactionAmount() + txFee;
    QStringList alternativeUnits;
    Q_FOREACH(BitcoinUnits::Unit u, BitcoinUnits::availableUnits())
    {
        if(u != model->getOptionsModel()->getDisplayUnit())
            alternativeUnits.append(BitcoinUnits::formatHtmlWithUnit(u, totalAmount));
    }

    // Show total amount + all alternative units
    questionString.append(tr("Total Amount = <b>%1</b><br />= %2")
        .arg(BitcoinUnits::formatHtmlWithUnit(model->getOptionsModel()->getDisplayUnit(), totalAmount))
        .arg(alternativeUnits.join("<br />= ")));

    // Limit number of displayed entries
    int messageEntries = formatted.size();
    int displayedEntries = 0;
    for(int i = 0; i < formatted.size(); i++){
        if(i >= MAX_SEND_POPUP_ENTRIES){
            formatted.removeLast();
            i--;
        }
        else{
            displayedEntries = i+1;
        }
    }
    questionString.append("<hr />");
    questionString.append(tr("<b>(%1 of %2 entries displayed)</b>").arg(displayedEntries).arg(messageEntries));

    // Display message box
    QMessageBox::StandardButton retval = QMessageBox::question(this, tr("Confirm send coins"),
        questionString.arg(formatted.join("<br />")),
        QMessageBox::Yes | QMessageBox::Cancel,
        QMessageBox::Cancel);

    if(retval != QMessageBox::Yes)
    {
        fNewRecipientAllowed = true;
        return;
    }

    // now send the prepared transaction
    WalletModel::SendCoinsReturn sendStatus = model->sendCoins(currentTransaction);
    // process sendStatus and on error generate message shown to user
    processSendCoinsReturn(sendStatus);

    if (sendStatus.status == WalletModel::OK)
    {
        accept();
        CoinControlDialog::coinControl->UnSelectAll();
        coinControlUpdateLabels();
    }
    fNewRecipientAllowed = true;
}

// Removes all recipient entries and re-adds a single empty one.
void SendCoinsDialog::clear()
{
    // Remove entries until only one left
    while(ui->entries->count())
    {
        ui->entries->takeAt(0)->widget()->deleteLater();
    }
    addEntry();

    updateTabsAndLabels();
}

// Both closing paths reset the form rather than closing the embedded dialog.
void SendCoinsDialog::reject()
{
    clear();
}

void SendCoinsDialog::accept()
{
    clear();
}

// Appends a new recipient entry widget, wires its signals, focuses it and
// scrolls it into view. Returns the newly created entry.
SendCoinsEntry *SendCoinsDialog::addEntry()
{
    SendCoinsEntry *entry = new SendCoinsEntry(platformStyle, this);
    entry->setModel(model);
    ui->entries->addWidget(entry);
    connect(entry, SIGNAL(removeEntry(SendCoinsEntry*)), this, SLOT(removeEntry(SendCoinsEntry*)));
    connect(entry, SIGNAL(payAmountChanged()), this, SLOT(coinControlUpdateLabels()));
    connect(entry, SIGNAL(subtractFeeFromAmountChanged()), this, SLOT(coinControlUpdateLabels()));

    // Focus the field, so that entry can start immediately
    entry->clear();
    entry->setFocus();
    ui->scrollAreaWidgetContents->resize(ui->scrollAreaWidgetContents->sizeHint());
    // process pending events so the scroll area geometry is up to date before scrolling
    qApp->processEvents();
    QScrollBar* bar = ui->scrollArea->verticalScrollBar();
    if(bar)
        bar->setSliderPosition(bar->maximum());

    updateTabsAndLabels();
    return entry;
}

// Recomputes the tab order and the coin control labels after entries change.
void SendCoinsDialog::updateTabsAndLabels()
{
    setupTabChain(0);
    coinControlUpdateLabels();
}

// Removes a recipient entry, keeping at least one (empty) entry in the form.
void SendCoinsDialog::removeEntry(SendCoinsEntry* entry)
{
    entry->hide();

    // If the last entry is about to be removed add an empty one
    if (ui->entries->count() == 1)
        addEntry();

    entry->deleteLater();

    updateTabsAndLabels();
}

// Chains the tab order through every entry and then the dialog's buttons.
QWidget *SendCoinsDialog::setupTabChain(QWidget *prev)
{
    for(int i = 0; i < ui->entries->count(); ++i)
    {
        SendCoinsEntry *entry = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget());
        if(entry)
        {
            prev = entry->setupTabChain(prev);
        }
    }
    QWidget::setTabOrder(prev, ui->sendButton);
    QWidget::setTabOrder(ui->sendButton, ui->clearButton);
    QWidget::setTabOrder(ui->clearButton, ui->addButton);
    return ui->addButton;
}

// Fills an entry with the given address, reusing the first entry if it is unused.
void SendCoinsDialog::setAddress(const QString &address)
{
    SendCoinsEntry *entry = 0;
    // Replace the first entry if it is still unused
    if(ui->entries->count() == 1)
    {
        SendCoinsEntry *first = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(0)->widget());
        if(first->isClear())
        {
            entry = first;
        }
    }
    if(!entry)
    {
        entry = addEntry();
    }

    entry->setAddress(address);
}

// Fills an entry from a payment request / URI, unless a send is in progress.
void SendCoinsDialog::pasteEntry(const SendCoinsRecipient &rv)
{
    if(!fNewRecipientAllowed)
        return;

    SendCoinsEntry *entry = 0;
    // Replace the first entry if it is still unused
    if(ui->entries->count() == 1)
    {
        SendCoinsEntry *first = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(0)->widget());
        if(first->isClear())
        {
            entry = first;
        }
    }
    if(!entry)
    {
        entry = addEntry();
    }

    entry->setValue(rv);
    updateTabsAndLabels();
}

bool SendCoinsDialog::handlePaymentRequest(const SendCoinsRecipient &rv)
{
    // Just paste the entry, all pre-checks
    // are done in paymentserver.cpp.
    pasteEntry(rv);
    return true;
}

// Updates the balance label; shows the anonymized balance when PrivateSend is
// checked, the full balance otherwise. Also persists the checkbox state.
void SendCoinsDialog::setBalance(const CAmount& balance, const CAmount& unconfirmedBalance, const CAmount& immatureBalance, const CAmount& anonymizedBalance,
                                 const CAmount& watchBalance, const CAmount& watchUnconfirmedBalance, const CAmount& watchImmatureBalance)
{
    Q_UNUSED(unconfirmedBalance);
    Q_UNUSED(immatureBalance);
    // NOTE(review): anonymizedBalance is marked Q_UNUSED but IS read below when
    // PrivateSend is checked; the macro is harmless but misleading here.
    Q_UNUSED(anonymizedBalance);
    Q_UNUSED(watchBalance);
    Q_UNUSED(watchUnconfirmedBalance);
    Q_UNUSED(watchImmatureBalance);

    if(model && model->getOptionsModel())
    {
        uint64_t bal = 0;
        QSettings settings;
        settings.setValue("bUseDarkSend", ui->checkUsePrivateSend->isChecked());
        if(ui->checkUsePrivateSend->isChecked()) {
            bal = anonymizedBalance;
        } else {
            bal = balance;
        }

        ui->labelBalance->setText(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), bal));
    }
}

// Refreshes every amount shown in the dialog after a display-unit or
// PrivateSend-checkbox change.
void SendCoinsDialog::updateDisplayUnit()
{
    setBalance(model->getBalance(), model->getUnconfirmedBalance(), model->getImmatureBalance(),
               model->getAnonymizedBalance(), model->getWatchBalance(), model->getWatchUnconfirmedBalance(),
               model->getWatchImmatureBalance());
    CoinControlDialog::coinControl->fUsePrivateSend = ui->checkUsePrivateSend->isChecked();
    coinControlUpdateLabels();
    ui->customFee->setDisplayUnit(model->getOptionsModel()->getDisplayUnit());
    updateMinFeeLabel();
    updateSmartFeeLabel();
}

// Persists the InstantSend checkbox and propagates it to the coin control state.
void SendCoinsDialog::updateInstantSend()
{
    QSettings settings;
    settings.setValue("bUseInstantX", ui->checkUseInstantSend->isChecked());
    CoinControlDialog::coinControl->fUseInstantSend = ui->checkUseInstantSend->isChecked();
    coinControlUpdateLabels();
}

// Maps a WalletModel send/prepare status to a user-facing warning/error message
// and emits it; returns silently on WalletModel::OK.
void SendCoinsDialog::processSendCoinsReturn(const WalletModel::SendCoinsReturn &sendCoinsReturn, const QString &msgArg)
{
    QPair<QString, CClientUIInterface::MessageBoxFlags> msgParams;
    // Default to a warning message, override if error message is needed
    msgParams.second = CClientUIInterface::MSG_WARNING;

    // This comment is specific to SendCoinsDialog usage of WalletModel::SendCoinsReturn.
    // WalletModel::TransactionCommitFailed is used only in WalletModel::sendCoins()
    // all others are used only in WalletModel::prepareTransaction()
    switch(sendCoinsReturn.status)
    {
    case WalletModel::InvalidAddress:
        msgParams.first = tr("The recipient address is not valid. Please recheck.");
        break;
    case WalletModel::InvalidAmount:
        msgParams.first = tr("The amount to pay must be larger than 0.");
        break;
    case WalletModel::AmountExceedsBalance:
        msgParams.first = tr("The amount exceeds your balance.");
        break;
    case WalletModel::AmountWithFeeExceedsBalance:
        msgParams.first = tr("The total exceeds your balance when the %1 transaction fee is included.").arg(msgArg);
        break;
    case WalletModel::DuplicateAddress:
        msgParams.first = tr("Duplicate address found: addresses should only be used once each.");
        break;
    case WalletModel::TransactionCreationFailed:
        msgParams.first = tr("Transaction creation failed!");
        msgParams.second = CClientUIInterface::MSG_ERROR;
        break;
    case WalletModel::TransactionCommitFailed:
        msgParams.first = tr("The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.");
        msgParams.second = CClientUIInterface::MSG_ERROR;
        break;
    case WalletModel::AbsurdFee:
        msgParams.first = tr("A fee higher than %1 is considered an absurdly high fee.").arg(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), maxTxFee));
        break;
    case WalletModel::PaymentRequestExpired:
        msgParams.first = tr("Payment request expired.");
        msgParams.second = CClientUIInterface::MSG_ERROR;
        break;
    // included to prevent a compiler warning.
    case WalletModel::OK:
    default:
        return;
    }

    Q_EMIT message(tr("Send Coins"), msgParams.first, msgParams.second);
}

// Collapses/expands the fee selection section of the dialog.
void SendCoinsDialog::minimizeFeeSection(bool fMinimize)
{
    ui->labelFeeMinimized->setVisible(fMinimize);
    ui->buttonChooseFee ->setVisible(fMinimize);
    ui->buttonMinimizeFee->setVisible(!fMinimize);
    ui->frameFeeSelection->setVisible(!fMinimize);
    ui->horizontalLayoutSmartFee->setContentsMargins(0, (fMinimize ? 0 : 6), 0, 0);
    fFeeMinimized = fMinimize;
}

void SendCoinsDialog::on_buttonChooseFee_clicked()
{
    minimizeFeeSection(false);
}

void SendCoinsDialog::on_buttonMinimizeFee_clicked()
{
    updateFeeMinimizedLabel();
    minimizeFeeSection(true);
}

// Forces the custom fee to the minimum required per-kilobyte fee.
void SendCoinsDialog::setMinimumFee()
{
    ui->radioCustomPerKilobyte->setChecked(true);
    ui->customFee->setValue(CWallet::GetRequiredFee(1000));
}

// Enables/disables the smart-fee vs. custom-fee widgets to match the selected mode.
void SendCoinsDialog::updateFeeSectionControls()
{
    ui->sliderSmartFee ->setEnabled(ui->radioSmartFee->isChecked());
    ui->labelSmartFee ->setEnabled(ui->radioSmartFee->isChecked());
    ui->labelSmartFee2 ->setEnabled(ui->radioSmartFee->isChecked());
    ui->labelSmartFee3 ->setEnabled(ui->radioSmartFee->isChecked());
    ui->labelFeeEstimation ->setEnabled(ui->radioSmartFee->isChecked());
    ui->labelSmartFeeNormal ->setEnabled(ui->radioSmartFee->isChecked());
    ui->labelSmartFeeFast ->setEnabled(ui->radioSmartFee->isChecked());
    ui->checkBoxMinimumFee ->setEnabled(ui->radioCustomFee->isChecked());
    ui->labelMinFeeWarning ->setEnabled(ui->radioCustomFee->isChecked());
    ui->radioCustomPerKilobyte ->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked());
    ui->radioCustomAtLeast ->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked() && CoinControlDialog::coinControl->HasSelected());
    ui->customFee ->setEnabled(ui->radioCustomFee->isChecked() && !ui->checkBoxMinimumFee->isChecked());
}

// Pushes the fee UI state into the global fee variables used by the wallet
// (nTxConfirmTarget, payTxFee, fSendFreeTransactions, coin control minimum fee).
void SendCoinsDialog::updateGlobalFeeVariables()
{
    if (ui->radioSmartFee->isChecked())
    {
        nTxConfirmTarget = defaultConfirmTarget - ui->sliderSmartFee->value();
        payTxFee = CFeeRate(0);
    }
    else
    {
        nTxConfirmTarget = defaultConfirmTarget;
        payTxFee = CFeeRate(ui->customFee->value());

        // if user has selected to set a minimum absolute fee, pass the value to coincontrol
        // set nMinimumTotalFee to 0 in case of user has selected that the fee is per KB
        CoinControlDialog::coinControl->nMinimumTotalFee = ui->radioCustomAtLeast->isChecked() ? ui->customFee->value() : 0;
    }

    fSendFreeTransactions = ui->checkBoxFreeTx->isChecked();
}

// Updates the compact fee label shown when the fee section is minimized.
void SendCoinsDialog::updateFeeMinimizedLabel()
{
    if(!model || !model->getOptionsModel())
        return;

    if (ui->radioSmartFee->isChecked())
        ui->labelFeeMinimized->setText(ui->labelSmartFee->text());
    else {
        ui->labelFeeMinimized->setText(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), ui->customFee->value()) +
            ((ui->radioCustomPerKilobyte->isChecked()) ? "/kB" : ""));
    }
}

// Shows the minimum required fee on the "pay only the required fee" checkbox.
void SendCoinsDialog::updateMinFeeLabel()
{
    if (model && model->getOptionsModel())
        ui->checkBoxMinimumFee->setText(tr("Pay only the required fee of %1").arg(
            BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), CWallet::GetRequiredFee(1000)) + "/kB")
        );
}

// Recomputes the smart fee estimate for the slider's confirmation target and
// updates the associated labels; falls back to the minimum fee without data.
void SendCoinsDialog::updateSmartFeeLabel()
{
    if(!model || !model->getOptionsModel())
        return;

    int nBlocksToConfirm = defaultConfirmTarget - ui->sliderSmartFee->value();
    int estimateFoundAtBlocks = nBlocksToConfirm;
    CFeeRate feeRate = mempool.estimateSmartFee(nBlocksToConfirm, &estimateFoundAtBlocks);
    if (feeRate <= CFeeRate(0)) // not enough data => minfee
    {
        ui->labelSmartFee->setText(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), std::max(CWallet::fallbackFee.GetFeePerK(), CWallet::GetRequiredFee(1000))) + "/kB");
        ui->labelSmartFee2->show(); // (Smart fee not initialized yet. This usually takes a few blocks...)
        ui->labelFeeEstimation->setText("");
    }
    else
    {
        ui->labelSmartFee->setText(BitcoinUnits::formatWithUnit(model->getOptionsModel()->getDisplayUnit(), std::max(feeRate.GetFeePerK(), CWallet::GetRequiredFee(1000))) + "/kB");
        ui->labelSmartFee2->hide();
        ui->labelFeeEstimation->setText(tr("Estimated to begin confirmation within %n block(s).", "", estimateFoundAtBlocks));
    }

    updateFeeMinimizedLabel();
}

// Coin Control: copy label "Quantity" to clipboard
void SendCoinsDialog::coinControlClipboardQuantity()
{
    GUIUtil::setClipboard(ui->labelCoinControlQuantity->text());
}

// Coin Control: copy label "Amount" to clipboard
void SendCoinsDialog::coinControlClipboardAmount()
{
    GUIUtil::setClipboard(ui->labelCoinControlAmount->text().left(ui->labelCoinControlAmount->text().indexOf(" ")));
}

// Coin Control: copy label "Fee" to clipboard
void SendCoinsDialog::coinControlClipboardFee()
{
    GUIUtil::setClipboard(ui->labelCoinControlFee->text().left(ui->labelCoinControlFee->text().indexOf(" ")).replace(ASYMP_UTF8, ""));
}

// Coin Control: copy label "After fee" to clipboard
void SendCoinsDialog::coinControlClipboardAfterFee()
{
    GUIUtil::setClipboard(ui->labelCoinControlAfterFee->text().left(ui->labelCoinControlAfterFee->text().indexOf(" ")).replace(ASYMP_UTF8, ""));
}

// Coin Control: copy label "Bytes" to clipboard
void SendCoinsDialog::coinControlClipboardBytes()
{
    GUIUtil::setClipboard(ui->labelCoinControlBytes->text().replace(ASYMP_UTF8, ""));
}

// Coin Control: copy label "Priority" to clipboard
void SendCoinsDialog::coinControlClipboardPriority()
{
    GUIUtil::setClipboard(ui->labelCoinControlPriority->text());
}

// Coin Control: copy label "Dust" to clipboard
void SendCoinsDialog::coinControlClipboardLowOutput()
{
    GUIUtil::setClipboard(ui->labelCoinControlLowOutput->text());
}

// Coin Control: copy label "Change" to clipboard
void SendCoinsDialog::coinControlClipboardChange()
{
    GUIUtil::setClipboard(ui->labelCoinControlChange->text().left(ui->labelCoinControlChange->text().indexOf(" ")).replace(ASYMP_UTF8, ""));
}

// Coin Control: settings menu - coin control enabled/disabled by user
void SendCoinsDialog::coinControlFeatureChanged(bool checked)
{
    ui->frameCoinControl->setVisible(checked);

    if (!checked && model) // coin control features disabled
        CoinControlDialog::coinControl->SetNull();

    coinControlUpdateLabels();
}

// Coin Control: button inputs -> show actual coin control dialog
void SendCoinsDialog::coinControlButtonClicked()
{
    CoinControlDialog dlg(platformStyle);
    dlg.setModel(model);
    dlg.exec();
    coinControlUpdateLabels();
}

// Coin Control: checkbox custom change address
void SendCoinsDialog::coinControlChangeChecked(int state)
{
    if (state == Qt::Unchecked)
    {
        CoinControlDialog::coinControl->destChange = CNoDestination();
        ui->labelCoinControlChangeLabel->clear();
    }
    else
        // use this to re-validate an already entered address
        coinControlChangeEdited(ui->lineEditCoinControlChange->text());

    ui->lineEditCoinControlChange->setEnabled((state == Qt::Checked));
}

// Coin Control: custom change address changed
void SendCoinsDialog::coinControlChangeEdited(const QString& text)
{
    if (model && model->getAddressTableModel())
    {
        // Default to no change address until verified
        CoinControlDialog::coinControl->destChange = CNoDestination();
        ui->labelCoinControlChangeLabel->setStyleSheet("QLabel{color:red;}");

        CBitcoinAddress addr = CBitcoinAddress(text.toStdString());

        if (text.isEmpty()) // Nothing entered
        {
            ui->labelCoinControlChangeLabel->setText("");
        }
        else if (!addr.IsValid()) // Invalid address
        {
            ui->labelCoinControlChangeLabel->setText(tr("Warning: Invalid Essence address"));
        }
        else // Valid address
        {
            CKeyID keyid;
            addr.GetKeyID(keyid);
            if (!model->havePrivKey(keyid)) // Unknown change address
            {
                ui->labelCoinControlChangeLabel->setText(tr("Warning: Unknown change address"));
            }
            else // Known change address
            {
                ui->labelCoinControlChangeLabel->setStyleSheet("QLabel{color:black;}");

                // Query label
                QString associatedLabel = model->getAddressTableModel()->labelForAddress(text);
                if (!associatedLabel.isEmpty())
                    ui->labelCoinControlChangeLabel->setText(associatedLabel);
                else
                    ui->labelCoinControlChangeLabel->setText(tr("(no label)"));

                CoinControlDialog::coinControl->destChange = addr.Get();
            }
        }
    }
}

// Coin Control: update labels
void SendCoinsDialog::coinControlUpdateLabels()
{
    if (!model || !model->getOptionsModel())
        return;

    if (model->getOptionsModel()->getCoinControlFeatures())
    {
        // enable minimum absolute fee UI controls
        ui->radioCustomAtLeast->setVisible(true);

        // only enable the feature if inputs are selected
        ui->radioCustomAtLeast->setEnabled(CoinControlDialog::coinControl->HasSelected());
    }
    else
    {
        // in case coin control is disabled (=default), hide minimum absolute fee UI controls
        ui->radioCustomAtLeast->setVisible(false);
        return;
    }

    // set pay amounts
    CoinControlDialog::payAmounts.clear();
    CoinControlDialog::fSubtractFeeFromAmount = false;
    for(int i = 0; i < ui->entries->count(); ++i)
    {
        SendCoinsEntry *entry = qobject_cast<SendCoinsEntry*>(ui->entries->itemAt(i)->widget());
        if(entry && !entry->isHidden())
        {
            SendCoinsRecipient rcp = entry->getValue();
            CoinControlDialog::payAmounts.append(rcp.amount);
            if (rcp.fSubtractFeeFromAmount)
                CoinControlDialog::fSubtractFeeFromAmount = true;
        }
    }

    ui->checkUsePrivateSend->setChecked(CoinControlDialog::coinControl->fUsePrivateSend);

    if (CoinControlDialog::coinControl->HasSelected())
    {
        // actual coin control calculation
        CoinControlDialog::updateLabels(model, this);

        // show coin control stats
        ui->labelCoinControlAutomaticallySelected->hide();
        ui->widgetCoinControl->show();
    }
    else
    {
        // hide coin control stats
        ui->labelCoinControlAutomaticallySelected->show();
        ui->widgetCoinControl->hide();
        ui->labelCoinControlInsuffFunds->hide();
    }
}
//===-- Args.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Utility/Args.h"
#include "lldb/Utility/ConstString.h"
#include "lldb/Utility/FileSpec.h"
#include "lldb/Utility/Stream.h"
#include "lldb/Utility/StringList.h"
#include "llvm/ADT/StringSwitch.h"

using namespace lldb;
using namespace lldb_private;

// A helper function for argument parsing.
// Parses the initial part of the first argument using normal double quote
// rules: backslash escapes the double quote and itself. The parsed string is
// appended to the second argument. The function returns the unparsed portion
// of the string, starting at the closing quote.
static llvm::StringRef ParseDoubleQuotes(llvm::StringRef quoted,
                                         std::string &result) {
  // Inside double quotes, '\' and '"' are special.
  static const char *k_escapable_characters = "\"\\";
  while (true) {
    // Skip over regular characters and append them.
    size_t regular = quoted.find_first_of(k_escapable_characters);
    result += quoted.substr(0, regular);
    quoted = quoted.substr(regular);

    // If we have reached the end of string or the closing quote, we're done.
    if (quoted.empty() || quoted.front() == '"')
      break;

    // We have found a backslash.
    quoted = quoted.drop_front();

    if (quoted.empty()) {
      // A lone backslash at the end of string, let's just append it.
      result += '\\';
      break;
    }

    // If the character after the backslash is not an allowed escapable
    // character, we leave the character sequence untouched.
    if (strchr(k_escapable_characters, quoted.front()) == nullptr)
      result += '\\';

    result += quoted.front();
    quoted = quoted.drop_front();
  }

  return quoted;
}

// Counts the entries of a NULL-terminated argv-style array (0 for a null argv).
static size_t ArgvToArgc(const char **argv) {
  if (!argv)
    return 0;
  size_t count = 0;
  while (*argv++)
    ++count;
  return count;
}

// Trims all whitespace that can separate command line arguments from the left
// side of the string.
static llvm::StringRef ltrimForArgs(llvm::StringRef str) {
  static const char *k_space_separators = " \t";
  return str.ltrim(k_space_separators);
}

// A helper function for SetCommandString. Parses a single argument from the
// command string, processing quotes and backslashes in a shell-like manner.
// The function returns a tuple consisting of the parsed argument, the quote
// char used, and the unparsed portion of the string starting at the first
// unquoted, unescaped whitespace character.
static std::tuple<std::string, char, llvm::StringRef>
ParseSingleArgument(llvm::StringRef command) {
  // Argument can be split into multiple discontiguous pieces, for example:
  //  "Hello ""World"
  // this would result in a single argument "Hello World" (without the quotes)
  // since the quotes would be removed and there is not space between the
  // strings.
  std::string arg;

  // Since we can have multiple quotes that form a single command in a command
  // like: "Hello "world'!' (which will make a single argument "Hello world!")
  // we remember the first quote character we encounter and use that for the
  // quote character.
  char first_quote_char = '\0';

  bool arg_complete = false;
  do {
    // Skip over regular characters and append them.
    size_t regular = command.find_first_of(" \t\r\"'`\\");
    arg += command.substr(0, regular);
    command = command.substr(regular);

    if (command.empty())
      break;

    char special = command.front();
    command = command.drop_front();
    switch (special) {
    case '\\':
      if (command.empty()) {
        arg += '\\';
        break;
      }

      // If the character after the backslash is not an allowed escapable
      // character, we leave the character sequence untouched.
      if (strchr(" \t\\'\"`", command.front()) == nullptr)
        arg += '\\';

      arg += command.front();
      command = command.drop_front();
      break;

    case ' ':
    case '\t':
    case '\r':
      // We are not inside any quotes, we just found a space after an argument.
      // We are done.
      arg_complete = true;
      break;

    case '"':
    case '\'':
    case '`':
      // We found the start of a quote scope.
      if (first_quote_char == '\0')
        first_quote_char = special;
      if (special == '"')
        command = ParseDoubleQuotes(command, arg);
      else {
        // For single quotes, we simply skip ahead to the matching quote
        // character (or the end of the string).
        size_t quoted = command.find(special);
        arg += command.substr(0, quoted);
        command = command.substr(quoted);
      }

      // If we found a closing quote, skip it.
      if (!command.empty())
        command = command.drop_front();

      break;
    }
  } while (!arg_complete);

  return std::make_tuple(arg, first_quote_char, command);
}

// Stores a NUL-terminated private copy of str together with the quote char
// that originally surrounded it.
Args::ArgEntry::ArgEntry(llvm::StringRef str, char quote) : quote(quote) {
  size_t size = str.size();
  ptr.reset(new char[size + 1]);

  // str.data() may be null for an empty StringRef; copy from "" in that case.
  ::memcpy(data(), str.data() ? str.data() : "", size);
  ptr[size] = 0;
}

// Args constructor
Args::Args(llvm::StringRef command) { SetCommandString(command); }

Args::Args(const Args &rhs) { *this = rhs; }

Args::Args(const StringList &list) : Args() {
  for (const std::string &arg : list)
    AppendArgument(arg);
}

// Deep-copies entries and rebuilds the argv pointer array (NULL-terminated).
Args &Args::operator=(const Args &rhs) {
  Clear();

  m_argv.clear();
  m_entries.clear();
  for (auto &entry : rhs.m_entries) {
    m_entries.emplace_back(entry.ref(), entry.quote);
    m_argv.push_back(m_entries.back().data());
  }
  m_argv.push_back(nullptr);
  return *this;
}

// Destructor
Args::~Args() {}

void Args::Dump(Stream &s, const char *label_name) const {
  if (!label_name)
    return;

  int i = 0;
  for (auto &entry : m_entries) {
    s.Indent();
    s.Format("{0}[{1}]=\"{2}\"\n", label_name, i++, entry.ref());
  }
  s.Format("{0}[{1}]=NULL\n", label_name, i);
  s.EOL();
}

// Joins all arguments with single spaces (no re-quoting).
bool Args::GetCommandString(std::string &command) const {
  command.clear();

  for (size_t i = 0; i < m_entries.size(); ++i) {
    if (i > 0)
      command += ' ';
    command += m_entries[i].ref();
  }

  return !m_entries.empty();
}

// Joins all arguments with single spaces, restoring each entry's quote char.
bool Args::GetQuotedCommandString(std::string &command) const {
  command.clear();

  for (size_t i = 0; i < m_entries.size(); ++i) {
    if (i > 0)
      command += ' ';

    if (m_entries[i].quote) {
      command += m_entries[i].quote;
      command += m_entries[i].ref();
      command += m_entries[i].quote;
    } else {
      command += m_entries[i].ref();
    }
  }

  return !m_entries.empty();
}

// Re-initializes this Args from a raw command line, shell-style tokenization.
void Args::SetCommandString(llvm::StringRef command) {
  Clear();
  m_argv.clear();

  command = ltrimForArgs(command);
  std::string arg;
  char quote;
  while (!command.empty()) {
    std::tie(arg, quote, command) = ParseSingleArgument(command);
    m_entries.emplace_back(arg, quote);
    m_argv.push_back(m_entries.back().data());
    command = ltrimForArgs(command);
  }
  m_argv.push_back(nullptr);
}

size_t Args::GetArgumentCount() const { return m_entries.size(); }

const char *Args::GetArgumentAtIndex(size_t idx) const {
  if (idx < m_argv.size())
    return m_argv[idx];
  return nullptr;
}

char **Args::GetArgumentVector() {
  assert(!m_argv.empty());
  // TODO: functions like execve and posix_spawnp exhibit undefined behavior
  // when argv or envp is null.  So the code below is actually wrong.  However,
  // other code in LLDB depends on it being null.  The code has been acting
  // this way for some time, so it makes sense to leave it this way until
  // someone has the time to come along and fix it.
  return (m_argv.size() > 1) ? m_argv.data() : nullptr;
}

const char **Args::GetConstArgumentVector() const {
  assert(!m_argv.empty());
  return (m_argv.size() > 1) ? const_cast<const char **>(m_argv.data())
                             : nullptr;
}

void Args::Shift() {
  // Don't pop the last NULL terminator from the argv array
  if (m_entries.empty())
    return;
  m_argv.erase(m_argv.begin());
  m_entries.erase(m_entries.begin());
}

void Args::Unshift(llvm::StringRef arg_str, char quote_char) {
  InsertArgumentAtIndex(0, arg_str, quote_char);
}

// Appends a copy of each entry of rhs, keeping the trailing NULL in m_argv.
void Args::AppendArguments(const Args &rhs) {
  assert(m_argv.size() == m_entries.size() + 1);
  assert(m_argv.back() == nullptr);
  m_argv.pop_back();
  for (auto &entry : rhs.m_entries) {
    m_entries.emplace_back(entry.ref(), entry.quote);
    m_argv.push_back(m_entries.back().data());
  }
  m_argv.push_back(nullptr);
}

void Args::AppendArguments(const char **argv) {
  size_t argc = ArgvToArgc(argv);

  assert(m_argv.size() == m_entries.size() + 1);
  assert(m_argv.back() == nullptr);
  m_argv.pop_back();
  for (auto arg : llvm::makeArrayRef(argv, argc)) {
    m_entries.emplace_back(arg, '\0');
    m_argv.push_back(m_entries.back().data());
  }

  m_argv.push_back(nullptr);
}

void Args::AppendArgument(llvm::StringRef arg_str, char quote_char) {
  InsertArgumentAtIndex(GetArgumentCount(), arg_str, quote_char);
}

// Inserts at idx; silently ignores an out-of-range index (> size).
void Args::InsertArgumentAtIndex(size_t idx, llvm::StringRef arg_str,
                                 char quote_char) {
  assert(m_argv.size() == m_entries.size() + 1);
  assert(m_argv.back() == nullptr);

  if (idx > m_entries.size())
    return;
  m_entries.emplace(m_entries.begin() + idx, arg_str, quote_char);
  m_argv.insert(m_argv.begin() + idx, m_entries[idx].data());
}

// Replaces the entry at idx in place; silently ignores an out-of-range index.
void Args::ReplaceArgumentAtIndex(size_t idx, llvm::StringRef arg_str,
                                  char quote_char) {
  assert(m_argv.size() == m_entries.size() + 1);
  assert(m_argv.back() == nullptr);

  if (idx >= m_entries.size())
    return;

  m_entries[idx] = ArgEntry(arg_str, quote_char);
  m_argv[idx] = m_entries[idx].data();
}

void Args::DeleteArgumentAtIndex(size_t idx) {
  if (idx >= m_entries.size())
    return;

  m_argv.erase(m_argv.begin() + idx);
  m_entries.erase(m_entries.begin() + idx);
}

// Replaces the contents with copies of argv[0..argc); a leading quote char on
// an argument is remembered as that entry's quote character.
void Args::SetArguments(size_t argc, const char **argv) {
  Clear();

  auto args = llvm::makeArrayRef(argv, argc);
  m_entries.resize(argc);
  m_argv.resize(argc + 1);
  for (size_t i = 0; i < args.size(); ++i) {
    char quote =
        ((args[i][0] == '\'') || (args[i][0] == '"') || (args[i][0] == '`'))
            ? args[i][0]
            : '\0';

    m_entries[i] = ArgEntry(args[i], quote);
    m_argv[i] = m_entries[i].data();
  }
}

void Args::SetArguments(const char **argv) {
  SetArguments(ArgvToArgc(argv), argv);
}

void Args::Clear() {
  m_entries.clear();
  m_argv.clear();
  m_argv.push_back(nullptr);
}

// Backslash-escapes characters that are special to the given shell; the set of
// escapable characters depends on the shell basename (bash/tcsh/sh known).
const char *Args::GetShellSafeArgument(const FileSpec &shell,
                                       const char *unsafe_arg,
                                       std::string &safe_arg) {
  struct ShellDescriptor {
    ConstString m_basename;
    const char *m_escapables;
  };

  static ShellDescriptor g_Shells[] = {{ConstString("bash"), " '\"<>()&"},
                                       {ConstString("tcsh"), " '\"<>()&$"},
                                       {ConstString("sh"), " '\"<>()&"}};

  // safe minimal set
  const char *escapables = " '\"";

  if (auto basename = shell.GetFilename()) {
    for (const auto &Shell : g_Shells) {
      if (Shell.m_basename == basename) {
        escapables = Shell.m_escapables;
        break;
      }
    }
  }

  safe_arg.assign(unsafe_arg);
  size_t prev_pos = 0;
  while (prev_pos < safe_arg.size()) {
    // Escape spaces and quotes
    size_t pos = safe_arg.find_first_of(escapables, prev_pos);
    if (pos != std::string::npos) {
      safe_arg.insert(pos, 1, '\\');
      // skip past the inserted backslash and the escaped character
      prev_pos = pos + 2;
    } else
      break;
  }
  return safe_arg.c_str();
}

lldb::Encoding Args::StringToEncoding(llvm::StringRef s,
                                      lldb::Encoding fail_value) {
  return llvm::StringSwitch<lldb::Encoding>(s)
      .Case("uint", eEncodingUint)
      .Case("sint", eEncodingSint)
      .Case("ieee754", eEncodingIEEE754)
      .Case("vector", eEncodingVector)
      .Default(fail_value);
}

uint32_t Args::StringToGenericRegister(llvm::StringRef s) {
  if (s.empty())
    return LLDB_INVALID_REGNUM;
  uint32_t result = llvm::StringSwitch<uint32_t>(s)
                        .Case("pc", LLDB_REGNUM_GENERIC_PC)
                        .Case("sp", LLDB_REGNUM_GENERIC_SP)
                        .Case("fp", LLDB_REGNUM_GENERIC_FP)
                        .Cases("ra", "lr", LLDB_REGNUM_GENERIC_RA)
                        .Case("flags", LLDB_REGNUM_GENERIC_FLAGS)
                        .Case("arg1", LLDB_REGNUM_GENERIC_ARG1)
                        .Case("arg2", LLDB_REGNUM_GENERIC_ARG2)
                        .Case("arg3", LLDB_REGNUM_GENERIC_ARG3)
                        .Case("arg4", LLDB_REGNUM_GENERIC_ARG4)
                        .Case("arg5", LLDB_REGNUM_GENERIC_ARG5)
                        .Case("arg6", LLDB_REGNUM_GENERIC_ARG6)
                        .Case("arg7", LLDB_REGNUM_GENERIC_ARG7)
                        .Case("arg8", LLDB_REGNUM_GENERIC_ARG8)
                        .Default(LLDB_INVALID_REGNUM);
  return result;
}

// Translates C-style escape sequences ("\n", "\0NNN", "\xNN", ...) in src
// into the characters they denote, writing the result into dst.
void Args::EncodeEscapeSequences(const char *src, std::string &dst) {
  dst.clear();
  if (src) {
    for (const char *p = src; *p != '\0'; ++p) {
      size_t non_special_chars = ::strcspn(p, "\\");
      if (non_special_chars > 0) {
        dst.append(p, non_special_chars);
        p += non_special_chars;
        if (*p == '\0')
          break;
      }

      if (*p == '\\') {
        ++p; // skip the slash
        switch (*p) {
        case 'a':
          dst.append(1, '\a');
          break;
        case 'b':
          dst.append(1, '\b');
          break;
        case 'f':
          dst.append(1, '\f');
          break;
        case 'n':
          dst.append(1, '\n');
          break;
        case 'r':
          dst.append(1, '\r');
          break;
        case 't':
          dst.append(1, '\t');
          break;
        case 'v':
          dst.append(1, '\v');
          break;
        case '\\':
          dst.append(1, '\\');
          break;
        case '\'':
          dst.append(1, '\'');
          break;
        case '"':
          dst.append(1, '"');
          break;
        case '0':
          // 1 to 3 octal chars
          {
            // Make a string that can hold onto the initial zero char, up to 3
            // octal digits, and a terminating NULL.
            char oct_str[5] = {'\0', '\0', '\0', '\0', '\0'};

            int i;
            for (i = 0; (p[i] >= '0' && p[i] <= '7') && i < 4; ++i)
              oct_str[i] = p[i];

            // We don't want to consume the last octal character since the main
            // for loop will do this for us, so we advance p by one less than i
            // (even if i is zero)
            p += i - 1;
            unsigned long octal_value = ::strtoul(oct_str, nullptr, 8);
            if (octal_value <= UINT8_MAX) {
              dst.append(1, static_cast<char>(octal_value));
            }
          }
          break;

        case 'x':
          // hex number in the format
          if (isxdigit(p[1])) {
            ++p; // Skip the 'x'

            // Make a string that can hold onto two hex chars plus a
            // NULL terminator
            char hex_str[3] = {*p, '\0', '\0'};
            if (isxdigit(p[1])) {
              ++p; // Skip the first of the two hex chars
              hex_str[1] = *p;
            }

            unsigned long hex_value = strtoul(hex_str, nullptr, 16);
            if (hex_value <= UINT8_MAX)
              dst.append(1, static_cast<char>(hex_value));
          } else {
            dst.append(1, 'x');
          }
          break;

        default:
          // Just desensitize any other character by just printing what came
          // after the '\'
          dst.append(1, *p);
          break;
        }
      }
    }
  }
}

// The inverse of EncodeEscapeSequences: renders non-printable characters in
// src as C-style escape sequences (named escapes, else octal).
void Args::ExpandEscapedCharacters(const char *src, std::string &dst) {
  dst.clear();
  if (src) {
    for (const char *p = src; *p != '\0'; ++p) {
      if (llvm::isPrint(*p))
        dst.append(1, *p);
      else {
        switch (*p) {
        case '\a':
          dst.append("\\a");
          break;
        case '\b':
          dst.append("\\b");
          break;
        case '\f':
          dst.append("\\f");
          break;
        case '\n':
          dst.append("\\n");
          break;
        case '\r':
          dst.append("\\r");
          break;
        case '\t':
          dst.append("\\t");
          break;
        case '\v':
          dst.append("\\v");
          break;
        case '\'':
          dst.append("\\'");
          break;
        case '"':
          dst.append("\\\"");
          break;
        case '\\':
          dst.append("\\\\");
          break;
        default: {
          // Just encode as octal
          dst.append("\\0");
          char octal_str[32];
          snprintf(octal_str, sizeof(octal_str), "%o", *p);
          dst.append(octal_str);
        } break;
        }
      }
    }
  }
}

// Backslash-escapes characters that would be special to the LLDB command
// interpreter for an argument appearing inside the given quote context.
std::string Args::EscapeLLDBCommandArgument(const std::string &arg,
                                            char quote_char) {
  const char *chars_to_escape = nullptr;
  switch (quote_char) {
  case '\0':
    chars_to_escape = " \t\\'\"`";
    break;
  case '"':
    chars_to_escape = "$\"`\\";
    break;
  case '`':
  case '\'':
    // Nothing is special inside single quotes / backticks.
    return arg;
  default:
    assert(false && "Unhandled quote character");
    return arg;
  }

  std::string res;
  res.reserve(arg.size());
  for (char c : arg) {
    if (::strchr(chars_to_escape, c))
      res.push_back('\\');
    res.push_back(c);
  }
  return res;
}

OptionsWithRaw::OptionsWithRaw(llvm::StringRef arg_string) {
  SetFromString(arg_string);
}

// Splits arg_string into a parsed option prefix and a raw suffix. The suffix
// begins after the first unquoted "--" argument; if the string does not start
// with '-' (or no "--" is found), the whole string is treated as the suffix.
void OptionsWithRaw::SetFromString(llvm::StringRef arg_string) {
  const llvm::StringRef original_args = arg_string;

  arg_string = ltrimForArgs(arg_string);
  std::string arg;
  char quote;

  // If the string doesn't start with a dash, we just have no options and just
  // a raw part.
  if (!arg_string.startswith("-")) {
    m_suffix = std::string(original_args);
    return;
  }

  bool found_suffix = false;

  while (!arg_string.empty()) {
    // The length of the prefix before parsing.
    std::size_t prev_prefix_length = original_args.size() - arg_string.size();

    // Parse the next argument from the remaining string.
    std::tie(arg, quote, arg_string) = ParseSingleArgument(arg_string);

    // If we get an unquoted '--' argument, then we reached the suffix part
    // of the command.
    Args::ArgEntry entry(arg, quote);
    if (!entry.IsQuoted() && arg == "--") {
      // The remaining line is the raw suffix, and the line we parsed so far
      // needs to be interpreted as arguments.
      m_has_args = true;
      m_suffix = std::string(arg_string);
      found_suffix = true;

      // The length of the prefix after parsing.
      std::size_t prefix_length = original_args.size() - arg_string.size();

      // Take the string we know contains all the arguments and actually parse
      // it as proper arguments.
      llvm::StringRef prefix = original_args.take_front(prev_prefix_length);
      m_args = Args(prefix);
      m_arg_string = prefix;

      // We also record the part of the string that contains the arguments plus
      // the delimiter.
      m_arg_string_with_delimiter = original_args.take_front(prefix_length);

      // As the rest of the string became the raw suffix, we are done here.
      break;
    }

    arg_string = ltrimForArgs(arg_string);
  }

  // If we didn't find a suffix delimiter, the whole string is the raw suffix.
  if (!found_suffix) {
    found_suffix = true;
    m_suffix = std::string(original_args);
  }
}

void llvm::yaml::MappingTraits<Args::ArgEntry>::mapping(IO &io,
                                                        Args::ArgEntry &v) {
  MappingNormalization<NormalizedArgEntry, Args::ArgEntry> keys(io, v);
  io.mapRequired("value", keys->value);
  io.mapRequired("quote", keys->quote);
}

void llvm::yaml::MappingTraits<Args>::mapping(IO &io, Args &v) {
  io.mapRequired("entries", v.m_entries);

  // Recompute m_argv vector.
  v.m_argv.clear();
  for (auto &entry : v.m_entries)
    v.m_argv.push_back(entry.data());
  v.m_argv.push_back(nullptr);
}
#include "CalLikelihoodTool.h" #include "GaudiKernel/GaudiException.h" #include "src/Utilities/CalException.h" /** * @class CalLastLayerLikelihoodTool * * Algorithm for correction of energy leak through the bottom of the CAL using * the correlation between that vallue and the energy deposit in the last layer. * */ class CalLastLayerLikelihoodTool : public CalLikelihoodTool { public: //! constructor CalLastLayerLikelihoodTool(const std::string& type, const std::string& name, const IInterface* parent); virtual ~CalLastLayerLikelihoodTool(){} StatusCode initialize(); //! Energy leak correction using correlation to the energy in the last layer /*! This method uses the correlation between the energy lost in through the * botton of the CAL and the energy deposited in the last layer of the * calorimeter. * We used the Monte Carlo simulation of the LAT to determine this * correlation at several energies, from 200 MeV up to 50 GeV, * and angles from 0 to 32\deg. * See CalLikelihoodTool.calculateEvent for more information * * \par The method takes 2 arguments: * \param CalCluster * \param TkrVertex * *\return CalCorToolResult with an energy and error estimate. 
* *\author */ Event::CalCorToolResult* doEnergyCorr(Event::CalCluster*, Event::TkrTree* ); private: int m_calNLayers; }; #include "GaudiKernel/DeclareFactoryEntries.h" DECLARE_TOOL_FACTORY(CalLastLayerLikelihoodTool) ; CalLastLayerLikelihoodTool::CalLastLayerLikelihoodTool( const std::string& type, const std::string& name, const IInterface* parent) : CalLikelihoodTool(type,name,parent) { // declare base interface for all consecutive concrete classes declareInterface<ICalEnergyCorr>(this); declareProperty("dataFile", m_dataFile="$(CALRECONXMLPATH)/CalLastLayerLikelihood.data"); }; StatusCode CalLastLayerLikelihoodTool::initialize() { StatusCode sc= CalLikelihoodTool::initialize(); if( sc==StatusCode::SUCCESS ) { if (!m_detSvc->getNumericConstByName(std::string("CALnLayer"), &m_calNLayers)) { throw GaudiException("GlastDetSvc cannot find [CALnLayer]", name(), StatusCode::FAILURE); } } return sc; } Event::CalCorToolResult* CalLastLayerLikelihoodTool::doEnergyCorr(Event::CalCluster* cluster, Event::TkrTree* tree) //Purpose and method: // // This function performs: // The main actions are: // - check wheter the event meets basic requirements (CUTS) // - calculate energy by TKR correction method using LikelihoodTool // // TDS input: CalCluster // TDS output: CalClusters { MsgStream log(msgSvc(), "CalLastLayerLikelihoodTool::doEnergyCorr"); Event::CalCorToolResult* corResult = 0; // if reconstructed tracker data doesn't exist or number of tracks is 0: if (tree == 0) { log << MSG::DEBUG << "Ending doEnergyCorr: No TKR Reconstruction" << endreq; return corResult; } if (!cluster) { log << MSG::DEBUG << "Ending doEnergyCorr: No Cluster" << endreq; return corResult; } const Vector& trackDirection = -tree->getAxisParams()->getEventAxis(); const Point& trackPosition = tree->getAxisParams()->getEventPosition(); // CUTS // this checks whether a set of PDF parameters exist for this event's // energy, direction and point of impact. 
// Energy: if( cluster->getMomParams().getEnergy() < minTrialEnergy()*.1 ) { log << MSG::DEBUG << "Ending doEnergyCorr: " "CAL Energy below Method Minimum" << endreq; return corResult; } if( cluster->getMomParams().getEnergy() > maxTrialEnergy() ) { log << MSG::DEBUG << "Ending doEnergyCorr: " "CAL Energy above Method Maximum" << endreq; return corResult; } // direction: slope must be above \f$cos(32\circ)$\f if( fabs(trackDirection.z()) < minSlope() ) { log << MSG::DEBUG << "Ending doEnergyCorr: Slope is too Small." << endreq; return corResult; } int vertexPos = findTkrVertex(trackPosition); if( vertexPos < 0 || vertexPos > 15 ) { log << MSG::DEBUG << "Ending doEnergyCorr: " "Vertex is out of Method Reach." << endreq; return corResult; } double geometricCut= findGeometricCut(trackPosition, trackDirection, cluster); if( geometricCut<.15 ) { log << MSG::DEBUG << "Ending doEnergyCorr: " "Geometric Cut too low." << endreq; return corResult; } // CALCULATE // - get number of hits in TKR double calE7= 0.; if( hasCorrelation() ) { calE7= (*cluster)[m_calNLayers-1].getEnergy(); } setEventPDFdata(vertexPos+(geometricCut>.5)*16); setEventPDFparameters(fabs(trackDirection.z()), cluster->getMomParams().getEnergy(), calE7); log << MSG::VERBOSE << "PDF Index: " << vertexPos+(geometricCut>.5)*16 << endreq << "Parameters: " << fabs(trackDirection.z()) << ", " << cluster->getMomParams().getEnergy() << ", " << calE7 << endreq; corResult= calculateEvent(cluster, log); if( corResult ) (*corResult)["GeometricCut"] = geometricCut ; log << MSG::DEBUG << "Ending doEnergyCorr: Reconstruction Done" << endreq; return corResult; }
/*##############################################################################

    HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems®.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
############################################################################## */

#include "platform.h"
#include "jliball.hpp"
#include "eclrtl.hpp"
#include "eclhelper.hpp"
#include "rtlds_imp.hpp"
#include "rtlread_imp.hpp"
#include "rtlrecord.hpp"
#include "roxiemem.hpp"

#define FIRST_CHUNK_SIZE  0x100
#define DOUBLE_LIMIT      0x10000          // must be a power of 2

// Growth policy for the dataset buffers: double up to DOUBLE_LIMIT, then
// round the request up to the next multiple of DOUBLE_LIMIT.
unsigned getNextSize(unsigned max, unsigned required)
{
    if (required > DOUBLE_LIMIT)
    {
        max = (required + DOUBLE_LIMIT) & ~(DOUBLE_LIMIT-1);
        // rounding wrapped around (overflow) - fail rather than under-allocate
        if (required >= max)
            throw MakeStringException(-1, "getNextSize: Request for %d bytes oldMax = %d", required, max);
    }
    else
    {
        if (max == 0)
            max = FIRST_CHUNK_SIZE;
        while (required >= max)
            max += max;
    }
    return max;
}

//---------------------------------------------------------------------------

RtlDatasetBuilder::RtlDatasetBuilder()
{
    maxSize = 0;
    buffer = NULL;
    totalSize = 0;
}

RtlDatasetBuilder::~RtlDatasetBuilder()
{
    free(buffer);
}

// Grows the backing buffer (if needed) and repositions self at the current
// append point. Note: realloc may move the buffer, so self must be refreshed.
void RtlDatasetBuilder::ensure(size32_t required)
{
    if (required > maxSize)
    {
        maxSize = getNextSize(maxSize, required);
        byte * newbuffer = (byte *)realloc(buffer, maxSize);
        if (!newbuffer)
            throw MakeStringException(-1, "Failed to allocate temporary dataset (requesting %d bytes)", maxSize);
        buffer = newbuffer;
    }
    self = buffer + totalSize;
}

byte * RtlDatasetBuilder::ensureCapacity(size32_t required, const char * fieldName)
{
    ensure(totalSize + required);
    return self; // self is updated by ensure()
}

void RtlDatasetBuilder::flushDataset()
{
}

// Returns a malloc'd copy of the accumulated data (caller owns it).
void RtlDatasetBuilder::getData(size32_t & len, void * & data)
{
    flushDataset();
    len = totalSize;
    data = malloc(totalSize);
    memcpy(data, buffer, totalSize);
}

size32_t RtlDatasetBuilder::getSize()
{
    flushDataset();
    return totalSize;
}

byte * RtlDatasetBuilder::queryData()
{
    flushDataset();
    return buffer;
}

void RtlDatasetBuilder::reportMissingRow() const
{
    throw MakeStringException(MSGAUD_user, 1000, "RtlDatasetBuilder::row() is NULL");
}

//---------------------------------------------------------------------------

RtlFixedDatasetBuilder::RtlFixedDatasetBuilder(unsigned _recordSize, unsigned maxRows)
{
    recordSize = _recordSize;
    // maxRows may be passed as -1 meaning "unknown" - only pre-reserve if positive
    if ((int)maxRows > 0)
        ensure(recordSize * maxRows);
}

byte * RtlFixedDatasetBuilder::createSelf()
{
    self = ensureCapacity(recordSize, NULL);
    return self;
}

//---------------------------------------------------------------------------

RtlLimitedFixedDatasetBuilder::RtlLimitedFixedDatasetBuilder(unsigned _recordSize, unsigned _maxRows, DefaultRowCreator _rowCreator, IResourceContext *_ctx) : RtlFixedDatasetBuilder(_recordSize, _maxRows)
{
    maxRows = _maxRows;
    if ((int)maxRows < 0)
        maxRows = 0;
    rowCreator = _rowCreator;
    ctx = _ctx;
}

// Returns NULL once the row limit has been reached.
byte * RtlLimitedFixedDatasetBuilder::createRow()
{
    if (totalSize >= maxRows * recordSize)
        return NULL;
    return RtlFixedDatasetBuilder::createRow();
}

// Pads the dataset with default-created rows up to maxRows before flushing.
void RtlLimitedFixedDatasetBuilder::flushDataset()
{
    if (rowCreator)
    {
        while (totalSize < maxRows * recordSize)
        {
            createRow();
            size32_t size = rowCreator(rowBuilder(), ctx);
            finalizeRow(size);
        }
    }
    RtlFixedDatasetBuilder::flushDataset();
}

//---------------------------------------------------------------------------

RtlVariableDatasetBuilder::RtlVariableDatasetBuilder(IRecordSize & _recordSize)
{
    recordSize = &_recordSize;
    maxRowSize = recordSize->getMinRecordSize(); // initial size
}

byte * RtlVariableDatasetBuilder::createSelf()
{
    self = ensureCapacity(maxRowSize, NULL);
    return self;
}

void RtlVariableDatasetBuilder::deserializeRow(IOutputRowDeserializer & deserializer, IRowDeserializerSource & in)
{
    createRow();
    size32_t rowSize = deserializer.deserialize(rowBuilder(), in);
    finalizeRow(rowSize);
}

//---------------------------------------------------------------------------

RtlLimitedVariableDatasetBuilder::RtlLimitedVariableDatasetBuilder(IRecordSize & _recordSize, unsigned _maxRows, DefaultRowCreator _rowCreator, IResourceContext *_ctx) : RtlVariableDatasetBuilder(_recordSize)
{
    numRows = 0;
    maxRows = _maxRows;
    rowCreator = _rowCreator;
    ctx = _ctx;
}

// Returns NULL once maxRows rows have been created.
byte * RtlLimitedVariableDatasetBuilder::createRow()
{
    if (numRows >= maxRows)
        return NULL;
    numRows++;
    return RtlVariableDatasetBuilder::createRow();
}

// Pads the dataset with default-created rows up to maxRows before flushing.
void RtlLimitedVariableDatasetBuilder::flushDataset()
{
    if (rowCreator)
    {
        while (numRows < maxRows)
        {
            createRow();
            size32_t thisSize = rowCreator(rowBuilder(), ctx);
            finalizeRow(thisSize);
        }
    }
    RtlVariableDatasetBuilder::flushDataset();
}

//---------------------------------------------------------------------------

const byte * * rtlRowsAttr::linkrows() const
{
    if (rows)
        rtlLinkRowset(rows);
    return rows;
}

void rtlRowsAttr::set(size32_t _count, const byte * * _rows)
{
    setown(_count, rtlLinkRowset(_rows));
}

void rtlRowsAttr::setRow(IEngineRowAllocator * rowAllocator, const byte * _row)
{
    setown(1, rowAllocator->appendRowOwn(NULL, 1, rowAllocator->linkRow(_row)));
}

// Takes ownership of _rows; any previously held rowset is released first.
void rtlRowsAttr::setown(size32_t _count, const byte * * _rows)
{
    dispose();
    count = _count;
    rows = _rows;
}

void rtlRowsAttr::dispose()
{
    if (rows)
        rtlReleaseRowset(count, rows);
}

//---------------------------------------------------------------------------

void rtlReportFieldOverflow(unsigned size, unsigned max, const RtlFieldInfo * field)
{
    if (field)
        rtlReportFieldOverflow(size, max, field->name);
    else
        rtlReportRowOverflow(size, max);
}

void RtlRowBuilderBase::reportMissingRow() const
{
    throw MakeStringException(MSGAUD_user, 1000, "RtlRowBuilderBase::row() is NULL");
}

// Lazily creates the row on first use, resizes it on subsequent growth.
// Returns NULL (after reporting) if the allocator refuses to resize.
byte * RtlDynamicRowBuilder::ensureCapacity(size32_t required, const char * fieldName)
{
    if (required > maxLength)
    {
        if (!self)
            self = (byte *)rowAllocator->createRow(required, maxLength);
        else if (required > maxLength)
        {
            void * next = rowAllocator->resizeRow(required, self, maxLength);
            if (!next)
            {
                rtlReportFieldOverflow(required, maxLength, fieldName);
                return NULL;
            }
            self = static_cast<byte *>(next);
        }
    }
    return self;
}

// Exchanges ownership of the unfinalized rows (and their capacities).
void RtlDynamicRowBuilder::swapWith(RtlDynamicRowBuilder & other)
{
    size32_t savedMaxLength = maxLength;
    void * savedSelf = getUnfinalizedClear();
    setown(other.getMaxLength(), other.getUnfinalizedClear());
    other.setown(savedMaxLength, savedSelf);
}

//---------------------------------------------------------------------------

// Static buffer cannot grow: overflow is reported instead.
byte * RtlStaticRowBuilder::ensureCapacity(size32_t required, const char * fieldName)
{
    if (required <= maxLength)
        return static_cast<byte *>(self);
    rtlReportFieldOverflow(required, maxLength, fieldName);
    return NULL;
}

byte * RtlStaticRowBuilder::createSelf()
{
    throwUnexpected();
}

//---------------------------------------------------------------------------

RtlLinkedDatasetBuilder::RtlLinkedDatasetBuilder(IEngineRowAllocator * _rowAllocator, int _choosenLimit) : builder(_rowAllocator, false)
{
    rowAllocator = LINK(_rowAllocator);
    rowset = NULL;
    count = 0;
    max = 0;
    choosenLimit = (unsigned)_choosenLimit;
}

RtlLinkedDatasetBuilder::~RtlLinkedDatasetBuilder()
{
    builder.clear();
    if (rowset)
        rowAllocator->releaseRowset(count, rowset);
    ::Release(rowAllocator);
}

void RtlLinkedDatasetBuilder::clear()
{
    builder.clear();
    if (rowset)
        rowAllocator->releaseRowset(count, rowset);
    rowset = NULL;
    count = 0;
    max = 0;
}

// Appends a linked reference to source; silently ignored once the limit is hit.
void RtlLinkedDatasetBuilder::append(const void * source)
{
    if (count < choosenLimit)
    {
        ensure(count+1);
        rowset[count] = source ? (byte *)rowAllocator->linkRow(source) : NULL;
        count++;
    }
}

// Appends up to (choosenLimit - count) non-null rows from the supplied array.
void RtlLinkedDatasetBuilder::appendRows(size32_t num, const byte * * rows)
{
    if (num && (count < choosenLimit))
    {
        unsigned maxNumToAdd = (count + num < choosenLimit) ? num : choosenLimit - count;
        unsigned numAdded = 0;
        ensure(count+maxNumToAdd);
        for (unsigned i=0; i < num; i++)
        {
            const byte *row = rows[i];
            if (row)
            {
                rowset[count+numAdded] = (const byte *) rowAllocator->linkRow(row);
                numAdded++;
                if (numAdded == maxNumToAdd)
                    break;
            }
        }
        count += numAdded;
    }
}

// Takes ownership of row; releases it instead if the limit has been reached.
void RtlLinkedDatasetBuilder::appendOwn(const void * row)
{
    assertex(!builder.exists());
    if (count < choosenLimit)
    {
        ensure(count+1);
        rowset[count] = (byte *)row;
        count++;
    }
    else
        rowAllocator->releaseRow(row);
}

byte * RtlLinkedDatasetBuilder::createRow()
{
    if (count >= choosenLimit)
        return NULL;
    return builder.getSelf();
}

//------------------------------------------------------------------------------------

RtlStreamedDatasetBuilder::RtlStreamedDatasetBuilder(IEngineRowAllocator * _rowAllocator, int _choosenLimit) : RtlLinkedDatasetBuilder(_rowAllocator, _choosenLimit)
{
}

IRowStream * RtlStreamedDatasetBuilder::createDataset()
{
    return createRowStream(getcount(), queryrows());
}

//cloned from thorcommon.cpp
// Visitor that bumps the link count on any child rowsets/rows of a cloned row.
class RtlChildRowLinkerWalker : implements IIndirectMemberVisitor
{
public:
    virtual void visitRowset(size32_t count, const byte * * rows) override
    {
        rtlLinkRowset(rows);
    }
    virtual void visitRow(const byte * row) override
    {
        rtlLinkRow(row);
    }
};

// Appends a byte-wise copy of row, re-linking any child rows it references.
void RtlLinkedDatasetBuilder::cloneRow(size32_t len, const void * row)
{
    if (count >= choosenLimit)
        return;

    byte * self = builder.ensureCapacity(len, NULL);
    memcpy(self, row, len);

    IOutputMetaData * meta = rowAllocator->queryOutputMeta();
    if (meta->getMetaFlags() & MDFneeddestruct)
    {
        RtlChildRowLinkerWalker walker;
        meta->walkIndirectMembers(self, walker);
    }

    finalizeRow(len);
}

void RtlLinkedDatasetBuilder::deserializeRow(IOutputRowDeserializer & deserializer, IRowDeserializerSource & in)
{
    builder.ensureRow();
    size32_t rowSize = deserializer.deserialize(builder, in);
    finalizeRow(rowSize);
}

// Shrinks the rowset allocation down to the number of rows actually appended.
void RtlLinkedDatasetBuilder::finalizeRows()
{
    if (count != max)
        resize(count);
}

void RtlLinkedDatasetBuilder::finalizeRow(size32_t rowSize)
{
    assertex(builder.exists());
    const void * next = builder.finalizeRowClear(rowSize);
    appendOwn(next);
}

const byte * * RtlLinkedDatasetBuilder::linkrows()
{
    finalizeRows();
    return rtlLinkRowset(rowset);
}

// Doubles the rowset capacity until it covers 'required', capped at the limit.
void RtlLinkedDatasetBuilder::expand(size32_t required)
{
    assertex(required <= choosenLimit);
    //MORE: Next factoring change this so it passes this logic over to the row allocator
    size32_t newMax = max ? max : 4;
    while (newMax < required)
    {
        newMax += newMax;
        assertex(newMax); // guard against size overflow wrapping to zero
    }
    if (newMax > choosenLimit)
        newMax = choosenLimit;
    resize(newMax);
}

void RtlLinkedDatasetBuilder::resize(size32_t required)
{
    rowset = rowAllocator->reallocRows(rowset, max, required);
    max = required;
}

// Appends linked references to all non-null rows in extraRows onto targetRowset,
// updating targetCount/targetRowset in place.
void appendRowsToRowset(size32_t & targetCount, const byte * * & targetRowset, IEngineRowAllocator * rowAllocator, size32_t extraCount, const byte * * extraRows)
{
    if (extraCount)
    {
        size32_t prevCount = targetCount;
        const byte * * expandedRowset = rowAllocator->reallocRows(targetRowset, prevCount, prevCount+extraCount);
        unsigned numAdded = 0;
        for (unsigned i=0; i < extraCount; i++)
        {
            const byte *extraRow = extraRows[i];
            if (extraRow)
            {
                expandedRowset[prevCount+numAdded] = (byte *)rowAllocator->linkRow(extraRow);
                numAdded++;
            }
        }
        targetCount = prevCount + numAdded;
        targetRowset = expandedRowset;
    }
}

// Returns a finalized deep-linked copy of row, allocated from rowAllocator.
const void * rtlCloneRow(IEngineRowAllocator * rowAllocator, size32_t len, const void * row)
{
    RtlDynamicRowBuilder builder(*rowAllocator);

    byte * self = builder.ensureCapacity(len, NULL);
    memcpy(self, row, len);

    IOutputMetaData * meta = rowAllocator->queryOutputMeta();
    if (meta->getMetaFlags() & MDFneeddestruct)
    {
        RtlChildRowLinkerWalker walker;
        meta->walkIndirectMembers(self, walker);
    }
    return builder.finalizeRowClear(len);
}

void rtlLinkChildren(void * self, IOutputMetaData & meta)
{
    RtlChildRowLinkerWalker walker;
    meta.walkIndirectMembers(static_cast<byte *>(self), walker);
}

void rtlCopyRowLinkChildren(void * self, size32_t len, const void * row, IOutputMetaData & meta)
{
    memcpy(self, row, len);
    RtlChildRowLinkerWalker walker;
    meta.walkIndirectMembers(static_cast<byte *>(self), walker);
}

//---------------------------------------------------------------------------

RtlLinkedDictionaryBuilder::RtlLinkedDictionaryBuilder(IEngineRowAllocator * _rowAllocator, IHThorHashLookupInfo *_hashInfo, unsigned _initialSize) : builder(_rowAllocator, false)
{
    init(_rowAllocator, _hashInfo, _initialSize);
}

RtlLinkedDictionaryBuilder::RtlLinkedDictionaryBuilder(IEngineRowAllocator * _rowAllocator, IHThorHashLookupInfo *_hashInfo) : builder(_rowAllocator, false)
{
    init(_rowAllocator, _hashInfo, 8);
}

void RtlLinkedDictionaryBuilder::init(IEngineRowAllocator * _rowAllocator, IHThorHashLookupInfo *_hashInfo, unsigned _initialSize)
{
    hash = _hashInfo->queryHash();
    compare = _hashInfo->queryCompare();
    // enforce a sensible minimum hash-table size
    if (_initialSize >= 4)
        initialSize = _initialSize;
    else
        initialSize = 4;
    rowAllocator = LINK(_rowAllocator);
    table = NULL;
    usedCount = 0;
    usedLimit = 0;
    tableSize = 0;
}

RtlLinkedDictionaryBuilder::~RtlLinkedDictionaryBuilder()
{
    // builder.clear();
    if (table)
        rowAllocator->releaseRowset(tableSize, table);
    ::Release(rowAllocator);
}

void RtlLinkedDictionaryBuilder::append(const void * source)
{
    if (source)
    {
        appendOwn(rowAllocator->linkRow(source));
    }
}

// Inserts source into the open-addressed (linear probing) hash table, taking
// ownership. A duplicate key releases the new row and keeps the existing one.
void RtlLinkedDictionaryBuilder::appendOwn(const void * source)
{
    if (source)
    {
        checkSpace();
        unsigned rowidx = hash->hash(source) % tableSize;
        for (;;)
        {
            const void *entry = table[rowidx];
            if (entry && compare->docompare(source, entry)==0)
            {
                rowAllocator->releaseRow(source);
                break;
            }
            if (!entry)
            {
                table[rowidx] = (byte *) source;
                usedCount++;
                break;
            }
            rowidx++;
            if (rowidx==tableSize)
                rowidx = 0; // wrap around to the start of the table
        }
    }
}

// Creates the table on first use and rehashes into a table twice the size when
// the 75% load factor is exceeded.
void RtlLinkedDictionaryBuilder::checkSpace()
{
    if (!table)
    {
        table = rowAllocator->createRowset(initialSize);
        tableSize = initialSize;
        memset(table, 0, tableSize*sizeof(byte *));
        usedLimit = (tableSize * 3) / 4;
        usedCount = 0;
    }
    else if (usedCount >= usedLimit)
    {
        // Rehash
        const byte * * oldTable = table;
        unsigned oldSize = tableSize;
        table = rowAllocator->createRowset(tableSize*2);
        tableSize = tableSize*2; // Don't update until we have successfully allocated, so that we remain consistent if createRowset throws an exception.
        memset(table, 0, tableSize * sizeof(byte *));
        usedLimit = (tableSize * 3) / 4;
        usedCount = 0;
        unsigned i;
        for (i = 0; i < oldSize; i++)
        {
            append(oldTable[i]);   // we link the rows here...
        }
        rowAllocator->releaseRowset(oldSize, oldTable);   // ... because this will release them
    }
}

void RtlLinkedDictionaryBuilder::deserializeRow(IOutputRowDeserializer & deserializer, IRowDeserializerSource & in)
{
    builder.ensureRow();
    size32_t rowSize = deserializer.deserialize(builder, in);
    finalizeRow(rowSize);
}

void RtlLinkedDictionaryBuilder::appendRows(size32_t num, const byte * * rows)
{
    // MORE - if we know that the source is already a hashtable, we can optimize the add to an empty table...
for (unsigned i=0; i < num; i++) append(rows[i]); } void RtlLinkedDictionaryBuilder::finalizeRow(size32_t rowSize) { assertex(builder.exists()); const void * next = builder.finalizeRowClear(rowSize); appendOwn(next); } void RtlLinkedDictionaryBuilder::cloneRow(size32_t len, const void * row) { byte * self = builder.ensureCapacity(len, NULL); memcpy(self, row, len); IOutputMetaData * meta = rowAllocator->queryOutputMeta(); if (meta->getMetaFlags() & MDFneeddestruct) { RtlChildRowLinkerWalker walker; meta->walkIndirectMembers(self, walker); } finalizeRow(len); } extern ECLRTL_API unsigned __int64 rtlDictionaryCount(size32_t tableSize, const byte **table) { unsigned __int64 ret = 0; for (size32_t i = 0; i < tableSize; i++) if (table[i]) ret++; return ret; } extern ECLRTL_API bool rtlDictionaryExists(size32_t tableSize, const byte **table) { unsigned __int64 ret = 0; for (size32_t i = 0; i < tableSize; i++) if (table[i]) return true; return false; } extern ECLRTL_API const byte *rtlDictionaryLookup(IHThorHashLookupInfo &hashInfo, size32_t tableSize, const byte **table, const byte *source, const byte *defaultRow) { if (!tableSize) return (const byte *) rtlLinkRow(defaultRow); IHash *hash = hashInfo.queryHashLookup(); ICompare *compare = hashInfo.queryCompareLookup(); unsigned rowidx = hash->hash(source) % tableSize; for (;;) { const void *entry = table[rowidx]; if (!entry) return (const byte *) rtlLinkRow(defaultRow); if (compare->docompare(source, entry)==0) return (const byte *) rtlLinkRow(entry); rowidx++; if (rowidx==tableSize) rowidx = 0; } } // Optimized cases for common single-field lookups extern ECLRTL_API const byte *rtlDictionaryLookupString(size32_t tableSize, const byte **table, size32_t searchLen, const char *searchFor, const byte *defaultRow) { if (!tableSize) return (const byte *) rtlLinkRow(defaultRow); unsigned hash = rtlHash32Data(rtlTrimStrLen(searchLen, searchFor), searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const char 
*entry = (const char *) table[rowidx]; if (!entry) return (const byte *) rtlLinkRow(defaultRow); if (rtlCompareStrStr(searchLen, searchFor, * (size32_t *) entry, entry+sizeof(size32_t))==0) return (const byte *) rtlLinkRow(entry); rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API const byte *rtlDictionaryLookupStringN(size32_t tableSize, const byte **table, size32_t N, size32_t searchLen, const char *searchFor, const byte *defaultRow) { if (!tableSize) return (byte *) rtlLinkRow(defaultRow); unsigned hash = rtlHash32Data(rtlTrimStrLen(searchLen, searchFor), searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const char *entry = (const char *) table[rowidx]; if (!entry) return (byte *) rtlLinkRow(defaultRow); if (rtlCompareStrStr(searchLen, searchFor, N, entry)==0) return (byte *) rtlLinkRow(entry); rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API const byte *rtlDictionaryLookupSigned(size32_t tableSize, const byte **table, __int64 searchFor, const byte *defaultRow) { if (!tableSize) return (const byte *) rtlLinkRow(defaultRow); unsigned hash = rtlHash32Data8(&searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const void *entry = table[rowidx]; if (!entry) return (const byte *) rtlLinkRow(defaultRow); if (* (__int64 *) entry == searchFor) return (const byte *) rtlLinkRow(entry); rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API const byte *rtlDictionaryLookupUnsigned(size32_t tableSize, const byte **table, __uint64 searchFor, const byte *defaultRow) { if (!tableSize) return (const byte *) rtlLinkRow(defaultRow); unsigned hash = rtlHash32Data8(&searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const void *entry = table[rowidx]; if (!entry) return (const byte *) rtlLinkRow(defaultRow); if (* (__uint64 *) entry == searchFor) return (const byte *) rtlLinkRow(entry); rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API const byte 
*rtlDictionaryLookupSignedN(size32_t tableSize, const byte **table, size32_t size, __int64 searchFor, const byte *defaultRow) { if (!tableSize) return (const byte *) rtlLinkRow(defaultRow); unsigned hash = rtlHash32Data8(&searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const void *entry = table[rowidx]; if (!entry) return (const byte *) rtlLinkRow(defaultRow); if (rtlReadInt(entry, size) == searchFor) return (const byte *) rtlLinkRow(entry); rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API const byte *rtlDictionaryLookupUnsignedN(size32_t tableSize, const byte **table, size32_t size, __uint64 searchFor, const byte *defaultRow) { if (!tableSize) return (const byte *) rtlLinkRow(defaultRow); unsigned hash = rtlHash32Data8(&searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const void *entry = table[rowidx]; if (!entry) return (const byte *) rtlLinkRow(defaultRow); if (rtlReadUInt(entry, size) == searchFor) return (const byte *) rtlLinkRow(entry); rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API bool rtlDictionaryLookupExists(IHThorHashLookupInfo &hashInfo, size32_t tableSize, const byte **table, const byte *source) { if (!tableSize) return false; IHash *hash = hashInfo.queryHashLookup(); ICompare *compare = hashInfo.queryCompareLookup(); unsigned rowidx = hash->hash(source) % tableSize; for (;;) { const void *entry = table[rowidx]; if (!entry) return false; if (compare->docompare(source, entry)==0) return true; rowidx++; if (rowidx==tableSize) rowidx = 0; } } // Optimized exists cases for common single-field lookups extern ECLRTL_API bool rtlDictionaryLookupExistsString(size32_t tableSize, const byte **table, size32_t searchLen, const char *searchFor) { if (!tableSize) return false; unsigned hash = rtlHash32Data(rtlTrimStrLen(searchLen, searchFor), searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const char *entry = (const char *) table[rowidx]; if (!entry) return 
false; if (rtlCompareStrStr(searchLen, searchFor, * (size32_t *) entry, entry+sizeof(size32_t))==0) return true; rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API bool rtlDictionaryLookupExistsStringN(size32_t tableSize, const byte **table, size32_t N, size32_t searchLen, const char *searchFor) { if (!tableSize) return false; unsigned hash = rtlHash32Data(rtlTrimStrLen(searchLen, searchFor), searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const char *entry = (const char *) table[rowidx]; if (!entry) return false; if (rtlCompareStrStr(searchLen, searchFor, N, entry)==0) return true; rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API bool rtlDictionaryLookupExistsSigned(size32_t tableSize, const byte **table, __int64 searchFor) { if (!tableSize) return false; unsigned hash = rtlHash32Data8(&searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const void *entry = table[rowidx]; if (!entry) return false; if (* (__int64 *) entry == searchFor) return true; rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API bool rtlDictionaryLookupExistsUnsigned(size32_t tableSize, const byte **table, __uint64 searchFor) { if (!tableSize) return false; unsigned hash = rtlHash32Data8(&searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const void *entry = table[rowidx]; if (!entry) return false; if (* (__uint64 *) entry == searchFor) return true; rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API bool rtlDictionaryLookupExistsSignedN(size32_t tableSize, const byte **table, size32_t size, __int64 searchFor) { if (!tableSize) return false; unsigned hash = rtlHash32Data8(&searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const void *entry = table[rowidx]; if (!entry) return false; if (rtlReadInt(entry, size) == searchFor) return true; rowidx++; if (rowidx==tableSize) rowidx = 0; } } extern ECLRTL_API bool 
rtlDictionaryLookupExistsUnsignedN(size32_t tableSize, const byte **table, size32_t size, __uint64 searchFor) { if (!tableSize) return false; unsigned hash = rtlHash32Data8(&searchFor, HASH32_INIT); unsigned rowidx = hash % tableSize; for (;;) { const void *entry = table[rowidx]; if (!entry) return false; if (rtlReadUInt(entry, size) == searchFor) return true; rowidx++; if (rowidx==tableSize) rowidx = 0; } } unsigned CHThorDictHelper::hash(const void * self) { return hashFields(rec.fields, (const byte *) self, HASH32_INIT, true); } int CHThorDictHelper::docompare(const void *left, const void *right) const { return compareFields(rec.fields, (const byte *) left, (const byte *) right, true); } //--------------------------------------------------------------------------------------------------------------------- // Serialization helper classes //These definitions should be shared with thorcommon, but to do that //they would need to be moved to an rtlds.ipp header, which thorcommon then included. 
// IRowSerializerTarget that appends into a MemoryBuffer.  Nested scopes are
// encoded as a leading size32_t placeholder patched in endNested().
class ECLRTL_API CMemoryBufferSerializeTarget : implements IRowSerializerTarget
{
public:
    CMemoryBufferSerializeTarget(MemoryBuffer & _buffer) : buffer(_buffer)
    {
    }

    virtual void put(size32_t len, const void * ptr)
    {
        buffer.append(len, ptr);
    }

    // Reserve a 4-byte size slot; returns its position for endNested().
    virtual size32_t beginNested(size32_t count)
    {
        unsigned pos = buffer.length();
        buffer.append((size32_t)0);
        return pos;
    }

    // Back-patch the reserved slot with the number of bytes written since.
    virtual void endNested(size32_t sizePos)
    {
        unsigned pos = buffer.length();
        buffer.rewrite(sizePos);
        buffer.append((size32_t)(pos - (sizePos + sizeof(size32_t))));
        buffer.rewrite(pos);
    }

protected:
    MemoryBuffer & buffer;
};

// IRowSerializerTarget that writes directly into an ARowBuilder, growing its
// capacity as needed; 'offset' tracks the number of bytes emitted so far.
class ECLRTL_API CRowBuilderSerializeTarget : implements IRowSerializerTarget
{
public:
    CRowBuilderSerializeTarget(ARowBuilder & _builder) : builder(_builder)
    {
        offset = 0;
    }

    virtual void put(size32_t len, const void * ptr)
    {
        byte * data = builder.ensureCapacity(offset + len, "");
        memcpy(data+offset, ptr, len);
        offset += len;
    }

    virtual size32_t beginNested(size32_t count)
    {
        unsigned pos = offset;
        offset += sizeof(size32_t);
        builder.ensureCapacity(offset, "");
        return pos;
    }

    virtual void endNested(size32_t sizePos)
    {
        byte * self = builder.getSelf();
        *(size32_t *)(self + sizePos) = offset - (sizePos + sizeof(size32_t));
    }

    inline size32_t length() const { return offset; }

protected:
    ARowBuilder & builder;
    size32_t offset;
};

//---------------------------------------------------------------------------------------------------------------------
// internal serialization helper functions

// Read rows into 'builder' until the nested scope ends.  In grouped format each
// row is followed by an eog byte; a pending eog inserts an EOG marker before
// the next row (so no trailing EOG is emitted at end-of-scope).
inline void doDeserializeRowset(RtlLinkedDatasetBuilder & builder, IOutputRowDeserializer & deserializer, IRowDeserializerSource & in, offset_t marker, bool isGrouped)
{
    byte eogPending = false;
    while (!in.finishedNested(marker))
    {
        if (isGrouped && eogPending)
            builder.appendEOG();
        builder.deserializeRow(deserializer, in);
        if (isGrouped)
            in.read(1, &eogPending);
    }
}

// Serialize each row; in grouped format emit a byte after each row flagging
// whether the following entry is an EOG (NULL) marker.
inline void doSerializeRowset(IRowSerializerTarget & out, IOutputRowSerializer * serializer, size32_t count, const byte * * rows, bool isGrouped)
{
    for (unsigned i=0; i < count; i++)
    {
        const byte *row = rows[i];
        if (row)
        {
            serializer->serialize(out, rows[i]);
            if (isGrouped)
            {
                byte eogPending = (i+1 < count) && (rows[i+1] == NULL);
                out.put(1, &eogPending);
            }
        }
        else
        {
            assert(isGrouped); // should not be seeing NULLs otherwise - should not use this function for DICTIONARY
        }
    }
}

// Serialize only the non-null rows (used when converting a dictionary to a dataset).
inline void doSerializeRowsetStripNulls(IRowSerializerTarget & out, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    for (unsigned i=0; i < count; i++)
    {
        const byte *row = rows[i];
        if (row)
            serializer->serialize(out, rows[i]);
    }
}

// Deserialize a whole rowset, returning the count and a linked rowset.
inline void doDeserializeRowset(size32_t & count, const byte * * & rowset, IEngineRowAllocator * _rowAllocator, IOutputRowDeserializer * deserializer, offset_t marker, IRowDeserializerSource & in, bool isGrouped)
{
    RtlLinkedDatasetBuilder builder(_rowAllocator);
    doDeserializeRowset(builder, *deserializer, in, marker, isGrouped);
    count = builder.getcount();
    rowset = builder.linkrows();
}

// As above, but the rowset is wrapped in a size-prefixed nested scope.
inline void doDeserializeChildRowset(size32_t & count, const byte * * & rowset, IEngineRowAllocator * _rowAllocator, IOutputRowDeserializer * deserializer, IRowDeserializerSource & in, bool isGrouped)
{
    offset_t marker = in.beginNested();
    doDeserializeRowset(count, rowset, _rowAllocator, deserializer, marker, in, isGrouped);
}

//--------------------------------------------------------------------------------------------------------------------
//Serialize/deserialize functions call for child datasets in the row serializer

extern ECLRTL_API void rtlDeserializeChildRowset(size32_t & count, const byte * * & rowset, IEngineRowAllocator * _rowAllocator, IOutputRowDeserializer * deserializer, IRowDeserializerSource & in)
{
    doDeserializeChildRowset(count, rowset, _rowAllocator, deserializer, in, false);
}

extern ECLRTL_API void rtlDeserializeChildGroupedRowset(size32_t & count, const byte * * & rowset, IEngineRowAllocator * _rowAllocator, IOutputRowDeserializer * deserializer, IRowDeserializerSource & in)
{
    doDeserializeChildRowset(count, rowset, _rowAllocator, deserializer, in, true);
}

extern ECLRTL_API void rtlSerializeChildRowset(IRowSerializerTarget & out, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    size32_t marker = out.beginNested(count);
    doSerializeRowset(out, serializer, count, rows, false);
    out.endNested(marker);
}

extern ECLRTL_API void rtlSerializeChildGroupedRowset(IRowSerializerTarget & out, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    size32_t marker = out.beginNested(count);
    doSerializeRowset(out, serializer, count, rows, true);
    out.endNested(marker);
}

//--------------------------------------------------------------------------------------------------------------------
//Serialize/deserialize functions used to serialize data from the master to the slave (defined in eclrtl.hpp) to/from a MemoryBuffer

extern void deserializeRowsetX(size32_t & count, const byte * * & rowset, IEngineRowAllocator * _rowAllocator, IOutputRowDeserializer * deserializer, MemoryBuffer &in)
{
    Owned<ISerialStream> stream = createMemoryBufferSerialStream(in);
    CThorStreamDeserializerSource rowSource(stream);
    doDeserializeChildRowset(count, rowset, _rowAllocator, deserializer, rowSource, false);
}

extern void deserializeGroupedRowsetX(size32_t & count, const byte * * & rowset, IEngineRowAllocator * _rowAllocator, IOutputRowDeserializer * deserializer, MemoryBuffer &in)
{
    Owned<ISerialStream> stream = createMemoryBufferSerialStream(in);
    CThorStreamDeserializerSource rowSource(stream);
    doDeserializeChildRowset(count, rowset, _rowAllocator, deserializer, rowSource, true);
}

extern void serializeRowsetX(size32_t count, const byte * * rows, IOutputRowSerializer * serializer, MemoryBuffer & buffer)
{
    CMemoryBufferSerializeTarget out(buffer);
    rtlSerializeChildRowset(out, serializer, count, rows);
}

extern void serializeGroupedRowsetX(size32_t count, const byte * * rows, IOutputRowSerializer * serializer, MemoryBuffer & buffer)
{
    CMemoryBufferSerializeTarget out(buffer);
    rtlSerializeChildGroupedRowset(out, serializer, count, rows);
}

//--------------------------------------------------------------------------------------------------------------------
// Functions for converting between different representations - where the source/target are complete datasets

// Deserialize a serialized dataset blob (lenSrc bytes at src) into a linked rowset.
inline void doDataset2RowsetX(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, size32_t lenSrc, const void * src, bool isGrouped)
{
    Owned<ISerialStream> stream = createMemorySerialStream(src, lenSrc);
    CThorStreamDeserializerSource source(stream);
    doDeserializeRowset(count, rowset, rowAllocator, deserializer, lenSrc, source, isGrouped);
}

extern ECLRTL_API void rtlDataset2RowsetX(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, size32_t lenSrc, const void * src, bool isGrouped)
{
    doDataset2RowsetX(count, rowset, rowAllocator, deserializer, lenSrc, src, isGrouped);
}

extern ECLRTL_API void rtlDataset2RowsetX(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, size32_t lenSrc, const void * src)
{
    doDataset2RowsetX(count, rowset, rowAllocator, deserializer, lenSrc, src, false);
}

extern ECLRTL_API void rtlGroupedDataset2RowsetX(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, size32_t lenSrc, const void * src)
{
    doDataset2RowsetX(count, rowset, rowAllocator, deserializer, lenSrc, src, true);
}

// Serialize a rowset into a flat heap block; frees any previous tgt first.
inline void doRowset2DatasetX(unsigned & tlen, void * & tgt, IOutputRowSerializer * serializer, size32_t count, const byte * * rows, bool isGrouped)
{
    MemoryBuffer buffer;
    CMemoryBufferSerializeTarget out(buffer);
    doSerializeRowset(out, serializer, count, rows, isGrouped);
    rtlFree(tgt);
    tlen = buffer.length();
    tgt = buffer.detach();      // not strictly speaking correct - it should have been allocated with rtlMalloc();
}

extern ECLRTL_API void rtlRowset2DatasetX(unsigned & tlen, void * & tgt, IOutputRowSerializer * serializer, size32_t count, const byte * * rows, bool isGrouped)
{
    doRowset2DatasetX(tlen, tgt, serializer, count, rows, isGrouped);
}

extern ECLRTL_API void rtlRowset2DatasetX(unsigned & tlen, void * & tgt, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    doRowset2DatasetX(tlen, tgt, serializer, count, rows, false);
}

extern ECLRTL_API void rtlGroupedRowset2DatasetX(unsigned & tlen, void * & tgt, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    doRowset2DatasetX(tlen, tgt, serializer, count, rows, true);
}

//--------------------------------------------------------------------------------------------------------------------
// Serialize/deserialize rows to a memory buffer

void serializeRow(const void * row, IOutputRowSerializer * serializer, MemoryBuffer & buffer)
{
    CMemoryBufferSerializeTarget out(buffer);
    serializer->serialize(out, static_cast<const byte *>(row));
}

// Deserialize one row from a MemoryBuffer; caller owns the returned row.
extern ECLRTL_API byte * rtlDeserializeBufferRow(IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, MemoryBuffer & buffer)
{
    Owned<ISerialStream> stream = createMemoryBufferSerialStream(buffer);
    CThorStreamDeserializerSource source(stream);
    RtlDynamicRowBuilder rowBuilder(rowAllocator);
    size32_t rowSize = deserializer->deserialize(rowBuilder, source);
    return static_cast<byte *>(const_cast<void *>(rowBuilder.finalizeRowClear(rowSize)));
}

//--------------------------------------------------------------------------------------------------------------------
// serialize/deserialize a row to a builder or another row

// Deserialize one row from raw memory of unknown extent — the stream is given
// a maximal pseudo-length since the deserializer knows when to stop.
extern ECLRTL_API byte * rtlDeserializeRow(IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, const void * src)
{
    const size32_t unknownSourceLength = 0x7fffffff;
    Owned<ISerialStream> stream = createMemorySerialStream(src, unknownSourceLength);
    CThorStreamDeserializerSource source(stream);
    RtlDynamicRowBuilder rowBuilder(*rowAllocator);
    size32_t rowSize = deserializer->deserialize(rowBuilder, source);
    return static_cast<byte *>(const_cast<void *>(rowBuilder.finalizeRowClear(rowSize)));
}

// Deserialize into a caller-supplied builder; returns the row size.
extern ECLRTL_API size32_t rtlDeserializeToBuilder(ARowBuilder & builder, IOutputRowDeserializer * deserializer, const void * src)
{
    const size32_t unknownSourceLength = 0x7fffffff;
    Owned<ISerialStream> stream = createMemorySerialStream(src, unknownSourceLength);
    CThorStreamDeserializerSource source(stream);
    return deserializer->deserialize(builder, source);
}

// Serialize a row directly into a builder; returns the serialized length.
extern ECLRTL_API size32_t rtlSerializeToBuilder(ARowBuilder & builder, IOutputRowSerializer * serializer, const void * src)
{
    CRowBuilderSerializeTarget target(builder);
    serializer->serialize(target, (const byte *)src);
    return target.length();
}

//--------------------------------------------------------------------------------------------------------------------

// Dictionary wire format: total slot count, then alternating runs of
// <numRows (1 byte, max 255)> rows and <numNulls (1 byte, max 255)> empty slots,
// preserving the hash-table layout exactly.
static void doSerializeDictionary(IRowSerializerTarget & out, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    out.put(sizeof(count), &count);
    size32_t idx = 0;
    while (idx < count)
    {
        byte numRows = 0;
        while (numRows < 255 && idx+numRows < count && rows[idx+numRows] != NULL)
            numRows++;
        out.put(1, &numRows);
        for (int i = 0; i < numRows; i++)
        {
            const byte *nextrec = rows[idx+i];
            assert(nextrec);
            serializer->serialize(out, nextrec);
        }
        idx += numRows;
        byte numNulls = 0;
        while (numNulls < 255 && idx+numNulls < count && rows[idx+numNulls] == NULL)
            numNulls++;
        out.put(1, &numNulls);
        idx += numNulls;
    }
}

// Inverse of doSerializeDictionary: rebuild the slot array (rows + EOG/null
// slots) and verify the recovered slot count matches the serialized total.
static void doDeserializeDictionary(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, offset_t marker, IRowDeserializerSource & in)
{
    RtlLinkedDatasetBuilder builder(rowAllocator);
    size32_t totalRows;
    in.read(sizeof(totalRows), &totalRows);
    builder.ensure(totalRows);
    byte nullsPending = 0;
    byte rowsPending = 0;
    while (!in.finishedNested(marker))
    {
        in.read(1, &rowsPending);
        for (int i = 0; i < rowsPending; i++)
            builder.deserializeRow(*deserializer, in);
        in.read(1, &nullsPending);
        for (int i = 0; i < nullsPending; i++)
            builder.appendEOG();
    }
    count = builder.getcount();
    assertex(count==totalRows);
    rowset = builder.linkrows();
}

// Build a dictionary by re-hashing rows deserialized from a plain dataset stream.
static void doDeserializeDictionaryFromDataset(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, IHThorHashLookupInfo & hashInfo, offset_t marker, IRowDeserializerSource & in)
{
    RtlLinkedDictionaryBuilder builder(rowAllocator, &hashInfo);
    while (!in.finishedNested(marker))
        builder.deserializeRow(*deserializer, in);
    count = builder.getcount();
    rowset = builder.linkrows();
}

extern ECLRTL_API void rtlSerializeDictionary(IRowSerializerTarget & out, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    doSerializeDictionary(out, serializer, count, rows);
}

extern ECLRTL_API void rtlSerializeDictionaryToDataset(IRowSerializerTarget & out, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    doSerializeRowsetStripNulls(out, serializer, count, rows);
}

extern ECLRTL_API void rtlDeserializeChildDictionary(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, IRowDeserializerSource & in)
{
    offset_t marker = in.beginNested();     // MORE: Would this be better as a count?
    doDeserializeDictionary(count, rowset, rowAllocator, deserializer, marker, in);
}

extern ECLRTL_API void rtlDeserializeChildDictionaryFromDataset(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, IHThorHashLookupInfo & hashInfo, IRowDeserializerSource & in)
{
    offset_t marker = in.beginNested();     // MORE: Would this be better as a count?
    doDeserializeDictionaryFromDataset(count, rowset, rowAllocator, deserializer, hashInfo, marker, in);
}

extern ECLRTL_API void rtlSerializeChildDictionary(IRowSerializerTarget & out, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    size32_t marker = out.beginNested(count);
    doSerializeDictionary(out, serializer, count, rows);
    out.endNested(marker);
}

extern ECLRTL_API void rtlSerializeChildDictionaryToDataset(IRowSerializerTarget & out, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    size32_t marker = out.beginNested(count);
    doSerializeRowsetStripNulls(out, serializer, count, rows);
    out.endNested(marker);
}

extern void deserializeDictionaryX(size32_t & count, const byte * * & rowset, IEngineRowAllocator * _rowAllocator, IOutputRowDeserializer * deserializer, MemoryBuffer &in)
{
    Owned<ISerialStream> stream = createMemoryBufferSerialStream(in);
    CThorStreamDeserializerSource rowSource(stream);
    rtlDeserializeChildDictionary(count, rowset, _rowAllocator, deserializer, rowSource);
}

extern void serializeDictionaryX(size32_t count, const byte * * rows, IOutputRowSerializer * serializer, MemoryBuffer & buffer)
{
    CMemoryBufferSerializeTarget out(buffer);
    rtlSerializeChildDictionary(out, serializer, count, rows);
}

extern ECLRTL_API void rtlDeserializeDictionary(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, size32_t lenSrc, const void * src)
{
    Owned<ISerialStream> stream = createMemorySerialStream(src, lenSrc);
    CThorStreamDeserializerSource in(stream);
    doDeserializeDictionary(count, rowset, rowAllocator, deserializer, lenSrc, in);
}

extern ECLRTL_API void rtlDeserializeDictionaryFromDataset(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IOutputRowDeserializer * deserializer, IHThorHashLookupInfo & hashInfo, size32_t lenSrc, const void * src)
{
    Owned<ISerialStream> stream = createMemorySerialStream(src, lenSrc);
    CThorStreamDeserializerSource in(stream);
    doDeserializeDictionaryFromDataset(count, rowset, rowAllocator, deserializer, hashInfo, lenSrc, in);
}

extern ECLRTL_API void rtlSerializeDictionary(unsigned & tlen, void * & tgt, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    MemoryBuffer buffer;
    CMemoryBufferSerializeTarget out(buffer);
    doSerializeDictionary(out, serializer, count, rows);
    rtlFree(tgt);
    tlen = buffer.length();
    tgt = buffer.detach();      // not strictly speaking correct - it should have been allocated with rtlMalloc();
}

extern ECLRTL_API void rtlSerializeDictionaryToDataset(unsigned & tlen, void * & tgt, IOutputRowSerializer * serializer, size32_t count, const byte * * rows)
{
    MemoryBuffer buffer;
    CMemoryBufferSerializeTarget out(buffer);
    doSerializeRowsetStripNulls(out, serializer, count, rows);
    rtlFree(tgt);
    tlen = buffer.length();
    tgt = buffer.detach();      // not strictly speaking correct - it should have been allocated with rtlMalloc();
}

// Convert a rowset into a dictionary in place: the input rowset is consumed
// (released) and replaced by the dictionary's hash-table rowset.
extern ECLRTL_API void rtlCreateDictionaryFromDataset(size32_t & count, const byte * * & rowset, IEngineRowAllocator * rowAllocator, IHThorHashLookupInfo & hashInfo)
{
    RtlLinkedDictionaryBuilder builder(rowAllocator, &hashInfo);
    builder.appendRows(count, rowset);
    rowAllocator->releaseRowset(count, rowset);
    count = builder.getcount();
    rowset = builder.linkrows();
}

//---------------------------------------------------------------------------

// Base cursor over a contiguous block of rows: [buffer, end), 'cur' is the
// current row or NULL when exhausted/not positioned.
RtlDatasetCursor::RtlDatasetCursor(size32_t _len, const void * _data)
{
    setDataset(_len, _data);
}

bool RtlDatasetCursor::exists()
{
    return (end != buffer);
}

const byte * RtlDatasetCursor::first()
{
    if (buffer != end)
        cur = buffer;
    return cur;
}

const byte * RtlDatasetCursor::get()
{
    return cur;
}

void RtlDatasetCursor::setDataset(size32_t _len, const void * _data)
{
    buffer = (const byte *)_data;
    end = buffer + _len;
    cur = NULL;
}

bool RtlDatasetCursor::isValid()
{
    return (cur != NULL);
}

/*
const byte * RtlDatasetCursor::next()
{
    if (cur)
    {
        cur += getRowSize();
        if (cur >= end)
            cur = NULL;
    }
    return cur;
}
*/

//---------------------------------------------------------------------------

// Cursor over fixed-size records.
RtlFixedDatasetCursor::RtlFixedDatasetCursor(size32_t _len, const void * _data, unsigned _recordSize) : RtlDatasetCursor(_len, _data)
{
    recordSize = _recordSize;
}

RtlFixedDatasetCursor::RtlFixedDatasetCursor() : RtlDatasetCursor(0, NULL)
{
    recordSize = 1;     // non-zero default avoids division by zero in count()
}

size32_t RtlFixedDatasetCursor::count()
{
    return (size32_t)((end - buffer) / recordSize);
}

size32_t RtlFixedDatasetCursor::getSize()
{
    return recordSize;
}

void RtlFixedDatasetCursor::init(size32_t _len, const void * _data, unsigned _recordSize)
{
    recordSize = _recordSize;
    setDataset(_len, _data);
}

const byte * RtlFixedDatasetCursor::next()
{
    if (cur)
    {
        cur += recordSize;
        if (cur >= end)
            cur = NULL;
    }
    return cur;
}

// O(1) random access by record index; NULL when idx is out of range.
const byte * RtlFixedDatasetCursor::select(unsigned idx)
{
    cur = buffer + idx * recordSize;
    if (cur >= end)
        cur = NULL;
    return cur;
}

//---------------------------------------------------------------------------

// Cursor over variable-size records; sizes come from an IRecordSize callback,
// so count() and select() are linear scans.
RtlVariableDatasetCursor::RtlVariableDatasetCursor(size32_t _len, const void * _data, IRecordSize & _recordSize) : RtlDatasetCursor(_len, _data)
{
    recordSize = &_recordSize;
}

RtlVariableDatasetCursor::RtlVariableDatasetCursor() : RtlDatasetCursor(0, NULL)
{
    recordSize = NULL;
}

void RtlVariableDatasetCursor::init(size32_t _len, const void * _data, IRecordSize & _recordSize)
{
    recordSize = &_recordSize;
    setDataset(_len, _data);
}

size32_t RtlVariableDatasetCursor::count()
{
    const byte * finger = buffer;
    unsigned c = 0;
    while (finger < end)
    {
        finger += recordSize->getRecordSize(finger);
        c++;
    }
    assertex(finger == end);    // records must tile the buffer exactly
    return c;
}

size32_t RtlVariableDatasetCursor::getSize()
{
    return recordSize->getRecordSize(cur);
}

const byte * RtlVariableDatasetCursor::next()
{
    if (cur)
    {
        cur += recordSize->getRecordSize(cur);
        if (cur >= end)
            cur = NULL;
    }
    return cur;
}

// Linear scan to the idx'th record; NULL when idx is out of range.
const byte * RtlVariableDatasetCursor::select(unsigned idx)
{
    const byte * finger = buffer;
    unsigned c = 0;
    while (finger < end)
    {
        if (c == idx)
        {
            cur = finger;
            return cur;
        }
        finger += recordSize->getRecordSize(finger);
        c++;
    }
    assertex(finger == end);
    cur = NULL;
    return NULL;
}

//---------------------------------------------------------------------------

// Cursor over an array of linked row pointers; 'cur' is an index, with
// (unsigned)-1 meaning "before first" so every cur < numRows test fails.
RtlLinkedDatasetCursor::RtlLinkedDatasetCursor(unsigned _numRows, const byte * * _rows) : numRows(_numRows), rows(_rows)
{
    cur = (unsigned)-1;
}

RtlLinkedDatasetCursor::RtlLinkedDatasetCursor()
{
    numRows = 0;
    rows = NULL;
    cur = (unsigned)-1;
}

void RtlLinkedDatasetCursor::init(unsigned _numRows, const byte * * _rows)
{
    numRows = _numRows;
    rows = _rows;
    cur = (unsigned)-1;
}

const byte * RtlLinkedDatasetCursor::first()
{
    cur = 0;
    return cur < numRows ? rows[cur] : NULL;
}

const byte * RtlLinkedDatasetCursor::get()
{
    return cur < numRows ? rows[cur] : NULL;
}

bool RtlLinkedDatasetCursor::isValid()
{
    return (cur < numRows);
}

const byte * RtlLinkedDatasetCursor::next()
{
    if (cur < numRows)
        cur++;
    return cur < numRows ? rows[cur] : NULL;
}

const byte * RtlLinkedDatasetCursor::select(unsigned idx)
{
    cur = idx;
    return cur < numRows ? rows[cur] : NULL;
}

//---------------------------------------------------------------------------

// Variant of RtlLinkedDatasetCursor that owns (links/releases) the rowset.
RtlSafeLinkedDatasetCursor::RtlSafeLinkedDatasetCursor(unsigned _numRows, const byte * * _rows)
{
    init(_numRows, _rows);
}

RtlSafeLinkedDatasetCursor::~RtlSafeLinkedDatasetCursor()
{
    ReleaseRoxieRowset(numRows, rows);
}

void RtlSafeLinkedDatasetCursor::init(unsigned _numRows, const byte * * _rows)
{
    ReleaseRoxieRowset(numRows, rows);      // drop any rowset held from a previous init
    numRows = _numRows;
    rows = _rows;
    cur = (unsigned)-1;
    LinkRoxieRowset(rows);
}

//---------------------------------------------------------------------------

// Cursor that pulls rows on demand from an IRowStream; 'cur' owns the current row.
RtlStreamedDatasetCursor::RtlStreamedDatasetCursor(IRowStream * _stream)
{
    init(_stream);
}

RtlStreamedDatasetCursor::RtlStreamedDatasetCursor()
{
}

void RtlStreamedDatasetCursor::init(IRowStream * _stream)
{
    stream.set(_stream);
    cur.clear();
}

// NOTE(review): first() reads the next row from the stream rather than
// rewinding — presumably streams are single-pass; confirm against callers.
const byte * RtlStreamedDatasetCursor::first()
{
    cur.setown(stream->nextRow());
    return cur.getbytes();
}

const byte * RtlStreamedDatasetCursor::next()
{
    cur.setown(stream->nextRow());
    return cur.getbytes();
}

//---------------------------------------------------------------------------

// Linear membership test: true if any row in the cursor compares equal to lhs.
bool rtlCheckInList(const void * lhs, IRtlDatasetCursor * cursor, ICompare * compare)
{
    const byte * cur;
    for (cur = cursor->first(); cur; cur = cursor->next())
    {
        if (compare->docompare(lhs, cur) == 0)
            return true;
    }
    return false;
}

// Copy a SET value (isAll flag + raw data block); output data is malloc'd.
void rtlSetToSetX(bool & outIsAll, size32_t & outLen, void * & outData, bool inIsAll, size32_t inLen, const void * inData)
{
    outIsAll = inIsAll;
    outLen = inLen;
    outData = malloc(inLen);
    memcpy(outData, inData, inLen);
}

// Concatenate two SET values.  If either side is ALL the result is ALL with no
// data; otherwise the data blocks are concatenated into a fresh malloc'd block.
void rtlAppendSetX(bool & outIsAll, size32_t & outLen, void * & outData, bool leftIsAll, size32_t leftLen, const void * leftData, bool rightIsAll, size32_t rightLen, const void * rightData)
{
    outIsAll = leftIsAll | rightIsAll;
    if (outIsAll)
    {
        outLen = 0;
        outData = NULL;
    }
    else
    {
        outLen = leftLen+rightLen;
        outData = malloc(outLen);
        memcpy(outData, leftData, leftLen);
        memcpy((byte*)outData+leftLen, rightData, rightLen);
    }
    // NOTE(review): this chunk ends mid-function; the closing brace of
    // rtlAppendSetX lies outside this view.
} //------------------------------------------------------------------------------ RtlCompoundIterator::RtlCompoundIterator() { ok = false; numLevels = 0; iters = NULL; cursors = NULL; } RtlCompoundIterator::~RtlCompoundIterator() { delete [] iters; delete [] cursors; } void RtlCompoundIterator::addIter(unsigned idx, IRtlDatasetSimpleCursor * iter, const byte * * cursor) { assertex(idx < numLevels); iters[idx] = iter; cursors[idx] = cursor; } void RtlCompoundIterator::init(unsigned _numLevels) { numLevels = _numLevels; iters = new IRtlDatasetSimpleCursor * [numLevels]; cursors = new const byte * * [numLevels]; } //Could either duplicate this function, N times, or have it as a helper function that accesses pre-defined virtuals. bool RtlCompoundIterator::first(unsigned level) { IRtlDatasetSimpleCursor * curIter = iters[level]; if (level == 0) { const byte * cur = curIter->first(); setCursor(level, cur); return (cur != NULL); } if (!first(level-1)) return false; for (;;) { const byte * cur = curIter->first(); if (cur) { setCursor(level, cur); return true; } if (!next(level-1)) return false; } } bool RtlCompoundIterator::next(unsigned level) { IRtlDatasetSimpleCursor * curIter = iters[level]; const byte * cur = curIter->next(); if (cur) { setCursor(level, cur); return true; } if (level == 0) return false; for (;;) { if (!next(level-1)) return false; const byte * cur = curIter->first(); if (cur) { setCursor(level, cur); return true; } } } //------------------------------------------------------------------------------ void RtlSimpleIterator::addIter(unsigned idx, IRtlDatasetSimpleCursor * _iter, const byte * * _cursor) { assertex(idx == 0); iter = _iter; cursor = _cursor; *cursor = NULL; } bool RtlSimpleIterator::first() { const byte * cur = iter->first(); *cursor = cur; return (cur != NULL); } bool RtlSimpleIterator::next() { const byte * cur = iter->next(); *cursor = cur; return (cur != NULL); } //// byte * MemoryBufferBuilder::ensureCapacity(size32_t required, const 
char * fieldName) { dbgassertex(buffer); if (required > reserved) { void * next = buffer->reserve(required-reserved); self = (byte *)next - reserved; reserved = required; } return self; } void MemoryBufferBuilder::finishRow(size32_t length) { dbgassertex(buffer); assertex(length <= reserved); size32_t newLength = (buffer->length() - reserved) + length; buffer->setLength(newLength); self = NULL; reserved = 0; }
//===--- Generics.cpp ---- Utilities for transforming generics ------------===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// #define DEBUG_TYPE "generic-specializer" #include "swift/SILOptimizer/Utils/Generics.h" #include "swift/AST/GenericEnvironment.h" #include "swift/AST/GenericSignatureBuilder.h" #include "swift/AST/TypeMatcher.h" #include "swift/Basic/Statistic.h" #include "swift/SIL/DebugUtils.h" #include "swift/SIL/InstructionUtils.h" #include "swift/SIL/OptimizationRemark.h" #include "swift/SILOptimizer/Utils/GenericCloner.h" #include "swift/SILOptimizer/Utils/SpecializationMangler.h" #include "swift/Strings.h" using namespace swift; STATISTIC(NumPreventedGenericSpecializationLoops, "# of prevented infinite generic specializations loops"); STATISTIC(NumPreventedTooComplexGenericSpecializations, "# of prevented generic specializations with too complex " "generic type parameters"); /// Set to true to enable the support for partial specialization. llvm::cl::opt<bool> EnablePartialSpecialization( "sil-partial-specialization", llvm::cl::init(false), llvm::cl::desc("Enable partial specialization of generics")); /// If set, then generic specialization tries to specialize using /// all substitutions, even if they the replacement types are generic. llvm::cl::opt<bool> SupportGenericSubstitutions( "sil-partial-specialization-with-generic-substitutions", llvm::cl::init(false), llvm::cl::desc("Enable partial specialization with generic substitutions")); /// Set to true to print detected infinite generic specialization loops that /// were prevented. 
llvm::cl::opt<bool> PrintGenericSpecializationLoops( "sil-print-generic-specialization-loops", llvm::cl::init(false), llvm::cl::desc("Print detected infinite generic specialization loops that " "were prevented")); static bool OptimizeGenericSubstitutions = false; /// Max depth of a type which can be processed by the generic /// specializer. /// E.g. the depth of Array<Array<Array<T>>> is 3. /// No specializations will be produced, if any of generic parameters contains /// a bound generic type with the depth higher than this threshold static const unsigned TypeDepthThreshold = 50; /// Set the width threshold rather high, because some projects uses very wide /// tuples to model fixed size arrays. static const unsigned TypeWidthThreshold = 2000; /// Compute the width and the depth of a type. /// We compute both, because some pathological test-cases result in very /// wide types and some others result in very deep types. It is important /// to bail as soon as we hit the threshold on any of both dimensions to /// prevent compiler hangs and crashes. 
static std::pair<unsigned, unsigned> getTypeDepthAndWidth(Type t) { unsigned Depth = 0; unsigned Width = 0; if (auto *BGT = t->getAs<BoundGenericType>()) { auto *NTD = BGT->getNominalOrBoundGenericNominal(); if (NTD) { auto StoredProperties = NTD->getStoredProperties(); Width += std::distance(StoredProperties.begin(), StoredProperties.end()); } Depth++; unsigned MaxTypeDepth = 0; auto GenericArgs = BGT->getGenericArgs(); for (auto Ty : GenericArgs) { unsigned TypeWidth; unsigned TypeDepth; std::tie(TypeDepth, TypeWidth) = getTypeDepthAndWidth(Ty); if (TypeDepth > MaxTypeDepth) MaxTypeDepth = TypeDepth; Width += TypeWidth; } Depth += MaxTypeDepth; return std::make_pair(Depth, Width); } if (auto *TupleTy = t->getAs<TupleType>()) { Width += TupleTy->getNumElements(); Depth++; unsigned MaxTypeDepth = 0; auto ElementTypes = TupleTy->getElementTypes(); for (auto Ty : ElementTypes) { unsigned TypeWidth; unsigned TypeDepth; std::tie(TypeDepth, TypeWidth) = getTypeDepthAndWidth(Ty); if (TypeDepth > MaxTypeDepth) MaxTypeDepth = TypeDepth; Width += TypeWidth; } Depth += MaxTypeDepth; return std::make_pair(Depth, Width); } if (auto *FnTy = t->getAs<SILFunctionType>()) { Depth++; unsigned MaxTypeDepth = 0; auto Params = FnTy->getParameters(); Width += Params.size(); for (auto Param : Params) { unsigned TypeWidth; unsigned TypeDepth; std::tie(TypeDepth, TypeWidth) = getTypeDepthAndWidth(Param.getType()); if (TypeDepth > MaxTypeDepth) MaxTypeDepth = TypeDepth; Width += TypeWidth; } auto Results = FnTy->getResults(); Width += Results.size(); for (auto Result : Results) { unsigned TypeWidth; unsigned TypeDepth; std::tie(TypeDepth, TypeWidth) = getTypeDepthAndWidth(Result.getType()); if (TypeDepth > MaxTypeDepth) MaxTypeDepth = TypeDepth; Width += TypeWidth; } if (FnTy->hasErrorResult()) { Width += 1; unsigned TypeWidth; unsigned TypeDepth; std::tie(TypeDepth, TypeWidth) = getTypeDepthAndWidth(FnTy->getErrorResult().getType()); if (TypeDepth > MaxTypeDepth) MaxTypeDepth = TypeDepth; 
Width += TypeWidth; } Depth += MaxTypeDepth; return std::make_pair(Depth, Width); } if (auto *FnTy = t->getAs<FunctionType>()) { Depth++; unsigned MaxTypeDepth = 0; auto Params = FnTy->getParams(); Width += Params.size(); for (auto &Param : Params) { unsigned TypeWidth; unsigned TypeDepth; std::tie(TypeDepth, TypeWidth) = getTypeDepthAndWidth(Param.getType()); if (TypeDepth > MaxTypeDepth) MaxTypeDepth = TypeDepth; Width += TypeWidth; } unsigned TypeWidth; unsigned TypeDepth; std::tie(TypeDepth, TypeWidth) = getTypeDepthAndWidth(FnTy->getResult()); if (TypeDepth > MaxTypeDepth) MaxTypeDepth = TypeDepth; Width += TypeWidth; Depth += MaxTypeDepth; return std::make_pair(Depth, Width); } if (auto *MT = t->getAs<MetatypeType>()) { Depth += 1; unsigned TypeWidth; unsigned TypeDepth; std::tie(TypeDepth, TypeWidth) = getTypeDepthAndWidth(MT->getInstanceType()); Width += TypeWidth; Depth += TypeDepth; return std::make_pair(Depth, Width); } return std::make_pair(Depth, Width); } static bool isTypeTooComplex(Type t) { unsigned TypeWidth; unsigned TypeDepth; std::tie(TypeDepth, TypeWidth) = getTypeDepthAndWidth(t); return TypeWidth >= TypeWidthThreshold || TypeDepth >= TypeDepthThreshold; } namespace { /// A helper class used to check whether one type is structurally contained /// the other type either completely or partially. class TypeComparator : public TypeMatcher<TypeComparator> { bool IsContained = false; public: bool isEqual(CanType T1, CanType T2) { return T1 == T2; } /// Check whether the type T1 is different from T2 and contained in the type /// T2. bool isStrictlyContainedIn(CanType T1, CanType T2) { if (isEqual(T1, T2)) return false; return T2.findIf([&T1, this](Type T) -> bool { return isEqual(T->getCanonicalType(), T1); }); } /// Check whether the type T1 is strictly or partially contained in the type /// T2. /// Partially contained means that if you drop the common structural "prefix" /// of T1 and T2 and get T1' and T2' then T1' is strictly contained in T2'. 
bool isPartiallyContainedIn(CanType T1, CanType T2) { if (isStrictlyContainedIn(T1, T2)) return true; match(T1, T2); return IsContained; } /// This method is invoked aftre skipping a common prefix of two types, /// when a structural difference is found. bool mismatch(TypeBase *firstType, TypeBase *secondType, Type sugaredFirstType) { auto firstCanType = firstType->getCanonicalType(); auto secondCanType = secondType->getCanonicalType(); if (isEqual(firstCanType, secondCanType)) return false; if (isStrictlyContainedIn(firstCanType, secondCanType)) { IsContained = true; return false; } return false; } }; } // anonymous namespace /// Checks if a second substitution map is an expanded version of /// the first substitution map. /// This is the case if at least one of the substitution type in Subs2 is /// "bigger" than the corresponding substitution type in Subs1. /// Type T2 is "smaller" than type T1 if T2 is structurally contained in T1. static bool growingSubstitutions(SubstitutionMap Subs1, SubstitutionMap Subs2) { auto Replacements1 = Subs1.getReplacementTypes(); auto Replacements2 = Subs2.getReplacementTypes(); assert(Replacements1.size() == Replacements2.size()); TypeComparator TypeCmp; // Perform component-wise comparisions for substitutions. for (unsigned idx : indices(Replacements1)) { auto Type1 = Replacements1[idx]->getCanonicalType(); auto Type2 = Replacements2[idx]->getCanonicalType(); // If types are the same, the substitution type does not grow. if (TypeCmp.isEqual(Type2, Type1)) continue; // If the new substitution type is getting smaller, the // substitution type does not grow. if (TypeCmp.isPartiallyContainedIn(Type2, Type1)) continue; if (TypeCmp.isPartiallyContainedIn(Type1, Type2)) { LLVM_DEBUG(llvm::dbgs() << "Type:\n"; Type1.dump(); llvm::dbgs() << "is (partially) contained in type:\n"; Type2.dump(); llvm::dbgs() << "Replacements[" << idx << "] has got bigger since last time.\n"); return true; } // None of the types is contained in the other type. 
// They are not comparable in this sense. } // The substitition list is not growing. return false; } /// Checks whether specializing a given generic apply would create an infinite /// cycle in the generic specializations graph. This can be the case if there is /// a loop in the specialization graph and generic parameters at each iteration /// of such a loop are getting bigger and bigger. /// The specialization graph is represented by means of SpecializationInformation. /// We use this meta-information about specializations to detect cycles in this /// graph. static bool createsInfiniteSpecializationLoop(ApplySite Apply) { if (!Apply) return false; auto *Callee = Apply.getCalleeFunction(); SILFunction *Caller = nullptr; Caller = Apply.getFunction(); int numAcceptedCycles = 1; // Name of the function to be specialized. auto GenericFunc = Callee; LLVM_DEBUG(llvm::dbgs() << "\n\n\nChecking for a specialization cycle:\n" << "Caller: " << Caller->getName() << "\n" << "Callee: " << Callee->getName() << "\n"; llvm::dbgs() << "Substitutions:\n"; Apply.getSubstitutionMap().dump(llvm::dbgs()); ); auto *CurSpecializationInfo = Apply.getSpecializationInfo(); if (CurSpecializationInfo) { LLVM_DEBUG(llvm::dbgs() << "Scan call-site's history\n"); } else if (Caller->isSpecialization()) { CurSpecializationInfo = Caller->getSpecializationInfo(); LLVM_DEBUG(llvm::dbgs() << "Scan caller's specialization history\n"); } while (CurSpecializationInfo) { LLVM_DEBUG(llvm::dbgs() << "Current caller is a specialization:\n" << "Caller: " << CurSpecializationInfo->getCaller()->getName() << "\n" << "Parent: " << CurSpecializationInfo->getParent()->getName() << "\n"; llvm::dbgs() << "Substitutions:\n"; for (auto Replacement : CurSpecializationInfo->getSubstitutions() .getReplacementTypes()) { Replacement->dump(); }); if (CurSpecializationInfo->getParent() == GenericFunc) { LLVM_DEBUG(llvm::dbgs() << "Found a call graph loop, checking substitutions\n"); // Consider if components of the substitution 
list gets bigger compared to // the previously seen specialization of the same generic function. if (growingSubstitutions(CurSpecializationInfo->getSubstitutions(), Apply.getSubstitutionMap())) { LLVM_DEBUG(llvm::dbgs() << "Found a generic specialization loop!\n"); // Accept a cycles up to a limit. This is necessary to generate // efficient code for some library functions, like compactMap, which // contain small specialization cycles. if (numAcceptedCycles == 0) return true; numAcceptedCycles--; } } // Get the next element of the specialization history. auto *CurCaller = CurSpecializationInfo->getCaller(); CurSpecializationInfo = nullptr; if (!CurCaller) break; LLVM_DEBUG(llvm::dbgs() << "\nCurrent caller is: " << CurCaller->getName() << "\n"); if (!CurCaller->isSpecialization()) break; CurSpecializationInfo = CurCaller->getSpecializationInfo(); } assert(!CurSpecializationInfo); LLVM_DEBUG(llvm::dbgs() << "Stop the scan: Current caller is not a specialization\n"); return false; } // ============================================================================= // ReabstractionInfo // ============================================================================= static bool shouldNotSpecializeCallee(SILFunction *Callee, SubstitutionMap Subs = {}) { if (Callee->hasSemanticsAttr("optimize.sil.specialize.generic.never")) return true; if (Subs.hasAnySubstitutableParams() && Callee->hasSemanticsAttr("optimize.sil.specialize.generic.partial.never")) return true; return false; } /// Prepares the ReabstractionInfo object for further processing and checks /// if the current function can be specialized at all. /// Returns false, if the current function cannot be specialized. /// Returns true otherwise. 
bool ReabstractionInfo::prepareAndCheck(ApplySite Apply, SILFunction *Callee, SubstitutionMap ParamSubs, OptRemark::Emitter *ORE) { if (shouldNotSpecializeCallee(Callee)) return false; SpecializedGenericEnv = nullptr; SpecializedGenericSig = nullptr; auto CalleeGenericSig = Callee->getLoweredFunctionType()->getGenericSignature(); auto CalleeGenericEnv = Callee->getGenericEnvironment(); this->Callee = Callee; this->Apply = Apply; // Get the original substitution map. CalleeParamSubMap = ParamSubs; using namespace OptRemark; // We do not support partial specialization. if (!EnablePartialSpecialization && CalleeParamSubMap.hasArchetypes()) { LLVM_DEBUG(llvm::dbgs() << " Partial specialization is not supported.\n"); LLVM_DEBUG(ParamSubs.dump(llvm::dbgs())); return false; } // Perform some checks to see if we need to bail. if (CalleeParamSubMap.hasDynamicSelf()) { REMARK_OR_DEBUG(ORE, [&]() { return RemarkMissed("DynamicSelf", *Apply.getInstruction()) << IndentDebug(4) << "Cannot specialize with dynamic self"; }); return false; } // Check if the substitution contains any generic types that are too deep. // If this is the case, bail to avoid the explosion in the number of // generated specializations. for (auto Replacement : ParamSubs.getReplacementTypes()) { if (isTypeTooComplex(Replacement)) { REMARK_OR_DEBUG(ORE, [&]() { return RemarkMissed("TypeTooDeep", *Apply.getInstruction()) << IndentDebug(4) << "Cannot specialize because the generic type is too deep"; }); NumPreventedTooComplexGenericSpecializations++; return false; } } // Check if we have substitutions which replace generic type parameters with // concrete types or unbound generic types. bool HasConcreteGenericParams = false; bool HasNonArchetypeGenericParams = false; HasUnboundGenericParams = false; for (auto GP : CalleeGenericSig->getSubstitutableParams()) { // Check only the substitutions for the generic parameters. // Ignore any dependent types, etc. 
auto Replacement = Type(GP).subst(CalleeParamSubMap); if (!Replacement->is<ArchetypeType>()) HasNonArchetypeGenericParams = true; if (Replacement->hasArchetype()) { HasUnboundGenericParams = true; // Check if the replacement is an archetype which is more specific // than the corresponding archetype in the original generic signature. // If this is the case, then specialization makes sense, because // it would produce something more specific. if (CalleeGenericEnv) { if (auto Archetype = Replacement->getAs<ArchetypeType>()) { auto OrigArchetype = CalleeGenericEnv->mapTypeIntoContext(GP)->castTo<ArchetypeType>(); if (Archetype->requiresClass() && !OrigArchetype->requiresClass()) HasNonArchetypeGenericParams = true; if (Archetype->getLayoutConstraint() && !OrigArchetype->getLayoutConstraint()) HasNonArchetypeGenericParams = true; } } continue; } HasConcreteGenericParams = true; } if (HasUnboundGenericParams) { // Bail if we cannot specialize generic substitutions, but all substitutions // were generic. if (!HasConcreteGenericParams && !SupportGenericSubstitutions) { LLVM_DEBUG(llvm::dbgs() << " Partial specialization is not supported if " "all substitutions are generic.\n"); LLVM_DEBUG(ParamSubs.dump(llvm::dbgs())); return false; } if (!HasNonArchetypeGenericParams && !HasConcreteGenericParams) { LLVM_DEBUG(llvm::dbgs() << " Partial specialization is not supported if " "all substitutions are archetypes.\n"); LLVM_DEBUG(ParamSubs.dump(llvm::dbgs())); return false; } // We need a generic environment for the partial specialization. if (!CalleeGenericEnv) return false; // Bail if the callee should not be partially specialized. if (shouldNotSpecializeCallee(Callee, ParamSubs)) return false; } // Check if specializing this call site would create in an infinite generic // specialization loop. 
if (createsInfiniteSpecializationLoop(Apply)) { REMARK_OR_DEBUG(ORE, [&]() { return RemarkMissed("SpecializationLoop", *Apply.getInstruction()) << IndentDebug(4) << "Generic specialization is not supported if it would result in " "a generic specialization of infinite depth. Callee " << NV("Callee", Callee) << " occurs multiple times on the call chain"; }); if (PrintGenericSpecializationLoops) llvm::errs() << "Detected and prevented an infinite " "generic specialization loop for callee: " << Callee->getName() << '\n'; NumPreventedGenericSpecializationLoops++; return false; } return true; } bool ReabstractionInfo::canBeSpecialized(ApplySite Apply, SILFunction *Callee, SubstitutionMap ParamSubs) { ReabstractionInfo ReInfo; return ReInfo.prepareAndCheck(Apply, Callee, ParamSubs); } ReabstractionInfo::ReabstractionInfo(ApplySite Apply, SILFunction *Callee, SubstitutionMap ParamSubs, bool ConvertIndirectToDirect, OptRemark::Emitter *ORE) { if (!prepareAndCheck(Apply, Callee, ParamSubs, ORE)) return; this->ConvertIndirectToDirect = ConvertIndirectToDirect; SILFunction *Caller = nullptr; if (Apply) Caller = Apply.getFunction(); if (!EnablePartialSpecialization || !HasUnboundGenericParams) { // Fast path for full specializations. performFullSpecializationPreparation(Callee, ParamSubs); } else { performPartialSpecializationPreparation(Caller, Callee, ParamSubs); } verify(); if (SpecializedGenericSig) { LLVM_DEBUG(llvm::dbgs() << "\n\nPartially specialized types for function: " << Callee->getName() << "\n\n"; llvm::dbgs() << "Original generic function type:\n" << Callee->getLoweredFunctionType() << "\n" << "Partially specialized generic function type:\n" << SpecializedType << "\n\n"); } // Some sanity checks. 
auto SpecializedFnTy = getSpecializedType(); auto SpecializedSubstFnTy = SpecializedFnTy; if (SpecializedFnTy->isPolymorphic() && !getCallerParamSubstitutionMap().empty()) { auto CalleeFnTy = Callee->getLoweredFunctionType(); assert(CalleeFnTy->isPolymorphic()); auto CalleeSubstFnTy = CalleeFnTy->substGenericArgs( Callee->getModule(), getCalleeParamSubstitutionMap()); assert(!CalleeSubstFnTy->isPolymorphic() && "Substituted callee type should not be polymorphic"); assert(!CalleeSubstFnTy->hasTypeParameter() && "Substituted callee type should not have type parameters"); SpecializedSubstFnTy = SpecializedFnTy->substGenericArgs( Callee->getModule(), getCallerParamSubstitutionMap()); assert(!SpecializedSubstFnTy->isPolymorphic() && "Substituted callee type should not be polymorphic"); assert(!SpecializedSubstFnTy->hasTypeParameter() && "Substituted callee type should not have type parameters"); auto SpecializedCalleeSubstFnTy = createSpecializedType(CalleeSubstFnTy, Callee->getModule()); if (SpecializedSubstFnTy != SpecializedCalleeSubstFnTy) { llvm::dbgs() << "SpecializedFnTy:\n" << SpecializedFnTy << "\n"; llvm::dbgs() << "SpecializedSubstFnTy:\n" << SpecializedSubstFnTy << "\n"; getCallerParamSubstitutionMap().getCanonical().dump(llvm::dbgs()); llvm::dbgs() << "\n\n"; llvm::dbgs() << "CalleeFnTy:\n" << CalleeFnTy << "\n"; llvm::dbgs() << "SpecializedCalleeSubstFnTy:\n" << SpecializedCalleeSubstFnTy << "\n"; ParamSubs.getCanonical().dump(llvm::dbgs()); llvm::dbgs() << "\n\n"; assert(SpecializedSubstFnTy == SpecializedCalleeSubstFnTy && "Substituted function types should be the same"); } } // If the new type is the same, there is nothing to do and // no specialization should be performed. if (getSubstitutedType() == Callee->getLoweredFunctionType()) { LLVM_DEBUG(llvm::dbgs() << "The new specialized type is the same as " "the original " "type. 
Don't specialize!\n"; llvm::dbgs() << "The type is: " << getSubstitutedType() << "\n"); SpecializedType = CanSILFunctionType(); SubstitutedType = CanSILFunctionType(); SpecializedGenericSig = nullptr; SpecializedGenericEnv = nullptr; return; } if (SpecializedGenericSig) { // It is a partial specialization. LLVM_DEBUG(llvm::dbgs() << "Specializing the call:\n"; Apply.getInstruction()->dumpInContext(); llvm::dbgs() << "\n\nPartially specialized types for function: " << Callee->getName() << "\n\n"; llvm::dbgs() << "Callee generic function type:\n" << Callee->getLoweredFunctionType() << "\n\n"; llvm::dbgs() << "Callee's call substitution:\n"; getCalleeParamSubstitutionMap().getCanonical().dump(llvm::dbgs()); llvm::dbgs() << "Partially specialized generic function type:\n" << getSpecializedType() << "\n\n"; llvm::dbgs() << "\nSpecialization call substitution:\n"; getCallerParamSubstitutionMap().getCanonical().dump(llvm::dbgs()); ); } } bool ReabstractionInfo::canBeSpecialized() const { return getSpecializedType(); } bool ReabstractionInfo::isFullSpecialization() const { return !getCalleeParamSubstitutionMap().hasArchetypes(); } bool ReabstractionInfo::isPartialSpecialization() const { return getCalleeParamSubstitutionMap().hasArchetypes(); } void ReabstractionInfo::createSubstitutedAndSpecializedTypes() { auto &M = Callee->getModule(); // Find out how the function type looks like after applying the provided // substitutions. if (!SubstitutedType) { SubstitutedType = createSubstitutedType(Callee, CallerInterfaceSubs, HasUnboundGenericParams); } assert(!SubstitutedType->hasArchetype() && "Substituted function type should not contain archetypes"); // Check which parameters and results can be converted from // indirect to direct ones. 
NumFormalIndirectResults = SubstitutedType->getNumIndirectFormalResults(); Conversions.resize(NumFormalIndirectResults + SubstitutedType->getParameters().size()); CanGenericSignature CanSig; if (SpecializedGenericSig) CanSig = SpecializedGenericSig->getCanonicalSignature(); Lowering::GenericContextScope GenericScope(M.Types, CanSig); SILFunctionConventions substConv(SubstitutedType, M); if (SubstitutedType->getNumDirectFormalResults() == 0) { // The original function has no direct result yet. Try to convert the first // indirect result to a direct result. // TODO: We could also convert multiple indirect results by returning a // tuple type and created tuple_extract instructions at the call site. unsigned IdxForResult = 0; for (SILResultInfo RI : SubstitutedType->getIndirectFormalResults()) { assert(RI.isFormalIndirect()); if (substConv.getSILType(RI).isLoadable(M) && !RI.getType()->isVoid() && shouldExpand(M, substConv.getSILType(RI).getObjectType())) { Conversions.set(IdxForResult); break; } ++IdxForResult; } } // Try to convert indirect incoming parameters to direct parameters. unsigned IdxForParam = NumFormalIndirectResults; for (SILParameterInfo PI : SubstitutedType->getParameters()) { auto IdxToInsert = IdxForParam; ++IdxForParam; if (!substConv.getSILType(PI).isLoadable(M)) { continue; } switch (PI.getConvention()) { case ParameterConvention::Indirect_In: case ParameterConvention::Indirect_In_Guaranteed: Conversions.set(IdxToInsert); break; case ParameterConvention::Indirect_In_Constant: case ParameterConvention::Indirect_Inout: case ParameterConvention::Indirect_InoutAliasable: case ParameterConvention::Direct_Owned: case ParameterConvention::Direct_Unowned: case ParameterConvention::Direct_Guaranteed: break; } } // Produce a specialized type, which is the substituted type with // the parameters/results passing conventions adjusted according // to the conversions selected above. 
SpecializedType = createSpecializedType(SubstitutedType, M); } /// Create a new substituted type with the updated signature. CanSILFunctionType ReabstractionInfo::createSubstitutedType(SILFunction *OrigF, SubstitutionMap SubstMap, bool HasUnboundGenericParams) { auto &M = OrigF->getModule(); if ((SpecializedGenericSig && SpecializedGenericSig->areAllParamsConcrete()) || !HasUnboundGenericParams) { SpecializedGenericSig = nullptr; SpecializedGenericEnv = nullptr; } CanGenericSignature CanSpecializedGenericSig; if (SpecializedGenericSig) CanSpecializedGenericSig = SpecializedGenericSig->getCanonicalSignature(); // First substitute concrete types into the existing function type. CanSILFunctionType FnTy; { Lowering::GenericContextScope GenericScope(M.Types, CanSpecializedGenericSig); FnTy = OrigF->getLoweredFunctionType()->substGenericArgs(M, SubstMap); // FIXME: Some of the added new requirements may not have been taken into // account by the substGenericArgs. So, canonicalize in the context of the // specialized signature. FnTy = cast<SILFunctionType>( CanSpecializedGenericSig->getCanonicalTypeInContext(FnTy)); } assert(FnTy); // Use the new specialized generic signature. auto NewFnTy = SILFunctionType::get( CanSpecializedGenericSig, FnTy->getExtInfo(), FnTy->getCoroutineKind(), FnTy->getCalleeConvention(), FnTy->getParameters(), FnTy->getYields(), FnTy->getResults(), FnTy->getOptionalErrorResult(), M.getASTContext(), FnTy->getWitnessMethodConformanceOrNone()); // This is an interface type. It should not have any archetypes. assert(!NewFnTy->hasArchetype()); return NewFnTy; } /// Convert the substituted function type into a specialized function type based /// on the ReabstractionInfo. 
CanSILFunctionType ReabstractionInfo:: createSpecializedType(CanSILFunctionType SubstFTy, SILModule &M) const { llvm::SmallVector<SILResultInfo, 8> SpecializedResults; llvm::SmallVector<SILYieldInfo, 8> SpecializedYields; llvm::SmallVector<SILParameterInfo, 8> SpecializedParams; unsigned IndirectResultIdx = 0; for (SILResultInfo RI : SubstFTy->getResults()) { if (RI.isFormalIndirect()) { if (isFormalResultConverted(IndirectResultIdx++)) { // Convert the indirect result to a direct result. SILType SILResTy = SILType::getPrimitiveObjectType(RI.getType()); // Indirect results are passed as owned, so we also need to pass the // direct result as owned (except it's a trivial type). auto C = (SILResTy.isTrivial(M) ? ResultConvention::Unowned : ResultConvention::Owned); SpecializedResults.push_back(SILResultInfo(RI.getType(), C)); continue; } } // No conversion: re-use the original, substituted result info. SpecializedResults.push_back(RI); } unsigned ParamIdx = 0; for (SILParameterInfo PI : SubstFTy->getParameters()) { if (!isParamConverted(ParamIdx++)) { // No conversion: re-use the original, substituted parameter info. SpecializedParams.push_back(PI); continue; } // Convert the indirect parameter to a direct parameter. SILType SILParamTy = SILType::getPrimitiveObjectType(PI.getType()); // Indirect parameters are passed as owned/guaranteed, so we also // need to pass the direct/guaranteed parameter as // owned/guaranteed (except it's a trivial type). auto C = ParameterConvention::Direct_Unowned; if (!SILParamTy.isTrivial(M)) { if (PI.isGuaranteed()) { C = ParameterConvention::Direct_Guaranteed; } else { C = ParameterConvention::Direct_Owned; } } SpecializedParams.push_back(SILParameterInfo(PI.getType(), C)); } for (SILYieldInfo YI : SubstFTy->getYields()) { // For now, always just use the original, substituted parameter info. 
SpecializedYields.push_back(YI); } return SILFunctionType::get( SubstFTy->getGenericSignature(), SubstFTy->getExtInfo(), SubstFTy->getCoroutineKind(), SubstFTy->getCalleeConvention(), SpecializedParams, SpecializedYields, SpecializedResults, SubstFTy->getOptionalErrorResult(), M.getASTContext(), SubstFTy->getWitnessMethodConformanceOrNone()); } /// Create a new generic signature from an existing one by adding /// additional requirements. static std::pair<GenericEnvironment *, GenericSignature *> getGenericEnvironmentAndSignatureWithRequirements( GenericSignature *OrigGenSig, GenericEnvironment *OrigGenericEnv, ArrayRef<Requirement> Requirements, SILModule &M) { // Form a new generic signature based on the old one. GenericSignatureBuilder Builder(M.getASTContext()); // First, add the old generic signature. Builder.addGenericSignature(OrigGenSig); auto Source = GenericSignatureBuilder::FloatingRequirementSource::forAbstract(); // For each substitution with a concrete type as a replacement, // add a new concrete type equality requirement. for (auto &Req : Requirements) { Builder.addRequirement(Req, Source, M.getSwiftModule()); } auto NewGenSig = std::move(Builder).computeGenericSignature( SourceLoc(), /*allowConcreteGenericParams=*/true); auto NewGenEnv = NewGenSig->createGenericEnvironment(); return { NewGenEnv, NewGenSig }; } /// This is a fast path for full specializations. /// There is no need to form a new generic signature in such cases, /// because the specialized function will be non-generic. void ReabstractionInfo::performFullSpecializationPreparation( SILFunction *Callee, SubstitutionMap ParamSubs) { assert((!EnablePartialSpecialization || !HasUnboundGenericParams) && "Only full specializations are handled here"); SILModule &M = Callee->getModule(); this->Callee = Callee; // Get the original substitution map. 
ClonerParamSubMap = ParamSubs; SubstitutedType = Callee->getLoweredFunctionType()->substGenericArgs( M, ClonerParamSubMap); CallerParamSubMap = {}; createSubstitutedAndSpecializedTypes(); } /// If the archetype (or any of its dependent types) has requirements /// depending on other archetypes, return true. /// Otherwise return false. static bool hasNonSelfContainedRequirements(ArchetypeType *Archetype, GenericSignature *Sig, GenericEnvironment *Env) { auto Reqs = Sig->getRequirements(); auto CurrentGP = Archetype->getInterfaceType() ->getCanonicalType() ->getRootGenericParam(); for (auto Req : Reqs) { switch(Req.getKind()) { case RequirementKind::Conformance: case RequirementKind::Superclass: case RequirementKind::Layout: // FIXME: Second type of a superclass requirement may contain // generic parameters. continue; case RequirementKind::SameType: { // Check if this requirement contains more than one generic param. // If this is the case, then these archetypes are interdependent and // we should return true. auto First = Req.getFirstType()->getCanonicalType(); auto Second = Req.getSecondType()->getCanonicalType(); llvm::SmallSetVector<TypeBase *, 2> UsedGenericParams; First.visit([&](Type Ty) { if (auto *GP = Ty->getAs<GenericTypeParamType>()) { UsedGenericParams.insert(GP); } }); Second.visit([&](Type Ty) { if (auto *GP = Ty->getAs<GenericTypeParamType>()) { UsedGenericParams.insert(GP); } }); if (UsedGenericParams.count(CurrentGP) && UsedGenericParams.size() > 1) return true; } } } return false; } /// Collect all requirements for a generic parameter corresponding to a given /// archetype. 
/// Collect, into \p CollectedReqs, every requirement of \p Sig that is rooted
/// in the generic parameter backing \p Archetype. Same-type requirements that
/// mention more than one generic parameter are asserted against (such
/// interdependent archetypes are filtered out earlier by
/// hasNonSelfContainedRequirements).
static void collectRequirements(ArchetypeType *Archetype, GenericSignature *Sig,
                                GenericEnvironment *Env,
                                SmallVectorImpl<Requirement> &CollectedReqs) {
  auto Reqs = Sig->getRequirements();
  auto CurrentGP = Archetype->getInterfaceType()
                       ->getCanonicalType()
                       ->getRootGenericParam();
  CollectedReqs.clear();
  for (auto Req : Reqs) {
    switch(Req.getKind()) {
    case RequirementKind::Conformance:
    case RequirementKind::Superclass:
    case RequirementKind::Layout:
      // If it is a generic param or something derived from it, add this
      // requirement.

      // FIXME: Second type of a superclass requirement may contain
      // generic parameters.

      if (Req.getFirstType()->getCanonicalType()->getRootGenericParam() ==
          CurrentGP)
        CollectedReqs.push_back(Req);
      continue;
    case RequirementKind::SameType: {
      // Check if this requirement contains more than one generic param.
      // If this is the case, then these archetypes are interdependent and
      // we should return true.
      auto First = Req.getFirstType()->getCanonicalType();
      auto Second = Req.getSecondType()->getCanonicalType();
      llvm::SmallSetVector<GenericTypeParamType *, 2> UsedGenericParams;
      First.visit([&](Type Ty) {
        if (auto *GP = Ty->getAs<GenericTypeParamType>()) {
          UsedGenericParams.insert(GP);
        }
      });
      Second.visit([&](Type Ty) {
        if (auto *GP = Ty->getAs<GenericTypeParamType>()) {
          UsedGenericParams.insert(GP);
        }
      });

      // Skip requirements that do not involve this generic parameter at all.
      if (!UsedGenericParams.count(CurrentGP))
        continue;

      // Diagnostic dump before the assert fires: a same-type requirement on
      // CurrentGP is expected to involve only CurrentGP itself.
      if (UsedGenericParams.size() != 1) {
        llvm::dbgs() << "Strange requirement for "
                     << CurrentGP->getCanonicalType() << "\n";
        Req.dump();
      }
      assert(UsedGenericParams.size() == 1);
      CollectedReqs.push_back(Req);
      continue;
    }
    }
  }
}

/// Returns true if a given substitution should participate in the
/// partial specialization.
///
/// TODO:
/// If a replacement is an archetype or a dependent type
/// of an archetype, then it does not make sense to substitute
/// it into the signature of the specialized function, because
/// it does not provide any benefits at runtime and may actually
/// lead to performance degradations.
///
/// If a replacement is a loadable type, it is most likely
/// rather beneficial to specialize using this substitution, because
/// it would allow for more efficient codegen for this type.
///
/// If a substitution simply replaces a generic parameter in the callee
/// by a generic parameter in the caller and this generic parameter
/// in the caller does have more "specific" conformances or requirements,
/// then it does not make any sense to perform this substitution.
/// In particular, if the generic parameter in the callee is unconstrained
/// (i.e. just T), then providing a more specific generic parameter with some
/// conformances does not help, because the body of the callee does not invoke
/// any methods from any of these new conformances, unless these conformances
/// or requirements influence the layout of the generic type, e.g. "class",
/// "Trivial of size N", "HeapAllocationObject", etc.
/// (NOTE: It could be that additional conformances can still be used due
/// to conditional conformances or something like that, if the caller
/// has an invocation like: "G<T>().method(...)". In this case, G<T>().method()
/// and G<T:P>().method() may be resolved differently).
///
/// We may need to analyze the uses of the generic type inside
/// the function body (recursively). Is it ever loaded/stored?
/// Do we create objects of this type? Which conformances are
/// really used?
static bool shouldBePartiallySpecialized(Type Replacement,
                                         GenericSignature *Sig,
                                         GenericEnvironment *Env) {
  // If replacement is a concrete type, this substitution
  // should participate.
  if (!Replacement->hasArchetype())
    return true;

  // We cannot handle opened existentials yet.
  if (Replacement->hasOpenedExistential())
    return false;

  if (!SupportGenericSubstitutions) {
    // Don't partially specialize if the replacement contains an archetype.
    if (Replacement->hasArchetype())
      return false;
  }

  // If the archetype used (or any of its dependent types) has requirements
  // depending on other caller's archetypes, then we don't want to specialize
  // on it as it may require introducing more generic parameters, which
  // is not beneficial.

  // Collect the archetypes used by the replacement type.
  llvm::SmallSetVector<ArchetypeType *, 2> UsedArchetypes;
  Replacement.visit([&](Type Ty) {
    if (auto Archetype = Ty->getAs<ArchetypeType>()) {
      UsedArchetypes.insert(Archetype->getPrimary());
    }
  });

  // Check if any of the used archetypes are non-self contained when
  // it comes to requirements.
  for (auto *UsedArchetype : UsedArchetypes) {
    if (hasNonSelfContainedRequirements(UsedArchetype, Sig, Env)) {
      LLVM_DEBUG(llvm::dbgs() << "Requirements of the archetype depend on other "
                                 "caller's generic "
                                 "parameters! It cannot be partially specialized:\n";
                 UsedArchetype->dump();
                 llvm::dbgs() << "This archetype is used in the substitution: "
                              << Replacement << "\n");
      return false;
    }
  }

  if (OptimizeGenericSubstitutions) {
    // Is it an unconstrained generic parameter?
    if (auto Archetype = Replacement->getAs<ArchetypeType>()) {
      if (Archetype->getConformsTo().empty()) {
        // TODO: If Replacement adds a new layout constraint, then
        // it may be still useful to perform the partial specialization.
        return false;
      }
    }
  }

  return true;
}

namespace swift {

/// A helper class for creating partially specialized function signatures.
///
/// The following naming convention is used to describe the members and
/// functions:
/// Caller - the function which invokes the callee.
/// Callee - the callee to be specialized.
/// Specialized - the specialized callee which is being created.
class FunctionSignaturePartialSpecializer {
  /// Maps caller's generic parameters to generic parameters of the specialized
  /// function.
  llvm::DenseMap<SubstitutableType *, Type>
      CallerInterfaceToSpecializedInterfaceMapping;

  /// Maps callee's generic parameters to generic parameters of the specialized
  /// function.
  llvm::DenseMap<SubstitutableType *, Type>
      CalleeInterfaceToSpecializedInterfaceMapping;

  /// Maps the generic parameters of the specialized function to the caller's
  /// contextual types.
  llvm::DenseMap<SubstitutableType *, Type>
      SpecializedInterfaceToCallerArchetypeMapping;

  /// A SubstitutionMap for re-mapping caller's interface types
  /// to interface types of the specialized function.
  SubstitutionMap CallerInterfaceToSpecializedInterfaceMap;

  /// Maps callee's interface types to caller's contextual types.
  /// It is computed from the original substitutions.
  SubstitutionMap CalleeInterfaceToCallerArchetypeMap;

  /// Maps callee's interface types to the specialized function's interface
  /// types.
  SubstitutionMap CalleeInterfaceToSpecializedInterfaceMap;

  /// Maps the generic parameters of the specialized function to the caller's
  /// contextual types.
  SubstitutionMap SpecializedInterfaceToCallerArchetypeMap;

  /// Generic signatures and environments for the caller, callee and
  /// the specialized function.
  GenericSignature *CallerGenericSig;
  GenericEnvironment *CallerGenericEnv;

  GenericSignature *CalleeGenericSig;
  GenericEnvironment *CalleeGenericEnv;

  GenericSignature *SpecializedGenericSig;
  GenericEnvironment *SpecializedGenericEnv;

  SILModule &M;
  ModuleDecl *SM;
  ASTContext &Ctx;

  /// This is a builder for a new partially specialized generic signature.
  GenericSignatureBuilder Builder;

  /// Set of newly created generic type parameters.
  SmallVector<GenericTypeParamType*, 4> AllGenericParams;

  /// Archetypes used in the substitutions of an apply instruction.
  /// These are the contextual archetypes of the caller function, which
  /// invokes a generic function that is being specialized.
  llvm::SmallSetVector<ArchetypeType *, 2> UsedCallerArchetypes;

  /// Number of created generic parameters so far.
  unsigned GPIdx = 0;

  void createGenericParamsForUsedCallerArchetypes();

  void createGenericParamsForCalleeGenericParams();

  void addRequirements(ArrayRef<Requirement> Reqs, SubstitutionMap &SubsMap);

  void addCallerRequirements();

  void addCalleeRequirements();

  std::pair<GenericEnvironment *, GenericSignature *>
  getSpecializedGenericEnvironmentAndSignature();

  void computeCallerInterfaceToSpecializedInterfaceMap();

  void computeCalleeInterfaceToSpecializedInterfaceMap();

  void computeSpecializedInterfaceToCallerArchetypeMap();

  /// Collect all used archetypes from all the substitutions.
  /// Take into account only those archetypes that occur in the
  /// substitutions of generic parameters which will be partially
  /// specialized. Ignore all others.
  void collectUsedCallerArchetypes(SubstitutionMap ParamSubs);

  /// Create a new generic parameter.
  GenericTypeParamType *createGenericParam();

public:
  FunctionSignaturePartialSpecializer(SILModule &M,
                                      GenericSignature *CallerGenericSig,
                                      GenericEnvironment *CallerGenericEnv,
                                      GenericSignature *CalleeGenericSig,
                                      GenericEnvironment *CalleeGenericEnv,
                                      SubstitutionMap ParamSubs)
      : CallerGenericSig(CallerGenericSig), CallerGenericEnv(CallerGenericEnv),
        CalleeGenericSig(CalleeGenericSig), CalleeGenericEnv(CalleeGenericEnv),
        M(M), SM(M.getSwiftModule()), Ctx(M.getASTContext()),
        Builder(Ctx) {
    SpecializedGenericSig = nullptr;
    SpecializedGenericEnv = nullptr;
    CalleeInterfaceToCallerArchetypeMap = ParamSubs;
  }

  /// This constructor is used when processing @_specialize.
  /// In this case, the caller and the callee are the same function.
  FunctionSignaturePartialSpecializer(SILModule &M,
                                      GenericSignature *CalleeGenericSig,
                                      GenericEnvironment *CalleeGenericEnv,
                                      ArrayRef<Requirement> Requirements)
      : CallerGenericSig(CalleeGenericSig), CallerGenericEnv(CalleeGenericEnv),
        CalleeGenericSig(CalleeGenericSig), CalleeGenericEnv(CalleeGenericEnv),
        M(M), SM(M.getSwiftModule()), Ctx(M.getASTContext()),
        Builder(Ctx) {
    // Create the new generic signature using provided requirements.
    std::tie(SpecializedGenericEnv, SpecializedGenericSig) =
        getGenericEnvironmentAndSignatureWithRequirements(
            CalleeGenericSig, CalleeGenericEnv, Requirements, M);

    // Compute SubstitutionMaps required for re-mapping.

    // Callee's generic signature and specialized generic signature
    // use the same set of generic parameters, i.e. each generic
    // parameter should be mapped to itself.
    for (auto GP : CalleeGenericSig->getGenericParams()) {
      CalleeInterfaceToSpecializedInterfaceMapping[GP] = Type(GP);
    }
    computeCalleeInterfaceToSpecializedInterfaceMap();

    // Each generic parameter of the callee is mapped to its own
    // archetype.
    SpecializedInterfaceToCallerArchetypeMap =
        SubstitutionMap::get(
            SpecializedGenericSig,
            [&](SubstitutableType *type) -> Type {
              return CalleeGenericEnv->mapTypeIntoContext(type);
            },
            LookUpConformanceInSignature(*SpecializedGenericSig));
  }

  GenericSignature *getSpecializedGenericSignature() {
    return SpecializedGenericSig;
  }

  GenericEnvironment *getSpecializedGenericEnvironment() {
    return SpecializedGenericEnv;
  }

  void createSpecializedGenericSignature(SubstitutionMap ParamSubs);

  void createSpecializedGenericSignatureWithNonGenericSubs();

  SubstitutionMap computeClonerParamSubs();

  SubstitutionMap getCallerParamSubs();

  void computeCallerInterfaceSubs(SubstitutionMap &CallerInterfaceSubs);
};

} // end of namespace

GenericTypeParamType *
FunctionSignaturePartialSpecializer::createGenericParam() {
  // All new parameters live at depth 0; GPIdx provides a unique index.
  auto GP = GenericTypeParamType::get(0, GPIdx++, Ctx);
  AllGenericParams.push_back(GP);
  Builder.addGenericParameter(GP);
  return GP;
}

/// Collect all used caller's archetypes from all the substitutions.
void FunctionSignaturePartialSpecializer::collectUsedCallerArchetypes(
    SubstitutionMap ParamSubs) {
  for (auto Replacement : ParamSubs.getReplacementTypes()) {
    if (!Replacement->hasArchetype())
      continue;

    // If the substitution will not be performed in the specialized
    // function, there is no need to check for any archetypes inside
    // the replacement.
    if (!shouldBePartiallySpecialized(Replacement,
                                      CallerGenericSig, CallerGenericEnv))
      continue;

    // Add used generic parameters/archetypes.
    Replacement.visit([&](Type Ty) {
      if (auto Archetype = Ty->getAs<ArchetypeType>()) {
        UsedCallerArchetypes.insert(Archetype->getPrimary());
      }
    });
  }
}

void FunctionSignaturePartialSpecializer::
    computeCallerInterfaceToSpecializedInterfaceMap() {
  // There may be no caller signature (e.g. the caller is non-generic).
  if (!CallerGenericSig)
    return;

  CallerInterfaceToSpecializedInterfaceMap =
      SubstitutionMap::get(
          CallerGenericSig,
          [&](SubstitutableType *type) -> Type {
            return CallerInterfaceToSpecializedInterfaceMapping.lookup(type);
          },
          LookUpConformanceInSignature(*CallerGenericSig));

  LLVM_DEBUG(llvm::dbgs() << "\n\nCallerInterfaceToSpecializedInterfaceMap map:\n";
             CallerInterfaceToSpecializedInterfaceMap.dump(llvm::dbgs()));
}

void FunctionSignaturePartialSpecializer::
    computeSpecializedInterfaceToCallerArchetypeMap() {
  // Define a substitution map for re-mapping interface types of
  // the specialized function to contextual types of the caller.
  SpecializedInterfaceToCallerArchetypeMap =
      SubstitutionMap::get(
          SpecializedGenericSig,
          [&](SubstitutableType *type) -> Type {
            LLVM_DEBUG(llvm::dbgs() << "Mapping specialized interface type to caller "
                                       "archetype:\n";
                       llvm::dbgs() << "Interface type: "; type->dump();
                       llvm::dbgs() << "Archetype: ";
                       auto Archetype =
                           SpecializedInterfaceToCallerArchetypeMapping.lookup(type);
                       if (Archetype) Archetype->dump();
                       else llvm::dbgs() << "Not found!\n";);
            return SpecializedInterfaceToCallerArchetypeMapping.lookup(type);
          },
          LookUpConformanceInSignature(*SpecializedGenericSig));
  LLVM_DEBUG(llvm::dbgs() << "\n\nSpecializedInterfaceToCallerArchetypeMap map:\n";
             SpecializedInterfaceToCallerArchetypeMap.dump(llvm::dbgs()));
}

void FunctionSignaturePartialSpecializer::
    computeCalleeInterfaceToSpecializedInterfaceMap() {
  CalleeInterfaceToSpecializedInterfaceMap =
      SubstitutionMap::get(
          CalleeGenericSig,
          [&](SubstitutableType *type) -> Type {
            return CalleeInterfaceToSpecializedInterfaceMapping.lookup(type);
          },
          LookUpConformanceInSignature(*CalleeGenericSig));

  LLVM_DEBUG(llvm::dbgs() << "\n\nCalleeInterfaceToSpecializedInterfaceMap:\n";
             CalleeInterfaceToSpecializedInterfaceMap.dump(llvm::dbgs()));
}

/// Generate a new generic type parameter for each used archetype from
/// the caller.
void FunctionSignaturePartialSpecializer::
    createGenericParamsForUsedCallerArchetypes() {
  for (auto CallerArchetype : UsedCallerArchetypes) {
    auto CallerGenericParam = CallerArchetype->getInterfaceType();
    assert(CallerGenericParam->is<GenericTypeParamType>());

    LLVM_DEBUG(llvm::dbgs() << "\n\nChecking used caller archetype:\n";
               CallerArchetype->dump();
               llvm::dbgs() << "It corresponds to the caller generic parameter:\n";
               CallerGenericParam->dump());

    // Create an equivalent generic parameter.
    auto SubstGenericParam = createGenericParam();
    auto SubstGenericParamCanTy = SubstGenericParam->getCanonicalType();
    (void)SubstGenericParamCanTy;

    // Record both directions: caller interface -> specialized interface,
    // and specialized interface -> caller archetype.
    CallerInterfaceToSpecializedInterfaceMapping
        [CallerGenericParam->getCanonicalType()
             ->castTo<GenericTypeParamType>()] = SubstGenericParam;

    SpecializedInterfaceToCallerArchetypeMapping[SubstGenericParam] =
        CallerArchetype;

    LLVM_DEBUG(llvm::dbgs() << "\nCreated a new specialized generic parameter:\n";
               SubstGenericParam->dump();
               llvm::dbgs() << "Created a mapping "
                               "(caller interface -> specialize interface):\n"
                            << CallerGenericParam << " -> "
                            << SubstGenericParamCanTy << "\n";
               llvm::dbgs() << "Created a mapping"
                               "(specialized interface -> caller archetype):\n"
                            << SubstGenericParamCanTy << " -> "
                            << CallerArchetype->getCanonicalType() << "\n");
  }
}

/// Create a new generic parameter for each of the callee's generic parameters
/// which requires a substitution.
void FunctionSignaturePartialSpecializer::
    createGenericParamsForCalleeGenericParams() {
  auto Source =
      GenericSignatureBuilder::FloatingRequirementSource::forAbstract();
  for (auto GP : CalleeGenericSig->getGenericParams()) {
    auto CanTy = GP->getCanonicalType();
    auto CanTyInContext =
        CalleeGenericSig->getCanonicalTypeInContext(CanTy);
    auto Replacement = CanTyInContext.subst(CalleeInterfaceToCallerArchetypeMap);
    LLVM_DEBUG(llvm::dbgs() << "\n\nChecking callee generic parameter:\n";
               CanTy->dump());
    if (!Replacement) {
      LLVM_DEBUG(llvm::dbgs() << "No replacement found. Skipping.\n");
      continue;
    }

    LLVM_DEBUG(llvm::dbgs() << "Replacement found:\n"; Replacement->dump());

    bool ShouldSpecializeGP = shouldBePartiallySpecialized(
        Replacement, CallerGenericSig, CallerGenericEnv);

    if (ShouldSpecializeGP) {
      LLVM_DEBUG(llvm::dbgs() << "Should be partially specialized.\n");
    } else {
      LLVM_DEBUG(llvm::dbgs() << "Should not be partially specialized.\n");
    }

    // Create an equivalent generic parameter in the specialized
    // generic environment. Note: a parameter is created even when the
    // substitution is not specialized on; in that case it simply stays
    // generic and maps back to the original replacement.
    auto SubstGenericParam = createGenericParam();
    auto SubstGenericParamCanTy = SubstGenericParam->getCanonicalType();

    // Remember which specialized generic parameter corresponds to callee's
    // generic parameter.
    CalleeInterfaceToSpecializedInterfaceMapping[GP] = SubstGenericParam;

    LLVM_DEBUG(llvm::dbgs() << "\nCreated a new specialized generic parameter:\n";
               SubstGenericParam->dump();
               llvm::dbgs() << "Created a mapping "
                               "(callee interface -> specialized interface):\n"
                            << CanTy << " -> "
                            << SubstGenericParamCanTy << "\n");

    if (!ShouldSpecializeGP) {
      // Remember the original substitution from the apply instruction.
      SpecializedInterfaceToCallerArchetypeMapping[SubstGenericParam] =
          Replacement;
      LLVM_DEBUG(llvm::dbgs() << "Created a mapping (specialized interface -> "
                                 "caller archetype):\n"
                              << Type(SubstGenericParam) << " -> "
                              << Replacement << "\n");
      continue;
    }

    // Add a same type requirement based on the provided generic parameter
    // substitutions.
    auto ReplacementCallerInterfaceTy = Replacement->mapTypeOutOfContext();

    auto SpecializedReplacementCallerInterfaceTy =
        ReplacementCallerInterfaceTy.subst(
            CallerInterfaceToSpecializedInterfaceMap);
    assert(!SpecializedReplacementCallerInterfaceTy->hasError());

    Requirement Req(RequirementKind::SameType, SubstGenericParamCanTy,
                    SpecializedReplacementCallerInterfaceTy);
    Builder.addRequirement(Req, Source, SM);

    LLVM_DEBUG(llvm::dbgs() << "Added a requirement:\n"; Req.dump());

    if (ReplacementCallerInterfaceTy->is<GenericTypeParamType>()) {
      // Remember that the new generic parameter corresponds
      // to the same caller archetype, which corresponds to
      // the ReplacementCallerInterfaceTy.
      SpecializedInterfaceToCallerArchetypeMapping[SubstGenericParam] =
          SpecializedInterfaceToCallerArchetypeMapping.lookup(
              ReplacementCallerInterfaceTy
                  .subst(CallerInterfaceToSpecializedInterfaceMap)
                  ->castTo<SubstitutableType>());
      LLVM_DEBUG(llvm::dbgs() << "Created a mapping (specialized interface -> "
                                 "caller archetype):\n"
                              << Type(SubstGenericParam) << " -> "
                              << SpecializedInterfaceToCallerArchetypeMapping
                                     [SubstGenericParam]
                                         ->getCanonicalType()
                              << "\n");
      continue;
    }

    SpecializedInterfaceToCallerArchetypeMapping[SubstGenericParam] =
        Replacement;
    LLVM_DEBUG(llvm::dbgs() << "Created a mapping (specialized interface -> "
                               "caller archetype):\n"
                            << Type(SubstGenericParam) << " -> "
                            << SpecializedInterfaceToCallerArchetypeMapping
                                   [SubstGenericParam]
                                       ->getCanonicalType()
                            << "\n");
  }
}

/// Add requirements from a given list of requirements to the
/// GenericSignatureBuilder. Re-map them using the provided SubstitutionMap.
void FunctionSignaturePartialSpecializer::addRequirements(
    ArrayRef<Requirement> Reqs, SubstitutionMap &SubsMap) {
  auto source =
      GenericSignatureBuilder::FloatingRequirementSource::forAbstract();
  for (auto &reqReq : Reqs) {
    LLVM_DEBUG(llvm::dbgs() << "\n\nRe-mapping the requirement:\n";
               reqReq.dump());
    Builder.addRequirement(*reqReq.subst(SubsMap), source, SM);
  }
}

/// Add requirements from the caller's signature.
void FunctionSignaturePartialSpecializer::addCallerRequirements() {
  for (auto CallerArchetype : UsedCallerArchetypes) {
    // Add requirements for this caller generic parameter and its dependent
    // types.
    SmallVector<Requirement, 4> CollectedReqs;
    collectRequirements(CallerArchetype, CallerGenericSig, CallerGenericEnv,
                        CollectedReqs);
    if (!CollectedReqs.empty()) {
      LLVM_DEBUG(llvm::dbgs() << "Adding caller archetype requirements:\n";
                 for (auto Req : CollectedReqs) {
                   Req.dump();
                 }
                 CallerInterfaceToSpecializedInterfaceMap.dump(llvm::dbgs());
                );
      addRequirements(CollectedReqs, CallerInterfaceToSpecializedInterfaceMap);
    }
  }
}

/// Add requirements from the callee's signature.
void FunctionSignaturePartialSpecializer::addCalleeRequirements() {
  if (CalleeGenericSig)
    addRequirements(CalleeGenericSig->getRequirements(),
                    CalleeInterfaceToSpecializedInterfaceMap);
}

std::pair<GenericEnvironment *, GenericSignature *>
FunctionSignaturePartialSpecializer::
    getSpecializedGenericEnvironmentAndSignature() {
  // No generic parameters were created: the specialization is full and the
  // specialized function is non-generic.
  if (AllGenericParams.empty())
    return { nullptr, nullptr };

  // Finalize the archetype builder.
  auto GenSig = std::move(Builder).computeGenericSignature(
      SourceLoc(), /*allowConcreteGenericParams=*/true);
  auto GenEnv = GenSig->createGenericEnvironment();
  return { GenEnv, GenSig };
}

/// Compute the substitutions the cloner uses while copying the callee's
/// body: callee interface type -> specialized interface type -> specialized
/// contextual (archetype) type.
SubstitutionMap FunctionSignaturePartialSpecializer::computeClonerParamSubs() {
  return SubstitutionMap::get(
      CalleeGenericSig,
      [&](SubstitutableType *type) -> Type {
        LLVM_DEBUG(llvm::dbgs() << "\ngetSubstitution for ClonerParamSubs:\n"
                                << Type(type) << "\n"
                                << "in generic signature:\n";
                   CalleeGenericSig->dump());
        auto SpecializedInterfaceTy =
            Type(type).subst(CalleeInterfaceToSpecializedInterfaceMap);
        return SpecializedGenericEnv->mapTypeIntoContext(
            SpecializedInterfaceTy);
      },
      LookUpConformanceInSignature(*SpecializedGenericSig));
}

SubstitutionMap FunctionSignaturePartialSpecializer::getCallerParamSubs() {
  return SpecializedInterfaceToCallerArchetypeMap;
}

/// Compute, into \p CallerInterfaceSubs, a map from callee interface types to
/// the specialized function's interface types (round-tripped through the
/// specialized environment to resolve same-type requirements).
void FunctionSignaturePartialSpecializer::computeCallerInterfaceSubs(
    SubstitutionMap &CallerInterfaceSubs) {
  CallerInterfaceSubs = SubstitutionMap::get(
      CalleeGenericSig,
      [&](SubstitutableType *type) -> Type {
        // First, map callee's interface type to specialized interface type.
        auto Ty = Type(type).subst(CalleeInterfaceToSpecializedInterfaceMap);
        Type SpecializedInterfaceTy =
            SpecializedGenericEnv->mapTypeIntoContext(Ty)
                ->mapTypeOutOfContext();
        assert(!SpecializedInterfaceTy->hasError());
        return SpecializedInterfaceTy;
      },
      LookUpConformanceInSignature(*CalleeGenericSig));

  LLVM_DEBUG(llvm::dbgs() << "\n\nCallerInterfaceSubs map:\n";
             CallerInterfaceSubs.dump(llvm::dbgs()));
}

/// Fast-path for the case when generic substitutions are not supported.
void FunctionSignaturePartialSpecializer::
    createSpecializedGenericSignatureWithNonGenericSubs() {
  // Simply create a set of same-type requirements based on concrete
  // substitutions.
  SmallVector<Requirement, 4> Requirements;
  for (auto GP : CalleeGenericSig->getSubstitutableParams()) {
    auto Replacement = Type(GP).subst(CalleeInterfaceToCallerArchetypeMap);
    // Skip non-concrete replacements: only fully concrete substitutions are
    // supported on this path.
    if (Replacement->hasArchetype())
      continue;
    // Replacement is concrete. Add a same type requirement.
    Requirement Req(RequirementKind::SameType, GP, Replacement);
    Requirements.push_back(Req);
  }

  // Create a new generic signature by taking the existing one
  // and adding new requirements to it. No need to introduce
  // any new generic parameters.
  auto GenPair = getGenericEnvironmentAndSignatureWithRequirements(
      CalleeGenericSig, CalleeGenericEnv, Requirements, M);

  if (GenPair.second) {
    SpecializedGenericSig = GenPair.second->getCanonicalSignature();
    SpecializedGenericEnv = GenPair.first;
  }

  // Generic parameters are unchanged; map each one to itself.
  for (auto GP : CalleeGenericSig->getGenericParams()) {
    CalleeInterfaceToSpecializedInterfaceMapping[GP] = Type(GP);
  }
  computeCalleeInterfaceToSpecializedInterfaceMap();

  SpecializedInterfaceToCallerArchetypeMap =
      CalleeInterfaceToCallerArchetypeMap;
}

void FunctionSignaturePartialSpecializer::createSpecializedGenericSignature(
    SubstitutionMap ParamSubs) {
  // Collect all used caller's archetypes from all the substitutions.
  collectUsedCallerArchetypes(ParamSubs);

  // Generate a new generic type parameter for each used archetype from
  // the caller.
  createGenericParamsForUsedCallerArchetypes();

  // Create a SubstitutionMap for re-mapping caller's interface types
  // to interface types of the specialized function.
  computeCallerInterfaceToSpecializedInterfaceMap();

  // Add generic parameters that will come from the callee.
  // Introduce a new generic parameter in the new generic signature
  // for each generic parameter from the callee.
  createGenericParamsForCalleeGenericParams();

  computeCalleeInterfaceToSpecializedInterfaceMap();

  // Add requirements from the callee's generic signature.
  addCalleeRequirements();

  // Add requirements from the caller's generic signature.
  addCallerRequirements();

  auto GenPair = getSpecializedGenericEnvironmentAndSignature();
  if (GenPair.second) {
    SpecializedGenericSig = GenPair.second->getCanonicalSignature();
    SpecializedGenericEnv = GenPair.first;
    computeSpecializedInterfaceToCallerArchetypeMap();
  }
}

/// Builds a new generic and function signatures for a partial specialization.
/// Allows for partial specializations even if substitutions contain
/// type parameters.
///
/// The new generic signature has the following generic parameters:
/// - For each substitution with a concrete type CT as a replacement for a
/// generic type T, it introduces a generic parameter T' and a
/// requirement T' == CT
/// - For all other substitutions that are considered for partial specialization,
/// it collects first the archetypes used in the replacements. Then for each such
/// archetype A a new generic parameter T' introduced.
/// - If there is a substitution for type T and this substitution is excluded
/// from partial specialization (e.g. because it is impossible or would result
/// in a less efficient code), then a new generic parameter T' is introduced,
/// which does not get any additional, more specific requirements based on the
/// substitutions.
///
/// After all generic parameters are added according to the rules above,
/// the requirements of the callee's signature are re-mapped by re-formulating
/// them in terms of the newly introduced generic parameters. In case a remapped
/// requirement does not contain any generic types, it can be omitted, because
/// it is fulfilled already.
///
/// If any of the generic parameters were introduced for caller's archetypes,
/// their requirements from the caller's signature are re-mapped by
/// re-formulating them in terms of the newly introduced generic parameters.
void ReabstractionInfo::performPartialSpecializationPreparation(
    SILFunction *Caller, SILFunction *Callee,
    SubstitutionMap ParamSubs) {
  SILModule &M = Callee->getModule();

  // Caller is the SILFunction containing the apply instruction.
  // It may be null (see the guard below), e.g. for eager specializations.
  CanGenericSignature CallerGenericSig;
  GenericEnvironment *CallerGenericEnv = nullptr;
  if (Caller) {
    CallerGenericSig = Caller->getLoweredFunctionType()->getGenericSignature();
    CallerGenericEnv = Caller->getGenericEnvironment();
  }

  // Callee is the generic function being called by the apply instruction.
  auto CalleeFnTy = Callee->getLoweredFunctionType();
  auto CalleeGenericSig = CalleeFnTy->getGenericSignature();
  auto CalleeGenericEnv = Callee->getGenericEnvironment();

  LLVM_DEBUG(llvm::dbgs() << "\n\nTrying partial specialization for: "
                          << Callee->getName() << "\n";
             llvm::dbgs() << "Callee generic signature is:\n";
             CalleeGenericSig->dump());

  FunctionSignaturePartialSpecializer FSPS(M,
                                           CallerGenericSig, CallerGenericEnv,
                                           CalleeGenericSig, CalleeGenericEnv,
                                           ParamSubs);

  // Create the partially specialized generic signature and generic environment.
  if (SupportGenericSubstitutions)
    FSPS.createSpecializedGenericSignature(ParamSubs);
  else
    FSPS.createSpecializedGenericSignatureWithNonGenericSubs();

  // Once the specialized signature is known, compute different
  // maps and function types based on it. The specializer will need
  // them for cloning and specializing the function body, rewriting
  // the original apply instruction, etc.
  finishPartialSpecializationPreparation(FSPS);
}

void ReabstractionInfo::finishPartialSpecializationPreparation(
    FunctionSignaturePartialSpecializer &FSPS) {
  SpecializedGenericSig = FSPS.getSpecializedGenericSignature();
  SpecializedGenericEnv = FSPS.getSpecializedGenericEnvironment();
  if (SpecializedGenericSig) {
    LLVM_DEBUG(llvm::dbgs() << "\nCreated SpecializedGenericSig:\n";
               SpecializedGenericSig->dump(); SpecializedGenericEnv->dump());
  }

  // Create substitution lists for the caller and cloner.
  ClonerParamSubMap = FSPS.computeClonerParamSubs();
  CallerParamSubMap = FSPS.getCallerParamSubs();

  // Create a substitution map for the caller interface substitutions.
  FSPS.computeCallerInterfaceSubs(CallerInterfaceSubs);

  if (CalleeParamSubMap.empty()) {
    // It can happen if there is no caller or it is an eager specialization.
    CalleeParamSubMap = CallerParamSubMap;
  }

  HasUnboundGenericParams =
      SpecializedGenericSig && !SpecializedGenericSig->areAllParamsConcrete();

  createSubstitutedAndSpecializedTypes();

  if (getSubstitutedType() != Callee->getLoweredFunctionType()) {
    if (getSubstitutedType()->isPolymorphic())
      LLVM_DEBUG(llvm::dbgs() << "Created new specialized type: "
                              << SpecializedType << "\n");
  }
}

/// Perform some sanity checks for the requirements provided in @_specialize.
/// Asserts (debug builds) that every requirement is either a concrete-type
/// same-type requirement or a layout requirement; anything else is rejected.
static void
checkSpecializationRequirements(ArrayRef<Requirement> Requirements) {
  for (auto &Req : Requirements) {
    if (Req.getKind() == RequirementKind::SameType) {
      auto FirstType = Req.getFirstType();
      auto SecondType = Req.getSecondType();
      assert(FirstType && SecondType);
      assert(!FirstType->hasArchetype());
      assert(!SecondType->hasArchetype());

      // Only one of the types should be concrete.
      assert(FirstType->hasTypeParameter() != SecondType->hasTypeParameter() &&
             "Only concrete type same-type requirements are supported by "
             "generic specialization");
      // Silence unused-variable warnings in release builds where the asserts
      // compile away.
      (void) FirstType;
      (void) SecondType;
      continue;
    }

    if (Req.getKind() == RequirementKind::Layout) {
      continue;
    }

    llvm_unreachable("Unknown type of requirement in generic specialization");
  }
}

/// This constructor is used when processing @_specialize.
ReabstractionInfo::ReabstractionInfo(SILFunction *Callee,
                                     ArrayRef<Requirement> Requirements) {
  if (shouldNotSpecializeCallee(Callee))
    return;

  // Perform some sanity checks for the requirements.
  checkSpecializationRequirements(Requirements);

  this->Callee = Callee;
  ConvertIndirectToDirect = true;

  SILModule &M = Callee->getModule();

  auto CalleeGenericSig =
      Callee->getLoweredFunctionType()->getGenericSignature();
  auto *CalleeGenericEnv = Callee->getGenericEnvironment();

  // Caller == callee for @_specialize; use the requirements-based
  // constructor of the partial specializer.
  FunctionSignaturePartialSpecializer FSPS(M,
                                           CalleeGenericSig, CalleeGenericEnv,
                                           Requirements);

  finishPartialSpecializationPreparation(FSPS);
}

// =============================================================================
// GenericFuncSpecializer
// =============================================================================

GenericFuncSpecializer::GenericFuncSpecializer(SILFunction *GenericFunc,
                                               SubstitutionMap ParamSubs,
                                               IsSerialized_t Serialized,
                                               const ReabstractionInfo &ReInfo)
    : M(GenericFunc->getModule()),
      GenericFunc(GenericFunc),
      ParamSubs(ParamSubs),
      Serialized(Serialized),
      ReInfo(ReInfo) {

  assert(GenericFunc->isDefinition() && "Expected definition to specialize!");
  auto FnTy = ReInfo.getSpecializedType();

  // Partial and full specializations use different manglers so the two kinds
  // of clones never collide by name.
  if (ReInfo.isPartialSpecialization()) {
    Mangle::PartialSpecializationMangler Mangler(
        GenericFunc, FnTy, Serialized, /*isReAbstracted*/ true);
    ClonedName = Mangler.mangle();
  } else {
    Mangle::GenericSpecializationMangler Mangler(
        GenericFunc, ParamSubs, Serialized, /*isReAbstracted*/ true);
    ClonedName = Mangler.mangle();
  }
  LLVM_DEBUG(llvm::dbgs() << " Specialized function " << ClonedName << '\n');
}

/// Return an existing specialization if one exists.
SILFunction *GenericFuncSpecializer::lookupSpecialization() { if (SILFunction *SpecializedF = M.lookUpFunction(ClonedName)) { if (ReInfo.getSpecializedType() != SpecializedF->getLoweredFunctionType()) { llvm::dbgs() << "Looking for a function: " << ClonedName << "\n" << "Expected type: " << ReInfo.getSpecializedType() << "\n" << "Found type: " << SpecializedF->getLoweredFunctionType() << "\n"; } assert(ReInfo.getSpecializedType() == SpecializedF->getLoweredFunctionType() && "Previously specialized function does not match expected type."); LLVM_DEBUG(llvm::dbgs() << "Found an existing specialization for: " << ClonedName << "\n"); return SpecializedF; } LLVM_DEBUG(llvm::dbgs() << "Could not find an existing specialization for: " << ClonedName << "\n"); return nullptr; } /// Forward decl for prespecialization support. static bool linkSpecialization(SILModule &M, SILFunction *F); void ReabstractionInfo::verify() const { assert((!SpecializedGenericSig && !SpecializedGenericEnv && !getSpecializedType()->isPolymorphic()) || (SpecializedGenericSig && SpecializedGenericEnv && getSpecializedType()->isPolymorphic())); } /// Create a new specialized function if possible, and cache it. SILFunction *GenericFuncSpecializer::tryCreateSpecialization() { // Do not create any new specializations at Onone. if (!GenericFunc->shouldOptimize()) return nullptr; LLVM_DEBUG(llvm::dbgs() << "Creating a specialization: " << ClonedName << "\n";); ReInfo.verify(); // Create a new function. SILFunction *SpecializedF = GenericCloner::cloneFunction( GenericFunc, Serialized, ReInfo, // Use these substitutions inside the new specialized function being // created. 
ReInfo.getClonerParamSubstitutionMap(), ClonedName); assert((SpecializedF->getLoweredFunctionType()->isPolymorphic() && SpecializedF->getGenericEnvironment()) || (!SpecializedF->getLoweredFunctionType()->isPolymorphic() && !SpecializedF->getGenericEnvironment())); assert(!SpecializedF->hasQualifiedOwnership()); // Check if this specialization should be linked for prespecialization. linkSpecialization(M, SpecializedF); // Store the meta-information about how this specialization was created. auto *Caller = ReInfo.getApply() ? ReInfo.getApply().getFunction() : nullptr; SubstitutionMap Subs = Caller ? ReInfo.getApply().getSubstitutionMap() : ReInfo.getClonerParamSubstitutionMap(); SpecializedF->setClassSubclassScope(SubclassScope::NotApplicable); SpecializedF->setSpecializationInfo( GenericSpecializationInformation::create(Caller, GenericFunc, Subs)); return SpecializedF; } // ============================================================================= // Apply substitution // ============================================================================= /// Fix the case where a void function returns the result of an apply, which is /// also a call of a void-returning function. /// We always want a void function returning a tuple _instruction_. static void fixUsedVoidType(SILValue VoidVal, SILLocation Loc, SILBuilder &Builder) { assert(VoidVal->getType().isVoid()); if (VoidVal->use_empty()) return; auto *NewVoidVal = Builder.createTuple(Loc, VoidVal->getType(), { }); VoidVal->replaceAllUsesWith(NewVoidVal); } /// Prepare call arguments. Perform re-abstraction if required. static void prepareCallArguments(ApplySite AI, SILBuilder &Builder, const ReabstractionInfo &ReInfo, SmallVectorImpl<SILValue> &Arguments, SILValue &StoreResultTo) { /// SIL function conventions for the original apply site with substitutions. 
  // (Body of prepareCallArguments, whose signature opens on the previous
  // line.) Walk the original apply's operands; each is either forwarded
  // unchanged or rewritten according to the indirect->direct conversions
  // recorded in ReInfo.
  SILLocation Loc = AI.getLoc();
  auto substConv = AI.getSubstCalleeConv();
  unsigned ArgIdx = AI.getCalleeArgIndexOfFirstAppliedArg();
  for (auto &Op : AI.getArgumentOperands()) {
    // Returns true iff the operand was consumed by a conversion; false means
    // the loop below forwards the operand as-is.
    auto handleConversion = [&]() {
      // Rewriting SIL arguments is only for lowered addresses.
      if (!substConv.useLoweredAddresses())
        return false;
      if (ArgIdx < substConv.getSILArgIndexOfFirstParam()) {
        // Handle result arguments.
        unsigned formalIdx =
            substConv.getIndirectFormalResultIndexForSILArg(ArgIdx);
        if (ReInfo.isFormalResultConverted(formalIdx)) {
          // The result is converted from indirect to direct. We need to insert
          // a store later.
          assert(!StoreResultTo);
          StoreResultTo = Op.get();
          return true;
        }
      } else {
        // Handle arguments for formal parameters.
        unsigned paramIdx = ArgIdx - substConv.getSILArgIndexOfFirstParam();
        if (ReInfo.isParamConverted(paramIdx)) {
          // An argument is converted from indirect to direct. Instead of the
          // address we pass the loaded value.
          SILValue Val = Builder.createLoad(
              Loc, Op.get(), LoadOwnershipQualifier::Unqualified);
          Arguments.push_back(Val);
          return true;
        }
      }
      return false;
    };
    if (!handleConversion())
      Arguments.push_back(Op.get());
    ++ArgIdx;
  }
}

/// Return a substituted callee function type.
static CanSILFunctionType
getCalleeSubstFunctionType(SILValue Callee, SubstitutionMap Subs) {
  // Create a substituted callee type.
  auto CanFnTy = Callee->getType().castTo<SILFunctionType>();
  return CanFnTy->substGenericArgs(*Callee->getModule(), Subs);
}

/// Create a new apply based on an old one, but with a different
/// function being applied.
static ApplySite replaceWithSpecializedCallee(ApplySite AI, SILValue Callee,
                                              SILBuilder &Builder,
                                              const ReabstractionInfo &ReInfo) {
  SILLocation Loc = AI.getLoc();
  SmallVector<SILValue, 4> Arguments;
  SILValue StoreResultTo;
  // Collect the rewritten argument list; StoreResultTo is set to the original
  // indirect-result address when a formal result was converted to direct.
  prepareCallArguments(AI, Builder, ReInfo, Arguments, StoreResultTo);

  // Create a substituted callee type.
SubstitutionMap Subs; if (ReInfo.getSpecializedType()->isPolymorphic()) { Subs = ReInfo.getCallerParamSubstitutionMap(); } auto CalleeSubstFnTy = getCalleeSubstFunctionType(Callee, Subs); auto CalleeSILSubstFnTy = SILType::getPrimitiveObjectType(CalleeSubstFnTy); SILFunctionConventions substConv(CalleeSubstFnTy, Builder.getModule()); if (auto *TAI = dyn_cast<TryApplyInst>(AI)) { SILBasicBlock *ResultBB = TAI->getNormalBB(); assert(ResultBB->getSinglePredecessorBlock() == TAI->getParent()); auto *NewTAI = Builder.createTryApply(Loc, Callee, Subs, Arguments, ResultBB, TAI->getErrorBB()); if (StoreResultTo) { assert(substConv.useLoweredAddresses()); // The original normal result of the try_apply is an empty tuple. assert(ResultBB->getNumArguments() == 1); Builder.setInsertionPoint(ResultBB->begin()); fixUsedVoidType(ResultBB->getArgument(0), Loc, Builder); SILArgument *Arg = ResultBB->replacePHIArgument( 0, StoreResultTo->getType().getObjectType(), ValueOwnershipKind::Owned); // Store the direct result to the original result address. Builder.createStore(Loc, Arg, StoreResultTo, StoreOwnershipQualifier::Unqualified); } return NewTAI; } if (auto *A = dyn_cast<ApplyInst>(AI)) { auto *NewAI = Builder.createApply(Loc, Callee, Subs, Arguments, A->isNonThrowing()); if (StoreResultTo) { assert(substConv.useLoweredAddresses()); if (!CalleeSILSubstFnTy.isNoReturnFunction()) { // Store the direct result to the original result address. fixUsedVoidType(A, Loc, Builder); Builder.createStore(Loc, NewAI, StoreResultTo, StoreOwnershipQualifier::Unqualified); } else { Builder.createUnreachable(Loc); // unreachable should be the terminator instruction. // So, split the current basic block right after the // inserted unreachable instruction. 
Builder.getInsertionPoint()->getParent()->split( Builder.getInsertionPoint()); } } A->replaceAllUsesWith(NewAI); return NewAI; } if (auto *A = dyn_cast<BeginApplyInst>(AI)) { assert(!StoreResultTo); auto *NewAI = Builder.createBeginApply(Loc, Callee, Subs, Arguments, A->isNonThrowing()); A->replaceAllUsesPairwiseWith(NewAI); return NewAI; } if (auto *PAI = dyn_cast<PartialApplyInst>(AI)) { auto *NewPAI = Builder.createPartialApply( Loc, Callee, Subs, Arguments, PAI->getType().getAs<SILFunctionType>()->getCalleeConvention()); PAI->replaceAllUsesWith(NewPAI); return NewPAI; } llvm_unreachable("unhandled kind of apply"); } /// Create a new apply based on an old one, but with a different /// function being applied. ApplySite swift:: replaceWithSpecializedFunction(ApplySite AI, SILFunction *NewF, const ReabstractionInfo &ReInfo) { SILBuilderWithScope Builder(AI.getInstruction()); FunctionRefInst *FRI = Builder.createFunctionRef(AI.getLoc(), NewF); return replaceWithSpecializedCallee(AI, FRI, Builder, ReInfo); } namespace { class ReabstractionThunkGenerator { SILFunction *OrigF; SILModule &M; SILFunction *SpecializedFunc; const ReabstractionInfo &ReInfo; PartialApplyInst *OrigPAI; IsSerialized_t Serialized = IsNotSerialized; std::string ThunkName; RegularLocation Loc; SmallVector<SILValue, 4> Arguments; public: ReabstractionThunkGenerator(const ReabstractionInfo &ReInfo, PartialApplyInst *OrigPAI, SILFunction *SpecializedFunc) : OrigF(OrigPAI->getCalleeFunction()), M(OrigF->getModule()), SpecializedFunc(SpecializedFunc), ReInfo(ReInfo), OrigPAI(OrigPAI), Loc(RegularLocation::getAutoGeneratedLocation()) { if (OrigF->isSerialized() && OrigPAI->getFunction()->isSerialized()) Serialized = IsSerializable; { if (!ReInfo.isPartialSpecialization()) { Mangle::GenericSpecializationMangler Mangler( OrigF, ReInfo.getCalleeParamSubstitutionMap(), Serialized, /*isReAbstracted*/ false); ThunkName = Mangler.mangle(); } else { Mangle::PartialSpecializationMangler Mangler( OrigF, 
ReInfo.getSpecializedType(), Serialized, /*isReAbstracted*/ false); ThunkName = Mangler.mangle(); } } } SILFunction *createThunk(); protected: SILValue createReabstractionThunkApply(SILBuilder &Builder); SILArgument *convertReabstractionThunkArguments(SILBuilder &Builder); }; } // anonymous namespace SILFunction *ReabstractionThunkGenerator::createThunk() { SILFunction *Thunk = M.getOrCreateSharedFunction( Loc, ThunkName, ReInfo.getSubstitutedType(), IsBare, IsTransparent, Serialized, ProfileCounter(), IsThunk); // Re-use an existing thunk. if (!Thunk->empty()) return Thunk; Thunk->setGenericEnvironment(ReInfo.getSpecializedGenericEnvironment()); // Set proper generic context scope for the type lowering. CanSILFunctionType SpecType = SpecializedFunc->getLoweredFunctionType(); Lowering::GenericContextScope GenericScope(M.Types, SpecType->getGenericSignature()); SILBasicBlock *EntryBB = Thunk->createBasicBlock(); SILBuilder Builder(EntryBB); // If the original specialized function had unqualified ownership, set the // thunk to have unqualified ownership as well. // // This is a stop gap measure to allow for easy inlining. We could always make // the Thunk qualified, but then we would need to either fix the inliner to // inline qualified into unqualified functions /or/ have the // OwnershipModelEliminator run as part of the normal compilation pipeline // (which we are not doing yet). if (!SpecializedFunc->hasQualifiedOwnership()) { Thunk->setUnqualifiedOwnership(); } if (!SILModuleConventions(M).useLoweredAddresses()) { for (auto SpecArg : SpecializedFunc->getArguments()) { SILArgument *NewArg = EntryBB->createFunctionArgument(SpecArg->getType(), SpecArg->getDecl()); Arguments.push_back(NewArg); } SILValue ReturnValue = createReabstractionThunkApply(Builder); Builder.createReturn(Loc, ReturnValue); return Thunk; } // Handle lowered addresses. 
SILArgument *ReturnValueAddr = convertReabstractionThunkArguments(Builder); SILValue ReturnValue = createReabstractionThunkApply(Builder); if (ReturnValueAddr) { // Need to store the direct results to the original indirect address. Builder.createStore(Loc, ReturnValue, ReturnValueAddr, StoreOwnershipQualifier::Unqualified); SILType VoidTy = OrigPAI->getSubstCalleeType()->getDirectFormalResultsType(); assert(VoidTy.isVoid()); ReturnValue = Builder.createTuple(Loc, VoidTy, {}); } Builder.createReturn(Loc, ReturnValue); return Thunk; } /// Create a call to a reabstraction thunk. Return the call's direct result. SILValue ReabstractionThunkGenerator::createReabstractionThunkApply( SILBuilder &Builder) { SILFunction *Thunk = &Builder.getFunction(); auto *FRI = Builder.createFunctionRef(Loc, SpecializedFunc); auto Subs = Thunk->getForwardingSubstitutionMap(); auto specConv = SpecializedFunc->getConventions(); if (!SpecializedFunc->getLoweredFunctionType()->hasErrorResult()) { return Builder.createApply(Loc, FRI, Subs, Arguments, false); } // Create the logic for calling a throwing function. SILBasicBlock *NormalBB = Thunk->createBasicBlock(); SILBasicBlock *ErrorBB = Thunk->createBasicBlock(); Builder.createTryApply(Loc, FRI, Subs, Arguments, NormalBB, ErrorBB); auto *ErrorVal = ErrorBB->createPHIArgument( SpecializedFunc->mapTypeIntoContext(specConv.getSILErrorType()), ValueOwnershipKind::Owned); Builder.setInsertionPoint(ErrorBB); Builder.createThrow(Loc, ErrorVal); SILValue ReturnValue = NormalBB->createPHIArgument( SpecializedFunc->mapTypeIntoContext(specConv.getSILResultType()), ValueOwnershipKind::Owned); Builder.setInsertionPoint(NormalBB); return ReturnValue; } /// Create SIL arguments for a reabstraction thunk with lowered addresses. This /// may involve replacing indirect arguments with loads and stores. Return the /// SILArgument for the address of an indirect result, or nullptr. 
/// /// FIXME: Remove this if we don't need to create reabstraction thunks after /// address lowering. SILArgument *ReabstractionThunkGenerator::convertReabstractionThunkArguments( SILBuilder &Builder) { SILFunction *Thunk = &Builder.getFunction(); CanSILFunctionType SpecType = SpecializedFunc->getLoweredFunctionType(); CanSILFunctionType SubstType = ReInfo.getSubstitutedType(); auto specConv = SpecializedFunc->getConventions(); (void)specConv; SILFunctionConventions substConv(SubstType, M); assert(specConv.useLoweredAddresses()); // ReInfo.NumIndirectResults corresponds to SubstTy's formal indirect // results. SpecTy may have fewer formal indirect results. assert(SubstType->getNumIndirectFormalResults() >= SpecType->getNumIndirectFormalResults()); SILBasicBlock *EntryBB = Thunk->getEntryBlock(); SILArgument *ReturnValueAddr = nullptr; auto SpecArgIter = SpecializedFunc->getArguments().begin(); auto cloneSpecializedArgument = [&]() { // No change to the argument. SILArgument *SpecArg = *SpecArgIter++; SILArgument *NewArg = EntryBB->createFunctionArgument(SpecArg->getType(), SpecArg->getDecl()); Arguments.push_back(NewArg); }; // ReInfo.NumIndirectResults corresponds to SubstTy's formal indirect // results. SpecTy may have fewer formal indirect results. assert(SubstType->getNumIndirectFormalResults() >= SpecType->getNumIndirectFormalResults()); unsigned resultIdx = 0; for (auto substRI : SubstType->getIndirectFormalResults()) { if (ReInfo.isFormalResultConverted(resultIdx++)) { // Convert an originally indirect to direct specialized result. // Store the result later. // FIXME: This only handles a single result! Partial specialization could // induce some combination of direct and indirect results. 
SILType ResultTy = SpecializedFunc->mapTypeIntoContext(substConv.getSILType(substRI)); assert(ResultTy.isAddress()); assert(!ReturnValueAddr); ReturnValueAddr = EntryBB->createFunctionArgument(ResultTy); continue; } // If the specialized result is already indirect, simply clone the indirect // result argument. assert((*SpecArgIter)->getType().isAddress()); cloneSpecializedArgument(); } assert(SpecArgIter == SpecializedFunc->getArgumentsWithoutIndirectResults().begin()); unsigned numParams = SpecType->getNumParameters(); assert(numParams == SubstType->getNumParameters()); for (unsigned paramIdx = 0; paramIdx < numParams; ++paramIdx) { if (ReInfo.isParamConverted(paramIdx)) { // Convert an originally indirect to direct specialized parameter. assert(!specConv.isSILIndirect(SpecType->getParameters()[paramIdx])); // Instead of passing the address, pass the loaded value. SILType ParamTy = SpecializedFunc->mapTypeIntoContext( substConv.getSILType(SubstType->getParameters()[paramIdx])); assert(ParamTy.isAddress()); SILArgument *SpecArg = *SpecArgIter++; SILArgument *NewArg = EntryBB->createFunctionArgument(ParamTy, SpecArg->getDecl()); auto *ArgVal = Builder.createLoad(Loc, NewArg, LoadOwnershipQualifier::Unqualified); Arguments.push_back(ArgVal); continue; } // Simply clone unconverted direct or indirect parameters. cloneSpecializedArgument(); } assert(SpecArgIter == SpecializedFunc->getArguments().end()); return ReturnValueAddr; } void swift::trySpecializeApplyOfGeneric( ApplySite Apply, DeadInstructionSet &DeadApplies, llvm::SmallVectorImpl<SILFunction *> &NewFunctions, OptRemark::Emitter &ORE) { assert(Apply.hasSubstitutions() && "Expected an apply with substitutions!"); auto *F = Apply.getFunction(); auto *RefF = cast<FunctionRefInst>(Apply.getCallee())->getReferencedFunction(); LLVM_DEBUG(llvm::dbgs() << "\n\n*** ApplyInst in function " << F->getName() << ":\n"; Apply.getInstruction()->dumpInContext()); // If the caller is fragile but the callee is not, bail out. 
// Specializations have shared linkage, which means they do // not have an external entry point, Since the callee is not // fragile we cannot serialize the body of the specialized // callee either. if (F->isSerialized() && !RefF->hasValidLinkageForFragileInline()) return; if (shouldNotSpecializeCallee(RefF)) return; // If the caller and callee are both fragile, preserve the fragility when // cloning the callee. Otherwise, strip it off so that we can optimize // the body more. IsSerialized_t Serialized = IsNotSerialized; if (F->isSerialized() && RefF->isSerialized()) Serialized = IsSerializable; // If it is OnoneSupport consider all specializations as non-serialized // as we do not SIL serialize their bodies. // It is important to set this flag here, because it affects the // mangling of the specialization's name. if (Apply.getModule().isOptimizedOnoneSupportModule()) Serialized = IsNotSerialized; ReabstractionInfo ReInfo(Apply, RefF, Apply.getSubstitutionMap(), /*ConvertIndirectToDirect=*/true, &ORE); if (!ReInfo.canBeSpecialized()) return; SILModule &M = F->getModule(); bool needAdaptUsers = false; bool replacePartialApplyWithoutReabstraction = false; auto *PAI = dyn_cast<PartialApplyInst>(Apply); if (PAI && ReInfo.hasConversions()) { // If we have a partial_apply and we converted some results/parameters from // indirect to direct there are 3 cases: // 1) All uses of the partial_apply are apply sites again. In this case // we can just adapt all the apply sites which use the partial_apply. // 2) The result of the partial_apply is re-abstracted anyway (and the // re-abstracted function type matches with our specialized type). In // this case we can just skip the existing re-abstraction. // 3) For all other cases we need to create a new re-abstraction thunk. 
needAdaptUsers = true; for (Operand *Use : PAI->getUses()) { SILInstruction *User = Use->getUser(); if (isa<RefCountingInst>(User)) continue; if (User->isDebugInstruction()) continue; auto FAS = FullApplySite::isa(User); if (FAS && FAS.getCallee() == PAI) continue; auto *PAIUser = dyn_cast<PartialApplyInst>(User); if (PAIUser && isPartialApplyOfReabstractionThunk(PAIUser)) { CanSILFunctionType NewPAType = ReInfo.createSpecializedType(PAI->getFunctionType(), M); if (PAIUser->getFunctionType() == NewPAType) continue; } replacePartialApplyWithoutReabstraction = true; break; } } GenericFuncSpecializer FuncSpecializer(RefF, Apply.getSubstitutionMap(), Serialized, ReInfo); SILFunction *SpecializedF = FuncSpecializer.lookupSpecialization(); if (SpecializedF) { // Even if the pre-specialization exists already, try to preserve it // if it is one of our known pre-specializations for -Onone support. linkSpecialization(M, SpecializedF); } else { SpecializedF = FuncSpecializer.tryCreateSpecialization(); if (!SpecializedF) return; LLVM_DEBUG(llvm::dbgs() << "Created specialized function: " << SpecializedF->getName() << "\n" << "Specialized function type: " << SpecializedF->getLoweredFunctionType() << "\n"); assert(!SpecializedF->hasQualifiedOwnership()); NewFunctions.push_back(SpecializedF); } ORE.emit([&]() { std::string Str; llvm::raw_string_ostream OS(Str); SpecializedF->getLoweredFunctionType().print( OS, PrintOptions::printQuickHelpDeclaration()); using namespace OptRemark; return RemarkPassed("Specialized", *Apply.getInstruction()) << "Specialized function " << NV("Function", RefF) << " with type " << NV("FuncType", OS.str()); }); assert(ReInfo.getSpecializedType() == SpecializedF->getLoweredFunctionType() && "Previously specialized function does not match expected type."); DeadApplies.insert(Apply.getInstruction()); if (replacePartialApplyWithoutReabstraction) { // There are some unknown users of the partial_apply. 
Therefore we need a // thunk which converts from the re-abstracted function back to the // original function with indirect parameters/results. auto *PAI = cast<PartialApplyInst>(Apply.getInstruction()); SILBuilderWithScope Builder(PAI); SILFunction *Thunk = ReabstractionThunkGenerator(ReInfo, PAI, SpecializedF).createThunk(); NewFunctions.push_back(Thunk); auto *FRI = Builder.createFunctionRef(PAI->getLoc(), Thunk); SmallVector<SILValue, 4> Arguments; for (auto &Op : PAI->getArgumentOperands()) { Arguments.push_back(Op.get()); } auto Subs = ReInfo.getCallerParamSubstitutionMap(); auto *NewPAI = Builder.createPartialApply( PAI->getLoc(), FRI, Subs, Arguments, PAI->getType().getAs<SILFunctionType>()->getCalleeConvention()); PAI->replaceAllUsesWith(NewPAI); DeadApplies.insert(PAI); return; } // Make the required changes to the call site. ApplySite newApply = replaceWithSpecializedFunction(Apply, SpecializedF, ReInfo); if (needAdaptUsers) { // Adapt all known users of the partial_apply. This is needed in case we // converted some indirect parameters/results to direct ones. auto *NewPAI = cast<PartialApplyInst>(newApply); ReInfo.prunePartialApplyArgs(NewPAI->getNumArguments()); for (Operand *Use : NewPAI->getUses()) { SILInstruction *User = Use->getUser(); if (auto FAS = FullApplySite::isa(User)) { SILBuilder Builder(User); replaceWithSpecializedCallee(FAS, NewPAI, Builder, ReInfo); DeadApplies.insert(FAS.getInstruction()); continue; } if (auto *PAI = dyn_cast<PartialApplyInst>(User)) { // This is a partial_apply of a re-abstraction thunk. Just skip this. assert(PAI->getType() == NewPAI->getType()); PAI->replaceAllUsesWith(NewPAI); DeadApplies.insert(PAI); } } } } // ============================================================================= // Prespecialized symbol lookup. 
// // This uses the SIL linker to checks for the does not load the body of the pres // ============================================================================= /// Link a specialization for generating prespecialized code. /// /// For now, it is performed only for specializations in the /// standard library. But in the future, one could think of /// maintaining a cache of optimized specializations. /// /// Mark specializations as public, so that they can be used by user /// applications. These specializations are generated during -O compilation of /// the library, but only used only by client code compiled at -Onone. They /// should be never inlined. static bool linkSpecialization(SILModule &M, SILFunction *F) { if (F->getLinkage() == SILLinkage::Public) return true; // Do not remove functions that are known prespecializations. // Keep them around. Change their linkage to public, so that other // applications can refer to them. if (M.isOptimizedOnoneSupportModule()) { if (isKnownPrespecialization(F->getName())) { F->setLinkage(SILLinkage::Public); F->setSerialized(IsNotSerialized); return true; } } return false; } /// The list of classes and functions from the stdlib /// whose specializations we want to preserve. static const char *const KnownPrespecializations[] = { "Array", "_ArrayBuffer", "_ContiguousArrayBuffer", "Range", "RangeIterator", "CountableRange", "CountableRangeIterator", "ClosedRange", "ClosedRangeIterator", "CountableClosedRange", "CountableClosedRangeIterator", "IndexingIterator", "Collection", "ReversedCollection", "MutableCollection", "BidirectionalCollection", "RandomAccessCollection", "ReversedRandomAccessCollection", "RangeReplaceableCollection", "_allocateUninitializedArray", "UTF8", "UTF16", "String", "_StringBuffer", "_toStringReadOnlyPrintable", }; bool swift::isKnownPrespecialization(StringRef SpecName) { // TODO: Once there is an efficient API to check if // a given symbol is a specialization of a specific type, // use it instead. 
Doing demangling just for this check // is just wasteful. auto DemangledNameString = swift::Demangle::demangleSymbolAsString(SpecName); StringRef DemangledName = DemangledNameString; LLVM_DEBUG(llvm::dbgs() << "Check if known: " << DemangledName << "\n"); auto pos = DemangledName.find("generic ", 0); auto oldpos = pos; if (pos == StringRef::npos) return false; // Create "of Swift" llvm::SmallString<64> OfString; llvm::raw_svector_ostream buffer(OfString); buffer << "of "; buffer << STDLIB_NAME <<'.'; StringRef OfStr = buffer.str(); LLVM_DEBUG(llvm::dbgs() << "Check substring: " << OfStr << "\n"); pos = DemangledName.find(OfStr, oldpos); if (pos == StringRef::npos) { // Create "of (extension in Swift).Swift" llvm::SmallString<64> OfString; llvm::raw_svector_ostream buffer(OfString); buffer << "of (extension in " << STDLIB_NAME << "):"; buffer << STDLIB_NAME << '.'; OfStr = buffer.str(); pos = DemangledName.find(OfStr, oldpos); LLVM_DEBUG(llvm::dbgs() << "Check substring: " << OfStr << "\n"); if (pos == StringRef::npos) return false; } pos += OfStr.size(); for (auto NameStr : KnownPrespecializations) { StringRef Name = NameStr; auto pos1 = DemangledName.find(Name, pos); if (pos1 == pos && !isalpha(DemangledName[pos1+Name.size()])) { return true; } } return false; } /// Try to look up an existing specialization in the specialization cache. /// If it is found, it tries to link this specialization. /// /// For now, it performs a lookup only in the standard library. /// But in the future, one could think of maintaining a cache /// of optimized specializations. static SILFunction *lookupExistingSpecialization(SILModule &M, StringRef FunctionName) { // Try to link existing specialization only in -Onone mode. // All other compilation modes perform specialization themselves. // TODO: Cache optimized specializations and perform lookup here? // Only check that this function exists, but don't read // its body. It can save some compile-time. 
  // findFunction with PublicExternal linkage returns a declaration; per the
  // comment on the previous line, the body is deliberately not read here.
  if (isKnownPrespecialization(FunctionName))
    return M.findFunction(FunctionName, SILLinkage::PublicExternal);

  return nullptr;
}

/// Return the prespecialization named \p FunctionName if it is available to
/// this module — either because the module already contains it, or because it
/// can be found via lookupExistingSpecialization. Returns nullptr otherwise.
SILFunction *swift::lookupPrespecializedSymbol(SILModule &M,
                                              StringRef FunctionName) {
  // First check if the module contains a required specialization already.
  auto *Specialization = M.lookUpFunction(FunctionName);
  if (Specialization) {
    if (Specialization->getLinkage() == SILLinkage::PublicExternal)
      return Specialization;
  }

  // Then check if the required specialization can be found elsewhere.
  Specialization = lookupExistingSpecialization(M, FunctionName);
  if (!Specialization)
    return nullptr;

  assert(hasPublicVisibility(Specialization->getLinkage()) &&
         "Pre-specializations should have public visibility");

  // The prespecialization is consumed as an external declaration; mark it so.
  Specialization->setLinkage(SILLinkage::PublicExternal);

  assert(Specialization->isExternalDeclaration() &&
         "Specialization should be a public external declaration");

  LLVM_DEBUG(llvm::dbgs() << "Found existing specialization for: "
                          << FunctionName << '\n';
             llvm::dbgs() << swift::Demangle::demangleSymbolAsString(
                                 Specialization->getName())
                          << "\n\n");

  return Specialization;
}
#include <iostream>
using namespace std;

/// A simple rectangle with integer dimensions.
class Rectangle {
    // In-class initializers guarantee area() is well-defined even when
    // set_values() has not been called yet. (The original left width/height
    // uninitialized, so reading them before set_values() was undefined
    // behavior.)
    int width = 0, height = 0;
  public:
    // Set the rectangle's width and height, in that order.
    void set_values (int, int);
    // Return width * height. const: computing the area does not mutate the
    // rectangle, so it is callable on const objects too.
    int area() const {return width * height;}
};

void Rectangle::set_values (int x, int y){
    width = x;
    height = y;
}

// Demo entry point: build two rectangles and print their areas.
int main(){
    Rectangle rect, rectb;
    rect.set_values(3, 4);
    rectb.set_values(5, 6);
    cout << "rect area: " << rect.area() << "\n";
    cout << "rectb area: " << rectb.area() << "\n";
    return 0;
}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. //***************************************************************************** // File: frameinfo.cpp // // // Code to find control info about a stack frame. // //***************************************************************************** #include "stdafx.h" // Include so we can get information out of ComMethodFrame #ifdef FEATURE_COMINTEROP #include "COMToClrCall.h" #endif // Get a frame pointer from a RegDisplay. // This is mostly used for chains and stub frames (i.e. internal frames), where we don't need an exact // frame pointer. This is why it is okay to use the current SP instead of the caller SP on IA64. // We should really rename this and possibly roll it into GetFramePointer() when we move the stackwalker // to OOP. FramePointer GetSP(REGDISPLAY * pRDSrc) { FramePointer fp = FramePointer::MakeFramePointer( (LPVOID)GetRegdisplaySP(pRDSrc)); return fp; } // Get a frame pointer from a RegDisplay. FramePointer GetFramePointer(REGDISPLAY * pRDSrc) { return FramePointer::MakeFramePointer(GetRegdisplaySP(pRDSrc)); } //--------------------------------------------------------------------------------------- // // Convert a FramePointer to a StackFrame and return it. // // Arguments: // fp - the FramePointer to be converted // // Return Value: // a StackFrame equivalent to the given FramePointer // // Notes: // We really should consolidate the two abstractions for "stack frame identifiers" // (StackFrame and FramePointer) when we move the debugger stackwalker to OOP. 
// FORCEINLINE StackFrame ConvertFPToStackFrame(FramePointer fp) { return StackFrame((UINT_PTR)fp.GetSPValue()); } /* ------------------------------------------------------------------------- * * DebuggerFrameInfo routines * ------------------------------------------------------------------------- */ //struct DebuggerFrameData: Contains info used by the DebuggerWalkStackProc // to do a stack walk. The info and pData fields are handed to the pCallback // routine at each frame, struct DebuggerFrameData { // Initialize this struct. Only done at the start of a stackwalk. void Init( Thread * _pThread, FramePointer _targetFP, BOOL fIgnoreNonmethodFrames, // generally true for stackwalking and false for stepping DebuggerStackCallback _pCallback, void *_pData ) { LIMITED_METHOD_CONTRACT; this->pCallback = _pCallback; this->pData = _pData; this->cRealCounter = 0; this->thread = _pThread; this->targetFP = _targetFP; this->targetFound = (_targetFP == LEAF_MOST_FRAME); this->ignoreNonmethodFrames = fIgnoreNonmethodFrames; // For now, we can tie these to flags together. // In everett, we disable SIS (For backwards compat). this->fProvideInternalFrames = (fIgnoreNonmethodFrames != 0); this->fNeedToSendEnterManagedChain = false; this->fTrackingUMChain = false; this->fHitExitFrame = false; this->info.eStubFrameType = STUBFRAME_NONE; this->info.quickUnwind = false; this->info.frame = NULL; this->needParentInfo = false; #ifdef FEATURE_EH_FUNCLETS this->fpParent = LEAF_MOST_FRAME; this->info.fIsLeaf = true; this->info.fIsFunclet = false; this->info.fIsFilter = false; #endif // FEATURE_EH_FUNCLETS // Look strange? Go to definition of this field. I dare you. this->info.fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric = false; #if defined(_DEBUG) this->previousFP = LEAF_MOST_FRAME; #endif // _DEBUG } // True if we need the next CrawlFrame to fill out part of this FrameInfo's data. bool needParentInfo; // The FrameInfo that we'll dispatch to the pCallback. 
This matches against // the CrawlFrame for that frame that the callback belongs too. FrameInfo info; // Regdisplay that the EE stackwalker is updating. REGDISPLAY regDisplay; #ifdef FEATURE_EH_FUNCLETS // This is used to skip funclets in a stackwalk. It marks the frame pointer to which we should skip. FramePointer fpParent; #endif // FEATURE_EH_FUNCLETS #if defined(_DEBUG) // For debugging, track the previous FramePointer so we can assert that we're // making progress through the stack. FramePointer previousFP; #endif // _DEBUG // whether we have hit an exit frame or not (i.e. a M2U frame) bool fHitExitFrame; private: // The scope of this field is each section of managed method frames on the stack. bool fNeedToSendEnterManagedChain; // Flag set when we first stack-walk to decide if we want to ignore certain frames. // Stepping doesn't ignore these frames; end user stacktraces do. BOOL ignoreNonmethodFrames; // Do we want callbacks for internal frames? // Steppers generally don't. User stack-walk does. bool fProvideInternalFrames; // Info for tracking unmanaged chains. // We track the starting (leaf) context for an unmanaged chain, as well as the // ending (root) framepointer. bool fTrackingUMChain; REGDISPLAY rdUMChainStart; FramePointer fpUMChainEnd; // Thread that the stackwalk is for. Thread *thread; // Target FP indicates at what point in the stackwalk we'll start dispatching callbacks. // Naturally, if this is LEAF_MOST_FRAME, then all callbacks will be dispatched FramePointer targetFP; bool targetFound; // Count # of callbacks we could have dispatched (assuming targetFP==LEAF_MOST_FRAME). // Useful for detecting leaf. int cRealCounter; // Callback & user-data supplied to that callback. DebuggerStackCallback pCallback; void *pData; private: // Raw invoke. This just does some consistency asserts, // and invokes the callback if we're in the requested target range. 
StackWalkAction RawInvokeCallback(FrameInfo * pInfo) { #ifdef _DEBUG _ASSERTE(pInfo != NULL); MethodDesc * md = pInfo->md; // Invoke the callback to the user. Log what we're invoking. LOG((LF_CORDB, LL_INFO10000, "DSWCallback: MD=%s,0x%p, Chain=%x, Stub=%x, Frame=0x%p, Internal=%d\n", ((md == NULL) ? "None" : md->m_pszDebugMethodName), md, pInfo->chainReason, pInfo->eStubFrameType, pInfo->frame, pInfo->internal)); // Make sure we're providing a valid FrameInfo for the callback. pInfo->AssertValid(); #endif // Update counter. This provides a convenient check for leaf FrameInfo. this->cRealCounter++; // Only invoke if we're past the target. if (!this->targetFound && IsEqualOrCloserToLeaf(this->targetFP, this->info.fp)) { this->targetFound = true; } if (this->targetFound) { return (pCallback)(pInfo, pData); } else { LOG((LF_CORDB, LL_INFO10000, "Not invoking yet.\n")); } return SWA_CONTINUE; } public: // Invoke a callback. This may do extra logic to preserve the interface between // the LS stackwalker and the LS: // - don't invoke if we're not at the target yet // - send EnterManagedChains if we need it. StackWalkAction InvokeCallback(FrameInfo * pInfo) { // Track if we've sent any managed code yet. // If we haven't, then don't send the enter-managed chain. This catches cases // when we have leaf-most unmanaged chain. if ((pInfo->frame == NULL) && (pInfo->md != NULL)) { this->fNeedToSendEnterManagedChain = true; } // Do tracking to decide if we need to send a Enter-Managed chain. if (pInfo->HasChainMarker()) { if (pInfo->managed) { // If we're dispatching a managed-chain, then we don't need to send another one. fNeedToSendEnterManagedChain = false; } else { // If we're dispatching an UM chain, then send the Managed one. // Note that the only unmanaged chains are ThreadStart chains and UM chains. if (fNeedToSendEnterManagedChain) { fNeedToSendEnterManagedChain = false; FrameInfo f; // Assume entry chain's FP is one pointer-width after the upcoming UM chain. 
FramePointer fpRoot = FramePointer::MakeFramePointer( (BYTE*) GetRegdisplaySP(&pInfo->registers) - sizeof(DWORD*)); f.InitForEnterManagedChain(fpRoot); if (RawInvokeCallback(&f) == SWA_ABORT) { return SWA_ABORT; } } } } return RawInvokeCallback(pInfo); } // Note that we should start tracking an Unmanaged Chain. void BeginTrackingUMChain(FramePointer fpRoot, REGDISPLAY * pRDSrc) { LIMITED_METHOD_CONTRACT; _ASSERTE(!this->fTrackingUMChain); CopyREGDISPLAY(&this->rdUMChainStart, pRDSrc); this->fTrackingUMChain = true; this->fpUMChainEnd = fpRoot; this->fHitExitFrame = false; LOG((LF_CORDB, LL_EVERYTHING, "UM Chain starting at Frame=0x%p\n", this->fpUMChainEnd.GetSPValue())); // This UM chain may get cancelled later, so don't even worry about toggling the fNeedToSendEnterManagedChain bit here. // Invoke() will track whether to send an Enter-Managed chain or not. } // For various heuristics, we may not want to send an UM chain. void CancelUMChain() { LIMITED_METHOD_CONTRACT; _ASSERTE(this->fTrackingUMChain); this->fTrackingUMChain = false; } // True iff we're currently tracking an unmanaged chain. bool IsTrackingUMChain() { LIMITED_METHOD_CONTRACT; return this->fTrackingUMChain; } // Get/Set Regdisplay that starts an Unmanaged chain. REGDISPLAY * GetUMChainStartRD() { LIMITED_METHOD_CONTRACT; _ASSERTE(fTrackingUMChain); return &rdUMChainStart; } // Get/Set FramePointer that ends an unmanaged chain. void SetUMChainEnd(FramePointer fp) { LIMITED_METHOD_CONTRACT; _ASSERTE(fTrackingUMChain); fpUMChainEnd = fp; } FramePointer GetUMChainEnd() { LIMITED_METHOD_CONTRACT; _ASSERTE(fTrackingUMChain); return fpUMChainEnd; } // Get thread we're currently tracing. Thread * GetThread() { LIMITED_METHOD_CONTRACT; return thread; } // Returns true if we're on the leaf-callback (ie, we haven't dispatched a callback yet. 
bool IsLeafCallback()
{
    LIMITED_METHOD_CONTRACT;

    return cRealCounter == 0;
}

bool ShouldProvideInternalFrames()
{
    LIMITED_METHOD_CONTRACT;

    return fProvideInternalFrames;
}

bool ShouldIgnoreNonmethodFrames()
{
    LIMITED_METHOD_CONTRACT;

    return ignoreNonmethodFrames != 0;
}
};

//---------------------------------------------------------------------------------------
//
// Historically (on IA64), the offset given by the OS during stackwalking was the offset at the call
// instruction, which is different from x86 and X64, where the offset is immediately after the call
// instruction. In order to have a uniform behaviour, we needed to adjust the relative offset there.
// Today this function only strips the Thumb bit from the offset on ARM and is a nop on all other
// platforms.
//
// Arguments:
//    pCF   - the CrawlFrame for the current method frame
//    pInfo - This is the FrameInfo for the current method frame.  We need to use the fIsLeaf field,
//            since no adjustment is necessary for leaf frames.
//
// Return Value:
//    returns the adjusted relative offset
//
inline ULONG AdjustRelOffset(CrawlFrame *pCF, FrameInfo *pInfo)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(pCF != NULL);
    }
    CONTRACTL_END;

#if defined(TARGET_ARM)
    // Mask off the Thumb execution-state bit so the offset is a plain code offset.
    return pCF->GetRelOffset() & ~THUMB_CODE;
#else
    return pCF->GetRelOffset();
#endif
}

//---------------------------------------------------------------------------------------
//
// Even when there is an exit frame in the explicit frame chain, it does not necessarily mean that we have
// actually called out to unmanaged code yet or that we actually have a managed call site.  Given an exit
// frame, this function determines if we have a managed call site and have already called out to unmanaged
// code.  If we have, then we return the caller SP as the potential frame pointer.  Otherwise we return
// LEAF_MOST_FRAME.
//
// Arguments:
//    pFrame       - the exit frame to be checked
//    pData        - the state of the current frame maintained by the debugger stackwalker
//    pPotentialFP - This is an out parameter.
// It returns the caller SP of the last managed caller if
//                   there is a managed call site and we have already called out to unmanaged code.
//                   Otherwise, LEAF_MOST_FRAME is returned.
//
// Return Value:
//    true  - we have a managed call site and we have called out to unmanaged code
//    false - otherwise
//
bool HasExitRuntime(Frame *pFrame, DebuggerFrameData *pData, FramePointer *pPotentialFP)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER; // Callers demand this function be GC_NOTRIGGER.
        MODE_ANY;
        PRECONDITION(pFrame->GetFrameType() == Frame::TYPE_EXIT);
    }
    CONTRACTL_END;

#ifdef TARGET_X86
    TADDR returnIP, returnSP;

    EX_TRY
    {
        // This is a real issue. This may be called while holding GC-forbid locks, and so
        // this function can't trigger a GC. However, the only impl we have calls GC-trigger functions.
        CONTRACT_VIOLATION(GCViolation);
        pFrame->GetUnmanagedCallSite(NULL, &returnIP, &returnSP);
    }
    EX_CATCH
    {
        // We never expect an actual exception here (maybe in oom).
        // If we get an exception, then simulate the default behavior for GetUnmanagedCallSite.
        returnIP = NULL;
        returnSP = NULL; // this will cause us to return true.
    }
    EX_END_CATCH(SwallowAllExceptions);

    LOG((LF_CORDB, LL_INFO100000,
         "DWSP: TYPE_EXIT: returnIP=0x%08x, returnSP=0x%08x, frame=0x%08x, threadFrame=0x%08x, regSP=0x%08x\n",
         returnIP, returnSP, pFrame, pData->GetThread()->GetFrame(), GetRegdisplaySP(&pData->regDisplay)));

    if (pPotentialFP != NULL)
    {
        *pPotentialFP = FramePointer::MakeFramePointer((void*)returnSP);
    }

    // We've exited if this frame isn't the thread's current leaf frame, or if the
    // call site is unknown (returnSP == NULL), or if the current SP is at/above the call site.
    return ((pFrame != pData->GetThread()->GetFrame()) ||
            (returnSP == NULL) ||
            ((TADDR)GetRegdisplaySP(&pData->regDisplay) <= returnSP));

#else // TARGET_X86
    // DebuggerExitFrame always return a NULL returnSP on x86.
    if (pFrame->GetVTablePtr() == DebuggerExitFrame::GetMethodFrameVPtr())
    {
        if (pPotentialFP != NULL)
        {
            *pPotentialFP = LEAF_MOST_FRAME;
        }
        return true;
    }
    else if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())
    {
        InlinedCallFrame *pInlinedFrame = static_cast<InlinedCallFrame *>(pFrame);
        LPVOID sp = (LPVOID)pInlinedFrame->GetCallSiteSP();

        // The sp returned below is the sp of the caller, which is either an IL stub in the normal case
        // or a normal managed method in the inlined pinvoke case.
        // This sp may be the same as the frame's address, so we need to use the largest
        // possible bsp value to make sure that this frame pointer is closer to the root than
        // the frame pointer made from the frame address itself.
        if (pPotentialFP != NULL)
        {
            *pPotentialFP = FramePointer::MakeFramePointer( (LPVOID)sp );
        }

        return ((pFrame != pData->GetThread()->GetFrame()) ||
                InlinedCallFrame::FrameHasActiveCall(pInlinedFrame));

    }
    else
    {
        // It'll be nice if there's a way to assert that the current frame is indeed of a
        // derived class of TransitionFrame.
        TransitionFrame *pTransFrame = static_cast<TransitionFrame*>(pFrame);
        LPVOID sp = (LPVOID)pTransFrame->GetSP();

        // The sp returned below is the sp of the caller, which is either an IL stub in the normal case
        // or a normal managed method in the inlined pinvoke case.
        // This sp may be the same as the frame's address, so we need to use the largest
        // possible bsp value to make sure that this frame pointer is closer to the root than
        // the frame pointer made from the frame address itself.
        if (pPotentialFP != NULL)
        {
            *pPotentialFP = FramePointer::MakeFramePointer( (LPVOID)sp );
        }

        return true;
    }
#endif // TARGET_X86
}

#ifdef _DEBUG
//-----------------------------------------------------------------------------
// Debug helpers to get name of Frame.
//-----------------------------------------------------------------------------
LPCUTF8 FrameInfo::DbgGetClassName()
{
    return (md == NULL) ? ("None") : (md->m_pszDebugClassName);
}
LPCUTF8 FrameInfo::DbgGetMethodName()
{
    return (md == NULL) ? ("None") : (md->m_pszDebugMethodName);
}

//-----------------------------------------------------------------------------
// Debug helper that asserts invariants about a FrameInfo before we dispatch it.
//-----------------------------------------------------------------------------
void FrameInfo::AssertValid()
{
    LIMITED_METHOD_CONTRACT;

    bool fMethod = this->HasMethodFrame();
    bool fStub   = this->HasStubFrame();
    bool fChain  = this->HasChainMarker();

    // Can't be both Stub & Chain
    _ASSERTE(!fStub || !fChain);

    // Must be at least a Method, Stub or Chain or Internal
    _ASSERTE(fMethod || fStub || fChain || this->internal);

    // Check Managed status is consistent
    if (fMethod)
    {
        _ASSERTE(this->managed); // We only report managed methods
    }

    if (fChain)
    {
        if (!managed)
        {
            // Only certain chains can be unmanaged
            _ASSERTE((this->chainReason == CHAIN_THREAD_START) ||
                     (this->chainReason == CHAIN_ENTER_UNMANAGED));
        }
        else
        {
            // Enter-Unmanaged chains can never be managed.
            _ASSERTE((this->chainReason != CHAIN_ENTER_UNMANAGED));
        }
    }

    // FramePointer should be valid
    _ASSERTE(this->fp != LEAF_MOST_FRAME);
    _ASSERTE((this->fp != ROOT_MOST_FRAME) || (chainReason== CHAIN_THREAD_START) || (chainReason == CHAIN_ENTER_UNMANAGED));

    // If we have a Method, then we need an AppDomain.
    // (RS will need it to do lookup)
    if (fMethod)
    {
        _ASSERTE(currentAppDomain != NULL);
        _ASSERTE(managed);

        // Stubs may have a method w/o any code (eg, PInvoke wrapper).
        // @todo - Frame::TYPE_TP_METHOD_FRAME breaks this assert. Are there other cases too?
        //_ASSERTE(fStub || (pIJM != NULL));
    }

    if (fStub)
    {
        // All stubs (except LightWeightFunctions) match up w/a Frame.
        _ASSERTE(this->frame || (eStubFrameType == STUBFRAME_LIGHTWEIGHT_FUNCTION));
    }
}
#endif

//-----------------------------------------------------------------------------
// Get the DJI associated w/ this frame. This is a convenience function.
// This is recommended over using MethodDescs because DJI's are version-aware. //----------------------------------------------------------------------------- DebuggerJitInfo * FrameInfo::GetJitInfoFromFrame() const { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // Not all FrameInfo objects correspond to actual code. if (HasChainMarker() || HasStubFrame() || (frame != NULL)) { return NULL; } DebuggerJitInfo *ji = NULL; // @todo - we shouldn't need both a MD and an IP here. EX_TRY { _ASSERTE(this->md != NULL); ji = g_pDebugger->GetJitInfo(this->md, (const BYTE*)GetControlPC(&(this->registers))); _ASSERTE(ji != NULL); _ASSERTE(ji->m_nativeCodeVersion.GetMethodDesc() == this->md); } EX_CATCH { ji = NULL; } EX_END_CATCH(SwallowAllExceptions); return ji; } //----------------------------------------------------------------------------- // Get the DMI associated w/ this frame. This is a convenience function. // DMIs are 1:1 with the (token, module) pair. //----------------------------------------------------------------------------- DebuggerMethodInfo * FrameInfo::GetMethodInfoFromFrameOrThrow() { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; MethodDesc * pDesc = this->md; mdMethodDef token = pDesc-> GetMemberDef(); Module * pRuntimeModule = pDesc->GetModule(); DebuggerMethodInfo *dmi = g_pDebugger->GetOrCreateMethodInfo(pRuntimeModule, token); return dmi; } //----------------------------------------------------------------------------- // Init a FrameInfo for a UM chain. // We need a stackrange to give to an unmanaged debugger. // pRDSrc->Esp will provide the start (leaf) marker. // fpRoot will provide the end (root) portion. //----------------------------------------------------------------------------- void FrameInfo::InitForUMChain(FramePointer fpRoot, REGDISPLAY * pRDSrc) { _ASSERTE(pRDSrc != NULL); // Mark that we're an UM Chain (and nothing else). this->frame = NULL; this->md = NULL; // Fp will be the end (root) of the stack range. 
// pRDSrc->Sp will be the start (leaf) of the stack range. CopyREGDISPLAY(&(this->registers), pRDSrc); this->fp = fpRoot; this->quickUnwind = false; this->internal = false; this->managed = false; // These parts of the FrameInfo can be ignored for a UM chain. this->relOffset = 0; this->pIJM = NULL; this->MethodToken = METHODTOKEN(NULL, 0); this->currentAppDomain = NULL; this->exactGenericArgsToken = NULL; InitForScratchFrameInfo(); this->chainReason = CHAIN_ENTER_UNMANAGED; this->eStubFrameType = STUBFRAME_NONE; #ifdef _DEBUG FramePointer fpLeaf = GetSP(pRDSrc); _ASSERTE(IsCloserToLeaf(fpLeaf, fpRoot)); #endif #ifdef _DEBUG // After we just init it, it had better be valid. this->AssertValid(); #endif } //--------------------------------------------------------------------------------------- // // This is just a small helper to initialize the fields which are specific to 64-bit. Note that you should // only call this function on a scratch FrameInfo. Never call it on the FrameInfo used by the debugger // stackwalker to store information on the current frame. // void FrameInfo::InitForScratchFrameInfo() { #ifdef FEATURE_EH_FUNCLETS // The following flags cannot be trashed when we are calling this function on the curret FrameInfo // (the one we keep track of across multiple stackwalker callbacks). Thus, make sure you do not call // this function from InitForDynamicMethod(). In all other cases, we can call this method after we // call InitFromStubHelper() because we are working on a local scratch variable. this->fIsLeaf = false; this->fIsFunclet = false; this->fIsFilter = false; #endif // FEATURE_EH_FUNCLETS } //----------------------------------------------------------------------------- // // Init a FrameInfo for a stub. Stub frames map to internal frames on the RS. Stubs which we care about // usually contain an explicit frame which translates to an internal frame on the RS. Dynamic method is // the sole exception. 
// // Arguments: // pCF - the CrawlFrame containing the state of the current frame // pMDHint - some stubs have associated MethodDesc but others don't, // which is why this argument can be NULL // type - the type of the stub/internal frame // void FrameInfo::InitFromStubHelper( CrawlFrame * pCF, MethodDesc * pMDHint, // NULL ok CorDebugInternalFrameType type ) { _ASSERTE(pCF != NULL); Frame * pFrame = pCF->GetFrame(); LOG((LF_CORDB, LL_EVERYTHING, "InitFromStubHelper. Frame=0x%p, type=%d\n", pFrame, type)); // All Stubs have a Frame except for LightWeight methods _ASSERTE((type == STUBFRAME_LIGHTWEIGHT_FUNCTION) || (pFrame != NULL)); REGDISPLAY *pRDSrc = pCF->GetRegisterSet(); this->frame = pFrame; // Stub frames may be associated w/ a Method (as a hint). However this method // will never have a JitManager b/c it will never have IL (if it had IL, we'd be a // regulare frame, not a stub frame) this->md = pMDHint; CopyREGDISPLAY(&this->registers, pRDSrc); // FramePointer must match up w/ an EE Frame b/c that's how we match // we Exception callbacks. if (pFrame != NULL) { this->fp = FramePointer::MakeFramePointer( (LPVOID) pFrame); } else { this->fp = GetSP(pRDSrc); } this->quickUnwind = false; this->internal = false; this->managed = true; this->relOffset = 0; this->ambientSP = NULL; // Method associated w/a stub will never have a JitManager. this->pIJM = NULL; this->MethodToken = METHODTOKEN(NULL, 0); this->currentAppDomain = AppDomain::GetCurrentDomain(); this->exactGenericArgsToken = NULL; // Stub frames are mutually exclusive with chain markers. this->chainReason = CHAIN_NONE; this->eStubFrameType = type; #ifdef _DEBUG // After we just init it, it had better be valid. this->AssertValid(); #endif } //----------------------------------------------------------------------------- // Initialize a FrameInfo to be used for an "InternalFrame" // Frame should be a derived class of FramedMethodFrame. // FrameInfo's MethodDesc will be for managed wrapper for native call. 
//-----------------------------------------------------------------------------
void FrameInfo::InitForM2UInternalFrame(CrawlFrame * pCF)
{
    // For a M2U call, there's a managed method wrapping the unmanaged call. Use that.
    Frame * pFrame = pCF->GetFrame();
    _ASSERTE(pFrame->GetTransitionType() == Frame::TT_M2U);
    FramedMethodFrame * pM2U = static_cast<FramedMethodFrame*> (pFrame);
    MethodDesc * pMDWrapper = pM2U->GetFunction();

    // Some M2U transitions may not have a function associated w/ them,
    // so pMDWrapper may be NULL. PInvokeCalliFrame is an example.

    InitFromStubHelper(pCF, pMDWrapper, STUBFRAME_M2U);
    InitForScratchFrameInfo();
}

//-----------------------------------------------------------------------------
// Initialize for the U2M case...
//-----------------------------------------------------------------------------
void FrameInfo::InitForU2MInternalFrame(CrawlFrame * pCF)
{
    PREFIX_ASSUME(pCF != NULL);
    MethodDesc * pMDHint = NULL;

#ifdef FEATURE_COMINTEROP
    Frame * pFrame = pCF->GetFrame();
    PREFIX_ASSUME(pFrame != NULL);

    // For regular U2M PInvoke cases, we don't care about MD b/c it's just going to
    // be the next frame.
    // If we're a COM2CLR call, perhaps we can get the MD for the interface.
    if (pFrame->GetVTablePtr() == ComMethodFrame::GetMethodFrameVPtr())
    {
        ComMethodFrame* pCOMFrame = static_cast<ComMethodFrame*> (pFrame);
        ComCallMethodDesc* pCMD = reinterpret_cast<ComCallMethodDesc *> (pCOMFrame->ComMethodFrame::GetDatum());
        pMDHint = pCMD->GetInterfaceMethodDesc();

        // Some COM-interop cases don't have an intermediate interface method desc, so
        // pMDHint may be null.
    }
#endif

    InitFromStubHelper(pCF, pMDHint, STUBFRAME_U2M);
    InitForScratchFrameInfo();
}

//-----------------------------------------------------------------------------
// Init for an AD transition
//-----------------------------------------------------------------------------
void FrameInfo::InitForADTransition(CrawlFrame * pCF)
{
    Frame * pFrame;
    pFrame = pCF->GetFrame();
    _ASSERTE(pFrame->GetTransitionType() == Frame::TT_AppDomain);
    // AppDomain transitions never carry a method hint.
    MethodDesc * pMDWrapper = NULL;

    InitFromStubHelper(pCF, pMDWrapper, STUBFRAME_APPDOMAIN_TRANSITION);
    InitForScratchFrameInfo();
}


//-----------------------------------------------------------------------------
// Init frame for a dynamic method.
//-----------------------------------------------------------------------------
void FrameInfo::InitForDynamicMethod(CrawlFrame * pCF)
{
    // These are just stack markers that there's a dynamic method on the callstack.
    InitFromStubHelper(pCF, NULL, STUBFRAME_LIGHTWEIGHT_FUNCTION);

    // Do not call InitForScratchFrameInfo() here!  Please refer to the comment in that function.
}

//-----------------------------------------------------------------------------
// Init an internal frame to mark a func-eval.
//-----------------------------------------------------------------------------
void FrameInfo::InitForFuncEval(CrawlFrame * pCF)
{
    // We don't store a MethodDesc hint referring to the method we're going to invoke because
    // uses of stub frames will assume the MD is relative to the AppDomain the frame is in.
    // For cross-AD funcevals, we're invoking a method in a domain other than the one this frame
    // is in.
    MethodDesc * pMDHint = NULL;

    // Add a stub frame here to mark that there is a FuncEvalFrame on the stack.
    InitFromStubHelper(pCF, pMDHint, STUBFRAME_FUNC_EVAL);
    InitForScratchFrameInfo();
}


//---------------------------------------------------------------------------------------
//
// Initialize a FrameInfo for sending the CHAIN_THREAD_START reason.
// The common case is that the chain is NOT managed, since the lowest (closest to the root) managed method
// is usually called from unmanaged code.  In fact, in Whidbey, we should never have a managed chain.
//
// Arguments:
//    pRDSrc - a REGDISPLAY for the beginning (the leafmost frame) of the chain
//
void FrameInfo::InitForThreadStart(Thread * pThread, REGDISPLAY * pRDSrc)
{
    this->frame = (Frame *) FRAME_TOP;
    this->md = NULL;
    CopyREGDISPLAY(&(this->registers), pRDSrc);
    // Root the chain at the thread's cached stack base.
    this->fp = FramePointer::MakeFramePointer(pThread->GetCachedStackBase());

    this->quickUnwind = false;
    this->internal = false;
    this->managed = false;
    this->relOffset = 0;
    this->pIJM = NULL;
    this->MethodToken = METHODTOKEN(NULL, 0);

    this->currentAppDomain = NULL;
    this->exactGenericArgsToken = NULL;

    InitForScratchFrameInfo();

    this->chainReason = CHAIN_THREAD_START;
    this->eStubFrameType = STUBFRAME_NONE;

#ifdef _DEBUG
    // After we just init it, it had better be valid.
    this->AssertValid();
#endif
}


//---------------------------------------------------------------------------------------
//
// Initialize a FrameInfo for sending a CHAIN_ENTER_MANAGED.
// A Enter-Managed chain is always sent immediately before an UM chain, meaning that the Enter-Managed chain
// is closer to the leaf than the UM chain.
//
// Arguments:
//    fpRoot - This is the frame pointer for the Enter-Managed chain.  It is currently arbitrarily set
//             to be one stack slot higher (closer to the leaf) than the frame pointer of the beginning
//             of the upcoming UM chain.
//
void FrameInfo::InitForEnterManagedChain(FramePointer fpRoot)
{
    // Nobody should use a EnterManagedChain's Frame*, but there's no
    // good value to enforce that.
    this->frame = (Frame *) FRAME_TOP;
    this->md = NULL;
    // No register state is meaningful for this synthetic chain marker.
    memset((void *)&this->registers, 0, sizeof(this->registers));
    this->fp = fpRoot;

    this->quickUnwind = true;

    this->internal = false;
    this->managed = true;
    this->relOffset = 0;
    this->pIJM = NULL;
    this->MethodToken = METHODTOKEN(NULL, 0);

    this->currentAppDomain = NULL;
    this->exactGenericArgsToken = NULL;

    InitForScratchFrameInfo();

    this->chainReason = CHAIN_ENTER_MANAGED;
    this->eStubFrameType = STUBFRAME_NONE;
}

//-----------------------------------------------------------------------------
// Do tracking for UM chains.
// This may invoke the UMChain callback and M2U callback.
// Returns SWA_ABORT if a dispatched callback aborts the walk, else SWA_CONTINUE.
//-----------------------------------------------------------------------------
StackWalkAction TrackUMChain(CrawlFrame *pCF, DebuggerFrameData *d)
{
    Frame *frame = g_pEEInterface->GetFrame(pCF);

    // If we encounter an ExitFrame out in the wild, then we'll convert it to an UM chain.
    if (!d->IsTrackingUMChain())
    {
        if ((frame != NULL) && (frame != FRAME_TOP) && (frame->GetFrameType() == Frame::TYPE_EXIT))
        {
            LOG((LF_CORDB, LL_EVERYTHING, "DWSP. ExitFrame while not tracking\n"));
            REGDISPLAY* pRDSrc = pCF->GetRegisterSet();

            d->BeginTrackingUMChain(GetSP(pRDSrc), pRDSrc);

            // fall through and we'll send the UM chain.
        }
        else
        {
            return SWA_CONTINUE;
        }
    }

    _ASSERTE(d->IsTrackingUMChain());


    // If we're tracking an UM chain, then we need to:
    // - possibly refine the start & end values as we get new information in the stacktrace.
    // - possibly cancel the UM chain for various heuristics.
    // - possibly dispatch if we've hit managed code again.

    bool fDispatchUMChain = false;


    // UM Chain stops when managed code starts again.
    if (frame != NULL)
    {
        // If it's just a EE Frame, then update this as a possible end of stack range for the UM chain.
        // (The end of a stack range is closer to the root.)
        d->SetUMChainEnd(FramePointer::MakeFramePointer((LPVOID)(frame)));


        Frame::ETransitionType t = frame->GetTransitionType();
        int ft = frame->GetFrameType();

        // Sometimes we may not want to show an UM chain b/c we know it's just
        // code inside of mscorwks. (Eg: Funcevals & AD transitions both fall into this category).
        // These are perfectly valid UM chains and we could give them if we wanted to.
        if ((t == Frame::TT_AppDomain) || (ft == Frame::TYPE_FUNC_EVAL))
        {
            d->CancelUMChain();
            return SWA_CONTINUE;
        }

        // If we hit an M2U frame, then go ahead and dispatch the UM chain now.
        // This will likely also be an exit frame.
        if (t == Frame::TT_M2U)
        {
            fDispatchUMChain = true;
        }

        // If we get an Exit frame, we can use that to "prune" the UM chain to a more friendly state.
        // This heuristic is optional, it just eliminates lots of internal mscorwks frames from the callstack.
        // Note that this heuristic is only useful if we get a callback on the entry frame
        // (e.g. UMThkCallFrame) between the callback on the native marker and the callback on the exit frame.
        // Otherwise the REGDISPLAY will be the same.
        if (ft == Frame::TYPE_EXIT)
        {
            // If we have a valid reg-display (non-null IP) then update it.
            // We may have an invalid reg-display if we have an exit frame on an inactive thread.
            REGDISPLAY * pNewRD = pCF->GetRegisterSet();
            if (GetControlPC(pNewRD) != NULL)
            {
                LOG((LF_CORDB, LL_EVERYTHING, "DWSP. updating RD while tracking UM chain\n"));
                CopyREGDISPLAY(d->GetUMChainStartRD(), pNewRD);
            }

            FramePointer fpLeaf = GetSP(d->GetUMChainStartRD());
            _ASSERTE(IsCloserToLeaf(fpLeaf, d->GetUMChainEnd()));

            _ASSERTE(!d->fHitExitFrame); // should only have 1 exit frame per UM chain code.
            d->fHitExitFrame = true;

            FramePointer potentialFP;

            FramePointer fpNewChainEnd = d->GetUMChainEnd();

            // Check to see if we are inside the unmanaged call. We want to make sure we only report an exit frame after
            // we've really exited. There is a short period between where we setup the frame and when we actually exit
            // the runtime. This check is intended to ensure we're actually outside now.
            if (HasExitRuntime(frame, d, &potentialFP))
            {
                LOG((LF_CORDB, LL_EVERYTHING, "HasExitRuntime. potentialFP=0x%p\n", potentialFP.GetSPValue()));

                // If we have no call site, manufacture a FP using the current frame.
                // If we do have a call site, then the FP is actually going to be the caller SP,
                // where the caller is the last managed method before calling out to unmanaged code.
                if (potentialFP == LEAF_MOST_FRAME)
                {
                    fpNewChainEnd = FramePointer::MakeFramePointer((LPVOID)((BYTE*)frame - sizeof(LPVOID)));
                }
                else
                {
                    fpNewChainEnd = potentialFP;
                }
            }
            // For IL stubs, we may actually push an uninitialized InlinedCallFrame frame onto the frame chain
            // in jitted managed code, and then later on initialize it in a native runtime helper. In this case, if
            // HasExitRuntime() is false (meaning the frame is uninitialized), then we are actually still in managed
            // code and have not made the call to native code yet, so we should report an unmanaged chain.
            else
            {
                d->CancelUMChain();
                return SWA_CONTINUE;
            }

            fDispatchUMChain = true;

            // If we got a valid chain end, then prune the UM chain accordingly.
            // Note that some EE Frames will give invalid info back so we have to check.
            // PInvokeCalliFrame is one example (when doing MC++ function pointers)
            if (IsCloserToRoot(fpNewChainEnd, fpLeaf))
            {
                d->SetUMChainEnd(fpNewChainEnd);
            }
            else
            {
                _ASSERTE(IsCloserToLeaf(fpLeaf, d->GetUMChainEnd()));
            }
        } // end ExitFrame

        // Only CLR internal code / stubs can push Frames onto the Frame chain.
        // So if we hit a raw interceptor frame before we hit any managed frame, then this whole
        // UM chain must still be in CLR internal code.
        // Either way, this UM chain has ended (and some new chain based off the frame has started)
        // so we need to either Cancel the chain or dispatch it.
        if (frame->GetInterception() != Frame::INTERCEPTION_NONE)
        {
            // Interceptors may contain calls out to unmanaged code (such as unmanaged dllmain when
            // loading a new dll), so we need to dispatch these.
            // These extra UM chains don't show in Everett, and so everett debuggers on whidbey
            // may see new chains.
            // We need to ensure that whidbey debuggers are updated first.
            fDispatchUMChain = true;
        }
    }
    else
    {
        // If it's a real method (not just an EE Frame), then the UM chain is over.
        fDispatchUMChain = true;
    }


    if (fDispatchUMChain)
    {
        // Check if we should cancel the UM chain.

        // We need to discriminate between the following 2 cases:
        // 1) Managed -(a)-> mscorwks -(b)-> Managed  (leaf)
        // 2) Native  -(a)-> mscorwks -(b)-> Managed  (leaf)
        //
        // --INCORRECT RATIONALE SEE "CORRECTION" BELOW--
        // Case 1 could happen if a managed call injects a stub (such as w/ delegates).
        // In both cases, the (mscorwks-(b)->managed) transition causes a IsNativeMarker callback
        // which initiates a UM chain. In case 1, we want to cancel the UM chain, but
        // in case 2 we want to dispatch it.
        // The difference is case #2 will have some EE Frame at (b) and case #1 won't.
        // That EE Frame should have caused us to dispatch the call for the managed method, and
        // thus by the time we get around to dispatching the UM Chain, we shouldn't have a managed
        // method waiting to be dispatched in the DebuggerFrameData.
        // --END INCORRECT RATIONALE--
        //
        // This is kind of messed up.  First of all, the assertions on case 2 is not true on 64-bit.
        // We won't have an explicit frame at (b).  Secondly, case 1 is not always true either.
        // Consider the case where we are calling a cctor at prestub time.  This is what the stack may
        // look like: managed -> PrestubMethodFrame -> GCFrame -> managed (cctor) (leaf).  In this case,
        // we will actually send the UM chain because we will have dispatched the call for the managed
        // method (the cctor) when we get a callback for the GCFrame.
        //
        // --INCORRECT SEE "CORRECTION" BELOW--
        // Keep in mind that this is just a heuristic to reduce the number of UM chains we are sending
        // over to the RS.
        // --END INCORRECT --
        //
        // CORRECTION: These UM chains also feed into the results of at least ControllerStackInfo and probably other
        // places.  Issue 650903 is a concrete example of how not filtering a UM chain causes correctness
        // issues in the LS.  This code may still have bugs in it based on those incorrect assumptions.
        // A narrow fix for 650903 is the only thing that was changed at the time of adding this comment.
        if (d->needParentInfo && d->info.HasMethodFrame())
        {
            LOG((LF_CORDB, LL_EVERYTHING, "Cancelling UM Chain b/c it's internal\n"));
            d->CancelUMChain();
            return SWA_CONTINUE;
        }

        // If we're NOT ignoring non-method frames, and we didn't get an explicit ExitFrame somewhere
        // in this chain, then don't send the non-leaf UM chain.
        // The practical cause here is that w/o an exit frame, we don't know where the UM chain
        // is starting (could be from anywhere in mscorwks). And we can't patch any random spot in
        // mscorwks.
        // Sending leaf-UM chains is OK b/c we can't step-out to them (they're the leaf, duh).
        // (ignoreNonmethodFrames is generally false for stepping and true for regular
        // end-user stacktraces.)
        //
        // This check is probably unnecessary.  The client of the debugger stackwalker should make
        // the decision themselves as to what to do with the UM chain callbacks.
        //
        // -- INCORRECT SEE "CORRECTION" BELOW --
        // Currently, both
        // ControllerStackInfo and InterceptorStackInfo ignore UM chains completely anyway.
        // (For an example, refer to the cctor example in the previous comment.)
        // -- END INCORRECT --
        //
        // CORRECTION: See issue 650903 for a concrete example of ControllerStackInfo getting a different
        // result based on a UM chain that wasn't filtered.  This code may still have issues in
        // it based on those incorrect assumptions.  A narrow fix for 650903 is the only thing
        // that was changed at the time of adding this comment.
        if (!d->fHitExitFrame && !d->ShouldIgnoreNonmethodFrames() && !d->IsLeafCallback())
        {
            LOG((LF_CORDB, LL_EVERYTHING, "Cancelling UM Chain b/c it's stepper not requested\n"));
            d->CancelUMChain();
            return SWA_CONTINUE;
        }


        // Ok, we haven't cancelled it yet, so go ahead and send the UM chain.
        FrameInfo f;
        FramePointer fpRoot = d->GetUMChainEnd();
        FramePointer fpLeaf = GetSP(d->GetUMChainStartRD());

        // If we didn't actually get any range, then don't bother sending it.
        if (fpRoot == fpLeaf)
        {
            d->CancelUMChain();
            return SWA_CONTINUE;
        }

        f.InitForUMChain(fpRoot, d->GetUMChainStartRD());

#ifdef FEATURE_COMINTEROP
        if ((frame != NULL) &&
            (frame->GetVTablePtr() == ComPlusMethodFrame::GetMethodFrameVPtr()))
        {
            // This condition is part of the fix for 650903.  (See
            // code:ControllerStackInfo::WalkStack and code:DebuggerStepper::TrapStepOut
            // for the other parts.)  Here, we know that the frame we're looking at may be
            // a ComPlusMethodFrameGeneric (this info is not otherwise plumbed down into
            // the walker; even though the walker does get to see "f.frame", that may not
            // be "frame").  Given this, if the walker chooses to ignore these frames
            // (while doing a Step Out during managed-only debugging), then it can ignore
            // this frame.
            f.fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric = true;
        }
#endif // FEATURE_COMINTEROP

        if (d->InvokeCallback(&f) == SWA_ABORT)
        {
            // don't need to cancel if they abort.
            return SWA_ABORT;
        }
        d->CancelUMChain(); // now that we've sent it, we're done.


        // Check for a M2U internal frame.
        if (d->ShouldProvideInternalFrames() &&
            (frame != NULL) &&
            (frame != FRAME_TOP))
        {
            // We want to dispatch a M2U transition right after we dispatch the UM chain.
            Frame::ETransitionType t = frame->GetTransitionType();
            if (t == Frame::TT_M2U)
            {
                // Frame for a M2U transition.
                FrameInfo fM2U;
                fM2U.InitForM2UInternalFrame(pCF);
                if (d->InvokeCallback(&fM2U) == SWA_ABORT)
                {
                    return SWA_ABORT;
                }
            }
        }


    }

    return SWA_CONTINUE;
}

//---------------------------------------------------------------------------------------
//
// A frame pointer is a unique identifier for a particular stack location.  This function returns the
// frame pointer for the current frame, whether it is a method frame or an explicit frame.
//
// Arguments:
//    pData - the state of the current frame maintained by the debugger stackwalker
//    pCF   - the CrawlFrame for the current callback by the real stackwalker (i.e. StackWalkFramesEx());
//            this is NULL for the case where we fake an extra callback to top off a debugger stackwalk
//
// Return Value:
//    the frame pointer for the current frame
//
FramePointer GetFramePointerForDebugger(DebuggerFrameData* pData, CrawlFrame* pCF)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    FramePointer fpResult;

#if defined(FEATURE_EH_FUNCLETS)
    if (pData->info.frame == NULL)
    {
        // This is a managed method frame.
        fpResult = FramePointer::MakeFramePointer((LPVOID)GetRegdisplayStackMark(&pData->info.registers));
    }
    else
    {
        // This is an actual frame.
        fpResult = FramePointer::MakeFramePointer((LPVOID)(pData->info.frame));
    }

#else // !FEATURE_EH_FUNCLETS
    if ((pCF == NULL || !pCF->IsFrameless()) && pData->info.frame != NULL)
    {
        //
        // If we're in an explicit frame now, and the previous frame was
        // also an explicit frame, pPC will not have been updated.  So
        // use the address of the frame itself as fp.
        //
        fpResult = FramePointer::MakeFramePointer((LPVOID)(pData->info.frame));

        LOG((LF_CORDB, LL_INFO100000, "GFPFD: Two explicit frames in a row; using frame address 0x%p\n",
             pData->info.frame));
    }
    else
    {
        //
        // Otherwise use pPC as the frame pointer, as this will be
        // pointing to the return address on the stack.
// fpResult = FramePointer::MakeFramePointer((LPVOID)GetRegdisplayStackMark(&(pData->regDisplay))); } #endif // !FEATURE_EH_FUNCLETS LOG((LF_CORDB, LL_INFO100000, "GFPFD: Frame pointer is 0x%p\n", fpResult.GetSPValue())); return fpResult; } #ifdef FEATURE_EH_FUNCLETS //--------------------------------------------------------------------------------------- // // This function is called to determine if we should start skipping funclets. If we should, then we return the // frame pointer for the parent method frame. Otherwise we return LEAF_MOST_FRAME. If we are already skipping // frames, then we return the current frame pointer for the parent method frame. // // The return value of this function corresponds to the return value of ExceptionTracker::FindParentStackFrame(). // Refer to that function for more information. // // Arguments: // fpCurrentParentMarker - This is the current frame pointer of the parent method frame. It can be // LEAF_MOST_FRAME if we are not currently skipping funclets. // pCF - the CrawlFrame for the current callback from the real stackwalker // fIsNonFilterFuncletFrame - whether the current frame is a non-filter funclet frame // // Return Value: // LEAF_MOST_FRAME - skipping not required // ROOT_MOST_FRAME - skip one frame and try again // anything else - skip all frames up to but not including the returned frame pointer // inline FramePointer CheckForParentFP(FramePointer fpCurrentParentMarker, CrawlFrame* pCF, bool fIsNonFilterFuncletFrame) { WRAPPER_NO_CONTRACT; if (fpCurrentParentMarker == LEAF_MOST_FRAME) { // When we encounter a funclet, we simply stop processing frames until we hit the parent // of the funclet. Funclets and their parents have the same MethodDesc pointers, and they // should really be treated as one frame. However, we report both of them and let the callers // decide what they want to do with them. 
For example, DebuggerThread::TraceAndSendStack()
        // should never report both frames, but ControllerStackInfo::GetStackInfo() may need both to
        // determine where to put a patch. We use the fpParent as a flag to indicate if we are
        // searching for a parent of a funclet.
        //
        // Note that filter funclets are an exception. We don't skip them.
        if (fIsNonFilterFuncletFrame)
        {
            // We really should be using the same structure, but FramePointer is used everywhere in the debugger......
            StackFrame sfParent = g_pEEInterface->FindParentStackFrame(pCF);
            return FramePointer::MakeFramePointer((LPVOID)sfParent.SP);
        }
        else
        {
            return LEAF_MOST_FRAME;
        }
    }
    else
    {
        // Just return the current marker if we are already skipping frames.
        return fpCurrentParentMarker;
    }
}
#endif // FEATURE_EH_FUNCLETS

//-----------------------------------------------------------------------------
// StackWalkAction DebuggerWalkStackProc():  This is the callback called
// by the EE stackwalker.
// Note that since we don't know what the frame pointer for frame
// X is until we've looked at the caller of frame X, we actually end up
// stashing the info and pData pointers in the DebuggerFrameData struct, and
// then invoking pCallback when we've moved up one level, into the caller's
// frame.  We use the needParentInfo field to indicate that the previous frame
// needed this (parental) info, and so when it's true we should invoke
// pCallback.
// What happens is this: if the previous frame set needParentInfo, then we
// do pCallback (and set needParentInfo to false).
// Then we look at the current frame - if it's frameless (ie,
// managed), then we set needParentInfo to callback in the next frame.
// Otherwise we must be at a chain boundary, and so we set the chain reason
// appropriately.  We then figure out what type of frame it is, setting
// flags depending on the type.  If the user should see this frame, then
// we'll set needParentInfo to record its existence.  Lastly, if we're in
// a funky frame, we'll explicitly update the register set, since the
// CrawlFrame doesn't do it automatically.
//-----------------------------------------------------------------------------
StackWalkAction DebuggerWalkStackProc(CrawlFrame *pCF, void *data)
{
    // Walk state threaded through every callback of this stackwalk.
    DebuggerFrameData *d = (DebuggerFrameData *)data;

    if (pCF->IsNativeMarker())
    {
#ifdef FEATURE_EH_FUNCLETS
        // The tricky part here is that we want to skip all frames between a funclet method frame
        // and the parent method frame UNLESS the funclet is a filter.  Moreover, we should never
        // let a native marker execute the rest of this method, so we just short-circuit it here.
        if ((d->fpParent != LEAF_MOST_FRAME) || d->info.IsNonFilterFuncletFrame())
        {
            return SWA_CONTINUE;
        }
#endif // FEATURE_EH_FUNCLETS

        // This REGDISPLAY is for the native method immediately following the managed method for which
        // we have received the previous callback, i.e. the native caller of the last managed method
        // we have encountered.
        REGDISPLAY* pRDSrc = pCF->GetRegisterSet();
        d->BeginTrackingUMChain(GetSP(pRDSrc), pRDSrc);

        return SWA_CONTINUE;
    }

    // Note that a CrawlFrame may have both a methoddesc & an EE Frame.
    Frame *frame = g_pEEInterface->GetFrame(pCF);
    MethodDesc *md = pCF->GetFunction();

    LOG((LF_CORDB, LL_EVERYTHING, "Calling DebuggerWalkStackProc. Frame=0x%p, md=0x%p(%s), native_marker=%d\n",
        frame, md, (md == NULL || md == (MethodDesc*)POISONC) ? "null" : md->m_pszDebugMethodName, pCF->IsNativeMarker() ));

    // The fp for a frame must be obtained from the _next_ frame.
    // Fill it in now for the previous frame, if appropriate.
    if (d->needParentInfo)
    {
        LOG((LF_CORDB, LL_INFO100000, "DWSP: NeedParentInfo.\n"));

        d->info.fp = GetFramePointerForDebugger(d, pCF);

#if defined(_DEBUG) && !defined(TARGET_ARM) && !defined(TARGET_ARM64)
        // Make sure the stackwalk is making progress.
        // On ARM this is invalid as the stack pointer does necessarily have to move when unwinding a frame.
        _ASSERTE(IsCloserToLeaf(d->previousFP, d->info.fp));
        d->previousFP = d->info.fp;
#endif // _DEBUG && !TARGET_ARM

        d->needParentInfo = false;

        {
            // Don't invoke Stubs if we're not asking for internal frames.
            bool fDoInvoke = true;
            if (!d->ShouldProvideInternalFrames())
            {
                if (d->info.HasStubFrame())
                {
                    fDoInvoke = false;
                }
            }

            LOG((LF_CORDB, LL_INFO1000000, "DWSP: handling our target\n"));

            if (fDoInvoke)
            {
                if (d->InvokeCallback(&d->info) == SWA_ABORT)
                {
                    return SWA_ABORT;
                }
            }

            // @todo - eventually we should be initing our frame-infos properly
            // and thus should be able to remove this.
            d->info.eStubFrameType = STUBFRAME_NONE;
        }
    } // if (d->needParentInfo)

#ifdef FEATURE_EH_FUNCLETS
    // The tricky part here is that we want to skip all frames between a funclet method frame
    // and the parent method frame UNLESS the funclet is a filter.  We only have to check for fpParent
    // here (instead of checking d->info.fIsFunclet and d->info.fIsFilter as well, as in the beginning of
    // this method) is because at this point, fpParent is already set by the code above.
    if (d->fpParent == LEAF_MOST_FRAME)
#endif // FEATURE_EH_FUNCLETS
    {
        // Track the UM chain after we flush any managed goo from the last iteration.
        if (TrackUMChain(pCF, d) == SWA_ABORT)
        {
            return SWA_ABORT;
        }
    }

    // Track if we want to send a callback for this Frame / Method
    bool use=false;

    //
    // Examine the frame.
    //

    // We assume that the stack walker is just updating the
    // register display we passed in - assert it to be sure
    _ASSERTE(pCF->GetRegisterSet() == &d->regDisplay);

#ifdef FEATURE_EH_FUNCLETS
    Frame* pPrevFrame = d->info.frame;

    // Here we need to determine if we are in a non-leaf frame, in which case we want to adjust the relative offset.
    // Also, we need to check if this frame has faulted (throws a native exception), since if it has, then it should be
    // considered the leaf frame (and thus we don't need to update the relative offset).
    if (pCF->IsActiveFrame() || pCF->HasFaulted())
    {
        d->info.fIsLeaf = true;
    }
    else if ( (pPrevFrame != NULL) &&
              (pPrevFrame->GetFrameType() == Frame::TYPE_EXIT) &&
              !HasExitRuntime(pPrevFrame, d, NULL) )
    {
        // This is for the inlined NDirectMethodFrameGeneric case.  We have not exited the runtime yet, so the current
        // frame should still be regarded as the leaf frame.
        d->info.fIsLeaf = true;
    }
    else
    {
        d->info.fIsLeaf = false;
    }

    // Funclet/filter classification drives the parent-skipping logic below.
    d->info.fIsFunclet = pCF->IsFunclet();
    d->info.fIsFilter  = false;
    if (d->info.fIsFunclet)
    {
        d->info.fIsFilter = pCF->IsFilterFunclet();
    }

    if (pCF->IsFrameless())
    {
        // Check if we are skipping.
        if (d->fpParent != LEAF_MOST_FRAME)
        {
            // If fpParent is ROOT_MOST_FRAME, then we just need to skip one frame.  Otherwise, we should stop
            // skipping if the current frame pointer matches fpParent.  In either case, clear fpParent, and
            // then check again.
            if ((d->fpParent == ROOT_MOST_FRAME) ||
                ExceptionTracker::IsUnwoundToTargetParentFrame(pCF, ConvertFPToStackFrame(d->fpParent)))
            {
                LOG((LF_CORDB, LL_INFO100000, "DWSP: Stopping to skip funclet at 0x%p.\n", d->fpParent.GetSPValue()));

                d->fpParent = LEAF_MOST_FRAME;
                d->fpParent = CheckForParentFP(d->fpParent, pCF, d->info.IsNonFilterFuncletFrame());
            }
        }
    }
#endif // FEATURE_EH_FUNCLETS

    // Cache the explicit frame (NULL for pure managed frames) for this callback.
    d->info.frame = frame;
    d->info.ambientSP = NULL;

    // Record the appdomain that the thread was in when it
    // was running code for this frame.
    d->info.currentAppDomain = AppDomain::GetCurrentDomain();

    //  Grab all the info from CrawlFrame that we need to
    //  check for "Am I in an exception code blob?" now.

#ifdef FEATURE_EH_FUNCLETS
    // We are still searching for the parent of the last funclet we encounter.
    if (d->fpParent != LEAF_MOST_FRAME)
    {
        // We do nothing here.
        LOG((LF_CORDB, LL_INFO100000, "DWSP: Skipping to parent method frame at 0x%p.\n",
             d->fpParent.GetSPValue()));
    }
    else
#endif // FEATURE_EH_FUNCLETS
    // We should ignore IL stubs with no frames in our stackwalking.
    // The only exception is dynamic methods.  We want to report them when SIS is turned on.
    if ((md != NULL) && md->IsILStub() && pCF->IsFrameless())
    {
#ifdef FEATURE_MULTICASTSTUB_AS_IL
        if(md->AsDynamicMethodDesc()->IsMulticastStub())
        {
            use = true;
            d->info.managed = true;
            d->info.internal = false;
        }
#endif
        // We do nothing here.
        LOG((LF_CORDB, LL_INFO100000, "DWSP: Skip frameless IL stub.\n"));
    }
    else
    // For frames w/o method data, send them as an internal stub frame.
    if ((md != NULL) && md->IsDynamicMethod())
    {
        // Only Send the frame if "InternalFrames" are requested.
        // Else completely ignore it.
        if (d->ShouldProvideInternalFrames())
        {
            d->info.InitForDynamicMethod(pCF);

            // We'll loop around to get the FramePointer. Only modification to FrameInfo
            // after this is filling in framepointer and resetting MD.
            use = true;
        }
    }
    else if (pCF->IsFrameless())
    {
        // Regular managed-method.
        LOG((LF_CORDB, LL_INFO100000, "DWSP: Is frameless.\n"));
        use = true;
        d->info.managed = true;
        d->info.internal = false;
        d->info.chainReason = CHAIN_NONE;
        d->needParentInfo = true; // Possibly need chain reason
        d->info.relOffset =  AdjustRelOffset(pCF, &(d->info));
        d->info.pIJM = pCF->GetJitManager();
        d->info.MethodToken = pCF->GetMethodToken();

#ifdef TARGET_X86
        // This is collecting the ambientSP a lot more than we actually need it. Only time we need it is
        // inspecting local vars that are based off the ambient esp.
        d->info.ambientSP = pCF->GetAmbientSPFromCrawlFrame();
#endif
    }
    else
    {
        d->info.pIJM = NULL;
        d->info.MethodToken = METHODTOKEN(NULL, 0);

        //
        // Retrieve any interception info
        //

        // Each interception type in the switch statement below is associated with a chain reason.
        // The other chain reasons are:
        // CHAIN_INTERCEPTION      - not used
        // CHAIN_PROCESS_START     - not used
        // CHAIN_THREAD_START      - thread start
        // CHAIN_ENTER_MANAGED     - managed chain
        // CHAIN_ENTER_UNMANAGED   - unmanaged chain
        // CHAIN_DEBUGGER_EVAL     - not used
        // CHAIN_CONTEXT_SWITCH    - not used
        // CHAIN_FUNC_EVAL         - funceval

        switch (frame->GetInterception())
        {
        case Frame::INTERCEPTION_CLASS_INIT:
            //
            // Fall through
            //

        // V2 assumes that the only thing the prestub intercepts is the class constructor
        case Frame::INTERCEPTION_PRESTUB:
            d->info.chainReason = CHAIN_CLASS_INIT;
            break;

        case Frame::INTERCEPTION_EXCEPTION:
            d->info.chainReason = CHAIN_EXCEPTION_FILTER;
            break;

        case Frame::INTERCEPTION_CONTEXT:
            d->info.chainReason = CHAIN_CONTEXT_POLICY;
            break;

        case Frame::INTERCEPTION_SECURITY:
            d->info.chainReason = CHAIN_SECURITY;
            break;

        default:
            d->info.chainReason = CHAIN_NONE;
        }

        //
        // Look at the frame type to figure out how to treat it.
        //

        LOG((LF_CORDB, LL_INFO100000, "DWSP: Chain reason is 0x%X.\n", d->info.chainReason));

        switch (frame->GetFrameType())
        {
        case Frame::TYPE_ENTRY: // We now ignore entry + exit frames.
        case Frame::TYPE_EXIT:
        case Frame::TYPE_HELPER_METHOD_FRAME:
        case Frame::TYPE_INTERNAL:

            /* If we have a specific interception type, use it. 
However, if this
               is the top-most frame (with a specific type), we can ignore it
               and it wont appear in the stack-trace */
// Marks the frame as a managed, non-internal frame and requests a callback for it.
#define INTERNAL_FRAME_ACTION(d, use)  \
    (d)->info.managed = true;          \
    (d)->info.internal = false;        \
    use = true

            LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_INTERNAL.\n"));
            if (d->info.chainReason == CHAIN_NONE || pCF->IsActiveFrame())
            {
                use = false;
            }
            else
            {
                INTERNAL_FRAME_ACTION(d, use);
            }
            break;

        case Frame::TYPE_INTERCEPTION:
        case Frame::TYPE_SECURITY: // Security is a sub-type of interception
            LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_INTERCEPTION/TYPE_SECURITY.\n"));
            d->info.managed = true;
            d->info.internal = true;
            use = true;
            break;

        case Frame::TYPE_CALL:
            LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_CALL.\n"));
            // In V4, StubDispatchFrame is only used on 64-bit (and PPC?) but not on x86.  x86 uses a
            // different code path which sets up a HelperMethodFrame instead.  In V4.5, x86 and ARM
            // both use the 64-bit code path and they set up a StubDispatchFrame as well.  This causes
            // a problem in the debugger stackwalker (see Dev11 Issue 13229) since the two frame types
            // are treated differently.  More specifically, a StubDispatchFrame causes the debugger
            // stackwalk to make an invalid callback, i.e. a callback which is not for a managed method,
            // an explicit frame, or a chain.
            //
            // Ideally we would just change the StubDispatchFrame to behave like a HMF, but it's
            // too big of a change for an in-place release.  For now I'm just making surgical fixes in
            // the debugger stackwalker.  This may introduce behavioural changes in on X64, but the
            // chance of that is really small.  StubDispatchFrame is only used in the virtual stub
            // dispatch code path.  It stays on the stack in a small time window and it's not likely to
            // be on the stack while some managed methods closer to the leaf are on the stack.  There is
            // only one scenario I know of, and that's the repro for Dev11 13229, but that's for x86 only.
            // The jitted code on X64 behaves differently.
            //
            // Note that there is a corresponding change in DacDbiInterfaceImpl::GetInternalFrameType().
            if (frame->GetVTablePtr() == StubDispatchFrame::GetMethodFrameVPtr())
            {
                use = false;
            }
            else
            {
                d->info.managed = true;
                d->info.internal = false;
                use = true;
            }
            break;

        case Frame::TYPE_FUNC_EVAL:
            LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_FUNC_EVAL.\n"));
            d->info.managed = true;
            d->info.internal = true;
            // This is actually a nop.  We reset the chain reason in InitForFuncEval() below.
            // So is a FuncEvalFrame a chain or an internal frame?
            d->info.chainReason = CHAIN_FUNC_EVAL;

            {
                // We only show a FuncEvalFrame if the funceval is not trying to abort the thread.
                FuncEvalFrame *pFuncEvalFrame = static_cast<FuncEvalFrame *>(frame);
                use = pFuncEvalFrame->ShowFrame() ? true : false;
            }

            // Send Internal frame. This is "inside" (leafmost) the chain, so we send it first
            // since sending starts from the leaf.
            if (use && d->ShouldProvideInternalFrames())
            {
                FrameInfo f;
                f.InitForFuncEval(pCF);
                if (d->InvokeCallback(&f) == SWA_ABORT)
                {
                    return SWA_ABORT;
                }
            }

            break;

        // Put frames we want to ignore here:
        case Frame::TYPE_MULTICAST:
            LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_MULTICAST.\n"));
            if (d->ShouldIgnoreNonmethodFrames())
            {
                // Multicast frames exist only to gc protect the arguments
                // between invocations of a delegate.  They don't have code that
                // we can (currently) show the user (we could change this with
                // work, but why bother?  It's an internal stub, and even if the
                // user could see it, they can't modify it).
                LOG((LF_CORDB, LL_INFO100000, "DWSP: Skipping frame 0x%x b/c it's "
                    "a multicast frame!\n", frame));
                use = false;
            }
            else
            {
                LOG((LF_CORDB, LL_INFO100000, "DWSP: NOT Skipping frame 0x%x even thought it's "
                    "a multicast frame!\n", frame));
                INTERNAL_FRAME_ACTION(d, use);
            }
            break;

        default:
            _ASSERTE(!"Invalid frame type!");
            break;
        }
    }

    // Check for ICorDebugInternalFrame stuff.
    // These callbacks are dispatched out of band.
    if (d->ShouldProvideInternalFrames() && (frame != NULL) && (frame != FRAME_TOP))
    {
        Frame::ETransitionType t = frame->GetTransitionType();
        FrameInfo f;
        bool fUse = false;

        if (t == Frame::TT_U2M)
        {
            // We can invoke the Internal U2M frame now.
            f.InitForU2MInternalFrame(pCF);
            fUse = true;
        }
        else if (t == Frame::TT_AppDomain)
        {
            // Internal frame for an Appdomain transition.
            // We used to ignore frames for ADs which we hadn't sent a Create event for yet.  In V3 we send AppDomain
            // create events immediately (before any assemblies are loaded), so this should no longer be an issue.
            f.InitForADTransition(pCF);
            fUse = true;
        }

        // Frame's setup.  Now invoke the callback.
        if (fUse)
        {
            if (d->InvokeCallback(&f) == SWA_ABORT)
            {
                return SWA_ABORT;
            }
        }
    }

    // should we give frames?
    if (use)
    {
        //
        // If we are returning a complete stack walk from the helper thread, then we
        // need to gather information to instantiate generics.  However, a stepper doing
        // a stackwalk does not need this information, so skip in that case.
        //
        if (d->ShouldIgnoreNonmethodFrames())
        {
            // Finding sizes of value types on the argument stack while
            // looking for the arg runs the class loader in non-load mode.
            ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
            d->info.exactGenericArgsToken = pCF->GetExactGenericArgsToken();
        }
        else
        {
            d->info.exactGenericArgsToken = NULL;
        }

        // Snapshot the method and register state; the frame pointer is filled in
        // on the NEXT callback (see the needParentInfo handling above).
        d->info.md = md;
        CopyREGDISPLAY(&(d->info.registers), &(d->regDisplay));

#if defined(TARGET_AMD64)
        LOG((LF_CORDB, LL_INFO100000, "DWSP: Saving REGDISPLAY with sp = 0x%p, pc = 0x%p.\n",
             GetRegdisplaySP(&(d->info.registers)),
             GetControlPC(&(d->info.registers))));
#endif // TARGET_AMD64

        d->needParentInfo = true;
        LOG((LF_CORDB, LL_INFO100000, "DWSP: Setting needParentInfo\n"));
    }

#if defined(FEATURE_EH_FUNCLETS)
    d->fpParent = CheckForParentFP(d->fpParent, pCF, d->info.IsNonFilterFuncletFrame());
#endif // FEATURE_EH_FUNCLETS

    //
    // The stackwalker doesn't update the register set for the
    // case where a non-frameless frame is returning to another
    // non-frameless frame.  Cover this case.
    //
    // !!! This assumes that updating the register set multiple times
    // for a given frame times is not a bad thing...
    //
    if (!pCF->IsFrameless())
    {
        LOG((LF_CORDB, LL_INFO100000, "DWSP: updating regdisplay.\n"));
        pCF->GetFrame()->UpdateRegDisplay(&d->regDisplay);
    }

    return SWA_CONTINUE;
}

#if defined(TARGET_X86) && defined(FEATURE_INTEROP_DEBUGGING)
// Helper to get the Wait-Sleep-Join bit from the thread
bool IsInWaitSleepJoin(Thread * pThread)
{
    // Partial User state is sufficient because that has the bit we're checking against.
    CorDebugUserState cts = g_pEEInterface->GetPartialUserState(pThread);
    return ((cts & USER_WAIT_SLEEP_JOIN) != 0);
}

//-----------------------------------------------------------------------------
// Decide if we should send an UM leaf chain.
// This goes through a bunch of heuristics.
// The driving guidelines here are:
// - we try not to send an UM chain if it's just internal mscorwks stuff
//   and we know it can't have native user code.
//   (ex, anything beyond a filter context, various hijacks, etc).
// - If it may have native user code, we send it anyway.
//-----------------------------------------------------------------------------
bool ShouldSendUMLeafChain(Thread * pThread)
{
    // If we're in shutdown, don't bother trying to sniff for an UM leaf chain.
    // @todo - we'd like to never even be trying to stack trace on shutdown, this
    // comes up when we do helper thread duty on shutdown.
    if (g_fProcessDetach)
    {
        return false;
    }

    if (pThread->IsUnstarted() || pThread->IsDead())
    {
        return false;
    }

    // If a thread is suspended for sync purposes, it was suspended from managed
    // code and the only native code is a mscorwks hijack.
    // There are a few caveats here:
    // - This means a thread will lose its UM chain. But what if a user inactive thread
    //   enters the CLR from native code and hits a GC toggle? We'll lose that entire
    //   UM chain.
    // - at a managed-only stop, preemptive threads are still live. Thus a thread
    //   may not have this state set, run a little, try to enter the GC, and then get
    //   this state set. Thus we'll lose the UM chain right out from under our noses.
    Thread::ThreadState ts = pThread->GetSnapshotState();
    if ((ts & Thread::TS_SyncSuspended) != 0)
    {
        // If we've been stopped inside the runtime (eg, at a gc-toggle) but
        // not actually at a stopping context, then the thread must have some
        // leafframes in mscorwks.
        // We can detect this case by checking if GetManagedStoppedCtx(pThread) == NULL.
        // This is very significant for notifications (like LogMessage) that are
        // dispatches from within mscorwks w/o a filter context.
        // We don't send a UM chain for these cases because that would
        // cause managed debug events to be dispatched w/ UM chains on the callstack.
        // And that just seems wrong ...
        return false;
    }

#ifdef FEATURE_HIJACK
    if ((ts & Thread::TS_Hijacked) != 0)
    {
        return false;
    }
#endif

    // This is pretty subjective. If we have a thread stopped in a managed sleep,
    // managed wait, or managed join, then don't bother showing the native end of the
    // stack. This check can be removed w/o impacting correctness.
    // @todo - may be a problem if Sleep/Wait/Join go through a hosting interface
    // which lands us in native user code.
    // Partial User state is sufficient because that has the bit we're checking against.
    if (IsInWaitSleepJoin(pThread))
    {
        return false;
    }

    // If we're tracing ourselves, we must be in managed code.
    // Native user code can't initiate a managed stackwalk.
    if (pThread == GetThread())
    {
        return false;
    }

    return true;
}

//-----------------------------------------------------------------------------
// Prepare a Leaf UM chain. This assumes we should send an UM leaf chain.
// Returns true if we actually prep for an UM leaf,
// false if we don't.
//-----------------------------------------------------------------------------
bool PrepareLeafUMChain(DebuggerFrameData * pData, CONTEXT * pCtxTemp)
{
    // Get the current user context (depends on if we're the active thread or not).
    Thread * thread = pData->GetThread();
    REGDISPLAY * pRDSrc = NULL;
    REGDISPLAY rdTemp;

#ifdef _DEBUG
    // Anybody stopped at an native debug event (and hijacked) should have a filter ctx.
    if (thread->GetInteropDebuggingHijacked() && (thread->GetFrame() != NULL) && (thread->GetFrame() != FRAME_TOP))
    {
        _ASSERTE(g_pEEInterface->GetThreadFilterContext(thread) != NULL);
    }
#endif

    // If we're hijacked, then we assume we're in native code. This covers the active thread case.
    if (g_pEEInterface->GetThreadFilterContext(thread) != NULL)
    {
        LOG((LF_CORDB, LL_EVERYTHING, "DWS - sending special case UM Chain.\n"));

        // This will get it from the filter ctx.
        pRDSrc = &(pData->regDisplay);
    }
    else
    {
        // For inactive thread, we may not be hijacked. So just get the current ctx.
        // This will use a filter ctx if we have one.
        // We may suspend a thread in native code w/o hijacking it, so it's still at it's live context.
        // This can happen when we get a debug event on 1 thread; and then switch to look at another thread.
        // This is very common when debugging apps w/ cross-thread causality (including COM STA objects)
        pRDSrc = &rdTemp;

        bool fOk;

        // We need to get thread's context (InitRegDisplay will do that under the covers).
        // If this is our thread, we're in bad shape. Fortunately that should never happen.
        _ASSERTE(thread != GetThread());

        Thread::SuspendThreadResult str = thread->SuspendThread();
        if (str != Thread::STR_Success)
        {
            return false;
        }

        // @todo - this context is less important because the RS will overwrite it with the live context.
        // We don't need to even bother getting it. We can just initialize the regdisplay w/ a sentinel.
        fOk = g_pEEInterface->InitRegDisplay(thread, pRDSrc, pCtxTemp, false);
        thread->ResumeThread();

        if (!fOk)
        {
            return false;
        }
    }

    // By now we have a Regdisplay from somewhere (filter ctx, current ctx, etc).
    _ASSERTE(pRDSrc != NULL);

    // If we're stopped in mscorwks (b/c of a handler for a managed BP), then the filter ctx will
    // still be set out in jitted code.
    // If our regdisplay is out in UM code , then send a UM chain.
    BYTE* ip = (BYTE*) GetControlPC(pRDSrc);

    if (g_pEEInterface->IsManagedNativeCode(ip))
    {
        return false;
    }

    LOG((LF_CORDB, LL_EVERYTHING, "DWS - sending leaf UM Chain.\n"));

    // Get the ending fp. We may not have any managed goo on the stack (eg, native thread called
    // into a managed method and then returned from it).
    FramePointer fpRoot;
    Frame * pFrame = thread->GetFrame();
    if ((pFrame != NULL) && (pFrame != FRAME_TOP))
    {
        fpRoot = FramePointer::MakeFramePointer((void*) pFrame);
    }
    else
    {
        fpRoot= ROOT_MOST_FRAME;
    }

    // Start tracking an UM chain. We won't actually send the UM chain until
    // we hit managed code. Since this is the leaf, we don't need to send an
    // Enter-Managed chain either.
    pData->BeginTrackingUMChain(fpRoot, pRDSrc);

    return true;
}
#endif // defined(TARGET_X86) && defined(FEATURE_INTEROP_DEBUGGING)

//-----------------------------------------------------------------------------
// Entry function for the debugger's stackwalking layer.
// This will invoke pCallback(FrameInfo * pInfo, pData) for each 'frame'
//-----------------------------------------------------------------------------
StackWalkAction DebuggerWalkStack(Thread *thread,
                                  FramePointer targetFP,
                                  CONTEXT *context,
                                  BOOL contextValid,
                                  DebuggerStackCallback pCallback,
                                  void *pData,
                                  BOOL fIgnoreNonmethodFrames)
{
    _ASSERTE(context != NULL);

    DebuggerFrameData data;

    StackWalkAction result = SWA_CONTINUE;
    bool fRegInit = false;

    LOG((LF_CORDB, LL_EVERYTHING, "DebuggerWalkStack called\n"));

    if(contextValid || g_pEEInterface->GetThreadFilterContext(thread) != NULL)
    {
        fRegInit = g_pEEInterface->InitRegDisplay(thread, &data.regDisplay, context, contextValid != 0);
        _ASSERTE(fRegInit);
    }

    if (!fRegInit)
    {
#if defined(CONTEXT_EXTENDED_REGISTERS)

        // Note: the size of a CONTEXT record contains the extended registers, but the context pointer we're given
        // here may not have room for them. Therefore, we only set the non-extended part of the context to 0.
        memset((void *)context, 0, offsetof(CONTEXT, ExtendedRegisters));
#else
        memset((void *)context, 0, sizeof(CONTEXT));
#endif
        memset((void *)&data, 0, sizeof(data));

#if defined(TARGET_X86)
        // @todo - this seems pointless. context->Eip will be 0; and when we copy it over to the DebuggerRD,
        // the context will be completely null.
        data.regDisplay.ControlPC = context->Eip;
        data.regDisplay.PCTAddr = (TADDR)&(context->Eip);

#else
        //
        // @TODO: this should be the code for all platforms now that it uses FillRegDisplay,
        // which encapsulates the platform variances.  This could all be avoided if we used
        // StackWalkFrames instead of StackWalkFramesEx.
        //
        ::SetIP(context, 0);
        ::SetSP(context, 0);
        FillRegDisplay(&data.regDisplay, context);

        ::SetSP(data.regDisplay.pCallerContext, 0);
#endif
    }

    data.Init(thread, targetFP, fIgnoreNonmethodFrames, pCallback, pData);

#if defined(TARGET_X86) && defined(FEATURE_INTEROP_DEBUGGING)
    CONTEXT ctxTemp; // Temp context for Leaf UM chain. Need it here so that it stays alive for whole stackwalk.

    // Important case for Interop Debugging -
    // We may be stopped in Native Code (perhaps at a BP) w/ no Transition frame on the stack!
    // We still need to send an UM Chain for this case.
    if (ShouldSendUMLeafChain(thread))
    {
        // It's possible this may fail (eg, GetContext fails on win9x), so we're not guaranteed
        // to be sending an UM chain even though we want to.
        PrepareLeafUMChain(&data, &ctxTemp);
    }
#endif // defined(TARGET_X86) && defined(FEATURE_INTEROP_DEBUGGING)

    // NOTE(review): result is still SWA_CONTINUE at this point (it is only initialized
    // above); the SWA_FAILED comparison is defensive.
    if ((result != SWA_FAILED) && !thread->IsUnstarted() && !thread->IsDead())
    {
        int flags = 0;

        result = g_pEEInterface->StackWalkFramesEx(thread, &data.regDisplay,
                                                   DebuggerWalkStackProc,
                                                   &data,
                                                   flags | HANDLESKIPPEDFRAMES | NOTIFY_ON_U2M_TRANSITIONS |
                                                   ALLOW_ASYNC_STACK_WALK | SKIP_GSCOOKIE_CHECK);
    }
    else
    {
        result = SWA_DONE;
    }

    if (result == SWA_DONE || result == SWA_FAILED) // SWA_FAILED if no frames
    {
        // Since Debugger StackWalk callbacks are delayed 1 frame from EE stackwalk callbacks, we
        // have to touch up the 1 leftover here.
        //
        // This is safe only because we use the REGDISPLAY of the native marker callback for any subsequent
        // explicit frames which do not update the REGDISPLAY.  It's kind of fragile.  If we can change
        // the x86 real stackwalker to unwind one frame ahead of time, we can get rid of this code.
        if (data.needParentInfo)
        {
            data.info.fp = GetFramePointerForDebugger(&data, NULL);

            if (data.InvokeCallback(&data.info) == SWA_ABORT)
            {
                return SWA_ABORT;
            }
        }

        //
        // Top off the stack trace as necessary w/ a thread-start chain.
        //
        REGDISPLAY * pRegDisplay = &(data.regDisplay);
        if (data.IsTrackingUMChain())
        {
            // This is the common case b/c managed code gets called from native code.
            pRegDisplay = data.GetUMChainStartRD();
        }

        // All Thread starts in unmanaged code (at something like kernel32!BaseThreadStart),
        // so all ThreadStart chains must be unmanaged.
        // InvokeCallback will fabricate the EnterManaged chain if we haven't already sent one.
        data.info.InitForThreadStart(thread, pRegDisplay);
        result = data.InvokeCallback(&data.info);
    }

    return result;
}
/*---------------------------------------------------------------------------*\
Copyright (C) 2016 Applied CCM
-------------------------------------------------------------------------------
License
    This file is part of CAELUS.

    CAELUS is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    CAELUS is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with CAELUS.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "kOmegaSSTDDES.hpp"
#include "addToRunTimeSelectionTable.hpp"

namespace CML
{
namespace incompressible
{
namespace LESModels
{

// Register the model with the run-time selection mechanism so it can be
// chosen by name ("kOmegaSSTDDES") from the turbulence dictionary.
defineTypeNameAndDebug(kOmegaSSTDDES, 0);
addToRunTimeSelectionTable(LESModel, kOmegaSSTDDES, dictionary);

// DES blending function override for the Delayed DES variant:
// the base-class length-scale ratio Lt/(CDES*delta) is shielded by the
// (1 - F1) factor so the boundary layer stays in RANS mode, and the result
// is clipped below at 1 (pure RANS).
// NOTE(review): this variant uses F1 as the shielding function; confirm
// against the model reference that F1 (rather than another blending
// function) is intended here.
tmp<volScalarField> kOmegaSSTDDES::FDES() const
{
    return max
    (
        Lt()/(CDES_*delta())*(scalar(1.0) - F1()),
        scalar(1.0)
    );
}

// Construct from components; all state is handled by the kOmegaSSTDES base,
// only FDES() differs for the delayed variant.
kOmegaSSTDDES::kOmegaSSTDDES
(
    volVectorField const& U,
    surfaceScalarField const& phi,
    transportModel& transport,
    word const& turbulenceModelName,
    word const& modelName
)
:
    kOmegaSSTDES(U, phi, transport, turbulenceModelName, modelName)
{}

}
}
}
#include "guiutil.h"
#include "bitcoinaddressvalidator.h"
#include "walletmodel.h"
#include "bitcoinunits.h"

#include <QString>
#include <QDateTime>
#include <QDoubleValidator>
#include <QFont>
#include <QLineEdit>
#include <QUrl>
#include <QTextDocument> // For Qt::escape
#include <QAbstractItemView>
#include <QApplication>
#include <QClipboard>
#include <QFileDialog>
#include <QDesktopServices>
#include <QThread>

namespace GUIUtil
{

// Format a timestamp as "<system short date> hh:mm".
QString dateTimeStr(const QDateTime &date)
{
    return date.date().toString(Qt::SystemLocaleShortDate) + QString(" ") + date.toString("hh:mm");
}

// Overload taking a unix timestamp (seconds since epoch).
QString dateTimeStr(qint64 nTime)
{
    return dateTimeStr(QDateTime::fromTime_t((qint32)nTime));
}

// Monospace font used for rendering addresses.
QFont bitcoinAddressFont()
{
    QFont font("Monospace");
    font.setStyleHint(QFont::TypeWriter);
    return font;
}

// Configure a line edit for address entry: length limit, validator, font.
void setupAddressWidget(QLineEdit *widget, QWidget *parent)
{
    widget->setMaxLength(BitcoinAddressValidator::MaxAddressLength);
    widget->setValidator(new BitcoinAddressValidator(parent));
    widget->setFont(bitcoinAddressFont());
}

// Configure a line edit for amount entry: 8 decimals, non-negative,
// right-aligned.
void setupAmountWidget(QLineEdit *widget, QWidget *parent)
{
    QDoubleValidator *amountValidator = new QDoubleValidator(parent);
    amountValidator->setDecimals(8);
    amountValidator->setBottom(0.0);
    widget->setValidator(amountValidator);
    widget->setAlignment(Qt::AlignRight|Qt::AlignVCenter);
}

// Parse a payment URI of the form SPOTS:<address>?label=...&amount=...
// into *out. Returns false on an unrecognized scheme, an unparseable
// amount, or an unknown required ("req-" prefixed) parameter.
bool parseBitcoinURI(const QUrl &uri, SendCoinsRecipient *out)
{
    // NOTE(review): QUrl::scheme() normally reports the scheme lower-cased,
    // so comparing against the all-caps literal "SPOTS" may never match —
    // confirm against the URIs callers actually construct.
    if(uri.scheme() != QString("SPOTS"))
        return false;

    SendCoinsRecipient rv;
    rv.address = uri.path();
    rv.amount = 0;

    QList<QPair<QString, QString> > items = uri.queryItems();
    for (QList<QPair<QString, QString> >::iterator i = items.begin(); i != items.end(); i++)
    {
        bool fShouldReturnFalse = false;
        if (i->first.startsWith("req-"))
        {
            // BIP 21 style required parameter: strip the prefix and fail
            // unless the remaining key is one we understand.
            i->first.remove(0, 4);
            fShouldReturnFalse = true;
        }

        if (i->first == "label")
        {
            rv.label = i->second;
            fShouldReturnFalse = false;
        }
        else if (i->first == "amount")
        {
            if(!i->second.isEmpty())
            {
                if(!BitcoinUnits::parse(BitcoinUnits::BTC, i->second, &rv.amount))
                {
                    return false;
                }
            }
            fShouldReturnFalse = false;
        }

        if (fShouldReturnFalse)
            return false;
    }
    if(out)
    {
        *out = rv;
    }
    return true;
}

// Convenience overload taking the URI as a string.
bool parseBitcoinURI(QString uri, SendCoinsRecipient *out)
{
    // Convert SPOTS:// to SPOTS:
    //
    //    Cannot handle this later, because SPOTS:// will cause Qt to see the
    //    part after // as host, which will lowercase it (and thus invalidate
    //    the address).
    if(uri.startsWith("SPOTS://"))
    {
        // "SPOTS://" is 8 characters long; the previous count of 9 also
        // removed the first character of the address that follows it.
        uri.replace(0, 8, "SPOTS:");
    }
    QUrl uriInstance(uri);
    return parseBitcoinURI(uriInstance, out);
}

// HTML-escape a string for safe display; optionally convert newlines to <br>.
QString HtmlEscape(const QString& str, bool fMultiLine)
{
    QString escaped = Qt::escape(str);
    if(fMultiLine)
    {
        escaped = escaped.replace("\n", "<br>\n");
    }
    return escaped;
}

QString HtmlEscape(const std::string& str, bool fMultiLine)
{
    return HtmlEscape(QString::fromStdString(str), fMultiLine);
}

// Copy the given column of the first selected row of a view to the clipboard.
void copyEntryData(QAbstractItemView *view, int column, int role)
{
    if(!view || !view->selectionModel())
        return;
    QModelIndexList selection = view->selectionModel()->selectedRows(column);

    if(!selection.isEmpty())
    {
        // Copy first item
        QApplication::clipboard()->setText(selection.at(0).data(role).toString());
    }
}

// Show a save-file dialog; appends the suffix implied by the chosen filter
// when the user did not type one, and reports that suffix via
// selectedSuffixOut (may be null).
QString getSaveFileName(QWidget *parent, const QString &caption,
                        const QString &dir,
                        const QString &filter,
                        QString *selectedSuffixOut)
{
    QString selectedFilter;
    QString myDir;
    if(dir.isEmpty()) // Default to user documents location
    {
        myDir = QDesktopServices::storageLocation(QDesktopServices::DocumentsLocation);
    }
    else
    {
        myDir = dir;
    }
    QString result = QFileDialog::getSaveFileName(parent, caption, myDir, filter, &selectedFilter);

    /* Extract first suffix from filter pattern "Description (*.foo)" or "Description (*.foo *.bar ...) */
    QRegExp filter_re(".* \\(\\*\\.(.*)[ \\)]");
    QString selectedSuffix;
    if(filter_re.exactMatch(selectedFilter))
    {
        selectedSuffix = filter_re.cap(1);
    }

    /* Add suffix if needed */
    QFileInfo info(result);
    if(!result.isEmpty())
    {
        if(info.suffix().isEmpty() && !selectedSuffix.isEmpty())
        {
            /* No suffix specified, add selected suffix */
            if(!result.endsWith("."))
                result.append(".");
            result.append(selectedSuffix);
        }
    }

    /* Return selected suffix if asked to */
    if(selectedSuffixOut)
    {
        *selectedSuffixOut = selectedSuffix;
    }
    return result;
}

// Pick a Qt connection type that blocks when signalling across threads but
// avoids deadlocking when already on the GUI thread.
Qt::ConnectionType blockingGUIThreadConnection()
{
    if(QThread::currentThread() != QCoreApplication::instance()->thread())
    {
        return Qt::BlockingQueuedConnection;
    }
    else
    {
        return Qt::DirectConnection;
    }
}

// True if the widget-local point p is actually showing widget w on screen.
bool checkPoint(const QPoint &p, const QWidget *w)
{
    QWidget *atW = qApp->widgetAt(w->mapToGlobal(p));
    if(!atW) return false;
    return atW->topLevelWidget() == w;
}

// True if any of the window's corners or its centre is covered by another
// window (used to decide whether a notification is needed).
bool isObscured(QWidget *w)
{
    return !(checkPoint(QPoint(0, 0), w)
        && checkPoint(QPoint(w->width() - 1, 0), w)
        && checkPoint(QPoint(0, w->height() - 1), w)
        && checkPoint(QPoint(w->width() - 1, w->height() - 1), w)
        && checkPoint(QPoint(w->width()/2, w->height()/2), w));
}

} // namespace GUIUtil
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
*  http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/

#include <aws/dynamodb/model/TimeToLiveDescription.h>
#include <aws/core/utils/json/JsonSerializer.h>

#include <utility>

using namespace Aws::Utils::Json;
using namespace Aws::Utils;

namespace Aws
{
namespace DynamoDB
{
namespace Model
{

// Default description: status NOT_SET and neither field flagged as present.
TimeToLiveDescription::TimeToLiveDescription() :
    m_timeToLiveStatus(TimeToLiveStatus::NOT_SET),
    m_timeToLiveStatusHasBeenSet(false),
    m_attributeNameHasBeenSet(false)
{
}

// Deserializing constructor: default-initialize, then reuse operator= to
// populate from the wire representation.
TimeToLiveDescription::TimeToLiveDescription(const JsonValue& jsonValue) :
    m_timeToLiveStatus(TimeToLiveStatus::NOT_SET),
    m_timeToLiveStatusHasBeenSet(false),
    m_attributeNameHasBeenSet(false)
{
  *this = jsonValue;
}

// Pull each optional field out of the JSON document, recording which ones
// were actually present. The two fields are independent, so parse order is
// immaterial.
TimeToLiveDescription& TimeToLiveDescription::operator =(const JsonValue& jsonValue)
{
  if (jsonValue.ValueExists("AttributeName"))
  {
    m_attributeName = jsonValue.GetString("AttributeName");
    m_attributeNameHasBeenSet = true;
  }

  if (jsonValue.ValueExists("TimeToLiveStatus"))
  {
    // The wire value is the status name; map it back onto the enum.
    m_timeToLiveStatus = TimeToLiveStatusMapper::GetTimeToLiveStatusForName(jsonValue.GetString("TimeToLiveStatus"));
    m_timeToLiveStatusHasBeenSet = true;
  }

  return *this;
}

// Serialize only the fields that have been set.
JsonValue TimeToLiveDescription::Jsonize() const
{
  JsonValue payload;

  if (m_timeToLiveStatusHasBeenSet)
  {
    payload.WithString("TimeToLiveStatus", TimeToLiveStatusMapper::GetNameForTimeToLiveStatus(m_timeToLiveStatus));
  }

  if (m_attributeNameHasBeenSet)
  {
    payload.WithString("AttributeName", m_attributeName);
  }

  return payload;
}

} // namespace Model
} // namespace DynamoDB
} // namespace Aws
//================================================================================================= /*! // \file src/mathtest/dmatsmatadd/UDaHCb.cpp // \brief Source file for the UDaHCb dense matrix/sparse matrix addition math test // // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= //************************************************************************************************* // Includes //************************************************************************************************* #include <cstdlib> #include <iostream> #include <blaze/math/CompressedMatrix.h> #include <blaze/math/DynamicMatrix.h> #include <blaze/math/HermitianMatrix.h> #include <blaze/math/UpperMatrix.h> #include <blazetest/mathtest/Creator.h> #include <blazetest/mathtest/dmatsmatadd/OperationTest.h> #include <blazetest/system/MathTest.h> //================================================================================================= // // MAIN FUNCTION // //================================================================================================= //************************************************************************************************* int main() { std::cout << " Running 'UDaHCb'..." 
<< std::endl; using blazetest::mathtest::NumericA; using blazetest::mathtest::NumericB; try { // Matrix type definitions using UDa = blaze::UpperMatrix< blaze::DynamicMatrix<NumericA> >; using HCb = blaze::HermitianMatrix< blaze::CompressedMatrix<NumericB> >; // Creator type definitions using CUDa = blazetest::Creator<UDa>; using CHCb = blazetest::Creator<HCb>; // Running tests with small matrices for( size_t i=0UL; i<=6UL; ++i ) { for( size_t j=0UL; j<=i*i; ++j ) { RUN_DMATSMATADD_OPERATION_TEST( CUDa( i ), CHCb( i, j ) ); } } // Running tests with large matrices RUN_DMATSMATADD_OPERATION_TEST( CUDa( 67UL ), CHCb( 67UL, 7UL ) ); RUN_DMATSMATADD_OPERATION_TEST( CUDa( 128UL ), CHCb( 128UL, 16UL ) ); } catch( std::exception& ex ) { std::cerr << "\n\n ERROR DETECTED during dense matrix/sparse matrix addition:\n" << ex.what() << "\n"; return EXIT_FAILURE; } return EXIT_SUCCESS; } //*************************************************************************************************
/*
    Tracktion Engine — Copyright 2018 Tracktion Software Corporation.
    www.tracktion.com
    Tracktion Engine uses a GPL/commercial licence - see LICENCE.md for details.
*/

namespace tracktion_engine
{

// Path of the XML settings file inside the application's preferences folder.
static File getApplicationSettingsFile()
{
    return Engine::getEngines()[0]->getPropertyStorage().getAppPrefsFolder().getChildFile ("Settings.xml");
}

// PropertiesFile options: save at most every 2 s, stored as XML.
static PropertiesFile::Options getSettingsOptions()
{
    PropertiesFile::Options opts;
    opts.millisecondsBeforeSaving = 2000;
    opts.storageFormat = PropertiesFile::storeAsXML;
    return opts;
}

// Process-wide settings store; DeletedAtShutdown tears it down automatically.
struct ApplicationSettings  : public PropertiesFile,
                              public DeletedAtShutdown
{
    ApplicationSettings()
        : PropertiesFile (getApplicationSettingsFile(), getSettingsOptions())
    {}

    ~ApplicationSettings()
    {
        clearSingletonInstance();
    }

    JUCE_DECLARE_SINGLETON (ApplicationSettings, false)
};

PropertiesFile* getApplicationSettings()
{
    return ApplicationSettings::getInstance();
}

JUCE_IMPLEMENT_SINGLETON (ApplicationSettings)

//==============================================================================
// Maps each SettingID enumerator to the key string it is stored under in the
// settings file. Note the key strings are not uniformly cased and several
// differ from the enumerator name — presumably legacy keys kept for backward
// compatibility with existing settings files, so do not "fix" them.
String PropertyStorage::settingToString (SettingID setting)
{
    switch (setting)
    {
        case SettingID::audio_device_setup:           return "audio_device_setup";
        case SettingID::audiosettings:                return "audiosettings";
        case SettingID::addAntiDenormalNoise:         return "addAntiDenormalNoiseXXX"; // This setting is obsolete (hopefully)
        case SettingID::autoFreeze:                   return "autoFreeze";
        case SettingID::autoTempoMatch:               return "AutoTempoMatch";
        case SettingID::autoTempoDetect:              return "AutoTempoDetect";
        case SettingID::automapVst:                   return "AutomapVst";
        case SettingID::automapNative:                return "AutomapNative";
        case SettingID::automapGuids1:                return "AutomapGuids1";
        case SettingID::automapGuids2:                return "AutomapGuids2";
        case SettingID::cacheSizeSamples:             return "cacheSizeSamples";
        case SettingID::clickTrackMidiNoteBig:        return "clickTrackMidiNoteBig";
        case SettingID::clickTrackMidiNoteLittle:     return "clickTrackMidiNoteLittle";
        case SettingID::clickTrackSampleSmall:        return "clickTrackSampleSmall";
        case SettingID::clickTrackSampleBig:          return "clickTrackSampleBig";
        case SettingID::crossfadeBlock:               return "crossfadeBlock";
        case SettingID::compCrossfadeMs:              return "compCrossfadeMs";
        case SettingID::countInMode:                  return "countInMode";
        case SettingID::customMidiControllers:        return "customMidiControllers";
        case SettingID::deadMansPedal:                return "deadMansPedal";
        case SettingID::cpu:                          return "cpu";
        // Keys below omit the "Out" from the enumerator name — legacy naming.
        case SettingID::defaultMidiOutDevice:         return "defaultMidiDevice";
        case SettingID::defaultWaveOutDevice:         return "defaultWaveDevice";
        case SettingID::defaultMidiInDevice:          return "defaultMidiInDevice";
        case SettingID::defaultWaveInDevice:          return "defaultWaveInDevice";
        case SettingID::externControlIn:              return "externControlIn";
        case SettingID::externControlOut:             return "externControlOut";
        case SettingID::externControlShowSelection:   return "externControlShowSelection";
        case SettingID::externControlSelectionColour: return "externControlSelectionColour";
        case SettingID::externControlEnable:          return "externControlEnable";
        case SettingID::externOscInputPort:           return "externOscInputPort";
        case SettingID::externOscOutputPort:          return "externOscOutputPort";
        case SettingID::externOscOutputAddr:          return "externOscOutputAddr";
        case SettingID::filterControlMappingPresets:  return "FilterControlMappingPresets";
        case SettingID::filterGui:                    return "filterGui";
        case SettingID::findExamples:                 return "findExamples";
        case SettingID::fitClipsToRegion:             return "fitClipsToRegion";
        case SettingID::freezePoint:                  return "freezePoint";
        case SettingID::hasEnabledMidiDefaultDevs:    return "hasEnabledMidiDefaultDevs";
        case SettingID::knownPluginList:              return "knownPluginList";
        case SettingID::knownPluginList64:            return "knownPluginList64";
        case SettingID::lameEncoder:                  return "lameEncoder";
        case SettingID::lastClickTrackLevel:          return "lastClickTrackLevel";
        case SettingID::lastEditRender:               return "lastEditRender";
        case SettingID::lowLatencyBuffer:             return "lowLatencyBuffer";
        case SettingID::glideLength:                  return "glideLength";
        case SettingID::grooveTemplates:              return "GrooveTemplates";
        case SettingID::MCUoneTouchRecord:            return "MCUoneTouchRecord";
        case SettingID::midiin:                       return "midiin";
        case SettingID::midiout:                      return "midiout";
        case SettingID::midiEditorOctaves:            return "midiEditorOctaves";
        case SettingID::midiProgramManager:           return "MidiProgramManager";
        case SettingID::maxLatency:                   return "maxLatency";
        case SettingID::newMarker:                    return "newMarker";
        case SettingID::numThreadsForPluginScanning:  return "numThreadsForPluginScanning";
        case SettingID::projectList:                  return "projectList";
        case SettingID::projects:                     return "projects";
        case SettingID::recentProjects:               return "recentProjects";
        case SettingID::renameClipRenamesSource:      return "renameClipRenamesSource";
        case SettingID::renameMode:                   return "renameMode";
        case SettingID::renderRecentFilesList:        return "renderRecentFilesList";
        case SettingID::safeRecord:                   return "safeRecord";
        case SettingID::resetCursorOnStop:            return "resetCursorOnStop";
        case SettingID::retrospectiveRecord:          return "retrospectiveRecord";
        case SettingID::reWireEnabled:                return "ReWireEnabled";
        case SettingID::simplifyAfterRecording:       return "simplifyAfterRecording";
        case SettingID::sendControllerOffMessages:    return "sendControllerOffMessages";
        case SettingID::tempDirectory:                return "tempDirectory";
        case SettingID::snapCursor:                   return "snapCursor";
        case SettingID::trackExpansionMode:           return "trackExpansionMode";
        case SettingID::use64Bit:                     return "use64Bit";
        case SettingID::xFade:                        return "xFade";
        case SettingID::xtCount:                      return "xtCount";
        case SettingID::xtIndices:                    return "xtIndices";
        case SettingID::virtualmididevices:           return "virtualmididevices";
        case SettingID::virtualmidiin:                return "virtualmidiin";
        case SettingID::useSeparateProcessForScanning: return "useSeparateProcessForScanning";
        case SettingID::useRealtime:                  return "useRealtime";
        case SettingID::wavein:                       return "wavein";
        case SettingID::waveout:                      return "waveout";
        case SettingID::windowsDoubleClick:           return "windowsDoubleClick";
        case SettingID::renderFormat:                 return "renderFormat";
        case SettingID::trackRenderSampRate:          return "trackRenderSampRate";
        case SettingID::trackRenderBits:              return "trackRenderBits";
        case SettingID::bypassFilters:                return "bypassFilters";
        case SettingID::markedRegion:                 return "markedRegion";
        case SettingID::editClipRenderSampRate:       return "editClipRenderSampRate";
        case SettingID::editClipRenderBits:           return "editClipRenderBits";
        case SettingID::editClipRenderDither:         return "editClipRenderDither";
        case SettingID::editClipRealtime:             return "editClipRealtime";
        case SettingID::editClipRenderStereo:         return "editClipRenderStereo";
        case SettingID::editClipRenderNormalise:      return "editClipRenderNormalise";
        case SettingID::editClipRenderRMS:            return "editClipRenderRMS";
        case SettingID::editClipRenderRMSLevelDb:     return "editClipRenderRMSLevelDb";
        case SettingID::editClipRenderPeakLevelDb:    return "editClipRenderPeakLevelDb";
        case SettingID::editClipPassThroughFilters:   return "editClipPassThroughFilters";
        case SettingID::exportFormat:                 return "exportFormat";
        case SettingID::renderOnlySelectedClips:      return "renderOnlySelectedClips";
        case SettingID::renderOnlyMarked:             return "renderOnlyMarked";
        case SettingID::renderNormalise:              return "renderNormalise";
        case SettingID::renderRMS:                    return "renderRMS";
        case SettingID::renderRMSLevelDb:             return "renderRMSLevelDb";
        case SettingID::renderPeakLevelDb:            return "renderPeakLevelDb";
        case SettingID::renderTrimSilence:            return "renderTrimSilence";
        case SettingID::renderSampRate:               return "renderSampRate";
        case SettingID::renderStereo:                 return "renderStereo";
        case SettingID::renderBits:                   return "renderBits";
        case SettingID::renderDither:                 return "renderDither";
        case SettingID::quality:                      return "quality";
        case SettingID::addId3Info:                   return "addId3Info";
        case SettingID::realtime:                     return "realtime";
        case SettingID::passThroughFilters:           return "passThroughFilters";
        case SettingID::invalid:                      return "invalid";
    }

    // Unreachable for valid enumerators; keeps the compiler satisfied.
    return {};
}

//==============================================================================
// The cache folder is the same as the prefs folder in this implementation.
File PropertyStorage::getAppCacheFolder()
{
    return getAppPrefsFolder();
}

// Per-application folder under the user's application-data directory;
// created on first access if missing.
File PropertyStorage::getAppPrefsFolder()
{
    auto f = File::getSpecialLocation (File::userApplicationDataDirectory).getChildFile (getApplicationName());

    if (! f.isDirectory())
        f.createDirectory();

    return f;
}

String PropertyStorage::getUserName()
{
    return SystemStats::getFullUserName();
}

// Default implementation; hosts are expected to override with a real version.
String PropertyStorage::getApplicationVersion()
{
    return "Unknown";
}

//==============================================================================
// Simple property accessors: each keys the ApplicationSettings singleton by
// the string from settingToString().
void PropertyStorage::removeProperty (SettingID setting)
{
    auto& as = *ApplicationSettings::getInstance();
    as.removeValue (PropertyStorage::settingToString (setting));
}

var PropertyStorage::getProperty (SettingID setting, const var& defaultValue)
{
    auto& as = *ApplicationSettings::getInstance();
    return as.getValue (PropertyStorage::settingToString (setting), defaultValue);
}

void PropertyStorage::setProperty (SettingID setting, const var& value)
{
    auto& as = *ApplicationSettings::getInstance();
    as.setValue (PropertyStorage::settingToString (setting), value);
}

std::unique_ptr<XmlElement> PropertyStorage::getXmlProperty (SettingID setting)
{
    auto& as = *ApplicationSettings::getInstance();
    return std::unique_ptr<XmlElement> (as.getXmlValue (PropertyStorage::settingToString (setting)));
}

void PropertyStorage::setXmlProperty (SettingID setting, const juce::XmlElement& xml)
{
    auto& as = *ApplicationSettings::getInstance();
    as.setValue (PropertyStorage::settingToString (setting), &xml);
}

//==============================================================================
// "Item" variants: same as above but the key is suffixed with "_<item>" so a
// single SettingID can hold a family of per-item values.
void PropertyStorage::removePropertyItem (SettingID setting, StringRef item)
{
    auto& as = *ApplicationSettings::getInstance();
    as.removeValue (PropertyStorage::settingToString (setting) + "_" + item);
}

var PropertyStorage::getPropertyItem (SettingID setting, StringRef item, const var& defaultValue)
{
    auto& as = *ApplicationSettings::getInstance();
    return as.getValue (PropertyStorage::settingToString (setting) + "_" + item, defaultValue);
}

void PropertyStorage::setPropertyItem (SettingID setting, StringRef item, const var& value)
{
    auto& as = *ApplicationSettings::getInstance();
    as.setValue (PropertyStorage::settingToString (setting) + "_" + item, value);
}

std::unique_ptr<XmlElement> PropertyStorage::getXmlPropertyItem (SettingID setting, StringRef item)
{
    auto& as = *ApplicationSettings::getInstance();
    return std::unique_ptr<XmlElement> (as.getXmlValue (PropertyStorage::settingToString (setting) + "_" + item));
}

void PropertyStorage::setXmlPropertyItem (SettingID setting, StringRef item, const juce::XmlElement& xml)
{
    auto& as = *ApplicationSettings::getInstance();
    as.setValue (PropertyStorage::settingToString (setting) + "_" + item, &xml);
}

//==============================================================================
// Default load/save directory hooks: base implementation always uses the
// user's documents folder and ignores the stored-key argument; hosts may
// override to remember per-category locations.
File PropertyStorage::getDefaultLoadSaveDirectory (juce::StringRef)
{
    return File::getSpecialLocation (File::userDocumentsDirectory);
}

void PropertyStorage::setDefaultLoadSaveDirectory (juce::StringRef, const juce::File&)
{
}

File PropertyStorage::getDefaultLoadSaveDirectory (ProjectItem::Category)
{
    return File::getSpecialLocation (File::userDocumentsDirectory);
}

}
//===- LoopUtils.cpp ---- Misc utilities for loop transformation ----------===// // // Copyright 2019 The MLIR Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ============================================================================= // // This file implements miscellaneous loop transformation routines. // //===----------------------------------------------------------------------===// #include "mlir/Transforms/LoopUtils.h" #include "mlir/Analysis/AffineAnalysis.h" #include "mlir/Analysis/LoopAnalysis.h" #include "mlir/Analysis/SliceAnalysis.h" #include "mlir/Analysis/Utils.h" #include "mlir/Dialect/AffineOps/AffineOps.h" #include "mlir/Dialect/LoopOps/LoopOps.h" #include "mlir/IR/AffineMap.h" #include "mlir/IR/BlockAndValueMapping.h" #include "mlir/IR/Function.h" #include "mlir/Transforms/RegionUtils.h" #include "mlir/Transforms/Utils.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #define DEBUG_TYPE "LoopUtils" using namespace mlir; using llvm::SetVector; using llvm::SmallMapVector; /// Computes the cleanup loop lower bound of the loop being unrolled with /// the specified unroll factor; this bound will also be upper bound of the main /// part of the unrolled loop. Computes the bound as an AffineMap with its /// operands or a null map when the trip count can't be expressed as an affine /// expression. 
// Computes the cleanup-loop lower bound for unrolling 'forOp' by
// 'unrollFactor' (see the declaration comment above). On failure — a
// multi-result lower bound or a non-affine trip count — *map is set to a
// null AffineMap.
void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
                                    AffineMap *map,
                                    SmallVectorImpl<Value *> *operands,
                                    OpBuilder &b) {
  auto lbMap = forOp.getLowerBoundMap();

  // Single result lower bound map only.
  if (lbMap.getNumResults() != 1) {
    *map = AffineMap();
    return;
  }

  AffineMap tripCountMap;
  SmallVector<Value *, 4> tripCountOperands;
  buildTripCountMapAndOperands(forOp, &tripCountMap, &tripCountOperands);

  // Sometimes the trip count cannot be expressed as an affine expression.
  if (!tripCountMap) {
    *map = AffineMap();
    return;
  }

  unsigned step = forOp.getStep();
  // Materialize the (single-result) lower bound as an affine.apply.
  auto lb = b.create<AffineApplyOp>(forOp.getLoc(), lbMap,
                                    forOp.getLowerBoundOperands());

  // For each upper bound expr, get the range.
  // Eg: affine.for %i = lb to min (ub1, ub2),
  // where tripCountExprs yield (tr1, tr2), we create affine.apply's:
  // lb + tr1 - tr1 % ufactor, lb + tr2 - tr2 % ufactor; the results of all
  // these affine.apply's make up the cleanup loop lower bound.
  SmallVector<AffineExpr, 4> bumpExprs(tripCountMap.getNumResults());
  SmallVector<Value *, 4> bumpValues(tripCountMap.getNumResults());
  for (unsigned i = 0, e = tripCountMap.getNumResults(); i < e; i++) {
    auto tripCountExpr = tripCountMap.getResult(i);
    // tr_i rounded down to a multiple of unrollFactor, scaled by the step.
    bumpExprs[i] = (tripCountExpr - tripCountExpr % unrollFactor) * step;
    auto bumpMap = AffineMap::get(tripCountMap.getNumDims(),
                                  tripCountMap.getNumSymbols(), bumpExprs[i]);
    bumpValues[i] =
        b.create<AffineApplyOp>(forOp.getLoc(), bumpMap, tripCountOperands);
  }

  // New upper-bound results: dim 0 is the materialized lb, dims 1..e are the
  // bump values computed above.
  SmallVector<AffineExpr, 4> newUbExprs(tripCountMap.getNumResults());
  for (unsigned i = 0, e = bumpExprs.size(); i < e; i++)
    newUbExprs[i] = b.getAffineDimExpr(0) + b.getAffineDimExpr(i + 1);

  operands->clear();
  operands->push_back(lb);
  operands->append(bumpValues.begin(), bumpValues.end());
  *map = AffineMap::get(1 + tripCountMap.getNumResults(), 0, newUbExprs);
  // Simplify the map + operands.
  fullyComposeAffineMapAndOperands(map, operands);
  *map = simplifyAffineMap(*map);
  canonicalizeMapAndOperands(map, operands);
  // Remove any affine.apply's that became dead from the simplification above.
  for (auto *v : bumpValues) {
    if (v->use_empty()) {
      v->getDefiningOp()->erase();
    }
  }
  if (lb.use_empty())
    lb.erase();
}

/// Promotes the loop body of a forOp to its containing block if the forOp
/// was known to have a single iteration.
// TODO(bondhugula): extend this for arbitrary affine bounds.
LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
  Optional<uint64_t> tripCount = getConstantTripCount(forOp);
  if (!tripCount.hasValue() || tripCount.getValue() != 1)
    return failure();

  // TODO(mlir-team): there is no builder for a max.
  if (forOp.getLowerBoundMap().getNumResults() != 1)
    return failure();

  // Replaces all IV uses to its single iteration value.
  auto *iv = forOp.getInductionVar();
  Operation *op = forOp.getOperation();
  if (!iv->use_empty()) {
    if (forOp.hasConstantLowerBound()) {
      // Constant bound: the IV's only value is a constant; hoist it to the
      // top of the enclosing function body.
      OpBuilder topBuilder(op->getParentOfType<FuncOp>().getBody());
      auto constOp = topBuilder.create<ConstantIndexOp>(
          forOp.getLoc(), forOp.getConstantLowerBound());
      iv->replaceAllUsesWith(constOp);
    } else {
      AffineBound lb = forOp.getLowerBound();
      SmallVector<Value *, 4> lbOperands(lb.operand_begin(), lb.operand_end());
      OpBuilder builder(op->getBlock(), Block::iterator(op));
      if (lb.getMap() == builder.getDimIdentityMap()) {
        // No need of generating an affine.apply.
        iv->replaceAllUsesWith(lbOperands[0]);
      } else {
        auto affineApplyOp = builder.create<AffineApplyOp>(
            op->getLoc(), lb.getMap(), lbOperands);
        iv->replaceAllUsesWith(affineApplyOp);
      }
    }
  }
  // Move the loop body operations, except for terminator, to the loop's
  // containing block.
  auto *block = op->getBlock();
  forOp.getBody()->getOperations().back().erase();
  block->getOperations().splice(Block::iterator(op),
                                forOp.getBody()->getOperations());
  forOp.erase();
  return success();
}

/// Promotes all single iteration for op's in the FuncOp, i.e., moves
/// their body into the containing Block.
void mlir::promoteSingleIterationLoops(FuncOp f) {
  // Gathers all innermost loops through a post order pruned walk.
  f.walk([](AffineForOp forOp) { promoteIfSingleIteration(forOp); });
}

/// Generates a 'affine.for' op with the specified lower and upper bounds
/// while generating the right IV remappings for the shifted operations. The
/// operation blocks that go into the loop are specified in instGroupQueue
/// starting from the specified offset, and in that order; the first element of
/// the pair specifies the shift applied to that group of operations; note
/// that the shift is multiplied by the loop step before being applied. Returns
/// nullptr if the generated loop simplifies to a single iteration one.
// Builds one shifted "chunk" of the skewed loop: a new affine.for over
// [lbMap, ubMap) whose body is the queued operation groups (from 'offset'
// onward), each cloned with its IV remapped by its group's shift. Returns a
// null AffineForOp if the chunk promoted to a single iteration.
static AffineForOp
generateLoop(AffineMap lbMap, AffineMap ubMap,
             const std::vector<std::pair<uint64_t, ArrayRef<Operation *>>>
                 &instGroupQueue,
             unsigned offset, AffineForOp srcForInst, OpBuilder b) {
  SmallVector<Value *, 4> lbOperands(srcForInst.getLowerBoundOperands());
  SmallVector<Value *, 4> ubOperands(srcForInst.getUpperBoundOperands());

  assert(lbMap.getNumInputs() == lbOperands.size());
  assert(ubMap.getNumInputs() == ubOperands.size());

  auto loopChunk =
      b.create<AffineForOp>(srcForInst.getLoc(), lbOperands, lbMap, ubOperands,
                            ubMap, srcForInst.getStep());
  auto *loopChunkIV = loopChunk.getInductionVar();
  auto *srcIV = srcForInst.getInductionVar();

  BlockAndValueMapping operandMap;

  OpBuilder bodyBuilder = loopChunk.getBodyBuilder();
  for (auto it = instGroupQueue.begin() + offset, e = instGroupQueue.end();
       it != e; ++it) {
    uint64_t shift = it->first;
    auto insts = it->second;
    // All 'same shift' operations get added with their operands being
    // remapped to results of cloned operations, and their IV used remapped.
    // Generate the remapping if the shift is not zero: remappedIV = newIV -
    // shift.
    if (!srcIV->use_empty() && shift != 0) {
      auto ivRemap = bodyBuilder.create<AffineApplyOp>(
          srcForInst.getLoc(),
          bodyBuilder.getSingleDimShiftAffineMap(
              -static_cast<int64_t>(srcForInst.getStep() * shift)),
          loopChunkIV);
      operandMap.map(srcIV, ivRemap);
    } else {
      operandMap.map(srcIV, loopChunkIV);
    }
    // Clone everything except the terminator; the new loop already has one.
    for (auto *op : insts) {
      if (!isa<AffineTerminatorOp>(op))
        bodyBuilder.clone(*op, operandMap);
    }
  };
  if (succeeded(promoteIfSingleIteration(loopChunk)))
    return AffineForOp();
  return loopChunk;
}

/// Skew the operations in the body of a 'affine.for' operation with the
/// specified operation-wise shifts. The shifts are with respect to the
/// original execution order, and are multiplied by the loop 'step' before being
/// applied. A shift of zero for each operation will lead to no change.
// The skewing of operations with respect to one another can be used for // example to allow overlap of asynchronous operations (such as DMA // communication) with computation, or just relative shifting of operations // for better register reuse, locality or parallelism. As such, the shifts are // typically expected to be at most of the order of the number of operations. // This method should not be used as a substitute for loop distribution/fission. // This method uses an algorithm// in time linear in the number of operations // in the body of the for loop - (using the 'sweep line' paradigm). This method // asserts preservation of SSA dominance. A check for that as well as that for // memory-based dependence preservation check rests with the users of this // method. LogicalResult mlir::instBodySkew(AffineForOp forOp, ArrayRef<uint64_t> shifts, bool unrollPrologueEpilogue) { if (forOp.getBody()->begin() == std::prev(forOp.getBody()->end())) return success(); // If the trip counts aren't constant, we would need versioning and // conditional guards (or context information to prevent such versioning). The // better way to pipeline for such loops is to first tile them and extract // constant trip count "full tiles" before applying this. auto mayBeConstTripCount = getConstantTripCount(forOp); if (!mayBeConstTripCount.hasValue()) { LLVM_DEBUG(forOp.emitRemark("non-constant trip count loop not handled")); return success(); } uint64_t tripCount = mayBeConstTripCount.getValue(); assert(isInstwiseShiftValid(forOp, shifts) && "shifts will lead to an invalid transformation\n"); int64_t step = forOp.getStep(); unsigned numChildInsts = forOp.getBody()->getOperations().size(); // Do a linear time (counting) sort for the shifts. uint64_t maxShift = 0; for (unsigned i = 0; i < numChildInsts; i++) { maxShift = std::max(maxShift, shifts[i]); } // Such large shifts are not the typical use case. 
if (maxShift >= numChildInsts) { forOp.emitWarning("not shifting because shifts are unrealistically large"); return success(); } // An array of operation groups sorted by shift amount; each group has all // operations with the same shift in the order in which they appear in the // body of the 'affine.for' op. std::vector<std::vector<Operation *>> sortedInstGroups(maxShift + 1); unsigned pos = 0; for (auto &op : *forOp.getBody()) { auto shift = shifts[pos++]; sortedInstGroups[shift].push_back(&op); } // Unless the shifts have a specific pattern (which actually would be the // common use case), prologue and epilogue are not meaningfully defined. // Nevertheless, if 'unrollPrologueEpilogue' is set, we will treat the first // loop generated as the prologue and the last as epilogue and unroll these // fully. AffineForOp prologue; AffineForOp epilogue; // Do a sweep over the sorted shifts while storing open groups in a // vector, and generating loop portions as necessary during the sweep. A block // of operations is paired with its shift. std::vector<std::pair<uint64_t, ArrayRef<Operation *>>> instGroupQueue; auto origLbMap = forOp.getLowerBoundMap(); uint64_t lbShift = 0; OpBuilder b(forOp.getOperation()); for (uint64_t d = 0, e = sortedInstGroups.size(); d < e; ++d) { // If nothing is shifted by d, continue. if (sortedInstGroups[d].empty()) continue; if (!instGroupQueue.empty()) { assert(d >= 1 && "Queue expected to be empty when the first block is found"); // The interval for which the loop needs to be generated here is: // [lbShift, min(lbShift + tripCount, d)) and the body of the // loop needs to have all operations in instQueue in that order. AffineForOp res; if (lbShift + tripCount * step < d * step) { res = generateLoop( b.getShiftedAffineMap(origLbMap, lbShift), b.getShiftedAffineMap(origLbMap, lbShift + tripCount * step), instGroupQueue, 0, forOp, b); // Entire loop for the queued op groups generated, empty it. 
instGroupQueue.clear(); lbShift += tripCount * step; } else { res = generateLoop(b.getShiftedAffineMap(origLbMap, lbShift), b.getShiftedAffineMap(origLbMap, d), instGroupQueue, 0, forOp, b); lbShift = d * step; } if (!prologue && res) prologue = res; epilogue = res; } else { // Start of first interval. lbShift = d * step; } // Augment the list of operations that get into the current open interval. instGroupQueue.push_back({d, sortedInstGroups[d]}); } // Those operations groups left in the queue now need to be processed (FIFO) // and their loops completed. for (unsigned i = 0, e = instGroupQueue.size(); i < e; ++i) { uint64_t ubShift = (instGroupQueue[i].first + tripCount) * step; epilogue = generateLoop(b.getShiftedAffineMap(origLbMap, lbShift), b.getShiftedAffineMap(origLbMap, ubShift), instGroupQueue, i, forOp, b); lbShift = ubShift; if (!prologue) prologue = epilogue; } // Erase the original for op. forOp.erase(); if (unrollPrologueEpilogue && prologue) loopUnrollFull(prologue); if (unrollPrologueEpilogue && !epilogue && epilogue.getOperation() != prologue.getOperation()) loopUnrollFull(epilogue); return success(); } // Collect perfectly nested loops starting from `rootForOps`. Loops are // perfectly nested if each loop is the first and only non-terminator operation // in the parent loop. Collect at most `maxLoops` loops and append them to // `forOps`. template <typename T> void getPerfectlyNestedLoopsImpl( SmallVectorImpl<T> &forOps, T rootForOp, unsigned maxLoops = std::numeric_limits<unsigned>::max()) { for (unsigned i = 0; i < maxLoops; ++i) { forOps.push_back(rootForOp); Block &body = rootForOp.region().front(); if (body.begin() != std::prev(body.end(), 2)) return; rootForOp = dyn_cast<T>(&body.front()); if (!rootForOp) return; } } /// Get perfectly nested sequence of loops starting at root of loop nest /// (the first op being another AffineFor, and the second op - a terminator). 
/// A loop is perfectly nested iff: the first op in the loop's body is another
/// AffineForOp, and the second op is a terminator.
void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
                                   AffineForOp root) {
  getPerfectlyNestedLoopsImpl(nestedLoops, root);
}

/// Same as above but for loop::ForOp nests.
void mlir::getPerfectlyNestedLoops(SmallVectorImpl<loop::ForOp> &nestedLoops,
                                   loop::ForOp root) {
  getPerfectlyNestedLoopsImpl(nestedLoops, root);
}

/// Unrolls this loop completely. Requires a constant trip count; a
/// single-iteration loop is promoted (inlined) instead of unrolled.
LogicalResult mlir::loopUnrollFull(AffineForOp forOp) {
  Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
  if (mayBeConstantTripCount.hasValue()) {
    uint64_t tripCount = mayBeConstantTripCount.getValue();
    if (tripCount == 1) {
      return promoteIfSingleIteration(forOp);
    }
    return loopUnrollByFactor(forOp, tripCount);
  }
  return failure();
}

/// Unrolls this loop by the specified factor or by the trip count (if
/// constant), whichever is lower.
LogicalResult mlir::loopUnrollUpToFactor(AffineForOp forOp,
                                         uint64_t unrollFactor) {
  Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
  if (mayBeConstantTripCount.hasValue() &&
      mayBeConstantTripCount.getValue() < unrollFactor)
    return loopUnrollByFactor(forOp, mayBeConstantTripCount.getValue());
  return loopUnrollByFactor(forOp, unrollFactor);
}

/// Unrolls this loop by the specified factor. Returns success if the loop
/// is successfully unrolled.
LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
                                       uint64_t unrollFactor) {
  assert(unrollFactor >= 1 && "unroll factor should be >= 1");

  // Factor 1 means "no unrolling"; still promote a single-iteration loop.
  if (unrollFactor == 1)
    return promoteIfSingleIteration(forOp);

  // Nothing to unroll if the body holds only the terminator.
  if (forOp.getBody()->empty() ||
      forOp.getBody()->begin() == std::prev(forOp.getBody()->end()))
    return failure();

  // Loops whose lower bound is a max expression (multi-result map) aren't
  // supported for unrolling, since the trip count can't be expressed as a
  // single affine function in that case. However, one meaningful way to do
  // such unrolling would be to specialize the loop for the 'hotspot' case and
  // unroll that hotspot.
  if (forOp.getLowerBoundMap().getNumResults() != 1)
    return failure();

  // If the trip count is lower than the unroll factor, no unrolled body.
  // TODO(bondhugula): option to specify cleanup loop unrolling.
  Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
  if (mayBeConstantTripCount.hasValue() &&
      mayBeConstantTripCount.getValue() < unrollFactor)
    return failure();

  // Generate the cleanup loop if trip count isn't a multiple of unrollFactor.
  Operation *op = forOp.getOperation();
  if (getLargestDivisorOfTripCount(forOp) % unrollFactor != 0) {
    // Clone the whole loop right after the original; the clone becomes the
    // cleanup loop once its lower bound is adjusted.
    OpBuilder builder(op->getBlock(), ++Block::iterator(op));
    auto cleanupForInst = cast<AffineForOp>(builder.clone(*op));
    AffineMap cleanupMap;
    SmallVector<Value *, 4> cleanupOperands;
    getCleanupLoopLowerBound(forOp, unrollFactor, &cleanupMap, &cleanupOperands,
                             builder);
    assert(cleanupMap &&
           "cleanup loop lower bound map for single result lower bound maps "
           "can always be determined");
    cleanupForInst.setLowerBound(cleanupOperands, cleanupMap);
    // Promote the loop body up if this has turned into a single iteration loop.
    promoteIfSingleIteration(cleanupForInst);
    // Adjust upper bound of the original loop; this is the same as the lower
    // bound of the cleanup loop.
    forOp.setUpperBound(cleanupOperands, cleanupMap);
  }

  // Scale the step of loop being unrolled by unroll factor.
  int64_t step = forOp.getStep();
  forOp.setStep(step * unrollFactor);

  // Builder to insert unrolled bodies just before the terminator of the body of
  // 'forOp'.
  OpBuilder builder = forOp.getBodyBuilder();

  // Keep a pointer to the last non-terminator operation in the original block
  // so that we know what to clone (since we are doing this in-place).
  Block::iterator srcBlockEnd = std::prev(forOp.getBody()->end(), 2);

  // Unroll the contents of 'forOp' (append unrollFactor-1 additional copies).
  auto *forOpIV = forOp.getInductionVar();
  for (unsigned i = 1; i < unrollFactor; i++) {
    BlockAndValueMapping operandMap;

    // If the induction variable is used, create a remapping to the value for
    // this unrolled instance.
    if (!forOpIV->use_empty()) {
      // iv' = iv + 1/2/3...unrollFactor-1;
      auto d0 = builder.getAffineDimExpr(0);
      auto bumpMap = AffineMap::get(1, 0, {d0 + i * step});
      auto ivUnroll =
          builder.create<AffineApplyOp>(forOp.getLoc(), bumpMap, forOpIV);
      operandMap.map(forOpIV, ivUnroll);
    }

    // Clone the original body of 'forOp' (up to, but excluding, the
    // terminator).
    for (auto it = forOp.getBody()->begin(); it != std::next(srcBlockEnd);
         it++) {
      builder.clone(*it, operandMap);
    }
  }

  // Promote the loop body up if this has turned into a single iteration loop.
  promoteIfSingleIteration(forOp);
  return success();
}

/// Performs loop interchange on 'forOpA' and 'forOpB', where 'forOpB' is
/// nested within 'forOpA' as the only non-terminator operation in its block.
void mlir::interchangeLoops(AffineForOp forOpA, AffineForOp forOpB) {
  auto *forOpAInst = forOpA.getOperation();
  // Interchange is only defined for a perfectly nested pair.
  assert(&*forOpA.getBody()->begin() == forOpB.getOperation());

  auto &forOpABody = forOpA.getBody()->getOperations();
  auto &forOpBBody = forOpB.getBody()->getOperations();

  // 1) Splice forOpA's non-terminator operations (which is just forOpB) just
  // before forOpA (in ForOpA's parent's block) this should leave 'forOpA's
  // body containing only the terminator.
  forOpAInst->getBlock()->getOperations().splice(Block::iterator(forOpAInst),
                                                 forOpABody, forOpABody.begin(),
                                                 std::prev(forOpABody.end()));
  // 2) Splice forOpB's non-terminator operations into the beginning of forOpA's
  // body (this leaves forOpB's body containing only the terminator).
  forOpABody.splice(forOpABody.begin(), forOpBBody, forOpBBody.begin(),
                    std::prev(forOpBBody.end()));
  // 3) Splice forOpA into the beginning of forOpB's body.
  forOpBBody.splice(forOpBBody.begin(),
                    forOpAInst->getBlock()->getOperations(),
                    Block::iterator(forOpAInst));
}

// Checks each dependence component against the permutation to see if the
// desired loop interchange would violate dependences by making the
// dependence component lexicographically negative.
static bool checkLoopInterchangeDependences(
    const std::vector<llvm::SmallVector<DependenceComponent, 2>> &depCompsVec,
    ArrayRef<AffineForOp> loops, ArrayRef<unsigned> loopPermMap) {
  // Invert permutation map.
  unsigned maxLoopDepth = loops.size();
  llvm::SmallVector<unsigned, 4> loopPermMapInv;
  loopPermMapInv.resize(maxLoopDepth);
  for (unsigned i = 0; i < maxLoopDepth; ++i)
    loopPermMapInv[loopPermMap[i]] = i;

  // Check each dependence component against the permutation to see if the
  // desired loop interchange permutation would make the dependence vectors
  // lexicographically negative.
  // Example 1: [-1, 1][0, 0]
  // Example 2: [0, 0][-1, 1]
  for (unsigned i = 0, e = depCompsVec.size(); i < e; ++i) {
    const llvm::SmallVector<DependenceComponent, 2> &depComps = depCompsVec[i];
    assert(depComps.size() >= maxLoopDepth);
    // Check if the first non-zero dependence component is positive.
    // This iterates through loops in the desired order.
    for (unsigned j = 0; j < maxLoopDepth; ++j) {
      unsigned permIndex = loopPermMapInv[j];
      assert(depComps[permIndex].lb.hasValue());
      int64_t depCompLb = depComps[permIndex].lb.getValue();
      if (depCompLb > 0)
        break;
      if (depCompLb < 0)
        return false;
    }
  }
  return true;
}

/// Checks if the loop interchange permutation 'loopPermMap' of the perfectly
/// nested sequence of loops in 'loops' would violate dependences.
bool mlir::isValidLoopInterchangePermutation(ArrayRef<AffineForOp> loops,
                                             ArrayRef<unsigned> loopPermMap) {
  // Gather dependence components for dependences between all ops in loop nest
  // rooted at 'loops[0]', at loop depths in range [1, maxLoopDepth].
  assert(loopPermMap.size() == loops.size());
  unsigned maxLoopDepth = loops.size();
  std::vector<llvm::SmallVector<DependenceComponent, 2>> depCompsVec;
  getDependenceComponents(loops[0], maxLoopDepth, &depCompsVec);
  return checkLoopInterchangeDependences(depCompsVec, loops, loopPermMap);
}

/// Performs a sequence of loop interchanges of loops in perfectly nested
/// sequence of loops in 'loops', as specified by permutation in 'loopPermMap'.
/// Returns the index (into 'loops') of the loop that becomes the nest root.
unsigned mlir::interchangeLoops(ArrayRef<AffineForOp> loops,
                                ArrayRef<unsigned> loopPermMap) {
  Optional<unsigned> loopNestRootIndex;
  // Process from innermost to outermost, sinking each loop that the
  // permutation sends deeper.
  for (int i = loops.size() - 1; i >= 0; --i) {
    int permIndex = static_cast<int>(loopPermMap[i]);
    // Store the index of the for loop which will be the new loop nest root.
    if (permIndex == 0)
      loopNestRootIndex = i;
    if (permIndex > i) {
      // Sink loop 'i' by 'permIndex - i' levels deeper into the loop nest.
      sinkLoop(loops[i], permIndex - i);
    }
  }
  assert(loopNestRootIndex.hasValue());
  return loopNestRootIndex.getValue();
}

// Sinks all sequential loops to the innermost levels (while preserving
// relative order among them) and moves all parallel loops to the
// outermost (while again preserving relative order among them).
AffineForOp mlir::sinkSequentialLoops(AffineForOp forOp) {
  SmallVector<AffineForOp, 4> loops;
  getPerfectlyNestedLoops(loops, forOp);
  if (loops.size() < 2)
    return forOp;

  // Gather dependence components for dependences between all ops in loop nest
  // rooted at 'loops[0]', at loop depths in range [1, maxLoopDepth].
  unsigned maxLoopDepth = loops.size();
  std::vector<llvm::SmallVector<DependenceComponent, 2>> depCompsVec;
  getDependenceComponents(loops[0], maxLoopDepth, &depCompsVec);

  // Mark loops as either parallel or sequential. A loop is sequential if any
  // dependence has a non-zero component at its depth.
  llvm::SmallVector<bool, 8> isParallelLoop(maxLoopDepth, true);
  for (unsigned i = 0, e = depCompsVec.size(); i < e; ++i) {
    llvm::SmallVector<DependenceComponent, 2> &depComps = depCompsVec[i];
    assert(depComps.size() >= maxLoopDepth);
    for (unsigned j = 0; j < maxLoopDepth; ++j) {
      DependenceComponent &depComp = depComps[j];
      assert(depComp.lb.hasValue() && depComp.ub.hasValue());
      if (depComp.lb.getValue() != 0 || depComp.ub.getValue() != 0)
        isParallelLoop[j] = false;
    }
  }

  // Count the number of parallel loops.
  unsigned numParallelLoops = 0;
  for (unsigned i = 0, e = isParallelLoop.size(); i < e; ++i)
    if (isParallelLoop[i])
      ++numParallelLoops;

  // Compute permutation of loops that sinks sequential loops (and thus raises
  // parallel loops) while preserving relative order.
  llvm::SmallVector<unsigned, 4> loopPermMap(maxLoopDepth);
  unsigned nextSequentialLoop = numParallelLoops;
  unsigned nextParallelLoop = 0;
  for (unsigned i = 0; i < maxLoopDepth; ++i) {
    if (isParallelLoop[i]) {
      loopPermMap[i] = nextParallelLoop++;
    } else {
      loopPermMap[i] = nextSequentialLoop++;
    }
  }

  // Check if permutation 'loopPermMap' would violate dependences.
  if (!checkLoopInterchangeDependences(depCompsVec, loops, loopPermMap))
    return forOp;
  // Perform loop interchange according to permutation 'loopPermMap'.
  unsigned loopNestRootIndex = interchangeLoops(loops, loopPermMap);
  return loops[loopNestRootIndex];
}

/// Performs a series of loop interchanges to sink 'forOp' 'loopDepth' levels
/// deeper in the loop nest.
void mlir::sinkLoop(AffineForOp forOp, unsigned loopDepth) {
  for (unsigned i = 0; i < loopDepth; ++i) {
    // Each interchange swaps 'forOp' with its immediate child loop.
    AffineForOp nextForOp = cast<AffineForOp>(forOp.getBody()->front());
    interchangeLoops(forOp, nextForOp);
  }
}

// Factors out common behavior to add a new `iv` (resp. `iv` + `offset`) to the
// lower (resp. upper) loop bound. When called for both the lower and upper
// bounds, the resulting IR resembles:
//
// ```mlir
//    affine.for %i = max (`iv, ...) to min (`iv` + `offset`) {
//      ...
//    }
// ```
static void augmentMapAndBounds(OpBuilder &b, Value *iv, AffineMap *map,
                                SmallVector<Value *, 4> *operands,
                                int64_t offset = 0) {
  // Append a new result 'd_new + offset' where d_new is a fresh dimension
  // bound to `iv`, inserted right after the existing dims in `operands`.
  auto bounds = llvm::to_vector<4>(map->getResults());
  bounds.push_back(b.getAffineDimExpr(map->getNumDims()) + offset);
  operands->insert(operands->begin() + map->getNumDims(), iv);
  *map = AffineMap::get(map->getNumDims() + 1, map->getNumSymbols(), bounds);
  canonicalizeMapAndOperands(map, operands);
}

// Stripmines `forOp` by `factor` and sinks it under each of the `targets`.
// Stripmine-sink is a primitive building block for generalized tiling of
// imperfectly nested loops.
// This transformation is purely mechanical and does not check legality,
// profitability or even structural correctness. It is the user's
// responsibility to specify `targets` that are dominated by `forOp`.
// Returns the new AffineForOps, one per `targets`, nested immediately under
// each of the `targets`.
static SmallVector<AffineForOp, 8>
stripmineSink(AffineForOp forOp, uint64_t factor,
              ArrayRef<AffineForOp> targets) {
  // The outer (strip) loop keeps the original bounds but steps by
  // originalStep * factor.
  auto originalStep = forOp.getStep();
  auto scaledStep = originalStep * factor;
  forOp.setStep(scaledStep);

  auto *op = forOp.getOperation();
  OpBuilder b(op->getBlock(), ++Block::iterator(op));

  // Lower-bound map creation: inner loop starts at the outer IV.
  auto lbMap = forOp.getLowerBoundMap();
  SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
  augmentMapAndBounds(b, forOp.getInductionVar(), &lbMap, &lbOperands);

  // Upper-bound map creation: inner loop ends at outer IV + scaledStep
  // (min'ed with the original upper bound by the multi-result map).
  auto ubMap = forOp.getUpperBoundMap();
  SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands());
  augmentMapAndBounds(b, forOp.getInductionVar(), &ubMap, &ubOperands,
                      /*offset=*/scaledStep);

  auto *iv = forOp.getInductionVar();
  SmallVector<AffineForOp, 8> innerLoops;
  for (auto t : targets) {
    // Insert newForOp before the terminator of `t`.
    OpBuilder b = t.getBodyBuilder();
    auto newForOp = b.create<AffineForOp>(t.getLoc(), lbOperands, lbMap,
                                          ubOperands, ubMap, originalStep);
    auto begin = t.getBody()->begin();
    // Skip terminator and `newForOp` which is just before the terminator.
    auto nOps = t.getBody()->getOperations().size() - 2;
    newForOp.getBody()->getOperations().splice(
        newForOp.getBody()->getOperations().begin(),
        t.getBody()->getOperations(), begin, std::next(begin, nOps));
    // Inside the sunk body, the outer IV is replaced by the inner loop's IV.
    replaceAllUsesInRegionWith(iv, newForOp.getInductionVar(),
                               newForOp.region());
    innerLoops.push_back(newForOp);
  }

  return innerLoops;
}

// Same as above but for loop::ForOp: `factor` is an SSA value, so bounds are
// computed with arithmetic ops (min via cmp+select) instead of affine maps.
static Loops stripmineSink(loop::ForOp forOp, Value *factor,
                           ArrayRef<loop::ForOp> targets) {
  auto *originalStep = forOp.step();
  auto *iv = forOp.getInductionVar();

  OpBuilder b(forOp);
  forOp.setStep(b.create<MulIOp>(forOp.getLoc(), originalStep, factor));

  Loops innerLoops;
  for (auto t : targets) {
    // Save information for splicing ops out of t when done
    auto begin = t.getBody()->begin();
    auto nOps = t.getBody()->getOperations().size();

    // Insert newForOp before the terminator of `t`.
    OpBuilder b(t.getBodyBuilder());
    Value *stepped = b.create<AddIOp>(t.getLoc(), iv, forOp.step());
    // ub = min(original ub, iv + scaled step), computed as cmp + select.
    Value *less = b.create<CmpIOp>(t.getLoc(), CmpIPredicate::slt,
                                   forOp.upperBound(), stepped);
    Value *ub =
        b.create<SelectOp>(t.getLoc(), less, forOp.upperBound(), stepped);

    // Splice [begin, begin + nOps - 1) into `newForOp` and replace uses.
    auto newForOp = b.create<loop::ForOp>(t.getLoc(), iv, ub, originalStep);
    newForOp.getBody()->getOperations().splice(
        newForOp.getBody()->getOperations().begin(),
        t.getBody()->getOperations(), begin, std::next(begin, nOps - 1));
    replaceAllUsesInRegionWith(iv, newForOp.getInductionVar(),
                               newForOp.region());
    innerLoops.push_back(newForOp);
  }

  return innerLoops;
}

// Stripmines a `forOp` by `factor` and sinks it under a single `target`.
// Returns the new AffineForOps, nested immediately under `target`.
template <typename ForType, typename SizeType>
static ForType stripmineSink(ForType forOp, SizeType factor, ForType target) {
  // TODO(ntv): Use cheap structural assertions that targets are nested under
  // forOp and that targets are not nested under each other when DominanceInfo
  // exposes the capability. It seems overkill to construct a whole function
  // dominance tree at this point.
  auto res = stripmineSink(forOp, factor, ArrayRef<ForType>{target});
  assert(res.size() == 1 && "Expected 1 inner forOp");
  return res[0];
}

// Tiles each loop in `forOps` by the matching size, sinking it under the
// current set of targets; the loops produced at each step become the targets
// of the next step. Returns one vector of new loops per tiled loop.
template <typename ForType, typename SizeType>
static SmallVector<SmallVector<ForType, 8>, 8>
tileImpl(ArrayRef<ForType> forOps, ArrayRef<SizeType> sizes,
         ArrayRef<ForType> targets) {
  SmallVector<SmallVector<ForType, 8>, 8> res;
  SmallVector<ForType, 8> currentTargets(targets.begin(), targets.end());
  for (auto it : llvm::zip(forOps, sizes)) {
    auto step = stripmineSink(std::get<0>(it), std::get<1>(it), currentTargets);
    res.push_back(step);
    currentTargets = step;
  }
  return res;
}

SmallVector<SmallVector<AffineForOp, 8>, 8>
mlir::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
           ArrayRef<AffineForOp> targets) {
  return tileImpl(forOps, sizes, targets);
}

SmallVector<Loops, 8> mlir::tile(ArrayRef<loop::ForOp> forOps,
                                 ArrayRef<Value *> sizes,
                                 ArrayRef<loop::ForOp> targets) {
  return tileImpl(forOps, sizes, targets);
}

// Single-target convenience form: unwraps the one-element inner vectors
// produced by the multi-target overload.
template <typename ForType, typename SizeType>
static SmallVector<ForType, 8>
tileImpl(ArrayRef<ForType> forOps, ArrayRef<SizeType> sizes, ForType target) {
  SmallVector<ForType, 8> res;
  for (auto loops : tile(forOps, sizes, ArrayRef<ForType>{target})) {
    assert(loops.size() == 1);
    res.push_back(loops[0]);
  }
  return res;
}

SmallVector<AffineForOp, 8> mlir::tile(ArrayRef<AffineForOp> forOps,
                                       ArrayRef<uint64_t> sizes,
                                       AffineForOp target) {
  return tileImpl(forOps, sizes, target);
}

Loops mlir::tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value *> sizes,
                 loop::ForOp target) {
  return tileImpl(forOps, sizes, target);
}

Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp,
                                ArrayRef<Value *> sizes) {
  // Collect perfectly nested loops. If more size values provided than nested
  // loops available, truncate `sizes`.
  SmallVector<loop::ForOp, 4> forOps;
  forOps.reserve(sizes.size());
  getPerfectlyNestedLoopsImpl(forOps, rootForOp, sizes.size());
  if (forOps.size() < sizes.size())
    sizes = sizes.take_front(forOps.size());

  // The innermost collected loop is the single tiling target.
  return ::tile(forOps, sizes, forOps.back());
}

// Build the IR that performs ceil division of a positive value by a constant:
//    ceildiv(a, B) = divis(a + (B-1), B)
// where divis is rounding-to-zero division.
static Value *ceilDivPositive(OpBuilder &builder, Location loc,
                              Value *dividend, int64_t divisor) {
  assert(divisor > 0 && "expected positive divisor");
  assert(dividend->getType().isIndex() && "expected index-typed value");

  Value *divisorMinusOneCst = builder.create<ConstantIndexOp>(loc, divisor - 1);
  Value *divisorCst = builder.create<ConstantIndexOp>(loc, divisor);
  Value *sum = builder.create<AddIOp>(loc, dividend, divisorMinusOneCst);
  return builder.create<DivISOp>(loc, sum, divisorCst);
}

// Build the IR that performs ceil division of a positive value by another
// positive value:
//    ceildiv(a, b) = divis(a + (b - 1), b)
// where divis is rounding-to-zero division.
static Value *ceilDivPositive(OpBuilder &builder, Location loc,
                              Value *dividend, Value *divisor) {
  assert(dividend->getType().isIndex() && "expected index-typed value");

  Value *cstOne = builder.create<ConstantIndexOp>(loc, 1);
  Value *divisorMinusOne = builder.create<SubIOp>(loc, divisor, cstOne);
  Value *sum = builder.create<AddIOp>(loc, dividend, divisorMinusOne);
  return builder.create<DivISOp>(loc, sum, divisor);
}

// Hoist the ops within `outer` that appear before `inner`.
// Such ops include the ops that have been introduced by parametric tiling.
// Ops that come from triangular loops (i.e. that belong to the program slice
// rooted at `outer`) and ops that have side effects cannot be hoisted.
// Return failure when any op fails to hoist. static LogicalResult hoistOpsBetween(loop::ForOp outer, loop::ForOp inner) { SetVector<Operation *> forwardSlice; getForwardSlice(outer.getOperation(), &forwardSlice, [&inner](Operation *op) { return op != inner.getOperation(); }); LogicalResult status = success(); SmallVector<Operation *, 8> toHoist; for (auto &op : outer.getBody()->getOperations()) { // Stop when encountering the inner loop. if (&op == inner.getOperation()) break; // Skip over non-hoistable ops. if (forwardSlice.count(&op) > 0) { status = failure(); continue; } // Skip loop::ForOp, these are not considered a failure. if (op.getNumRegions() > 0) continue; // Skip other ops with regions. if (op.getNumRegions() > 0) { status = failure(); continue; } // Skip if op has side effects. // TODO(ntv): loads to immutable memory regions are ok. if (!op.hasNoSideEffect()) { status = failure(); continue; } toHoist.push_back(&op); } auto *outerForOp = outer.getOperation(); for (auto *op : toHoist) op->moveBefore(outerForOp); return status; } // Traverse the interTile and intraTile loops and try to hoist ops such that // bands of perfectly nested loops are isolated. // Return failure if either perfect interTile or perfect intraTile bands cannot // be formed. static LogicalResult tryIsolateBands(const TileLoops &tileLoops) { LogicalResult status = success(); auto &interTile = tileLoops.first; auto &intraTile = tileLoops.second; auto size = interTile.size(); assert(size == intraTile.size()); if (size <= 1) return success(); for (unsigned s = 1; s < size; ++s) status = succeeded(status) ? hoistOpsBetween(intraTile[0], intraTile[s]) : failure(); for (unsigned s = 1; s < size; ++s) status = succeeded(status) ? hoistOpsBetween(interTile[0], interTile[s]) : failure(); return status; } TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp, ArrayRef<int64_t> sizes) { // Collect prefectly nested loops. 
If more size values provided than nested // loops available, truncate `sizes`. SmallVector<loop::ForOp, 4> forOps; forOps.reserve(sizes.size()); getPerfectlyNestedLoopsImpl(forOps, rootForOp, sizes.size()); if (forOps.size() < sizes.size()) sizes = sizes.take_front(forOps.size()); // Compute the tile sizes such that i-th outer loop executes size[i] // iterations. Given that the loop current executes // numIterations = ceildiv((upperBound - lowerBound), step) // iterations, we need to tile with size ceildiv(numIterations, size[i]). SmallVector<Value *, 4> tileSizes; tileSizes.reserve(sizes.size()); for (unsigned i = 0, e = sizes.size(); i < e; ++i) { assert(sizes[i] > 0 && "expected strictly positive size for strip-mining"); auto forOp = forOps[i]; OpBuilder builder(forOp); auto loc = forOp.getLoc(); Value *diff = builder.create<SubIOp>(loc, forOp.upperBound(), forOp.lowerBound()); Value *numIterations = ceilDivPositive(builder, loc, diff, forOp.step()); Value *iterationsPerBlock = ceilDivPositive(builder, loc, numIterations, sizes[i]); tileSizes.push_back(iterationsPerBlock); } // Call parametric tiling with the given sizes. auto intraTile = tile(forOps, tileSizes, forOps.back()); TileLoops tileLoops = std::make_pair(forOps, intraTile); // TODO(ntv, zinenko) for now we just ignore the result of band isolation. // In the future, mapping decisions may be impacted by the ability to // isolate perfectly nested bands. tryIsolateBands(tileLoops); return tileLoops; } // Replaces all uses of `orig` with `replacement` except if the user is listed // in `exceptions`. 
static void replaceAllUsesExcept(Value *orig, Value *replacement, const SmallPtrSetImpl<Operation *> &exceptions) { for (auto &use : llvm::make_early_inc_range(orig->getUses())) { if (exceptions.count(use.getOwner()) == 0) use.set(replacement); } } // Transform a loop with a strictly positive step // for %i = %lb to %ub step %s // into a 0-based loop with step 1 // for %ii = 0 to ceildiv(%ub - %lb, %s) step 1 { // %i = %ii * %s + %lb // Insert the induction variable remapping in the body of `inner`, which is // expected to be either `loop` or another loop perfectly nested under `loop`. // Insert the definition of new bounds immediate before `outer`, which is // expected to be either `loop` or its parent in the loop nest. static void normalizeLoop(loop::ForOp loop, loop::ForOp outer, loop::ForOp inner) { OpBuilder builder(outer); Location loc = loop.getLoc(); // Check if the loop is already known to have a constant zero lower bound or // a constant one step. bool isZeroBased = false; if (auto ubCst = dyn_cast_or_null<ConstantIndexOp>(loop.lowerBound()->getDefiningOp())) isZeroBased = ubCst.getValue() == 0; bool isStepOne = false; if (auto stepCst = dyn_cast_or_null<ConstantIndexOp>(loop.step()->getDefiningOp())) isStepOne = stepCst.getValue() == 1; if (isZeroBased && isStepOne) return; // Compute the number of iterations the loop executes: ceildiv(ub - lb, step) // assuming the step is strictly positive. Update the bounds and the step // of the loop to go from 0 to the number of iterations, if necessary. // TODO(zinenko): introduce support for negative steps or emit dynamic asserts // on step positivity, whatever gets implemented first. 
Value *diff = builder.create<SubIOp>(loc, loop.upperBound(), loop.lowerBound()); Value *numIterations = ceilDivPositive(builder, loc, diff, loop.step()); loop.setUpperBound(numIterations); Value *lb = loop.lowerBound(); if (!isZeroBased) { Value *cst0 = builder.create<ConstantIndexOp>(loc, 0); loop.setLowerBound(cst0); } Value *step = loop.step(); if (!isStepOne) { Value *cst1 = builder.create<ConstantIndexOp>(loc, 1); loop.setStep(cst1); } // Insert code computing the value of the original loop induction variable // from the "normalized" one. builder.setInsertionPointToStart(inner.getBody()); Value *scaled = isStepOne ? loop.getInductionVar() : builder.create<MulIOp>(loc, loop.getInductionVar(), step); Value *shifted = isZeroBased ? scaled : builder.create<AddIOp>(loc, scaled, lb); SmallPtrSet<Operation *, 2> preserve{scaled->getDefiningOp(), shifted->getDefiningOp()}; replaceAllUsesExcept(loop.getInductionVar(), shifted, preserve); } void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) { if (loops.size() < 2) return; loop::ForOp innermost = loops.back(); loop::ForOp outermost = loops.front(); // 1. Make sure all loops iterate from 0 to upperBound with step 1. This // allows the following code to assume upperBound is the number of iterations. for (auto loop : loops) normalizeLoop(loop, outermost, innermost); // 2. Emit code computing the upper bound of the coalesced loop as product // of the number of iterations of all loops. OpBuilder builder(outermost); Location loc = outermost.getLoc(); Value *upperBound = outermost.upperBound(); for (auto loop : loops.drop_front()) upperBound = builder.create<MulIOp>(loc, upperBound, loop.upperBound()); outermost.setUpperBound(upperBound); builder.setInsertionPointToStart(outermost.getBody()); // 3. Remap induction variables. 
For each original loop, the value of the // induction variable can be obtained by dividing the induction variable of // the linearized loop by the total number of iterations of the loops nested // in it modulo the number of iterations in this loop (remove the values // related to the outer loops): // iv_i = floordiv(iv_linear, product-of-loop-ranges-until-i) mod range_i. // Compute these iteratively from the innermost loop by creating a "running // quotient" of division by the range. Value *previous = outermost.getInductionVar(); for (unsigned i = 0, e = loops.size(); i < e; ++i) { unsigned idx = loops.size() - i - 1; if (i != 0) previous = builder.create<DivISOp>(loc, previous, loops[idx + 1].upperBound()); Value *iv = (i == e - 1) ? previous : builder.create<RemISOp>(loc, previous, loops[idx].upperBound()); replaceAllUsesInRegionWith(loops[idx].getInductionVar(), iv, loops.back().region()); } // 4. Move the operations from the innermost just above the second-outermost // loop, delete the extra terminator and the second-outermost loop. 
loop::ForOp second = loops[1]; innermost.getBody()->back().erase(); outermost.getBody()->getOperations().splice( Block::iterator(second.getOperation()), innermost.getBody()->getOperations()); second.erase(); } void mlir::mapLoopToProcessorIds(loop::ForOp forOp, ArrayRef<Value *> processorId, ArrayRef<Value *> numProcessors) { assert(processorId.size() == numProcessors.size()); if (processorId.empty()) return; OpBuilder b(forOp); Location loc(forOp.getLoc()); Value *mul = processorId.front(); for (unsigned i = 1, e = processorId.size(); i < e; ++i) mul = b.create<AddIOp>(loc, b.create<MulIOp>(loc, mul, numProcessors[i]), processorId[i]); Value *lb = b.create<AddIOp>(loc, forOp.lowerBound(), b.create<MulIOp>(loc, forOp.step(), mul)); forOp.setLowerBound(lb); Value *step = forOp.step(); for (auto *numProcs : numProcessors) step = b.create<MulIOp>(loc, step, numProcs); forOp.setStep(step); } /// Given a memref region, determine the lowest depth at which transfers can be /// placed for it, and return the corresponding block, start and end positions /// in the block for placing incoming (read) and outgoing (write) copies /// respectively. The lowest depth depends on whether the region being accessed /// is hoistable with respect to one or more immediately surrounding loops. static void findHighestBlockForPlacement(const MemRefRegion &region, Block &block, Block::iterator &begin, Block::iterator &end, Block **copyPlacementBlock, Block::iterator *copyInPlacementStart, Block::iterator *copyOutPlacementStart) { const auto *cst = region.getConstraints(); SmallVector<Value *, 4> symbols; cst->getIdValues(cst->getNumDimIds(), cst->getNumDimAndSymbolIds(), &symbols); SmallVector<AffineForOp, 4> enclosingFors; getLoopIVs(*block.begin(), &enclosingFors); // Walk up loop parents till we find an IV on which this region is // symbolic/variant. 
auto it = enclosingFors.rbegin();
  for (auto e = enclosingFors.rend(); it != e; ++it) {
    // TODO(bondhugula): also need to be checking this for regions symbols that
    // aren't loop IVs, whether we are within their resp. defs' dominance scope.
    if (llvm::is_contained(symbols, it->getInductionVar()))
      break;
  }

  if (it != enclosingFors.rbegin()) {
    // `it` stopped at the innermost loop whose IV the region depends on;
    // hoist placement to just outside the loop immediately inside it (the
    // outermost loop the region is invariant on).
    auto lastInvariantIV = *std::prev(it);
    *copyInPlacementStart = Block::iterator(lastInvariantIV.getOperation());
    *copyOutPlacementStart = std::next(*copyInPlacementStart);
    *copyPlacementBlock = lastInvariantIV.getOperation()->getBlock();
  } else {
    // Nothing hoistable: keep the copies in the original [begin, end) range.
    *copyInPlacementStart = begin;
    *copyOutPlacementStart = end;
    *copyPlacementBlock = &block;
  }
}

// Info comprising stride and number of elements transferred every stride.
struct StrideInfo {
  int64_t stride;
  int64_t numEltPerStride;
};

/// Returns striding information for a copy/transfer of this region with
/// potentially multiple striding levels from outermost to innermost. For an
/// n-dimensional region, there can be at most n-1 levels of striding
/// successively nested.
// TODO(bondhugula): make this work with non-identity layout maps.
static void getMultiLevelStrides(const MemRefRegion &region,
                                 ArrayRef<int64_t> bufferShape,
                                 SmallVectorImpl<StrideInfo> *strideInfos) {
  if (bufferShape.size() <= 1)
    return;

  int64_t numEltPerStride = 1;
  int64_t stride = 1;
  // Walk dimensions from innermost to (but excluding) the outermost.
  for (int d = bufferShape.size() - 1; d >= 1; d--) {
    int64_t dimSize = region.memref->getType().cast<MemRefType>().getDimSize(d);
    stride *= dimSize;
    numEltPerStride *= bufferShape[d];
    // A stride is needed only if the region has a shorter extent than the
    // memref along the dimension *and* has an extent greater than one along the
    // next major dimension.
    if (bufferShape[d] < dimSize && bufferShape[d - 1] > 1) {
      strideInfos->push_back({stride, numEltPerStride});
    }
  }
}

/// Generates a point-wise copy from/to `memref' to/from `fastMemRef' and
/// returns the outermost AffineForOp of the copy loop nest. `memIndicesStart'
/// holds the lower coordinates of the region in the original memref to copy
/// in/out. If `isCopyOut' is true, generates a copy-out; otherwise a copy-in.
static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
                                         Value *fastMemRef,
                                         AffineMap memAffineMap,
                                         ArrayRef<Value *> memIndicesStart,
                                         ArrayRef<int64_t> fastBufferShape,
                                         bool isCopyOut, OpBuilder b) {
  assert(!memIndicesStart.empty() && "only 1-d or more memrefs");

  // The copy-in nest is generated as follows as an example for a 2-d region:
  // for x = ...
  //   for y = ...
  //     fast_buf[x][y] = buf[mem_x + x][mem_y + y]

  SmallVector<Value *, 4> fastBufIndices, memIndices;
  AffineForOp copyNestRoot;
  // Build one loop per buffer dimension; `b` descends into each new body.
  for (unsigned d = 0, e = fastBufferShape.size(); d < e; ++d) {
    auto forOp = b.create<AffineForOp>(loc, 0, fastBufferShape[d]);
    if (d == 0)
      copyNestRoot = forOp;
    b = forOp.getBodyBuilder();
    fastBufIndices.push_back(forOp.getInductionVar());

    // Base coordinate in the slow memref: either the raw start index (identity
    // map) or the d'th result of `memAffineMap` applied to the start indices.
    Value *memBase =
        (memAffineMap == b.getMultiDimIdentityMap(memAffineMap.getNumDims()))
            ? memIndicesStart[d]
            : b.create<AffineApplyOp>(
                  loc,
                  AffineMap::get(memAffineMap.getNumDims(),
                                 memAffineMap.getNumSymbols(),
                                 memAffineMap.getResult(d)),
                  memIndicesStart);

    // Construct the subscript for the slow memref being copied.
    SmallVector<Value *, 2> operands = {memBase, forOp.getInductionVar()};
    auto memIndex = b.create<AffineApplyOp>(
        loc,
        AffineMap::get(2, 0, b.getAffineDimExpr(0) + b.getAffineDimExpr(1)),
        operands);
    memIndices.push_back(memIndex);
  }

  if (!isCopyOut) {
    // Copy in.
    auto load = b.create<AffineLoadOp>(loc, memref, memIndices);
    b.create<AffineStoreOp>(loc, load, fastMemRef, fastBufIndices);
    return copyNestRoot;
  }

  // Copy out.
auto load = b.create<AffineLoadOp>(loc, fastMemRef, fastBufIndices);
  b.create<AffineStoreOp>(loc, load, memref, memIndices);
  return copyNestRoot;
}

static InFlightDiagnostic LLVM_ATTRIBUTE_UNUSED
emitRemarkForBlock(Block &block) {
  return block.getParentOp()->emitRemark();
}

/// Creates a buffer in the faster memory space for the specified memref region;
/// generates a copy from the lower memory space to this one, and replaces all
/// loads/stores in the block range [`begin', `end') of `block' to load/store
/// from that buffer. Returns failure if copies could not be generated due to
/// yet unimplemented cases. `copyInPlacementStart` and `copyOutPlacementStart`
/// in copyPlacementBlock specify the insertion points where the incoming copies
/// and outgoing copies, respectively, should be inserted (the insertion happens
/// right before the insertion point). Since `begin` can itself be invalidated
/// due to the memref rewriting done from this method, the output argument
/// `nBegin` is set to its replacement (set to `begin` if no invalidation
/// happens). Since outgoing copies could have been inserted at `end`, the
/// output argument `nEnd` is set to the new end. `sizeInBytes` is set to the
/// size of the fast buffer allocated.
static LogicalResult generateCopy(
    const MemRefRegion &region, Block *block, Block::iterator begin,
    Block::iterator end, Block *copyPlacementBlock,
    Block::iterator copyInPlacementStart, Block::iterator copyOutPlacementStart,
    AffineCopyOptions copyOptions, DenseMap<Value *, Value *> &fastBufferMap,
    DenseSet<Operation *> &copyNests, uint64_t *sizeInBytes,
    Block::iterator *nBegin, Block::iterator *nEnd) {
  *nBegin = begin;
  *nEnd = end;

  FuncOp f = begin->getParentOfType<FuncOp>();
  OpBuilder topBuilder(f.getBody());
  Value *zeroIndex = topBuilder.create<ConstantIndexOp>(f.getLoc(), 0);

  if (begin == end)
    return success();

  // Is the copy out point at the end of the block where we are doing
  // explicit copying.
  bool isCopyOutAtEndOfBlock = (end == copyOutPlacementStart);

  // Copies for read regions are going to be inserted at 'begin'.
  OpBuilder prologue(copyPlacementBlock, copyInPlacementStart);
  // Copies for write regions are going to be inserted at 'end'.
  OpBuilder epilogue(copyPlacementBlock, copyOutPlacementStart);
  OpBuilder &b = region.isWrite() ? epilogue : prologue;

  // Builder to create constants at the top level.
  auto func = copyPlacementBlock->getParent()->getParentOfType<FuncOp>();
  OpBuilder top(func.getBody());

  auto loc = region.loc;
  auto *memref = region.memref;
  auto memRefType = memref->getType().cast<MemRefType>();

  auto layoutMaps = memRefType.getAffineMaps();
  if (layoutMaps.size() > 1 ||
      (layoutMaps.size() == 1 && !layoutMaps[0].isIdentity())) {
    LLVM_DEBUG(llvm::dbgs() << "Non-identity layout map not yet supported\n");
    return failure();
  }

  // Indices to use for the copying.
  // Indices for the original memref being copied from/to.
  SmallVector<Value *, 4> memIndices;
  // Indices for the faster buffer being copied into/from.
  SmallVector<Value *, 4> bufIndices;

  unsigned rank = memRefType.getRank();
  SmallVector<int64_t, 4> fastBufferShape;

  // Compute the extents of the buffer.
  std::vector<SmallVector<int64_t, 4>> lbs;
  SmallVector<int64_t, 8> lbDivisors;
  lbs.reserve(rank);
  Optional<int64_t> numElements = region.getConstantBoundingSizeAndShape(
      &fastBufferShape, &lbs, &lbDivisors);
  if (!numElements.hasValue()) {
    LLVM_DEBUG(llvm::dbgs() << "Non-constant region size not supported\n");
    return failure();
  }

  if (numElements.getValue() == 0) {
    LLVM_DEBUG(llvm::dbgs() << "Nothing to copy\n");
    *sizeInBytes = 0;
    return success();
  }

  const FlatAffineConstraints *cst = region.getConstraints();
  // 'regionSymbols' hold values that this memory region is symbolic/parametric
  // on; these typically include loop IVs surrounding the level at which the
  // copy generation is being done or other valid symbols in MLIR.
  SmallVector<Value *, 8> regionSymbols;
  cst->getIdValues(rank, cst->getNumIds(), &regionSymbols);

  // Construct the index expressions for the fast memory buffer. The index
  // expression for a particular dimension of the fast buffer is obtained by
  // subtracting out the lower bound on the original memref's data region
  // along the corresponding dimension.

  // Index start offsets for faster memory buffer relative to the original.
  SmallVector<AffineExpr, 4> offsets;
  offsets.reserve(rank);
  for (unsigned d = 0; d < rank; d++) {
    assert(lbs[d].size() == cst->getNumCols() - rank && "incorrect bound size");

    // Accumulate the lower bound as an affine expression over the region's
    // parametric dimensions, then divide out the bound's divisor.
    AffineExpr offset = top.getAffineConstantExpr(0);
    for (unsigned j = 0, e = cst->getNumCols() - rank - 1; j < e; j++) {
      offset = offset + lbs[d][j] * top.getAffineDimExpr(j);
    }
    assert(lbDivisors[d] > 0);
    offset =
        (offset + lbs[d][cst->getNumCols() - 1 - rank]).floorDiv(lbDivisors[d]);

    // Set copy start location for this dimension in the lower memory space
    // memref.
    if (auto caf = offset.dyn_cast<AffineConstantExpr>()) {
      auto indexVal = caf.getValue();
      if (indexVal == 0) {
        memIndices.push_back(zeroIndex);
      } else {
        memIndices.push_back(
            top.create<ConstantIndexOp>(loc, indexVal).getResult());
      }
    } else {
      // The coordinate for the start location is just the lower bound along the
      // corresponding dimension on the memory region (stored in 'offset').
      auto map = AffineMap::get(
          cst->getNumDimIds() + cst->getNumSymbolIds() - rank, 0, offset);
      memIndices.push_back(b.create<AffineApplyOp>(loc, map, regionSymbols));
    }
    // The fast buffer is copied into at location zero; addressing is relative.
    bufIndices.push_back(zeroIndex);

    // Record the offsets since they are needed to remap the memory accesses of
    // the original memref further below.
    offsets.push_back(offset);
  }

  // The faster memory space buffer.
  Value *fastMemRef;

  // Check if a buffer was already created.
  bool existingBuf = fastBufferMap.count(memref) > 0;
  if (!existingBuf) {
    AffineMap fastBufferLayout = b.getMultiDimIdentityMap(rank);
    auto fastMemRefType =
        MemRefType::get(fastBufferShape, memRefType.getElementType(),
                        fastBufferLayout, copyOptions.fastMemorySpace);

    // Create the fast memory space buffer just before the 'affine.for'
    // operation.
    fastMemRef = prologue.create<AllocOp>(loc, fastMemRefType).getResult();
    // Record it.
    fastBufferMap[memref] = fastMemRef;
    // fastMemRefType is a constant shaped memref.
    *sizeInBytes = getMemRefSizeInBytes(fastMemRefType).getValue();
    LLVM_DEBUG(emitRemarkForBlock(*block)
               << "Creating fast buffer of type " << fastMemRefType
               << " and size " << llvm::divideCeil(*sizeInBytes, 1024)
               << " KiB\n");
  } else {
    // Reuse the one already created.
    fastMemRef = fastBufferMap[memref];
    *sizeInBytes = 0;
  }

  auto numElementsSSA =
      top.create<ConstantIndexOp>(loc, numElements.getValue());

  SmallVector<StrideInfo, 4> strideInfos;
  getMultiLevelStrides(region, fastBufferShape, &strideInfos);

  // TODO(bondhugula): use all stride levels once DmaStartOp is extended for
  // multi-level strides.
  if (strideInfos.size() > 1) {
    LLVM_DEBUG(llvm::dbgs() << "Only up to one level of stride supported\n");
    return failure();
  }

  Value *stride = nullptr;
  Value *numEltPerStride = nullptr;
  if (!strideInfos.empty()) {
    stride = top.create<ConstantIndexOp>(loc, strideInfos[0].stride);
    numEltPerStride =
        top.create<ConstantIndexOp>(loc, strideInfos[0].numEltPerStride);
  }

  // Record the last operation where we want the memref replacement to end. We
  // later do the memref replacement only in [begin, postDomFilter] so
  // that the original memref's used in the data movement code themselves don't
  // get replaced.
  auto postDomFilter = std::prev(end);

  // Create fully composed affine maps for each memref.
  auto memAffineMap = b.getMultiDimIdentityMap(memIndices.size());
  fullyComposeAffineMapAndOperands(&memAffineMap, &memIndices);
  auto bufAffineMap = b.getMultiDimIdentityMap(bufIndices.size());
  fullyComposeAffineMapAndOperands(&bufAffineMap, &bufIndices);

  if (!copyOptions.generateDma) {
    // Point-wise copy generation.
    auto copyNest = generatePointWiseCopy(loc, memref, fastMemRef, memAffineMap,
                                          memIndices, fastBufferShape,
                                          /*isCopyOut=*/region.isWrite(), b);

    // Record this so that we can skip it from yet another copy.
    copyNests.insert(copyNest);

    // Since new ops are being appended (for copy out's), adjust the end to
    // mark end of block range being processed if necessary.
    if (region.isWrite() && isCopyOutAtEndOfBlock)
      *nEnd = Block::iterator(copyNest.getOperation());
  } else {
    // DMA generation.
    // Create a tag (single element 1-d memref) for the DMA.
    auto tagMemRefType = MemRefType::get({1}, top.getIntegerType(32), {},
                                         copyOptions.tagMemorySpace);
    auto tagMemRef = prologue.create<AllocOp>(loc, tagMemRefType);

    SmallVector<Value *, 4> tagIndices({zeroIndex});
    auto tagAffineMap = b.getMultiDimIdentityMap(tagIndices.size());
    fullyComposeAffineMapAndOperands(&tagAffineMap, &tagIndices);
    if (!region.isWrite()) {
      // DMA non-blocking read from original buffer to fast buffer.
      b.create<AffineDmaStartOp>(loc, memref, memAffineMap, memIndices,
                                 fastMemRef, bufAffineMap, bufIndices,
                                 tagMemRef, tagAffineMap, tagIndices,
                                 numElementsSSA, stride, numEltPerStride);
    } else {
      // DMA non-blocking write from fast buffer to the original memref.
      auto op = b.create<AffineDmaStartOp>(
          loc, fastMemRef, bufAffineMap, bufIndices, memref, memAffineMap,
          memIndices, tagMemRef, tagAffineMap, tagIndices, numElementsSSA,
          stride, numEltPerStride);

      // Since new ops may be appended at 'end' (for outgoing DMAs), adjust the
      // end to mark end of block range being processed.
      if (isCopyOutAtEndOfBlock)
        *nEnd = Block::iterator(op.getOperation());
    }

    // Matching DMA wait to block on completion; tag always has a 0 index.
    b.create<AffineDmaWaitOp>(loc, tagMemRef, tagAffineMap, zeroIndex,
                              numElementsSSA);

    // Generate dealloc for the tag.
    auto tagDeallocOp = epilogue.create<DeallocOp>(loc, tagMemRef);
    if (*nEnd == end && isCopyOutAtEndOfBlock)
      // Since new ops are being appended (for outgoing DMAs), adjust the end to
      // mark end of range of the original.
      *nEnd = Block::iterator(tagDeallocOp.getOperation());
  }

  // Generate dealloc for the buffer.
  if (!existingBuf) {
    auto bufDeallocOp = epilogue.create<DeallocOp>(loc, fastMemRef);
    // When generating pointwise copies, `nEnd' has to be set to deallocOp on
    // the fast buffer (since it marks the new end insertion point).
    if (!copyOptions.generateDma && *nEnd == end && isCopyOutAtEndOfBlock)
      *nEnd = Block::iterator(bufDeallocOp.getOperation());
  }

  // Replace all uses of the old memref with the faster one while remapping
  // access indices (subtracting out lower bound offsets for each dimension).
  // Ex: to replace load %A[%i, %j] with load %Abuf[%i - %iT, %j - %jT],
  // index remap will be (%i, %j) -> (%i - %iT, %j - %jT),
  // i.e., affine.apply (d0, d1, d2, d3) -> (d2-d0, d3-d1) (%iT, %jT, %i, %j),
  // and (%iT, %jT) will be the 'extraOperands' for 'rep all memref uses with'.
  // d2, d3 correspond to the original indices (%i, %j).
  SmallVector<AffineExpr, 4> remapExprs;
  remapExprs.reserve(rank);
  for (unsigned i = 0; i < rank; i++) {
    // The starting operands of indexRemap will be regionSymbols (the symbols on
    // which the memref region is parametric); then those corresponding to
    // the memref's original indices follow.
    auto dimExpr = b.getAffineDimExpr(regionSymbols.size() + i);
    remapExprs.push_back(dimExpr - offsets[i]);
  }
  auto indexRemap = AffineMap::get(regionSymbols.size() + rank, 0, remapExprs);

  // Record the begin since it may be invalidated by memref replacement.
Block::iterator prevOfBegin;
  bool isBeginAtStartOfBlock = (begin == block->begin());
  if (!isBeginAtStartOfBlock)
    prevOfBegin = std::prev(begin);

  // *Only* those uses within the range [begin, end) of 'block' are replaced.
  replaceAllMemRefUsesWith(memref, fastMemRef,
                           /*extraIndices=*/{}, indexRemap,
                           /*extraOperands=*/regionSymbols,
                           /*symbolOperands=*/{},
                           /*domInstFilter=*/&*begin,
                           /*postDomInstFilter=*/&*postDomFilter);

  // Recompute `nBegin`: if `begin` itself was replaced, its successor-of-prev
  // (or the block start) is the valid replacement iterator.
  *nBegin = isBeginAtStartOfBlock ? block->begin() : std::next(prevOfBegin);

  return success();
}

/// Construct the memref region to just include the entire memref. Returns false
/// for dynamically shaped memrefs for now. `numParamLoopIVs` is the number of
/// enclosing loop IVs of opInst (starting from the outermost) that the region
/// is parametric on.
static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
                                  MemRefRegion *region) {
  unsigned rank;
  if (auto loadOp = dyn_cast<AffineLoadOp>(opInst)) {
    rank = loadOp.getMemRefType().getRank();
    region->memref = loadOp.getMemRef();
    region->setWrite(false);
  } else if (auto storeOp = dyn_cast<AffineStoreOp>(opInst)) {
    rank = storeOp.getMemRefType().getRank();
    region->memref = storeOp.getMemRef();
    region->setWrite(true);
  } else {
    assert(false && "expected load or store op");
    return false;
  }
  auto memRefType = region->memref->getType().cast<MemRefType>();
  if (!memRefType.hasStaticShape())
    return false;

  auto *regionCst = region->getConstraints();

  // Just get the first numSymbols IVs, which the memref region is parametric
  // on.
  SmallVector<AffineForOp, 4> ivs;
  getLoopIVs(*opInst, &ivs);
  ivs.resize(numParamLoopIVs);
  SmallVector<Value *, 4> symbols;
  extractForInductionVars(ivs, &symbols);
  regionCst->reset(rank, numParamLoopIVs, 0);
  regionCst->setIdValues(rank, rank + numParamLoopIVs, symbols);

  // Memref dim sizes provide the bounds.
  for (unsigned d = 0; d < rank; d++) {
    auto dimSize = memRefType.getDimSize(d);
    assert(dimSize > 0 && "filtered dynamic shapes above");
    regionCst->addConstantLowerBound(d, 0);
    regionCst->addConstantUpperBound(d, dimSize - 1);
  }
  return true;
}

/// Generates copies for a contiguous sequence of operations in `block` in the
/// iterator range [`begin', `end'), where `end' can't be past the terminator of
/// the block (since additional operations are potentially inserted right before
/// `end'). Returns the total size of the fast buffers used.
// Since we generate alloc's and dealloc's for all fast buffers (before and
// after the range of operations resp.), all of the fast memory capacity is
// assumed to be available for processing this block range.
uint64_t mlir::affineDataCopyGenerate(Block::iterator begin,
                                      Block::iterator end,
                                      const AffineCopyOptions &copyOptions,
                                      DenseSet<Operation *> &copyNests) {
  if (begin == end)
    return 0;

  assert(begin->getBlock() == std::prev(end)->getBlock() &&
         "Inconsistent block begin/end args");
  assert(end != end->getBlock()->end() && "end can't be the block terminator");

  Block *block = begin->getBlock();

  // Copies will be generated for this depth, i.e., symbolic in all loops
  // surrounding the this block range.
  unsigned copyDepth = getNestingDepth(*begin);

  LLVM_DEBUG(llvm::dbgs() << "Generating copies at depth " << copyDepth
                          << "\n");
  LLVM_DEBUG(llvm::dbgs() << "from begin: " << *begin << "\n");
  LLVM_DEBUG(llvm::dbgs() << "to inclusive end: " << *std::prev(end) << "\n");

  // List of memory regions to copy for. We need a map vector to have a
  // guaranteed iteration order to write test cases. CHECK-DAG doesn't help here
  // since the alloc's for example are identical except for the SSA id.
  SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4> readRegions;
  SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4> writeRegions;

  // Map from original memref's to the fast buffers that their accesses are
  // replaced with.
DenseMap<Value *, Value *> fastBufferMap;

  // To check for errors when walking the block.
  bool error = false;

  // Walk this range of operations to gather all memory regions.
  block->walk(begin, end, [&](Operation *opInst) {
    // Gather regions to allocate to buffers in faster memory space.
    if (auto loadOp = dyn_cast<AffineLoadOp>(opInst)) {
      if ((loadOp.getMemRefType().getMemorySpace() !=
           copyOptions.slowMemorySpace))
        return;
    } else if (auto storeOp = dyn_cast<AffineStoreOp>(opInst)) {
      if (storeOp.getMemRefType().getMemorySpace() !=
          copyOptions.slowMemorySpace)
        return;
    } else {
      // Neither load nor a store op.
      return;
    }

    // Compute the MemRefRegion accessed.
    auto region = std::make_unique<MemRefRegion>(opInst->getLoc());
    if (failed(region->compute(opInst, copyDepth))) {
      LLVM_DEBUG(llvm::dbgs()
                 << "Error obtaining memory region: semi-affine maps?\n");
      LLVM_DEBUG(llvm::dbgs() << "over-approximating to the entire memref\n");
      if (!getFullMemRefAsRegion(opInst, copyDepth, region.get())) {
        LLVM_DEBUG(
            opInst->emitError("non-constant memref sizes not yet supported"));
        error = true;
        return;
      }
    }

    // Each memref has a single buffer associated with it irrespective of how
    // many load's and store's happen on it.
    // TODO(bondhugula): in the future, when regions don't intersect and satisfy
    // other properties (based on load/store regions), we could consider
    // multiple buffers per memref.

    // Add to the appropriate region if it's not already in it, or take a
    // bounding box union with the existing one if it's already in there.
    // Note that a memref may have both read and write regions - so update the
    // region in the other list if one exists (write in case of read and vice
    // versa) since there is a single bounding box for a memref across all reads
    // and writes that happen on it.

    // Attempts to update; returns true if 'region' exists in targetRegions.
    auto updateRegion =
        [&](const SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4>
                &targetRegions) {
          auto it = targetRegions.find(region->memref);
          if (it == targetRegions.end())
            return false;

          // Perform a union with the existing region.
          if (failed(it->second->unionBoundingBox(*region))) {
            LLVM_DEBUG(llvm::dbgs()
                       << "Memory region bounding box failed; "
                          "over-approximating to the entire memref\n");
            // If the union fails, we will overapproximate.
            if (!getFullMemRefAsRegion(opInst, copyDepth, region.get())) {
              LLVM_DEBUG(opInst->emitError(
                  "non-constant memref sizes not yet supported"));
              error = true;
              return true;
            }
            it->second->getConstraints()->clearAndCopyFrom(
                *region->getConstraints());
          } else {
            // Union was computed and stored in 'it->second': copy to 'region'.
            region->getConstraints()->clearAndCopyFrom(
                *it->second->getConstraints());
          }
          return true;
        };

    bool existsInRead = updateRegion(readRegions);
    if (error)
      return;
    bool existsInWrite = updateRegion(writeRegions);
    if (error)
      return;

    // Finally add it to the region list.
    if (region->isWrite() && !existsInWrite) {
      writeRegions[region->memref] = std::move(region);
    } else if (!region->isWrite() && !existsInRead) {
      readRegions[region->memref] = std::move(region);
    }
  });

  if (error) {
    begin->emitError(
        "copy generation failed for one or more memref's in this block\n");
    return 0;
  }

  uint64_t totalCopyBuffersSizeInBytes = 0;
  bool ret = true;
  auto processRegions =
      [&](const SmallMapVector<Value *, std::unique_ptr<MemRefRegion>, 4>
              &regions) {
        for (const auto &regionEntry : regions) {
          // For each region, hoist copy in/out past all hoistable
          // 'affine.for's.
          Block::iterator copyInPlacementStart, copyOutPlacementStart;
          Block *copyPlacementBlock;
          findHighestBlockForPlacement(
              *regionEntry.second, *block, begin, end, &copyPlacementBlock,
              &copyInPlacementStart, &copyOutPlacementStart);

          uint64_t sizeInBytes;
          Block::iterator nBegin, nEnd;
          LogicalResult iRet = generateCopy(
              *regionEntry.second, block, begin, end, copyPlacementBlock,
              copyInPlacementStart, copyOutPlacementStart, copyOptions,
              fastBufferMap, copyNests, &sizeInBytes, &nBegin, &nEnd);
          if (succeeded(iRet)) {
            // begin/end could have been invalidated, and need update.
            begin = nBegin;
            end = nEnd;
            totalCopyBuffersSizeInBytes += sizeInBytes;
          }
          ret = ret & succeeded(iRet);
        }
      };
  processRegions(readRegions);
  processRegions(writeRegions);

  if (!ret) {
    begin->emitError(
        "copy generation failed for one or more memref's in this block\n");
    return totalCopyBuffersSizeInBytes;
  }

  // For a range of operations, a note will be emitted at the caller.
  AffineForOp forOp;
  uint64_t sizeInKib = llvm::divideCeil(totalCopyBuffersSizeInBytes, 1024);
  if (llvm::DebugFlag && (forOp = dyn_cast<AffineForOp>(&*begin))) {
    forOp.emitRemark()
        << sizeInKib
        << " KiB of copy buffers in fast memory space for this block\n";
  }

  if (totalCopyBuffersSizeInBytes > copyOptions.fastMemCapacityBytes) {
    StringRef str = "Total size of all copy buffers' for this block "
                    "exceeds fast memory capacity\n";
    block->getParentOp()->emitError(str);
  }

  return totalCopyBuffersSizeInBytes;
}
/*
  Q Light Controller
  vcxypadfixtureeditor_test.cpp

  Copyright (C) Heikki Junnila

  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0.txt

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
*/

#include <QtTest>

// Expose private/protected members of the class under test for white-box
// inspection of its widgets.
#define private public
#define protected public
#include "vcxypadfixtureeditor.h"
#undef private
#undef protected

#include "vcxypadfixtureeditor_test.h"
#include "qlcfixturemode.h"
#include "qlcfixturedef.h"
#include "qlcfile.h"
#include "doc.h"

#include "../../../engine/test/common/resource_paths.h"

// Loads the bundled fixture definitions and registers three fixtures
// (addresses 0, 10, 20) on the shared Doc used by all test methods.
void VCXYPadFixtureEditor_Test::initTestCase()
{
    m_doc = new Doc(this);

    QDir dir(INTERNAL_FIXTUREDIR);
    dir.setFilter(QDir::Files);
    dir.setNameFilters(QStringList() << QString("*%1").arg(KExtFixture));
    QVERIFY(m_doc->fixtureDefCache()->loadMap(dir) == true);

    QLCFixtureDef* def = m_doc->fixtureDefCache()->fixtureDef("Futurelight", "DJScan250");
    QVERIFY(def != NULL);
    QLCFixtureMode* mode = def->modes()[0];
    QVERIFY(mode != NULL);

    Fixture* fxi = new Fixture(m_doc);
    fxi->setName("Fixture 1");
    fxi->setFixtureDefinition(def, mode);
    fxi->setAddress(0);

    fxi = new Fixture(m_doc);
    fxi->setName("Fixture 2");
    fxi->setFixtureDefinition(def, mode);
    fxi->setAddress(10);

    fxi = new Fixture(m_doc);
    fxi->setName("Fixture 3");
    fxi->setFixtureDefinition(def, mode);
    fxi->setAddress(20);
}

// Verifies the editor's initial widget state: it should reflect the first
// fixture's X/Y ranges (fractions shown as percentages) when a list is given,
// and fall back to the 0..100 defaults for an empty list.
void VCXYPadFixtureEditor_Test::initial()
{
    QList <VCXYPadFixture> list;

    VCXYPadFixture fxi(m_doc);
    fxi.setDisplayMode(VCXYPadFixture::Percentage);
    fxi.setHead(GroupHead(0, 0));
    fxi.setX(0.1, 0.2, false);
    fxi.setY(0.3, 0.4, true);
    list << fxi;

    fxi.setHead(GroupHead(1, 0));
    fxi.setX(0, 1, true);
fxi.setY(0, 1, false);
    list << fxi;

    // Editor shows the FIRST fixture's ranges as percentages (0.1 -> 10 etc.).
    VCXYPadFixtureEditor fe(NULL, list);
    QCOMPARE(fe.fixtures(), list);
    QCOMPARE(fe.m_xMin->value(), 10);
    QCOMPARE(fe.m_xMax->value(), 20);
    QCOMPARE(fe.m_xReverse->isChecked(), false);
    QCOMPARE(fe.m_yMin->value(), 30);
    QCOMPARE(fe.m_yMax->value(), 40);
    QCOMPARE(fe.m_yReverse->isChecked(), true);

    // Empty fixture list: editor falls back to full-range defaults.
    list.clear();
    VCXYPadFixtureEditor fe2(NULL, list);
    QCOMPARE(fe2.fixtures().isEmpty(), true);
    QCOMPARE(fe2.m_xMin->value(), 0);
    QCOMPARE(fe2.m_xMax->value(), 100);
    QCOMPARE(fe2.m_xReverse->isChecked(), false);
    QCOMPARE(fe2.m_yMin->value(), 0);
    QCOMPARE(fe2.m_yMax->value(), 100);
    QCOMPARE(fe2.m_yReverse->isChecked(), false);
}

// Verifies the min/max spin boxes keep each other consistent: setting a max
// below the min clamps min to max-1, and raising min above max pushes max up.
void VCXYPadFixtureEditor_Test::valueSlots()
{
    QList <VCXYPadFixture> list;
    VCXYPadFixtureEditor fe(NULL, list);

    fe.m_xMin->setValue(50);
    fe.m_xMax->setValue(20);
    QCOMPARE(fe.m_xMin->value(), 19);
    QCOMPARE(fe.m_xMax->value(), 20);

    fe.m_xMin->setValue(40);
    QCOMPARE(fe.m_xMin->value(), 40);
    QCOMPARE(fe.m_xMax->value(), 41);

    fe.m_yMin->setValue(50);
    fe.m_yMax->setValue(20);
    QCOMPARE(fe.m_yMin->value(), 19);
    QCOMPARE(fe.m_yMax->value(), 20);

    fe.m_yMin->setValue(40);
    QCOMPARE(fe.m_yMin->value(), 40);
    QCOMPARE(fe.m_yMax->value(), 41);
}

// Verifies accept() writes the edited percentage ranges back to EVERY fixture
// in the list as fractions (10/20/30/40 -> 0.1/0.2/0.3/0.4).
void VCXYPadFixtureEditor_Test::accept()
{
    QList <VCXYPadFixture> list;

    VCXYPadFixture fxi(m_doc);
    fxi.setDisplayMode(VCXYPadFixture::Percentage);
    fxi.setHead(GroupHead(0, 0));
    fxi.setX(0, 1, false);
    fxi.setY(0, 1, false);
    list << fxi;

    fxi.setHead(GroupHead(1, 0));
    fxi.setX(0.5, 0.6, true);
    fxi.setY(0.5, 0.6, true);
    list << fxi;

    VCXYPadFixtureEditor fe(NULL, list);
    fe.m_xMin->setValue(10);
    fe.m_xMax->setValue(20);
    fe.m_yMin->setValue(30);
    fe.m_yMax->setValue(40);

    fe.accept();
    QCOMPARE(fe.m_xMin->value(), 10);
    QCOMPARE(fe.m_xMax->value(), 20);
    QCOMPARE(fe.m_yMin->value(), 30);
    QCOMPARE(fe.m_yMax->value(), 40);

    list = fe.fixtures();
    QCOMPARE(list[0].head().fxi, quint32(0));
    QCOMPARE(list[0].head().head, 0);
    QCOMPARE(list[0].xMin(), qreal(0.1));
    QCOMPARE(list[0].xMax(), qreal(0.2));
QCOMPARE(list[0].yMin(), qreal(0.3));
    QCOMPARE(list[0].yMax(), qreal(0.4));

    // The second fixture receives the same edited ranges as the first.
    QCOMPARE(list[1].head().fxi, quint32(1));
    QCOMPARE(list[1].head().head, 0);
    QCOMPARE(list[1].xMin(), qreal(0.1));
    QCOMPARE(list[1].xMax(), qreal(0.2));
    QCOMPARE(list[1].yMin(), qreal(0.3));
    QCOMPARE(list[1].yMax(), qreal(0.4));
}

// Releases the Doc shared by the test methods.
void VCXYPadFixtureEditor_Test::cleanupTestCase()
{
    delete m_doc;
    m_doc = NULL;
}

QTEST_MAIN(VCXYPadFixtureEditor_Test)
/*=============================================================================
    Copyright (c) 2017 Paul Fultz II
    static.cpp
    Distributed under the Boost Software License, Version 1.0. (See accompanying
    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#include <boost/hof/static.hpp>
#include "test.hpp"

// TODO: Test infix

// Wrap the test function objects (declared in test.hpp) in the static_
// adaptor so they can be declared as constexpr globals and invoked directly.
static constexpr boost::hof::static_<binary_class> binary_static = {};
static constexpr boost::hof::static_<void_class> void_static = {};
static constexpr boost::hof::static_<mono_class> mono_static = {};

BOOST_HOF_TEST_CASE()
{
    // void_static is only checked for being callable; the other two are
    // checked for their computed results.
    void_static(1);
    BOOST_HOF_TEST_CHECK(3 == binary_static(1, 2));
    BOOST_HOF_TEST_CHECK(3 == mono_static(2));
}
// prettycalc.cpp : Файл представлен исключительно в ознакомительных // целях и не является самостоятельной единицей. #include "stdafx.h" int main() { int array[5][2]; // только пять первых строк или меньше std::cout << "Pretty Calc" << std::endl; int n = 5; for (int i = 0; i < n; i++) { std::cin >> array[i][0]; std::cin >> array[i][1]; if (array[i][0] <= 0 || array[i][1] <= 0) { if ((array[i][0] < 0 || array[i][1] < 0) && i > 0) { n = i; break; } std::cout << "Error" << std::endl; return 0; } } int res = 0; for (int i = 0; i< n; i++) { res += array[i][0] * array[i][1]; } std::cout << "Result: " << res << std::endl; }
#include <iostream> #include <some/TestClass2.hpp> some::TestClass2::TestClass2() { std::cout << "TestClass2 constructor" << std::endl; } some::TestClass2::~TestClass2() { std::cout << "TestClass2 destructor" << std::endl; }
/**
 * @file IVectorWriter.hpp
 * @brief Interface for writing out vectors.
 * @author Dominique LaSalle <wildriver@domnet.org>
 * Copyright 2015-2016
 * @version 1
 * @date 2016-02-07
 */

#ifndef WILDRIVER_IVECTORWRITER_HPP
#define WILDRIVER_IVECTORWRITER_HPP

#include <vector>

#include "base.h"

namespace WildRiver
{

/**
 * @brief Abstract interface implemented by all vector writers; concrete
 * subclasses provide the underlying storage format/medium.
 */
class IVectorWriter
{
  public:
    /**
     * @brief Virtual destructor.
     */
    virtual ~IVectorWriter()
    {
    }

    /**
     * @brief Set the size of the vector.
     *
     * @param size The new size of the vector.
     */
    virtual void setSize(
        ind_t size) = 0;

    /**
     * @brief Write the vector to the underlying medium.
     *
     * @param vals The dense array of values in the vector.
     * @param progress The variable to update as the vector is saved (can be
     * null).
     */
    virtual void write(
        val_t const * vals,
        double * progress) = 0;
};

}

#endif
//---------------------------------------------------------------------------
//	Greenplum Database
//	Copyright (C) 2015 VMware, Inc. or its affiliates.
//
//	@filename:
//		CDMLTest.cpp
//
//	@doc:
//		Test for optimizing DML queries
//---------------------------------------------------------------------------

#include "unittest/gpopt/minidump/CDMLTest.h"

#include "gpos/base.h"
#include "gpos/memory/CAutoMemoryPool.h"
#include "gpos/task/CAutoTraceFlag.h"
#include "gpos/test/CUnittest.h"

#include "gpopt/exception.h"
#include "gpopt/minidump/CMinidumperUtils.h"

#include "unittest/gpopt/CTestUtils.h"

using namespace gpopt;

// Counter tracking which minidump in rgszDMLFileNames runs next; shared
// across invocations so the suite can resume where it left off.
ULONG CDMLTest::m_ulDMLTestCounter = 0;  // start from first test

// minidump files
// Each entry is a captured optimizer input/output pair replayed and
// plan-matched by CTestUtils::EresUnittest_RunTests below.
const CHAR *rgszDMLFileNames[] = {
	"../data/dxl/minidump/Insert.mdp",
	"../data/dxl/minidump/MultipleUpdateWithJoinOnDistCol.mdp",
	"../data/dxl/minidump/UpdatingNonDistributionColumnFunc.mdp",
	"../data/dxl/minidump/UpdatingMultipleColumn.mdp",
	"../data/dxl/minidump/UpdateWithHashJoin.mdp",
	"../data/dxl/minidump/UpdatingDistributionColumn.mdp",
	"../data/dxl/minidump/UpdatingNonDistColSameTable.mdp",
	"../data/dxl/minidump/InsertRandomDistr.mdp",
	// GPDB_12_MERGE_FIXME: Renable these after we support DML on partitioned tables
	// "../data/dxl/minidump/InsertMismatchedDistrubution.mdp",
	// "../data/dxl/minidump/InsertMismatchedDistrubution-2.mdp",
	// "../data/dxl/minidump/DeleteMismatchedDistribution.mdp",
	// "../data/dxl/minidump/UpdateNoDistKeyMismatchedDistribution.mdp",
	// "../data/dxl/minidump/UpdateDistKeyMismatchedDistribution.mdp",
	"../data/dxl/minidump/InsertConstTupleRandomDistribution.mdp",
	"../data/dxl/minidump/InsertMasterOnlyTable.mdp",
	"../data/dxl/minidump/InsertMasterOnlyTableConstTuple.mdp",
	"../data/dxl/minidump/InsertSort.mdp",
	"../data/dxl/minidump/InsertSortDistributed2MasterOnly.mdp",
	"../data/dxl/minidump/InsertProjectSort.mdp",
	"../data/dxl/minidump/InsertAssertSort.mdp",
	"../data/dxl/minidump/UpdateRandomDistr.mdp",
	"../data/dxl/minidump/DeleteRandomDistr.mdp",
	"../data/dxl/minidump/InsertConstTuple.mdp",
	"../data/dxl/minidump/InsertConstTupleVolatileFunction.mdp",
	"../data/dxl/minidump/InsertConstTupleVolatileFunctionMOTable.mdp",
	"../data/dxl/minidump/InsertPrimaryKeyFromMOTable.mdp",
	"../data/dxl/minidump/InsertNULLNotNULLConstraint.mdp",
	"../data/dxl/minidump/Insert-AO.mdp",
	// "../data/dxl/minidump/Insert-AO-Partitioned.mdp",
	// "../data/dxl/minidump/Insert-AO-Partitioned-SortDisabled.mdp",
	"../data/dxl/minidump/DML-Replicated-Input.mdp",
	"../data/dxl/minidump/InsertWithTriggers.mdp",
	"../data/dxl/minidump/DeleteWithTriggers.mdp",
	"../data/dxl/minidump/UpdateWithTriggers.mdp",
	"../data/dxl/minidump/InsertNotNullCols.mdp",
	"../data/dxl/minidump/InsertCheckConstraint.mdp",
	"../data/dxl/minidump/InsertWithDroppedCol.mdp",
	"../data/dxl/minidump/UpdateCheckConstraint.mdp",
	"../data/dxl/minidump/UpdateDistrKey.mdp",
	"../data/dxl/minidump/UpdateNoCardinalityAssert.mdp",
	"../data/dxl/minidump/SelfUpdate.mdp",
	"../data/dxl/minidump/UpdateWithOids.mdp",
	"../data/dxl/minidump/UpdateUniqueConstraint.mdp",
	"../data/dxl/minidump/UpdateUniqueConstraint-2.mdp",
	"../data/dxl/minidump/UpdateVolatileFunction.mdp",
	// "../data/dxl/minidump/UpdatePartTable.mdp",
	// "../data/dxl/minidump/UpdateDroppedCols.mdp",
	"../data/dxl/minidump/UpdateCardinalityAssert.mdp",
	"../data/dxl/minidump/UpdateNotNullCols.mdp",
	"../data/dxl/minidump/UpdateZeroRows.mdp",
	"../data/dxl/minidump/InsertNoEnforceConstraints.mdp",
	"../data/dxl/minidump/UpdateNoEnforceConstraints.mdp",
	"../data/dxl/minidump/Insert-With-HJ-CTE-Agg.mdp",
	"../data/dxl/minidump/CTAS-with-Limit.mdp",
	"../data/dxl/minidump/CTAS-With-Global-Local-Agg.mdp",
	"../data/dxl/minidump/Delete-With-Limit-In-Subquery.mdp",
	"../data/dxl/minidump/DML-With-WindowFunc-OuterRef.mdp",
	"../data/dxl/minidump/DML-Filter-With-OuterRef.mdp",
	"../data/dxl/minidump/DML-UnionAll-With-OuterRef.mdp",
	"../data/dxl/minidump/DML-ComputeScalar-With-Outerref.mdp",
	"../data/dxl/minidump/DML-UnionAll-With-Universal-Child.mdp",
	"../data/dxl/minidump/DML-With-MasterOnlyTable-1.mdp",
	"../data/dxl/minidump/DML-With-HJ-And-UniversalChild.mdp",
	"../data/dxl/minidump/DML-With-Join-With-Universal-Child.mdp",
	"../data/dxl/minidump/DML-With-CorrelatedNLJ-With-Universal-Child.mdp",
	"../data/dxl/minidump/DML-Volatile-Function.mdp",
};

//---------------------------------------------------------------------------
//	@function:
//		CDMLTest::EresUnittest
//
//	@doc:
//		Unittest for expressions
//
//---------------------------------------------------------------------------
GPOS_RESULT
CDMLTest::EresUnittest()
{
	CUnittest rgut[] = {
		GPOS_UNITTEST_FUNC(EresUnittest_RunTests),
	};

	GPOS_RESULT eres = CUnittest::EresExecute(rgut, GPOS_ARRAY_SIZE(rgut));

	// reset metadata cache
	CMDCache::Reset();

	return eres;
}

//---------------------------------------------------------------------------
//	@function:
//		CDMLTest::EresUnittest_RunTests
//
//	@doc:
//		Run all Minidump-based tests with plan matching
//
//---------------------------------------------------------------------------
GPOS_RESULT
CDMLTest::EresUnittest_RunTests()
{
	return CTestUtils::EresUnittest_RunTests(rgszDMLFileNames,
											 &m_ulDMLTestCounter,
											 GPOS_ARRAY_SIZE(rgszDMLFileNames));
}

// EOF
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/developer/debug/zxdb/expr/parse_string.h"

#include <ctype.h>
#include <stdlib.h>

#include "src/lib/fxl/logging.h"

namespace zxdb {

namespace {

// A character sequence made of any source character except for parentheses, backslash and spaces.
// NOTE(review): "Delimeter" is a long-standing spelling of "Delimiter"; kept as-is since the
// function has internal linkage and renaming is cosmetic.
bool IsValidCRawStringDelimeter(char c) { return c != '(' && c != ')' && c != '\\' && !isspace(c); }

// Detects a C++ raw string literal R"marker(... at |begin|. Returns the literal's metadata
// (language, marker, content offset) on a match, std::nullopt otherwise.
std::optional<StringLiteralBegin> DoesBeginRawCStringLiteral(std::string_view input, size_t begin) {
  // This only supports raw string literals and not the various flavors of Unicode prefixes.
  if (input.size() <= begin + 2 || input[begin] != 'R' || input[begin + 1] != '"')
    return std::nullopt;

  // Skip over the delimiter.
  size_t cur = begin + 2;
  while (input.size() > cur && IsValidCRawStringDelimeter(input[cur]))
    cur++;

  // Expecting a paren to begin the string.
  if (cur == input.size() || input[cur] != '(')
    return std::nullopt;

  StringLiteralBegin info;
  info.language = ExprLanguage::kC;
  info.is_raw = true;
  // The marker is everything between R" and the opening paren.
  info.raw_marker = input.substr(begin + 2, cur - begin - 2);
  info.string_begin = begin;
  info.contents_begin = cur + 1;
  return info;
}

// Rust raw strings start with 'r', some number of '#' characters, and a quote.
// Note that this therefore requires at least one '#' (r"..." is not matched here).
std::optional<StringLiteralBegin> DoesBeginRawRustStringLiteral(std::string_view input,
                                                               size_t begin) {
  // This only supports "raw" strings, not "byte" strings. It could be enhanced in the future.
  if (input.size() <= begin + 2 || input[begin] != 'r' || input[begin + 1] != '#')
    return std::nullopt;

  // Count the run of '#' — it must be matched exactly at the string's end.
  size_t cur = begin + 1;
  while (input.size() > cur && input[cur] == '#')
    cur++;

  if (cur == input.size() || input[cur] != '"')
    return std::nullopt;

  StringLiteralBegin info;
  info.language = ExprLanguage::kRust;
  info.is_raw = true;
  // The marker is the '#' run (quote excluded).
  info.raw_marker = input.substr(begin + 1, cur - begin - 1);
  info.string_begin = begin;
  info.contents_begin = cur + 1;
  return info;
}

// Determines if the current index marks the beginning of the end of the string. If it does,
// returns the index of the character immediately following the string (which might point to
// one-past-the-end of the input). Otherwise returns 0.
size_t EndsStringLiteral(std::string_view input, const StringLiteralBegin& info, size_t cur) {
  FXL_DCHECK(cur < input.size());

  if (!info.is_raw) {
    // Non-raw strings end at the first unescaped quote (escapes are consumed by the caller
    // before this is called, so a backslash never precedes |cur| here).
    if (input[cur] == '"')
      return cur + 1;
    return 0;
  }

  switch (info.language) {
    case ExprLanguage::kC:
      // C raw strings end with )marker" — marker must match exactly.
      if (input.size() - cur >= info.raw_marker.size() + 2) {
        if (input[cur] == ')' && input[cur + info.raw_marker.size() + 1] == '"' &&
            input.substr(cur + 1, info.raw_marker.size()) == info.raw_marker)
          return cur + info.raw_marker.size() + 2;
      }
      break;
    case ExprLanguage::kRust:
      // Rust raw strings end with "### (quote followed by the '#' run).
      if (input.size() - cur >= info.raw_marker.size() + 1) {
        if (input[cur] == '"' && input.substr(cur + 1, info.raw_marker.size()) == info.raw_marker)
          return cur + info.raw_marker.size() + 1;
      }
  }
  return 0;
}

bool IsHexDigit(char c) {
  return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'F') || (c >= 'a' && c <= 'f');
}

bool IsOctalDigit(char c) { return c >= '0' && c <= '7'; }

// See HandleEscaped() below for the parameter description. |*cur| should point to the first hex
// digit.
// Decodes a hex escape sequence ("\xNN..."). |*cur| must point at the first expected hex digit.
// On success the digits are consumed (|*cur| advanced past them), the decoded byte is appended
// to |*result|, and a default (non-error) Err is returned. On failure, |*error_location| is set
// to the offset of the offending input and an error Err is returned.
Err HandleHexEscaped(std::string_view input, const StringLiteralBegin& info, size_t* cur,
                     size_t* error_location, std::string* result) {
  if (!IsHexDigit(input[*cur])) {
    *error_location = *cur;
    return Err("Expecting hex escape sequence.");
  }

  std::string hex_digits;
  switch (info.language) {
    case ExprLanguage::kC:
      // C reads hex digits until there are no more.
      for (size_t i = *cur; i < input.size() && IsHexDigit(input[i]); i++)
        hex_digits.push_back(input[i]);
      break;
    case ExprLanguage::kRust:
      // Rust requires exactly two characters.
      if (*cur + 1 >= input.size() || !IsHexDigit(input[*cur + 1])) {
        *error_location = *cur;
        return Err("Expecting two hex digits.");
      }
      hex_digits.push_back(input[*cur]);
      hex_digits.push_back(input[*cur + 1]);
      break;
  }

  char* endptr = nullptr;
  unsigned long value = strtoul(hex_digits.c_str(), &endptr, 16);
  if (endptr != hex_digits.data() + hex_digits.size()) {
    *error_location = *cur;
    return Err("Unexpected hex input.");
  }

  (*cur) += hex_digits.size();
  // Values wider than one byte are truncated to the low byte (matches C's behavior for
  // over-long \x escapes in a narrow string).
  result->push_back(static_cast<unsigned char>(value));
  return Err();
}

// Decodes an octal escape sequence ("\NNN"). Same contract as HandleHexEscaped(); |*cur| must
// point at the first octal digit. All consecutive octal digits are consumed (C semantics; Rust
// has no octal escapes so |info| is unused here).
Err HandleOctalEscaped(std::string_view input, const StringLiteralBegin& info, size_t* cur,
                       size_t* error_location, std::string* result) {
  if (!IsOctalDigit(input[*cur])) {
    *error_location = *cur;
    // Fixed: this message previously said "hex", copy-pasted from HandleHexEscaped().
    return Err("Expecting octal escape sequence.");
  }

  std::string octal_digits;
  for (size_t i = *cur; i < input.size() && IsOctalDigit(input[i]); i++)
    octal_digits.push_back(input[i]);

  char* endptr = nullptr;
  unsigned long value = strtoul(octal_digits.c_str(), &endptr, 8);
  if (endptr != octal_digits.data() + octal_digits.size()) {
    *error_location = *cur;
    return Err("Unexpected octal input.");
  }

  (*cur) += octal_digits.size();
  result->push_back(static_cast<unsigned char>(value));
  return Err();
}

// On input, |*cur| should point to a valid character in |input| immediately following a backslash.
// On success, |*cur| will be updated to point to the character immediately following the escape.
// Dispatches on the character after the backslash: simple single-character escapes shared by C
// and Rust are handled inline; hex/octal escapes and language-specific cases are delegated.
Err HandleEscaped(std::string_view input, const StringLiteralBegin& info, size_t* cur,
                  size_t* error_location, std::string* result) {
  // Shared C/Rust escape sequences.
  switch (input[*cur]) {
    // clang-format off
    case 'n': result->push_back('\n'); ++(*cur); return Err();
    case 'r': result->push_back('\r'); ++(*cur); return Err();
    case 't': result->push_back('\t'); ++(*cur); return Err();
    case '\\': result->push_back('\\'); ++(*cur); return Err();
    case '\'': result->push_back('\''); ++(*cur); return Err();
    case '"': result->push_back('"'); ++(*cur); return Err();
    default: break;
    // clang-format on
  }

  if (input[*cur] == 'x') {
    // Hex digit.
    ++(*cur);
    if (*cur == input.size()) {
      *error_location = *cur - 2;  // Point to backslash.
      return Err("End of input found in hex escape.");
    }
    return HandleHexEscaped(input, info, cur, error_location, result);
  }

  if (info.language == ExprLanguage::kC) {
    // C-specific escape sequences.
    switch (input[*cur]) {
      // clang-format off
      case '?': result->push_back('?'); ++(*cur); return Err();
      case 'a': result->push_back('\a'); ++(*cur); return Err();
      case 'b': result->push_back('\b'); ++(*cur); return Err();
      case 'f': result->push_back('\f'); ++(*cur); return Err();
      case 'v': result->push_back('\v'); ++(*cur); return Err();
      default: break;
      // clang-format on
    }
    if (input[*cur] == 'u' || input[*cur] == 'U')
      return Err("Unicode escape sequences are not supported.");
    if (IsOctalDigit(input[*cur])) {
      // Octal.
      return HandleOctalEscaped(input, info, cur, error_location, result);
    }
  }

  if (info.language == ExprLanguage::kRust) {
    // Rust-specific escape sequences.
    if (input[*cur] == '0') {
      // Null.
      result->push_back(0);
      ++(*cur);
      return Err();
    }
    if (input[*cur] == 'u')
      return Err("Unicode escape sequences are not supported.");
  }

  *error_location = *cur - 1;  // Point to backslash.
  return Err("Unknown escape sequence.");
}

}  // namespace

// Public entry: classifies the token at |cur| as a plain or raw string literal opener for the
// given language, returning metadata the parser later feeds to ParseStringLiteral().
std::optional<StringLiteralBegin> DoesBeginStringLiteral(ExprLanguage lang, std::string_view input,
                                                         size_t cur) {
  if (cur >= input.size())
    return std::nullopt;  // No room.

  StringLiteralBegin info;
  info.language = lang;
  if (input[cur] == '"') {
    // Regular literal string. Leave the raw string marker empty.
    info.string_begin = cur;
    info.contents_begin = cur + 1;
    return info;
  }

  switch (lang) {
    case ExprLanguage::kC:
      return DoesBeginRawCStringLiteral(input, cur);
    case ExprLanguage::kRust:
      return DoesBeginRawRustStringLiteral(input, cur);
  }
  FXL_NOTREACHED();
  return std::nullopt;
}

// Consumes the string literal described by |info| starting at its contents, decoding escapes for
// non-raw strings. On success returns the decoded contents and sets |*in_out_cur| past the
// closing delimiter; on failure returns an Err with |*error_location| set to the offending
// offset.
ErrOr<std::string> ParseStringLiteral(std::string_view input, const StringLiteralBegin& info,
                                      size_t* in_out_cur, size_t* error_location) {
  FXL_DCHECK(info.contents_begin <= input.size());

  std::string result;

  size_t cur = info.contents_begin;
  while (cur < input.size()) {
    if (size_t end = EndsStringLiteral(input, info, cur)) {
      *in_out_cur = end;
      return result;
    }

    if (!info.is_raw && input[cur] == '\\') {
      cur++;  // Advance over backslash.
      if (cur == input.size()) {
        *error_location = cur - 1;
        return Err("Hit end of input before the end of the escape sequence.");
      }

      Err err = HandleEscaped(input, info, &cur, error_location, &result);
      if (err.has_error())
        return err;
    } else {
      // Non-escaped.
      result.push_back(input[cur]);
      cur++;
    }
  }

  // Hit the end without an end-of-string.
  *error_location = info.string_begin;
  return Err("Hit end of input before the end of the string.");
}

}  // namespace zxdb
// dllmain.cpp : Implementation of DllMain. #include "stdafx.h" #include "resource.h" #include "wptbho_i.h" #include "dllmain.h" #include "xdlldata.h" CwptbhoModule _AtlModule; HINSTANCE dll_hinstance = NULL; // DLL Entry Point extern "C" BOOL WINAPI DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved) { // Don't attach to Windows Explorer if (dwReason == DLL_PROCESS_ATTACH) { TCHAR pszLoader[MAX_PATH]; GetModuleFileName(NULL, pszLoader, MAX_PATH); if (!lstrcmpi(PathFindFileName(pszLoader), _T("explorer.exe"))) return FALSE; } #ifdef _MERGE_PROXYSTUB if (!PrxDllMain(hInstance, dwReason, lpReserved)) return FALSE; #endif dll_hinstance = hInstance; return _AtlModule.DllMain(dwReason, lpReserved); } // Used to determine whether the DLL can be unloaded by OLE. STDAPI DllCanUnloadNow(void) { #ifdef _MERGE_PROXYSTUB HRESULT hr = PrxDllCanUnloadNow(); if (hr != S_OK) return hr; #endif return _AtlModule.DllCanUnloadNow(); } // Returns a class factory to create an object of the requested type. STDAPI DllGetClassObject(REFCLSID rclsid, REFIID riid, LPVOID* ppv) { #ifdef _MERGE_PROXYSTUB if (PrxDllGetClassObject(rclsid, riid, ppv) == S_OK) return S_OK; #endif return _AtlModule.DllGetClassObject(rclsid, riid, ppv); } // DllRegisterServer - Adds entries to the system registry. STDAPI DllRegisterServer(void) { // registers object, typelib and all interfaces in typelib HRESULT hr = _AtlModule.DllRegisterServer(); #ifdef _MERGE_PROXYSTUB if (FAILED(hr)) return hr; hr = PrxDllRegisterServer(); #endif return hr; } // DllUnregisterServer - Removes entries from the system registry. STDAPI DllUnregisterServer(void) { HRESULT hr = _AtlModule.DllUnregisterServer(); #ifdef _MERGE_PROXYSTUB if (FAILED(hr)) return hr; hr = PrxDllRegisterServer(); if (FAILED(hr)) return hr; hr = PrxDllUnregisterServer(); #endif return hr; } // DllInstall - Adds/Removes entries to the system registry per user per machine. 
// Handles regsvr32's /i entry point: optionally switches ATL to per-user
// registration, then delegates to DllRegisterServer / DllUnregisterServer.
STDAPI DllInstall(BOOL bInstall, LPCWSTR pszCmdLine) {
  static const wchar_t szUserSwitch[] = L"user";

  // NOTE(review): _countof includes the terminating NUL, so this matches only
  // when the command line is exactly L"user" — same as the stock ATL template.
  if (pszCmdLine != NULL && _wcsnicmp(pszCmdLine, szUserSwitch, _countof(szUserSwitch)) == 0)
    ATL::AtlSetPerUserRegistration(true);

  HRESULT hr = E_FAIL;
  if (!bInstall) {
    hr = DllUnregisterServer();
  } else {
    hr = DllRegisterServer();
    if (FAILED(hr))
      DllUnregisterServer();  // roll back a partial registration
  }

  return hr;
}
// Copyright (c) 2019-present, Facebook, Inc. // All rights reserved. // // This source code is licensed under the license found in the // LICENSE file in the root directory of this source tree. // #include <iostream> #include <cstdlib> #include <string> #include <vector> #include <fstream> #include <iomanip> #include <bits/stdc++.h> using namespace std; int f_gold ( int ar [ ], int ar_size ) { int res = 0; for ( int i = 0; i < ar_size; i ++ ) res = res ^ ar [ i ]; return res; } //TOFILL int main() { int n_success = 0; vector<vector<int>> param0 {{1,1,7,10,12,19,20,22,23,25,27,32,33,39,43,44,45,46,47,47,48,49,50,51,55,58,68,69,73,76,77,79,81,84,92,95,99},{-12,-40,-68},{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},{86,56,98,58,7,40,84,45,69,77,36,50,72,99,95},{-90,-68,-66,-66,-58,-54,-52,-48,-40,-30,-26,-24,-20,-14,-10,-8,-6,-6,-6,18,30,34,36,42,50,56,64,68,70,74,92,92,98},{0,0,1,0,0,0,1,1,1,0,1,0,1,0,1,1,0,1,0,0,1,0,1,1,1,0,1,0,1,1,1,1,0,0,1,0,0,0},{3,21,47,51,78,84,84,85,86,99},{-26,-72,44,62,-22,22,28,-28,32,-72,72,96,92,-52,-2,-22,-76,-88,-74,-8,-30,54,0,-62,14,-92,-58,72,40,46,96,86,-54,-92,46,92,20,-96,-92,-70,-94,78,-92,-54,-90},{0,0,0,0,0,0,1,1,1,1,1,1,1,1,1},{69,1,12,81,78,18,81,47,49,19,99,40,52,47,71,69,80,72,66,84,72,6,98,89,3,87,81,85,37,14,5,36,26,74}}; vector<int> param1 {36,2,21,9,26,27,9,33,9,22}; for(int i = 0; i < param0.size(); ++i) { if(f_filled(&param0[i].front(),param1[i]) == f_gold(&param0[i].front(),param1[i])) { n_success+=1; } } cout << "#Results:" << " " << n_success << ", " << param0.size(); return 0; }
// Copyright (c) 2009-2012 The Bitcoin developers // Copyright (c) 2015-2017 The PIVX developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "activemasternode.h" #include "db.h" #include "init.h" #include "main.h" #include "masternode-budget.h" #include "masternode-payments.h" #include "masternodeconfig.h" #include "masternodeman.h" #include "rpcserver.h" #include "utilmoneystr.h" #include <boost/tokenizer.hpp> #include <fstream> using namespace json_spirit; void SendMoney(const CTxDestination& address, CAmount nValue, CWalletTx& wtxNew, AvailableCoinsType coin_type = ALL_COINS) { // Check amount if (nValue <= 0) throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid amount"); if (nValue > pwalletMain->GetBalance()) throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, "Insufficient funds"); string strError; if (pwalletMain->IsLocked()) { strError = "Error: Wallet locked, unable to create transaction!"; LogPrintf("SendMoney() : %s", strError); throw JSONRPCError(RPC_WALLET_ERROR, strError); } // Parse Monger address CScript scriptPubKey = GetScriptForDestination(address); // Create and send the transaction CReserveKey reservekey(pwalletMain); CAmount nFeeRequired; if (!pwalletMain->CreateTransaction(scriptPubKey, nValue, wtxNew, reservekey, nFeeRequired, strError, NULL, coin_type)) { if (nValue + nFeeRequired > pwalletMain->GetBalance()) strError = strprintf("Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!", FormatMoney(nFeeRequired)); LogPrintf("SendMoney() : %s\n", strError); throw JSONRPCError(RPC_WALLET_ERROR, strError); } if (!pwalletMain->CommitTransaction(wtxNew, reservekey)) throw JSONRPCError(RPC_WALLET_ERROR, "Error: The transaction was rejected! 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here."); } Value obfuscation(const Array& params, bool fHelp) { throw runtime_error("Obfuscation is not supported any more. User Zerocoin\n"); if (fHelp || params.size() == 0) throw runtime_error( "obfuscation <Mongeraddress> <amount>\n" "Mongeraddress, reset, or auto (AutoDenominate)" "<amount> is a real and will be rounded to the next 0.1" + HelpRequiringPassphrase()); if (pwalletMain->IsLocked()) throw JSONRPCError(RPC_WALLET_UNLOCK_NEEDED, "Error: Please enter the wallet passphrase with walletpassphrase first."); if (params[0].get_str() == "auto") { if (fMasterNode) return "ObfuScation is not supported from masternodes"; return "DoAutomaticDenominating " + (obfuScationPool.DoAutomaticDenominating() ? "successful" : ("failed: " + obfuScationPool.GetStatus())); } if (params[0].get_str() == "reset") { obfuScationPool.Reset(); return "successfully reset obfuscation"; } if (params.size() != 2) throw runtime_error( "obfuscation <Mongeraddress> <amount>\n" "Mongeraddress, denominate, or auto (AutoDenominate)" "<amount> is a real and will be rounded to the next 0.1" + HelpRequiringPassphrase()); CBitcoinAddress address(params[0].get_str()); if (!address.IsValid()) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Monger address"); // Amount CAmount nAmount = AmountFromValue(params[1]); // Wallet comments CWalletTx wtx; // string strError = pwalletMain->SendMoneyToDestination(address.Get(), nAmount, wtx, ONLY_DENOMINATED); SendMoney(address.Get(), nAmount, wtx, ONLY_DENOMINATED); // if (strError != "") // throw JSONRPCError(RPC_WALLET_ERROR, strError); return wtx.GetHash().GetHex(); } Value getpoolinfo(const Array& params, bool fHelp) { if (fHelp || params.size() != 0) throw runtime_error( "getpoolinfo\n" "\nReturns anonymous pool-related information\n" "\nResult:\n" "{\n" " \"current\": 
\"addr\", (string) Monger address of current masternode\n" " \"state\": xxxx, (string) unknown\n" " \"entries\": xxxx, (numeric) Number of entries\n" " \"accepted\": xxxx, (numeric) Number of entries accepted\n" "}\n" "\nExamples:\n" + HelpExampleCli("getpoolinfo", "") + HelpExampleRpc("getpoolinfo", "")); Object obj; obj.push_back(Pair("current_masternode", mnodeman.GetCurrentMasterNode()->addr.ToString())); obj.push_back(Pair("state", obfuScationPool.GetState())); obj.push_back(Pair("entries", obfuScationPool.GetEntriesCount())); obj.push_back(Pair("entries_accepted", obfuScationPool.GetCountEntriesAccepted())); return obj; } // This command is retained for backwards compatibility, but is depreciated. // Future removal of this command is planned to keep things clean. Value masternode(const Array& params, bool fHelp) { string strCommand; if (params.size() >= 1) strCommand = params[0].get_str(); if (fHelp || (strCommand != "start" && strCommand != "start-alias" && strCommand != "start-many" && strCommand != "start-all" && strCommand != "start-missing" && strCommand != "start-disabled" && strCommand != "list" && strCommand != "list-conf" && strCommand != "count" && strCommand != "enforce" && strCommand != "debug" && strCommand != "current" && strCommand != "winners" && strCommand != "genkey" && strCommand != "connect" && strCommand != "outputs" && strCommand != "status" && strCommand != "calcscore")) throw runtime_error( "masternode \"command\"...\n" "\nSet of commands to execute masternode related actions\n" "This command is depreciated, please see individual command documentation for future reference\n\n" "\nArguments:\n" "1. 
\"command\" (string or set of strings, required) The command to execute\n" "\nAvailable commands:\n" " count - Print count information of all known masternodes\n" " current - Print info on current masternode winner\n" " debug - Print masternode status\n" " genkey - Generate new masternodeprivkey\n" " outputs - Print masternode compatible outputs\n" " start - Start masternode configured in Monger.conf\n" " start-alias - Start single masternode by assigned alias configured in masternode.conf\n" " start-<mode> - Start masternodes configured in masternode.conf (<mode>: 'all', 'missing', 'disabled')\n" " status - Print masternode status information\n" " list - Print list of all known masternodes (see masternodelist for more info)\n" " list-conf - Print masternode.conf in JSON format\n" " winners - Print list of masternode winners\n"); if (strCommand == "list") { Array newParams(params.size() - 1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return listmasternodes(newParams, fHelp); } if (strCommand == "connect") { Array newParams(params.size() -1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return masternodeconnect(newParams, fHelp); } if (strCommand == "count") { Array newParams(params.size() - 1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return getmasternodecount(newParams, fHelp); } if (strCommand == "current") { Array newParams(params.size() - 1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return masternodecurrent(newParams, fHelp); } if (strCommand == "debug") { Array newParams(params.size() - 1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return masternodedebug(newParams, fHelp); } if (strCommand == "start" || strCommand == "start-alias" || strCommand == "start-many" || strCommand == "start-all" || strCommand == "start-missing" || strCommand == "start-disabled") { return startmasternode(params, fHelp); } if (strCommand == "genkey") { Array 
newParams(params.size() - 1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return createmasternodekey(newParams, fHelp); } if (strCommand == "list-conf") { Array newParams(params.size() - 1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return listmasternodeconf(newParams, fHelp); } if (strCommand == "outputs") { Array newParams(params.size() - 1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return getmasternodeoutputs(newParams, fHelp); } if (strCommand == "status") { Array newParams(params.size() - 1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return getmasternodestatus(newParams, fHelp); } if (strCommand == "winners") { Array newParams(params.size() - 1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return getmasternodewinners(newParams, fHelp); } if (strCommand == "calcscore") { Array newParams(params.size() - 1); std::copy(params.begin() + 1, params.end(), newParams.begin()); return getmasternodescores(newParams, fHelp); } return Value::null; } Value listmasternodes(const Array& params, bool fHelp) { std::string strFilter = ""; if (params.size() == 1) strFilter = params[0].get_str(); if (fHelp || (params.size() > 1)) throw runtime_error( "listmasternodes ( \"filter\" )\n" "\nGet a ranked list of masternodes\n" "\nArguments:\n" "1. \"filter\" (string, optional) Filter search text. 
Partial match by txhash, status, or addr.\n" "\nResult:\n" "[\n" " {\n" " \"rank\": n, (numeric) Masternode Rank (or 0 if not enabled)\n" " \"txhash\": \"hash\", (string) Collateral transaction hash\n" " \"outidx\": n, (numeric) Collateral transaction output index\n" " \"status\": s, (string) Status (ENABLED/EXPIRED/REMOVE/etc)\n" " \"addr\": \"addr\", (string) Masternode Monger address\n" " \"version\": v, (numeric) Masternode protocol version\n" " \"lastseen\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last seen\n" " \"activetime\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) masternode has been active\n" " \"lastpaid\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) masternode was last paid\n" " }\n" " ,...\n" "]\n" "\nExamples:\n" + HelpExampleCli("masternodelist", "") + HelpExampleRpc("masternodelist", "")); Array ret; int nHeight; { LOCK(cs_main); CBlockIndex* pindex = chainActive.Tip(); if(!pindex) return 0; nHeight = pindex->nHeight; } std::vector<pair<int, CMasternode> > vMasternodeRanks = mnodeman.GetMasternodeRanks(nHeight); BOOST_FOREACH (PAIRTYPE(int, CMasternode) & s, vMasternodeRanks) { Object obj; std::string strVin = s.second.vin.prevout.ToStringShort(); std::string strTxHash = s.second.vin.prevout.hash.ToString(); uint32_t oIdx = s.second.vin.prevout.n; CMasternode* mn = mnodeman.Find(s.second.vin); if (mn != NULL) { if (strFilter != "" && strTxHash.find(strFilter) == string::npos && mn->Status().find(strFilter) == string::npos && CBitcoinAddress(mn->pubKeyCollateralAddress.GetID()).ToString().find(strFilter) == string::npos) continue; std::string strStatus = mn->Status(); std::string strHost; int port; SplitHostPort(mn->addr.ToString(), port, strHost); CNetAddr node = CNetAddr(strHost, false); std::string strNetwork = GetNetworkName(node.GetNetwork()); obj.push_back(Pair("rank", (strStatus == "ENABLED" ? 
s.first : 0))); obj.push_back(Pair("network", strNetwork)); obj.push_back(Pair("txhash", strTxHash)); obj.push_back(Pair("outidx", (uint64_t)oIdx)); obj.push_back(Pair("status", strStatus)); obj.push_back(Pair("addr", CBitcoinAddress(mn->pubKeyCollateralAddress.GetID()).ToString())); obj.push_back(Pair("version", mn->protocolVersion)); obj.push_back(Pair("lastseen", (int64_t)mn->lastPing.sigTime)); obj.push_back(Pair("activetime", (int64_t)(mn->lastPing.sigTime - mn->sigTime))); obj.push_back(Pair("lastpaid", (int64_t)mn->GetLastPaid())); ret.push_back(obj); } } return ret; } Value masternodeconnect(const Array& params, bool fHelp) { if (fHelp || (params.size() != 1)) throw runtime_error( "masternodeconnect \"address\"\n" "\nAttempts to connect to specified masternode address\n" "\nArguments:\n" "1. \"address\" (string, required) IP or net address to connect to\n" "\nExamples:\n" + HelpExampleCli("masternodeconnect", "\"192.168.0.6:7121\"") + HelpExampleRpc("masternodeconnect", "\"192.168.0.6:7121\"")); std::string strAddress = params[0].get_str(); CService addr = CService(strAddress); CNode* pnode = ConnectNode((CAddress)addr, NULL, false); if (pnode) { pnode->Release(); return Value::null; } else { throw runtime_error("error connecting\n"); } } Value getmasternodecount (const Array& params, bool fHelp) { if (fHelp || (params.size() > 0)) throw runtime_error( "getmasternodecount\n" "\nGet masternode count values\n" "\nResult:\n" "{\n" " \"total\": n, (numeric) Total masternodes\n" " \"stable\": n, (numeric) Stable count\n" " \"obfcompat\": n, (numeric) Obfuscation Compatible\n" " \"enabled\": n, (numeric) Enabled masternodes\n" " \"inqueue\": n (numeric) Masternodes in queue\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmasternodecount", "") + HelpExampleRpc("getmasternodecount", "")); Object obj; int nCount = 0; int ipv4 = 0, ipv6 = 0, onion = 0; if (chainActive.Tip()) mnodeman.GetNextMasternodeInQueueForPayment(chainActive.Tip()->nHeight, true, nCount); 
mnodeman.CountNetworks(ActiveProtocol(), ipv4, ipv6, onion); obj.push_back(Pair("total", mnodeman.size())); obj.push_back(Pair("stable", mnodeman.stable_size())); obj.push_back(Pair("obfcompat", mnodeman.CountEnabled(ActiveProtocol()))); obj.push_back(Pair("enabled", mnodeman.CountEnabled())); obj.push_back(Pair("inqueue", nCount)); obj.push_back(Pair("ipv4", ipv4)); obj.push_back(Pair("ipv6", ipv6)); obj.push_back(Pair("onion", onion)); return obj; } Value masternodecurrent (const Array& params, bool fHelp) { if (fHelp || (params.size() != 0)) throw runtime_error( "masternodecurrent\n" "\nGet current masternode winner\n" "\nResult:\n" "{\n" " \"protocol\": xxxx, (numeric) Protocol version\n" " \"txhash\": \"xxxx\", (string) Collateral transaction hash\n" " \"pubkey\": \"xxxx\", (string) MN Public key\n" " \"lastseen\": xxx, (numeric) Time since epoch of last seen\n" " \"activeseconds\": xxx, (numeric) Seconds MN has been active\n" "}\n" "\nExamples:\n" + HelpExampleCli("masternodecurrent", "") + HelpExampleRpc("masternodecurrent", "")); CMasternode* winner = mnodeman.GetCurrentMasterNode(1); if (winner) { Object obj; obj.push_back(Pair("protocol", (int64_t)winner->protocolVersion)); obj.push_back(Pair("txhash", winner->vin.prevout.hash.ToString())); obj.push_back(Pair("pubkey", CBitcoinAddress(winner->pubKeyCollateralAddress.GetID()).ToString())); obj.push_back(Pair("lastseen", (winner->lastPing == CMasternodePing()) ? winner->sigTime : (int64_t)winner->lastPing.sigTime)); obj.push_back(Pair("activeseconds", (winner->lastPing == CMasternodePing()) ? 
0 : (int64_t)(winner->lastPing.sigTime - winner->sigTime))); return obj; } throw runtime_error("unknown"); } Value masternodedebug (const Array& params, bool fHelp) { if (fHelp || (params.size() != 0)) throw runtime_error( "masternodedebug\n" "\nPrint masternode status\n" "\nResult:\n" "\"status\" (string) Masternode status message\n" "\nExamples:\n" + HelpExampleCli("masternodedebug", "") + HelpExampleRpc("masternodedebug", "")); if (activeMasternode.status != ACTIVE_MASTERNODE_INITIAL || !masternodeSync.IsSynced()) return activeMasternode.GetStatus(); CTxIn vin = CTxIn(); CPubKey pubkey = CScript(); CKey key; if (!activeMasternode.GetMasterNodeVin(vin, pubkey, key)) throw runtime_error("Missing masternode input, please look at the documentation for instructions on masternode creation\n"); else return activeMasternode.GetStatus(); } Value startmasternode (const Array& params, bool fHelp) { std::string strCommand; if (params.size() >= 1) { strCommand = params[0].get_str(); // Backwards compatibility with legacy 'masternode' super-command forwarder if (strCommand == "start") strCommand = "local"; if (strCommand == "start-alias") strCommand = "alias"; if (strCommand == "start-all") strCommand = "all"; if (strCommand == "start-many") strCommand = "many"; if (strCommand == "start-missing") strCommand = "missing"; if (strCommand == "start-disabled") strCommand = "disabled"; } if (fHelp || params.size() < 2 || params.size() > 3 || (params.size() == 2 && (strCommand != "local" && strCommand != "all" && strCommand != "many" && strCommand != "missing" && strCommand != "disabled")) || (params.size() == 3 && strCommand != "alias")) throw runtime_error( "startmasternode \"local|all|many|missing|disabled|alias\" lockwallet ( \"alias\" )\n" "\nAttempts to start one or more masternode(s)\n" "\nArguments:\n" "1. set (string, required) Specify which set of masternode(s) to start.\n" "2. lockwallet (boolean, required) Lock wallet after completion.\n" "3. 
alias (string) Masternode alias. Required if using 'alias' as the set.\n" "\nResult: (for 'local' set):\n" "\"status\" (string) Masternode status message\n" "\nResult: (for other sets):\n" "{\n" " \"overall\": \"xxxx\", (string) Overall status message\n" " \"detail\": [\n" " {\n" " \"node\": \"xxxx\", (string) Node name or alias\n" " \"result\": \"xxxx\", (string) 'success' or 'failed'\n" " \"error\": \"xxxx\" (string) Error message, if failed\n" " }\n" " ,...\n" " ]\n" "}\n" "\nExamples:\n" + HelpExampleCli("startmasternode", "\"alias\" \"0\" \"my_mn\"") + HelpExampleRpc("startmasternode", "\"alias\" \"0\" \"my_mn\"")); bool fLock = (params[1].get_str() == "true" ? true : false); if (strCommand == "local") { if (!fMasterNode) throw runtime_error("you must set masternode=1 in the configuration\n"); if (pwalletMain->IsLocked()) throw JSONRPCError(RPC_WALLET_UNLOCK_NEEDED, "Error: Please enter the wallet passphrase with walletpassphrase first."); if (activeMasternode.status != ACTIVE_MASTERNODE_STARTED) { activeMasternode.status = ACTIVE_MASTERNODE_INITIAL; // TODO: consider better way activeMasternode.ManageStatus(); if (fLock) pwalletMain->Lock(); } return activeMasternode.GetStatus(); } if (strCommand == "all" || strCommand == "many" || strCommand == "missing" || strCommand == "disabled") { if (pwalletMain->IsLocked()) throw JSONRPCError(RPC_WALLET_UNLOCK_NEEDED, "Error: Please enter the wallet passphrase with walletpassphrase first."); if ((strCommand == "missing" || strCommand == "disabled") && (masternodeSync.RequestedMasternodeAssets <= MASTERNODE_SYNC_LIST || masternodeSync.RequestedMasternodeAssets == MASTERNODE_SYNC_FAILED)) { throw runtime_error("You can't use this command until masternode list is synced\n"); } std::vector<CMasternodeConfig::CMasternodeEntry> mnEntries; mnEntries = masternodeConfig.getEntries(); int successful = 0; int failed = 0; Array resultsObj; BOOST_FOREACH (CMasternodeConfig::CMasternodeEntry mne, masternodeConfig.getEntries()) { 
std::string errorMessage; int nIndex; if(!mne.castOutputIndex(nIndex)) continue; CTxIn vin = CTxIn(uint256(mne.getTxHash()), uint32_t(nIndex)); CMasternode* pmn = mnodeman.Find(vin); if (pmn != NULL) { if (strCommand == "missing") continue; if (strCommand == "disabled" && pmn->IsEnabled()) continue; } bool result = activeMasternode.Register(mne.getIp(), mne.getPrivKey(), mne.getTxHash(), mne.getOutputIndex(), errorMessage); Object statusObj; statusObj.push_back(Pair("alias", mne.getAlias())); statusObj.push_back(Pair("result", result ? "success" : "failed")); if (result) { successful++; statusObj.push_back(Pair("error", "")); } else { failed++; statusObj.push_back(Pair("error", errorMessage)); } resultsObj.push_back(statusObj); } if (fLock) pwalletMain->Lock(); Object returnObj; returnObj.push_back(Pair("overall", strprintf("Successfully started %d masternodes, failed to start %d, total %d", successful, failed, successful + failed))); returnObj.push_back(Pair("detail", resultsObj)); return returnObj; } if (strCommand == "alias") { std::string alias = params[2].get_str(); if (pwalletMain->IsLocked()) throw JSONRPCError(RPC_WALLET_UNLOCK_NEEDED, "Error: Please enter the wallet passphrase with walletpassphrase first."); bool found = false; int successful = 0; int failed = 0; Array resultsObj; Object statusObj; statusObj.push_back(Pair("alias", alias)); BOOST_FOREACH (CMasternodeConfig::CMasternodeEntry mne, masternodeConfig.getEntries()) { if (mne.getAlias() == alias) { found = true; std::string errorMessage; bool result = activeMasternode.Register(mne.getIp(), mne.getPrivKey(), mne.getTxHash(), mne.getOutputIndex(), errorMessage); statusObj.push_back(Pair("result", result ? 
"successful" : "failed")); if (result) { successful++; statusObj.push_back(Pair("error", "")); } else { failed++; statusObj.push_back(Pair("error", errorMessage)); } break; } } if (!found) { failed++; statusObj.push_back(Pair("result", "failed")); statusObj.push_back(Pair("error", "could not find alias in config. Verify with list-conf.")); } resultsObj.push_back(statusObj); if (fLock) pwalletMain->Lock(); Object returnObj; returnObj.push_back(Pair("overall", strprintf("Successfully started %d masternodes, failed to start %d, total %d", successful, failed, successful + failed))); returnObj.push_back(Pair("detail", resultsObj)); return returnObj; } return Value::null; } Value createmasternodekey (const Array& params, bool fHelp) { if (fHelp || (params.size() != 0)) throw runtime_error( "createmasternodekey\n" "\nCreate a new masternode private key\n" "\nResult:\n" "\"key\" (string) Masternode private key\n" "\nExamples:\n" + HelpExampleCli("createmasternodekey", "") + HelpExampleRpc("createmasternodekey", "")); CKey secret; secret.MakeNewKey(false); return CBitcoinSecret(secret).ToString(); } Value getmasternodeoutputs (const Array& params, bool fHelp) { if (fHelp || (params.size() != 0)) throw runtime_error( "getmasternodeoutputs\n" "\nPrint all masternode transaction outputs\n" "\nResult:\n" "[\n" " {\n" " \"txhash\": \"xxxx\", (string) output transaction hash\n" " \"outputidx\": n (numeric) output index number\n" " }\n" " ,...\n" "]\n" "\nExamples:\n" + HelpExampleCli("getmasternodeoutputs", "") + HelpExampleRpc("getmasternodeoutputs", "")); // Find possible candidates vector<COutput> possibleCoins = activeMasternode.SelectCoinsMasternode(); Array ret; BOOST_FOREACH (COutput& out, possibleCoins) { Object obj; obj.push_back(Pair("txhash", out.tx->GetHash().ToString())); obj.push_back(Pair("outputidx", out.i)); ret.push_back(obj); } return ret; } Value listmasternodeconf (const Array& params, bool fHelp) { std::string strFilter = ""; if (params.size() == 1) 
strFilter = params[0].get_str(); if (fHelp || (params.size() > 1)) throw runtime_error( "listmasternodeconf ( \"filter\" )\n" "\nPrint masternode.conf in JSON format\n" "\nArguments:\n" "1. \"filter\" (string, optional) Filter search text. Partial match on alias, address, txHash, or status.\n" "\nResult:\n" "[\n" " {\n" " \"alias\": \"xxxx\", (string) masternode alias\n" " \"address\": \"xxxx\", (string) masternode IP address\n" " \"privateKey\": \"xxxx\", (string) masternode private key\n" " \"txHash\": \"xxxx\", (string) transaction hash\n" " \"outputIndex\": n, (numeric) transaction output index\n" " \"status\": \"xxxx\" (string) masternode status\n" " }\n" " ,...\n" "]\n" "\nExamples:\n" + HelpExampleCli("listmasternodeconf", "") + HelpExampleRpc("listmasternodeconf", "")); std::vector<CMasternodeConfig::CMasternodeEntry> mnEntries; mnEntries = masternodeConfig.getEntries(); Array ret; BOOST_FOREACH (CMasternodeConfig::CMasternodeEntry mne, masternodeConfig.getEntries()) { int nIndex; if(!mne.castOutputIndex(nIndex)) continue; CTxIn vin = CTxIn(uint256(mne.getTxHash()), uint32_t(nIndex)); CMasternode* pmn = mnodeman.Find(vin); std::string strStatus = pmn ? 
pmn->Status() : "MISSING"; if (strFilter != "" && mne.getAlias().find(strFilter) == string::npos && mne.getIp().find(strFilter) == string::npos && mne.getTxHash().find(strFilter) == string::npos && strStatus.find(strFilter) == string::npos) continue; Object mnObj; mnObj.push_back(Pair("alias", mne.getAlias())); mnObj.push_back(Pair("address", mne.getIp())); mnObj.push_back(Pair("privateKey", mne.getPrivKey())); mnObj.push_back(Pair("txHash", mne.getTxHash())); mnObj.push_back(Pair("outputIndex", mne.getOutputIndex())); mnObj.push_back(Pair("status", strStatus)); ret.push_back(mnObj); } return ret; } Value getmasternodestatus (const Array& params, bool fHelp) { if (fHelp || (params.size() != 0)) throw runtime_error( "getmasternodestatus\n" "\nPrint masternode status\n" "\nResult:\n" "{\n" " \"txhash\": \"xxxx\", (string) Collateral transaction hash\n" " \"outputidx\": n, (numeric) Collateral transaction output index number\n" " \"netaddr\": \"xxxx\", (string) Masternode network address\n" " \"addr\": \"xxxx\", (string) Monger address for masternode payments\n" " \"status\": \"xxxx\", (string) Masternode status\n" " \"message\": \"xxxx\" (string) Masternode status message\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmasternodestatus", "") + HelpExampleRpc("getmasternodestatus", "")); if (!fMasterNode) throw runtime_error("This is not a masternode"); CMasternode* pmn = mnodeman.Find(activeMasternode.vin); if (pmn) { Object mnObj; mnObj.push_back(Pair("txhash", activeMasternode.vin.prevout.hash.ToString())); mnObj.push_back(Pair("outputidx", (uint64_t)activeMasternode.vin.prevout.n)); mnObj.push_back(Pair("netaddr", activeMasternode.service.ToString())); mnObj.push_back(Pair("addr", CBitcoinAddress(pmn->pubKeyCollateralAddress.GetID()).ToString())); mnObj.push_back(Pair("status", activeMasternode.status)); mnObj.push_back(Pair("message", activeMasternode.GetStatus())); return mnObj; } throw runtime_error("Masternode not found in the list of available masternodes. 
Current status: " + activeMasternode.GetStatus()); } Value getmasternodewinners (const Array& params, bool fHelp) { if (fHelp || params.size() > 3) throw runtime_error( "getmasternodewinners ( blocks \"filter\" )\n" "\nPrint the masternode winners for the last n blocks\n" "\nArguments:\n" "1. blocks (numeric, optional) Number of previous blocks to show (default: 10)\n" "2. filter (string, optional) Search filter matching MN address\n" "\nResult (single winner):\n" "[\n" " {\n" " \"nHeight\": n, (numeric) block height\n" " \"winner\": {\n" " \"address\": \"xxxx\", (string) Monger MN Address\n" " \"nVotes\": n, (numeric) Number of votes for winner\n" " }\n" " }\n" " ,...\n" "]\n" "\nResult (multiple winners):\n" "[\n" " {\n" " \"nHeight\": n, (numeric) block height\n" " \"winner\": [\n" " {\n" " \"address\": \"xxxx\", (string) Monger MN Address\n" " \"nVotes\": n, (numeric) Number of votes for winner\n" " }\n" " ,...\n" " ]\n" " }\n" " ,...\n" "]\n" "\nExamples:\n" + HelpExampleCli("getmasternodewinners", "") + HelpExampleRpc("getmasternodewinners", "")); int nHeight; { LOCK(cs_main); CBlockIndex* pindex = chainActive.Tip(); if(!pindex) return 0; nHeight = pindex->nHeight; } int nLast = 10; std::string strFilter = ""; if (params.size() >= 1) nLast = atoi(params[0].get_str()); if (params.size() == 2) strFilter = params[1].get_str(); Array ret; for (int i = nHeight - nLast; i < nHeight + 20; i++) { Object obj; obj.push_back(Pair("nHeight", i)); std::string strPayment = GetRequiredPaymentsString(i); if (strFilter != "" && strPayment.find(strFilter) == std::string::npos) continue; if (strPayment.find(',') != std::string::npos) { Array winner; boost::char_separator<char> sep(","); boost::tokenizer< boost::char_separator<char> > tokens(strPayment, sep); BOOST_FOREACH (const string& t, tokens) { Object addr; std::size_t pos = t.find(":"); std::string strAddress = t.substr(0,pos); uint64_t nVotes = atoi(t.substr(pos+1)); addr.push_back(Pair("address", strAddress)); 
addr.push_back(Pair("nVotes", nVotes)); winner.push_back(addr); } obj.push_back(Pair("winner", winner)); } else if (strPayment.find("Unknown") == std::string::npos) { Object winner; std::size_t pos = strPayment.find(":"); std::string strAddress = strPayment.substr(0,pos); uint64_t nVotes = atoi(strPayment.substr(pos+1)); winner.push_back(Pair("address", strAddress)); winner.push_back(Pair("nVotes", nVotes)); obj.push_back(Pair("winner", winner)); } else { Object winner; winner.push_back(Pair("address", strPayment)); winner.push_back(Pair("nVotes", 0)); obj.push_back(Pair("winner", winner)); } ret.push_back(obj); } return ret; } Value getmasternodescores (const Array& params, bool fHelp) { if (fHelp || params.size() > 1) throw runtime_error( "getmasternodescores ( blocks )\n" "\nPrint list of winning masternode by score\n" "\nArguments:\n" "1. blocks (numeric, optional) Show the last n blocks (default 10)\n" "\nResult:\n" "{\n" " xxxx: \"xxxx\" (numeric : string) Block height : Masternode hash\n" " ,...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmasternodescores", "") + HelpExampleRpc("getmasternodescores", "")); int nLast = 10; if (params.size() == 1) { try { nLast = std::stoi(params[0].get_str()); } catch (const boost::bad_lexical_cast &) { throw runtime_error("Exception on param 2"); } } Object obj; std::vector<CMasternode> vMasternodes = mnodeman.GetFullMasternodeVector(); for (int nHeight = chainActive.Tip()->nHeight - nLast; nHeight < chainActive.Tip()->nHeight + 20; nHeight++) { uint256 nHigh = 0; CMasternode* pBestMasternode = NULL; BOOST_FOREACH (CMasternode& mn, vMasternodes) { uint256 n = mn.CalculateScore(1, nHeight - 100); if (n > nHigh) { nHigh = n; pBestMasternode = &mn; } } if (pBestMasternode) obj.push_back(Pair(strprintf("%d", nHeight), pBestMasternode->vin.prevout.hash.ToString().c_str())); } return obj; }
/**
 * Copyright (c) 2017-present, Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <iostream>
#include <string>
#include <vector>

#include <gflags/gflags.h>
#include <glog/logging.h>
#include <gtest/gtest.h>

#include <ATen/ATen.h>

#include "tc/aten/aten_compiler.h"
#include "tc/core/mapping_options.h"

#include "../test/test_harness.h"
#include "../test/test_harness_aten_cuda.h"
#include "example_fixture.h"

#include "tc/c2/context.h"
#include "tc/core/cuda/cuda.h"
#include "tc/core/flags.h"

using namespace caffe2;

// Problem-size flags (NCHW). NOTE(review): the H flag is described as
// "Image width" and W as "Image height" (and similarly KH/KW) — the
// descriptions look swapped; confirm before relying on them.
DEFINE_uint32(N, 32, "Batch size (NCHW notation)");
DEFINE_uint32(G, 32, "Number of groups (NCHW notation)");
DEFINE_uint32(C, 4, "Input channels (NCHW notation)");
DEFINE_uint32(F, 4, "Output filters (NCHW notation)");
DEFINE_uint32(H, 56, "Image width (NCHW notation)");
DEFINE_uint32(W, 56, "Image height (NCHW notation)");
DEFINE_uint32(KH, 3, "Kernel width (NCHW notation)");
DEFINE_uint32(KW, 3, "Kernel height (NCHW notation)");

// Benchmark fixture for a TC-defined grouped convolution, validated against
// Caffe2's CUDA "Conv" operator as the reference implementation.
class GroupConvolution : public Benchmark {
 public:
  // Runs the TC group convolution for the given problem sizes using `options`
  // as the mapping strategy; when `useFlags` is true, also drives the
  // validate-proto / autotune paths controlled by command-line flags.
  void runGroupConvolution(
      uint32_t N,
      uint32_t G,
      uint32_t C,
      uint32_t F,
      uint32_t H,
      uint32_t W,
      uint32_t KH,
      uint32_t KW,
      const tc::MappingOptions& options,
      bool useFlags = false);
};

void GroupConvolution::runGroupConvolution(
    uint32_t N,
    uint32_t G,
    uint32_t C,
    uint32_t F,
    uint32_t H,
    uint32_t W,
    uint32_t KH,
    uint32_t KW,
    const tc::MappingOptions& options,
    bool useFlags) {
  // Build the reference computation with Caffe2: random I/W/B blobs fed to the
  // CUDA "Conv" operator with the requested group count and kernel size.
  Workspace w;
  auto AddInput =
      TestHarness::AddDeterministicallyRandomInput<float, CUDAContext>;
  AddInput(w, vector<TIndex>{N, G * C, H, W}, "I");
  AddInput(w, vector<TIndex>{G * F, C, KH, KW}, "W");
  AddInput(w, {G * F}, "B");
  Argument groupArg = MakeArgument<int>("group", G);
  Argument kernelHArg = MakeArgument<int>("kernel_h", KH);
  Argument kernelWArg = MakeArgument<int>("kernel_w", KW);
  OperatorDef op_def = TestHarness::ConfigureCUDA(
      "Conv", {"I", "W", "B"}, {"O"}, {groupArg, kernelHArg, kernelWArg});
  std::unique_ptr<OperatorBase> net(CreateOperator(op_def, &w));
  ASSERT_TRUE(net.get());
  net->Run();
  caffe2::Tensor<caffe2::CUDAContext> expectedBlob(
      w.GetBlob("O")->Get<caffe2::TensorCUDA>());

  // View the reference output as (N, G, F, H-KH+1, W-KW+1) — valid
  // (no-padding) convolution output shape, split per group.
  at::Tensor refOutput =
      makeATenTensor(expectedBlob, at::Backend::CUDA, at::kFloat)
          .resize_({N, G, F, H - KH + 1, W - KW + 1});
  auto checkFun = [&, refOutput](
                      const std::vector<at::Tensor>& inputs,
                      const std::vector<at::Tensor>& outputs) {
    TC_CUDA_RUNTIMEAPI_ENFORCE(cudaDeviceSynchronize());
    double prec = 1e-6; // relax precision to account for CUDNN Winograd kernels
    std::cout << "Checking expected output relative precision @" << prec;
    // C * KH * KW is the reduction depth used to scale the relative tolerance.
    checkRtol(outputs[0].sub(refOutput), inputs, C * KH * KW, prec);
    return true;
  };

  // Use the underlying C2 tensors CUDA pointers
  at::Tensor tI = makeATenTensor(
                      w.GetBlob("I")->Get<caffe2::TensorCUDA>(),
                      at::Backend::CUDA,
                      at::kFloat)
                      .resize_({N, G, C, H, W});
  at::Tensor tW = makeATenTensor(
                      w.GetBlob("W")->Get<caffe2::TensorCUDA>(),
                      at::Backend::CUDA,
                      at::kFloat)
                      .resize_({G, F, C, KH, KW});
  at::Tensor tB = makeATenTensor(
                      w.GetBlob("B")->Get<caffe2::TensorCUDA>(),
                      at::Backend::CUDA,
                      at::kFloat)
                      .resize_({G, F});
  std::vector<at::Tensor> inputs = {tI, tW, tB};

  // TC definition: per-group valid convolution plus per-(group,filter) bias.
  std::string tc = R"(
def group_convolution(float(N,G,C,H,W) I, float(G,F,C,KH,KW) W1, float(G,F) B)
-> (O)
{
    O(n, g, f, h, w) +=! I(n, g, c, h + kh, w + kw) * W1(g, f, c, kh, kw)
    O(n, g, f, h, w) = O(n, g, f, h, w) + B(g, f)
}
)";

  // Suffix encodes the problem size so tuner cache files are per-configuration.
  std::string suffix = std::string("_N_") + std::to_string(FLAGS_N) +
      std::string("_G_") + std::to_string(FLAGS_G) + std::string("_C_") +
      std::to_string(FLAGS_C) + std::string("_F_") + std::to_string(FLAGS_F) +
      std::string("_W_") + std::to_string(FLAGS_W) + std::string("_H_") +
      std::to_string(FLAGS_H) + std::string("_KW_") + std::to_string(FLAGS_KW) +
      std::string("_KH_") + std::to_string(FLAGS_KH);
  if (useFlags && FLAGS_validate_proto) {
    validateProto(
        FLAGS_save_tuner_proto_prefix +
            std::string("/group_convolution_cache") + suffix,
        tc,
        "group_convolution",
        inputs,
        checkFun);
  } else {
    std::vector<at::Tensor> outputs;
    Check(tc, "group_convolution", options, inputs, outputs, checkFun);
    if (useFlags) {
      autotune(
          FLAGS_save_tuner_proto_prefix +
              std::string("/group_convolution_cache") + suffix,
          FLAGS_save_tuner_proto_prefix +
              std::string("/group_convolution_best") + suffix,
          tc,
          "group_convolution",
          inputs,
          options,
          {options},
          checkFun);
    }
  }
}

// Default benchmark entry: problem sizes from flags, naive mapping options
// with a thread shape adapted to the image width.
TEST_F(GroupConvolution, GroupConvolution) {
  auto N = FLAGS_N;
  auto G = FLAGS_G;
  auto C = FLAGS_C;
  auto F = FLAGS_F;
  auto H = FLAGS_H;
  auto W = FLAGS_W;
  auto KH = FLAGS_KH;
  auto KW = FLAGS_KW;
  // If num threads is too small just get some better default
  auto threads = (W >= 10) ? std::vector<size_t>{W / 4, H / 2}
                           : std::vector<size_t>{4, 8, 4};
  auto options = tc::MappingOptions::makeNaiveMappingOptions()
                     .tile({1, 1, 1})
                     .mapToThreads(threads)
                     .mapToBlocks({32, 32})
                     .useSharedMemory(true)
                     .usePrivateMemory(false)
                     .unroll(1);
  runGroupConvolution(N, G, C, F, H, W, KH, KW, options, true);
}

// Pinned P100-autotuned configuration for N=32 G=32 C=16 F=16 14x14 images.
TEST_F(
    GroupConvolution,
    GroupConvolution_P100_autotuned_N_32_G_32_C_16_F_16_W_14_H_14_KW_3_KH_3) {
  uint32_t N = 32;
  uint32_t G = 32;
  uint32_t C = 16;
  uint32_t F = 16;
  uint32_t W = 14;
  uint32_t H = 14;
  uint32_t KW = 3;
  uint32_t KH = 3;
  auto options =
      tc::MappingOptions::makeNaiveMappingOptions()
          .useSharedMemory(true)
          .usePrivateMemory(true)
          .unrollCopyShared(true)
          .outerScheduleFusionStrategy(tc::FusionStrategy::Preserve3Coincident)
          .fixParametersBeforeScheduling(false)
          .tile(1, 1)
          .tileImperfectlyNested(false)
          .mapToBlocks(3, 32)
          .mapToThreads(8, 16, 1)
          .unroll(32);
  runGroupConvolution(N, G, C, F, H, W, KH, KW, options, true);
}

// Pinned P100-autotuned configuration for N=32 G=32 C=32 F=32 7x7 images.
TEST_F(
    GroupConvolution,
    GroupConvolution_P100_autotuned_N_32_G_32_C_32_F_32_W_7_H_7_KW_3_KH_3) {
  uint32_t N = 32;
  uint32_t G = 32;
  uint32_t C = 32;
  uint32_t F = 32;
  uint32_t W = 7;
  uint32_t H = 7;
  uint32_t KW = 3;
  uint32_t KH = 3;
  auto options =
      tc::MappingOptions::makeNaiveMappingOptions()
          .outerScheduleFusionStrategy(tc::FusionStrategy::Preserve3Coincident)
          .outerScheduleAllowSkewing(false)
          .outerSchedulePositiveOrthant(true)
          .intraTileScheduleFusionStrategy(
              tc::FusionStrategy::Preserve3Coincident)
          .intraTileScheduleAllowSkewing(false)
          .intraTileSchedulePositiveOrthant(true)
          .tile(1, 2, 3)
          .mapToThreads(8, 7, 4)
          .mapToBlocks(128, 16, 64)
          .unroll(16)
          .tileImperfectlyNested(false)
          .useSharedMemory(true)
          .usePrivateMemory(true)
          .unrollCopyShared(true)
          .matchLibraryCalls(true);
  runGroupConvolution(N, G, C, F, H, W, KH, KW, options, true);
}

// Pinned P100-autotuned configuration for N=32 G=32 C=4 F=4 56x56 images.
TEST_F(
    GroupConvolution,
    GroupConvolution_P100_autotuned_N_32_G_32_C_4_F_4_W_56_H_56_KW_3_KH_3) {
  uint32_t N = 32;
  uint32_t G = 32;
  uint32_t C = 4;
  uint32_t F = 4;
  uint32_t W = 56;
  uint32_t H = 56;
  uint32_t KW = 3;
  uint32_t KH = 3;
  auto options =
      tc::MappingOptions::makeNaiveMappingOptions()
          .outerScheduleFusionStrategy(tc::FusionStrategy::Preserve3Coincident)
          .outerScheduleAllowSkewing(false)
          .outerSchedulePositiveOrthant(true)
          .intraTileScheduleFusionStrategy(
              tc::FusionStrategy::Preserve3Coincident)
          .intraTileScheduleAllowSkewing(false)
          .intraTileSchedulePositiveOrthant(true)
          .tile(1, 1, 7, 7)
          .mapToThreads(56, 7)
          .mapToBlocks(16, 64, 1)
          .unroll(2)
          .tileImperfectlyNested(false)
          .useSharedMemory(true)
          .usePrivateMemory(false)
          .unrollCopyShared(false)
          .matchLibraryCalls(true);
  runGroupConvolution(N, G, C, F, H, W, KH, KW, options, true);
}

// Pinned P100-autotuned configuration for N=32 G=32 C=8 F=8 28x28 images.
TEST_F(
    GroupConvolution,
    GroupConvolution_P100_autotuned_N_32_G_32_C_8_F_8_W_28_H_28_KW_3_KH_3) {
  uint32_t N = 32;
  uint32_t G = 32;
  uint32_t C = 8;
  uint32_t F = 8;
  uint32_t W = 28;
  uint32_t H = 28;
  uint32_t KW = 3;
  uint32_t KH = 3;
  auto options =
      tc::MappingOptions::makeNaiveMappingOptions()
          .outerScheduleFusionStrategy(tc::FusionStrategy::Max)
          .outerScheduleAllowSkewing(false)
          .outerSchedulePositiveOrthant(true)
          .intraTileScheduleFusionStrategy(
              tc::FusionStrategy::Preserve3Coincident)
          .intraTileScheduleAllowSkewing(false)
          .intraTileSchedulePositiveOrthant(true)
          .tile(1, 1, 256, 14, 16)
          .mapToThreads(16, 14)
          .mapToBlocks(7, 16)
          .unroll(16)
          .tileImperfectlyNested(false)
          .useSharedMemory(true)
          .usePrivateMemory(false)
          .unrollCopyShared(true)
          .matchLibraryCalls(true);
  runGroupConvolution(N, G, C, F, H, W, KH, KW, options, true);
}

// So slow we consider this unimplemented
TEST_F(GroupConvolution, ATenGroupConvolutionReference) {
#if 0
  // this is a bad, too slow implementation, in fact it's NYI atm
  auto N = FLAGS_N;
  auto G = FLAGS_G;
  auto C = FLAGS_C;
  auto F = FLAGS_F;
  auto W = FLAGS_W;
  auto H = FLAGS_H;
  auto KW = FLAGS_KW;
  auto KH = FLAGS_KH;
  Reference(
      [&]() {
        at::Tensor I = at::CUDA(at::kFloat).rand({N, G * C, W, H});
        at::Tensor W = at::CUDA(at::kFloat).rand({G * F, C, KW, KH});
        at::Tensor B = at::CUDA(at::kFloat).rand({G * F});
        return std::vector<at::Tensor>{I, W, B};
      },
      [&](std::vector<at::Tensor>& inputs) {
        // in order to perform the group conv, we will be looping
        auto I1 = inputs[0].contiguous();
        auto W2 = inputs[1];
        auto B1 = inputs[2];
        std::vector<at::Tensor> outputs(G);
        for (int g = 0; g < G; ++g) {
          // for each group, first partition out the input tensors
          auto input_g = subtensor(I1, 1, G, g);
          auto weight_g = subtensor(W2, 0, G, g);
          auto bias_g = subtensor(B1, 0, G, g);
          outputs[g] =
              at::conv2d(input_g, weight_g, at::IntList({KH, KW}), bias_g);
        }
        // now its time to concatenate the output tensors
        auto output = outputs[0].type().tensor();
        at::cat_out(output, outputs, 1);
        return output;
      });
#endif
  std::cout << "No ATenGroupConvolutionReference available\n";
}

// Timed reference run of Caffe2's CUDA grouped Conv operator with the
// flag-driven problem sizes (setup happens once; only net->Run() is timed).
TEST_F(GroupConvolution, C2GroupConvolutionReference) {
  auto N = FLAGS_N;
  auto G = FLAGS_G;
  auto C = FLAGS_C;
  auto F = FLAGS_F;
  auto W = FLAGS_W;
  auto H = FLAGS_H;
  auto KW = FLAGS_KW;
  auto KH = FLAGS_KH;
  Workspace w;
  auto AddInput =
      TestHarness::AddDeterministicallyRandomInput<float, CUDAContext>;
  AddInput(w, vector<TIndex>{N, G * C, W, H}, "I");
  AddInput(w, vector<TIndex>{G * F, C, KW, KH}, "W");
  AddInput(w, {G * F}, "B");
  Argument groupArg = MakeArgument<int>("group", G);
  Argument kernelHArg = MakeArgument<int>("kernel_h", KH);
  Argument kernelWArg = MakeArgument<int>("kernel_w", KW);
  OperatorDef ndef = TestHarness::ConfigureCUDA(
      "Conv", {"I", "W", "B"}, {"O"}, {groupArg, kernelHArg, kernelWArg});
  std::unique_ptr<OperatorBase> net(CreateOperator(ndef, &w));
  Reference([&]() { return true; }, [&](bool flag) { net->Run(); });
}

// Initializes gtest/gflags/glog, seeds ATen's CUDA RNG, and runs all tests.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::gflags::ParseCommandLineFlags(&argc, &argv, true);
  ::google::InitGoogleLogging(argv[0]);
  setAtenSeed(tc::initRandomSeed(), at::Backend::CUDA);
  return RUN_ALL_TESTS();
}
#include <phosphor-logging/elog-errors.hpp>
#include <phosphor-logging/log.hpp>
#include <xyz/openbmc_project/Common/error.hpp>

#include <limits>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <variant>
#include <vector>

// Well-known D-Bus names used by the helpers below.
const char* propIntf = "org.freedesktop.DBus.Properties";
const char* mapperBusName = "xyz.openbmc_project.ObjectMapper";
const char* mapperPath = "/xyz/openbmc_project/object_mapper";
const char* mapperIntf = "xyz.openbmc_project.ObjectMapper";
const char* methodGetObject = "GetObject";
const char* methodGet = "Get";

using namespace phosphor::logging;
using namespace sdbusplus::xyz::openbmc_project::Common::Error;

// All property types this module expects to read over D-Bus.
using Value = std::variant<int64_t, double, std::string, bool>;

/**
 * @brief Look up the D-Bus service that owns @a path for interface @a intf
 *        using the ObjectMapper.
 *
 * @param bus  - bus connection to issue the mapper call on
 * @param path - object path to resolve
 * @param intf - interface the owner must implement
 * @return the owning service's bus name, or an empty string if the mapper
 *         reports ResourceNotFound (service not on D-Bus yet)
 * @throws rethrows any other SdBusError; std::runtime_error if the mapper
 *         returns an empty (but successful) response
 */
std::string getService(sdbusplus::bus::bus& bus, const std::string& path,
                       const char* intf)
{
    /* Get mapper object for sensor path */
    auto mapper = bus.new_method_call(mapperBusName, mapperPath, mapperIntf,
                                      methodGetObject);

    mapper.append(path.c_str());
    mapper.append(std::vector<std::string>({intf}));

    std::unordered_map<std::string, std::vector<std::string>> resp;

    try
    {
        auto msg = bus.call(mapper);
        msg.read(resp);
    }
    catch (const sdbusplus::exception::SdBusError& ex)
    {
        if (ex.name() == std::string(sdbusplus::xyz::openbmc_project::Common::
                                         Error::ResourceNotFound::errName))
        {
            // The service isn't on D-Bus yet.
            return std::string{};
        }

        throw;
    }

    if (resp.begin() == resp.end())
    {
        // Shouldn't happen, if the mapper can't find it it is handled above.
        throw std::runtime_error("Unable to find Object: " + path);
    }

    return resp.begin()->first;
}

/**
 * @brief Read a single D-Bus property via org.freedesktop.DBus.Properties.Get.
 *
 * @tparam T       - alternative of Value expected for this property
 * @param bus      - bus connection to issue the call on
 * @param service  - bus name of the property's owner
 * @param path     - object path holding the property
 * @param intf     - interface the property lives on
 * @param property - property name
 * @return the property value; on D-Bus failure returns
 *         std::numeric_limits<T>::quiet_NaN()
 * @throws std::bad_variant_access if the property's actual type is not T
 */
template <typename T>
T getDbusProperty(sdbusplus::bus::bus& bus, const std::string& service,
                  const std::string& path, const std::string& intf,
                  const std::string& property)
{
    Value value;

    auto method =
        bus.new_method_call(service.c_str(), path.c_str(), propIntf, methodGet);

    method.append(intf, property);

    try
    {
        auto msg = bus.call(method);
        msg.read(value);
    }
    catch (const sdbusplus::exception::SdBusError& ex)
    {
        // Previously the failure was swallowed silently; log it so the NaN
        // result returned to the caller can be traced back to its cause.
        log<level::ERR>("Failed reading D-Bus property",
                        entry("SERVICE=%s", service.c_str()),
                        entry("PATH=%s", path.c_str()),
                        entry("INTERFACE=%s", intf.c_str()),
                        entry("PROPERTY=%s", property.c_str()),
                        entry("ERROR=%s", ex.what()));
        // NOTE(review): quiet_NaN() is a real NaN only for floating-point T;
        // for other instantiations it value-initializes (0/false/empty), which
        // callers cannot distinguish from a genuine value — confirm callers
        // only use floating-point T, or rework to std::optional.
        return std::numeric_limits<T>::quiet_NaN();
    }

    return std::get<T>(value);
}
// Copyright (c) 2014-2017 The GermanCC Core developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "core_io.h"
#include "governance.h"
#include "governance-classes.h"
#include "governance-object.h"
#include "governance-vote.h"
#include "instantx.h"
#include "masternode-sync.h"
#include "masternodeman.h"
#include "messagesigner.h"
#include "util.h"

#include <univalue.h>

// Default-construct an empty, unknown-type governance object.
CGovernanceObject::CGovernanceObject()
    : cs(),
      nObjectType(GOVERNANCE_OBJECT_UNKNOWN),
      nHashParent(),
      nRevision(0),
      nTime(0),
      nDeletionTime(0),
      nCollateralHash(),
      strData(),
      vinMasternode(),
      vchSig(),
      fCachedLocalValidity(false),
      strLocalValidityError(),
      fCachedFunding(false),
      fCachedValid(true),
      fCachedDelete(false),
      fCachedEndorsed(false),
      fDirtyCache(true),
      fExpired(false),
      fUnparsable(false),
      mapCurrentMNVotes(),
      mapOrphanVotes(),
      fileVotes()
{
    // PARSE JSON DATA STORAGE (STRDATA)
    LoadData();
}

// Construct a governance object from its core fields; nObjectType is
// derived from the JSON payload by LoadData().
CGovernanceObject::CGovernanceObject(uint256 nHashParentIn, int nRevisionIn, int64_t nTimeIn, uint256 nCollateralHashIn, std::string strDataIn)
    : cs(),
      nObjectType(GOVERNANCE_OBJECT_UNKNOWN),
      nHashParent(nHashParentIn),
      nRevision(nRevisionIn),
      nTime(nTimeIn),
      nDeletionTime(0),
      nCollateralHash(nCollateralHashIn),
      strData(strDataIn),
      vinMasternode(),
      vchSig(),
      fCachedLocalValidity(false),
      strLocalValidityError(),
      fCachedFunding(false),
      fCachedValid(true),
      fCachedDelete(false),
      fCachedEndorsed(false),
      fDirtyCache(true),
      fExpired(false),
      fUnparsable(false),
      mapCurrentMNVotes(),
      mapOrphanVotes(),
      fileVotes()
{
    // PARSE JSON DATA STORAGE (STRDATA)
    LoadData();
}

// Copy constructor. Note: the critical section `cs` is default-constructed,
// not copied.
CGovernanceObject::CGovernanceObject(const CGovernanceObject& other)
    : cs(),
      nObjectType(other.nObjectType),
      nHashParent(other.nHashParent),
      nRevision(other.nRevision),
      nTime(other.nTime),
      nDeletionTime(other.nDeletionTime),
      nCollateralHash(other.nCollateralHash),
      strData(other.strData),
      vinMasternode(other.vinMasternode),
      vchSig(other.vchSig),
      fCachedLocalValidity(other.fCachedLocalValidity),
      strLocalValidityError(other.strLocalValidityError),
      fCachedFunding(other.fCachedFunding),
      fCachedValid(other.fCachedValid),
      fCachedDelete(other.fCachedDelete),
      fCachedEndorsed(other.fCachedEndorsed),
      fDirtyCache(other.fDirtyCache),
      fExpired(other.fExpired),
      fUnparsable(other.fUnparsable),
      mapCurrentMNVotes(other.mapCurrentMNVotes),
      mapOrphanVotes(other.mapOrphanVotes),
      fileVotes(other.fileVotes)
{}

// Validate and record a masternode vote on this object.
//
// Returns false (and fills `exception`) when the vote is rejected:
// unknown masternode (vote is parked in mapOrphanVotes and the MN is
// requested from the peer), bad/unsupported signal, obsolete timestamp,
// rate-limit violation, invalid signature, or mnodeman bookkeeping failure.
// On success the per-(masternode, signal) vote record is replaced, the vote
// is persisted in fileVotes, and cached tallies are marked dirty.
bool CGovernanceObject::ProcessVote(CNode* pfrom, const CGovernanceVote& vote, CGovernanceException& exception, CConnman& connman)
{
    if(!mnodeman.Has(vote.GetMasternodeOutpoint())) {
        std::ostringstream ostr;
        ostr << "CGovernanceObject::ProcessVote -- Masternode index not found";
        exception = CGovernanceException(ostr.str(), GOVERNANCE_EXCEPTION_WARNING);
        // Park the vote until the masternode shows up; it expires after
        // GOVERNANCE_ORPHAN_EXPIRATION_TIME seconds.
        if(mapOrphanVotes.Insert(vote.GetMasternodeOutpoint(), vote_time_pair_t(vote, GetAdjustedTime() + GOVERNANCE_ORPHAN_EXPIRATION_TIME))) {
            if(pfrom) {
                mnodeman.AskForMN(pfrom, vote.GetMasternodeOutpoint(), connman);
            }
            LogPrintf("%s\n", ostr.str());
        }
        else {
            LogPrint("gobject", "%s\n", ostr.str());
        }
        return false;
    }

    // Find (or create) the vote record for this masternode.
    vote_m_it it = mapCurrentMNVotes.find(vote.GetMasternodeOutpoint());
    if(it == mapCurrentMNVotes.end()) {
        it = mapCurrentMNVotes.insert(vote_m_t::value_type(vote.GetMasternodeOutpoint(), vote_rec_t())).first;
    }
    vote_rec_t& recVote = it->second;
    vote_signal_enum_t eSignal = vote.GetSignal();
    if(eSignal == VOTE_SIGNAL_NONE) {
        std::ostringstream ostr;
        ostr << "CGovernanceObject::ProcessVote -- Vote signal: none";
        LogPrint("gobject", "%s\n", ostr.str());
        exception = CGovernanceException(ostr.str(), GOVERNANCE_EXCEPTION_WARNING);
        return false;
    }
    if(eSignal > MAX_SUPPORTED_VOTE_SIGNAL) {
        std::ostringstream ostr;
        ostr << "CGovernanceObject::ProcessVote -- Unsupported vote signal: " << CGovernanceVoting::ConvertSignalToString(vote.GetSignal());
        LogPrintf("%s\n", ostr.str());
        // Misbehaviour score 20 for sending an unsupported signal.
        exception = CGovernanceException(ostr.str(), GOVERNANCE_EXCEPTION_PERMANENT_ERROR, 20);
        return false;
    }

    // Find (or create) the per-signal vote instance within this MN's record.
    vote_instance_m_it it2 = recVote.mapInstances.find(int(eSignal));
    if(it2 == recVote.mapInstances.end()) {
        it2 = recVote.mapInstances.insert(vote_instance_m_t::value_type(int(eSignal), vote_instance_t())).first;
    }
    vote_instance_t& voteInstance = it2->second;

    // Reject obsolete votes
    if(vote.GetTimestamp() < voteInstance.nCreationTime) {
        std::ostringstream ostr;
        ostr << "CGovernanceObject::ProcessVote -- Obsolete vote";
        LogPrint("gobject", "%s\n", ostr.str());
        exception = CGovernanceException(ostr.str(), GOVERNANCE_EXCEPTION_NONE);
        return false;
    }

    int64_t nNow = GetAdjustedTime();
    int64_t nVoteTimeUpdate = voteInstance.nTime;
    if(governance.AreRateChecksEnabled()) {
        // Rate-limit vote updates per (MN, signal) to one per
        // GOVERNANCE_UPDATE_MIN seconds.
        int64_t nTimeDelta = nNow - voteInstance.nTime;
        if(nTimeDelta < GOVERNANCE_UPDATE_MIN) {
            std::ostringstream ostr;
            ostr << "CGovernanceObject::ProcessVote -- Masternode voting too often"
                 << ", MN outpoint = " << vote.GetMasternodeOutpoint().ToStringShort()
                 << ", governance object hash = " << GetHash().ToString()
                 << ", time delta = " << nTimeDelta;
            LogPrint("gobject", "%s\n", ostr.str());
            exception = CGovernanceException(ostr.str(), GOVERNANCE_EXCEPTION_TEMPORARY_ERROR);
            nVoteTimeUpdate = nNow;
            return false;
        }
    }

    // Finally check that the vote is actually valid (done last because of cost of signature verification)
    if(!vote.IsValid(true)) {
        std::ostringstream ostr;
        ostr << "CGovernanceObject::ProcessVote -- Invalid vote"
             << ", MN outpoint = " << vote.GetMasternodeOutpoint().ToStringShort()
             << ", governance object hash = " << GetHash().ToString()
             << ", vote hash = " << vote.GetHash().ToString();
        LogPrintf("%s\n", ostr.str());
        exception = CGovernanceException(ostr.str(), GOVERNANCE_EXCEPTION_PERMANENT_ERROR, 20);
        governance.AddInvalidVote(vote);
        return false;
    }

    if(!mnodeman.AddGovernanceVote(vote.GetMasternodeOutpoint(), vote.GetParentHash())) {
        std::ostringstream ostr;
        ostr << "CGovernanceObject::ProcessVote -- Unable to add governance vote"
             << ", MN outpoint = " << vote.GetMasternodeOutpoint().ToStringShort()
             << ", governance object hash = " << GetHash().ToString();
        LogPrint("gobject", "%s\n", ostr.str());
        exception = CGovernanceException(ostr.str(), GOVERNANCE_EXCEPTION_PERMANENT_ERROR);
        return false;
    }

    // Accept: overwrite the instance, persist, and invalidate cached tallies.
    voteInstance = vote_instance_t(vote.GetOutcome(), nVoteTimeUpdate, vote.GetTimestamp());
    if(!fileVotes.HasVote(vote.GetHash())) {
        fileVotes.AddVote(vote);
    }
    fDirtyCache = true;
    return true;
}

// Drop in-memory and on-disk votes from masternodes that no longer exist.
void CGovernanceObject::ClearMasternodeVotes()
{
    vote_m_it it = mapCurrentMNVotes.begin();
    while(it != mapCurrentMNVotes.end()) {
        if(!mnodeman.Has(it->first)) {
            fileVotes.RemoveVotesFromMasternode(it->first);
            // Post-increment keeps the iterator valid across erase().
            mapCurrentMNVotes.erase(it++);
        }
        else {
            ++it;
        }
    }
}

// Build the canonical pipe-delimited message that masternodes sign for
// watchdog/trigger objects.
std::string CGovernanceObject::GetSignatureMessage() const
{
    LOCK(cs);
    std::string strMessage = nHashParent.ToString() + "|" +
        boost::lexical_cast<std::string>(nRevision) + "|" +
        boost::lexical_cast<std::string>(nTime) + "|" +
        strData + "|" +
        vinMasternode.prevout.ToStringShort() + "|" +
        nCollateralHash.ToString();
    return strMessage;
}

// Record which masternode collateral outpoint created this object.
void CGovernanceObject::SetMasternodeVin(const COutPoint& outpoint)
{
    vinMasternode = CTxIn(outpoint);
}

// Sign the signature message with the masternode key and immediately
// verify the produced signature against the matching public key.
bool CGovernanceObject::Sign(CKey& keyMasternode, CPubKey& pubKeyMasternode)
{
    std::string strError;
    std::string strMessage = GetSignatureMessage();

    LOCK(cs);
    if(!CMessageSigner::SignMessage(strMessage, vchSig, keyMasternode)) {
        LogPrintf("CGovernanceObject::Sign -- SignMessage() failed\n");
        return false;
    }

    if(!CMessageSigner::VerifyMessage(pubKeyMasternode, vchSig, strMessage, strError)) {
        LogPrintf("CGovernanceObject::Sign -- VerifyMessage() failed, error: %s\n", strError);
        return false;
    }

    LogPrint("gobject", "CGovernanceObject::Sign -- pubkey id = %s, vin = %s\n",
             pubKeyMasternode.GetID().ToString(), vinMasternode.prevout.ToStringShort());

    return true;
}

// Verify the stored signature against the given masternode public key.
bool CGovernanceObject::CheckSignature(CPubKey& pubKeyMasternode)
{
    std::string strError;
    std::string strMessage = GetSignatureMessage();
    LOCK(cs);
    if(!CMessageSigner::VerifyMessage(pubKeyMasternode, vchSig, strMessage, strError)) {
        LogPrintf("CGovernance::CheckSignature -- VerifyMessage() failed, error: %s\n", strError);
        return false;
    }
    return true;
}

// Returns the object's subtype, or -1 when none applies.
int CGovernanceObject::GetObjectSubtype()
{
    // todo - 12.1
    //   - detect subtype from strData json, obj["subtype"]

    if(nObjectType == GOVERNANCE_OBJECT_TRIGGER) return TRIGGER_SUPERBLOCK;
    return -1;
}

// Deterministic hash identifying this object on the network.
uint256 CGovernanceObject::GetHash() const
{
    // CREATE HASH OF ALL IMPORTANT PIECES OF DATA
    CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION);
    ss << nHashParent;
    ss << nRevision;
    ss << nTime;
    ss << strData;
    ss << vinMasternode;
    ss << vchSig;
    // fee_tx is left out on purpose
    uint256 h1 = ss.GetHash();

    DBG( printf("CGovernanceObject::GetHash %i %li %s\n", nRevision, nTime, strData.c_str()); );

    return h1;
}

/**
*   Return the actual object from the strData JSON structure.
*   Returns an empty object on error.
*/
UniValue CGovernanceObject::GetJSONObject()
{
    UniValue obj(UniValue::VOBJ);
    if(strData.empty()) {
        return obj;
    }

    UniValue objResult(UniValue::VOBJ);
    GetData(objResult);

    // Payload layout appears to be [[<name>, {<object>}], ...]; the inner
    // object of the first entry is returned. at() throws on malformed data,
    // which LoadData() catches.
    std::vector<UniValue> arr1 = objResult.getValues();
    std::vector<UniValue> arr2 = arr1.at( 0 ).getValues();
    obj = arr2.at( 1 );

    return obj;
}

/**
*   LoadData
*   --------------------------------------------------------
*
*   Attempt to load data from strData. Sets nObjectType from the JSON
*   "type" field; on any parse error, marks the object fUnparsable
*   instead of throwing.
*/
void CGovernanceObject::LoadData()
{
    // todo : 12.1 - resolved
    //return;

    if(strData.empty()) {
        return;
    }

    try  {
        // ATTEMPT TO LOAD JSON STRING FROM STRDATA
        UniValue objResult(UniValue::VOBJ);
        GetData(objResult);

        DBG( cout << "CGovernanceObject::LoadData strData = "
             << GetDataAsString()
             << endl; );

        UniValue obj = GetJSONObject();
        nObjectType = obj["type"].get_int();
    }
    catch(std::exception& e) {
        fUnparsable = true;
        std::ostringstream ostr;
        ostr << "CGovernanceObject::LoadData Error parsing JSON"
             << ", e.what() = " << e.what();
        DBG( cout << ostr.str() << endl; );
        LogPrintf("%s\n", ostr.str());
        return;
    }
    catch(...) {
        fUnparsable = true;
        std::ostringstream ostr;
        ostr << "CGovernanceObject::LoadData Unknown Error parsing JSON";
        DBG( cout << ostr.str() << endl; );
        LogPrintf("%s\n", ostr.str());
        return;
    }
}

/**
*   GetData - Example usage:
*   --------------------------------------------------------
*
*   Decode governance object data (hex-encoded JSON) into UniValue(VOBJ)
*
*/
void CGovernanceObject::GetData(UniValue& objResult)
{
    UniValue o(UniValue::VOBJ);
    std::string s = GetDataAsString();
    o.read(s);
    objResult = o;
}

/**
*   GetData - As hex-encoded string (raw storage format)
*   --------------------------------------------------------
*
*/

std::string CGovernanceObject::GetDataAsHex()
{
    return strData;
}

// Hex-decode strData into the underlying JSON text.
std::string CGovernanceObject::GetDataAsString()
{
    std::vector<unsigned char> v = ParseHex(strData);
    std::string s(v.begin(), v.end());

    return s;
}

// Refresh the cached local-validity flag (collateral is NOT re-checked here;
// it was checked on original arrival).
void CGovernanceObject::UpdateLocalValidity()
{
    LOCK(cs_main);
    // THIS DOES NOT CHECK COLLATERAL, THIS IS CHECKED UPON ORIGINAL ARRIVAL
    fCachedLocalValidity = IsValidLocally(strLocalValidityError, false);
};


// Convenience overload discarding the missing-masternode/confirmation flags.
bool CGovernanceObject::IsValidLocally(std::string& strError, bool fCheckCollateral)
{
    bool fMissingMasternode = false;
    bool fMissingConfirmations = false;

    return IsValidLocally(strError, fMissingMasternode, fMissingConfirmations, fCheckCollateral);
}

// Full local validity check: object type, masternode signature (for
// trigger/watchdog objects), and optionally collateral. The out-flags let
// callers distinguish "retry later" conditions from hard failures.
bool CGovernanceObject::IsValidLocally(std::string& strError, bool& fMissingMasternode, bool& fMissingConfirmations, bool fCheckCollateral)
{
    fMissingMasternode = false;
    fMissingConfirmations = false;

    if(fUnparsable) {
        strError = "Object data unparseable";
        return false;
    }

    switch(nObjectType) {
        case GOVERNANCE_OBJECT_PROPOSAL:
        case GOVERNANCE_OBJECT_TRIGGER:
        case GOVERNANCE_OBJECT_WATCHDOG:
            break;
        default:
            strError = strprintf("Invalid object type %d", nObjectType);
            return false;
    }

    // IF ABSOLUTE NO COUNT (NO-YES VALID VOTES) IS MORE THAN 10% OF THE NETWORK MASTERNODES, OBJ IS INVALID

    // CHECK COLLATERAL IF REQUIRED (HIGH CPU USAGE)
    if(fCheckCollateral) {
        if((nObjectType == GOVERNANCE_OBJECT_TRIGGER) || (nObjectType == GOVERNANCE_OBJECT_WATCHDOG)) {
            // Trigger/watchdog objects are backed by a masternode signature
            // rather than a collateral fee transaction.
            std::string strOutpoint = vinMasternode.prevout.ToStringShort();
            masternode_info_t infoMn;
            if(!mnodeman.GetMasternodeInfo(vinMasternode.prevout, infoMn)) {
                CMasternode::CollateralStatus err = CMasternode::CheckCollateral(vinMasternode.prevout);
                if (err == CMasternode::COLLATERAL_OK) {
                    // Collateral exists but the MN isn't in our list yet —
                    // signal the caller to retry.
                    fMissingMasternode = true;
                    strError = "Masternode not found: " + strOutpoint;
                } else if (err == CMasternode::COLLATERAL_UTXO_NOT_FOUND) {
                    strError = "Failed to find Masternode UTXO, missing masternode=" + strOutpoint + "\n";
                } else if (err == CMasternode::COLLATERAL_INVALID_AMOUNT) {
                    strError = "Masternode UTXO should have 100000 GerCC, missing masternode=" + strOutpoint + "\n";
                }
                return false;
            }

            // Check that we have a valid MN signature
            if(!CheckSignature(infoMn.pubKeyMasternode)) {
                strError = "Invalid masternode signature for: " + strOutpoint + ", pubkey id = " + infoMn.pubKeyMasternode.GetID().ToString();
                return false;
            }

            return true;
        }

        if (!IsCollateralValid(strError, fMissingConfirmations))
            return false;
    }

    /*
        TODO

        - There might be an issue with multisig in the coinbase on mainnet, we will add support for it in a future release.
        - Post 12.2+ (test multisig coinbase transaction)
    */

    // 12.1 - todo - compile error
    // if(address.IsPayToScriptHash()) {
    //     strError = "Governance system - multisig is not currently supported";
    //     return false;
    // }

    return true;
}

// Minimum collateral fee required for this object type; MAX_MONEY for
// unknown types (effectively never satisfiable).
CAmount CGovernanceObject::GetMinCollateralFee()
{
    // Only 1 type has a fee for the moment but switch statement allows for future object types
    switch(nObjectType) {
        case GOVERNANCE_OBJECT_PROPOSAL:    return GOVERNANCE_PROPOSAL_FEE_TX;
        case GOVERNANCE_OBJECT_TRIGGER:     return 0;
        case GOVERNANCE_OBJECT_WATCHDOG:    return 0;
        default:                            return MAX_MONEY;
    }
}

// Verify the collateral fee transaction: it must contain an OP_RETURN output
// committing to this object's hash with at least the minimum fee, and must
// have enough confirmations (InstantSend locks count towards them).
bool CGovernanceObject::IsCollateralValid(std::string& strError, bool& fMissingConfirmations)
{
    strError = "";
    fMissingConfirmations = false;
    CAmount nMinFee = GetMinCollateralFee();
    uint256 nExpectedHash = GetHash();

    CTransaction txCollateral;
    uint256 nBlockHash;

    // RETRIEVE TRANSACTION IN QUESTION
    if(!GetTransaction(nCollateralHash, txCollateral, Params().GetConsensus(), nBlockHash, true)){
        // NOTE(review): txCollateral was not populated on this path, so the
        // formatted tx is empty — confirm whether nCollateralHash was meant.
        strError = strprintf("Can't find collateral tx %s", txCollateral.ToString());
        LogPrintf("CGovernanceObject::IsCollateralValid -- %s\n", strError);
        return false;
    }

    if(txCollateral.vout.size() < 1) {
        strError = strprintf("tx vout size less than 1 | %d", txCollateral.vout.size());
        LogPrintf("CGovernanceObject::IsCollateralValid -- %s\n", strError);
        return false;
    }

    // LOOK FOR SPECIALIZED GOVERNANCE SCRIPT (PROOF OF BURN)

    CScript findScript;
    findScript << OP_RETURN << ToByteVector(nExpectedHash);

    DBG( cout << "IsCollateralValid: txCollateral.vout.size() = " << txCollateral.vout.size() << endl; );

    DBG( cout << "IsCollateralValid: findScript = " << ScriptToAsmStr( findScript, false ) << endl; );

    DBG( cout << "IsCollateralValid: nMinFee = " << nMinFee << endl; );


    bool foundOpReturn = false;
    BOOST_FOREACH(const CTxOut o, txCollateral.vout) {
        DBG( cout << "IsCollateralValid txout : " << o.ToString()
             << ", o.nValue = " << o.nValue
             << ", o.scriptPubKey = " << ScriptToAsmStr( o.scriptPubKey, false )
             << endl; );
        if(!o.scriptPubKey.IsPayToPublicKeyHash() && !o.scriptPubKey.IsUnspendable()) {
            strError = strprintf("Invalid Script %s", txCollateral.ToString());
            LogPrintf ("CGovernanceObject::IsCollateralValid -- %s\n", strError);
            return false;
        }
        if(o.scriptPubKey == findScript && o.nValue >= nMinFee) {
            DBG( cout << "IsCollateralValid foundOpReturn = true" << endl; );
            foundOpReturn = true;
        }
        else  {
            DBG( cout << "IsCollateralValid No match, continuing" << endl; );
        }

    }

    if(!foundOpReturn){
        strError = strprintf("Couldn't find opReturn %s in %s", nExpectedHash.ToString(), txCollateral.ToString());
        LogPrintf ("CGovernanceObject::IsCollateralValid -- %s\n", strError);
        return false;
    }

    // GET CONFIRMATIONS FOR TRANSACTION

    AssertLockHeld(cs_main);
    // Start with InstantSend lock confirmations, then add on-chain depth.
    int nConfirmationsIn = instantsend.GetConfirmations(nCollateralHash);
    if (nBlockHash != uint256()) {
        BlockMap::iterator mi = mapBlockIndex.find(nBlockHash);
        if (mi != mapBlockIndex.end() && (*mi).second) {
            CBlockIndex* pindex = (*mi).second;
            if (chainActive.Contains(pindex)) {
                nConfirmationsIn += chainActive.Height() - pindex->nHeight + 1;
            }
        }
    }

    if(nConfirmationsIn < GOVERNANCE_FEE_CONFIRMATIONS) {
        strError = strprintf("Collateral requires at least %d confirmations to be relayed throughout the network (it has only %d)", GOVERNANCE_FEE_CONFIRMATIONS, nConfirmationsIn);
        if (nConfirmationsIn >= GOVERNANCE_MIN_RELAY_FEE_CONFIRMATIONS) {
            fMissingConfirmations = true;
            strError += ", pre-accepted -- waiting for required confirmations";
        } else {
            strError += ", rejected -- try again later";
        }
        LogPrintf ("CGovernanceObject::IsCollateralValid -- %s\n", strError);

        return false;
    }

    strError = "valid";
    return true;
}

// Count votes across all masternodes matching both the given signal and
// outcome.
int CGovernanceObject::CountMatchingVotes(vote_signal_enum_t eVoteSignalIn, vote_outcome_enum_t eVoteOutcomeIn) const
{
    int nCount = 0;
    for(vote_m_cit it = mapCurrentMNVotes.begin(); it != mapCurrentMNVotes.end(); ++it) {
        const vote_rec_t& recVote = it->second;
        vote_instance_m_cit it2 = recVote.mapInstances.find(eVoteSignalIn);
        if(it2 == recVote.mapInstances.end()) {
            continue;
        }
        const vote_instance_t& voteInstance = it2->second;
        if(voteInstance.eOutcome == eVoteOutcomeIn) {
            ++nCount;
        }
    }
    return nCount;
}

/**
*   Get specific vote counts for each outcome (funding, validity, etc)
*/

// Net yes votes (yes minus no) for the given signal; may be negative.
int CGovernanceObject::GetAbsoluteYesCount(vote_signal_enum_t eVoteSignalIn) const
{
    return GetYesCount(eVoteSignalIn) - GetNoCount(eVoteSignalIn);
}

// Net no votes (no minus yes) for the given signal; may be negative.
int CGovernanceObject::GetAbsoluteNoCount(vote_signal_enum_t eVoteSignalIn) const
{
    return GetNoCount(eVoteSignalIn) - GetYesCount(eVoteSignalIn);
}

int CGovernanceObject::GetYesCount(vote_signal_enum_t eVoteSignalIn) const
{
    return CountMatchingVotes(eVoteSignalIn, VOTE_OUTCOME_YES);
}

int CGovernanceObject::GetNoCount(vote_signal_enum_t eVoteSignalIn) const
{
    return CountMatchingVotes(eVoteSignalIn, VOTE_OUTCOME_NO);
}

int CGovernanceObject::GetAbstainCount(vote_signal_enum_t eVoteSignalIn) const
{
    return CountMatchingVotes(eVoteSignalIn, VOTE_OUTCOME_ABSTAIN);
}

// Copy out the vote record for one masternode; returns false if that
// masternode has not voted on this object.
bool CGovernanceObject::GetCurrentMNVotes(const COutPoint& mnCollateralOutpoint, vote_rec_t& voteRecord)
{
    vote_m_it it = mapCurrentMNVotes.find(mnCollateralOutpoint);
    if (it == mapCurrentMNVotes.end()) {
        return false;
    }
    voteRecord = it->second;
    return  true;
}

// Announce this object's inventory to peers (only once fully synced).
void CGovernanceObject::Relay(CConnman& connman)
{
    // Do not relay until fully synced
    if(!masternodeSync.IsSynced()) {
        LogPrint("gobject", "CGovernanceObject::Relay -- won't relay until fully synced\n");
        return;
    }

    CInv inv(MSG_GOVERNANCE_OBJECT, GetHash());
    connman.RelayInv(inv, MIN_GOVERNANCE_PEER_PROTO_VERSION);
}

// Recompute the cached funding/valid/delete/endorsed flags from current
// vote tallies against network-size-based quorum thresholds.
void CGovernanceObject::UpdateSentinelVariables()
{
    // CALCULATE MINIMUM SUPPORT LEVELS REQUIRED

    int nMnCount = mnodeman.CountEnabled();
    if(nMnCount == 0) return;

    // CALCULATE THE MINIMUM VOTE COUNT REQUIRED FOR FULL SIGNAL

    // todo - 12.1 - should be set to `10` after governance vote compression is implemented
    int nAbsVoteReq = std::max(Params().GetConsensus().nGovernanceMinQuorum, nMnCount / 10);
    // Deletion requires a two-thirds supermajority.
    int nAbsDeleteReq = std::max(Params().GetConsensus().nGovernanceMinQuorum, (2 * nMnCount) / 3);
    // todo - 12.1 - Temporarily set to 1 for testing - reverted
    //nAbsVoteReq = 1;

    // SET SENTINEL FLAGS TO FALSE
    fCachedFunding = false;
    fCachedValid = true; //default to valid
    fCachedEndorsed = false;
    fDirtyCache = false;

    // SET SENTINEL FLAGS TO TRUE IF MINIMUM SUPPORT LEVELS ARE REACHED
    // ARE ANY OF THESE FLAGS CURRENTLY ACTIVATED?

    if(GetAbsoluteYesCount(VOTE_SIGNAL_FUNDING) >= nAbsVoteReq) fCachedFunding = true;
    // fCachedDelete latches: once set it is never cleared here.
    if((GetAbsoluteYesCount(VOTE_SIGNAL_DELETE) >= nAbsDeleteReq) && !fCachedDelete) {
        fCachedDelete = true;
        if(nDeletionTime == 0) {
            nDeletionTime = GetAdjustedTime();
        }
    }
    if(GetAbsoluteYesCount(VOTE_SIGNAL_ENDORSED) >= nAbsVoteReq) fCachedEndorsed = true;

    if(GetAbsoluteNoCount(VOTE_SIGNAL_VALID) >= nAbsVoteReq) fCachedValid = false;
}

void CGovernanceObject::swap(CGovernanceObject& first, CGovernanceObject& second) // nothrow
{
    // enable ADL (not necessary in our case, but good practice)
    using std::swap;

    // by swapping the members of two classes,
    // the two classes are effectively swapped
    swap(first.nHashParent, second.nHashParent);
    swap(first.nRevision, second.nRevision);
    swap(first.nTime, second.nTime);
    swap(first.nDeletionTime, second.nDeletionTime);
    swap(first.nCollateralHash, second.nCollateralHash);
    swap(first.strData, second.strData);
    swap(first.nObjectType, second.nObjectType);

    // swap all cached valid flags
    // Note: vote containers, masternode vin/signature and the local-validity
    // string members are not swapped here.
    swap(first.fCachedFunding, second.fCachedFunding);
    swap(first.fCachedValid, second.fCachedValid);
    swap(first.fCachedDelete, second.fCachedDelete);
    swap(first.fCachedEndorsed, second.fCachedEndorsed);
    swap(first.fDirtyCache, second.fDirtyCache);
    swap(first.fExpired, second.fExpired);
}

// Retry votes parked while their masternode was unknown; drop expired
// entries and relay votes that now process successfully.
void CGovernanceObject::CheckOrphanVotes(CConnman& connman)
{
    int64_t nNow = GetAdjustedTime();
    const vote_mcache_t::list_t& listVotes = mapOrphanVotes.GetItemList();
    vote_mcache_t::list_cit it = listVotes.begin();
    while(it != listVotes.end()) {
        bool fRemove = false;
        const COutPoint& key = it->key;
        const vote_time_pair_t& pairVote = it->value;
        const CGovernanceVote& vote = pairVote.first;
        if(pairVote.second < nNow) {
            // Orphan expired without its masternode appearing.
            fRemove = true;
        }
        else if(!mnodeman.Has(vote.GetMasternodeOutpoint())) {
            // Masternode still unknown — keep waiting.
            ++it;
            continue;
        }
        CGovernanceException exception;
        if(!ProcessVote(NULL, vote, exception, connman)) {
            LogPrintf("CGovernanceObject::CheckOrphanVotes -- Failed to add orphan vote: %s\n", exception.what());
        }
        else {
            vote.Relay(connman);
            fRemove = true;
        }
        ++it;
        if(fRemove) {
            // Erase after advancing so the iterator stays valid.
            mapOrphanVotes.Erase(key, pairVote);
        }
    }
}
#include "extensions/transport_sockets/tls/context_impl.h"

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

#include "envoy/admin/v3/certs.pb.h"
#include "envoy/common/exception.h"
#include "envoy/common/platform.h"
#include "envoy/ssl/ssl_socket_extended_info.h"
#include "envoy/stats/scope.h"
#include "envoy/type/matcher/v3/string.pb.h"

#include "common/common/assert.h"
#include "common/common/base64.h"
#include "common/common/fmt.h"
#include "common/common/hex.h"
#include "common/common/utility.h"
#include "common/network/address_impl.h"
#include "common/protobuf/utility.h"
#include "common/runtime/runtime_features.h"
#include "common/stats/utility.h"

#include "extensions/transport_sockets/tls/utility.h"

#include "absl/container/node_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "openssl/evp.h"
#include "openssl/hmac.h"
#include "openssl/rand.h"

namespace Envoy {
namespace Extensions {
namespace TransportSockets {
namespace Tls {

namespace {

// Scans a CBS of big-endian 16-bit values and returns true when `n` is
// present; returns false on exhaustion or on a malformed (odd-length) tail.
// Note: consumes the CBS as it scans.
bool cbsContainsU16(CBS& cbs, uint16_t n) {
  while (CBS_len(&cbs) > 0) {
    uint16_t v;
    if (!CBS_get_u16(&cbs, &v)) {
      return false;
    }
    if (v == n) {
      return true;
    }
  }

  return false;
}

} // namespace

// Lazily allocates (once per process) the SSL ex-data index used to attach
// per-connection extended socket info.
int ContextImpl::sslExtendedSocketInfoIndex() {
  CONSTRUCT_ON_FIRST_USE(int, []() -> int {
    int ssl_context_index = SSL_get_ex_new_index(0, nullptr, nullptr, nullptr, nullptr);
    RELEASE_ASSERT(ssl_context_index >= 0, "");
    return ssl_context_index;
  }());
}

// Builds one SSL_CTX per configured TLS certificate (minimum one), applying
// protocol-version bounds, cipher/curve lists, trusted CAs, CRLs, peer
// verification options, and certificate chains from `config`.
// Throws EnvoyException on any configuration error.
ContextImpl::ContextImpl(Stats::Scope& scope, const Envoy::Ssl::ContextConfig& config,
                         TimeSource& time_source)
    : scope_(scope), stats_(generateStats(scope)), time_source_(time_source),
      tls_max_version_(config.maxProtocolVersion()),
      stat_name_set_(scope.symbolTable().makeSet("TransportSockets::Tls")),
      unknown_ssl_cipher_(stat_name_set_->add("unknown_ssl_cipher")),
      unknown_ssl_curve_(stat_name_set_->add("unknown_ssl_curve")),
      unknown_ssl_algorithm_(stat_name_set_->add("unknown_ssl_algorithm")),
      unknown_ssl_version_(stat_name_set_->add("unknown_ssl_version")),
      ssl_ciphers_(stat_name_set_->add("ssl.ciphers")),
      ssl_versions_(stat_name_set_->add("ssl.versions")),
      ssl_curves_(stat_name_set_->add("ssl.curves")),
      ssl_sigalgs_(stat_name_set_->add("ssl.sigalgs")), capabilities_(config.capabilities()) {
  const auto tls_certificates = config.tlsCertificates();
  // Always create at least one context, even with no certificates configured.
  tls_contexts_.resize(std::max(static_cast<size_t>(1), tls_certificates.size()));

  for (auto& ctx : tls_contexts_) {
    ctx.ssl_ctx_.reset(SSL_CTX_new(TLS_method()));

    int rc = SSL_CTX_set_app_data(ctx.ssl_ctx_.get(), this);
    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));

    rc = SSL_CTX_set_min_proto_version(ctx.ssl_ctx_.get(), config.minProtocolVersion());
    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));

    rc = SSL_CTX_set_max_proto_version(ctx.ssl_ctx_.get(), config.maxProtocolVersion());
    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));

    if (!capabilities_.provides_ciphers_and_curves &&
        !SSL_CTX_set_strict_cipher_list(ctx.ssl_ctx_.get(), config.cipherSuites().c_str())) {
      // Break up a set of ciphers into each individual cipher and try them each individually in
      // order to attempt to log which specific one failed. Example of config.cipherSuites():
      // "-ALL:[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]:ECDHE-ECDSA-AES128-SHA".
      //
      // "-" is both an operator when in the leading position of a token (-ALL: don't allow this
      // cipher), and the common separator in names (ECDHE-ECDSA-AES128-GCM-SHA256). Don't split on
      // it because it will separate pieces of the same cipher. When it is a leading character, it
      // is removed below.
      std::vector<absl::string_view> ciphers =
          StringUtil::splitToken(config.cipherSuites(), ":+![|]", false);
      std::vector<std::string> bad_ciphers;
      for (const auto& cipher : ciphers) {
        std::string cipher_str(cipher);

        if (absl::StartsWith(cipher_str, "-")) {
          cipher_str.erase(cipher_str.begin());
        }

        if (!SSL_CTX_set_strict_cipher_list(ctx.ssl_ctx_.get(), cipher_str.c_str())) {
          bad_ciphers.push_back(cipher_str);
        }
      }
      throw EnvoyException(fmt::format("Failed to initialize cipher suites {}. The following "
                                       "ciphers were rejected when tried individually: {}",
                                       config.cipherSuites(), absl::StrJoin(bad_ciphers, ", ")));
    }

    if (!capabilities_.provides_ciphers_and_curves &&
        !SSL_CTX_set1_curves_list(ctx.ssl_ctx_.get(), config.ecdhCurves().c_str())) {
      throw EnvoyException(absl::StrCat("Failed to initialize ECDH curves ", config.ecdhCurves()));
    }
  }

  int verify_mode = SSL_VERIFY_NONE;
  int verify_mode_validation_context = SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT;

  if (config.certificateValidationContext() != nullptr) {
    envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext::
        TrustChainVerification verification =
            config.certificateValidationContext()->trustChainVerification();
    if (verification == envoy::extensions::transport_sockets::tls::v3::
                            CertificateValidationContext::ACCEPT_UNTRUSTED) {
      verify_mode = SSL_VERIFY_PEER; // Ensure client-certs will be requested even if we have
                                     // nothing to verify against
      verify_mode_validation_context = SSL_VERIFY_PEER;
    }
  }

#ifdef BORINGSSL_FIPS
  if (!capabilities_.is_fips_compliant) {
    throw EnvoyException(
        "Can't load a FIPS noncompliant custom handshaker while running in FIPS compliant mode.");
  }
#endif

  if (config.certificateValidationContext() != nullptr &&
      !config.certificateValidationContext()->caCert().empty() &&
      !config.capabilities().provides_certificates) {
    ca_file_path_ = config.certificateValidationContext()->caCertPath();
    bssl::UniquePtr<BIO> bio(
        BIO_new_mem_buf(const_cast<char*>(config.certificateValidationContext()->caCert().data()),
                        config.certificateValidationContext()->caCert().size()));
    RELEASE_ASSERT(bio != nullptr, "");
    // Based on BoringSSL's X509_load_cert_crl_file().
    bssl::UniquePtr<STACK_OF(X509_INFO)> list(
        PEM_X509_INFO_read_bio(bio.get(), nullptr, nullptr, nullptr));
    if (list == nullptr) {
      throw EnvoyException(absl::StrCat("Failed to load trusted CA certificates from ",
                                        config.certificateValidationContext()->caCertPath()));
    }

    for (auto& ctx : tls_contexts_) {
      X509_STORE* store = SSL_CTX_get_cert_store(ctx.ssl_ctx_.get());
      bool has_crl = false;
      for (const X509_INFO* item : list.get()) {
        if (item->x509) {
          X509_STORE_add_cert(store, item->x509);
          // Keep a reference to the first CA cert for reporting purposes.
          if (ca_cert_ == nullptr) {
            X509_up_ref(item->x509);
            ca_cert_.reset(item->x509);
          }
        }
        if (item->crl) {
          X509_STORE_add_crl(store, item->crl);
          has_crl = true;
        }
      }
      if (ca_cert_ == nullptr) {
        throw EnvoyException(absl::StrCat("Failed to load trusted CA certificates from ",
                                          config.certificateValidationContext()->caCertPath()));
      }
      if (has_crl) {
        X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
      }
      verify_mode = SSL_VERIFY_PEER;
      verify_trusted_ca_ = true;

      // NOTE: We're using SSL_CTX_set_cert_verify_callback() instead of X509_verify_cert()
      // directly. However, our new callback is still calling X509_verify_cert() under
      // the hood. Therefore, to ignore cert expiration, we need to set the callback
      // for X509_verify_cert to ignore that error.
      if (config.certificateValidationContext()->allowExpiredCertificate()) {
        X509_STORE_set_verify_cb(store, ContextImpl::ignoreCertificateExpirationCallback);
      }
    }
  }

  if (config.certificateValidationContext() != nullptr &&
      !config.certificateValidationContext()->certificateRevocationList().empty()) {
    bssl::UniquePtr<BIO> bio(BIO_new_mem_buf(
        const_cast<char*>(
            config.certificateValidationContext()->certificateRevocationList().data()),
        config.certificateValidationContext()->certificateRevocationList().size()));
    RELEASE_ASSERT(bio != nullptr, "");

    // Based on BoringSSL's X509_load_cert_crl_file().
    bssl::UniquePtr<STACK_OF(X509_INFO)> list(
        PEM_X509_INFO_read_bio(bio.get(), nullptr, nullptr, nullptr));
    if (list == nullptr) {
      throw EnvoyException(
          absl::StrCat("Failed to load CRL from ",
                       config.certificateValidationContext()->certificateRevocationListPath()));
    }

    for (auto& ctx : tls_contexts_) {
      X509_STORE* store = SSL_CTX_get_cert_store(ctx.ssl_ctx_.get());
      for (const X509_INFO* item : list.get()) {
        if (item->crl) {
          X509_STORE_add_crl(store, item->crl);
        }
      }

      X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
    }
  }

  const Envoy::Ssl::CertificateValidationContextConfig* cert_validation_config =
      config.certificateValidationContext();
  if (cert_validation_config != nullptr) {
    if (!cert_validation_config->verifySubjectAltNameList().empty()) {
      verify_subject_alt_name_list_ = cert_validation_config->verifySubjectAltNameList();
      verify_mode = verify_mode_validation_context;
    }

    if (!cert_validation_config->subjectAltNameMatchers().empty()) {
      for (const envoy::type::matcher::v3::StringMatcher& matcher :
           cert_validation_config->subjectAltNameMatchers()) {
        subject_alt_name_matchers_.push_back(Matchers::StringMatcherImpl(matcher));
      }
      verify_mode = verify_mode_validation_context;
    }

    if (!cert_validation_config->verifyCertificateHashList().empty()) {
      for (auto hash : cert_validation_config->verifyCertificateHashList()) {
        // Remove colons from the 95 chars long colon-separated "fingerprint"
        // in order to get the hex-encoded string.
        if (hash.size() == 95) {
          hash.erase(std::remove(hash.begin(), hash.end(), ':'), hash.end());
        }
        const auto& decoded = Hex::decode(hash);
        if (decoded.size() != SHA256_DIGEST_LENGTH) {
          throw EnvoyException(absl::StrCat("Invalid hex-encoded SHA-256 ", hash));
        }
        verify_certificate_hash_list_.push_back(decoded);
      }
      verify_mode = verify_mode_validation_context;
    }

    if (!cert_validation_config->verifyCertificateSpkiList().empty()) {
      for (const auto& hash : cert_validation_config->verifyCertificateSpkiList()) {
        const auto decoded = Base64::decode(hash);
        if (decoded.size() != SHA256_DIGEST_LENGTH) {
          throw EnvoyException(absl::StrCat("Invalid base64-encoded SHA-256 ", hash));
        }
        verify_certificate_spki_list_.emplace_back(decoded.begin(), decoded.end());
      }
      verify_mode = verify_mode_validation_context;
    }
  }

  if (!capabilities_.verifies_peer_certificates) {
    for (auto& ctx : tls_contexts_) {
      if (verify_mode != SSL_VERIFY_NONE) {
        SSL_CTX_set_verify(ctx.ssl_ctx_.get(), verify_mode, nullptr);
        SSL_CTX_set_cert_verify_callback(ctx.ssl_ctx_.get(), ContextImpl::verifyCallback, this);
      }
    }
  }

  // Tracks certificate key types seen so far; at most one cert per type.
  absl::node_hash_set<int> cert_pkey_ids;
  if (!capabilities_.provides_certificates) {
    for (uint32_t i = 0; i < tls_certificates.size(); ++i) {
      auto& ctx = tls_contexts_[i];
      // Load certificate chain.
      const auto& tls_certificate = tls_certificates[i].get();
      ctx.cert_chain_file_path_ = tls_certificate.certificateChainPath();
      bssl::UniquePtr<BIO> bio(
          BIO_new_mem_buf(const_cast<char*>(tls_certificate.certificateChain().data()),
                          tls_certificate.certificateChain().size()));
      RELEASE_ASSERT(bio != nullptr, "");
      ctx.cert_chain_.reset(PEM_read_bio_X509_AUX(bio.get(), nullptr, nullptr, nullptr));
      if (ctx.cert_chain_ == nullptr ||
          !SSL_CTX_use_certificate(ctx.ssl_ctx_.get(), ctx.cert_chain_.get())) {
        // Drain the OpenSSL error queue into the debug log before throwing.
        // NOTE(review): format string has four placeholders but five
        // arguments follow; confirm ERR_GET_REASON(err) was meant to be
        // printed.
        while (uint64_t err = ERR_get_error()) {
          ENVOY_LOG_MISC(debug, "SSL error: {}:{}:{}:{}", err, ERR_lib_error_string(err),
                         ERR_func_error_string(err), ERR_GET_REASON(err),
                         ERR_reason_error_string(err));
        }
        throw EnvoyException(
            absl::StrCat("Failed to load certificate chain from ", ctx.cert_chain_file_path_));
      }
      // Read rest of the certificate chain.
      while (true) {
        bssl::UniquePtr<X509> cert(PEM_read_bio_X509(bio.get(), nullptr, nullptr, nullptr));
        if (cert == nullptr) {
          break;
        }
        if (!SSL_CTX_add_extra_chain_cert(ctx.ssl_ctx_.get(), cert.get())) {
          throw EnvoyException(
              absl::StrCat("Failed to load certificate chain from ", ctx.cert_chain_file_path_));
        }
        // SSL_CTX_add_extra_chain_cert() takes ownership.
        cert.release();
      }
      // Check for EOF.
      const uint32_t err = ERR_peek_last_error();
      if (ERR_GET_LIB(err) == ERR_LIB_PEM && ERR_GET_REASON(err) == PEM_R_NO_START_LINE) {
        ERR_clear_error();
      } else {
        throw EnvoyException(
            absl::StrCat("Failed to load certificate chain from ", ctx.cert_chain_file_path_));
      }

      // The must staple extension means the certificate promises to carry
      // with it an OCSP staple. https://tools.ietf.org/html/rfc7633#section-6
      constexpr absl::string_view tls_feature_ext = "1.3.6.1.5.5.7.1.24";
      constexpr absl::string_view must_staple_ext_value = "\x30\x3\x02\x01\x05";
      auto must_staple = Utility::getCertificateExtensionValue(*ctx.cert_chain_, tls_feature_ext);
      if (must_staple == must_staple_ext_value) {
        ctx.is_must_staple_ = true;
      }

      bssl::UniquePtr<EVP_PKEY> public_key(X509_get_pubkey(ctx.cert_chain_.get()));
      const int pkey_id = EVP_PKEY_id(public_key.get());
      if (!cert_pkey_ids.insert(pkey_id).second) {
        throw EnvoyException(fmt::format("Failed to load certificate chain from {}, at most one "
                                         "certificate of a given type may be specified",
                                         ctx.cert_chain_file_path_));
      }
      ctx.is_ecdsa_ = pkey_id == EVP_PKEY_EC;
      switch (pkey_id) {
      case EVP_PKEY_EC: {
        // We only support P-256 ECDSA today.
        const EC_KEY* ecdsa_public_key = EVP_PKEY_get0_EC_KEY(public_key.get());
        // Since we checked the key type above, this should be valid.
        ASSERT(ecdsa_public_key != nullptr);
        const EC_GROUP* ecdsa_group = EC_KEY_get0_group(ecdsa_public_key);
        if (ecdsa_group == nullptr ||
            EC_GROUP_get_curve_name(ecdsa_group) != NID_X9_62_prime256v1) {
          throw EnvoyException(fmt::format("Failed to load certificate chain from {}, only P-256 "
                                           "ECDSA certificates are supported",
                                           ctx.cert_chain_file_path_));
        }
        ctx.is_ecdsa_ = true;
      } break;
      case EVP_PKEY_RSA: {
        // We require RSA certificates with 2048-bit or larger keys.
        const RSA* rsa_public_key = EVP_PKEY_get0_RSA(public_key.get());
        // Since we checked the key type above, this should be valid.
ASSERT(rsa_public_key != nullptr); const unsigned rsa_key_length = RSA_size(rsa_public_key); #ifdef BORINGSSL_FIPS if (rsa_key_length != 2048 / 8 && rsa_key_length != 3072 / 8 && rsa_key_length != 4096 / 8) { throw EnvoyException( fmt::format("Failed to load certificate chain from {}, only RSA certificates with " "2048-bit, 3072-bit or 4096-bit keys are supported in FIPS mode", ctx.cert_chain_file_path_)); } #else if (rsa_key_length < 2048 / 8) { throw EnvoyException( fmt::format("Failed to load certificate chain from {}, only RSA " "certificates with 2048-bit or larger keys are supported", ctx.cert_chain_file_path_)); } #endif } break; #ifdef BORINGSSL_FIPS default: throw EnvoyException(fmt::format("Failed to load certificate chain from {}, only RSA and " "ECDSA certificates are supported in FIPS mode", ctx.cert_chain_file_path_)); #endif } Envoy::Ssl::PrivateKeyMethodProviderSharedPtr private_key_method_provider = tls_certificate.privateKeyMethod(); // We either have a private key or a BoringSSL private key method provider. if (private_key_method_provider) { ctx.private_key_method_provider_ = private_key_method_provider; // The provider has a reference to the private key method for the context lifetime. Ssl::BoringSslPrivateKeyMethodSharedPtr private_key_method = private_key_method_provider->getBoringSslPrivateKeyMethod(); if (private_key_method == nullptr) { throw EnvoyException( fmt::format("Failed to get BoringSSL private key method from provider")); } #ifdef BORINGSSL_FIPS if (!ctx.private_key_method_provider_->checkFips()) { throw EnvoyException( fmt::format("Private key method doesn't support FIPS mode with current parameters")); } #endif SSL_CTX_set_private_key_method(ctx.ssl_ctx_.get(), private_key_method.get()); } else { // Load private key. 
bio.reset(BIO_new_mem_buf(const_cast<char*>(tls_certificate.privateKey().data()), tls_certificate.privateKey().size())); RELEASE_ASSERT(bio != nullptr, ""); bssl::UniquePtr<EVP_PKEY> pkey( PEM_read_bio_PrivateKey(bio.get(), nullptr, nullptr, !tls_certificate.password().empty() ? const_cast<char*>(tls_certificate.password().c_str()) : nullptr)); if (pkey == nullptr || !SSL_CTX_use_PrivateKey(ctx.ssl_ctx_.get(), pkey.get())) { throw EnvoyException(fmt::format("Failed to load private key from {}, Cause: {}", tls_certificate.privateKeyPath(), Utility::getLastCryptoError().value_or("unknown"))); } #ifdef BORINGSSL_FIPS // Verify that private keys are passing FIPS pairwise consistency tests. switch (pkey_id) { case EVP_PKEY_EC: { const EC_KEY* ecdsa_private_key = EVP_PKEY_get0_EC_KEY(pkey.get()); if (!EC_KEY_check_fips(ecdsa_private_key)) { throw EnvoyException(fmt::format("Failed to load private key from {}, ECDSA key failed " "pairwise consistency test required in FIPS mode", tls_certificate.privateKeyPath())); } } break; case EVP_PKEY_RSA: { RSA* rsa_private_key = EVP_PKEY_get0_RSA(pkey.get()); if (!RSA_check_fips(rsa_private_key)) { throw EnvoyException(fmt::format("Failed to load private key from {}, RSA key failed " "pairwise consistency test required in FIPS mode", tls_certificate.privateKeyPath())); } } break; } #endif } } } // use the server's cipher list preferences for (auto& ctx : tls_contexts_) { SSL_CTX_set_options(ctx.ssl_ctx_.get(), SSL_OP_CIPHER_SERVER_PREFERENCE); } if (config.certificateValidationContext() != nullptr) { allow_untrusted_certificate_ = config.certificateValidationContext()->trustChainVerification() == envoy::extensions::transport_sockets::tls::v3::CertificateValidationContext:: ACCEPT_UNTRUSTED; } parsed_alpn_protocols_ = parseAlpnProtocols(config.alpnProtocols()); // Use the SSL library to iterate over the configured ciphers. // // Note that if a negotiated cipher suite is outside of this set, we'll issue an ENVOY_BUG. 
  // (Tail of the ContextImpl constructor, which begins earlier in this file.)
  // Remember the names of all configured ciphers as stat-name builtins so that
  // per-cipher handshake counters can be created without symbol-table churn.
  for (TlsContext& tls_context : tls_contexts_) {
    for (const SSL_CIPHER* cipher : SSL_CTX_get_ciphers(tls_context.ssl_ctx_.get())) {
      stat_name_set_->rememberBuiltin(SSL_CIPHER_get_name(cipher));
    }
  }

  // Add supported cipher suites from the TLS 1.3 spec:
  // https://tools.ietf.org/html/rfc8446#appendix-B.4
  // AES-CCM cipher suites are removed (no BoringSSL support).
  //
  // Note that if a negotiated cipher suite is outside of this set, we'll issue an ENVOY_BUG.
  stat_name_set_->rememberBuiltins(
      {"TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256"});

  // All supported curves. Source:
  // https://github.com/google/boringssl/blob/3743aafdacff2f7b083615a043a37101f740fa53/ssl/ssl_key_share.cc#L302-L309
  //
  // Note that if a negotiated curve is outside of this set, we'll issue an ENVOY_BUG.
  stat_name_set_->rememberBuiltins({"P-224", "P-256", "P-384", "P-521", "X25519", "CECPQ2"});

  // All supported signature algorithms. Source:
  // https://github.com/google/boringssl/blob/3743aafdacff2f7b083615a043a37101f740fa53/ssl/ssl_privkey.cc#L436-L453
  //
  // Note that if a negotiated algorithm is outside of this set, we'll issue an ENVOY_BUG.
  stat_name_set_->rememberBuiltins({
      "rsa_pkcs1_md5_sha1",
      "rsa_pkcs1_sha1",
      "rsa_pkcs1_sha256",
      "rsa_pkcs1_sha384",
      "rsa_pkcs1_sha512",
      "ecdsa_sha1",
      "ecdsa_secp256r1_sha256",
      "ecdsa_secp384r1_sha384",
      "ecdsa_secp521r1_sha512",
      "rsa_pss_rsae_sha256",
      "rsa_pss_rsae_sha384",
      "rsa_pss_rsae_sha512",
      "ed25519",
  });

  // All supported protocol versions.
  //
  // Note that if a negotiated version is outside of this set, we'll issue an ENVOY_BUG.
  stat_name_set_->rememberBuiltins({"TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3"});
}

// Server-side ALPN selection: picks the first protocol in our configured list
// (priority order) that the client also offered. Returns SSL_TLSEXT_ERR_NOACK
// when there is no overlap, which lets the handshake proceed without ALPN.
int ServerContextImpl::alpnSelectCallback(const unsigned char** out, unsigned char* outlen,
                                          const unsigned char* in, unsigned int inlen) {
  // Currently this uses the standard selection algorithm in priority order.
  const uint8_t* alpn_data = parsed_alpn_protocols_.data();
  size_t alpn_data_size = parsed_alpn_protocols_.size();

  if (SSL_select_next_proto(const_cast<unsigned char**>(out), outlen, alpn_data, alpn_data_size, in,
                            inlen) != OPENSSL_NPN_NEGOTIATED) {
    return SSL_TLSEXT_ERR_NOACK;
  } else {
    return SSL_TLSEXT_ERR_OK;
  }
}

// Converts a comma-delimited ALPN protocol string into the length-prefixed
// wire format expected by the SSL library (one length byte followed by the
// protocol bytes, per protocol). Throws EnvoyException when the whole string
// is >= 64KB or any single protocol name exceeds 255 bytes.
std::vector<uint8_t> ContextImpl::parseAlpnProtocols(const std::string& alpn_protocols) {
  if (alpn_protocols.empty()) {
    return {};
  }

  if (alpn_protocols.size() >= 65535) {
    throw EnvoyException("Invalid ALPN protocol string");
  }

  // The output is exactly one byte longer than the input: each comma separator
  // becomes the next protocol's length byte, plus one leading length byte.
  std::vector<uint8_t> out(alpn_protocols.size() + 1);
  size_t start = 0;
  for (size_t i = 0; i <= alpn_protocols.size(); i++) {
    if (i == alpn_protocols.size() || alpn_protocols[i] == ',') {
      if (i - start > 255) {
        throw EnvoyException("Invalid ALPN protocol string");
      }
      out[start] = i - start;
      start = i + 1;
    } else {
      out[i + 1] = alpn_protocols[i];
    }
  }

  return out;
}

bssl::UniquePtr<SSL> ContextImpl::newSsl(const Network::TransportSocketOptions*) {
  // We use the first certificate for a new SSL object, later in the
  // SSL_CTX_set_select_certificate_cb() callback following ClientHello, we replace with the
  // selected certificate via SSL_set_SSL_CTX().
  return bssl::UniquePtr<SSL>(SSL_new(tls_contexts_[0].ssl_ctx_.get()));
}

// X509_STORE verify callback installed when expired certificates are allowed:
// converts "expired" / "not yet valid" failures into success and leaves every
// other verification result untouched.
int ContextImpl::ignoreCertificateExpirationCallback(int ok, X509_STORE_CTX* ctx) {
  if (!ok) {
    int err = X509_STORE_CTX_get_error(ctx);
    if (err == X509_V_ERR_CERT_HAS_EXPIRED || err == X509_V_ERR_CERT_NOT_YET_VALID) {
      return 1;
    }
  }
  return ok;
}

// Static trampoline registered via SSL_CTX_set_cert_verify_callback(): recovers
// the owning ContextImpl (passed as `arg`) and the SSL connection from the
// store context's ex_data, then delegates to doVerifyCertChain() with the
// peer's leaf certificate and any per-connection transport socket options.
int ContextImpl::verifyCallback(X509_STORE_CTX* store_ctx, void* arg) {
  ContextImpl* impl = reinterpret_cast<ContextImpl*>(arg);
  SSL* ssl = reinterpret_cast<SSL*>(
      X509_STORE_CTX_get_ex_data(store_ctx, SSL_get_ex_data_X509_STORE_CTX_idx()));
  auto cert = bssl::UniquePtr<X509>(SSL_get_peer_certificate(ssl));
  return impl->doVerifyCertChain(
      store_ctx,
      reinterpret_cast<Envoy::Ssl::SslExtendedSocketInfo*>(
          SSL_get_ex_data(ssl, ContextImpl::sslExtendedSocketInfoIndex())),
      *cert, static_cast<const Network::TransportSocketOptions*>(SSL_get_app_data(ssl)));
}

// Runs chain verification against the trusted CA (when configured), then the
// Envoy-level checks (SAN list/matchers, certificate hashes, SPKI hashes) via
// verifyCertificate(). Returns 1 to accept the handshake. When
// allow_untrusted_certificate_ is set, failures are still recorded on
// ssl_extended_info but the handshake is allowed to proceed.
int ContextImpl::doVerifyCertChain(
    X509_STORE_CTX* store_ctx, Ssl::SslExtendedSocketInfo* ssl_extended_info, X509& leaf_cert,
    const Network::TransportSocketOptions* transport_socket_options) {
  if (verify_trusted_ca_) {
    int ret = X509_verify_cert(store_ctx);
    if (ssl_extended_info) {
      ssl_extended_info->setCertificateValidationStatus(
          ret == 1 ? Envoy::Ssl::ClientValidationStatus::Validated
                   : Envoy::Ssl::ClientValidationStatus::Failed);
    }
    if (ret <= 0) {
      stats_.fail_verify_error_.inc();
      return allow_untrusted_certificate_ ? 1 : ret;
    }
  }

  // A per-connection SAN override (from transport socket options) takes
  // precedence over the statically configured SAN list.
  Envoy::Ssl::ClientValidationStatus validated = verifyCertificate(
      &leaf_cert,
      transport_socket_options != nullptr &&
              !transport_socket_options->verifySubjectAltNameListOverride().empty()
          ? transport_socket_options->verifySubjectAltNameListOverride()
          : verify_subject_alt_name_list_,
      subject_alt_name_matchers_);

  if (ssl_extended_info) {
    if (ssl_extended_info->certificateValidationStatus() ==
        Envoy::Ssl::ClientValidationStatus::NotValidated) {
      ssl_extended_info->setCertificateValidationStatus(validated);
    } else if (validated != Envoy::Ssl::ClientValidationStatus::NotValidated) {
      ssl_extended_info->setCertificateValidationStatus(validated);
    }
  }

  return allow_untrusted_certificate_ ? 1
                                      : (validated != Envoy::Ssl::ClientValidationStatus::Failed);
}

// Applies the Envoy-level certificate checks in order: exact SAN list, SAN
// matchers, then the hash/SPKI pin lists (where matching either list is
// sufficient). Returns Failed on the first check that does not pass,
// Validated when at least one configured check passed, and NotValidated when
// no checks were configured.
Envoy::Ssl::ClientValidationStatus ContextImpl::verifyCertificate(
    X509* cert, const std::vector<std::string>& verify_san_list,
    const std::vector<Matchers::StringMatcherImpl>& subject_alt_name_matchers) {
  Envoy::Ssl::ClientValidationStatus validated = Envoy::Ssl::ClientValidationStatus::NotValidated;

  if (!verify_san_list.empty()) {
    if (!verifySubjectAltName(cert, verify_san_list)) {
      stats_.fail_verify_san_.inc();
      return Envoy::Ssl::ClientValidationStatus::Failed;
    }
    validated = Envoy::Ssl::ClientValidationStatus::Validated;
  }

  if (!subject_alt_name_matchers.empty() && !matchSubjectAltName(cert, subject_alt_name_matchers)) {
    stats_.fail_verify_san_.inc();
    return Envoy::Ssl::ClientValidationStatus::Failed;
  }

  if (!verify_certificate_hash_list_.empty() || !verify_certificate_spki_list_.empty()) {
    // Either a full-certificate hash match or an SPKI hash match is accepted.
    const bool valid_certificate_hash =
        !verify_certificate_hash_list_.empty() &&
        verifyCertificateHashList(cert, verify_certificate_hash_list_);
    const bool valid_certificate_spki =
        !verify_certificate_spki_list_.empty() &&
        verifyCertificateSpkiList(cert, verify_certificate_spki_list_);

    if (!valid_certificate_hash && !valid_certificate_spki) {
      stats_.fail_verify_cert_hash_.inc();
      return Envoy::Ssl::ClientValidationStatus::Failed;
    }

    validated = Envoy::Ssl::ClientValidationStatus::Validated;
  }

  return validated;
}

// Increments a counter tagged with a dynamic value (cipher/version/curve/etc.),
// mapping the value through the builtin stat-name set with a fallback for
// unexpected values (which also triggers an ENVOY_BUG).
void ContextImpl::incCounter(const Stats::StatName name, absl::string_view value,
                             const Stats::StatName fallback) const {
  const Stats::StatName value_stat_name = stat_name_set_->getBuiltin(value, fallback);
  ENVOY_BUG(value_stat_name != fallback,
            absl::StrCat("Unexpected ", scope_.symbolTable().toString(name), " value: ", value));
  Stats::Utility::counterFromElements(scope_, {name, value_stat_name}).inc();
}

// Records handshake stats: total handshakes, session reuse, negotiated cipher,
// protocol version, curve, peer signature algorithm, and absence of a peer
// certificate.
void ContextImpl::logHandshake(SSL* ssl) const {
  stats_.handshake_.inc();

  if (SSL_session_reused(ssl)) {
    stats_.session_reused_.inc();
  }

  incCounter(ssl_ciphers_, SSL_get_cipher_name(ssl), unknown_ssl_cipher_);
  incCounter(ssl_versions_, SSL_get_version(ssl), unknown_ssl_version_);

  const uint16_t curve_id = SSL_get_curve_id(ssl);
  if (curve_id) {
    incCounter(ssl_curves_, SSL_get_curve_name(curve_id), unknown_ssl_curve_);
  }

  const uint16_t sigalg_id = SSL_get_peer_signature_algorithm(ssl);
  if (sigalg_id) {
    const char* sigalg = SSL_get_signature_algorithm_name(sigalg_id, 1 /* include curve */);
    incCounter(ssl_sigalgs_, sigalg, unknown_ssl_algorithm_);
  }

  bssl::UniquePtr<X509> cert(SSL_get_peer_certificate(ssl));
  if (!cert.get()) {
    stats_.no_certificate_.inc();
  }
}

// Collects the private key method providers configured on any of the per-cert
// TLS contexts (contexts without one are skipped).
std::vector<Ssl::PrivateKeyMethodProviderSharedPtr> ContextImpl::getPrivateKeyMethodProviders() {
  std::vector<Envoy::Ssl::PrivateKeyMethodProviderSharedPtr> providers;

  for (auto& tls_context : tls_contexts_) {
    Envoy::Ssl::PrivateKeyMethodProviderSharedPtr provider =
        tls_context.getPrivateKeyMethodProvider();
    if (provider) {
      providers.push_back(provider);
    }
  }
  return providers;
}

// Returns true if any SAN in the certificate satisfies any of the configured
// string matchers. Returns false when the certificate carries no SAN extension.
bool ContextImpl::matchSubjectAltName(
    X509* cert, const std::vector<Matchers::StringMatcherImpl>& subject_alt_name_matchers) {
  bssl::UniquePtr<GENERAL_NAMES> san_names(
      static_cast<GENERAL_NAMES*>(X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr)));
  if (san_names == nullptr) {
    return false;
  }
  for (const GENERAL_NAME* general_name : san_names.get()) {
    const std::string san = Utility::generalNameAsString(general_name);
    for (auto& config_san_matcher : subject_alt_name_matchers) {
      // For DNS SAN, if the StringMatcher type is exact, we have to follow DNS matching semantics.
      if (general_name->type == GEN_DNS &&
                  config_san_matcher.matcher().match_pattern_case() ==
                      envoy::type::matcher::v3::StringMatcher::MatchPatternCase::kExact
              ? dnsNameMatch(config_san_matcher.matcher().exact(), absl::string_view(san))
              : config_san_matcher.match(san)) {
        return true;
      }
    }
  }
  return false;
}

// Returns true if any SAN in the certificate matches any entry of the
// configured SAN list; DNS SANs use wildcard-aware DNS matching, all other SAN
// types use exact string equality.
bool ContextImpl::verifySubjectAltName(X509* cert,
                                       const std::vector<std::string>& subject_alt_names) {
  bssl::UniquePtr<GENERAL_NAMES> san_names(
      static_cast<GENERAL_NAMES*>(X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr)));
  if (san_names == nullptr) {
    return false;
  }
  for (const GENERAL_NAME* general_name : san_names.get()) {
    const std::string san = Utility::generalNameAsString(general_name);
    for (auto& config_san : subject_alt_names) {
      if (general_name->type == GEN_DNS ? dnsNameMatch(config_san, san.c_str())
                                        : config_san == san) {
        return true;
      }
    }
  }
  return false;
}

// DNS name matching with single-label wildcard support: an exact match always
// succeeds; a pattern of the form "*.suffix" matches "label.suffix" only when
// the wildcard covers exactly one label (no '.' before the suffix).
bool ContextImpl::dnsNameMatch(const absl::string_view dns_name, const absl::string_view pattern) {
  if (dns_name == pattern) {
    return true;
  }

  size_t pattern_len = pattern.length();
  if (pattern_len > 1 && pattern[0] == '*' && pattern[1] == '.') {
    if (dns_name.length() > pattern_len - 1) {
      const size_t off = dns_name.length() - pattern_len + 1;
      return dns_name.substr(0, off).find('.') == std::string::npos &&
             dns_name.substr(off, pattern_len - 1) == pattern.substr(1, pattern_len - 1);
    }
  }

  return false;
}

// Returns true if the SHA-256 digest of the whole certificate equals any of
// the configured (pre-decoded) hash pins.
bool ContextImpl::verifyCertificateHashList(
    X509* cert, const std::vector<std::vector<uint8_t>>& expected_hashes) {
  std::vector<uint8_t> computed_hash(SHA256_DIGEST_LENGTH);
  unsigned int n;
  X509_digest(cert, EVP_sha256(), computed_hash.data(), &n);
  RELEASE_ASSERT(n == computed_hash.size(), "");

  for (const auto& expected_hash : expected_hashes) {
    if (computed_hash == expected_hash) {
      return true;
    }
  }
  return false;
}

// Returns true if the SHA-256 digest of the certificate's DER-encoded
// SubjectPublicKeyInfo equals any of the configured SPKI pins.
bool ContextImpl::verifyCertificateSpkiList(
    X509* cert, const std::vector<std::vector<uint8_t>>& expected_hashes) {
  X509_PUBKEY* pubkey =
      X509_get_X509_PUBKEY(cert);
  if (pubkey == nullptr) {
    return false;
  }
  // i2d_X509_PUBKEY() allocates the DER buffer; free_spki owns it from here.
  uint8_t* spki = nullptr;
  const int len = i2d_X509_PUBKEY(pubkey, &spki);
  if (len < 0) {
    return false;
  }
  bssl::UniquePtr<uint8_t> free_spki(spki);

  std::vector<uint8_t> computed_hash(SHA256_DIGEST_LENGTH);
  SHA256(spki, len, computed_hash.data());

  for (const auto& expected_hash : expected_hashes) {
    if (computed_hash == expected_hash) {
      return true;
    }
  }
  return false;
}

// Instantiates the full SSL stat set under the "ssl." prefix in the given scope.
SslStats ContextImpl::generateStats(Stats::Scope& store) {
  std::string prefix("ssl.");
  return {ALL_SSL_STATS(POOL_COUNTER_PREFIX(store, prefix), POOL_GAUGE_PREFIX(store, prefix),
                        POOL_HISTOGRAM_PREFIX(store, prefix))};
}

// Returns the smallest days-until-expiration across the CA cert and all
// configured cert chains, clamped at zero for already-expired certificates.
size_t ContextImpl::daysUntilFirstCertExpires() const {
  int daysUntilExpiration = Utility::getDaysUntilExpiration(ca_cert_.get(), time_source_);
  for (auto& ctx : tls_contexts_) {
    daysUntilExpiration = std::min<int>(
        Utility::getDaysUntilExpiration(ctx.cert_chain_.get(), time_source_), daysUntilExpiration);
  }
  if (daysUntilExpiration < 0) { // Ensure that the return value is unsigned
    return 0;
  }
  return daysUntilExpiration;
}

// Returns the smallest seconds-until-expiration across all configured OCSP
// responses, or nullopt when no context carries an OCSP response.
absl::optional<uint64_t> ContextImpl::secondsUntilFirstOcspResponseExpires() const {
  absl::optional<uint64_t> secs_until_expiration;
  for (auto& ctx : tls_contexts_) {
    if (ctx.ocsp_response_) {
      uint64_t next_expiration = ctx.ocsp_response_->secondsUntilExpiration();
      secs_until_expiration = std::min<uint64_t>(
          next_expiration, secs_until_expiration.value_or(std::numeric_limits<uint64_t>::max()));
    }
  }
  return secs_until_expiration;
}

// Admin endpoint helper: details for the configured CA certificate, or nullptr
// when no CA certificate is configured.
Envoy::Ssl::CertificateDetailsPtr ContextImpl::getCaCertInformation() const {
  if (ca_cert_ == nullptr) {
    return nullptr;
  }
  return certificateDetails(ca_cert_.get(), getCaFileName(), nullptr);
}

// Admin endpoint helper: details for each loaded certificate chain, including
// its OCSP response when present.
std::vector<Envoy::Ssl::CertificateDetailsPtr> ContextImpl::getCertChainInformation() const {
  std::vector<Envoy::Ssl::CertificateDetailsPtr> cert_details;
  for (const auto& ctx : tls_contexts_) {
    if (ctx.cert_chain_ == nullptr) {
      continue;
    }
    cert_details.emplace_back(certificateDetails(ctx.cert_chain_.get(), ctx.getCertChainFileName(),
                                                 ctx.ocsp_response_.get()));
  }
  return cert_details;
}

// Builds the admin-facing proto describing one certificate: path, serial,
// expiry, validity window, optional OCSP validity window, and DNS/URI/IP SANs.
Envoy::Ssl::CertificateDetailsPtr
ContextImpl::certificateDetails(X509* cert, const std::string& path,
                                const Ocsp::OcspResponseWrapper* ocsp_response) const {
  Envoy::Ssl::CertificateDetailsPtr certificate_details =
      std::make_unique<envoy::admin::v3::CertificateDetails>();
  certificate_details->set_path(path);
  certificate_details->set_serial_number(Utility::getSerialNumberFromCertificate(*cert));
  certificate_details->set_days_until_expiration(
      Utility::getDaysUntilExpiration(cert, time_source_));
  if (ocsp_response) {
    auto* ocsp_details = certificate_details->mutable_ocsp_details();
    ProtobufWkt::Timestamp* valid_from = ocsp_details->mutable_valid_from();
    TimestampUtil::systemClockToTimestamp(ocsp_response->getThisUpdate(), *valid_from);
    ProtobufWkt::Timestamp* expiration = ocsp_details->mutable_expiration();
    TimestampUtil::systemClockToTimestamp(ocsp_response->getNextUpdate(), *expiration);
  }
  ProtobufWkt::Timestamp* valid_from = certificate_details->mutable_valid_from();
  TimestampUtil::systemClockToTimestamp(Utility::getValidFrom(*cert), *valid_from);
  ProtobufWkt::Timestamp* expiration_time = certificate_details->mutable_expiration_time();
  TimestampUtil::systemClockToTimestamp(Utility::getExpirationTime(*cert), *expiration_time);

  for (auto& dns_san : Utility::getSubjectAltNames(*cert, GEN_DNS)) {
    envoy::admin::v3::SubjectAlternateName& subject_alt_name =
        *certificate_details->add_subject_alt_names();
    subject_alt_name.set_dns(dns_san);
  }
  for (auto& uri_san : Utility::getSubjectAltNames(*cert, GEN_URI)) {
    envoy::admin::v3::SubjectAlternateName& subject_alt_name =
        *certificate_details->add_subject_alt_names();
    subject_alt_name.set_uri(uri_san);
  }
  for (auto& ip_san : Utility::getSubjectAltNames(*cert, GEN_IPADD)) {
    envoy::admin::v3::SubjectAlternateName& subject_alt_name =
        *certificate_details->add_subject_alt_names();
    subject_alt_name.set_ip_address(ip_san);
  }
  return certificate_details;
}

// Client-side TLS context: applies client-only configuration (SNI,
// renegotiation, session-key caching, ALPN, test-only signing algorithms)
// on top of the shared ContextImpl setup.
ClientContextImpl::ClientContextImpl(Stats::Scope& scope,
                                     const Envoy::Ssl::ClientContextConfig& config,
                                     TimeSource& time_source)
    : ContextImpl(scope, config, time_source),
      server_name_indication_(config.serverNameIndication()),
      allow_renegotiation_(config.allowRenegotiation()),
      max_session_keys_(config.maxSessionKeys()) {
  // This should be guaranteed during configuration ingestion for client contexts.
  ASSERT(tls_contexts_.size() == 1);

  if (!parsed_alpn_protocols_.empty()) {
    for (auto& ctx : tls_contexts_) {
      // Note: unlike most SSL_* calls, SSL_CTX_set_alpn_protos() returns 0 on success.
      const int rc = SSL_CTX_set_alpn_protos(ctx.ssl_ctx_.get(), parsed_alpn_protocols_.data(),
                                             parsed_alpn_protocols_.size());
      RELEASE_ASSERT(rc == 0, Utility::getLastCryptoError().value_or(""));
    }
  }

  if (!config.signingAlgorithmsForTest().empty()) {
    const uint16_t sigalgs = parseSigningAlgorithmsForTest(config.signingAlgorithmsForTest());
    RELEASE_ASSERT(sigalgs != 0, fmt::format("unsupported signing algorithm {}",
                                             config.signingAlgorithmsForTest()));

    for (auto& ctx : tls_contexts_) {
      const int rc = SSL_CTX_set_verify_algorithm_prefs(ctx.ssl_ctx_.get(), &sigalgs, 1);
      RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));
    }
  }

  if (max_session_keys_ > 0) {
    // Capture new session tickets so they can be reused for resumption on
    // later connections; the callback forwards to newSessionKey().
    SSL_CTX_set_session_cache_mode(tls_contexts_[0].ssl_ctx_.get(), SSL_SESS_CACHE_CLIENT);
    SSL_CTX_sess_set_new_cb(
        tls_contexts_[0].ssl_ctx_.get(), [](SSL* ssl, SSL_SESSION* session) -> int {
          ContextImpl* context_impl =
              static_cast<ContextImpl*>(SSL_CTX_get_app_data(SSL_get_SSL_CTX(ssl)));
          ClientContextImpl* client_context_impl = dynamic_cast<ClientContextImpl*>(context_impl);
          RELEASE_ASSERT(client_context_impl != nullptr, ""); // for Coverity
          return client_context_impl->newSessionKey(session);
        });
  }
}

// Parses a protocol list into ALPN wire format and applies it to the given
// SSL connection. Returns true when a non-empty ALPN list was applied.
bool ContextImpl::parseAndSetAlpn(const std::vector<std::string>& alpn, SSL& ssl) {
  std::vector<uint8_t> parsed_alpn = parseAlpnProtocols(absl::StrJoin(alpn, ","));
  if (!parsed_alpn.empty()) {
    const int rc = SSL_set_alpn_protos(&ssl, parsed_alpn.data(), parsed_alpn.size());
    // This should only happen if memory allocation fails, e.g. OOM.
    RELEASE_ASSERT(rc == 0, Utility::getLastCryptoError().value_or(""));
    return true;
  }

  return false;
}

// Creates a client SSL connection, applying per-connection overrides from the
// transport socket options: SNI, SAN verification, ALPN, and session reuse.
bssl::UniquePtr<SSL> ClientContextImpl::newSsl(const Network::TransportSocketOptions* options) {
  bssl::UniquePtr<SSL> ssl_con(ContextImpl::newSsl(options));

  const std::string server_name_indication = options && options->serverNameOverride().has_value()
                                                 ? options->serverNameOverride().value()
                                                 : server_name_indication_;

  if (!server_name_indication.empty()) {
    const int rc = SSL_set_tlsext_host_name(ssl_con.get(), server_name_indication.c_str());
    RELEASE_ASSERT(rc, Utility::getLastCryptoError().value_or(""));
  }

  if (options && !options->verifySubjectAltNameListOverride().empty()) {
    SSL_set_app_data(ssl_con.get(), options);
    SSL_set_verify(ssl_con.get(), SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nullptr);
  }

  // We determine what ALPN using the following precedence:
  // 1. Option-provided ALPN override.
  // 2. ALPN statically configured in the upstream TLS context.
  // 3. Option-provided ALPN fallback.

  // At this point in the code the ALPN has already been set (if present) to the value specified in
  // the TLS context. We've stored this value in parsed_alpn_protocols_ so we can check that to see
  // if it's already been set.
  bool has_alpn_defined = !parsed_alpn_protocols_.empty();
  if (options) {
    // ALPN override takes precedence over TLS context specified, so blindly overwrite it.
    has_alpn_defined |= parseAndSetAlpn(options->applicationProtocolListOverride(), *ssl_con);
  }

  if (options && !has_alpn_defined && !options->applicationProtocolFallback().empty()) {
    // If ALPN hasn't already been set (either through TLS context or override), use the fallback.
    parseAndSetAlpn(options->applicationProtocolFallback(), *ssl_con);
  }

  if (allow_renegotiation_) {
    SSL_set_renegotiate_mode(ssl_con.get(), ssl_renegotiate_freely);
  }

  if (max_session_keys_ > 0) {
    if (session_keys_single_use_) {
      // Stored single-use session keys, use write/write locks.
      absl::WriterMutexLock l(&session_keys_mu_);
      if (!session_keys_.empty()) {
        // Use the most recently stored session key, since it has the highest
        // probability of still being recognized/accepted by the server.
        SSL_SESSION* session = session_keys_.front().get();
        SSL_set_session(ssl_con.get(), session);
        // Remove single-use session key (TLS 1.3) after first use.
        if (SSL_SESSION_should_be_single_use(session)) {
          session_keys_.pop_front();
        }
      }
    } else {
      // Never stored single-use session keys, use read/write locks.
      absl::ReaderMutexLock l(&session_keys_mu_);
      if (!session_keys_.empty()) {
        // Use the most recently stored session key, since it has the highest
        // probability of still being recognized/accepted by the server.
        SSL_SESSION* session = session_keys_.front().get();
        SSL_set_session(ssl_con.get(), session);
      }
    }
  }

  return ssl_con;
}

// New-session callback target: stores a freshly issued session key for later
// resumption, evicting the oldest keys beyond max_session_keys_.
int ClientContextImpl::newSessionKey(SSL_SESSION* session) {
  // In case we ever store single-use session key (TLS 1.3),
  // we need to switch to using write/write locks.
  if (SSL_SESSION_should_be_single_use(session)) {
    session_keys_single_use_ = true;
  }
  absl::WriterMutexLock l(&session_keys_mu_);
  // Evict oldest entries.
  while (session_keys_.size() >= max_session_keys_) {
    session_keys_.pop_back();
  }
  // Add new session key at the front of the queue, so that it's used first.
  session_keys_.push_front(bssl::UniquePtr<SSL_SESSION>(session));
  return 1; // Tell BoringSSL that we took ownership of the session.
}

// Maps a signing-algorithm name onto its TLS SignatureScheme code point, or 0
// for unsupported names.
uint16_t ClientContextImpl::parseSigningAlgorithmsForTest(const std::string& sigalgs) {
  // This is used only when testing RSA/ECDSA certificate selection, so only the signing algorithms
  // used in tests are supported here.
  if (sigalgs == "rsa_pss_rsae_sha256") {
    return SSL_SIGN_RSA_PSS_RSAE_SHA256;
  } else if (sigalgs == "ecdsa_secp256r1_sha256") {
    return SSL_SIGN_ECDSA_SECP256R1_SHA256;
  }
  return 0;
}

// Server-side TLS context: applies server-only configuration (certificate
// selection, client cert validation, ALPN selection, session tickets/timeouts,
// session ID context, OCSP staples) on top of the shared ContextImpl setup.
ServerContextImpl::ServerContextImpl(Stats::Scope& scope,
                                     const Envoy::Ssl::ServerContextConfig& config,
                                     const std::vector<std::string>& server_names,
                                     TimeSource& time_source)
    : ContextImpl(scope, config, time_source), session_ticket_keys_(config.sessionTicketKeys()),
      ocsp_staple_policy_(config.ocspStaplePolicy()) {
  if (config.tlsCertificates().empty() && !config.capabilities().provides_certificates) {
    throw EnvoyException("Server TlsCertificates must have a certificate specified");
  }

  // Compute the session context ID hash. We use all the certificate identities,
  // since we should have a common ID for session resumption no matter what cert
  // is used. We do this early because it can throw an EnvoyException.
  const SessionContextID session_id = generateHashForSessionContextId(server_names);

  // First, configure the base context for ClientHello interception.
  // TODO(htuch): replace with SSL_IDENTITY when we have this as a means to do multi-cert in
  // BoringSSL.
  if (!config.capabilities().provides_certificates) {
    // Intercept ClientHello on the base context so the best-matching per-cert
    // context can be chosen via selectTlsContext().
    SSL_CTX_set_select_certificate_cb(
        tls_contexts_[0].ssl_ctx_.get(),
        [](const SSL_CLIENT_HELLO* client_hello) -> ssl_select_cert_result_t {
          return static_cast<ServerContextImpl*>(
                     SSL_CTX_get_app_data(SSL_get_SSL_CTX(client_hello->ssl)))
              ->selectTlsContext(client_hello);
        });
  }

  const auto tls_certificates = config.tlsCertificates();

  for (uint32_t i = 0; i < tls_certificates.size(); ++i) {
    auto& ctx = tls_contexts_[i];
    if (!config.capabilities().verifies_peer_certificates &&
        config.certificateValidationContext() != nullptr &&
        !config.certificateValidationContext()->caCert().empty()) {
      ctx.addClientValidationContext(*config.certificateValidationContext(),
                                     config.requireClientCertificate());
    }

    if (!parsed_alpn_protocols_.empty() && !config.capabilities().handles_alpn_selection) {
      SSL_CTX_set_alpn_select_cb(
          ctx.ssl_ctx_.get(),
          [](SSL*, const unsigned char** out, unsigned char* outlen, const unsigned char* in,
             unsigned int inlen, void* arg) -> int {
            return static_cast<ServerContextImpl*>(arg)->alpnSelectCallback(out, outlen, in, inlen);
          },
          this);
    }

    // If the handshaker handles session tickets natively, don't call
    // `SSL_CTX_set_tlsext_ticket_key_cb`.
    if (config.disableStatelessSessionResumption()) {
      SSL_CTX_set_options(ctx.ssl_ctx_.get(), SSL_OP_NO_TICKET);
    } else if (!session_ticket_keys_.empty() && !config.capabilities().handles_session_resumption) {
      SSL_CTX_set_tlsext_ticket_key_cb(
          ctx.ssl_ctx_.get(),
          [](SSL* ssl, uint8_t* key_name, uint8_t* iv, EVP_CIPHER_CTX* ctx, HMAC_CTX* hmac_ctx,
             int encrypt) -> int {
            ContextImpl* context_impl =
                static_cast<ContextImpl*>(SSL_CTX_get_app_data(SSL_get_SSL_CTX(ssl)));
            ServerContextImpl* server_context_impl = dynamic_cast<ServerContextImpl*>(context_impl);
            RELEASE_ASSERT(server_context_impl != nullptr, ""); // for Coverity
            return server_context_impl->sessionTicketProcess(ssl, key_name, iv, ctx, hmac_ctx,
                                                             encrypt);
          });
    }

    if (config.sessionTimeout() && !config.capabilities().handles_session_resumption) {
      auto timeout = config.sessionTimeout().value().count();
      SSL_CTX_set_timeout(ctx.ssl_ctx_.get(), uint32_t(timeout));
    }

    int rc =
        SSL_CTX_set_session_id_context(ctx.ssl_ctx_.get(), session_id.data(), session_id.size());
    RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or(""));

    // Validate/attach the OCSP staple for this certificate, enforcing the
    // must-staple extension and the configured staple policy.
    auto& ocsp_resp_bytes = tls_certificates[i].get().ocspStaple();
    if (ocsp_resp_bytes.empty()) {
      if (Runtime::runtimeFeatureEnabled(
              "envoy.reloadable_features.require_ocsp_response_for_must_staple_certs") &&
          ctx.is_must_staple_) {
        throw EnvoyException("OCSP response is required for must-staple certificate");
      }
      if (ocsp_staple_policy_ == Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple) {
        throw EnvoyException("Required OCSP response is missing from TLS context");
      }
    } else {
      auto response = std::make_unique<Ocsp::OcspResponseWrapper>(ocsp_resp_bytes, time_source_);
      if (!response->matchesCertificate(*ctx.cert_chain_)) {
        throw EnvoyException("OCSP response does not match its TLS certificate");
      }
      ctx.ocsp_response_ = std::move(response);
    }
  }
}

// Hashes the certificate identities and validation settings into a session ID
// context, so that sessions can only be resumed against compatible contexts.
// (Body continues past this chunk.)
ServerContextImpl::SessionContextID
ServerContextImpl::generateHashForSessionContextId(const std::vector<std::string>& server_names) {
  uint8_t
hash_buffer[EVP_MAX_MD_SIZE]; unsigned hash_length; bssl::ScopedEVP_MD_CTX md; int rc = EVP_DigestInit(md.get(), EVP_sha256()); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); // Hash the CommonName/SANs of all the server certificates. This makes sure that sessions can only // be resumed to certificate(s) for the same name(s), but allows resuming to unique certs in the // case that different Envoy instances each have their own certs. All certificates in a // ServerContextImpl context are hashed together, since they all constitute a match on a filter // chain for resumption purposes. if (!capabilities_.provides_certificates) { for (const auto& ctx : tls_contexts_) { X509* cert = SSL_CTX_get0_certificate(ctx.ssl_ctx_.get()); RELEASE_ASSERT(cert != nullptr, "TLS context should have an active certificate"); X509_NAME* cert_subject = X509_get_subject_name(cert); RELEASE_ASSERT(cert_subject != nullptr, "TLS certificate should have a subject"); const int cn_index = X509_NAME_get_index_by_NID(cert_subject, NID_commonName, -1); if (cn_index >= 0) { X509_NAME_ENTRY* cn_entry = X509_NAME_get_entry(cert_subject, cn_index); RELEASE_ASSERT(cn_entry != nullptr, "certificate subject CN should be present"); ASN1_STRING* cn_asn1 = X509_NAME_ENTRY_get_data(cn_entry); if (ASN1_STRING_length(cn_asn1) <= 0) { throw EnvoyException("Invalid TLS context has an empty subject CN"); } rc = EVP_DigestUpdate(md.get(), ASN1_STRING_data(cn_asn1), ASN1_STRING_length(cn_asn1)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } unsigned san_count = 0; bssl::UniquePtr<GENERAL_NAMES> san_names(static_cast<GENERAL_NAMES*>( X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr))); if (san_names != nullptr) { for (const GENERAL_NAME* san : san_names.get()) { switch (san->type) { case GEN_IPADD: rc = EVP_DigestUpdate(md.get(), san->d.iPAddress->data, san->d.iPAddress->length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); 
++san_count; break; case GEN_DNS: rc = EVP_DigestUpdate(md.get(), ASN1_STRING_data(san->d.dNSName), ASN1_STRING_length(san->d.dNSName)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); ++san_count; break; case GEN_URI: rc = EVP_DigestUpdate(md.get(), ASN1_STRING_data(san->d.uniformResourceIdentifier), ASN1_STRING_length(san->d.uniformResourceIdentifier)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); ++san_count; break; } } } // It's possible that the certificate doesn't have a subject, but // does have SANs. Make sure that we have one or the other. if (cn_index < 0 && san_count == 0) { throw EnvoyException("Invalid TLS context has neither subject CN nor SAN names"); } rc = X509_NAME_digest(X509_get_issuer_name(cert), EVP_sha256(), hash_buffer, &hash_length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); RELEASE_ASSERT(hash_length == SHA256_DIGEST_LENGTH, fmt::format("invalid SHA256 hash length {}", hash_length)); rc = EVP_DigestUpdate(md.get(), hash_buffer, hash_length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } } // Hash all the settings that affect whether the server will allow/accept // the client connection. This ensures that the client is always validated against // the correct settings, even if session resumption across different listeners // is enabled. 
if (ca_cert_ != nullptr) { rc = X509_digest(ca_cert_.get(), EVP_sha256(), hash_buffer, &hash_length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); RELEASE_ASSERT(hash_length == SHA256_DIGEST_LENGTH, fmt::format("invalid SHA256 hash length {}", hash_length)); rc = EVP_DigestUpdate(md.get(), hash_buffer, hash_length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); // verify_subject_alt_name_list_ can only be set with a ca_cert for (const std::string& name : verify_subject_alt_name_list_) { rc = EVP_DigestUpdate(md.get(), name.data(), name.size()); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } } for (const auto& hash : verify_certificate_hash_list_) { rc = EVP_DigestUpdate(md.get(), hash.data(), hash.size() * sizeof(std::remove_reference<decltype(hash)>::type::value_type)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } for (const auto& hash : verify_certificate_spki_list_) { rc = EVP_DigestUpdate(md.get(), hash.data(), hash.size() * sizeof(std::remove_reference<decltype(hash)>::type::value_type)); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } // Hash configured SNIs for this context, so that sessions cannot be resumed across different // filter chains, even when using the same server certificate. for (const auto& name : server_names) { rc = EVP_DigestUpdate(md.get(), name.data(), name.size()); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); } SessionContextID session_id; // Ensure that the output size of the hash we are using is no greater than // TLS session ID length that we want to generate. 
static_assert(session_id.size() == SHA256_DIGEST_LENGTH, "hash size mismatch"); static_assert(session_id.size() == SSL_MAX_SSL_SESSION_ID_LENGTH, "TLS session ID size mismatch"); rc = EVP_DigestFinal(md.get(), session_id.data(), &hash_length); RELEASE_ASSERT(rc == 1, Utility::getLastCryptoError().value_or("")); RELEASE_ASSERT(hash_length == session_id.size(), "SHA256 hash length must match TLS Session ID size"); return session_id; } int ServerContextImpl::sessionTicketProcess(SSL*, uint8_t* key_name, uint8_t* iv, EVP_CIPHER_CTX* ctx, HMAC_CTX* hmac_ctx, int encrypt) { const EVP_MD* hmac = EVP_sha256(); const EVP_CIPHER* cipher = EVP_aes_256_cbc(); if (encrypt == 1) { // Encrypt RELEASE_ASSERT(!session_ticket_keys_.empty(), ""); // TODO(ggreenway): validate in SDS that session_ticket_keys_ cannot be empty, // or if we allow it to be emptied, reconfigure the context so this callback // isn't set. const Envoy::Ssl::ServerContextConfig::SessionTicketKey& key = session_ticket_keys_.front(); static_assert(std::tuple_size<decltype(key.name_)>::value == SSL_TICKET_KEY_NAME_LEN, "Expected key.name length"); std::copy_n(key.name_.begin(), SSL_TICKET_KEY_NAME_LEN, key_name); const int rc = RAND_bytes(iv, EVP_CIPHER_iv_length(cipher)); ASSERT(rc); // This RELEASE_ASSERT is logically a static_assert, but we can't actually get // EVP_CIPHER_key_length(cipher) at compile-time RELEASE_ASSERT(key.aes_key_.size() == EVP_CIPHER_key_length(cipher), ""); if (!EVP_EncryptInit_ex(ctx, cipher, nullptr, key.aes_key_.data(), iv)) { return -1; } if (!HMAC_Init_ex(hmac_ctx, key.hmac_key_.data(), key.hmac_key_.size(), hmac, nullptr)) { return -1; } return 1; // success } else { // Decrypt bool is_enc_key = true; // first element is the encryption key for (const Envoy::Ssl::ServerContextConfig::SessionTicketKey& key : session_ticket_keys_) { static_assert(std::tuple_size<decltype(key.name_)>::value == SSL_TICKET_KEY_NAME_LEN, "Expected key.name length"); if (std::equal(key.name_.begin(), 
key.name_.end(), key_name)) { if (!HMAC_Init_ex(hmac_ctx, key.hmac_key_.data(), key.hmac_key_.size(), hmac, nullptr)) { return -1; } RELEASE_ASSERT(key.aes_key_.size() == EVP_CIPHER_key_length(cipher), ""); if (!EVP_DecryptInit_ex(ctx, cipher, nullptr, key.aes_key_.data(), iv)) { return -1; } // If our current encryption was not the decryption key, renew return is_enc_key ? 1 // success; do not renew : 2; // success: renew key } is_enc_key = false; } return 0; // decryption failed } } bool ServerContextImpl::isClientEcdsaCapable(const SSL_CLIENT_HELLO* ssl_client_hello) { CBS client_hello; CBS_init(&client_hello, ssl_client_hello->client_hello, ssl_client_hello->client_hello_len); // This is the TLSv1.3 case (TLSv1.2 on the wire and the supported_versions extensions present). // We just need to look at signature algorithms. const uint16_t client_version = ssl_client_hello->version; if (client_version == TLS1_2_VERSION && tls_max_version_ == TLS1_3_VERSION) { // If the supported_versions extension is found then we assume that the client is competent // enough that just checking the signature_algorithms is sufficient. 
const uint8_t* supported_versions_data; size_t supported_versions_len; if (SSL_early_callback_ctx_extension_get(ssl_client_hello, TLSEXT_TYPE_supported_versions, &supported_versions_data, &supported_versions_len)) { const uint8_t* signature_algorithms_data; size_t signature_algorithms_len; if (SSL_early_callback_ctx_extension_get(ssl_client_hello, TLSEXT_TYPE_signature_algorithms, &signature_algorithms_data, &signature_algorithms_len)) { CBS signature_algorithms_ext, signature_algorithms; CBS_init(&signature_algorithms_ext, signature_algorithms_data, signature_algorithms_len); if (!CBS_get_u16_length_prefixed(&signature_algorithms_ext, &signature_algorithms) || CBS_len(&signature_algorithms_ext) != 0) { return false; } if (cbsContainsU16(signature_algorithms, SSL_SIGN_ECDSA_SECP256R1_SHA256)) { return true; } } return false; } } // Otherwise we are < TLSv1.3 and need to look at both the curves in the supported_groups for // ECDSA and also for a compatible cipher suite. https://tools.ietf.org/html/rfc4492#section-5.1.1 const uint8_t* curvelist_data; size_t curvelist_len; if (!SSL_early_callback_ctx_extension_get(ssl_client_hello, TLSEXT_TYPE_supported_groups, &curvelist_data, &curvelist_len)) { return false; } CBS curvelist; CBS_init(&curvelist, curvelist_data, curvelist_len); // We only support P256 ECDSA curves today. if (!cbsContainsU16(curvelist, SSL_CURVE_SECP256R1)) { return false; } // The client must have offered an ECDSA ciphersuite that we like. CBS cipher_suites; CBS_init(&cipher_suites, ssl_client_hello->cipher_suites, ssl_client_hello->cipher_suites_len); while (CBS_len(&cipher_suites) > 0) { uint16_t cipher_id; if (!CBS_get_u16(&cipher_suites, &cipher_id)) { return false; } // All tls_context_ share the same set of enabled ciphers, so we can just look at the base // context. 
if (tls_contexts_[0].isCipherEnabled(cipher_id, client_version)) { return true; } } return false; } bool ServerContextImpl::isClientOcspCapable(const SSL_CLIENT_HELLO* ssl_client_hello) { const uint8_t* status_request_data; size_t status_request_len; if (SSL_early_callback_ctx_extension_get(ssl_client_hello, TLSEXT_TYPE_status_request, &status_request_data, &status_request_len)) { return true; } return false; } OcspStapleAction ServerContextImpl::ocspStapleAction(const ContextImpl::TlsContext& ctx, bool client_ocsp_capable) { if (!client_ocsp_capable) { return OcspStapleAction::ClientNotCapable; } auto& response = ctx.ocsp_response_; if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.check_ocsp_policy")) { // Expiration check is disabled. Proceed as if the policy is LenientStapling and the response // is not expired. return response ? OcspStapleAction::Staple : OcspStapleAction::NoStaple; } auto policy = ocsp_staple_policy_; if (ctx.is_must_staple_) { // The certificate has the must-staple extension, so upgrade the policy to match. policy = Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple; } const bool valid_response = response && !response->isExpired(); switch (policy) { case Ssl::ServerContextConfig::OcspStaplePolicy::LenientStapling: if (!valid_response) { return OcspStapleAction::NoStaple; } return OcspStapleAction::Staple; case Ssl::ServerContextConfig::OcspStaplePolicy::StrictStapling: if (valid_response) { return OcspStapleAction::Staple; } if (response) { // Expired response. 
return OcspStapleAction::Fail; } return OcspStapleAction::NoStaple; case Ssl::ServerContextConfig::OcspStaplePolicy::MustStaple: if (!valid_response) { return OcspStapleAction::Fail; } return OcspStapleAction::Staple; default: NOT_REACHED_GCOVR_EXCL_LINE; } } enum ssl_select_cert_result_t ServerContextImpl::selectTlsContext(const SSL_CLIENT_HELLO* ssl_client_hello) { const bool client_ecdsa_capable = isClientEcdsaCapable(ssl_client_hello); const bool client_ocsp_capable = isClientOcspCapable(ssl_client_hello); // Fallback on first certificate. const TlsContext* selected_ctx = &tls_contexts_[0]; auto ocsp_staple_action = ocspStapleAction(*selected_ctx, client_ocsp_capable); for (const auto& ctx : tls_contexts_) { if (client_ecdsa_capable != ctx.is_ecdsa_) { continue; } auto action = ocspStapleAction(ctx, client_ocsp_capable); if (action == OcspStapleAction::Fail) { continue; } selected_ctx = &ctx; ocsp_staple_action = action; break; } if (client_ocsp_capable) { stats_.ocsp_staple_requests_.inc(); } switch (ocsp_staple_action) { case OcspStapleAction::Staple: { // We avoid setting the OCSP response if the client didn't request it, but doing so is safe. 
RELEASE_ASSERT(selected_ctx->ocsp_response_, "OCSP response must be present under OcspStapleAction::Staple"); auto& resp_bytes = selected_ctx->ocsp_response_->rawBytes(); int rc = SSL_set_ocsp_response(ssl_client_hello->ssl, resp_bytes.data(), resp_bytes.size()); RELEASE_ASSERT(rc != 0, ""); stats_.ocsp_staple_responses_.inc(); } break; case OcspStapleAction::NoStaple: stats_.ocsp_staple_omitted_.inc(); break; case OcspStapleAction::Fail: stats_.ocsp_staple_failed_.inc(); return ssl_select_cert_error; case OcspStapleAction::ClientNotCapable: break; } RELEASE_ASSERT(SSL_set_SSL_CTX(ssl_client_hello->ssl, selected_ctx->ssl_ctx_.get()) != nullptr, ""); return ssl_select_cert_success; } void ServerContextImpl::TlsContext::addClientValidationContext( const Envoy::Ssl::CertificateValidationContextConfig& config, bool require_client_cert) { bssl::UniquePtr<BIO> bio( BIO_new_mem_buf(const_cast<char*>(config.caCert().data()), config.caCert().size())); RELEASE_ASSERT(bio != nullptr, ""); // Based on BoringSSL's SSL_add_file_cert_subjects_to_stack(). bssl::UniquePtr<STACK_OF(X509_NAME)> list(sk_X509_NAME_new( [](const X509_NAME** a, const X509_NAME** b) -> int { return X509_NAME_cmp(*a, *b); })); RELEASE_ASSERT(list != nullptr, ""); for (;;) { bssl::UniquePtr<X509> cert(PEM_read_bio_X509(bio.get(), nullptr, nullptr, nullptr)); if (cert == nullptr) { break; } X509_NAME* name = X509_get_subject_name(cert.get()); if (name == nullptr) { throw EnvoyException( absl::StrCat("Failed to load trusted client CA certificates from ", config.caCertPath())); } // Check for duplicates. if (sk_X509_NAME_find(list.get(), nullptr, name)) { continue; } bssl::UniquePtr<X509_NAME> name_dup(X509_NAME_dup(name)); if (name_dup == nullptr || !sk_X509_NAME_push(list.get(), name_dup.release())) { throw EnvoyException( absl::StrCat("Failed to load trusted client CA certificates from ", config.caCertPath())); } } // Check for EOF. 
const uint32_t err = ERR_peek_last_error(); if (ERR_GET_LIB(err) == ERR_LIB_PEM && ERR_GET_REASON(err) == PEM_R_NO_START_LINE) { ERR_clear_error(); } else { throw EnvoyException( absl::StrCat("Failed to load trusted client CA certificates from ", config.caCertPath())); } SSL_CTX_set_client_CA_list(ssl_ctx_.get(), list.release()); // SSL_VERIFY_PEER or stronger mode was already set in ContextImpl::ContextImpl(). if (require_client_cert) { SSL_CTX_set_verify(ssl_ctx_.get(), SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nullptr); } } bool ServerContextImpl::TlsContext::isCipherEnabled(uint16_t cipher_id, uint16_t client_version) { const SSL_CIPHER* c = SSL_get_cipher_by_value(cipher_id); if (c == nullptr) { return false; } // Skip TLS 1.2 only ciphersuites unless the client supports it. if (SSL_CIPHER_get_min_version(c) > client_version) { return false; } if (SSL_CIPHER_get_auth_nid(c) != NID_auth_ecdsa) { return false; } for (const SSL_CIPHER* our_c : SSL_CTX_get_ciphers(ssl_ctx_.get())) { if (SSL_CIPHER_get_id(our_c) == SSL_CIPHER_get_id(c)) { return true; } } return false; } bool ContextImpl::verifyCertChain(X509& leaf_cert, STACK_OF(X509) & intermediates, std::string& error_details) { bssl::UniquePtr<X509_STORE_CTX> ctx(X509_STORE_CTX_new()); // It doesn't matter which SSL context is used, because they share the same // cert validation config. 
X509_STORE* store = SSL_CTX_get_cert_store(tls_contexts_[0].ssl_ctx_.get()); if (!X509_STORE_CTX_init(ctx.get(), store, &leaf_cert, &intermediates)) { error_details = "Failed to verify certificate chain: X509_STORE_CTX_init"; return false; } int res = doVerifyCertChain(ctx.get(), nullptr, leaf_cert, nullptr); if (res <= 0) { const int n = X509_STORE_CTX_get_error(ctx.get()); const int depth = X509_STORE_CTX_get_error_depth(ctx.get()); error_details = absl::StrCat("X509_verify_cert: certificate verification error at depth ", depth, ": ", X509_verify_cert_error_string(n)); return false; } return true; } } // namespace Tls } // namespace TransportSockets } // namespace Extensions } // namespace Envoy
// Fizzy: A fast WebAssembly interpreter // Copyright 2019-2020 The Fizzy Authors. // SPDX-License-Identifier: Apache-2.0 #pragma once #include <cstdint> namespace fizzy { struct InstructionMetrics { /// The minimum number of the stack items required for the instruction. int8_t stack_height_required; /// The stack height change caused by the instruction execution, /// i.e. stack height _after_ execution - stack height _before_ execution. int8_t stack_height_change; }; const InstructionMetrics* get_instruction_metrics_table() noexcept; } // namespace fizzy
#pragma once #include <tagsql/core/operators/equal.h++> namespace tagsql { struct generic_equal_t { template<typename ContextIndependentTag> using operator_t = operators::equal_t<ContextIndependentTag>; static auto repr(std::string right, std::string left) -> std::string { return std::move(right) + " = " + std::move(left); } template<typename Left, typename Right> struct check_compatibility { template<typename L, typename R> static auto compare(L && l, R && r) -> decltype(l == r, std::true_type()); static auto compare(...) -> std::false_type; static constexpr bool value = decltype(compare(std::declval<Left>(), std::declval<Right>()))::value; static_assert(!value, "incompatible operands : cannot be compared using == "); }; }; }
// This is a personal academic project. Dear PVS-Studio, please check it. // PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com // PVS-settings // PVS-settings end #include<iostream> #include<vector> #include<set> #define kektor vector //)))))))))))))))) using namespace std; kektor<kektor<char> > g; //char kektor<char> used; kektor<int> order; set<int>component; int n; //-V707 void dfs1(int v) { used[v] = true; for (int i = 0; i < n; ++i) { if (!g[v][i]) continue; if (!used[i]) dfs1(i); } order.push_back(v); } void dfs2(int v) { used[v] = true; component.insert(v); for (int i = 0; i < n; ++i) { if (!g[i][v]) continue; if (!used[i]) dfs2(i); } } int main() { int a, rc = 0, lc = 0; cin >> n; g.resize(n); for (int i = 0; i < n; ++i) { g[i].resize(n); for (int j = 0; j < n; ++j) { cin >> a; g[i][j] = a; } } used.assign(n, false); for (int i = 0; i < n; ++i) if (!used[i]) dfs1(i); used.assign(n, false); for (int i = 0; i < n; ++i) { int v = order[n - 1 - i]; if (!used[v]) { dfs2(v); int root = 1, leaf = 1; for (auto iter = component.begin(); iter != component.end(); ++iter) { for (int j = 0; j < n; ++j) { leaf = leaf && !(g[*iter][j] && component.find(j) == component.end()); root = root && !(g[j][*iter] && component.find(j) == component.end()); } } rc += root; lc += leaf; component.clear(); } } cout << ((rc > lc) ? rc : lc); return 0; }
#ifndef OSMIUM_INDEX_MULTIMAP_SPARSE_MEM_ARRAY_HPP #define OSMIUM_INDEX_MULTIMAP_SPARSE_MEM_ARRAY_HPP /* This file is part of Osmium (http://osmcode.org/libosmium). Copyright 2013-2016 Jochen Topf <jochen@topf.org> and others (see README). Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

#include <vector>

#include <osmium/index/detail/vector_multimap.hpp>

namespace osmium {

    namespace index {

        namespace multimap {

            /// Alias that injects std::vector as the backing container
            /// template for VectorBasedSparseMultimap below.
            template <typename T>
            using StdVectorWrap = std::vector<T>;

            /// Multimap from TId to TValue backed by a single in-memory
            /// std::vector (the "sparse array" storage variant; see
            /// detail/vector_multimap.hpp for the lookup semantics).
            template <typename TId, typename TValue>
            using SparseMemArray = VectorBasedSparseMultimap<TId, TValue, StdVectorWrap>;

        } // namespace multimap

    } // namespace index

} // namespace osmium

#endif // OSMIUM_INDEX_MULTIMAP_SPARSE_MEM_ARRAY_HPP
#include "v3/include/AsyncLeaseGrantResponse.hpp"
#include "v3/include/action_constants.hpp"

// Copies the fields of a gRPC LeaseGrantResponse into this wrapper:
// the cluster revision from the response header, the granted lease id,
// its TTL, and any error message reported by the server.
void etcdv3::AsyncLeaseGrantResponse::ParseResponse(LeaseGrantResponse& resp)
{
  index = resp.header().revision();
  // Attach the granted lease id to the key-value payload; callers
  // presumably read it back through value.kvs — TODO confirm.
  value.kvs.set_lease(resp.id());
  value.set_ttl(resp.ttl());
  // Non-empty when the server reported an error for the grant.
  error_message = resp.error();
}
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // UNSUPPORTED: c++98, c++03, c++11, c++14 // <unordered_multiset> // class unordered_multiset // node_type extract(key_type const&); #include <unordered_set> #include "test_macros.h" #include "min_allocator.h" #include "Counter.h" template <class Container, class KeyTypeIter> void test(Container& c, KeyTypeIter first, KeyTypeIter last) { size_t sz = c.size(); assert((size_t)std::distance(first, last) == sz); for (KeyTypeIter copy = first; copy != last; ++copy) { typename Container::node_type t = c.extract(*copy); assert(!t.empty()); --sz; assert(t.value() == *copy); assert(t.get_allocator() == c.get_allocator()); assert(sz == c.size()); } assert(c.size() == 0); for (KeyTypeIter copy = first; copy != last; ++copy) { typename Container::node_type t = c.extract(*copy); assert(t.empty()); } } int main(int, char**) { { std::unordered_multiset<int> m = {1, 2, 3, 4, 5, 6}; int keys[] = {1, 2, 3, 4, 5, 6}; test(m, std::begin(keys), std::end(keys)); } { std::unordered_multiset<Counter<int>> m = {1, 2, 3, 4, 5, 6}; { Counter<int> keys[] = {1, 2, 3, 4, 5, 6}; assert(Counter_base::gConstructed == 6+6); test(m, std::begin(keys), std::end(keys)); } assert(Counter_base::gConstructed == 0); } { using min_alloc_set = std::unordered_multiset<int, std::hash<int>, std::equal_to<int>, min_allocator<int>>; min_alloc_set m = {1, 2, 3, 4, 5, 6}; int keys[] = {1, 2, 3, 4, 5, 6}; test(m, std::begin(keys), std::end(keys)); } return 0; }
//===----------------------------------------------------------------------===// // DuckDB // // duckdb/function/compression/compression.hpp // // //===----------------------------------------------------------------------===// #pragma once #include "duckdb/function/compression_function.hpp" #include "duckdb/function/function_set.hpp" namespace duckdb { struct ConstantFun { static CompressionFunction GetFunction(PhysicalType type); static bool TypeIsSupported(PhysicalType type); }; struct UncompressedFun { static CompressionFunction GetFunction(PhysicalType type); static bool TypeIsSupported(PhysicalType type); }; struct RLEFun { static CompressionFunction GetFunction(PhysicalType type); static bool TypeIsSupported(PhysicalType type); }; struct BitpackingFun { static CompressionFunction GetFunction(PhysicalType type); static bool TypeIsSupported(PhysicalType type); }; struct DictionaryCompressionFun { static CompressionFunction GetFunction(PhysicalType type); static bool TypeIsSupported(PhysicalType type); }; } // namespace duckdb
// // List.cpp // edgeRuntime // // Created by Abdelrahaman Aly on 22/11/13. // Copyright (c) 2013 Abdelrhaman Aly. All rights reserved. // //Generic Headers #include <iostream> //Custom Headers #include "List.h" namespace Utils { //TODO: Add code from template list here }
// // blocking_udp_echo_client.cpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2014 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #include <cstdlib> #include <cstring> #include <iostream> #include <boost/asio.hpp> using boost::asio::ip::udp; enum { max_length = 1024 }; int main(int argc, char* argv[]) { try { if (argc != 3) { std::cerr << "Usage: blocking_udp_echo_client <host> <port>\n"; return 1; } boost::asio::io_service io_service; udp::socket s(io_service, udp::endpoint(udp::v4(), 0)); udp::resolver resolver(io_service); udp::endpoint endpoint = *resolver.resolve({udp::v4(), argv[1], argv[2]}); std::cout << "Enter message: "; char request[max_length]; std::cin.getline(request, max_length); size_t request_length = std::strlen(request); s.send_to(boost::asio::buffer(request, request_length), endpoint); char reply[max_length]; udp::endpoint sender_endpoint; size_t reply_length = s.receive_from( boost::asio::buffer(reply, max_length), sender_endpoint); std::cout << "Reply is: "; std::cout.write(reply, reply_length); std::cout << "\n"; } catch (std::exception& e) { std::cerr << "Exception: " << e.what() << "\n"; } return 0; }
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.h"

#include <cassert>
#include <cstring>

#include "webrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h"
#include "webrtc/modules/rtp_rtcp/source/fec_private_tables_random.h"

namespace {

// Allow for different modes of protection for packets in UEP case.
enum ProtectionMode {
  // FEC rows for the remaining packets are shifted past the important
  // packets, so the two protection sets do not overlap.
  kModeNoOverlap,
  // Remaining FEC rows span all media packets, overlapping the
  // important-packet protection.
  kModeOverlap,
  // Like overlap, but every remaining FEC row additionally covers the
  // first media packet (bit 7 of the first mask byte is forced on).
  kModeBiasFirstPacket,
};

// Fits an input mask (sub_mask) to an output mask.
// The mask is a matrix where the rows are the FEC packets,
// and the columns are the source packets the FEC is applied to.
// Each row of the mask is represented by a number of mask bytes.
//
// \param[in]  num_mask_bytes     The number of mask bytes of output mask.
// \param[in]  num_sub_mask_bytes The number of mask bytes of input mask.
// \param[in]  num_rows           The number of rows of the input mask.
// \param[in]  sub_mask           A pointer to hold the input mask, of size
//                                [0, num_rows * num_sub_mask_bytes]
// \param[out] packet_mask        A pointer to hold the output mask, of size
//                                [0, x * num_mask_bytes], where x >= num_rows.
void FitSubMask(int num_mask_bytes, int num_sub_mask_bytes, int num_rows, const uint8_t* sub_mask, uint8_t* packet_mask) { if (num_mask_bytes == num_sub_mask_bytes) { memcpy(packet_mask, sub_mask, num_rows * num_sub_mask_bytes); } else { for (int i = 0; i < num_rows; ++i) { int pkt_mask_idx = i * num_mask_bytes; int pkt_mask_idx2 = i * num_sub_mask_bytes; for (int j = 0; j < num_sub_mask_bytes; ++j) { packet_mask[pkt_mask_idx] = sub_mask[pkt_mask_idx2]; pkt_mask_idx++; pkt_mask_idx2++; } } } } // Shifts a mask by number of columns (bits), and fits it to an output mask. // The mask is a matrix where the rows are the FEC packets, // and the columns are the source packets the FEC is applied to. // Each row of the mask is represented by a number of mask bytes. // // \param[in] num_mask_bytes The number of mask bytes of output mask. // \param[in] num_sub_mask_bytes The number of mask bytes of input mask. // \param[in] num_column_shift The number columns to be shifted, and // the starting row for the output mask. // \param[in] end_row The ending row for the output mask. // \param[in] sub_mask A pointer to hold the input mask, of size // [0, (end_row_fec - start_row_fec) * // num_sub_mask_bytes] // \param[out] packet_mask A pointer to hold the output mask, of size // [0, x * num_mask_bytes], // where x >= end_row_fec. // TODO (marpan): This function is doing three things at the same time: // shift within a byte, byte shift and resizing. // Split up into subroutines. void ShiftFitSubMask(int num_mask_bytes, int res_mask_bytes, int num_column_shift, int end_row, const uint8_t* sub_mask, uint8_t* packet_mask) { // Number of bit shifts within a byte const int num_bit_shifts = (num_column_shift % 8); const int num_byte_shifts = num_column_shift >> 3; // Modify new mask with sub-mask21. // Loop over the remaining FEC packets. 
for (int i = num_column_shift; i < end_row; ++i) { // Byte index of new mask, for row i and column res_mask_bytes, // offset by the number of bytes shifts int pkt_mask_idx = i * num_mask_bytes + res_mask_bytes - 1 + num_byte_shifts; // Byte index of sub_mask, for row i and column res_mask_bytes int pkt_mask_idx2 = (i - num_column_shift) * res_mask_bytes + res_mask_bytes - 1; uint8_t shift_right_curr_byte = 0; uint8_t shift_left_prev_byte = 0; uint8_t comb_new_byte = 0; // Handle case of num_mask_bytes > res_mask_bytes: // For a given row, copy the rightmost "numBitShifts" bits // of the last byte of sub_mask into output mask. if (num_mask_bytes > res_mask_bytes) { shift_left_prev_byte = (sub_mask[pkt_mask_idx2] << (8 - num_bit_shifts)); packet_mask[pkt_mask_idx + 1] = shift_left_prev_byte; } // For each row i (FEC packet), shift the bit-mask of the sub_mask. // Each row of the mask contains "resMaskBytes" of bytes. // We start from the last byte of the sub_mask and move to first one. for (int j = res_mask_bytes - 1; j > 0; j--) { // Shift current byte of sub21 to the right by "numBitShifts". shift_right_curr_byte = sub_mask[pkt_mask_idx2] >> num_bit_shifts; // Fill in shifted bits with bits from the previous (left) byte: // First shift the previous byte to the left by "8-numBitShifts". shift_left_prev_byte = (sub_mask[pkt_mask_idx2 - 1] << (8 - num_bit_shifts)); // Then combine both shifted bytes into new mask byte. comb_new_byte = shift_right_curr_byte | shift_left_prev_byte; // Assign to new mask. packet_mask[pkt_mask_idx] = comb_new_byte; pkt_mask_idx--; pkt_mask_idx2--; } // For the first byte in the row (j=0 case). 
shift_right_curr_byte = sub_mask[pkt_mask_idx2] >> num_bit_shifts; packet_mask[pkt_mask_idx] = shift_right_curr_byte; } } } // namespace namespace webrtc { namespace internal { PacketMaskTable::PacketMaskTable(FecMaskType fec_mask_type, int num_media_packets) : fec_mask_type_(InitMaskType(fec_mask_type, num_media_packets)), fec_packet_mask_table_(InitMaskTable(fec_mask_type_)) {} // Sets |fec_mask_type_| to the type of packet mask selected. The type of // packet mask selected is based on |fec_mask_type| and |num_media_packets|. // If |num_media_packets| is larger than the maximum allowed by |fec_mask_type| // for the bursty type, then the random type is selected. FecMaskType PacketMaskTable::InitMaskType(FecMaskType fec_mask_type, int num_media_packets) { // The mask should not be bigger than |packetMaskTbl|. assert(num_media_packets <= static_cast<int>(sizeof(kPacketMaskRandomTbl) / sizeof(*kPacketMaskRandomTbl))); switch (fec_mask_type) { case kFecMaskRandom: { return kFecMaskRandom; } case kFecMaskBursty: { int max_media_packets = static_cast<int>(sizeof(kPacketMaskBurstyTbl) / sizeof(*kPacketMaskBurstyTbl)); if (num_media_packets > max_media_packets) { return kFecMaskRandom; } else { return kFecMaskBursty; } } } assert(false); return kFecMaskRandom; } // Returns the pointer to the packet mask tables corresponding to type // |fec_mask_type|. 
const uint8_t*** PacketMaskTable::InitMaskTable(FecMaskType fec_mask_type) {
  switch (fec_mask_type) {
    case kFecMaskRandom: {
      return kPacketMaskRandomTbl;
    }
    case kFecMaskBursty: {
      return kPacketMaskBurstyTbl;
    }
  }
  // Unreachable for valid enum values; fall back to the random table.
  assert(false);
  return kPacketMaskRandomTbl;
}

// Remaining protection after important (first partition) packet protection
void RemainingPacketProtection(int num_media_packets,
                               int num_fec_remaining,
                               int num_fec_for_imp_packets,
                               int num_mask_bytes,
                               ProtectionMode mode,
                               uint8_t* packet_mask,
                               const PacketMaskTable& mask_table) {
  if (mode == kModeNoOverlap) {
    // sub_mask21
    // Row size of the sub-mask depends on whether more than 16 packets
    // remain to be protected (L-bit set vs clear).
    const int l_bit =
        (num_media_packets - num_fec_for_imp_packets) > 16 ? 1 : 0;

    const int res_mask_bytes =
        (l_bit == 1) ? kMaskSizeLBitSet : kMaskSizeLBitClear;

    const uint8_t* packet_mask_sub_21 = mask_table.fec_packet_mask_table()[
        num_media_packets - num_fec_for_imp_packets - 1][num_fec_remaining - 1];

    // Shift the sub-mask right past the already-protected first-partition
    // packets so the two partitions do not overlap.
    ShiftFitSubMask(num_mask_bytes, res_mask_bytes, num_fec_for_imp_packets,
                    (num_fec_for_imp_packets + num_fec_remaining),
                    packet_mask_sub_21, packet_mask);

  } else if (mode == kModeOverlap || mode == kModeBiasFirstPacket) {
    // sub_mask22
    const uint8_t* packet_mask_sub_22 = mask_table
        .fec_packet_mask_table()[num_media_packets - 1][num_fec_remaining - 1];

    FitSubMask(num_mask_bytes, num_mask_bytes, num_fec_remaining,
               packet_mask_sub_22,
               &packet_mask[num_fec_for_imp_packets * num_mask_bytes]);

    if (mode == kModeBiasFirstPacket) {
      // Set the leftmost bit of each remaining FEC row so that every
      // remaining FEC packet also protects the first media packet.
      for (int i = 0; i < num_fec_remaining; ++i) {
        int pkt_mask_idx = i * num_mask_bytes;
        packet_mask[pkt_mask_idx] = packet_mask[pkt_mask_idx] | (1 << 7);
      }
    }
  } else {
    assert(false);
  }
}

// Protection for important (first partition) packets
void ImportantPacketProtection(int num_fec_for_imp_packets,
                               int num_imp_packets,
                               int num_mask_bytes,
                               uint8_t* packet_mask,
                               const PacketMaskTable& mask_table) {
  const int l_bit = num_imp_packets > 16 ? 1 : 0;
  const int num_imp_mask_bytes =
      (l_bit == 1) ? kMaskSizeLBitSet : kMaskSizeLBitClear;

  // Get sub_mask1 from table
  const uint8_t* packet_mask_sub_1 = mask_table.fec_packet_mask_table()[
      num_imp_packets - 1][num_fec_for_imp_packets - 1];

  FitSubMask(num_mask_bytes, num_imp_mask_bytes, num_fec_for_imp_packets,
             packet_mask_sub_1, packet_mask);
}

// This function sets the protection allocation: i.e., how many FEC packets
// to use for num_imp (1st partition) packets, given the: number of media
// packets, number of FEC packets, and number of 1st partition packets.
int SetProtectionAllocation(int num_media_packets,
                            int num_fec_packets,
                            int num_imp_packets) {
  // TODO (marpan): test different cases for protection allocation:

  // Use at most (alloc_par * num_fec_packets) for important packets.
  float alloc_par = 0.5;
  // Note: float-to-int conversion truncates toward zero.
  int max_num_fec_for_imp = alloc_par * num_fec_packets;

  int num_fec_for_imp_packets = (num_imp_packets < max_num_fec_for_imp) ?
      num_imp_packets : max_num_fec_for_imp;

  // Fall back to equal protection in this case
  if (num_fec_packets == 1 && (num_media_packets > 2 * num_imp_packets)) {
    num_fec_for_imp_packets = 0;
  }

  return num_fec_for_imp_packets;
}

// Modification for UEP: reuse the off-line tables for the packet masks.
// Note: these masks were designed for equal packet protection case,
// assuming random packet loss.
// Current version has 3 modes (options) to build UEP mask from existing ones.
// Various other combinations may be added in future versions.
// Longer-term, we may add another set of tables specifically for UEP cases.
// TODO (marpan): also consider modification of masks for bursty loss cases.

// Mask is characterized as (#packets_to_protect, #fec_for_protection).
// Protection factor defined as: (#fec_for_protection / #packets_to_protect).

// Let k=num_media_packets, n=total#packets, (n-k)=num_fec_packets,
// m=num_imp_packets.

// For ProtectionMode 0 and 1:
// one mask (sub_mask1) is used for 1st partition packets,
// the other mask (sub_mask21/22, for 0/1) is for the remaining FEC packets.
// In both mode 0 and 1, the packets of 1st partition (num_imp_packets) are
// treated equally important, and are afforded more protection than the
// residual partition packets.

// For num_imp_packets:
// sub_mask1 = (m, t): protection = t/(m), where t=F(k,n-k,m).
// t=F(k,n-k,m) is the number of packets used to protect first partition in
// sub_mask1. This is determined from the function SetProtectionAllocation().

// For the left-over protection:
// Mode 0: sub_mask21 = (k-m,n-k-t): protection = (n-k-t)/(k-m)
// mode 0 has no protection overlap between the two partitions.
// For mode 0, we would typically set t = min(m, n-k).

// Mode 1: sub_mask22 = (k, n-k-t), with protection (n-k-t)/(k)
// mode 1 has protection overlap between the two partitions (preferred).

// For ProtectionMode 2:
// This gives 1st packet of list (which is 1st packet of 1st partition) more
// protection. In mode 2, the equal protection mask (which is obtained from
// mode 1 for t=0) is modified (more "1s" added in 1st column of packet mask)
// to bias higher protection for the 1st source packet.

// Protection Mode 2 may be extended for a sort of sliding protection
// (i.e., vary the number/density of "1s" across columns) across packets.

void UnequalProtectionMask(int num_media_packets,
                           int num_fec_packets,
                           int num_imp_packets,
                           int num_mask_bytes,
                           uint8_t* packet_mask,
                           const PacketMaskTable& mask_table) {
  // Set Protection type and allocation
  // TODO (marpan): test/update for best mode and some combinations thereof.

  ProtectionMode mode = kModeOverlap;
  int num_fec_for_imp_packets = 0;

  if (mode != kModeBiasFirstPacket) {
    num_fec_for_imp_packets = SetProtectionAllocation(num_media_packets,
                                                      num_fec_packets,
                                                      num_imp_packets);
  }

  int num_fec_remaining = num_fec_packets - num_fec_for_imp_packets;
  // Done with setting protection type and allocation

  //
  // Generate sub_mask1: protection rows dedicated to the 1st partition.
  //
  if (num_fec_for_imp_packets > 0) {
    ImportantPacketProtection(num_fec_for_imp_packets, num_imp_packets,
                              num_mask_bytes, packet_mask, mask_table);
  }

  //
  // Generate sub_mask2: protection rows for the remaining FEC packets.
  //
  if (num_fec_remaining > 0) {
    RemainingPacketProtection(num_media_packets, num_fec_remaining,
                              num_fec_for_imp_packets, num_mask_bytes,
                              mode, packet_mask, mask_table);
  }
}

void GeneratePacketMasks(int num_media_packets,
                         int num_fec_packets,
                         int num_imp_packets,
                         bool use_unequal_protection,
                         const PacketMaskTable& mask_table,
                         uint8_t* packet_mask) {
  assert(num_media_packets > 0);
  assert(num_fec_packets <= num_media_packets && num_fec_packets > 0);
  assert(num_imp_packets <= num_media_packets && num_imp_packets >= 0);

  // Mask row size depends on whether more than 16 media packets are
  // protected (L-bit set vs clear).
  int l_bit = num_media_packets > 16 ? 1 : 0;
  const int num_mask_bytes =
      (l_bit == 1) ? kMaskSizeLBitSet : kMaskSizeLBitClear;

  // Equal-protection for these cases.
  if (!use_unequal_protection || num_imp_packets == 0) {
    // Retrieve corresponding mask table directly:for equal-protection case.
    // Mask = (k,n-k), with protection factor = (n-k)/k,
    // where k = num_media_packets, n=total#packets, (n-k)=num_fec_packets.
    memcpy(packet_mask,
           mask_table.fec_packet_mask_table()[
               num_media_packets - 1][num_fec_packets - 1],
           num_fec_packets * num_mask_bytes);
  } else  //UEP case
  {
    UnequalProtectionMask(num_media_packets, num_fec_packets, num_imp_packets,
                          num_mask_bytes, packet_mask, mask_table);

  }  // End of UEP modification
}  //End of GetPacketMasks

}  // namespace internal
}  // namespace webrtc
/********************************************************************* * Software License Agreement (BSD License) * * Copyright (c) 2012, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*********************************************************************/ /* Author: Ioan Sucan, Dave Coleman */ #include <moveit/kinematics_plugin_loader/kinematics_plugin_loader.h> #include <moveit/rdf_loader/rdf_loader.h> #include <pluginlib/class_loader.hpp> #include <boost/thread/mutex.hpp> #include <sstream> #include <vector> #include <map> #include <memory> #include "rclcpp/rclcpp.hpp" #include <moveit/profiler/profiler.h> namespace kinematics_plugin_loader { rclcpp::Logger LOGGER = rclcpp::get_logger("kinematics_plugin_loader"); template <rclcpp::ParameterType ParamType> rclcpp::Parameter declare_parameter(const rclcpp::Node::SharedPtr& node, const std::string& parameter_name) { if (!node->has_parameter(parameter_name)) node->declare_parameter(parameter_name, ParamType); rclcpp::Parameter parameter; if (!node->get_parameter(parameter_name, parameter)) RCLCPP_DEBUG_STREAM(LOGGER, "Parameter `" << parameter_name << "` doesn't exists"); return parameter; } class KinematicsPluginLoader::KinematicsLoaderImpl { public: /** * \brief Pimpl Implementation of KinematicsLoader * \param robot_description * \param possible_kinematics_solvers * \param search_res * \param iksolver_to_tip_links - a map between each ik solver and a vector of custom-specified tip link(s) */ KinematicsLoaderImpl(const rclcpp::Node::SharedPtr& node, const std::string& robot_description, const std::map<std::string, std::vector<std::string>>& possible_kinematics_solvers, const std::map<std::string, std::vector<double>>& search_res, const std::map<std::string, std::vector<std::string>>& iksolver_to_tip_links) : node_(node) , robot_description_(robot_description) , possible_kinematics_solvers_(possible_kinematics_solvers) , search_res_(search_res) , iksolver_to_tip_links_(iksolver_to_tip_links) { try { kinematics_loader_ = std::make_shared<pluginlib::ClassLoader<kinematics::KinematicsBase>>("moveit_core", "kinematics::" "KinematicsBase"); } catch (pluginlib::PluginlibException& e) { 
RCLCPP_ERROR(LOGGER, "Unable to construct kinematics loader. Error: %s", e.what()); } } /** * \brief Helper function to decide which, and how many, tip frames a planning group has * \param jmg - joint model group pointer * \return tips - list of valid links in a planning group to plan for */ std::vector<std::string> chooseTipFrames(const moveit::core::JointModelGroup* jmg) { std::vector<std::string> tips; std::map<std::string, std::vector<std::string>>::const_iterator ik_it = iksolver_to_tip_links_.find(jmg->getName()); // Check if tips were loaded onto the rosparam server previously if (ik_it != iksolver_to_tip_links_.end()) { // the tip is being chosen based on a corresponding rosparam ik link RCLCPP_DEBUG(LOGGER, "Choosing tip frame of kinematic solver for group %s" "based on values in rosparam server.", jmg->getName().c_str()); tips = ik_it->second; } else { // get the last link in the chain RCLCPP_DEBUG(LOGGER, "Choosing tip frame of kinematic solver for group %s" "based on last link in chain", jmg->getName().c_str()); tips.push_back(jmg->getLinkModels().back()->getName()); } // Error check if (tips.empty()) { RCLCPP_ERROR(LOGGER, "Error choosing kinematic solver tip frame(s)."); } // Debug tip choices std::stringstream tip_debug; tip_debug << "Planning group '" << jmg->getName() << "' has tip(s): "; for (const auto& tip : tips) tip_debug << tip << ", "; RCLCPP_DEBUG_STREAM(LOGGER, tip_debug.str()); return tips; } kinematics::KinematicsBasePtr allocKinematicsSolver(const moveit::core::JointModelGroup* jmg) { kinematics::KinematicsBasePtr result; if (!kinematics_loader_) { RCLCPP_ERROR(LOGGER, "Invalid kinematics loader."); return result; } if (!jmg) { RCLCPP_ERROR(LOGGER, "Specified group is NULL. Cannot allocate kinematics solver."); return result; } const std::vector<const moveit::core::LinkModel*>& links = jmg->getLinkModels(); if (links.empty()) { RCLCPP_ERROR(LOGGER, "No links specified for group '%s'. 
Cannot allocate kinematics solver.", jmg->getName().c_str()); return result; } RCLCPP_DEBUG(LOGGER, "Trying to allocate kinematics solver for group '%s'", jmg->getName().c_str()); std::map<std::string, std::vector<std::string>>::const_iterator it = possible_kinematics_solvers_.find(jmg->getName()); if (it == possible_kinematics_solvers_.end()) { RCLCPP_DEBUG(LOGGER, "No kinematics solver available for this group"); return result; } const std::string& base = links.front()->getParentJointModel()->getParentLinkModel() ? links.front()->getParentJointModel()->getParentLinkModel()->getName() : jmg->getParentModel().getModelFrame(); // just to be sure, do not call the same pluginlib instance allocation function in parallel boost::mutex::scoped_lock slock(lock_); for (std::size_t i = 0; !result && i < it->second.size(); ++i) { try { result = kinematics_loader_->createUniqueInstance(it->second[i]); if (result) { // choose the tip of the IK solver const std::vector<std::string> tips = chooseTipFrames(jmg); // choose search resolution double search_res = search_res_.find(jmg->getName())->second[i]; // we know this exists, by construction if (!result->initialize(node_, jmg->getParentModel(), jmg->getName(), (base.empty() || base[0] != '/') ? base : base.substr(1), tips, search_res)) { RCLCPP_ERROR(LOGGER, "Kinematics solver of type '%s' could not be initialized for group '%s'", it->second[i].c_str(), jmg->getName().c_str()); result.reset(); continue; } result->setDefaultTimeout(jmg->getDefaultIKTimeout()); RCLCPP_DEBUG(LOGGER, "Successfully allocated and initialized a kinematics solver of type '%s' with search " "resolution %lf for group '%s' at address %p", it->second[i].c_str(), search_res, jmg->getName().c_str(), result.get()); break; } } catch (pluginlib::PluginlibException& e) { RCLCPP_ERROR(LOGGER, "The kinematics plugin (%s) failed to load. 
Error: %s", it->first.c_str(), e.what()); } } if (!result) { RCLCPP_DEBUG(LOGGER, "No usable kinematics solver was found for this group.\n" "Did you load kinematics.yaml into your node's namespace?"); } return result; } // cache solver between two consecutive calls // first call in RobotModelLoader::loadKinematicsSolvers() is just to check suitability for jmg // second call in JointModelGroup::setSolverAllocators() is to actually retrieve the instance for use kinematics::KinematicsBasePtr allocKinematicsSolverWithCache(const moveit::core::JointModelGroup* jmg) { boost::mutex::scoped_lock slock(cache_lock_); kinematics::KinematicsBasePtr& cached = instances_[jmg]; if (cached.unique()) return std::move(cached); // pass on unique instance // create a new instance and store in instances_ cached = allocKinematicsSolver(jmg); return cached; } void status() const { for (std::map<std::string, std::vector<std::string>>::const_iterator it = possible_kinematics_solvers_.begin(); it != possible_kinematics_solvers_.end(); ++it) { for (std::size_t i = 0; i < it->second.size(); ++i) { RCLCPP_INFO(LOGGER, "Solver for group '%s': '%s' (search resolution = %lf)", it->first.c_str(), it->second[i].c_str(), search_res_.at(it->first)[i]); } } } private: const rclcpp::Node::SharedPtr node_; std::string robot_description_; std::map<std::string, std::vector<std::string>> possible_kinematics_solvers_; std::map<std::string, std::vector<double>> search_res_; std::map<std::string, std::vector<std::string>> iksolver_to_tip_links_; // a map between each ik solver and a vector // of custom-specified tip link(s) std::shared_ptr<pluginlib::ClassLoader<kinematics::KinematicsBase>> kinematics_loader_; std::map<const moveit::core::JointModelGroup*, kinematics::KinematicsBasePtr> instances_; boost::mutex lock_; boost::mutex cache_lock_; }; void KinematicsPluginLoader::status() const { if (loader_) loader_->status(); else RCLCPP_INFO(LOGGER, "Loader function was never required"); } 
moveit::core::SolverAllocatorFn KinematicsPluginLoader::getLoaderFunction()
{
  moveit::tools::Profiler::ScopedStart prof_start;
  moveit::tools::Profiler::ScopedBlock prof_block("KinematicsPluginLoader::getLoaderFunction");

  if (loader_)
    return boost::bind(&KinematicsLoaderImpl::allocKinematicsSolverWithCache, loader_.get(), boost::placeholders::_1);

  // No impl yet: load the robot model to obtain the SRDF, then delegate.
  rdf_loader::RDFLoader rml(node_, robot_description_);
  robot_description_ = rml.getRobotDescription();
  return getLoaderFunction(rml.getSRDF());
}

moveit::core::SolverAllocatorFn KinematicsPluginLoader::getLoaderFunction(const srdf::ModelSharedPtr& srdf_model)
{
  moveit::tools::Profiler::ScopedStart prof_start;
  moveit::tools::Profiler::ScopedBlock prof_block("KinematicsPluginLoader::getLoaderFunction(SRDF)");

  if (!loader_)
  {
    RCLCPP_DEBUG(LOGGER, "Configuring kinematics solvers");
    groups_.clear();

    std::map<std::string, std::vector<std::string>> possible_kinematics_solvers;
    std::map<std::string, std::vector<double>> search_res;
    std::map<std::string, std::vector<std::string>> iksolver_to_tip_links;

    if (srdf_model)
    {
      const std::vector<srdf::Model::Group>& known_groups = srdf_model->getGroups();
      if (default_search_resolution_ <= std::numeric_limits<double>::epsilon())
        default_search_resolution_ = kinematics::KinematicsBase::DEFAULT_SEARCH_DISCRETIZATION;

      if (default_solver_plugin_.empty())
      {
        RCLCPP_DEBUG(LOGGER, "Loading settings for kinematics solvers from the ROS param server ...");

        // read the list of plugin names for possible kinematics solvers
        for (const srdf::Model::Group& known_group : known_groups)
        {
          // Look for the solver under "<group>.kinematics_solver" first, then
          // fall back to "<robot_description>_kinematics.<group>.kinematics_solver".
          std::string base_param_name = known_group.name_;
          std::string ksolver_param_name = base_param_name + ".kinematics_solver";
          RCLCPP_DEBUG(LOGGER, "Looking for param %s ", ksolver_param_name.c_str());
          rclcpp::Parameter ksolver_param =
              declare_parameter<rclcpp::ParameterType::PARAMETER_STRING>(node_, ksolver_param_name);
          if (ksolver_param.get_type() == rclcpp::ParameterType::PARAMETER_NOT_SET)
          {
            base_param_name = robot_description_ + "_kinematics." + known_group.name_;
            ksolver_param_name = base_param_name + ".kinematics_solver";
            RCLCPP_DEBUG(LOGGER, "Looking for param %s ", ksolver_param_name.c_str());
            ksolver_param = declare_parameter<rclcpp::ParameterType::PARAMETER_STRING>(node_, ksolver_param_name);
          }
          if (ksolver_param.get_type() != rclcpp::ParameterType::PARAMETER_NOT_SET)
          {
            RCLCPP_DEBUG(LOGGER, "Found param %s", ksolver_param_name.c_str());
            // The parameter may contain several whitespace-separated plugin
            // names; all of them are recorded as candidates for the group.
            const auto& ksolver = ksolver_param.as_string();
            std::stringstream ss(ksolver);
            bool first = true;
            while (ss.good() && !ss.eof())
            {
              if (first)
              {
                first = false;
                groups_.push_back(known_group.name_);
              }
              std::string solver;
              ss >> solver >> std::ws;
              possible_kinematics_solvers[known_group.name_].push_back(solver);
              RCLCPP_DEBUG(LOGGER, "Using kinematics solver '%s' for group '%s'.", solver.c_str(),
                           known_group.name_.c_str());
            }
          }

          std::string ksolver_timeout_param_name = base_param_name + ".kinematics_solver_timeout";
          rclcpp::Parameter ksolver_timeout_param =
              declare_parameter<rclcpp::ParameterType::PARAMETER_DOUBLE>(node_, ksolver_timeout_param_name);
          if (ksolver_timeout_param.get_type() != rclcpp::ParameterType::PARAMETER_NOT_SET)
          {
            if (ksolver_timeout_param.get_type() == rclcpp::ParameterType::PARAMETER_DOUBLE)
            {
              ik_timeout_[known_group.name_] = ksolver_timeout_param.as_double();
            }
            else if (ksolver_timeout_param.get_type() == rclcpp::ParameterType::PARAMETER_INTEGER)
            {
              // just in case this is an int
              ik_timeout_[known_group.name_] = ksolver_timeout_param.as_int();
            }
          }

          std::string ksolver_res_param_name = base_param_name + ".kinematics_solver_search_resolution";
          rclcpp::Parameter ksolver_res_param =
              declare_parameter<rclcpp::ParameterType::PARAMETER_DOUBLE>(node_, ksolver_res_param_name);
          if (ksolver_res_param.get_type() != rclcpp::ParameterType::PARAMETER_NOT_SET)
          {
            // A string value may hold several whitespace-separated resolutions
            // (one per candidate solver).
            if (ksolver_res_param.get_type() == rclcpp::ParameterType::PARAMETER_STRING)
            {
              const auto& ksolver_res = ksolver_res_param.as_string();
              std::stringstream ss(ksolver_res);
              while (ss.good() && !ss.eof())
              {
                double res;
                ss >> res >> std::ws;
                search_res[known_group.name_].push_back(res);
              }
            }
            // handle the case this param is just one value and parsed as a double
            else if (ksolver_res_param.get_type() == rclcpp::ParameterType::PARAMETER_DOUBLE)
            {
              search_res[known_group.name_].push_back(ksolver_res_param.as_double());
            }
            else if (ksolver_res_param.get_type() == rclcpp::ParameterType::PARAMETER_INTEGER)
            {
              search_res[known_group.name_].push_back(ksolver_res_param.as_int());
            }
          }

          // Allow a kinematic solver's tip links to be specified on the rosparam server as an array
          std::string ksolver_ik_links_param_name = base_param_name + ".kinematics_solver_ik_links";
          rclcpp::Parameter ksolver_ik_links_param =
              declare_parameter<rclcpp::ParameterType::PARAMETER_STRING_ARRAY>(node_, ksolver_ik_links_param_name);
          if (ksolver_ik_links_param.get_type() != rclcpp::ParameterType::PARAMETER_NOT_SET)
          {
            if (ksolver_ik_links_param.get_type() == rclcpp::ParameterType::PARAMETER_STRING_ARRAY)
            {
              const auto& ksolver_ik_links = ksolver_ik_links_param.as_string_array();
              for (auto& ksolver_ik_link : ksolver_ik_links)
              {
                RCLCPP_DEBUG(LOGGER, "found tip %s for group %s", ksolver_ik_link.c_str(), known_group.name_.c_str());
                iksolver_to_tip_links[known_group.name_].push_back(ksolver_ik_link);
              }
            }
            else
            {
              RCLCPP_WARN(LOGGER, "the parameter '%s' needs to be of type 'STRING_ARRAY'",
                          ksolver_ik_links_param_name.c_str());
            }
          }

          // make sure there is a default resolution at least specified for every solver (in case it was not specified
          // on the param server)
          while (search_res[known_group.name_].size() < possible_kinematics_solvers[known_group.name_].size())
            search_res[known_group.name_].push_back(default_search_resolution_);
        }
      }
      else
      {
        RCLCPP_DEBUG(LOGGER, "Using specified default settings for kinematics solvers ...");
        for (const srdf::Model::Group& known_group : known_groups)
        {
          possible_kinematics_solvers[known_group.name_].resize(1, default_solver_plugin_);
          search_res[known_group.name_].resize(1, default_search_resolution_);
          ik_timeout_[known_group.name_] = default_solver_timeout_;
          groups_.push_back(known_group.name_);
        }
      }
    }

    loader_ = std::make_shared<KinematicsLoaderImpl>(node_, robot_description_, possible_kinematics_solvers, search_res,
                                                     iksolver_to_tip_links);
  }

  return boost::bind(&KinematicsPluginLoader::KinematicsLoaderImpl::allocKinematicsSolverWithCache, loader_.get(),
                     boost::placeholders::_1);
}
}  // namespace kinematics_plugin_loader
#pragma once

#include <chrono>

/// Simple stopwatch: call start()/end() around the code to time, then read
/// the elapsed wall time in seconds via duration(); or time a callable in
/// one shot with timer::estimate(f).
class timer
{
    // Use steady_clock instead of system_clock: system_clock is the wall
    // clock and may jump (NTP adjustments, DST), which can yield negative or
    // wildly wrong intervals; steady_clock is monotonic and is the correct
    // clock for measuring durations. The alias is private, so this does not
    // change the class interface.
    using clock_t = std::chrono::steady_clock;
    using time_point_t = clock_t::time_point;

public:
    /// Record the starting time point.
    void start()
    {
        _start = clock_t::now();
    }

    /// Record the ending time point.
    void end()
    {
        _end = clock_t::now();
    }

    /// Elapsed time between start() and end(), in seconds (double).
    auto duration() const
    {
        std::chrono::duration<double> diff = _end - _start;
        return diff.count();
    }

    /// Time a single invocation of \p f and return its duration in seconds.
    template <class functor>
    static auto estimate(functor f)
    {
        timer t;
        t.start();
        f();
        t.end();
        return t.duration();
    }

private:
    time_point_t _start;
    time_point_t _end;
};
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/system/privacy_screen/privacy_screen_toast_controller.h" #include "ash/accessibility/accessibility_controller_impl.h" #include "ash/shelf/shelf.h" #include "ash/shell.h" #include "ash/system/status_area_widget.h" #include "ash/system/tray/tray_constants.h" #include "ash/system/tray/tray_utils.h" #include "ash/system/unified/unified_system_tray.h" #include "ash/system/unified/unified_system_tray_bubble.h" #include "ash/system/unified/unified_system_tray_view.h" namespace ash { PrivacyScreenToastController::PrivacyScreenToastController( UnifiedSystemTray* tray) : tray_(tray) { Shell::Get()->privacy_screen_controller()->AddObserver(this); } PrivacyScreenToastController::~PrivacyScreenToastController() { close_timer_.Stop(); if (bubble_widget_ && !bubble_widget_->IsClosed()) bubble_widget_->CloseNow(); } void PrivacyScreenToastController::ShowToast() { // If the bubble already exists, update the content of the bubble and extend // the autoclose timer. if (bubble_widget_) { UpdateToastView(); if (!mouse_hovered_) StartAutoCloseTimer(); return; } tray_->CloseSecondaryBubbles(); TrayBubbleView::InitParams init_params; init_params.shelf_alignment = tray_->shelf()->alignment(); init_params.preferred_width = kPrivacyScreenToastMinWidth; init_params.delegate = this; init_params.parent_window = tray_->GetBubbleWindowContainer(); init_params.anchor_view = nullptr; init_params.anchor_mode = TrayBubbleView::AnchorMode::kRect; init_params.anchor_rect = tray_->shelf()->GetSystemTrayAnchorRect(); // Decrease bottom and right insets to compensate for the adjustment of // the respective edges in Shelf::GetSystemTrayAnchorRect(). 
init_params.insets = GetTrayBubbleInsets(); init_params.corner_radius = kUnifiedTrayCornerRadius; init_params.has_shadow = false; init_params.translucent = true; bubble_view_ = new TrayBubbleView(init_params); toast_view_ = new PrivacyScreenToastView(this); bubble_view_->AddChildView(toast_view_); bubble_widget_ = views::BubbleDialogDelegateView::CreateBubble(bubble_view_); TrayBackgroundView::InitializeBubbleAnimations(bubble_widget_); bubble_view_->InitializeAndShowBubble(); StartAutoCloseTimer(); UpdateToastView(); tray_->SetTrayBubbleHeight( bubble_widget_->GetWindowBoundsInScreen().height()); // Activate the bubble so ChromeVox can announce the toast. if (Shell::Get()->accessibility_controller()->spoken_feedback_enabled()) { bubble_widget_->widget_delegate()->SetCanActivate(true); bubble_widget_->Activate(); } } void PrivacyScreenToastController::HideToast() { close_timer_.Stop(); if (!bubble_widget_ || bubble_widget_->IsClosed()) return; bubble_widget_->Close(); tray_->SetTrayBubbleHeight(0); } void PrivacyScreenToastController::BubbleViewDestroyed() { close_timer_.Stop(); bubble_view_ = nullptr; bubble_widget_ = nullptr; } void PrivacyScreenToastController::OnMouseEnteredView() { close_timer_.Stop(); mouse_hovered_ = true; } void PrivacyScreenToastController::OnMouseExitedView() { StartAutoCloseTimer(); mouse_hovered_ = false; } base::string16 PrivacyScreenToastController::GetAccessibleNameForBubble() { if (!toast_view_) return base::string16(); return toast_view_->GetAccessibleName(); } void PrivacyScreenToastController::OnPrivacyScreenSettingChanged(bool enabled) { if (tray_->IsBubbleShown()) return; ShowToast(); } void PrivacyScreenToastController::StartAutoCloseTimer() { close_timer_.Stop(); // Don't start the timer if the toast is focused. 
if (toast_view_ && toast_view_->IsButtonFocused()) return; int autoclose_delay = kTrayPopupAutoCloseDelayInSeconds; if (Shell::Get()->accessibility_controller()->spoken_feedback_enabled()) autoclose_delay = kTrayPopupAutoCloseDelayInSecondsWithSpokenFeedback; close_timer_.Start(FROM_HERE, base::TimeDelta::FromSeconds(autoclose_delay), this, &PrivacyScreenToastController::HideToast); } void PrivacyScreenToastController::UpdateToastView() { if (toast_view_) { auto* privacy_screen_controller = Shell::Get()->privacy_screen_controller(); toast_view_->SetPrivacyScreenEnabled( /*enabled=*/privacy_screen_controller->GetEnabled(), /*managed=*/privacy_screen_controller->IsManaged()); int width = base::ClampToRange(toast_view_->GetPreferredSize().width(), kPrivacyScreenToastMinWidth, kPrivacyScreenToastMaxWidth); bubble_view_->SetPreferredWidth(width); } } void PrivacyScreenToastController::ButtonPressed(views::Button* sender, const ui::Event& event) { auto* privacy_screen_controller = Shell::Get()->privacy_screen_controller(); privacy_screen_controller->SetEnabled( !privacy_screen_controller->GetEnabled()); } void PrivacyScreenToastController::StopAutocloseTimer() { close_timer_.Stop(); } } // namespace ash
// The MIT License (MIT) // // Copyright (c) 2015-2017 Simon Ninon <simon.ninon@gmail.com> // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#include <cpp_redis/logger.hpp>
#include <cpp_redis/network/redis_connection.hpp>
#include <cpp_redis/redis_error.hpp>

#ifndef __CPP_REDIS_USE_CUSTOM_TCP_CLIENT
#include <cpp_redis/network/tcp_client.hpp>
#endif /* __CPP_REDIS_USE_CUSTOM_TCP_CLIENT */

namespace cpp_redis {

namespace network {

#ifndef __CPP_REDIS_USE_CUSTOM_TCP_CLIENT
//! Default construction: delegate to the main constructor with a freshly
//! allocated default tcp_client.
redis_connection::redis_connection(void)
: redis_connection(std::make_shared<tcp_client>()) {
}
#endif /* __CPP_REDIS_USE_CUSTOM_TCP_CLIENT */

//! Build a connection on top of an externally supplied TCP client
//! implementation. Callbacks start out unset; they are installed by connect().
redis_connection::redis_connection(const std::shared_ptr<tcp_client_iface>& client)
: m_client(client)
, m_reply_callback(nullptr)
, m_disconnection_handler(nullptr) {
  __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection created");
}

//! Destructor: force a disconnect and wait for pending handler removal so no
//! callback can fire on a destroyed object.
redis_connection::~redis_connection(void) {
  m_client->disconnect(true);
  __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection destroyed");
}

//! Connect to host:port, register the caller's callbacks and start the
//! asynchronous read loop.
//!
//! \param host server hostname or address
//! \param port server port
//! \param client_disconnection_handler invoked when the tcp client reports a
//!        disconnection
//! \param client_reply_callback invoked once per fully parsed redis reply
//! \throws redis_error wrapping any exception raised while connecting
void
redis_connection::connect(const std::string& host, std::size_t port,
  const disconnection_handler_t& client_disconnection_handler,
  const reply_callback_t& client_reply_callback) {
  try {
    __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection attempts to connect");

    //! BUGFIX: install the callbacks *before* the socket is connected and the
    //! first asynchronous read is issued. The tcp client delivers events from
    //! its own I/O context, so a reply or a disconnection arriving immediately
    //! after connect() could previously be observed while m_reply_callback /
    //! m_disconnection_handler were still null and be silently dropped.
    m_reply_callback         = client_reply_callback;
    m_disconnection_handler  = client_disconnection_handler;

    //! connect client
    m_client->connect(host, port);
    m_client->set_on_disconnection_handler(std::bind(&redis_connection::tcp_client_disconnection_handler, this));

    //! start to read asynchronously
    tcp_client_iface::read_request request = {__CPP_REDIS_READ_SIZE, std::bind(&redis_connection::tcp_client_receive_handler, this, std::placeholders::_1)};
    m_client->async_read(request);

    __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection connected");
  }
  catch (const std::exception& e) {
    //! the connection never came up: do not keep callbacks registered for it
    m_reply_callback        = nullptr;
    m_disconnection_handler = nullptr;
    __CPP_REDIS_LOG(error, std::string("cpp_redis::network::redis_connection ") + e.what());
    throw redis_error(e.what());
  }
}

//! Disconnect the underlying client.
//! \param wait_for_removal when true, block until pending callbacks are removed
void
redis_connection::disconnect(bool wait_for_removal) {
  __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection attempts to disconnect");
  m_client->disconnect(wait_for_removal);
  __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection disconnected");
}

//! \return whether the underlying tcp client is currently connected
bool
redis_connection::is_connected(void) {
  return m_client->is_connected();
}

//! Serialize one command into the RESP wire format:
//! an array header ("*<count>\r\n") followed by one bulk string
//! ("$<len>\r\n<payload>\r\n") per command part.
std::string
redis_connection::build_command(const std::vector<std::string>& redis_cmd) {
  std::string cmd = "*" + std::to_string(redis_cmd.size()) + "\r\n";

  for (const auto& cmd_part : redis_cmd)
    cmd += "$" + std::to_string(cmd_part.length()) + "\r\n" + cmd_part + "\r\n";

  return cmd;
}

//! Append one command to the pipeline buffer. Nothing is written to the
//! socket until commit() is called.
redis_connection&
redis_connection::send(const std::vector<std::string>& redis_cmd) {
  std::lock_guard<std::mutex> lock(m_buffer_mutex);

  m_buffer += build_command(redis_cmd);
  __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection stored new command in the send buffer");

  return *this;
}

//! commit pipelined transaction
redis_connection&
redis_connection::commit(void) {
  std::lock_guard<std::mutex> lock(m_buffer_mutex);

  //! ensure buffer is cleared
  __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection attempts to send pipelined commands");
  std::string buffer = std::move(m_buffer);

  try {
    tcp_client_iface::write_request request = {std::vector<char>{buffer.begin(), buffer.end()}, nullptr};
    m_client->async_write(request);
  }
  catch (const std::exception& e) {
    //! on failure, restore the buffer so the commands are not lost and can be
    //! re-committed later
    m_buffer = std::move(buffer);
    __CPP_REDIS_LOG(error, std::string("cpp_redis::network::redis_connection ") + e.what());
    throw redis_error(e.what());
  }

  __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection sent pipelined commands");

  return *this;
}

//! Invoke the registered disconnection handler, if any.
void
redis_connection::call_disconnection_handler(void) {
  if (m_disconnection_handler) {
    __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection calls disconnection handler");
    m_disconnection_handler(*this);
  }
}

//! Receive-side callback: feed raw bytes to the reply builder, dispatch every
//! complete reply to the client callback, then re-arm the asynchronous read.
void
redis_connection::tcp_client_receive_handler(const tcp_client_iface::read_result& result) {
  if (!result.success) { return; }

  try {
    __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection receives packet, attempts to build reply");
    m_builder << std::string(result.buffer.begin(), result.buffer.end());
  }
  catch (const redis_error&) {
    __CPP_REDIS_LOG(error, "cpp_redis::network::redis_connection could not build reply (invalid format), disconnecting");
    call_disconnection_handler();
    return;
  }

  while (m_builder.reply_available()) {
    __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection reply fully built");

    auto reply = m_builder.get_front();
    m_builder.pop_front();

    if (m_reply_callback) {
      __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection executes reply callback");
      m_reply_callback(*this, reply);
    }
  }

  try {
    tcp_client_iface::read_request request = {__CPP_REDIS_READ_SIZE, std::bind(&redis_connection::tcp_client_receive_handler, this, std::placeholders::_1)};
    m_client->async_read(request);
  }
  catch (const std::exception&) {
    //! Client disconnected in the meantime
  }
}

//! Disconnection callback installed on the tcp client. Reuses the shared
//! helper instead of duplicating the null-check + invocation (consistency
//! with tcp_client_receive_handler's error path).
void
redis_connection::tcp_client_disconnection_handler(void) {
  __CPP_REDIS_LOG(vdebug, "cpp_redis::network::redis_connection has been disconnected");
  call_disconnection_handler();
}

} //! network

} //! cpp_redis
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include <algorithm>
#include <random>

#include "core/mlas/inc/mlas.h"
#include "core/util/math.h"
#include "default_providers.h"
#include "gtest/gtest.h"
#include "test/providers/provider_test_utils.h"

namespace onnxruntime {
namespace test {

namespace {

// Affine (scale / zero-point) uint8 quantization of a float tensor, used to
// build QLinearConv test fixtures.
struct QuantizedTensor {
  // Derives scale/zero-point from the data range (widened to include zero so
  // zero is exactly representable), then quantizes every element with
  // round-and-clamp to [0, 255].
  QuantizedTensor(const std::vector<float>& data) {
    // find input range min and max
    auto min = *std::min_element(data.begin(), data.end());
    auto max = *std::max_element(data.begin(), data.end());

    // ensure the data range includes zero
    min = std::min(min, 0.f);
    max = std::max(max, 0.f);

    constexpr float qmin = std::numeric_limits<uint8_t>::min();
    constexpr float qmax = std::numeric_limits<uint8_t>::max();

    // compute scale and zero point
    scale_ = (max - min) / (qmax - qmin);
    const auto initial_zero_point = qmin - min / scale_;
    zero_point_ = static_cast<uint8_t>(std::round(std::max(qmin, std::min(qmax, initial_zero_point))));

    // quantize the data
    quantized_.resize(data.size());
    for (size_t i = 0; i < data.size(); i++) {
      const float clamped_val = std::max(qmin, std::min(qmax, std::round(data[i] / scale_) + zero_point_));
      quantized_[i] = static_cast<uint8_t>(clamped_val);
    }
  }

  // Wraps data that is already quantized, with explicit quantization params.
  QuantizedTensor(const std::vector<uint8_t>& data, float scale, uint8_t zero_point)
      : quantized_(data), scale_(scale), zero_point_(zero_point) {
  }

  std::vector<uint8_t> quantized_;  // quantized values
  float scale_;                     // dequantization scale
  uint8_t zero_point_;              // quantized value representing 0.0f
};

// int32 bias tensor quantized with scale = x_scale * w_scale (zero point 0),
// matching the QLinearConv bias convention.
struct QuantizedBiasTensor {
  QuantizedBiasTensor(const std::vector<float>& data,
                      const QuantizedTensor& X,
                      const QuantizedTensor& W) {
    scale_ = X.scale_ * W.scale_;

    // quantize the data
    quantized_.resize(data.size());
    for (size_t i = 0; i < data.size(); i++) {
      quantized_[i] = static_cast<int32_t>(std::floor(data[i] / (X.scale_ * W.scale_)));
    }
  }

  // Wraps already-quantized bias values with an explicit scale.
  QuantizedBiasTensor(const std::vector<int32_t>& data, float scale)
      : quantized_(data), scale_(scale) {
  }

  std::vector<int32_t> quantized_;
  float scale_;
};

// Wires the QLinearConv inputs/outputs into an OpTester and runs it.
// When all_input_initializer_except_x is true, every input except "x" is fed
// as an initializer (exercises constant-folding / initializer handling paths).
void TestQLinearConvOp(OpTester& test,
                       const QuantizedTensor& X,
                       const std::vector<int64_t>& X_shape,
                       const QuantizedTensor& W,
                       const std::vector<int64_t>& W_shape,
                       const QuantizedBiasTensor* B,  // optional; nullptr = no bias
                       const QuantizedTensor& Y,
                       const std::vector<int64_t>& Y_shape,
                       bool all_input_initializer_except_x = false,
                       const std::unordered_set<std::string>& excluded_provider_types = {}) {
  test.AddInput<uint8_t>("x", X_shape, X.quantized_);
  test.AddInput<float>("x_scale", {}, {X.scale_}, all_input_initializer_except_x);
  test.AddInput<uint8_t>("x_zero_point", {}, {X.zero_point_}, all_input_initializer_except_x);

  test.AddInput<uint8_t>("w", W_shape, W.quantized_, all_input_initializer_except_x);
  test.AddInput<float>("w_scale", {}, {W.scale_}, all_input_initializer_except_x);
  test.AddInput<uint8_t>("w_zero_point", {}, {W.zero_point_}, all_input_initializer_except_x);

  test.AddInput<float>("y_scale", {}, {Y.scale_}, all_input_initializer_except_x);
  test.AddInput<uint8_t>("y_zero_point", {}, {Y.zero_point_}, all_input_initializer_except_x);

  if (B != nullptr) {
    const std::vector<int64_t> B_shape{static_cast<int64_t>(B->quantized_.size())};
    test.AddInput<int32_t>("b", B_shape, B->quantized_, all_input_initializer_except_x);
  }

  test.AddOutput<uint8_t>("y", Y_shape, Y.quantized_);

  test.Run(OpTester::ExpectResult::kExpectSuccess, "", excluded_provider_types);
}

// 1x1 kernel 2D convolution over a 7x7 single-channel image; fixed reference
// data (Y is the precomputed expected output).
void RunConv2DTest(bool all_input_initializer_except_x) {
  QuantizedTensor X({0.45246148109436035f, 0.15498268604278564f, 0.11199361085891724f, -0.39421093463897705f,
                     0.2626858949661255f, 0.13414543867111206f, -0.27184486389160156f, -0.43028733134269714f,
                     -0.26825493574142456f, 0.3893144130706787f, -0.13631996512413025f, -0.009590476751327515f,
                     -0.48771554231643677f, -0.25256502628326416f, -0.2812897562980652f, 0.4043201804161072f,
                     0.07795023918151855f, 0.326981782913208f, 0.13114392757415771f, -0.4416425824165344f,
                     0.12446999549865723f, 0.36739975214004517f, 0.1698915958404541f, 0.2008744478225708f,
                     0.23339951038360596f, 0.38613730669021606f, 0.11117297410964966f, 0.3877097964286804f,
                     0.20812749862670898f, -0.34297940135002136f, -0.029246658086776733f, -0.20483523607254028f,
                     -0.19244328141212463f, -0.11104947328567505f, -0.32830488681793213f, -0.01800677180290222f,
                     0.3618946671485901f, -0.40949052572250366f, -0.18248388171195984f, -0.3349453806877136f,
                     -0.34091079235076904f, 0.006497859954833984f, 0.4537564516067505f, 0.08006560802459717f,
                     -0.14788749814033508f, 0.034442365169525146f, -0.33322954177856445f, 0.06049239635467529f,
                     0.42619407176971436f});
  QuantizedTensor W({-0.4406261742115021f});
  QuantizedTensor Y({-0.19936637580394745f, -0.06828942894935608f, -0.04934731498360634f, 0.17369966208934784f,
                     -0.11574628204107285f, -0.05910799279808998f, 0.1197819635272026f, 0.18959586322307587f,
                     0.1182001456618309f, -0.17154212296009064f, 0.06006614491343498f, 0.0042258151806890965f,
                     0.21490024030208588f, 0.11128675937652588f, 0.12394362688064575f, -0.17815405130386353f,
                     -0.034346915781497955f, -0.14407673478126526f, -0.05778544768691063f, 0.19459928572177887f,
                     -0.05484473705291748f, -0.16188594698905945f, -0.07485868036746979f, -0.08851054310798645f,
                     -0.10284193605184555f, -0.17014220356941223f, -0.04898572340607643f, -0.17083507776260376f,
                     -0.09170642495155334f, 0.1511256992816925f, 0.012886842712759972f, 0.09025576710700989f,
                     0.08479554951190948f, 0.0489313043653965f, 0.14465972781181335f, 0.007934254594147205f,
                     -0.15946026146411896f, 0.1804322451353073f, 0.08040717244148254f, 0.1475857049226761f,
                     0.15021422505378723f, -0.0028631272725760937f, -0.19993697106838226f, -0.03527900204062462f,
                     0.06516310572624207f, -0.015176207758486271f, 0.14682966470718384f, -0.02665453404188156f,
                     -0.18779225647449493f});

  OpTester test("QLinearConv", 10);
  TestQLinearConvOp(test,
                    X, {1, 1, 7, 7},
                    W, {1, 1, 1, 1},
                    nullptr,
                    Y, {1, 1, 7, 7},
                    all_input_initializer_except_x);
}

TEST(QLinearConvTest, Conv2DTest) {
  RunConv2DTest(false);
}

TEST(QLinearConvTest, Conv2DTestAllInputInitializerExceptX) {
  RunConv2DTest(true);
}
// 3D convolution (1x1x1 kernel, stride 2, symmetric padding 2) over a 4x4x4
// volume; fixed reference data.
TEST(QLinearConvTest, Conv3DTest) {
  QuantizedTensor X({0.010772407054901123f, -0.43806642293930054f, 0.455391526222229f, -0.28657248616218567f,
                     0.45676887035369873f, -0.0320507287979126f, 0.4229400157928467f, -0.18730869889259338f,
                     -0.45851585268974304f, 0.042054951190948486f, -0.13332295417785645f, -0.25374430418014526f,
                     -0.23845627903938293f, 0.12214112281799316f, -0.1778157651424408f, 0.1891845464706421f,
                     0.37962496280670166f, -0.033982306718826294f, 0.12737131118774414f, -0.040284961462020874f,
                     0.46427029371261597f, -0.22687292098999023f, 0.17398333549499512f, -0.3014046251773834f,
                     -0.4043419063091278f, -0.33206477761268616f, 0.04655301570892334f, -0.4947906732559204f,
                     0.0755157470703125f, 0.1173025369644165f, 0.47043120861053467f, 0.4824737310409546f,
                     -0.37734976410865784f, -0.056491583585739136f, -0.10790631175041199f, 0.043476223945617676f,
                     0.24469023942947388f, -0.4100031852722168f, 0.0616222620010376f, 0.2296960949897766f,
                     0.27883386611938477f, 0.08150351047515869f, 0.2453773021697998f, 0.08250969648361206f,
                     -0.1471814215183258f, -0.43011274933815f, 0.027180075645446777f, 0.3605625033378601f,
                     0.24954384565353394f, -0.22505927085876465f, -0.36272895336151123f, -0.47674262523651123f,
                     0.11275297403335571f, 0.49773406982421875f, 0.2686365246772766f, 0.025525271892547607f,
                     -0.3037869930267334f, 0.41126757860183716f, 0.36149072647094727f, 0.00883406400680542f,
                     -0.07959523797035217f, 0.3601323366165161f, 0.17322391271591187f, -0.012007325887680054f});
  QuantizedTensor W({0.32824617624282837f});
  QuantizedTensor Y({0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                     0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                     0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0035360013134777546f, 0.14948052167892456f, 0.0f,
                     0.0f, -0.15050607919692993f, -0.043762750923633575f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                     0.0f, 0.0f, 0.0f, 0.0f, 0.0f, -0.12386361509561539f, -0.03541983291506767f, 0.0f,
                     0.0f, 0.09152615070343018f, 0.08054415881633759f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                     0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                     0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f});

  OpTester test("QLinearConv", 10);
  test.AddAttribute("pads", std::vector<int64_t>{2, 2, 2, 2, 2, 2});
  test.AddAttribute("strides", std::vector<int64_t>{2, 2, 2});
  TestQLinearConvOp(test,
                    X, {1, 1, 4, 4, 4},
                    W, {1, 1, 1, 1, 1},
                    nullptr,
                    Y, {1, 1, 4, 4, 4});
}

// 2-channel-in / 4-channel-out 3x3 conv with bias and padding 1; inputs are
// pre-quantized uint8 with explicit scale/zero-point.
void RunConv2DWithBiasTest(bool all_input_initializer_except_x) {
  QuantizedTensor X({6, 81, 214, 151, 234, 42, 50, 89, 30, 91,
                     125, 141, 52, 31, 58, 224, 84, 251, 67, 137,
                     223, 119, 79, 220, 249, 75, 131, 246, 113, 56,
                     54, 197, 110, 142, 126, 171, 53, 228, 240, 83,
                     229, 218, 185, 9, 80, 116, 176, 193, 175, 253},
                    0.01f, 135);
  QuantizedTensor W({234, 229, 13, 187, 98, 161, 246, 188, 252, 107, 49, 72,
                     53, 212, 175, 47, 21, 14, 86, 230, 16, 177, 82, 166,
                     75, 220, 169, 119, 34, 205, 27, 9, 44, 74, 40, 8,
                     28, 139, 240, 106, 63, 2, 255, 156, 128, 222, 73, 51,
                     66, 48, 81, 247, 180, 91, 206, 239, 190, 146, 227, 235,
                     10, 130, 95, 232, 121, 133, 231, 162, 108, 105, 254, 143},
                    0.15f, 110);
  QuantizedBiasTensor B({-1123, 3212, 1723, -621}, X.scale_ * W.scale_);
  QuantizedTensor Y({67, 81, 66, 75, 71, 101, 20, 8, 44, 94,
                     83, 73, 133, 125, 54, 144, 165, 56, 53, 88,
                     130, 118, 170, 168, 140, 109, 103, 80, 122, 142,
                     129, 100, 39, 61, 141, 133, 59, 155, 68, 129,
                     74, 132, 83, 143, 146, 152, 81, 127, 82, 112,
                     131, 64, 82, 68, 93, 149, 146, 137, 201, 118,
                     112, 183, 171, 144, 85, 122, 86, 63, 163, 245,
                     95, 152, 126, 80, 82, 49, 136, 160, 187, 147,
                     29, 20, 135, 174, 126, 124, 36, 56, 0, 83,
                     134, 171, 119, 109, 85, 155, 157, 167, 194, 130},
                    0.75f, 121);

  OpTester test("QLinearConv", 10);
  test.AddAttribute("pads", std::vector<int64_t>{1, 1, 1, 1});
  TestQLinearConvOp(test,
                    X, {1, 2, 5, 5},
                    W, {4, 2, 3, 3},
                    &B,
                    Y, {1, 4, 5, 5},
                    all_input_initializer_except_x,
                    {});
}

TEST(QLinearConvTest, WithBias_2D) {
  RunConv2DWithBiasTest(false);
}

TEST(QLinearConvTest, WithBias_2D_AllInputInitializerExceptX) {
  RunConv2DWithBiasTest(true);
}

// Grouped (group=3) 2D conv with bias, asymmetric padding and stride 2.
TEST(QLinearConvTest, WithGroup_2D) {
  QuantizedTensor X({98, 166, 219, 195, 46, 97, 27, 211, 239, 1,
                     28, 208, 143, 144, 215, 252, 79, 5, 154, 56,
                     122, 191, 94, 25, 221, 48, 37, 182, 68, 245,
                     210, 206, 183, 22, 163, 104, 242, 112, 161, 66,
                     181, 235, 117, 75, 236, 61, 115, 36, 120, 253,
                     165, 214, 159, 132, 11, 201, 30, 249, 89, 171,
                     186, 67, 225, 197, 135, 142, 241, 169, 170, 164,
                     178, 58, 50, 51, 200, 43, 199, 126, 222, 123,
                     227, 42, 3, 21, 124, 220, 24, 47, 63, 110},
                    0.01f, 135);
  QuantizedTensor W({220, 111, 73, 254, 235, 151, 6, 156, 129, 204, 234, 198,
                     44, 89, 202, 82, 118, 189, 71, 120, 123, 121, 110, 83,
                     173, 248, 108, 229, 124, 68, 85, 239, 133, 213, 112, 122,
                     170, 231, 225, 195, 192, 9, 232, 97, 160, 227, 67, 137},
                    0.15f, 110);
  QuantizedBiasTensor B({-1853, 598, -17854, 14592, 42, -366}, X.scale_ * W.scale_);
  QuantizedTensor Y({113, 128, 70, 64, 125, 162, 80, 189, 112, 147,
                     121, 111, 96, 68, 94, 101, 77, 88, 223, 128,
                     163, 194, 138, 164, 122, 109, 117, 91, 72, 121,
                     134, 155, 127, 125, 98, 128},
                    0.75f, 121);

  OpTester test("QLinearConv", 10);
  test.AddAttribute("group", static_cast<int64_t>(3));
  test.AddAttribute("pads", std::vector<int64_t>{0, 0, 1, 1});
  test.AddAttribute("strides", std::vector<int64_t>{2, 2});
  TestQLinearConvOp(test,
                    X, {1, 6, 3, 5},
                    W, {6, 2, 2, 2},
                    &B,
                    Y, {1, 6, 2, 3},
                    false,
                    {});
}

// Randomized QLinearConv tester: generates random quantized input/weights/bias,
// computes the expected output with a straightforward reference convolution,
// and runs the operator against it.
template <typename ActType, typename FilterType>
class QLinearConvOpTester {
 private:
  // Quantized tensor with per-tensor (or, for weights, per-channel) scales.
  template <typename T>
  struct QuantizedTensor {
    std::vector<T> data_;
    std::vector<int64_t> shape_;
    std::vector<float> scale_;  // one entry, or one per output channel (weights)
    T zero_point_{0};
  };

  std::default_random_engine generator_{1234};  // fixed seed: deterministic tests
  QuantizedTensor<ActType> X_;
  QuantizedTensor<FilterType> W_;
  std::vector<int32_t> B_;
  std::vector<int64_t> pads_;
  std::vector<int64_t> strides_;
  std::vector<int64_t> dilations_;
  int64_t groups_{0};  // 0 = attribute not set (defaults to 1 in the op)
  float output_scale_{1.0f};
  ActType output_zero_point_{0};

  // Number of elements for the given shape.
  static size_t ShapeSize(const std::vector<int64_t>& shape) {
    return static_cast<size_t>(std::accumulate(shape.cbegin(), shape.cend(), 1LL, std::multiplies<int64_t>()));
  }

  // Fills tensor with uniformly random values in [min_value, max_value] and
  // records its shape/scale/zero point.
  template <typename T>
  void GenerateRandom(QuantizedTensor<T>& tensor,
                      const std::vector<int64_t>& shape,
                      float scale,
                      T zero_point,
                      int32_t min_value,
                      int32_t max_value) {
    std::uniform_int_distribution<int32_t> distribution(min_value, max_value);
    size_t shape_size = ShapeSize(shape);
    tensor.data_.resize(shape_size);
    for (size_t n = 0; n < shape_size; n++) {
      tensor.data_[n] = static_cast<T>(distribution(generator_));
    }
    tensor.shape_ = shape;
    tensor.scale_ = {scale};
    tensor.zero_point_ = {zero_point};
  }

  // Precomputed clamp bounds (expressed relative to the output zero point)
  // used when requantizing the int32 accumulator back to ActType.
  template <typename T>
  struct RequantizeValues {
    RequantizeValues(int32_t zero_point) {
      min_value_ = static_cast<float>(static_cast<int32_t>(std::numeric_limits<T>::min()) - zero_point);
      max_value_ = static_cast<float>(static_cast<int32_t>(std::numeric_limits<T>::max()) - zero_point);
      zero_point_ = static_cast<float>(zero_point);
    }
    float min_value_;
    float max_value_;
    float zero_point_;
  };

  // Round-half-to-even (banker's rounding), matching ONNX requantization.
  inline float RoundHalfToEven(float input) {
    if (!std::isfinite(input)) {
      return input;
    }
    // std::remainder returns x - n, where n is the integral value nearest to x. When |x - n| = 0.5, n is chosen to be even
    return input - std::remainderf(input, 1.f);
  }

  // Scales the accumulator, clamps to the output type range, rounds
  // half-to-even and re-adds the output zero point.
  template <typename T>
  T RequantizeOutput(int32_t sum, float scale, RequantizeValues<T>& requantize_values) {
    float f = static_cast<float>(sum) * scale;
    f = std::min(f, requantize_values.max_value_);
    f = std::max(f, requantize_values.min_value_);
    return static_cast<T>(RoundHalfToEven(f) + requantize_values.zero_point_);
  }

  // Advances a multi-dimensional index `dims` through `shape` (odometer
  // style); returns false once the index wraps back to all zeros.
  static bool NextPosition(int64_t N, const int64_t* shape, int64_t* dims) {
    // Loop over spatial axes in reverse order to choose an index, like counting.
    bool incremented = false;
    for (int64_t d_i = N - 1; d_i >= 0; --d_i) {
      int64_t d_max = shape[d_i];
      ORT_ENFORCE(dims[d_i] < d_max);
      if (dims[d_i] == d_max - 1) {
        dims[d_i] = 0;
      } else {  // dims[d_i] < d_max - 1
        ++dims[d_i];
        incremented = true;
        break;
      }
    }
    return incremented;
  }

  // Reference (naive, integer) grouped N-D convolution producing the expected
  // quantized output and its shape.
  void ComputeExpectedOutput(std::vector<ActType>& Y_data, std::vector<int64_t>& Y_shape) {
    ORT_ENFORCE(W_.shape_.size() > 2);
    ORT_ENFORCE(X_.shape_.size() == W_.shape_.size());

    const size_t kernel_rank = W_.shape_.size() - 2;

    const int64_t batch_count = X_.shape_[0];
    const int64_t input_channels = X_.shape_[1];
    const int64_t output_channels = W_.shape_[0];
    const int64_t group_count = std::max<int64_t>(groups_, 1LL);
    const int64_t group_input_channels = W_.shape_[1];
    const int64_t group_output_channels = output_channels / group_count;

    ORT_ENFORCE(input_channels == group_input_channels * group_count);
    ORT_ENFORCE(output_channels == group_output_channels * group_count);

    const int64_t* input_shape = X_.shape_.data() + 2;
    const int64_t* kernel_shape = W_.shape_.data() + 2;

    // Unset attributes default to zero pads / unit dilations / unit strides.
    std::vector<int64_t> pads(pads_);
    if (pads.empty()) {
      pads.resize(kernel_rank * 2, 0);
    }
    std::vector<int64_t> dilations(dilations_);
    if (dilations.empty()) {
      dilations.resize(kernel_rank, 1);
    }
    std::vector<int64_t> strides(strides_);
    if (strides.empty()) {
      strides.resize(kernel_rank, 1);
    }

    // Compute the expected shape of the output.
    Y_shape.reserve(kernel_rank + 2);
    Y_shape.push_back(batch_count);
    Y_shape.push_back(output_channels);
    for (size_t n = 0; n < kernel_rank; n++) {
      Y_shape.push_back(((input_shape[n] + pads[n] + pads[kernel_rank + n]) -
                         (dilations[n] * (kernel_shape[n] - 1) + 1)) / strides[n] + 1);
    }
    const int64_t* output_shape = Y_shape.data() + 2;
    Y_data.resize(ShapeSize(Y_shape));

    const int64_t input_image_size = std::accumulate(
        input_shape, input_shape + kernel_rank, 1LL, std::multiplies<int64_t>());
    const int64_t kernel_size = std::accumulate(
        kernel_shape, kernel_shape + kernel_rank, 1LL, std::multiplies<int64_t>());
    const int32_t X_zero_point = X_.zero_point_;
    const int32_t W_zero_point = W_.zero_point_;

    const ActType* Xdata = X_.data_.data();
    ActType* Ydata = Y_data.data();

    RequantizeValues<ActType> requantize_values(output_zero_point_);

    for (int64_t batch = 0; batch < batch_count; batch++) {
      const FilterType* weight_group = W_.data_.data();
      for (int64_t group = 0; group < group_count; group++) {
        const FilterType* weight_row = weight_group;
        for (int64_t oc = 0; oc < group_output_channels; oc++) {
          int64_t channel_index = group * group_output_channels + oc;
          int32_t bias = B_.empty() ? 0 : B_[channel_index];
          // Weight scale is either per-tensor or per-output-channel.
          float weight_scale = W_.scale_[(W_.scale_.size() == 1) ? 0 : channel_index];
          float requantize_scale = (X_.scale_[0] * weight_scale) / output_scale_;
          std::vector<int64_t> d_output(kernel_rank, 0);
          std::vector<int64_t> d_kernel(kernel_rank, 0);
          do {
            // One output element: accumulate over all input channels of the
            // group and all kernel positions, skipping padded positions
            // (which contribute x == zero_point, i.e. zero after offset).
            int32_t sum = bias;
            const ActType* input_image = Xdata;
            const FilterType* weight_data = weight_row;
            for (int64_t ic = 0; ic < group_input_channels; ic++) {
              do {
                int64_t input_offset = 0;
                bool is_padding = false;
                for (size_t axis = 0; axis < kernel_rank; ++axis) {
                  int64_t input_dim = d_kernel[axis] * dilations[axis] + d_output[axis] * strides[axis] - pads[axis];
                  is_padding |= !math::is_a_ge_zero_and_a_lt_b(input_dim, input_shape[axis]);
                  input_offset *= input_shape[axis];
                  input_offset += input_dim;
                }
                int32_t w_value = static_cast<int32_t>(*weight_data++) - W_zero_point;
                if (!is_padding) {
                  int32_t x_value = static_cast<int32_t>(input_image[input_offset]) - X_zero_point;
                  sum += x_value * w_value;
                }
              } while (NextPosition(kernel_rank, kernel_shape, d_kernel.data()));
              input_image += input_image_size;
            }
            *Ydata++ = RequantizeOutput<ActType>(sum, requantize_scale, requantize_values);
          } while (NextPosition(kernel_rank, output_shape, d_output.data()));
          weight_row += group_input_channels * kernel_size;
        }
        Xdata += group_input_channels * input_image_size;
        weight_group += group_output_channels * group_input_channels * kernel_size;
      }
    }
  }

  // Builds the OpTester graph for one initializer configuration and runs it
  // against the reference output.
  void Run(bool all_input_initializer_except_x) {
    OpTester test("QLinearConv", 10);

    std::vector<ActType> Y_data;
    std::vector<int64_t> Y_shape;
    ComputeExpectedOutput(Y_data, Y_shape);

    test.AddInput<ActType>("x", X_.shape_, X_.data_);
    test.AddInput<float>("x_scale", {}, X_.scale_, all_input_initializer_except_x);
    test.AddInput<ActType>("x_zero_point", {}, {X_.zero_point_}, all_input_initializer_except_x);
    const std::vector<int64_t> W_scale_shape{static_cast<int64_t>(W_.scale_.size())};
    test.AddInput<FilterType>("w", W_.shape_, W_.data_, all_input_initializer_except_x);
    test.AddInput<float>("w_scale", W_scale_shape, W_.scale_, all_input_initializer_except_x);
    test.AddInput<FilterType>("w_zero_point", {}, {W_.zero_point_}, all_input_initializer_except_x);
    test.AddInput<float>("y_scale", {}, {output_scale_}, all_input_initializer_except_x);
    test.AddInput<ActType>("y_zero_point", {}, {output_zero_point_}, all_input_initializer_except_x);
    if (!B_.empty()) {
      const std::vector<int64_t> B_shape{static_cast<int64_t>(B_.size())};
      test.AddInput<int32_t>("b", B_shape, B_, all_input_initializer_except_x);
    }

    float abs_error = 0.0f;

    // For quantized models, NNAPI's rounding is different than CPU provider
    // Sometimes the result is within +/-1 of result of CPU provider
    // For ONNX, we use rounding to nearest ties to even.
    // For NNAPI, it is using std::round which is HALF_AWAY_FROM_ZERO, see
    // https://android.googlesource.com/platform/frameworks/ml/+/refs/heads/master/nn/common/operations/Quantize.cpp
    // Use 1 as abs_error which is the smallest possbile for uint8_t
    //
    // NOTE, for now the tolerance will only apply if the NNAPI is actually used,
    // if for any reason the execution falls back to CPU, we still expect an exact match
    // See, 'void Check<uint8_t>(...' in onnxruntime/test/providers/provider_test_utils.cc
#ifdef USE_NNAPI
    abs_error = 1.0f;
#endif

    test.AddOutput<ActType>("y", Y_shape, Y_data, false /* sort_output */, 0.0f /* rel_error */, abs_error);

    if (!pads_.empty()) {
      test.AddAttribute("pads", pads_);
    }
    if (!strides_.empty()) {
      test.AddAttribute("strides", strides_);
    }
    if (!dilations_.empty()) {
      test.AddAttribute("dilations", dilations_);
    }
    if (groups_ > 0) {
      test.AddAttribute("group", groups_);
    }

    test.Run(OpTester::ExpectResult::kExpectSuccess, "");
  }

 public:
  QLinearConvOpTester() {
  }

  // Random activation input covering the full ActType range.
  void GenerateRandomInput(const std::vector<int64_t>& shape, float scale, ActType zero_point) {
    GenerateRandom(X_, shape, scale, zero_point,
                   std::numeric_limits<ActType>::min(), std::numeric_limits<ActType>::max());
  }

  // Random weights; signed filters are restricted to [-63, 63] — NOTE(review):
  // presumably to keep the int32 accumulator within fast-path limits of the
  // optimized kernels; confirm against the MLAS implementation.
  void GenerateRandomWeights(const std::vector<int64_t>& shape, float scale, FilterType zero_point) {
    if (std::is_signed<FilterType>::value) {
      GenerateRandom(W_, shape, scale, zero_point, -63, 63);
    } else {
      GenerateRandom(W_, shape, scale, zero_point, 0, 255);
    }
  }

  // Per-output-channel weight scales (overrides the per-tensor scale).
  void SetWeightScales(const std::vector<float>& scales) {
    W_.scale_ = scales;
  }

  // Random int32 bias, one entry per output channel.
  void GenerateRandomBias() {
    ORT_ENFORCE(W_.shape_.size() >= 1);
    const size_t output_channels = static_cast<size_t>(W_.shape_[0]);
    B_.resize(output_channels);
    std::uniform_int_distribution<int32_t> distribution(-423, 423);
    for (size_t n = 0; n < output_channels; n++) {
      B_[n] = distribution(generator_);
    }
  }

  void SetPads(const std::vector<int64_t>& pads) {
    pads_ = pads;
  }

  void SetStrides(const std::vector<int64_t>& strides) {
    strides_ = strides;
  }

  void SetDilations(const std::vector<int64_t>& dilations) {
    dilations_ = dilations;
  }

  void SetGroups(int64_t groups) {
    groups_ = groups;
  }

  void SetOutputScaleAndZeroPoint(float output_scale, ActType output_zero_point) {
    output_scale_ = output_scale;
    output_zero_point_ = output_zero_point;
  }

  // Runs the test twice: once with runtime inputs, once with every input
  // except "x" supplied as an initializer.
  void Run() {
    for (bool all_input_initializer_except_x : std::initializer_list<bool>{false, true}) {
      Run(all_input_initializer_except_x);
    }
  }
};
// Randomized QLinearConv coverage: 1D/2D/3D, symmetric (zero-zero-point)
// weights, pointwise (1x1) kernels, dilations, strides, groups, per-channel
// weight scales and depthwise variants. All use the fixed-seed
// QLinearConvOpTester so results are deterministic.

TEST(QLinearConvTest, Conv1D_U8S8) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({3, 24, 15}, .05f, 4);
  test.GenerateRandomWeights({32, 24, 3}, .125f, 0);
  test.GenerateRandomBias();
  test.SetPads({1, 1});
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Sym_M64_C64) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 64, 15, 11}, .05f, 4);
  test.GenerateRandomWeights({64, 64, 3, 3}, .125f, 0);
  test.GenerateRandomBias();
  test.SetPads({1, 1, 1, 1});
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Sym_M16_C4) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 4, 3, 3}, .05f, 4);
  test.GenerateRandomWeights({16, 4, 3, 3}, .125f, 0);
  test.GenerateRandomBias();
  test.SetPads({0, 0, 0, 0});
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Sym_M16_C4_Bias) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 4, 3, 3}, .05f, 4);
  test.GenerateRandomWeights({16, 4, 3, 3}, .125f, 0);
  test.GenerateRandomBias();
  test.SetPads({0, 0, 0, 0});
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Sym_M16_C4_Bias_Pads) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 4, 3, 3}, .05f, 4);
  test.GenerateRandomWeights({16, 4, 3, 3}, .125f, 0);
  test.GenerateRandomBias();
  test.SetPads({1, 1, 1, 1});
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Sym_M48_C48_Bias_Pads) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 48, 15, 11}, .05f, 4);
  test.GenerateRandomWeights({48, 48, 3, 3}, .125f, 0);
  test.GenerateRandomBias();
  test.SetPads({1, 1, 1, 1});
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Sym_M32_C32_Bias_Pads) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 32, 15, 11}, .05f, 4);
  test.GenerateRandomWeights({32, 32, 3, 3}, .125f, 0);
  test.GenerateRandomBias();
  test.SetPads({1, 1, 1, 1});
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({3, 24, 15, 11}, .05f, 4);
  test.GenerateRandomWeights({32, 24, 3, 3}, .125f, 0);
  test.GenerateRandomBias();
  test.SetPads({1, 1, 1, 1});
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv3D_U8S8) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({2, 2, 15, 11, 6}, .05f, 4);
  test.GenerateRandomWeights({5, 2, 3, 3, 3}, .125f, 0);
  test.GenerateRandomBias();
  test.SetPads({1, 1, 1, 1, 1, 1});
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv1D_U8S8_Pointwise) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({3, 24, 15}, .05f, 4);
  test.GenerateRandomWeights({32, 24, 1}, .125f, 0);
  test.GenerateRandomBias();
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Pointwise) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({3, 24, 15, 11}, .05f, 4);
  test.GenerateRandomWeights({32, 24, 1, 1}, .125f, 0);
  test.GenerateRandomBias();
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8U8_Pointwise) {
  QLinearConvOpTester<uint8_t, uint8_t> test;
  test.GenerateRandomInput({3, 24, 19, 19}, .05f, 4);
  test.GenerateRandomWeights({32, 24, 1, 1}, .105f, 126);
  test.GenerateRandomBias();
  test.SetOutputScaleAndZeroPoint(.75f, 114);
  test.Run();
}

TEST(QLinearConvTest, Conv3D_U8S8_Pointwise) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({2, 2, 15, 11, 6}, .05f, 4);
  test.GenerateRandomWeights({5, 2, 1, 1, 1}, .125f, 0);
  test.GenerateRandomBias();
  test.SetOutputScaleAndZeroPoint(.55f, 54);
  test.Run();
}

TEST(QLinearConvTest, Conv1D_U8S8_Dilations) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 4, 19}, .02f, 20);
  test.GenerateRandomWeights({6, 4, 3}, .11f, 0);
  test.SetDilations({2});
  test.SetOutputScaleAndZeroPoint(.24f, 15);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Dilations) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 4, 19, 16}, .02f, 20);
  test.GenerateRandomWeights({6, 4, 3, 2}, .11f, 0);
  test.SetDilations({2, 2});
  test.SetOutputScaleAndZeroPoint(.24f, 15);
  test.Run();
}

TEST(QLinearConvTest, Conv3D_U8S8_Dilations) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 2, 19, 16, 8}, .02f, 20);
  test.GenerateRandomWeights({6, 2, 3, 2, 2}, .11f, 0);
  test.SetDilations({2, 2, 2});
  test.SetOutputScaleAndZeroPoint(.24f, 15);
  test.Run();
}

TEST(QLinearConvTest, Conv1D_U8S8_Strides) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 7, 18}, .04f, 16);
  test.GenerateRandomWeights({5, 7, 2}, .14f, 0);
  test.SetStrides({2});
  test.SetOutputScaleAndZeroPoint(.31f, 30);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Strides) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 7, 18, 24}, .04f, 16);
  test.GenerateRandomWeights({5, 7, 2, 3}, .14f, 0);
  test.SetStrides({2, 2});
  test.SetOutputScaleAndZeroPoint(.31f, 30);
  test.Run();
}

TEST(QLinearConvTest, Conv3D_U8S8_Strides) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 3, 18, 24, 18}, .04f, 16);
  test.GenerateRandomWeights({2, 3, 2, 3, 2}, .14f, 0);
  test.SetStrides({2, 2, 2});
  test.SetOutputScaleAndZeroPoint(.31f, 30);
  test.Run();
}

TEST(QLinearConvTest, Conv1D_U8S8_Groups) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 8, 13}, .03f, 7);
  test.GenerateRandomWeights({12, 4, 3}, .10f, 0);
  test.GenerateRandomBias();
  test.SetPads({1, 1});
  test.SetGroups(2);
  test.SetOutputScaleAndZeroPoint(.76f, 88);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Groups) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 8, 13, 17}, .03f, 7);
  test.GenerateRandomWeights({12, 4, 3, 3}, .10f, 0);
  test.GenerateRandomBias();
  test.SetPads({1, 1, 1, 1});
  test.SetGroups(2);
  test.SetOutputScaleAndZeroPoint(.76f, 88);
  test.Run();
}

TEST(QLinearConvTest, Conv3D_U8S8_Groups) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({2, 4, 13, 17, 13}, .03f, 7);
  test.GenerateRandomWeights({6, 2, 3, 3, 3}, .10f, 0);
  test.GenerateRandomBias();
  test.SetPads({1, 1, 1, 1, 1, 1});
  test.SetGroups(2);
  test.SetOutputScaleAndZeroPoint(.76f, 88);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Groups_PerChannel) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 8, 13, 17}, .03f, 7);
  test.GenerateRandomWeights({10, 4, 3, 3}, .10f, 0);
  test.SetWeightScales({.15f, .14f, .11f, .13f, .15f, .09f, .12f, .16f, .17f, .07f});
  test.GenerateRandomBias();
  test.SetPads({1, 1, 1, 1});
  test.SetGroups(2);
  test.SetOutputScaleAndZeroPoint(.76f, 88);
  test.Run();
}

TEST(QLinearConvTest, Conv2D_U8S8_Groups_Pointwise) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({1, 12, 17, 13}, .03f, 7);
  test.GenerateRandomWeights({15, 4, 1, 1}, .10f, 0);
  test.GenerateRandomBias();
  test.SetGroups(3);
  test.SetOutputScaleAndZeroPoint(.26f, 88);
  test.Run();
}

TEST(QLinearConvTest, Conv3D_U8S8_Groups_Pointwise) {
  QLinearConvOpTester<uint8_t, int8_t> test;
  test.GenerateRandomInput({2, 4, 13, 17, 13}, .03f, 7);
  test.GenerateRandomWeights({6, 2, 1, 1, 1}, .10f, 0);
  test.GenerateRandomBias();
  test.SetGroups(2);
  test.SetOutputScaleAndZeroPoint(.26f, 88);
  test.Run();
}

// NOTE(review): this test continues past the end of the visible chunk; the
// body below is kept verbatim up to the cut point.
TEST(QLinearConvTest, Conv1D_U8S8_Depthwise) {
  for (int8_t weight_zero_point : std::initializer_list<int8_t>{0, 2}) {
    for (int64_t channels : std::initializer_list<int64_t>{7, 8, 9, 16, 25, 40, 64}) {
      QLinearConvOpTester<uint8_t, int8_t> test;
      test.GenerateRandomInput({1, channels, 25}, .03f, 12);
      test.GenerateRandomWeights({channels, 1, 3}, .10f, weight_zero_point);
      test.GenerateRandomBias();
      test.SetPads({1, 1});
      test.SetGroups(channels);
test.SetOutputScaleAndZeroPoint(.21f, 88); test.Run(); } } } TEST(QLinearConvTest, Conv2D_U8S8_Depthwise) { for (int8_t weight_zero_point : std::initializer_list<int8_t>{0, 2}) { for (int64_t channels : std::initializer_list<int64_t>{7, 8, 9, 16, 25, 32, 40, 64}) { QLinearConvOpTester<uint8_t, int8_t> test; test.GenerateRandomInput({1, channels, 25, 25}, .03f, 12); test.GenerateRandomWeights({channels, 1, 5, 5}, .10f, weight_zero_point); test.GenerateRandomBias(); test.SetPads({2, 2, 2, 2}); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.76f, 88); test.Run(); } } } TEST(QLinearConvTest, Conv2D_U8S8_Depthwise_PerChannel) { for (int8_t weight_zero_point : std::initializer_list<int8_t>{0, -2}) { for (int64_t channels : std::initializer_list<int64_t>{7, 8, 9, 16, 40, 48, 52, 64, 192, 999, 1024, 1024 + 16, 1024 + 32, 1024 + 48, 999}) { QLinearConvOpTester<uint8_t, int8_t> test; test.GenerateRandomInput({1, channels, 17, 17}, .03f, 12); test.GenerateRandomWeights({channels, 1, 3, 3}, .10f, weight_zero_point); std::vector<float> weight_scales; for (int64_t i = 0; i < channels; i++) { weight_scales.push_back(.10f + static_cast<float>(i) * .002f); } test.SetWeightScales(weight_scales); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.76f, 88); test.Run(); } } } TEST(QLinearConvTest, Conv2D_U8S8_Depthwise_NoBias) { for (int8_t weight_zero_point : std::initializer_list<int8_t>{0, 2}) { QLinearConvOpTester<uint8_t, int8_t> test; test.GenerateRandomInput({1, 88, 19, 19}, .03f, 10); test.GenerateRandomWeights({88, 1, 3, 3}, .10f, weight_zero_point); test.SetPads({1, 1, 1, 1}); test.SetGroups(88); test.SetOutputScaleAndZeroPoint(.76f, 88); test.Run(); } } TEST(QLinearConvTest, Conv2D_U8S8_Depthwise_Kernelsize) { for (int64_t channels : std::initializer_list<int64_t>{16, 64}) { for (const auto& kd : std::initializer_list<std::pair<int64_t, int64_t>>{{3LL, 3LL}, {1LL, 9LL}, {5LL, 5LL}}) { for (int 
// Continuation of the depthwise kernel-size sweep (with and without bias).
with_bias : std::initializer_list<int>{0, 1}) { QLinearConvOpTester<uint8_t, int8_t> test; test.GenerateRandomInput({1, channels, 17, 17}, .03f, 12); test.GenerateRandomWeights({channels, 1, kd.first, kd.second}, .10f, 0); if (with_bias) { test.GenerateRandomBias(); } test.SetPads({0, 1, 0, 1}); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.76f, 88); test.Run(); } } } } TEST(QLinearConvTest, Conv2D_U8S8_Depthwise_Kernelsize_PerChannel) { for (int64_t channels : std::initializer_list<int64_t>{32, 96}) { for (const auto& kd : std::initializer_list<std::pair<int64_t, int64_t>>{{3LL, 3LL}, {5LL, 5LL}, {25LL, 1LL}}) { for (int with_bias : std::initializer_list<int>{0, 1}) { QLinearConvOpTester<uint8_t, int8_t> test; test.GenerateRandomInput({1, channels, 37, 37}, .03f, 12); test.GenerateRandomWeights({channels, 1, kd.first, kd.second}, .10f, 0); std::vector<float> weight_scales; for (int64_t i = 0; i < channels; i++) { weight_scales.push_back(.10f + static_cast<float>(i) * .002f); } test.SetWeightScales(weight_scales); if (with_bias) { test.GenerateRandomBias(); } test.SetPads({1, 0, 1, 0}); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.76f, 88); test.Run(); } } } } TEST(QLinearConvTest, Conv2D_U8U8_Depthwise) { for (int64_t channels : std::initializer_list<int64_t>{3, 8, 13, 24, 31, 64}) { QLinearConvOpTester<uint8_t, uint8_t> test; test.GenerateRandomInput({1, channels, 25, 25}, .03f, 12); test.GenerateRandomWeights({channels, 1, 3, 3}, .10f, 167); test.GenerateRandomBias(); test.SetPads({2, 0, 2, 0}); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.76f, 88); test.Run(); } } TEST(QLinearConvTest, Conv2D_U8S8_DepthwisePointwise) { // Tests the combination of using the depthwise convolution path along with the // pointed convolution optimization that avoids im2col.
for (int64_t channels : std::initializer_list<int64_t>{8, 16, 27}) { QLinearConvOpTester<uint8_t, int8_t> test; test.GenerateRandomInput({1, channels, 18, 18}, .03f, 12); test.GenerateRandomWeights({channels, 1, 1, 1}, .05f, 0); test.GenerateRandomBias(); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.24f, 88); test.Run(); } } TEST(QLinearConvTest, Conv3D_U8S8_Depthwise) { for (int64_t channels : std::initializer_list<int64_t>{6, 8, 31, 64}) { QLinearConvOpTester<uint8_t, int8_t> test; test.GenerateRandomInput({1, channels, 15, 11, 13}, .02f, 135); test.GenerateRandomWeights({channels, 1, 3, 3, 3}, .09f, 0); test.GenerateRandomBias(); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.85f, 112); test.Run(); } } TEST(QLinearConvTest, Conv2D_U8S8_Requantize_NoBias) { for (int64_t channels = 1; channels <= 32; channels++) { QLinearConvOpTester<uint8_t, int8_t> test; test.GenerateRandomInput({1, 8, 5, 5}, .05f, 4); test.GenerateRandomWeights({channels, 8, 3, 3}, .125f, 0); test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, 56); test.Run(); } } TEST(QLinearConvTest, Conv2D_U8S8_Requantize_Bias) { for (int64_t channels = 1; channels <= 32; channels++) { QLinearConvOpTester<uint8_t, int8_t> test; test.GenerateRandomInput({1, 8, 5, 5}, .05f, 4); test.GenerateRandomWeights({channels, 8, 3, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, 56); test.Run(); } } TEST(QLinearConvTest, Conv2D_U8S8_Requantize_Bias_PerChannel) { for (int64_t channels : std::initializer_list<int64_t>{1, 6, 8, 15, 16, 17, 31, 32, 48, 64, 200}) { QLinearConvOpTester<uint8_t, int8_t> test; test.GenerateRandomInput({1, 8, 5, 5}, .05f, 4); test.GenerateRandomWeights({channels, 8, 3, 3}, .125f, 0); std::vector<float> weight_scales; for (int64_t i = 0; i < channels; i++) { weight_scales.push_back(.120f + .002f * static_cast<float>(i)); } test.SetWeightScales(weight_scales); test.GenerateRandomBias();
// ---- int8-activation (S8) half of the matrix starts here. ----
// NOTE(review): Conv2D_S8S8_Sym_M16_C4 and Conv2D_S8S8_Sym_M16_C4_Bias below
// have identical bodies (both call GenerateRandomBias) — the "_M16_C4" case was
// presumably meant to omit the bias; confirm against upstream.
test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, 56); test.Run(); } } TEST(QLinearConvTest, Conv1D_S8S8) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({3, 24, 15}, .05f, 4); test.GenerateRandomWeights({32, 24, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({1, 1}); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Sym_M64_C64) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 64, 15, 11}, .05f, 4); test.GenerateRandomWeights({64, 64, 3, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Sym_M16_C4) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 4, 3, 3}, .05f, 4); test.GenerateRandomWeights({16, 4, 3, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({0, 0, 0, 0}); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Sym_M16_C4_Bias) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 4, 3, 3}, .05f, 4); test.GenerateRandomWeights({16, 4, 3, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({0, 0, 0, 0}); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Sym_M16_C4_Bias_Pads) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 4, 3, 3}, .05f, -4); test.GenerateRandomWeights({16, 4, 3, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Sym_M48_C48_Bias_Pads) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 48, 15, 11}, .05f, -4); test.GenerateRandomWeights({48, 48, 3, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Sym_M32_C32_Bias_Pads) {
// NOTE(review): Conv2D_S8U8_Pointwise below instantiates
// QLinearConvOpTester<int8_t, int8_t> despite the "U8" weight suffix in its
// name — confirm whether <int8_t, uint8_t> was intended.
QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 32, 15, 11}, .05f, -4); test.GenerateRandomWeights({32, 32, 3, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({3, 24, 15, 11}, .05f, 4); test.GenerateRandomWeights({32, 24, 3, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); } TEST(QLinearConvTest, Conv3D_S8S8) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({2, 2, 15, 11, 6}, .05f, -4); test.GenerateRandomWeights({5, 2, 3, 3, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); } TEST(QLinearConvTest, Conv1D_S8S8_Pointwise) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({3, 24, 15}, .05f, 4); test.GenerateRandomWeights({32, 24, 1}, .125f, 0); test.GenerateRandomBias(); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Pointwise) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({3, 24, 15, 11}, .05f, -4); test.GenerateRandomWeights({32, 24, 1, 1}, .125f, 0); test.GenerateRandomBias(); test.SetOutputScaleAndZeroPoint(.55f, -54); test.Run(); } TEST(QLinearConvTest, Conv2D_S8U8_Pointwise) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({3, 24, 19, 19}, .05f, 4); test.GenerateRandomWeights({32, 24, 1, 1}, .105f, 126); test.GenerateRandomBias(); test.SetOutputScaleAndZeroPoint(.75f, -14); test.Run(); } TEST(QLinearConvTest, Conv3D_S8S8_Pointwise) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({2, 2, 15, 11, 6}, .05f, 4); test.GenerateRandomWeights({5, 2, 1, 1, 1}, .125f, 0); test.GenerateRandomBias(); test.SetOutputScaleAndZeroPoint(.55f, 54); test.Run(); }
// Dilation, stride and grouped variants of the s8-activation path.
TEST(QLinearConvTest, Conv1D_S8S8_Dilations) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 4, 19}, .02f, 20); test.GenerateRandomWeights({6, 4, 3}, .11f, 0); test.SetDilations({2}); test.SetOutputScaleAndZeroPoint(.24f, -15); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Dilations) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 4, 19, 16}, .02f, -20); test.GenerateRandomWeights({6, 4, 3, 2}, .11f, 0); test.SetDilations({2, 2}); test.SetOutputScaleAndZeroPoint(.24f, 15); test.Run(); } TEST(QLinearConvTest, Conv3D_S8S8_Dilations) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 2, 19, 16, 8}, .02f, 20); test.GenerateRandomWeights({6, 2, 3, 2, 2}, .11f, 0); test.SetDilations({2, 2, 2}); test.SetOutputScaleAndZeroPoint(.24f, -15); test.Run(); } TEST(QLinearConvTest, Conv1D_S8S8_Strides) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 7, 18}, .04f, 16); test.GenerateRandomWeights({5, 7, 2}, .14f, 0); test.SetStrides({2}); test.SetOutputScaleAndZeroPoint(.31f, 30); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Strides) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 7, 18, 24}, .04f, 16); test.GenerateRandomWeights({5, 7, 2, 3}, .14f, 0); test.SetStrides({2, 2}); test.SetOutputScaleAndZeroPoint(.31f, -30); test.Run(); } TEST(QLinearConvTest, Conv3D_S8S8_Strides) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 3, 18, 24, 18}, .04f, -16); test.GenerateRandomWeights({2, 3, 2, 3, 2}, .14f, 0); test.SetStrides({2, 2, 2}); test.SetOutputScaleAndZeroPoint(.31f, 30); test.Run(); } TEST(QLinearConvTest, Conv1D_S8S8_Groups) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 8, 13}, .03f, -7); test.GenerateRandomWeights({12, 4, 3}, .10f, 0); test.GenerateRandomBias(); test.SetPads({1, 1}); test.SetGroups(2); test.SetOutputScaleAndZeroPoint(.76f, -88); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Groups)
{ QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 8, 13, 17}, .03f, 7); test.GenerateRandomWeights({12, 4, 3, 3}, .10f, 0); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetGroups(2); test.SetOutputScaleAndZeroPoint(.76f, -88); test.Run(); } TEST(QLinearConvTest, Conv3D_S8S8_Groups) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({2, 4, 13, 17, 13}, .03f, 7); test.GenerateRandomWeights({6, 2, 3, 3, 3}, .10f, 0); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1, 1, 1}); test.SetGroups(2); test.SetOutputScaleAndZeroPoint(.76f, -88); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Groups_PerChannel) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 8, 13, 17}, .03f, -7); test.GenerateRandomWeights({10, 4, 3, 3}, .10f, 0); test.SetWeightScales({.15f, .14f, .11f, .13f, .15f, .09f, .12f, .16f, .17f, .07f}); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetGroups(2); test.SetOutputScaleAndZeroPoint(.76f, 88); test.Run(); } TEST(QLinearConvTest, Conv2D_S8S8_Groups_Pointwise) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 12, 17, 13}, .03f, 7); test.GenerateRandomWeights({15, 4, 1, 1}, .10f, 0); test.GenerateRandomBias(); test.SetGroups(3); test.SetOutputScaleAndZeroPoint(.26f, -8); test.Run(); } TEST(QLinearConvTest, Conv3D_S8S8_Groups_Pointwise) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({2, 4, 13, 17, 13}, .03f, 7); test.GenerateRandomWeights({6, 2, 1, 1, 1}, .10f, 0); test.GenerateRandomBias(); test.SetGroups(2); test.SetOutputScaleAndZeroPoint(.26f, 8); test.Run(); } TEST(QLinearConvTest, Conv1D_S8S8_Depthwise) { for (int8_t weight_zero_point : std::initializer_list<int8_t>{-2, 0, 2}) { for (int64_t channels : std::initializer_list<int64_t>{7, 8, 9, 16, 25, 64}) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, channels, 25}, .03f, 12); test.GenerateRandomWeights({channels, 1, 3}, .10f,
// NOTE(review): as in the U8 per-channel case, the channel list in
// Conv2D_S8S8_Depthwise_PerChannel below repeats 999 twice; confirm intent.
weight_zero_point); test.GenerateRandomBias(); test.SetPads({1, 1}); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.21f, -8); test.Run(); } } } TEST(QLinearConvTest, Conv2D_S8S8_Depthwise) { for (int8_t weight_zero_point : std::initializer_list<int8_t>{-2, 0, 2}) { for (int64_t channels : std::initializer_list<int64_t>{7, 8, 9, 16, 25, 32, 64}) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, channels, 25, 25}, .03f, 12); test.GenerateRandomWeights({channels, 1, 5, 5}, .10f, weight_zero_point); test.GenerateRandomBias(); test.SetPads({2, 2, 2, 2}); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.76f, 8); test.Run(); } } } TEST(QLinearConvTest, Conv2D_S8S8_Depthwise_PerChannel) { for (int8_t weight_zero_point : std::initializer_list<int8_t>{0, -2}) { for (int64_t channels : std::initializer_list<int64_t>{7, 8, 9, 16, 40, 48, 52, 64, 192, 999, 1024, 1024 + 16, 1024 + 32, 1024 + 48, 999}) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, channels, 17, 17}, .03f, 12); test.GenerateRandomWeights({channels, 1, 3, 3}, .10f, weight_zero_point); std::vector<float> weight_scales; for (int64_t i = 0; i < channels; i++) { weight_scales.push_back(.10f + static_cast<float>(i) * .002f); } test.SetWeightScales(weight_scales); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.76f, -8); test.Run(); } } } TEST(QLinearConvTest, Conv2D_S8S8_Depthwise_NoBias) { for (int8_t weight_zero_point : std::initializer_list<int8_t>{0, 2}) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 80, 19, 19}, .03f, -10); test.GenerateRandomWeights({80, 1, 3, 3}, .10f, weight_zero_point); test.SetPads({1, 1, 1, 1}); test.SetGroups(80); test.SetOutputScaleAndZeroPoint(.76f, 8); test.Run(); } } TEST(QLinearConvTest, Conv2D_S8U8_Depthwise) { for (int64_t channels : std::initializer_list<int64_t>{3, 8, 13, 24, 31, 64}) { QLinearConvOpTester<int8_t, int8_t>
// NOTE(review): Conv2D_S8U8_Depthwise above instantiates <int8_t, int8_t> and
// uses a negative weight zero point (-15) despite the "U8" weight suffix in its
// name — confirm whether <int8_t, uint8_t> was intended.
test; test.GenerateRandomInput({1, channels, 25, 25}, .03f, 12); test.GenerateRandomWeights({channels, 1, 3, 3}, .10f, -15); test.GenerateRandomBias(); test.SetPads({2, 0, 2, 0}); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.76f, 8); test.Run(); } } TEST(QLinearConvTest, Conv2D_S8S8_DepthwisePointwise) { // Tests the combination of using the depthwise convolution path along with the // pointed convolution optimization that avoids im2col.
QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 27, 18, 18}, .03f, 12); test.GenerateRandomWeights({27, 1, 1, 1}, .05f, 0); test.GenerateRandomBias(); test.SetGroups(27); test.SetOutputScaleAndZeroPoint(.24f, -8); test.Run(); } TEST(QLinearConvTest, Conv3D_S8S8_Depthwise) { for (int64_t channels : std::initializer_list<int64_t>{6, 8, 31, 64}) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, channels, 15, 11, 13}, .02f, 7); test.GenerateRandomWeights({channels, 1, 3, 3, 3}, .09f, 0); test.GenerateRandomBias(); test.SetGroups(channels); test.SetOutputScaleAndZeroPoint(.85f, -16); test.Run(); } } TEST(QLinearConvTest, Conv2D_S8S8_Requantize_NoBias) { for (int64_t channels = 1; channels <= 32; channels++) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 8, 5, 5}, .05f, -40); test.GenerateRandomWeights({channels, 8, 3, 3}, .125f, 0); test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, -18); test.Run(); } } TEST(QLinearConvTest, Conv2D_S8S8_Requantize_Bias) { for (int64_t channels = 1; channels <= 32; channels++) { QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 8, 5, 5}, .05f, -60); test.GenerateRandomWeights({channels, 8, 3, 3}, .125f, 0); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, -56); test.Run(); } } TEST(QLinearConvTest, Conv2D_S8S8_Requantize_Bias_PerChannel) { for (int64_t channels : std::initializer_list<int64_t>{1, 6, 8, 15, 16, 17, 31, 32, 48, 64, 200}) {
// Tail of the per-channel requantize sweep, then the shared pre-packed
// weights test (non-training builds only).
QLinearConvOpTester<int8_t, int8_t> test; test.GenerateRandomInput({1, 8, 5, 5}, .05f, 4); test.GenerateRandomWeights({channels, 8, 3, 3}, .125f, 0); std::vector<float> weight_scales; for (int64_t i = 0; i < channels; i++) { weight_scales.push_back(.120f + .002f * static_cast<float>(i)); } test.SetWeightScales(weight_scales); test.GenerateRandomBias(); test.SetPads({1, 1, 1, 1}); test.SetOutputScaleAndZeroPoint(.55f, -64); test.Run(); } } #ifndef ENABLE_TRAINING // Prepacking is enabled only on non-training builds TEST(QLinearConvTest, SharedPrepackedWeights) { QuantizedTensor X({0.45246148109436035f, 0.15498268604278564f, 0.11199361085891724f, -0.39421093463897705f, 0.2626858949661255f, 0.13414543867111206f, -0.27184486389160156f, -0.43028733134269714f, -0.26825493574142456f, 0.3893144130706787f, -0.13631996512413025f, -0.009590476751327515f, -0.48771554231643677f, -0.25256502628326416f, -0.2812897562980652f, 0.4043201804161072f, 0.07795023918151855f, 0.326981782913208f, 0.13114392757415771f, -0.4416425824165344f, 0.12446999549865723f, 0.36739975214004517f, 0.1698915958404541f, 0.2008744478225708f, 0.23339951038360596f, 0.38613730669021606f, 0.11117297410964966f, 0.3877097964286804f, 0.20812749862670898f, -0.34297940135002136f, -0.029246658086776733f, -0.20483523607254028f, -0.19244328141212463f, -0.11104947328567505f, -0.32830488681793213f, -0.01800677180290222f, 0.3618946671485901f, -0.40949052572250366f, -0.18248388171195984f, -0.3349453806877136f, -0.34091079235076904f, 0.006497859954833984f, 0.4537564516067505f, 0.08006560802459717f, -0.14788749814033508f, 0.034442365169525146f, -0.33322954177856445f, 0.06049239635467529f, 0.42619407176971436f}); QuantizedTensor W({-0.4406261742115021f}); QuantizedTensor Y({-0.19936637580394745f, -0.06828942894935608f, -0.04934731498360634f, 0.17369966208934784f, -0.11574628204107285f, -0.05910799279808998f, 0.1197819635272026f, 0.18959586322307587f, 0.1182001456618309f, -0.17154212296009064f, 0.06006614491343498f,
0.0042258151806890965f, 0.21490024030208588f, 0.11128675937652588f, 0.12394362688064575f, -0.17815405130386353f, -0.034346915781497955f, -0.14407673478126526f, -0.05778544768691063f, 0.19459928572177887f, -0.05484473705291748f, -0.16188594698905945f, -0.07485868036746979f, -0.08851054310798645f, -0.10284193605184555f, -0.17014220356941223f, -0.04898572340607643f, -0.17083507776260376f, -0.09170642495155334f, 0.1511256992816925f, 0.012886842712759972f, 0.09025576710700989f, 0.08479554951190948f, 0.0489313043653965f, 0.14465972781181335f, 0.007934254594147205f, -0.15946026146411896f, 0.1804322451353073f, 0.08040717244148254f, 0.1475857049226761f, 0.15021422505378723f, -0.0028631272725760937f, -0.19993697106838226f, -0.03527900204062462f, 0.06516310572624207f, -0.015176207758486271f, 0.14682966470718384f, -0.02665453404188156f, -0.18779225647449493f}); OpTester test("QLinearConv", 10); test.AddInput<uint8_t>("x", {1, 1, 7, 7}, X.quantized_); test.AddInput<float>("x_scale", {}, {X.scale_}, true); test.AddInput<uint8_t>("x_zero_point", {}, {X.zero_point_}, true); test.AddInput<uint8_t>("w", {1, 1, 1, 1}, W.quantized_, true); test.AddInput<float>("w_scale", {}, {W.scale_}, true); test.AddInput<uint8_t>("w_zero_point", {}, {W.zero_point_}, true); test.AddInput<float>("y_scale", {}, {Y.scale_}, true); test.AddInput<uint8_t>("y_zero_point", {}, {Y.zero_point_}, true); test.AddOutput<uint8_t>("y", {1, 1, 7, 7}, Y.quantized_); // W OrtValue W_ortvalue; Tensor::InitOrtValue(DataTypeImpl::GetType<uint8_t>(), TensorShape({1, 1, 1, 1}), W.quantized_.data(), OrtMemoryInfo(CPU, OrtAllocatorType::OrtDeviceAllocator), W_ortvalue); SessionOptions so; // Set up weight(s) as a shared initializer to be shared between sessions ASSERT_EQ(so.AddInitializer("w", &W_ortvalue), Status::OK()); // We want all sessions running using this OpTester to be able to share pre-packed weights if applicable test.EnableSharingOfPrePackedWeightsAcrossSessions(); // Pre-packing is limited just to the CPU EP
// Session 1 runs first, counts how many weights were pre-packed, and verifies
// nothing was shared yet; the shared-container size must then match that count.
for now and we will only test the CPU EP // and we want to ensure that it is available in this build auto cpu_ep = []() -> std::vector<std::unique_ptr<IExecutionProvider>> { std::vector<std::unique_ptr<IExecutionProvider>> execution_providers; execution_providers.push_back(DefaultCpuExecutionProvider()); return execution_providers; }; size_t number_of_pre_packed_weights_counter_session_1 = 0; size_t number_of_shared_pre_packed_weights_counter = 0; // Session 1 { auto ep_vec = cpu_ep(); test.Run(so, OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &ep_vec, {}, &number_of_pre_packed_weights_counter_session_1, &number_of_shared_pre_packed_weights_counter); // Assert that no pre-packed weights have been shared thus far ASSERT_EQ(number_of_shared_pre_packed_weights_counter, static_cast<size_t>(0)); } auto number_of_elements_in_shared_prepacked_buffers_container = test.GetNumPrePackedWeightsShared(); // Assert that the number of elements in the shared container // is the same as the number of weights that have been pre-packed ASSERT_EQ(number_of_pre_packed_weights_counter_session_1, number_of_elements_in_shared_prepacked_buffers_container); // On some platforms/architectures MLAS may choose to not do any pre-packing and the number of elements // that have been pre-packed will be zero in which case we do not continue with the testing // of "sharing" of pre-packed weights as there are no pre-packed weights to be shared at all.
if (number_of_pre_packed_weights_counter_session_1 == 0) return; // Session 2 { size_t number_of_pre_packed_weights_counter_session_2 = 0; auto ep_vec = cpu_ep(); test.Run(so, OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &ep_vec, {}, &number_of_pre_packed_weights_counter_session_2, &number_of_shared_pre_packed_weights_counter); // Assert that the same number of weights were pre-packed in both sessions ASSERT_EQ(number_of_pre_packed_weights_counter_session_1, number_of_pre_packed_weights_counter_session_2); // Assert that the number of pre-packed weights that were shared equals // the number of pre-packed weights in the second session ASSERT_EQ(number_of_pre_packed_weights_counter_session_2, static_cast<size_t>(number_of_shared_pre_packed_weights_counter)); } } #endif } // namespace } // namespace test } // namespace onnxruntime
// LeetCode 151. Reverse Words in a String
//
// Given a string s, reverse the order of its words. A word is a maximal run of
// non-space characters. The result contains the words in reverse order,
// separated by single spaces, with no leading or trailing spaces (extra spaces
// in the input are collapsed).
//
// Examples:
//   "the sky is blue"   -> "blue is sky the"
//   "  hello world!  "  -> "world! hello"
//   "a good   example"  -> "example good a"
//
// Constraints: 1 <= s.length <= 10^4; s contains upper/lowercase letters,
// digits and spaces, and holds at least one word.
// Related Topics: String

#include "iostream"
#include "cassert"
#include "vector"
#include "string"
#include "functional"
#include "stack"

using namespace std;

//leetcode submit region begin(Prohibit modification and deletion)
class Solution {
public:
    // Scan the string from the back and append each word as it is found.
    // O(n) time, O(1) auxiliary space besides the output string. (The previous
    // version buffered every word on a std::stack before joining; the removed
    // commented-out alternative also leaked its `new char[]` scratch buffer.)
    string reverseWords(string s) {
        string ans;
        int i = static_cast<int>(s.size()) - 1;
        while (i >= 0) {
            // Skip the run of spaces that precedes the next word (if any).
            while (i >= 0 && s[i] == ' ') --i;
            if (i < 0) break;  // only spaces remained
            int end = i;       // index of the word's last character
            while (i >= 0 && s[i] != ' ') --i;
            if (!ans.empty()) ans += ' ';
            // Word occupies [i + 1, end]; append it in one call.
            ans.append(s, i + 1, end - i);
        }
        return ans;
    }
};
//leetcode submit region end(Prohibit modification and deletion)

int main() {
    Solution solution;
    // Smoke-check instead of discarding the result into an unused binding.
    assert(solution.reverseWords("the sky is blue") == "blue is sky the");
    return 0;
}
#include <stdio.h> int main() { int n, k, i; scanf("%d%d", &n, &k); for(i = 0; i < k; i++) { if(n%10 != 0) n--; else n /= 10; } printf("%d", n); }
// Qt GUI utility helpers for the Bitradio wallet: date formatting, widget
// setup, payment-URI parsing, HTML escaping, save dialogs, window visibility
// checks and table-column sizing support.
#include <QApplication>
#include "guiutil.h"
#include "bitcoinaddressvalidator.h"
#include "bitcoinunits.h"
#include "walletmodel.h"
#include "init.h"
#include "util.h"

#ifdef WIN32
// Target Windows XP APIs and trim <windows.h> before pulling in shell headers.
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0501
#ifdef _WIN32_IE
#undef _WIN32_IE
#endif
#define _WIN32_IE 0x0501
#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include "shellapi.h"
#include "shlobj.h"
#include "shlwapi.h"
#endif

#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#if BOOST_FILESYSTEM_VERSION >= 3
#include <boost/filesystem/detail/utf8_codecvt_facet.hpp>
#endif

#include <QAbstractItemView>
#include <QClipboard>
#include <QDateTime>
#include <QDesktopServices>
#include <QDoubleValidator>
#include <QFileDialog>
#include <QFont>
#include <QLineEdit>
#include <QSettings>
#include <QTextDocument> // For Qt::escape
#include <QThread>

#if QT_VERSION < 0x050000
#include <QUrl>
#else
#include <QUrlQuery>
#endif

#if BOOST_FILESYSTEM_VERSION >= 3
// Shared UTF-8 codecvt facet used by the qstring/boost-path converters below.
static boost::filesystem::detail::utf8_codecvt_facet utf8;
#endif

#if defined(Q_OS_MAC)
extern double NSAppKitVersionNumber;
// NOTE(review): these macros are defined without a value here (upstream
// Bitcoin defines 10_8 = 1187 and 10_9 = 1265); looks truncated — confirm,
// as SubstituteFonts() compares against them.
#if !defined(NSAppKitVersionNumber10_8)
#define NSAppKitVersionNumber10_8
#endif
#if !defined(NSAppKitVersionNumber10_9)
#define NSAppKitVersionNumber10_9
#endif
#endif

namespace GUIUtil {

// Formats a QDateTime as "<locale short date> hh:mm".
QString dateTimeStr(const QDateTime &date)
{
    return date.date().toString(Qt::SystemLocaleShortDate) + QString(" ") + date.toString("hh:mm");
}

// Overload taking a unix timestamp (seconds since epoch).
QString dateTimeStr(qint64 nTime)
{
    return dateTimeStr(QDateTime::fromTime_t((qint32)nTime));
}

// Monospace font used for rendering addresses/hashes.
QFont bitcoinAddressFont()
{
    QFont font("Monospace");
#if QT_VERSION >= 0x040800
    font.setStyleHint(QFont::Monospace);
#else
    font.setStyleHint(QFont::TypeWriter);
#endif
    return font;
}

// Configures a line edit for address entry: length limit, validator, font.
void setupAddressWidget(QLineEdit *widget, QWidget *parent)
{
    widget->setMaxLength(BitcoinAddressValidator::MaxAddressLength);
    widget->setValidator(new BitcoinAddressValidator(parent));
    widget->setFont(bitcoinAddressFont());
}

// Configures a line edit for amount entry (8 decimals, non-negative).
void setupAmountWidget(QLineEdit *widget, QWidget *parent)
{
    QDoubleValidator *amountValidator = new QDoubleValidator(parent);
    amountValidator->setDecimals(8);
    amountValidator->setBottom(0.0);
    widget->setValidator(amountValidator);
    widget->setAlignment(Qt::AlignRight|Qt::AlignVCenter);
}

// Parses a "Bitradio:" payment URI into a SendCoinsRecipient.
// Unknown "req-" parameters cause rejection (they are mandatory-to-understand);
// recognised keys are "label" and "amount". Returns false on any parse error.
bool parseBitcoinURI(const QUrl &uri, SendCoinsRecipient *out)
{
    // NovaCoin: check prefix
    if(uri.scheme() != QString("Bitradio"))
        return false;

    SendCoinsRecipient rv;
    rv.address = uri.path();
    rv.amount = 0;
#if QT_VERSION < 0x050000
    QList<QPair<QString, QString> > items = uri.queryItems();
#else
    QUrlQuery uriQuery(uri);
    QList<QPair<QString, QString> > items = uriQuery.queryItems();
#endif
    for (QList<QPair<QString, QString> >::iterator i = items.begin(); i != items.end(); i++)
    {
        bool fShouldReturnFalse = false;
        if (i->first.startsWith("req-"))
        {
            // Strip the "req-" prefix; if the key is not recognised below,
            // fShouldReturnFalse stays set and the URI is rejected.
            i->first.remove(0, 4);
            fShouldReturnFalse = true;
        }

        if (i->first == "label")
        {
            rv.label = i->second;
            fShouldReturnFalse = false;
        }
        else if (i->first == "amount")
        {
            if(!i->second.isEmpty())
            {
                if(!BitcoinUnits::parse(BitcoinUnits::BTC, i->second, &rv.amount))
                {
                    return false;
                }
            }
            fShouldReturnFalse = false;
        }

        if (fShouldReturnFalse)
            return false;
    }
    if(out)
    {
        *out = rv;
    }
    return true;
}

// String overload: normalises "Bitradio://" to "Bitradio:" before parsing.
bool parseBitcoinURI(QString uri, SendCoinsRecipient *out)
{
    // Convert Bitradio:// to Bitradio:
    //
    //    Cannot handle this later, because bitcoin:// will cause Qt to see the part after // as host,
    //    which will lower-case it (and thus invalidate the address).
    if(uri.startsWith("Bitradio://", Qt::CaseInsensitive))
    {
        uri.replace(0, 11, "Bitradio:");
    }
    QUrl uriInstance(uri);
    return parseBitcoinURI(uriInstance, out);
}

// HTML-escapes a string for safe display; spaces become &nbsp; and, when
// fMultiLine is set, newlines become <br>.
QString HtmlEscape(const QString& str, bool fMultiLine)
{
#if QT_VERSION < 0x050000
    QString escaped = Qt::escape(str);
#else
    QString escaped = str.toHtmlEscaped();
#endif
    escaped = escaped.replace(" ", "&nbsp;");
    if(fMultiLine)
    {
        escaped = escaped.replace("\n", "<br>\n");
    }
    return escaped;
}

// std::string convenience overload of the above.
QString HtmlEscape(const std::string& str, bool fMultiLine)
{
    return HtmlEscape(QString::fromStdString(str), fMultiLine);
}

// Copies the given column/role of the first selected row to the clipboard.
void copyEntryData(QAbstractItemView *view, int column, int role)
{
    if(!view || !view->selectionModel())
        return;
    QModelIndexList selection = view->selectionModel()->selectedRows(column);

    if(!selection.isEmpty())
    {
        // Copy first item
        setClipboard(selection.at(0).data(role).toString());
    }
}

// Shows a save-file dialog, appending the suffix from the chosen filter when
// the user did not type one. Optionally reports the selected suffix.
QString getSaveFileName(QWidget *parent, const QString &caption,
    const QString &dir,
    const QString &filter,
    QString *selectedSuffixOut)
{
    QString selectedFilter;
    QString myDir;
    if(dir.isEmpty()) // Default to user documents location
    {
#if QT_VERSION < 0x050000
        myDir = QDesktopServices::storageLocation(QDesktopServices::DocumentsLocation);
#else
        myDir = QStandardPaths::writableLocation(QStandardPaths::DocumentsLocation);
#endif
    }
    else
    {
        myDir = dir;
    }
    QString result = QDir::toNativeSeparators(QFileDialog::getSaveFileName(parent, caption, myDir, filter, &selectedFilter));

    /* Extract first suffix from filter pattern "Description (*.foo)" or "Description (*.foo *.bar ...) */
    QRegExp filter_re(".* \\(\\*\\.(.*)[ \\)]");
    QString selectedSuffix;
    if(filter_re.exactMatch(selectedFilter))
    {
        selectedSuffix = filter_re.cap(1);
    }

    /* Add suffix if needed */
    QFileInfo info(result);
    if(!result.isEmpty())
    {
        if(info.suffix().isEmpty() && !selectedSuffix.isEmpty())
        {
            /* No suffix specified, add selected suffix */
            if(!result.endsWith("."))
                result.append(".");
            result.append(selectedSuffix);
        }
    }

    /* Return selected suffix if asked to */
    if(selectedSuffixOut)
    {
        *selectedSuffixOut = selectedSuffix;
    }
    return result;
}

// Connection type for invoking a slot on the GUI thread: blocking-queued when
// called from another thread, direct when already on it.
Qt::ConnectionType blockingGUIThreadConnection()
{
    if(QThread::currentThread() != qApp->thread())
    {
        return Qt::BlockingQueuedConnection;
    }
    else
    {
        return Qt::DirectConnection;
    }
}

// True when the widget at point p (in w's coordinates) belongs to w's window.
bool checkPoint(const QPoint &p, const QWidget *w)
{
    QWidget *atW = QApplication::widgetAt(w->mapToGlobal(p));
    if (!atW) return false;
    return atW->topLevelWidget() == w;
}

// Heuristic: the window counts as obscured unless its four corners and centre
// are all visible.
bool isObscured(QWidget *w)
{
    return !(checkPoint(QPoint(0, 0), w)
        && checkPoint(QPoint(w->width() - 1, 0), w)
        && checkPoint(QPoint(0, w->height() - 1), w)
        && checkPoint(QPoint(w->width() - 1, w->height() - 1), w)
        && checkPoint(QPoint(w->width() / 2, w->height() / 2), w));
}

// Opens debug.log from the data directory with the OS default application.
void openDebugLogfile()
{
    boost::filesystem::path pathDebug = GetDataDir() / "debug.log";

    /* Open debug.log with the associated application */
    if (boost::filesystem::exists(pathDebug))
        QDesktopServices::openUrl(QUrl::fromLocalFile(boostPathToQString(pathDebug)));
}

// Opens the wallet-backup folder with the OS default file browser.
void showBackups()
{
    boost::filesystem::path pathBackups = GetDataDir() / "backups";

    /* Open folder with default browser */
    if (boost::filesystem::exists(pathBackups))
        QDesktopServices::openUrl(QUrl::fromLocalFile(boostPathToQString(pathBackups)));
}

// Substitutes the correct macOS system UI font for the given UI language.
void SubstituteFonts(const QString& language)
{
#if defined(Q_OS_MAC)
// Background:
// OSX's default font changed in 10.9 and QT is unable to find it with its
// usual fallback methods when building against the 10.7 sdk or lower.
// The 10.8 SDK added a function to let it find the correct fallback font.
// If this fallback is not properly loaded, some characters may fail to
// render correctly.
//
// The same thing happened with 10.10. .Helvetica Neue DeskInterface is now default.
//
// Solution: If building with the 10.7 SDK or lower and the user's platform
// is 10.9 or higher at runtime, substitute the correct font. This needs to
// happen before the QApplication is created.
#if defined(MAC_OS_X_VERSION_MAX_ALLOWED) && MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
    if (floor(NSAppKitVersionNumber) > NSAppKitVersionNumber10_8)
    {
        if (floor(NSAppKitVersionNumber) <= NSAppKitVersionNumber10_9)
            /* On a 10.9 - 10.9.x system */
            QFont::insertSubstitution(".Lucida Grande UI", "Lucida Grande");
        else
        {
            /* 10.10 or later system */
            if (language == "zh_CN" || language == "zh_TW" || language == "zh_HK") // traditional or simplified Chinese
              QFont::insertSubstitution(".Helvetica Neue DeskInterface", "Heiti SC");
            else if (language == "ja") // Japanese
              QFont::insertSubstitution(".Helvetica Neue DeskInterface", "Songti SC");
            else
              QFont::insertSubstitution(".Helvetica Neue DeskInterface", "Lucida Grande");
        }
    }
#endif
#endif
}

// Event filter that wraps over-long plain-text tooltips in <qt>...</qt> so Qt
// renders them as (word-wrapping) rich text.
ToolTipToRichTextFilter::ToolTipToRichTextFilter(int size_threshold, QObject *parent) :
    QObject(parent), size_threshold(size_threshold)
{

}

bool ToolTipToRichTextFilter::eventFilter(QObject *obj, QEvent *evt)
{
    if(evt->type() == QEvent::ToolTipChange)
    {
        QWidget *widget = static_cast<QWidget*>(obj);
        QString tooltip = widget->toolTip();
        if(tooltip.size() > size_threshold && !tooltip.startsWith("<qt") && !Qt::mightBeRichText(tooltip))
        {
            // Envelope with <qt></qt> to make sure Qt detects this as rich text
            // Escape the current message as HTML and replace \n by <br>
            tooltip = "<qt>" + HtmlEscape(tooltip, true) + "</qt>";
            widget->setToolTip(tooltip);
            return true;
        }
    }
    return QObject::eventFilter(obj, evt);
}

// Subscribes the fixer to the header's resize/geometry signals.
void TableViewLastColumnResizingFixer::connectViewHeadersSignals()
{
    connect(tableView->horizontalHeader(), SIGNAL(sectionResized(int,int,int)), this,
        SLOT(on_sectionResized(int,int,int)));
    connect(tableView->horizontalHeader(), SIGNAL(geometriesChanged()), this,
        SLOT(on_geometriesChanged()));
}

// We need to disconnect these while handling the resize events, otherwise we can enter infinite loops.
void TableViewLastColumnResizingFixer::disconnectViewHeadersSignals()
{
    disconnect(tableView->horizontalHeader(), SIGNAL(sectionResized(int,int,int)), this,
        SLOT(on_sectionResized(int,int,int)));
    disconnect(tableView->horizontalHeader(), SIGNAL(geometriesChanged()), this,
        SLOT(on_geometriesChanged()));
}

// Setup the resize mode, handles compatibility for Qt5 and below as the method signatures changed.
// Refactored here for readability.
void TableViewLastColumnResizingFixer::setViewHeaderResizeMode(int logicalIndex, QHeaderView::ResizeMode resizeMode)
{
#if QT_VERSION < 0x050000
    tableView->horizontalHeader()->setResizeMode(logicalIndex, resizeMode);
#else
    tableView->horizontalHeader()->setSectionResizeMode(logicalIndex, resizeMode);
#endif
}

// Sets the column width on both the view and its header section.
void TableViewLastColumnResizingFixer::resizeColumn(int nColumnIndex, int width)
{
    tableView->setColumnWidth(nColumnIndex, width);
    tableView->horizontalHeader()->resizeSection(nColumnIndex, width);
}

// Sum of all current column (header section) widths.
int TableViewLastColumnResizingFixer::getColumnsWidth()
{
    int nColumnsWidthSum = 0;
    for (int i = 0; i < columnCount; i++)
    {
        nColumnsWidthSum += tableView->horizontalHeader()->sectionSize(i);
    }
    return nColumnsWidthSum;
}

// Width the given column may grow to: the table width minus all other
// columns, but never less than the configured last-column minimum.
int TableViewLastColumnResizingFixer::getAvailableWidthForColumn(int column)
{
    int nResult = lastColumnMinimumWidth;
    int nTableWidth = tableView->horizontalHeader()->width();

    if (nTableWidth > 0)
    {
        int nOtherColsWidth = getColumnsWidth() - tableView->horizontalHeader()->sectionSize(column);
        nResult = std::max(nResult, nTableWidth - nOtherColsWidth);
    }

    return nResult;
}

// Make sure we don't make the columns wider than the tables viewport width.
void TableViewLastColumnResizingFixer::adjustTableColumnsWidth() { disconnectViewHeadersSignals(); resizeColumn(lastColumnIndex, getAvailableWidthForColumn(lastColumnIndex)); connectViewHeadersSignals(); int nTableWidth = tableView->horizontalHeader()->width(); int nColsWidth = getColumnsWidth(); if (nColsWidth > nTableWidth) { resizeColumn(secondToLastColumnIndex,getAvailableWidthForColumn(secondToLastColumnIndex)); } } // Make column use all the space available, useful during window resizing. void TableViewLastColumnResizingFixer::stretchColumnWidth(int column) { disconnectViewHeadersSignals(); resizeColumn(column, getAvailableWidthForColumn(column)); connectViewHeadersSignals(); } // When a section is resized this is a slot-proxy for ajustAmountColumnWidth(). void TableViewLastColumnResizingFixer::on_sectionResized(int logicalIndex, int oldSize, int newSize) { adjustTableColumnsWidth(); int remainingWidth = getAvailableWidthForColumn(logicalIndex); if (newSize > remainingWidth) { resizeColumn(logicalIndex, remainingWidth); } } // When the tabless geometry is ready, we manually perform the stretch of the "Message" column, // as the "Stretch" resize mode does not allow for interactive resizing. 
// Stretches the second-to-last column once the header geometry is known,
// but only when the columns do not already fill the table width exactly.
void TableViewLastColumnResizingFixer::on_geometriesChanged()
{
    if ((getColumnsWidth() - this->tableView->horizontalHeader()->width()) != 0)
    {
        disconnectViewHeadersSignals();
        resizeColumn(secondToLastColumnIndex, getAvailableWidthForColumn(secondToLastColumnIndex));
        connectViewHeadersSignals();
    }
}

/**
 * Initializes all internal variables and prepares the
 * resize modes of the last 2 columns of the table.
 */
TableViewLastColumnResizingFixer::TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth) :
    tableView(table),
    lastColumnMinimumWidth(lastColMinimumWidth),
    allColumnsMinimumWidth(allColsMinimumWidth)
{
    columnCount = tableView->horizontalHeader()->count();
    lastColumnIndex = columnCount - 1;
    secondToLastColumnIndex = columnCount - 2;
    tableView->horizontalHeader()->setMinimumSectionSize(allColumnsMinimumWidth);
    setViewHeaderResizeMode(secondToLastColumnIndex, QHeaderView::Interactive);
    setViewHeaderResizeMode(lastColumnIndex, QHeaderView::Interactive);
}

#ifdef WIN32
// Path of the Startup-folder shortcut used for run-at-login on Windows.
boost::filesystem::path static StartupShortcutPath()
{
    return GetSpecialFolderPath(CSIDL_STARTUP) / "Bitradio.lnk";
}

bool GetStartOnSystemStartup()
{
    // check for Bitradio.lnk
    return boost::filesystem::exists(StartupShortcutPath());
}

// Creates or removes the Startup-folder shortcut via the IShellLink COM API.
bool SetStartOnSystemStartup(bool fAutoStart)
{
    // If the shortcut exists already, remove it for updating
    boost::filesystem::remove(StartupShortcutPath());

    if (fAutoStart)
    {
        CoInitialize(NULL);

        // Get a pointer to the IShellLink interface.
        IShellLink* psl = NULL;
        HRESULT hres = CoCreateInstance(CLSID_ShellLink, NULL,
                                CLSCTX_INPROC_SERVER, IID_IShellLink,
                                reinterpret_cast<void**>(&psl));

        if (SUCCEEDED(hres))
        {
            // Get the current executable path
            TCHAR pszExePath[MAX_PATH];
            GetModuleFileName(NULL, pszExePath, sizeof(pszExePath));

            TCHAR pszArgs[5] = TEXT("-min");

            // Set the path to the shortcut target
            psl->SetPath(pszExePath);
            PathRemoveFileSpec(pszExePath);
            psl->SetWorkingDirectory(pszExePath);
            psl->SetShowCmd(SW_SHOWMINNOACTIVE);
            psl->SetArguments(pszArgs);

            // Query IShellLink for the IPersistFile interface for
            // saving the shortcut in persistent storage.
            IPersistFile* ppf = NULL;
            hres = psl->QueryInterface(IID_IPersistFile, reinterpret_cast<void**>(&ppf));
            if (SUCCEEDED(hres))
            {
                WCHAR pwsz[MAX_PATH];
                // Ensure that the string is ANSI.
                MultiByteToWideChar(CP_ACP, 0, StartupShortcutPath().string().c_str(), -1, pwsz, MAX_PATH);
                // Save the link by calling IPersistFile::Save.
                hres = ppf->Save(pwsz, TRUE);
                ppf->Release();
                psl->Release();
                CoUninitialize();
                return true;
            }
            psl->Release();
        }
        CoUninitialize();
        return false;
    }
    return true;
}

#elif defined(Q_OS_LINUX)

// Follow the Desktop Application Autostart Spec:
// http://standards.freedesktop.org/autostart-spec/autostart-spec-latest.html

// XDG autostart directory ($XDG_CONFIG_HOME/autostart, falling back to
// $HOME/.config/autostart; empty path when neither env var is set).
boost::filesystem::path static GetAutostartDir()
{
    namespace fs = boost::filesystem;

    char* pszConfigHome = getenv("XDG_CONFIG_HOME");
    if (pszConfigHome) return fs::path(pszConfigHome) / "autostart";
    char* pszHome = getenv("HOME");
    if (pszHome) return fs::path(pszHome) / ".config" / "autostart";
    return fs::path();
}

boost::filesystem::path static GetAutostartFilePath()
{
    return GetAutostartDir() / "Bitradio.desktop";
}

bool GetStartOnSystemStartup()
{
    boost::filesystem::ifstream optionFile(GetAutostartFilePath());
    if (!optionFile.good())
        return false;
    // Scan through file for "Hidden=true":
    std::string line;
    while (!optionFile.eof())
    {
        getline(optionFile, line);
        if (line.find("Hidden") != std::string::npos &&
            line.find("true") != std::string::npos)
            return false;
    }
    optionFile.close();

    return true;
}

// Writes (or removes) the autostart .desktop entry.
bool SetStartOnSystemStartup(bool fAutoStart)
{
    if (!fAutoStart)
        boost::filesystem::remove(GetAutostartFilePath());
    else
    {
        char pszExePath[MAX_PATH+1];
        memset(pszExePath, 0, sizeof(pszExePath));
        if (readlink("/proc/self/exe", pszExePath, sizeof(pszExePath)-1) == -1)
            return false;

        boost::filesystem::create_directories(GetAutostartDir());

        boost::filesystem::ofstream optionFile(GetAutostartFilePath(), std::ios_base::out|std::ios_base::trunc);
        if (!optionFile.good())
            return false;
        // Write a Bitradio.desktop file to the autostart directory:
        optionFile << "[Desktop Entry]\n";
        optionFile << "Type=Application\n";
        optionFile << "Name=Bitradio\n";
        optionFile << "Exec=" << pszExePath << " -min\n";
        optionFile << "Terminal=false\n";
        optionFile << "Hidden=false\n";
        optionFile.close();
    }
    return true;
}

#elif defined(Q_OS_MAC)
// based on: https://github.com/Mozketo/LaunchAtLoginController/blob/master/LaunchAtLoginController.m

#include <CoreFoundation/CoreFoundation.h>
#include <CoreServices/CoreServices.h>

LSSharedFileListItemRef findStartupItemInList(LSSharedFileListRef list, CFURLRef findUrl);
LSSharedFileListItemRef findStartupItemInList(LSSharedFileListRef list, CFURLRef findUrl)
{
    // loop through the list of startup items and try to find the Bitradio app
    CFArrayRef listSnapshot = LSSharedFileListCopySnapshot(list, NULL);
    for(int i = 0; i < CFArrayGetCount(listSnapshot); i++) {
        LSSharedFileListItemRef item = (LSSharedFileListItemRef)CFArrayGetValueAtIndex(listSnapshot, i);
        UInt32 resolutionFlags = kLSSharedFileListNoUserInteraction | kLSSharedFileListDoNotMountVolumes;
        CFURLRef currentItemURL = NULL;
        LSSharedFileListItemResolve(item, resolutionFlags, &currentItemURL, NULL);
        if(currentItemURL && CFEqual(currentItemURL, findUrl)) {
            // found
            CFRelease(currentItemURL);
            return item;
        }
        if(currentItemURL) {
            CFRelease(currentItemURL);
        }
    }
    return NULL;
}

bool GetStartOnSystemStartup()
{
    CFURLRef bitcoinAppUrl = CFBundleCopyBundleURL(CFBundleGetMainBundle());
    LSSharedFileListRef loginItems = LSSharedFileListCreate(NULL, kLSSharedFileListSessionLoginItems, NULL);
    LSSharedFileListItemRef foundItem = findStartupItemInList(loginItems, bitcoinAppUrl);
    return !!foundItem; // return boolified object
}

// Adds/removes the app bundle to/from the user's login items.
bool SetStartOnSystemStartup(bool fAutoStart)
{
    CFURLRef bitcoinAppUrl = CFBundleCopyBundleURL(CFBundleGetMainBundle());
    LSSharedFileListRef loginItems = LSSharedFileListCreate(NULL, kLSSharedFileListSessionLoginItems, NULL);
    LSSharedFileListItemRef foundItem = findStartupItemInList(loginItems, bitcoinAppUrl);

    if(fAutoStart && !foundItem) {
        // add Bitradio app to startup item list
        LSSharedFileListInsertItemURL(loginItems, kLSSharedFileListItemBeforeFirst, NULL, NULL, bitcoinAppUrl, NULL, NULL);
    }
    else if(!fAutoStart && foundItem) {
        // remove item
        LSSharedFileListItemRemove(loginItems, foundItem);
    }
    return true;
}

#else

// Unsupported platforms: start-on-login is unavailable.
bool GetStartOnSystemStartup() { return false; }
bool SetStartOnSystemStartup(bool fAutoStart) { return false; }

#endif

// Help dialog: assembles the usage header, core options and UI options text.
HelpMessageBox::HelpMessageBox(QWidget *parent) :
    QMessageBox(parent)
{
    header = tr("Bitradio-Qt") + " " + tr("version") + " " +
        QString::fromStdString(FormatFullVersion()) + "\n\n" +
        tr("Usage:") + "\n" +
        " Bitradio-qt [" + tr("command-line options") + "] " + "\n";

    coreOptions = QString::fromStdString(HelpMessage());

    uiOptions = tr("UI options") + ":\n" +
        " -lang=<lang> " + tr("Set language, for example \"de_DE\" (default: system locale)") + "\n" +
        " -min " + tr("Start minimized") + "\n" +
        " -splash " + tr("Show splash screen on startup (default: 1)") + "\n";

    setWindowTitle(tr("Bitradio-Qt"));
    setTextFormat(Qt::PlainText);
    // setMinimumWidth is ignored for QMessageBox so put in non-breaking spaces to make it wider.
    setText(header + QString(QChar(0x2003)).repeated(50));
    setDetailedText(coreOptions + "\n" + uiOptions);
}

void HelpMessageBox::printToConsole()
{
    // On other operating systems, the expected action is to print the message to the console.
    QString strUsage = header + "\n" + coreOptions + "\n" + uiOptions;
    fprintf(stdout, "%s", strUsage.toStdString().c_str());
}

void HelpMessageBox::showOrPrint()
{
#if defined(WIN32)
    // On Windows, show a message box, as there is no stderr/stdout in windowed applications
    exec();
#else
    // On other operating systems, print help text to console
    printToConsole();
#endif
}

// Installs the application-wide dark theme stylesheet.
void SetBlackThemeQSS(QApplication& app)
{
    app.setStyleSheet("QWidget { background: rgb(41,44,48); }"
                      "QFrame { border: none; }"
                      "QComboBox { color: rgb(255,255,255); }"
                      "QComboBox QAbstractItemView::item { color: rgb(255,255,255); }"
                      "QPushButton { background: rgb(226,189,121); color: rgb(21,21,21); }"
                      "QDoubleSpinBox { background: rgb(63,67,72); color: rgb(255,255,255); border-color: rgb(194,194,194); }"
                      "QLineEdit { background: rgb(63,67,72); color: rgb(255,255,255); border-color: rgb(194,194,194); }"
                      "QTextEdit { background: rgb(63,67,72); color: rgb(255,255,255); }"
                      "QPlainTextEdit { background: rgb(63,67,72); color: rgb(255,255,255); }"
                      "QMenuBar { background: rgb(41,44,48); color: rgb(110,116,126); }"
                      "QMenu { background: rgb(30,32,36); color: rgb(222,222,222); }"
                      "QMenu::item:selected { background-color: rgb(48,140,198); }"
                      "QLabel { color: rgb(120,127,139); }"
                      "QScrollBar { color: rgb(255,255,255); }"
                      "QCheckBox { color: rgb(120,127,139); }"
                      "QRadioButton { color: rgb(120,127,139); }"
                      "QTabBar::tab { color: rgb(120,127,139); border: 1px solid rgb(78,79,83); border-bottom: none; padding: 5px; }"
                      "QTabBar::tab:selected { background: rgb(41,44,48); }"
                      "QTabBar::tab:!selected { background: rgb(24,26,30); margin-top: 2px; }"
                      "QTabWidget::pane { border: 1px solid rgb(78,79,83); }"
                      "QToolButton { background: rgb(30,32,36); color: rgb(116,122,134); border: none; border-left-color: rgb(30,32,36); border-left-style: solid; border-left-width: 6px; margin-top: 8px; margin-bottom: 8px; }"
                      "QToolButton:checked { color: rgb(255,255,255); border: none; border-left-color: rgb(215,173,94); border-left-style: solid; border-left-width: 6px; }"
                      "QProgressBar { color: rgb(149,148,148); border-color: rgb(255,255,255); border-width: 3px; border-style: solid; }"
                      "QProgressBar::chunk { background: rgb(255,255,255); }"
                      "QTreeView::item { background: rgb(41,44,48); color: rgb(212,213,213); }"
                      "QTreeView::item:selected { background-color: rgb(48,140,198); }"
                      "QTableView { background: rgb(66,71,78); color: rgb(212,213,213); gridline-color: rgb(157,160,165); }"
                      "QHeaderView::section { background: rgb(29,34,39); color: rgb(255,255,255); }"
                      "QToolBar { background: rgb(30,32,36); border: none; }");
}

// Puts the string on both the clipboard and the X11 primary selection.
void setClipboard(const QString& str)
{
    QApplication::clipboard()->setText(str, QClipboard::Clipboard);
    QApplication::clipboard()->setText(str, QClipboard::Selection);
}

#if BOOST_FILESYSTEM_VERSION >= 3
// UTF-8 aware conversions between QString and boost::filesystem::path.
boost::filesystem::path qstringToBoostPath(const QString &path)
{
    return boost::filesystem::path(path.toStdString(), utf8);
}

QString boostPathToQString(const boost::filesystem::path &path)
{
    return QString::fromStdString(path.string(utf8));
}
#else
#warning Conversion between boost path and QString can use invalid character encoding with boost_filesystem v2 and older
boost::filesystem::path qstringToBoostPath(const QString &path)
{
    return boost::filesystem::path(path.toStdString());
}

QString boostPathToQString(const boost::filesystem::path &path)
{
    return QString::fromStdString(path.string());
}
#endif

} // namespace GUIUtil
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Bridge between Blink's WebLayerTreeView interface and cc's threaded
// LayerTreeHost, wired to the mojo surfaces/gpu services.

#include "sky/viewer/platform/weblayertreeview_impl.h"

#include "base/message_loop/message_loop_proxy.h"
#include "cc/layers/layer.h"
#include "cc/output/begin_frame_args.h"
#include "cc/scheduler/begin_frame_source.h"
#include "cc/trees/layer_tree_host.h"
#include "mojo/cc/context_provider_mojo.h"
#include "mojo/cc/output_surface_mojo.h"
#include "mojo/converters/surfaces/surfaces_type_converters.h"
#include "mojo/services/public/cpp/view_manager/view.h"
#include "sky/engine/public/web/WebWidget.h"
#include "sky/viewer/cc/web_layer_impl.h"

namespace sky {

// Kicks off the (asynchronous) surface connection and creates the threaded
// LayerTreeHost; the compositor runs on compositor_message_loop_proxy while
// this object lives on the current (main) thread.
WebLayerTreeViewImpl::WebLayerTreeViewImpl(
    scoped_refptr<base::MessageLoopProxy> compositor_message_loop_proxy,
    mojo::SurfacesServicePtr surfaces_service,
    mojo::GpuPtr gpu_service)
    : widget_(NULL),
      view_(NULL),
      surfaces_service_(surfaces_service.Pass()),
      gpu_service_(gpu_service.Pass()),
      main_thread_compositor_task_runner_(base::MessageLoopProxy::current()),
      weak_factory_(this) {
  main_thread_bound_weak_ptr_ = weak_factory_.GetWeakPtr();
  surfaces_service_->CreateSurfaceConnection(
      base::Bind(&WebLayerTreeViewImpl::OnSurfaceConnectionCreated,
                 main_thread_bound_weak_ptr_));

  cc::LayerTreeSettings settings;

  // For web contents, layer transforms should scale up the contents of layers
  // to keep content always crisp when possible.
  settings.layer_transforms_should_scale_layer_contents = true;

  cc::SharedBitmapManager* shared_bitmap_manager = NULL;
  gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager = NULL;

  layer_tree_host_ =
      cc::LayerTreeHost::CreateThreaded(this,
                                        shared_bitmap_manager,
                                        gpu_memory_buffer_manager,
                                        settings,
                                        base::MessageLoopProxy::current(),
                                        compositor_message_loop_proxy,
                                        nullptr);
  DCHECK(layer_tree_host_);
}

WebLayerTreeViewImpl::~WebLayerTreeViewImpl() {
  layer_tree_host_.reset();
}

// --- cc::LayerTreeHostClient notifications (main-frame lifecycle) ---

void WebLayerTreeViewImpl::WillBeginMainFrame(int frame_id) {
}

void WebLayerTreeViewImpl::DidBeginMainFrame() {
}

// Converts cc's BeginFrameArgs (TimeTicks/TimeDelta) to the seconds-based
// blink::WebBeginFrameArgs and forwards the frame to the widget.
void WebLayerTreeViewImpl::BeginMainFrame(const cc::BeginFrameArgs& args) {
  VLOG(2) << "WebLayerTreeViewImpl::BeginMainFrame";
  double frame_time_sec = (args.frame_time - base::TimeTicks()).InSecondsF();
  double deadline_sec = (args.deadline - base::TimeTicks()).InSecondsF();
  double interval_sec = args.interval.InSecondsF();
  blink::WebBeginFrameArgs web_begin_frame_args(
      frame_time_sec, deadline_sec, interval_sec);
  widget_->beginFrame(web_begin_frame_args);
}

void WebLayerTreeViewImpl::Layout() {
  widget_->layout();
}

void WebLayerTreeViewImpl::ApplyViewportDeltas(
    const gfx::Vector2d& scroll_delta,
    float page_scale,
    float top_controls_delta) {
}

void WebLayerTreeViewImpl::ApplyViewportDeltas(const gfx::Vector2d& inner_delta,
                                               const gfx::Vector2d& outer_delta,
                                               float page_scale,
                                               float top_controls_delta){
}

// NOTE(review): `fallback` is ignored and the cached output_surface_ is handed
// over unconditionally — after the first call output_surface_ is null; confirm
// this matches the era's cc contract (surface is recreated via
// OnSurfaceConnectionCreated).
void WebLayerTreeViewImpl::RequestNewOutputSurface(bool fallback) {
  layer_tree_host_->SetOutputSurface(output_surface_.Pass());
}

void WebLayerTreeViewImpl::DidInitializeOutputSurface() {
}

void WebLayerTreeViewImpl::WillCommit() {
}

void WebLayerTreeViewImpl::DidCommit() {
  widget_->didCommitFrameToCompositor();
}

void WebLayerTreeViewImpl::DidCommitAndDrawFrame() {
}

void WebLayerTreeViewImpl::DidCompleteSwapBuffers() {
}

// --- blink::WebLayerTreeView implementation ---

void WebLayerTreeViewImpl::setSurfaceReady() {
}

void WebLayerTreeViewImpl::setRootLayer(const blink::WebLayer& layer) {
  layer_tree_host_->SetRootLayer(
      static_cast<const sky_viewer_cc::WebLayerImpl*>(&layer)->layer());
}

void WebLayerTreeViewImpl::clearRootLayer() {
  layer_tree_host_->SetRootLayer(scoped_refptr<cc::Layer>());
}

void WebLayerTreeViewImpl::setViewportSize(
    const blink::WebSize& device_viewport_size) {
  layer_tree_host_->SetViewportSize(device_viewport_size);
}

blink::WebSize WebLayerTreeViewImpl::deviceViewportSize() const {
  return layer_tree_host_->device_viewport_size();
}

void WebLayerTreeViewImpl::setDeviceScaleFactor(float device_scale_factor) {
  layer_tree_host_->SetDeviceScaleFactor(device_scale_factor);
}

float WebLayerTreeViewImpl::deviceScaleFactor() const {
  return layer_tree_host_->device_scale_factor();
}

void WebLayerTreeViewImpl::setBackgroundColor(blink::WebColor color) {
  layer_tree_host_->set_background_color(color);
}

void WebLayerTreeViewImpl::setHasTransparentBackground(
    bool has_transparent_background) {
  layer_tree_host_->set_has_transparent_background(has_transparent_background);
}

void WebLayerTreeViewImpl::setOverhangBitmap(const SkBitmap& bitmap) {
  layer_tree_host_->SetOverhangBitmap(bitmap);
}

void WebLayerTreeViewImpl::setVisible(bool visible) {
  layer_tree_host_->SetVisible(visible);
}

// Attaches the layer's animation controller to the host's registrar so its
// animations are ticked by this compositor.
void WebLayerTreeViewImpl::registerForAnimations(blink::WebLayer* layer) {
  cc::Layer* cc_layer = static_cast<sky_viewer_cc::WebLayerImpl*>(layer)->layer();
  cc_layer->layer_animation_controller()->SetAnimationRegistrar(
      layer_tree_host_->animation_registrar());
}

void WebLayerTreeViewImpl::registerViewportLayers(
    const blink::WebLayer* pageScaleLayer,
    const blink::WebLayer* innerViewportScrollLayer,
    const blink::WebLayer* outerViewportScrollLayer) {
  layer_tree_host_->RegisterViewportLayers(
      static_cast<const sky_viewer_cc::WebLayerImpl*>(pageScaleLayer)->layer(),
      static_cast<const sky_viewer_cc::WebLayerImpl*>(innerViewportScrollLayer)
          ->layer(),
      // The outer viewport layer will only exist when using pinch virtual
      // viewports.
      outerViewportScrollLayer
          ? static_cast<const sky_viewer_cc::WebLayerImpl*>(
                outerViewportScrollLayer)->layer()
          : NULL);
}

void WebLayerTreeViewImpl::clearViewportLayers() {
  layer_tree_host_->RegisterViewportLayers(scoped_refptr<cc::Layer>(),
                                           scoped_refptr<cc::Layer>(),
                                           scoped_refptr<cc::Layer>());
}

void WebLayerTreeViewImpl::setNeedsAnimate() {
  layer_tree_host_->SetNeedsAnimate();
}

bool WebLayerTreeViewImpl::commitRequested() const {
  return layer_tree_host_->CommitRequested();
}

void WebLayerTreeViewImpl::finishAllRendering() {
  layer_tree_host_->FinishAllRendering();
}

// Builds the mojo-backed OutputSurface once the surface connection exists,
// then tells the host the client is ready.
void WebLayerTreeViewImpl::OnSurfaceConnectionCreated(mojo::SurfacePtr surface,
                                                      uint32_t id_namespace) {
  mojo::CommandBufferPtr cb;
  gpu_service_->CreateOffscreenGLES2Context(GetProxy(&cb));
  scoped_refptr<cc::ContextProvider> context_provider(
      new mojo::ContextProviderMojo(cb.PassMessagePipe()));
  output_surface_.reset(new mojo::OutputSurfaceMojo(
      this, context_provider, surface.Pass(), id_namespace));
  layer_tree_host_->SetLayerTreeHostClientReady();
}

// May be called on the compositor thread; bounces to the main thread, where
// view_ is owned.
void WebLayerTreeViewImpl::DidCreateSurface(cc::SurfaceId id) {
  main_thread_compositor_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&WebLayerTreeViewImpl::DidCreateSurfaceOnMainThread,
                 main_thread_bound_weak_ptr_,
                 id));
}

void WebLayerTreeViewImpl::DidCreateSurfaceOnMainThread(cc::SurfaceId id) {
  view_->SetSurfaceId(mojo::SurfaceId::From(id));
}

}  // namespace sky
//$Id$
//------------------------------------------------------------------------------
//                              Integrator
//------------------------------------------------------------------------------
// GMAT: General Mission Analysis Tool.
//
// Copyright (c) 2002 - 2020 United States Government as represented by the
// Administrator of the National Aeronautics and Space Administration.
// All Other Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// You may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
// http://www.apache.org/licenses/LICENSE-2.0.
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied.   See the License for the specific language
// governing permissions and limitations under the License.
//
// *** File Name : Integrator.hpp
// *** Created   : October 1, 2002
// **************************************************************************
// ***  Developed By  :  Thinking Systems, Inc. (www.thinksysinc.com)     ***
// ***  For:  Flight Dynamics Analysis Branch (Code 572)                  ***
// ***  Under Contract:  P.O.  GSFC S-66617-G                             ***
// ***                                                                    ***
// ***  This software is subject to the Software Usage Agreement described***
// ***  by NASA Case Number GSC-14735-1.  The Software Usage Agreement    ***
// ***  must be included in any distribution.  Removal of this header is  ***
// ***  strictly prohibited.                                              ***
// ***                                                                    ***
// ***                                                                    ***
// ***  Header Version: July 12, 2002                                     ***
// **************************************************************************
// Module Type               : ANSI C++ Source
// Development Environment   : Visual C++ 7.0
// Modification History      : 11/26/2002 - D. Conway, Thinking Systems, Inc.
//                             Original delivery
//
//                           : 1/7/2003 - D. Conway, Thinking Systems, Inc.
//                             Updated interfaces based on GSFC feedback
//
//                           : 2/19/2003 - D.
Conway, Thinking Systems, Inc. // Removed parms for fixed step mode // // : 3/3/2003 - D. Conway, Thinking Systems, Inc. // Added error control threshold; moved time // granularity into this class // // : 09/26/2003 - W. Waktola, // Mission Applications Branch // Changes: // - Updated style using GMAT cpp style guide // // : 10/10/2003 - W. Waktola, // Mission Applications Branch // Changes: // - SetParameter() to SetRealParameter() // - GetParameter() to GetRealParameter() // - virtual char* // GetParameterName(const int parm) const to // virtual std::string // GetParameterName(const int parm) const // // : 10/16/2003 - W. Waktola, // Missions Applications Branch // Changes: // - All double types to Real types // - All primitive int types to Integer types // Removals: // - GetParameterName(), replaced by // GetParameterText() // Additions: // - IntegratorParamCount // - PARAMTER_TEXT[] // - PARAMETER_TYPE[] // - GetIntegerParameter() // - SetIntegerParameter() // - GetParameterText() // - GetParameterID() // - GetParameterType() // - GetParameterTypeString() // // ************************************************************************** /** \brief Base class for numerical integrators * * This class provides the interfaces needed to numerically integrate first * order differential equations. The basic equations that are solved must * have the form * * \f[ {{dr^i}\over{dt}} = f(t,r) \f] * * The right hand side of this equation, \f$ f(t,r) \f$, is provided by the * PhysicalModel class instance that is passed to the Integrator. Given this * instance, the Integrator classes solve the equation above for * \f$ r^i(t+\delta t) \f$, given \f$ r^i(t) \f$ and an integration step * \f$ \delta t \f$. * * Most Integrators provide the capability to estimate the accuracy of the * integration and adapt the stepsize accordingly. The member variable * tolerance is used to specify the desired integration accuracy. 
* * Sometimes you want to keep the maximum step taken at some fixed value. This * implementation provides a mechanism for specifying a maximum allowed step by * toggling the useMaxStep parameter. It can be convenient to request steps of * a specified size, regardless of the stepsize error control algorithm. This * class provides a Boolean flag (useFixedInterval) that can be used for that * purpose, and a specialized stepping method (StepFixedInterval(dt))to take the * step. * * This class provides data structures that can be used to run in fixed step * mode. These parameters (fixedStep, fixedStepsize) are not used by this base * class; classes derived from this one use the parameters when implementing * fixed step propagation. The recommended approach is to code the Step() * method so that if the integrator is running in fixed step mode, the system * takes multiple steps to achieve propagation across the requested * (fixedStepsize) interval while maintaining the specified integration * accuracy, as specified by the tolerance variable. 
 */

#ifndef Integrator_hpp
#define Integrator_hpp

#include "gmatdefs.hpp"
#include "Propagator.hpp"

class GMAT_API Integrator : public Propagator
{
public:
   Integrator(const std::string &typeStr, const std::string &nomme = "");
   Integrator(const Integrator&);
   Integrator& operator=(const Integrator& i);
   virtual ~Integrator();

   // Parameter accessor methods -- overridden from GmatBase
   virtual std::string GetParameterText(const Integer id) const;
   virtual Integer GetParameterID(const std::string &str) const;
   virtual Gmat::ParameterType GetParameterType(const Integer id) const;
   virtual std::string GetParameterTypeString(const Integer id) const;
   virtual bool IsParameterReadOnly(const Integer id) const;
   virtual Real GetRealParameter(const Integer id) const;
   virtual Real GetRealParameter(const std::string &label) const;
   virtual Real SetRealParameter(const Integer id, const Real value);
   virtual Real SetRealParameter(const std::string &label, const Real value);
   virtual Integer GetIntegerParameter(const Integer id) const;
   virtual Integer GetIntegerParameter(const std::string &label) const;
   virtual Integer SetIntegerParameter(const Integer id, const Integer value);
   virtual Integer SetIntegerParameter(const std::string &label, const Integer value);
   virtual bool GetBooleanParameter(const Integer id) const;
   virtual bool SetBooleanParameter(const Integer id, const bool value);
   virtual bool TakeAction(const std::string &action, const std::string &actionData = "");

   // Sets the force model whose derivatives this integrator propagates
   virtual void SetPhysicalModel(PhysicalModel *pPhysicalModel);
   virtual Real GetStepTaken();
   virtual bool UsesErrorControl();

   //--------------------------------------------------------------------------
   // virtual void Initialize()
   //--------------------------------------------------------------------------
   /**
    * Method used to initialize the integrator
    *
    * Each integrator will have class specific work to do to setup the initial
    * data structures and data elements.  This method is provided for that
    * purpose.  At a minimum, be sure to call the Propagator::Initialize()
    * method.
    *
    * @return true upon successful initialization.
    */
   //--------------------------------------------------------------------------
   virtual bool Initialize() = 0;
   virtual bool RawStep() = 0;
   virtual Integer GetPropagatorOrder(void) const;

protected:
   //------------------------------------------------------------------------------
   // virtual Real EstimateError()
   //------------------------------------------------------------------------------
   /**
    * Interface used to estimate the error in the current step
    *
    * This method definition is provided to make the interface to the error
    * estimation routine consistent for all integrators.  The method should
    * calculate a local estimate of the error from the integration and return
    * the largest error estimate found.  There are several alternatives that
    * users of this class can implement: the error could be calculated based on
    * the largest error in the individual components of the state vector, as
    * the magnitude of the state vector (that is, the L2 (rss) norm of the
    * error estimate vector).  The estimated error should never be negative,
    * so a return value less than 0.0 can be used to indicate an error
    * condition.
    *
    * For convenience, a member array pointer, errorEstimates, is provided in
    * this class.  This array is not initialized -- derived classes take
    * responsibility for allocating and freeing the memory for this array based
    * on their estimation needs.
    *
    * @return The largest error found in the estimate
    */
   //------------------------------------------------------------------------------
   virtual Real EstimateError() = 0;

   //------------------------------------------------------------------------------
   // virtual bool AdaptStep(Real maxerror)
   //------------------------------------------------------------------------------
   /**
    * Interface used to change the stepsize of the current step
    *
    * This method adapts the step to a step compatible with the desired
    * integration accuracy.  The implementation of this method is algorithm
    * specific.  Implementors should be sure to use the minimumStep and
    * maximumStep parameters to ensure that the stepsize does not grow or
    * shrink without bounds, and thus escape from the domain of interest for
    * the system.
    *
    * @param maxerror The largest error found in the propagation step.
    *
    * @return true if the step was adapted correctly, false otherwise
    */
   //------------------------------------------------------------------------------
   virtual bool AdaptStep(Real maxerror) = 0;

   enum
   {
      ACCURACY = PropagatorParamCount,  // Accuracy parameter for Integrators
      ERROR_THRESHOLD,           // Error control threshold for Integrators
      SMALLEST_INTERVAL,         // Smallest time interval -- used to hedge fixed step mode
      MIN_STEP,                  // Minimum stepsize for the Integrator -- smaller steps fail
      MAX_STEP,                  // Maximum stepsize for the Integrator -- larger steps get truncated
      MAX_STEP_ATTEMPTS,         // Number of attempts to take before giving up
      STOP_IF_ACCURACY_VIOLATED,
      IntegratorParamCount
   };

   // Start with the parameter IDs and associates strings
   static const std::string PARAMETER_TEXT[IntegratorParamCount - PropagatorParamCount];
   static const Gmat::ParameterType PARAMETER_TYPE[IntegratorParamCount - PropagatorParamCount];

   /// The level of "acceptable" relative error for the integrator
   Real tolerance;
   /// Flag used to activate fixed step mode
   bool fixedStep;
   /// Step to take in fixed step mode
   Real fixedStepsize;
   /// Minimum allowed step to take (always positive - sign handled elsewhere)
   Real minimumStep;
   /// Maximum allowed step to take (always positive - sign handled elsewhere)
   Real maximumStep;
   /// Accuracy of the time step interval
   Real smallestTime;
   /// Number of failed attempts tried
   Integer stepAttempts;
   /// Number of failed attempts allowed before reporting failure
   Integer maxStepAttempts;
   /// Flag indicating whether or not execution should stop if/when the accuracy is violated
   bool stopIfAccuracyViolated;
   /// Flag indicating whether or not the warning for the accuracy violation has already been
   /// written, for this integrator, for this run
   bool accuracyWarningTriggered;
   /// String used to indicate object type in some warning messages
   std::string typeSource;
   /// Actual interval taken by the step
   Real stepTaken;
   /// Remaining time for a specified or fixed timestep
   Real timeleft;
   /// Derivative array pointer (obtained from the PhysicalModel instance)
   const Real * ddt;
   /// An array of the error estimates, sized by the dimension of the system
   Real * errorEstimates;
   /// Error control threshold used when estimating step error
   Real errorThreshold;
   /// Indicator for the integrator derivative order -- 2 for Nystrom methods
   Integer derivativeOrder;
   /// Flag indicating whether integrator has error control
   bool hasErrorControl;
};

#endif
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
////////////////////////////////////////////////////////////////////////////////

#ifndef LBANN_EXECUTION_ALGORITHMS_KFAC_KFAC_UTIL_HPP_INCLUDED
#define LBANN_EXECUTION_ALGORITHMS_KFAC_KFAC_UTIL_HPP_INCLUDED

#include "lbann/base.hpp"
#include "lbann/layers/learning/convolution.hpp"
#include "lbann/layers/learning/fully_connected.hpp"
#include "lbann/layers/learning/gru.hpp"
#include "lbann/layers/regularizers/batch_normalization.hpp"
#include "lbann/execution_algorithms/kfac/kfac_block.hpp"

// Forward declarations
namespace lbann {
class KFAC;
template <El::Device Device>
class kfac_block;
}

namespace lbann {
namespace kfac {

// How inverse computations are distributed across ranks.
enum class kfac_inverse_strategy {
  ALL,  // Apply round-robin assignment to all of the layers. May cause load imbalance.
  EACH, // Apply round-robin assignment to every type of layers. May
        // not work well for small networks.
  ROOT, // Use only the root GPU. This is only for testing.
};

// Communication primitive used to reduce Kronecker factors.
enum class kfac_reduce_scatter_mode {
  ALLREDUCE,      // Use lbann_comm::allreduce
  REDUCE_SCATTER, // Use El::ReduceScatter
  REDUCE,         // Use El::Reduce for each block
};

// Communication primitive used to gather inverse matrices.
enum class kfac_allgather_mode {
  ALLREDUCE, // Use lbann_comm::allreduce
  ALLGATHER, // Use El::AllGather
  BROADCAST  // Use El::Broadcast for each block
};

/** @brief Gets the inverse matrix of A. **/
template <El::Device Device>
void get_matrix_inverse(
    El::AbstractMatrix<DataType>& Ainv,
    El::AbstractMatrix<DataType>& Linv,
    const El::AbstractMatrix<DataType>& A,
    bool report_time,
    DataType damping,
    DataType damping_bn_err,
    bool is_bn,
    const El::SyncInfo<Device>& sync_info);

/** @brief Gets statistics of a given matrix. **/
template <El::Device Device>
std::string get_matrix_stat(
    const El::Matrix<DataType, Device>& X,
    const char *name);

/** @brief Perform all-reduce on the lower triangular of a symmetric matrix. **/
template <El::Device Device>
void allreduce_lower_tri(
    El::AbstractMatrix<DataType>& A,
    El::AbstractMatrix<DataType>& AL,
    lbann_comm *comm,
    const El::SyncInfo<Device>& sync_info);

/** @brief Get whether a global buffer is needed. **/
bool is_reduce_scatter_buffer_required(kfac_reduce_scatter_mode mode);

/** @brief Perform reduce-scatter on one or more blocks. **/
template <El::Device Device>
void reduce_scatter_blocks(
    const std::vector<std::pair<size_t, El::AbstractMatrix<DataType>*>>& blocks,
    El::Matrix<DataType, Device>& global_buffer,
    lbann_comm *comm,
    kfac_reduce_scatter_mode mode);

/** @brief Get whether local and global buffers are needed. **/
std::pair<bool, bool> is_allgather_buffer_required(kfac_allgather_mode mode);

/** @brief Perform allgather on one or more blocks. **/
template <El::Device Device>
void allgather_blocks(
    const std::vector<std::pair<size_t, El::AbstractMatrix<DataType>*>>& blocks,
    El::Matrix<DataType, Device>& send_buffer,
    El::Matrix<DataType, Device>& recv_buffer,
    lbann_comm *comm,
    kfac_allgather_mode mode);

/** @brief Perform allgather for inverse matrices **/
template <El::Device Device>
void allgather_inverse_matrices(
    const std::vector<std::shared_ptr<kfac_block<Device>>>& blocks,
    El::Matrix<DataType, Device>& global_buffer,
    lbann_comm *comm);

/** @brief Perform allgather for inverse matrices size**/
template <El::Device Device>
void allgather_inverse_matrices_sizes(
    const std::vector<std::shared_ptr<kfac_block<Device>>>& blocks,
    El::Matrix<double, El::Device::CPU>& global_buffer,
    lbann_comm *comm);

/** @brief Add the damping value to the diagonal elements of A. **/
template <El::Device Device>
void add_to_diagonal(
    El::Matrix<DataType, Device>& A,
    DataType value,
    DataType value_bn_err,
    bool is_bn,
    const El::SyncInfo<Device>& sync_info);

/** @brief Fill the upper triangular with the lower triangular. **/
template <El::Device Device>
void fill_upper_tri(
    El::Matrix<DataType, Device>& A,
    const El::SyncInfo<Device>& sync_info);

/** @brief Update a Kronecker factor matrix using decay.
 *
 * Aave = Aave * decay + A * (1-decay) **/
template <El::Device Device>
void update_kronecker_average(
    El::Matrix<DataType, Device>& Aave,
    const El::Matrix<DataType, Device>& A,
    size_t count, double decay,
    const El::SyncInfo<Device>& sync_info);

/** @brief Substitute the identity matrix.
 *  TODO: Replace with El::Identity<El::Device::GPU>
 *  once it gets supported. **/
template <El::Device Device>
void identity(
    El::Matrix<DataType, Device>& A,
    const El::SyncInfo<Device>& sync_info);

/** @brief Pack the lower triangular of a symmetric matrix. **/
template <El::Device Device>
void pack_lower_tri(
    El::Matrix<DataType, Device>& L,
    const El::Matrix<DataType, Device>& A,
    const El::SyncInfo<Device>& sync_info);

/** @brief Unpack the lower triangular of a symmetric matrix. **/
template <El::Device Device>
void unpack_lower_tri(
    El::Matrix<DataType, Device>& A,
    const El::Matrix<DataType, Device>& L,
    const El::SyncInfo<Device>& sync_info);

} // namespace kfac
} // namespace lbann

#endif // LBANN_EXECUTION_ALGORITHMS_KFAC_KFAC_UTIL_HPP_INCLUDED
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gmock/gmock.h> #include <gtest/gtest.h> #include <fastdds/dds/domain/DomainParticipantFactory.hpp> #include <fastdds/dds/domain/qos/DomainParticipantQos.hpp> #include <fastdds/dds/publisher/Publisher.hpp> #include <fastdds/dds/publisher/qos/PublisherQos.hpp> #include <fastdds/dds/subscriber/Subscriber.hpp> #include <fastdds/dds/subscriber/qos/SubscriberQos.hpp> #include <fastdds/dds/topic/qos/TopicQos.hpp> #include <dds/domain/DomainParticipant.hpp> #include <dds/domain/qos/DomainParticipantQos.hpp> #include <dds/core/types.hpp> #include <dds/sub/Subscriber.hpp> #include <dds/pub/Publisher.hpp> #include <dds/topic/Topic.hpp> namespace eprosima { namespace fastdds { namespace dds { // Mocked TopicDataType for Topic creation tests class TopicDataTypeMock : public TopicDataType { bool serialize( void* /*data*/, fastrtps::rtps::SerializedPayload_t* /*payload*/) override { return true; } bool deserialize( fastrtps::rtps::SerializedPayload_t* /*payload*/, void* /*data*/) override { return true; } std::function<uint32_t()> getSerializedSizeProvider( void* /*data*/) override { return std::function<uint32_t()>(); } void * createData() override { return nullptr; } void deleteData( void* /*data*/) override { } bool getKey( void* /*data*/, fastrtps::rtps::InstanceHandle_t* /*ihandle*/, bool /*force_md5*/) override { return true; } }; 
TEST(ParticipantTest, DomainParticipantFactoryGetInstance) { DomainParticipantFactory* factory = DomainParticipantFactory::get_instance(); ASSERT_NE(factory, nullptr); ASSERT_EQ(factory, DomainParticipantFactory::get_instance()); } TEST(ParticipantTests, ChangeDomainParticipantFactoryQos) { DomainParticipantFactoryQos qos; DomainParticipantFactory::get_instance()->get_qos(qos); ASSERT_EQ(qos.entity_factory().autoenable_created_entities, true); EntityFactoryQosPolicy entity_factory = qos.entity_factory(); entity_factory.autoenable_created_entities = false; qos.entity_factory(entity_factory); ASSERT_TRUE(DomainParticipantFactory::get_instance()->set_qos(qos) == ReturnCode_t::RETCODE_OK); DomainParticipantFactoryQos fqos; DomainParticipantFactory::get_instance()->get_qos(fqos); ASSERT_EQ(qos, fqos); ASSERT_EQ(fqos.entity_factory().autoenable_created_entities, false); } TEST(ParticipantTests, CreateDomainParticipant) { DomainParticipant* participant = DomainParticipantFactory::get_instance()->create_participant(0); ASSERT_NE(participant, nullptr); } TEST(ParticipantTests, CreatePSMDomainParticipant) { ::dds::domain::DomainParticipant participant = ::dds::core::null; participant = ::dds::domain::DomainParticipant(0, PARTICIPANT_QOS_DEFAULT); ASSERT_NE(participant, ::dds::core::null); } TEST(ParticipantTests, DeleteDomainParticipant) { DomainParticipant* participant = DomainParticipantFactory::get_instance()->create_participant(0); ASSERT_TRUE(DomainParticipantFactory::get_instance()->delete_participant(participant) == ReturnCode_t::RETCODE_OK); } TEST(ParticipantTests, ChangeDefaultParticipantQos) { DomainParticipantQos qos; DomainParticipantFactory::get_instance()->get_default_participant_qos(qos); ASSERT_EQ(qos, PARTICIPANT_QOS_DEFAULT); EntityFactoryQosPolicy entity_factory = qos.entity_factory(); entity_factory.autoenable_created_entities = false; qos.entity_factory(entity_factory); 
ASSERT_TRUE(DomainParticipantFactory::get_instance()->set_default_participant_qos(qos) == ReturnCode_t::RETCODE_OK); DomainParticipantQos pqos; DomainParticipantFactory::get_instance()->get_default_participant_qos(pqos); ASSERT_EQ(qos, pqos); ASSERT_EQ(pqos.entity_factory().autoenable_created_entities, false); } TEST(ParticipantTests, ChangePSMDefaultParticipantQos) { ::dds::domain::DomainParticipant participant = ::dds::domain::DomainParticipant(0, PARTICIPANT_QOS_DEFAULT); ::dds::domain::qos::DomainParticipantQos qos = participant.default_participant_qos(); ASSERT_EQ(qos, PARTICIPANT_QOS_DEFAULT); EntityFactoryQosPolicy entity_factory = qos.entity_factory(); entity_factory.autoenable_created_entities = false; qos.entity_factory(entity_factory); ASSERT_NO_THROW(participant.default_participant_qos(qos)); ::dds::domain::qos::DomainParticipantQos pqos = participant.default_participant_qos(); ASSERT_EQ(qos, pqos); ASSERT_EQ(pqos.entity_factory().autoenable_created_entities, false); } TEST(ParticipantTests, ChangeDomainParticipantQos) { DomainParticipant* participant = DomainParticipantFactory::get_instance()->create_participant(0); DomainParticipantQos qos; participant->get_qos(qos); ASSERT_EQ(qos, PARTICIPANT_QOS_DEFAULT); qos.entity_factory().autoenable_created_entities = false; ASSERT_TRUE(participant->set_qos(qos) == ReturnCode_t::RETCODE_OK); DomainParticipantQos pqos; participant->get_qos(pqos); ASSERT_FALSE(pqos == PARTICIPANT_QOS_DEFAULT); ASSERT_EQ(qos, pqos); ASSERT_EQ(qos.entity_factory().autoenable_created_entities, false); } TEST(ParticipantTests, ChangePSMDomainParticipantQos) { ::dds::domain::DomainParticipant participant = ::dds::core::null; participant = ::dds::domain::DomainParticipant(0, PARTICIPANT_QOS_DEFAULT); ::dds::domain::qos::DomainParticipantQos qos = participant.qos(); ASSERT_EQ(qos, PARTICIPANT_QOS_DEFAULT); qos.entity_factory().autoenable_created_entities = false; ASSERT_NO_THROW(participant.qos(qos)); 
::dds::domain::qos::DomainParticipantQos pqos; pqos = participant.qos(); ASSERT_FALSE(pqos == PARTICIPANT_QOS_DEFAULT); ASSERT_EQ(qos, pqos); ASSERT_EQ(qos.entity_factory().autoenable_created_entities, false); } TEST(ParticipantTests, CreatePublisher) { DomainParticipant* participant = DomainParticipantFactory::get_instance()->create_participant(0); Publisher* publisher = participant->create_publisher(PUBLISHER_QOS_DEFAULT); ASSERT_NE(publisher, nullptr); } TEST(ParticipantTests, CreatePSMPublisher) { ::dds::domain::DomainParticipant participant = ::dds::domain::DomainParticipant(0, PARTICIPANT_QOS_DEFAULT); ::dds::pub::Publisher publisher = ::dds::core::null; publisher = ::dds::pub::Publisher(participant); ASSERT_NE(publisher, ::dds::core::null); } TEST(ParticipantTests, CreateSubscriber) { DomainParticipant* participant = DomainParticipantFactory::get_instance()->create_participant(0); Subscriber* subscriber = participant->create_subscriber(SUBSCRIBER_QOS_DEFAULT); ASSERT_NE(subscriber, nullptr); } TEST(ParticipantTests, CreatePSMSubscriber) { ::dds::domain::DomainParticipant participant = ::dds::domain::DomainParticipant(0, PARTICIPANT_QOS_DEFAULT); ::dds::sub::Subscriber subscriber = ::dds::core::null; subscriber = ::dds::sub::Subscriber(participant, SUBSCRIBER_QOS_DEFAULT); ASSERT_NE(subscriber, ::dds::core::null); } TEST(ParticipantTests, DeletePublisher) { DomainParticipant* participant = DomainParticipantFactory::get_instance()->create_participant(0); Publisher* publisher = participant->create_publisher(PUBLISHER_QOS_DEFAULT); ASSERT_TRUE(participant->delete_publisher(publisher) == ReturnCode_t::RETCODE_OK); } TEST(ParticipantTests, DeleteSubscriber) { DomainParticipant* participant = DomainParticipantFactory::get_instance()->create_participant(0); Subscriber* subscriber = participant->create_subscriber(SUBSCRIBER_QOS_DEFAULT); ASSERT_TRUE(participant->delete_subscriber(subscriber) == ReturnCode_t::RETCODE_OK); } TEST(ParticipantTests, 
ChangeDefaultTopicQos) { DomainParticipant* participant = DomainParticipantFactory::get_instance()->create_participant(0); TopicQos qos; participant->get_default_topic_qos(qos); ASSERT_EQ(qos, TOPIC_QOS_DEFAULT); qos.reliability().kind = BEST_EFFORT_RELIABILITY_QOS; ASSERT_TRUE(participant->set_default_topic_qos(qos) == ReturnCode_t::RETCODE_OK); TopicQos tqos; participant->get_default_topic_qos(tqos); ASSERT_EQ(qos, tqos); ASSERT_EQ(tqos.reliability().kind, BEST_EFFORT_RELIABILITY_QOS); } TEST(ParticipantTests, ChangePSMDefaultTopicQos) { ::dds::domain::DomainParticipant participant = ::dds::domain::DomainParticipant(0); ::dds::topic::qos::TopicQos qos = participant.default_topic_qos(); ASSERT_EQ(qos, TOPIC_QOS_DEFAULT); qos.ownership().kind = EXCLUSIVE_OWNERSHIP_QOS; ASSERT_NO_THROW(participant.default_topic_qos(qos)); ::dds::topic::qos::TopicQos tqos = participant.default_topic_qos(); ASSERT_EQ(qos, tqos); ASSERT_EQ(tqos.ownership().kind, EXCLUSIVE_OWNERSHIP_QOS); } TEST(ParticipantTests, CreateTopic) { DomainParticipant* participant = DomainParticipantFactory::get_instance()->create_participant(0); TypeSupport type_(new TopicDataTypeMock()); participant->register_type(type_, "footype"); Topic* topic = participant->create_topic("footopic", "footype", TOPIC_QOS_DEFAULT); ASSERT_NE(topic, nullptr); } TEST(ParticipantTests, PSMCreateTopic) { ::dds::domain::DomainParticipant participant = ::dds::domain::DomainParticipant(0); TypeSupport type_(new TopicDataTypeMock()); participant->register_type(type_, "footype"); ::dds::topic::Topic topic = ::dds::core::null; topic = ::dds::topic::Topic(participant, "footopic", "footype", TOPIC_QOS_DEFAULT); ASSERT_NE(topic, ::dds::core::null); } TEST(ParticipantTests, DeleteTopic) { DomainParticipant* participant = DomainParticipantFactory::get_instance()->create_participant(0); DomainParticipant* participant2 = DomainParticipantFactory::get_instance()->create_participant(1); TypeSupport type_(new TopicDataTypeMock()); 
participant->register_type(type_, "footype"); Topic* topic = participant->create_topic("footopic", "footype", TOPIC_QOS_DEFAULT); ASSERT_TRUE(participant->delete_topic(nullptr) == ReturnCode_t::RETCODE_BAD_PARAMETER); ASSERT_TRUE(participant2->delete_topic(topic) == ReturnCode_t::RETCODE_PRECONDITION_NOT_MET); ASSERT_TRUE(participant->delete_topic(topic) == ReturnCode_t::RETCODE_OK); } } // namespace dds } // namespace fastdds } // namespace eprosima int main( int argc, char** argv) { testing::InitGoogleMock(&argc, argv); return RUN_ALL_TESTS(); }
/*
MIT License

Copyright (c) 2019 Advanced Micro Devices, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

//---------------------------------------------------------------------------------------
// Construction and execution of dependency graph using task scheduler
//---------------------------------------------------------------------------------------

#include "FEMFXAsyncThreading.h"
#include "FEMFXConstraintSolveTaskGraph.h"
#include "FEMFXConstraintSolver.h"
#include "FEMFXScene.h"

#if FM_CONSTRAINT_ISLAND_DEPENDENCY_GRAPH
namespace AMD
{
    // Allocate task graph and nodes needed for one outer iteration of constraint solve.
    // Ownership note: returns a heap-allocated graph; pair with
    // FmDestroyConstraintSolveTaskGraph() to release it and its node arrays.
    FmConstraintSolveTaskGraph* FmAllocConstraintSolveTaskGraph(
        FmScene* scene,
        FmConstraintSolverData* constraintSolverData,
        const FmConstraintIsland* constraintIsland,
        FmTaskFuncCallback partitionPairPgsTask,
        FmTaskFuncCallback partitionPairPgsTaskWrapped,   // Wrapped task is run within FmExecuteTask loop for tail call optimization
        FmTaskFuncCallback partitionMpcgTask,
        FmTaskFuncCallback partitionMpcgTaskWrapped,
        FmTaskFuncCallback partitionGsTask,
        FmTaskFuncCallback partitionGsTaskWrapped,
        FmTaskFuncCallback externalConstraintsTask,
        FmTaskFuncCallback externalConstraintsTaskWrapped)
    {
        FmConstraintSolveTaskGraph* taskGraph = new FmConstraintSolveTaskGraph();

        taskGraph->solveData.scene = scene;
        taskGraph->solveData.constraintSolverData = constraintSolverData;
        taskGraph->solveData.constraintIsland = constraintIsland;

        uint numPartitionPairs = constraintSolverData->numPartitionPairs;
        uint numPartitions = constraintSolverData->numPartitions;

        taskGraph->SetCallbacks(
            scene->taskSystemCallbacks.SubmitAsyncTask
#if !FM_ASYNC_THREADING
            , scene->taskSystemCallbacks.CreateTaskWaitCounter,
            scene->taskSystemCallbacks.WaitForTaskWaitCounter,
            scene->taskSystemCallbacks.DestroyTaskWaitCounter,
            scene->taskSystemCallbacks.SubmitTask
#endif
        );

        taskGraph->partitionPairConstraintNodes =
            new FmPartitionPairConstraintNodeState[numPartitionPairs + 1];  // one more node added for processing of external constraints
        taskGraph->numPartitionPairs = numPartitionPairs;
        taskGraph->numPartitions = numPartitions;

        // Initialize one constraint node set per partition pair, plus a final
        // slot (i == numPartitionPairs) for the external-constraints task.
        for (uint i = 0; i < numPartitionPairs + 1; i++)
        {
            // Weight defaults to max for the external-constraints node.
            uint weight = (uint)-1;
            if (i < numPartitionPairs)
            {
                FmPartitionPair& partitionPair = constraintSolverData->partitionPairs[i];
                weight = partitionPair.numConstraints;
            }

            FmTaskFuncCallback taskFunc;
            FmTaskFuncCallback taskFuncWrapped;
            const char* taskName;
            if (i == numPartitionPairs)
            {
                taskFunc = externalConstraintsTask;
                taskFuncWrapped = externalConstraintsTaskWrapped;
                taskName = "ExternalConstraints";
            }
            else
            {
                taskFunc = partitionPairPgsTask;
                taskFuncWrapped = partitionPairPgsTaskWrapped;
                taskName = "PartitionPairPgs";
            }

            // Each pair gets four nodes covering the iteration phases of one outer iteration.
            taskGraph->partitionPairConstraintNodes[i].firstIterationNode.Init(taskName, taskGraph, taskFunc, taskFuncWrapped, (int)i, weight);
            taskGraph->partitionPairConstraintNodes[i].repeat0Node.Init(taskName, taskGraph, taskFunc, taskFuncWrapped, (int)i, weight);
            taskGraph->partitionPairConstraintNodes[i].repeat1Node.Init(taskName, taskGraph, taskFunc, taskFuncWrapped, (int)i, weight);
            taskGraph->partitionPairConstraintNodes[i].nextOuterIterationNode.Init(taskName, taskGraph, taskFunc, taskFuncWrapped, (int)i, weight);
        }

        // Per-partition object nodes: MPCG solve and GS iteration / rigid-body delta.
        taskGraph->partitionObjectNodes = new FmPartitionObjectNodeState[numPartitions];
        for (uint i = 0; i < numPartitions; i++)
        {
            taskGraph->partitionObjectNodes[i].mpcgSolveNode.Init("PartitionMpcg", taskGraph, partitionMpcgTask, partitionMpcgTaskWrapped, (int)i);
            taskGraph->partitionObjectNodes[i].gsIterationRbDeltaNode.Init("PartitionGs", taskGraph, partitionGsTask, partitionGsTaskWrapped, (int)i);
        }

        return taskGraph;
    }

    // Free task graph.
// Free a task graph created by FmAllocConstraintSolveTaskGraph.
// Safe to call with NULL.  Destroys base-graph state before releasing the
// node arrays that the graph's nodes point into.
void FmDestroyConstraintSolveTaskGraph(FmConstraintSolveTaskGraph* taskGraph)
{
    if (taskGraph)
    {
        taskGraph->FmTaskGraph::Destroy();
        delete[] taskGraph->partitionPairConstraintNodes;
        delete[] taskGraph->partitionObjectNodes;
        delete taskGraph;
    }
}

FM_WRAPPED_TASK_DECLARATION(FmNodeTaskFuncProcessPartitionPairConstraints)
FM_WRAPPED_TASK_DECLARATION(FmNodeTaskFuncProcessPartitionMpcg)
FM_WRAPPED_TASK_DECLARATION(FmNodeTaskFuncProcessPartitionGsIterationOrRbResponse)
FM_WRAPPED_TASK_DECLARATION(FmNodeTaskFuncExternalPgsIteration)

// Allocate the constraint-solve task graph and wire up all of its dependency
// edges, using the partition-pair graph coloring ("independent sets") computed
// in constraintSolverData.  Pairs within one color are independent and may run
// concurrently; edges are only needed between pairs of different colors that
// share a partition.
void FmCreateConstraintSolveTaskGraph(FmScene* scene, FmConstraintSolverData* constraintSolverData, const FmConstraintIsland* constraintIsland)
{
    FM_ASSERT(constraintSolverData->taskGraph == NULL);

    constraintSolverData->taskGraph = FmAllocConstraintSolveTaskGraph(
        scene, constraintSolverData, constraintIsland,
        FM_TASK_AND_WRAPPED_TASK_ARGS(FmNodeTaskFuncProcessPartitionPairConstraints),
        FM_TASK_AND_WRAPPED_TASK_ARGS(FmNodeTaskFuncProcessPartitionMpcg),
        FM_TASK_AND_WRAPPED_TASK_ARGS(FmNodeTaskFuncProcessPartitionGsIterationOrRbResponse),
        FM_TASK_AND_WRAPPED_TASK_ARGS(FmNodeTaskFuncExternalPgsIteration));

    uint numPartitionPairs = constraintSolverData->numPartitionPairs;
    uint numPartitions = constraintSolverData->numPartitions;
    uint numColors = constraintSolverData->numPartitionPairIndependentSets;

    // Link GS and MPCG nodes to preceding and subsequent constraint nodes.
    for (uint partitionIdx = 0; partitionIdx < numPartitions; partitionIdx++)
    {
        // Check for dependencies from last to first color.
        // Only need to link to nodes in the first preceding color found, which will be linked to any previous nodes with the same partition.
        for (int colorIdx = (int)numColors - 1; colorIdx >= 0; colorIdx--)
        {
            FmGraphColoringSet& color = constraintSolverData->partitionPairIndependentSets[colorIdx];
            uint numInColor = color.numElements;
            bool foundInColor = false;
            for (uint colorElemIdx = 0; colorElemIdx < numInColor; colorElemIdx++)
            {
                uint pairIdx = color.pStart[colorElemIdx];
                FmPartitionPair& partitionPair = constraintSolverData->partitionPairs[pairIdx];
                if (partitionIdx == partitionPair.partitionIdA || partitionIdx == partitionPair.partitionIdB)
                {
                    FmMakePartitionTaskDependency(constraintSolverData->taskGraph, pairIdx, partitionIdx);
                    foundInColor = true;
                }
            }
            if (foundInColor)
            {
                // All pairs touching this partition in later colors are already
                // ordered behind this color, so no further links are needed.
                break;
            }
        }

        // Link to partition pair nodes in the next outer iteration
        for (uint colorIdx = 0; colorIdx < numColors; colorIdx++)
        {
            FmGraphColoringSet& color = constraintSolverData->partitionPairIndependentSets[colorIdx];
            uint numInColor = color.numElements;
            bool foundInColor = false;
            for (uint colorElemIdx = 0; colorElemIdx < numInColor; colorElemIdx++)
            {
                uint pairIdx = color.pStart[colorElemIdx];
                FmPartitionPair& partitionPair = constraintSolverData->partitionPairs[pairIdx];
                if (partitionIdx == partitionPair.partitionIdA || partitionIdx == partitionPair.partitionIdB)
                {
                    FmMakeNextOuterIterationDependency(constraintSolverData->taskGraph, partitionIdx, pairIdx);
                    foundInColor = true;
                }
            }
            if (foundInColor)
            {
                break;
            }
        }
    }

    FmPartitionPairConstraintNodeState* partitionPairConstraintNodes = constraintSolverData->taskGraph->partitionPairConstraintNodes;

    // Link constraint nodes with dependencies
    for (uint colorIdx = 0; colorIdx < numColors; colorIdx++)
    {
        FmGraphColoringSet& color = constraintSolverData->partitionPairIndependentSets[colorIdx];
        uint numInColor = color.numElements;
        for (uint colorElemIdx = 0; colorElemIdx < numInColor; colorElemIdx++)
        {
            uint pairIdx = color.pStart[colorElemIdx];
            FmPartitionPair& partitionPair = constraintSolverData->partitionPairs[pairIdx];

            bool nodeHasRigidBodies = partitionPair.numRigidBodies > 0;
            bool nodeHasSameIterationSuccessorWithRigidBodies = false;

            // Create edges from start node to any node without predecessor
            // (numPredecessorsSameIteration is incremented below as edges from
            // earlier colors are created, so nodes in later colors only attach to
            // the start node when no same-iteration predecessor was found).
            if (partitionPairConstraintNodes[pairIdx].numPredecessorsSameIteration == 0)
            {
                FmMakeStartDependency(constraintSolverData->taskGraph, pairIdx);
            }

            bool foundA = false;
            bool foundB = false;

            // Make links to subsequent colors with the same partitions.
            // Only need to link to nodes in the first subsequent color found; links will be made from that color to any subsequent nodes with this partition.
            // Scanning futureColorIdx up to numColors * 2 wraps around into the
            // next PGS iteration of the same color sequence.
            for (uint futureColorIdx = colorIdx + 1; futureColorIdx < numColors * 2; futureColorIdx++)
            {
                bool nextIteration = futureColorIdx >= numColors;
                uint nextColorIdx = futureColorIdx % numColors;
                FmGraphColoringSet& nextColor = constraintSolverData->partitionPairIndependentSets[nextColorIdx];
                uint numInNextColor = nextColor.numElements;
                bool foundAInColor = false;
                bool foundBInColor = false;
                for (uint nextColorElemIdx = 0; nextColorElemIdx < numInNextColor; nextColorElemIdx++)
                {
                    uint nextPairIdx = nextColor.pStart[nextColorElemIdx];
                    FmPartitionPair& nextPartitionPair = constraintSolverData->partitionPairs[nextPairIdx];

                    // Successor shares partition A of this pair.
                    if (!foundA && (partitionPair.partitionIdA == nextPartitionPair.partitionIdA || partitionPair.partitionIdA == nextPartitionPair.partitionIdB))
                    {
                        if (nextIteration)
                        {
                            FmMakeNextIterationDependency(constraintSolverData->taskGraph, pairIdx, nextPairIdx);
                        }
                        else
                        {
                            FmMakeSameIterationDependency(constraintSolverData->taskGraph, pairIdx, nextPairIdx);
                            partitionPairConstraintNodes[nextPairIdx].numPredecessorsSameIteration++;
                            if (nodeHasRigidBodies)
                            {
                                partitionPairConstraintNodes[nextPairIdx].hasSameIterationPredecessorWithRigidBodies = true;
                            }
                            if (nextPartitionPair.numRigidBodies > 0)
                            {
                                nodeHasSameIterationSuccessorWithRigidBodies = true;
                            }
                        }
                        foundAInColor = true;
                        if (partitionPair.partitionIdA == partitionPair.partitionIdB)
                        {
                            foundBInColor = true;
                        }
                    }
                    // Successor shares partition B (only when A and B differ,
                    // otherwise the A branch already covered it).
                    if (partitionPair.partitionIdA != partitionPair.partitionIdB)
                    {
                        if (!foundB && (partitionPair.partitionIdB == nextPartitionPair.partitionIdA || partitionPair.partitionIdB == nextPartitionPair.partitionIdB))
                        {
                            if (nextIteration)
                            {
                                FmMakeNextIterationDependency(constraintSolverData->taskGraph, pairIdx, nextPairIdx);
                            }
                            else
                            {
                                FmMakeSameIterationDependency(constraintSolverData->taskGraph, pairIdx, nextPairIdx);
                                partitionPairConstraintNodes[nextPairIdx].numPredecessorsSameIteration++;
                                if (nodeHasRigidBodies)
                                {
                                    partitionPairConstraintNodes[nextPairIdx].hasSameIterationPredecessorWithRigidBodies = true;
                                }
                                if (nextPartitionPair.numRigidBodies > 0)
                                {
                                    nodeHasSameIterationSuccessorWithRigidBodies = true;
                                }
                            }
                            foundBInColor = true;
                        }
                    }
                }
                if (foundAInColor)
                {
                    foundA = true;
                }
                if (foundBInColor)
                {
                    foundB = true;
                }
                if (foundA && foundB)
                {
                    break;
                }
            }

            // Add node for processing external constraints, if island has rigid bodies, and callback to external system exists
            if (constraintIsland->numUserRigidBodyIslands > 0 && constraintIsland->innerIterationCallback)
            {
                if (nodeHasRigidBodies && !nodeHasSameIterationSuccessorWithRigidBodies)
                {
                    // Last rigid-body pair in the iteration feeds the external node
                    // (index numPartitionPairs is the extra external-constraints slot).
                    FmMakeSameIterationDependency(constraintSolverData->taskGraph, pairIdx, numPartitionPairs);
                }
                if (nodeHasRigidBodies && !partitionPairConstraintNodes[pairIdx].hasSameIterationPredecessorWithRigidBodies)
                {
                    // First rigid-body pair of the next iteration waits on the external node.
                    FmMakeNextIterationDependency(constraintSolverData->taskGraph, numPartitionPairs, pairIdx);
                }
            }
        }
    }
}

// Run task graph to execute one outer iteration of constraint solve, and call its follow task when finished
void FmRunConstraintSolveTaskGraphAsync(FmConstraintSolveTaskGraph* taskGraph, FmTaskFuncCallback followTask, void* followTaskData)
{
    taskGraph->StartAsync(followTask, followTaskData);
}

#if !FM_ASYNC_THREADING
// Run task graph to execute one outer iteration of constraint solve.
void FmRunConstraintSolveTaskGraph(FmConstraintSolveTaskGraph* taskGraph)
{
    taskGraph->StartAndWait();
}
#endif

// Make edge from start node to partition pair node.
void FmMakeStartDependency(FmConstraintSolveTaskGraph* graph, uint partitionPairIdx) { graph->AddToStart(&graph->partitionPairConstraintNodes[partitionPairIdx].firstIterationNode); } // Make edge from partition pair node to different node in the same PGS iteration. void FmMakeSameIterationDependency(FmConstraintSolveTaskGraph* graph, uint partitionPairAIdx, uint partitionPairBIdx) { graph->partitionPairConstraintNodes[partitionPairAIdx].firstIterationNode.AddSuccessor(&graph->partitionPairConstraintNodes[partitionPairBIdx].firstIterationNode); graph->partitionPairConstraintNodes[partitionPairAIdx].repeat0Node.AddSuccessor(&graph->partitionPairConstraintNodes[partitionPairBIdx].repeat0Node); graph->partitionPairConstraintNodes[partitionPairAIdx].repeat1Node.AddSuccessor(&graph->partitionPairConstraintNodes[partitionPairBIdx].repeat1Node); graph->partitionPairConstraintNodes[partitionPairAIdx].nextOuterIterationNode.AddSuccessor(&graph->partitionPairConstraintNodes[partitionPairBIdx].nextOuterIterationNode); } // Register dependency between partition pair node and node in the next PGS iteration. void FmMakeNextIterationDependency(FmConstraintSolveTaskGraph* graph, uint partitionPairAIdx, uint partitionPairBIdx) { // Register a predecessor but not an edge, which allows the messages to not be passed if exiting iterations graph->partitionPairConstraintNodes[partitionPairAIdx].nextIteration0Successors.AddSuccessor(&graph->partitionPairConstraintNodes[partitionPairBIdx].repeat0Node); graph->partitionPairConstraintNodes[partitionPairAIdx].nextIteration1Successors.AddSuccessor(&graph->partitionPairConstraintNodes[partitionPairBIdx].repeat1Node); } // Register dependency between partition pair node and partition node in the next solving phase. 
void FmMakePartitionTaskDependency(FmConstraintSolveTaskGraph* graph, uint partitionPairIdx, uint partitionIdx) { graph->partitionPairConstraintNodes[partitionPairIdx].partitionGsSuccessors.AddSuccessor(&graph->partitionObjectNodes[partitionIdx].gsIterationRbDeltaNode); graph->partitionPairConstraintNodes[partitionPairIdx].partitionMpcgSuccessors.AddSuccessor(&graph->partitionObjectNodes[partitionIdx].mpcgSolveNode); } // Register dependency between partition node and partition pair node in the next outer solve iteration void FmMakeNextOuterIterationDependency(FmConstraintSolveTaskGraph* graph, uint partitionIdx, uint partitionPairIdx) { graph->partitionObjectNodes[partitionIdx].partitionPairSuccessors.AddSuccessor(&graph->partitionPairConstraintNodes[partitionPairIdx].nextOuterIterationNode); } // Send messages to partition pair nodes in next iteration. void FmNextIterationMessages(FmConstraintSolveTaskGraph* graph, uint partitionPairIdx, uint iteration, FmTaskGraphNode** ppNextNode) { FmPartitionPairConstraintNodeState& node = graph->partitionPairConstraintNodes[partitionPairIdx]; if ((iteration % 2) == 0) { node.nextIteration0Successors.SignalSuccessors(0, ppNextNode); } else { node.nextIteration1Successors.SignalSuccessors(0, ppNextNode); } } // Send messages to partition nodes to run MPCG. void FmPartitionMpcgTaskMessages(FmConstraintSolveTaskGraph* graph, uint partitionPairIdx, uint outerIteration, FmTaskGraphNode** ppNextNode) { FmPartitionPairConstraintNodeState& node = graph->partitionPairConstraintNodes[partitionPairIdx]; node.partitionMpcgSuccessors.SignalSuccessors((int32_t)outerIteration, ppNextNode); } // Send messages to partition nodes to run GS iteration. 
void FmPartitionGsTaskMessages(FmConstraintSolveTaskGraph* graph, uint partitionPairIdx, uint outerIteration, FmTaskGraphNode** ppNextNode) { FmPartitionPairConstraintNodeState& node = graph->partitionPairConstraintNodes[partitionPairIdx]; node.partitionGsSuccessors.SignalSuccessors((int32_t)outerIteration, ppNextNode); } // Send messages to partition pair nodes in next outer iteration. void FmNextOuterIterationMessages(FmConstraintSolveTaskGraph* graph, uint partitionIdx, FmTaskGraphNode** ppNextNode) { graph->partitionObjectNodes[partitionIdx].partitionPairSuccessors.SignalSuccessors(0, ppNextNode); } } #endif
/* * Copyright 2004-2018 Cray Inc. * Other additional copyright holders may be indicated within. * * The entirety of this work is licensed under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "baseAST.h" #include "astutil.h" #include "CForLoop.h" #include "CatchStmt.h" #include "DeferStmt.h" #include "driver.h" #include "expr.h" #include "ForallStmt.h" #include "ForLoop.h" #include "IfExpr.h" #include "log.h" #include "LoopExpr.h" #include "UnmanagedClassType.h" #include "ModuleSymbol.h" #include "ParamForLoop.h" #include "parser.h" #include "passes.h" #include "runpasses.h" #include "scopeResolve.h" #include "stmt.h" #include "stringutil.h" #include "symbol.h" #include "TryStmt.h" #include "type.h" #include "WhileStmt.h" #include <ostream> #include <sstream> #include <string> // // declare global vectors gSymExprs, gCallExprs, gFnSymbols, ... 
// #define decl_gvecs(type) Vec<type*> g##type##s foreach_ast(decl_gvecs); static int uid = 1; #define decl_counters(type) \ int n##type = g##type##s.n, k##type = n##type*sizeof(type)/1024 #define sum_gvecs(type) g##type##s.n #define def_vec_hash(SomeType) \ template<> \ uintptr_t _vec_hasher(SomeType* obj) { \ if (obj == NULL) { \ return 0; \ } else { \ return (uintptr_t)((BaseAST*)obj)->id; \ } \ } foreach_ast(def_vec_hash); def_vec_hash(Symbol); def_vec_hash(Type); def_vec_hash(BaseAST); #undef def_vec_hash // // Throughout printStatistics(), "n" indicates the number of nodes; // "k" indicates how many KiB memory they occupy: k = n * sizeof(node) / 1024. // void printStatistics(const char* pass) { static int last_nasts = -1; static int maxK = -1, maxN = -1; if (!strcmp(pass, "makeBinary")) { if (strstr(fPrintStatistics, "m")) { fprintf(stderr, "Maximum # of ASTS: %d\n", maxN); fprintf(stderr, "Maximum Size (KB): %d\n", maxK); } } int nasts = foreach_ast_sep(sum_gvecs, +); if (last_nasts == nasts) { fprintf(stderr, "%23s%s\n", "", pass); return; } foreach_ast(decl_counters); int nStmt = nBlockStmt + nCondStmt + nDeferStmt + nGotoStmt + nUseStmt + nExternBlockStmt + nForallStmt + nTryStmt + nForwardingStmt + nCatchStmt; int kStmt = kBlockStmt + kCondStmt + kDeferStmt + kGotoStmt + kUseStmt + kExternBlockStmt + kForallStmt + kTryStmt + kForwardingStmt + kCatchStmt; int nExpr = nUnresolvedSymExpr + nSymExpr + nDefExpr + nCallExpr + nContextCallExpr + nLoopExpr + nNamedExpr + nIfExpr; int kExpr = kUnresolvedSymExpr + kSymExpr + kDefExpr + kCallExpr + kContextCallExpr + kLoopExpr + kNamedExpr + kIfExpr; int nSymbol = nModuleSymbol+nVarSymbol+nArgSymbol+nShadowVarSymbol+nTypeSymbol+nFnSymbol+nEnumSymbol+nLabelSymbol; int kSymbol = kModuleSymbol+kVarSymbol+kArgSymbol+kShadowVarSymbol+kTypeSymbol+kFnSymbol+kEnumSymbol+kLabelSymbol; int nType = nPrimitiveType+nEnumType+nAggregateType+nUnmanagedClassType; int kType = 
kPrimitiveType+kEnumType+kAggregateType+kUnmanagedClassType; fprintf(stderr, "%7d asts (%6dK) %s\n", nStmt+nExpr+nSymbol+nType, kStmt+kExpr+kSymbol+kType, pass); if (nStmt+nExpr+nSymbol+nType > maxN) maxN = nStmt+nExpr+nSymbol+nType; if (kStmt+kExpr+kSymbol+kType > maxK) maxK = kStmt+kExpr+kSymbol+kType; if (strstr(fPrintStatistics, "n")) fprintf(stderr, " Stmt %9d Cond %9d Block %9d Goto %9d\n", nStmt, nCondStmt, nBlockStmt, nGotoStmt); if (strstr(fPrintStatistics, "k") && strstr(fPrintStatistics, "n")) fprintf(stderr, " Stmt %9dK Cond %9dK Block %9dK Goto %9dK\n", kStmt, kCondStmt, kBlockStmt, kGotoStmt); if (strstr(fPrintStatistics, "k") && !strstr(fPrintStatistics, "n")) fprintf(stderr, " Stmt %6dK Cond %6dK Block %6dK Goto %6dK\n", kStmt, kCondStmt, kBlockStmt, kGotoStmt); if (strstr(fPrintStatistics, "n")) fprintf(stderr, " Expr %9d Unre %9d Sym %9d Def %9d Call %9d Forall %9d Named %9d If %9d\n", nExpr, nUnresolvedSymExpr, nSymExpr, nDefExpr, nCallExpr, nLoopExpr, nNamedExpr, nIfExpr); if (strstr(fPrintStatistics, "k") && strstr(fPrintStatistics, "n")) fprintf(stderr, " Expr %9dK Unre %9dK Sym %9dK Def %9dK Call %9dK Forall %9dk Named %9dK If %9dK\n", kExpr, kUnresolvedSymExpr, kSymExpr, kDefExpr, kCallExpr, kLoopExpr, kNamedExpr, kIfExpr); if (strstr(fPrintStatistics, "k") && !strstr(fPrintStatistics, "n")) fprintf(stderr, " Expr %6dK Unre %6dK Sym %6dK Def %6dK Call %6dK Forall %6dk Named %6dK If %6dK\n", kExpr, kUnresolvedSymExpr, kSymExpr, kDefExpr, kCallExpr, kLoopExpr, kNamedExpr, kIfExpr); if (strstr(fPrintStatistics, "n")) fprintf(stderr, " Sym %9d Mod %9d Var %9d Arg %9d Shd %9d Type %9d Fn %9d Enum %9d Label %9d\n", nSymbol, nModuleSymbol, nVarSymbol, nArgSymbol, nShadowVarSymbol, nTypeSymbol, nFnSymbol, nEnumSymbol, nLabelSymbol); if (strstr(fPrintStatistics, "k") && strstr(fPrintStatistics, "n")) fprintf(stderr, " Sym %9dK Mod %9dK Var %9dK Arg %9dK Shd %9dK Type %9dK Fn %9dK Enum %9dK Label %9dK\n", kSymbol, kModuleSymbol, kVarSymbol, 
kArgSymbol, kShadowVarSymbol, kTypeSymbol, kFnSymbol, kEnumSymbol, kLabelSymbol); if (strstr(fPrintStatistics, "k") && !strstr(fPrintStatistics, "n")) fprintf(stderr, " Sym %6dK Mod %6dK Var %6dK Arg %6dK Shd %6dK Type %6dK Fn %6dK Enum %6dK Label %6dK\n", kSymbol, kModuleSymbol, kVarSymbol, kArgSymbol, kShadowVarSymbol, kTypeSymbol, kFnSymbol, kEnumSymbol, kLabelSymbol); if (strstr(fPrintStatistics, "n")) fprintf(stderr, " Type %9d Prim %9d Enum %9d Class %9d \n", nType, nPrimitiveType, nEnumType, nAggregateType); if (strstr(fPrintStatistics, "k") && strstr(fPrintStatistics, "n")) fprintf(stderr, " Type %9dK Prim %9dK Enum %9dK Class %9dK\n", kType, kPrimitiveType, kEnumType, kAggregateType); if (strstr(fPrintStatistics, "k") && !strstr(fPrintStatistics, "n")) fprintf(stderr, " Type %6dK Prim %6dK Enum %6dK Class %6dK\n", kType, kPrimitiveType, kEnumType, kAggregateType); last_nasts = nasts; } // for debugging purposes only void trace_remove(BaseAST* ast, char flag) { // crash if deletedIdHandle is not initialized but deletedIdFilename is if (deletedIdON() == true) { fprintf(deletedIdHandle, "%d %c %p %d\n", currentPassNo, flag, ast, ast->id); } if (ast->id == breakOnRemoveID) { if (deletedIdON() == true) fflush(deletedIdHandle); gdbShouldBreakHere(); } // There should never be an attempt to delete a global type. if (flag != 'z' && // At least, not before compiler shutdown. 
isPrimitiveType(ast) && toPrimitiveType(ast)->symbol->hasFlag(FLAG_GLOBAL_TYPE_SYMBOL)) INT_FATAL(ast, "Unexpected attempt to eviscerate a global type symbol."); } #define clean_gvec(type) \ int i##type = 0; \ forv_Vec(type, ast, g##type##s) { \ if (isAlive(ast) || isRootModuleWithType(ast, type)) { \ g##type##s.v[i##type++] = ast; \ } else { \ trace_remove(ast, 'x'); \ delete ast; ast = 0; \ } \ } \ g##type##s.n = i##type static void clean_modvec(Vec<ModuleSymbol*>& modvec) { int aliveMods = 0; forv_Vec(ModuleSymbol, mod, modvec) { if (isAlive(mod) || isRootModuleWithType(mod, ModuleSymbol)) { modvec.v[aliveMods++] = mod; } } modvec.n = aliveMods; } void cleanAst() { // Important: Sometimes scopeResolve will create dummy UseStmts that are // never inserted into the tree, and will be deleted in between passes. // // If we do not destroy the caches, they may contain pointers back to these // dummy uses. destroyModuleUsesCaches(); // // clear back pointers to dead ast instances // forv_Vec(TypeSymbol, ts, gTypeSymbols) { for (int i = 0; i < ts->type->methods.n; i++) { FnSymbol* method = ts->type->methods.v[i]; if (method && !isAliveQuick(method)) { ts->type->methods.v[i] = NULL; } if (AggregateType* ct = toAggregateType(ts->type)) { if (ct->defaultInitializer != NULL && isAliveQuick(ct->defaultInitializer) == false) { ct->defaultInitializer = NULL; } if (ct->hasDestructor() == true && isAliveQuick(ct->getDestructor()) == false) { ct->setDestructor(NULL); } } } if (AggregateType* at = toAggregateType(ts->type)) { for (int i = 0; i < at->dispatchChildren.n; i++) { if (AggregateType* type = at->dispatchChildren.v[i]) { if (isAlive(type) == false) { at->dispatchChildren.v[i] = NULL; } } } } } removedIterResumeLabels.clear(); copiedIterResumeGotos.clear(); // clean the other module vectors, without deleting the ast instances // (they will be deleted with the clean_gvec call for ModuleSymbols.) 
clean_modvec(allModules); clean_modvec(userModules); // // clean global vectors and delete dead ast instances // foreach_ast(clean_gvec); } void destroyAst() { #define destroy_gvec(type) \ forv_Vec(type, ast, g##type##s) { \ trace_remove(ast, 'z'); \ delete ast; \ } foreach_ast(destroy_gvec); } void verify() { verifyRemovedIterResumeGotos(); verifyCopiedIterResumeGotos(); #define verify_gvec(type) \ forv_Vec(type, ast, g##type##s) { \ if (isAlive(ast)) { \ ast->verify(); \ } \ } foreach_ast(verify_gvec); // rootModule does not pass isAlive(), yet is "alive" - needs to be verified rootModule->verify(); } int breakOnID = -1; int breakOnRemoveID = -1; int lastNodeIDUsed() { return uid - 1; } // This is here so that we can break on the creation of a particular // BaseAST instance in gdb. static void checkid(int id) { if (id == breakOnID) { gdbShouldBreakHere(); } } BaseAST::BaseAST(AstTag type) : astTag(type), id(uid++), astloc(yystartlineno, yyfilename) { checkid(id); if (astloc.filename) { // OK, set from yyfilename } else { if (currentAstLoc.filename) { astloc = currentAstLoc; } else { // neither yy* nor currentAstLoc are set INT_FATAL("no line number available"); } } } const std::string BaseAST::tabText = " "; BaseAST::~BaseAST() { } int BaseAST::linenum() const { return astloc.lineno; } const char* BaseAST::fname() const { return astloc.filename; } const char* BaseAST::stringLoc(void) const { const int tmpBuffSize = 256; char tmpBuff[tmpBuffSize]; snprintf(tmpBuff, tmpBuffSize, "%s:%d", fname(), linenum()); return astr(tmpBuff); } ModuleSymbol* BaseAST::getModule() { ModuleSymbol* retval = NULL; if (ModuleSymbol* x = toModuleSymbol(this)) { retval = x; } else if (Type* x = toType(this)) { if (x->symbol != NULL) retval = x->symbol->getModule(); } else if (Symbol* x = toSymbol(this)) { if (x->defPoint != NULL) retval = x->defPoint->getModule(); } else if (Expr* x = toExpr(this)) { if (x->parentSymbol != NULL) retval = x->parentSymbol->getModule(); } else { 
INT_FATAL(this, "Unexpected case in BaseAST::getModule()"); } return retval; } Type* BaseAST::typeInfo() { QualifiedType qt = this->qualType(); return qt.type(); } bool BaseAST::isRef() { return this->qualType().isRef(); } bool BaseAST::isWideRef() { return this->qualType().isWideRef(); } bool BaseAST::isRefOrWideRef() { return this->qualType().isRefOrWideRef(); } FnSymbol* BaseAST::getFunction() { if (ModuleSymbol* x = toModuleSymbol(this)) return x->initFn; else if (FnSymbol* x = toFnSymbol(this)) return x; else if (Type* x = toType(this)) return x->symbol->getFunction(); else if (Symbol* x = toSymbol(this)) return x->defPoint->getFunction(); else if (Expr* x = toExpr(this)) return x->parentSymbol->getFunction(); else INT_FATAL(this, "Unexpected case in BaseAST::getFunction()"); return NULL; } Type* BaseAST::getValType() { Type* type = typeInfo(); INT_ASSERT(type); if (type->symbol->hasFlag(FLAG_REF)) return type->getField("_val")->type; else if (type->symbol->hasFlag(FLAG_WIDE_REF)) return type->getField("addr")->getValType(); else return type; } Type* BaseAST::getRefType() { Type* type = typeInfo(); INT_ASSERT(type); if (type->symbol->hasFlag(FLAG_REF)) return type; else if (type->symbol->hasFlag(FLAG_WIDE_REF)) return type->getField("addr")->type; else return type->refType; } Type* BaseAST::getWideRefType() { Type* type = typeInfo(); INT_ASSERT(type); if (type->symbol->hasFlag(FLAG_REF)) return wideRefMap.get(type); else if (type->symbol->hasFlag(FLAG_WIDE_REF)) return type; else return wideRefMap.get(type->getRefType()); } const char* BaseAST::astTagAsString() const { const char* retval = "BaseAST??"; switch (astTag) { case E_SymExpr: retval = "SymExpr"; break; case E_UnresolvedSymExpr: retval = "UnresolvedSymExpr"; break; case E_DefExpr: retval = "DefExpr"; break; case E_CallExpr: retval = "CallExpr"; break; case E_ContextCallExpr: retval = "ContextCallExpr"; break; case E_LoopExpr: retval = "LoopExpr"; break; case E_NamedExpr: retval = "NamedExpr"; break; 
case E_IfExpr: retval = "IfExpr"; break; case E_UseStmt: retval = "UseStmt"; break; case E_BlockStmt: { // see AST_CHILDREN_CALL const BlockStmt* stmt = toConstBlockStmt(this); if (false) retval = ""; else if (stmt->isCForLoop()) retval = "CForLoop"; else if (stmt->isForLoop()) retval = "ForLoop"; else if (stmt->isParamForLoop()) retval = "ParamForLoop"; else if (stmt->isWhileDoStmt()) retval = "WhileDoStmt"; else if (stmt->isDoWhileStmt()) retval = "DoWhileStmt"; else retval = "BlockStmt"; } break; case E_CondStmt: retval = "CondStmt"; break; case E_DeferStmt: retval = "DeferStmt"; break; case E_GotoStmt: retval = "GotoStmt"; break; case E_ForwardingStmt: retval = "ForwardingStmt"; break; case E_ForallStmt: retval = "ForallStmt"; break; case E_ExternBlockStmt: retval = "ExternBlockStmt"; break; case E_TryStmt: retval = "TryStmt"; break; case E_CatchStmt: retval = "CatchStmt"; break; case E_ModuleSymbol: retval = "ModuleSymbol"; break; case E_VarSymbol: retval = "VarSymbol"; break; case E_ArgSymbol: retval = "ArgSymbol"; break; case E_ShadowVarSymbol: retval = "ShadowVarSymbol"; break; case E_TypeSymbol: retval = "TypeSymbol"; break; case E_FnSymbol: retval = "FnSymbol"; break; case E_EnumSymbol: retval = "EnumSymbol"; break; case E_LabelSymbol: retval = "LabelSymbol"; break; case E_PrimitiveType: retval = "PrimitiveType"; break; case E_EnumType: retval = "EnumType"; break; case E_AggregateType: retval = "AggregateType"; break; case E_UnmanagedClassType: retval = "UnmanagedClassType"; break; } return retval; } void BaseAST::printTabs(std::ostream *file, unsigned int tabs) { for (unsigned int i = 0; i < tabs; i++) { *file << this->tabText; } } // This method is the same for several subclasses of BaseAST, so it is defined // her on BaseAST. 'doc' is not defined as a member of BaseAST, so it must be // taken as an argument here. // // TODO: Can BaseAST define a 'doc' member? What if `chpl --doc` went away and // `chpldoc` was compiled with a special #define (e.g. 
-DCHPLDOC) so the // 'doc' member and all doc-related methods would only be available to // chpldoc? (thomasvandoren, 2015-02-21) void BaseAST::printDocsDescription(const char *doc, std::ostream *file, unsigned int tabs) { if (doc != NULL) { std::stringstream sStream(ltrimAllLines(doc)); std::string line; while (std::getline(sStream, line)) { this->printTabs(file, tabs); *file << line; *file << std::endl; } } } astlocT currentAstLoc(0,NULL); void registerModule(ModuleSymbol* mod) { switch (mod->modTag) { case MOD_USER: userModules.add(mod); case MOD_STANDARD: case MOD_INTERNAL: if (strcmp(mod->name, "_root")) allModules.add(mod); break; default: INT_FATAL(mod, "Unable to register module"); } } #define SUB_SYMBOL(x) \ do { \ if (x) \ if (Symbol* y = map->get(x)) \ x = y; \ } while (0) #define SUB_TYPE(x) \ do { \ if (x) \ if (Symbol* y = map->get(x->symbol)) \ x = y->type; \ } while (0) void update_symbols(BaseAST* ast, SymbolMap* map) { if (SymExpr* sym_expr = toSymExpr(ast)) { if (sym_expr->symbol()) if (Symbol* y = map->get(sym_expr->symbol())) sym_expr->setSymbol(y); } else if (DefExpr* defExpr = toDefExpr(ast)) { SUB_TYPE(defExpr->sym->type); } else if (LoopStmt* ls = toLoopStmt(ast)) { LabelSymbol* breakLabel = ls->breakLabelGet(); LabelSymbol* continueLabel = ls->continueLabelGet(); if (breakLabel != 0) { if (LabelSymbol* y = toLabelSymbol(map->get(breakLabel))) { ls->breakLabelSet(y); } } if (continueLabel != 0) { if (LabelSymbol* y = toLabelSymbol(map->get(continueLabel))) { ls->continueLabelSet(y); } } } else if (ForallStmt* forall = toForallStmt(ast)) { if (forall->fContinueLabel) { if (LabelSymbol* y = toLabelSymbol(map->get(forall->fContinueLabel))) forall->fContinueLabel = y; } else if (forall->fErrorHandlerLabel) { if (LabelSymbol* y = toLabelSymbol(map->get(forall->fErrorHandlerLabel))) forall->fErrorHandlerLabel = y; } } else if (VarSymbol* ps = toVarSymbol(ast)) { SUB_TYPE(ps->type); } else if (FnSymbol* ps = toFnSymbol(ast)) { SUB_TYPE(ps->type); 
SUB_TYPE(ps->retType); SUB_SYMBOL(ps->_this); } else if (ArgSymbol* ps = toArgSymbol(ast)) { SUB_TYPE(ps->type); } else if (ShadowVarSymbol* ss = toShadowVarSymbol(ast)) { SUB_TYPE(ss->type); } AST_CHILDREN_CALL(ast, update_symbols, map); } GenRet baseASTCodegen(BaseAST* ast) { GenRet ret = ast->codegen(); if (!ret.chplType) ret.chplType = ast->typeInfo(); ret.isUnsigned = ! is_signed(ret.chplType); return ret; } GenRet baseASTCodegenInt(int x) { return baseASTCodegen(new_IntSymbol(x, INT_SIZE_64)); } GenRet baseASTCodegenString(const char* str) { return baseASTCodegen(new_CStringSymbol(str)); } /************************************* | ************************************** * * * * ************************************** | *************************************/ bool isLoopStmt(const BaseAST* a) { const BlockStmt* stmt = toConstBlockStmt(a); return (stmt != 0 && stmt->isLoopStmt()) ? true : false; } bool isWhileStmt(const BaseAST* a) { const BlockStmt* stmt = toConstBlockStmt(a); return (stmt != 0 && stmt->isWhileStmt()) ? true : false; } bool isWhileDoStmt(const BaseAST* a) { const BlockStmt* stmt = toConstBlockStmt(a); return (stmt != 0 && stmt->isWhileDoStmt()) ? true : false; } bool isDoWhileStmt(const BaseAST* a) { const BlockStmt* stmt = toConstBlockStmt(a); return (stmt != 0 && stmt->isDoWhileStmt()) ? true : false; } bool isParamForLoop(const BaseAST* a) { const BlockStmt* stmt = toConstBlockStmt(a); return (stmt != 0 && stmt->isParamForLoop()) ? true : false; } bool isForLoop(const BaseAST* a) { const BlockStmt* stmt = toConstBlockStmt(a); return (stmt != 0 && stmt->isForLoop()) ? true : false; } bool isCoforallLoop(const BaseAST* a) { const BlockStmt* stmt = toConstBlockStmt(a); return (stmt != 0 && stmt->isCoforallLoop()) ? true : false; } bool isCForLoop(const BaseAST* a) { const BlockStmt* stmt = toConstBlockStmt(a); return (stmt != 0 && stmt->isCForLoop()) ? true : false; } /* Create a throw-away ast with a given filename and line number. 
This can be used e.g. to pass a line and filename to USR_FATAL since it only takes those from an AST, not directly. */ VarSymbol* createASTforLineNumber(const char* filename, int line){ astlocT astloc(line, filename); astlocMarker markAstLoc(astloc); VarSymbol* lineTemp = newTemp(); return lineTemp; } /************************************* | ************************************** * * * Definitions for astlocMarker * * * ************************************** | *************************************/ // constructor, invoked upon SET_LINENO astlocMarker::astlocMarker(astlocT newAstLoc) : previousAstLoc(currentAstLoc) { //previousAstLoc = currentAstLoc; currentAstLoc = newAstLoc; } // constructor, for special occasions astlocMarker::astlocMarker(int lineno, const char* filename) : previousAstLoc(currentAstLoc) { currentAstLoc.lineno = lineno; currentAstLoc.filename = astr(filename); } // destructor, invoked upon leaving SET_LINENO's scope astlocMarker::~astlocMarker() { currentAstLoc = previousAstLoc; }
// Copyright (c) 2016-2019 The Bitcoin Unlimited developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "requestManager.h" #include "blockrelay/blockrelay_common.h" #include "blockrelay/compactblock.h" #include "blockrelay/graphene.h" #include "blockrelay/mempool_sync.h" #include "blockrelay/thinblock.h" #include "chain.h" #include "chainparams.h" #include "consensus/consensus.h" #include "consensus/params.h" #include "consensus/validation.h" #include "dosman.h" #include "leakybucket.h" #include "main.h" #include "net.h" #include "nodestate.h" #include "parallel.h" #include "primitives/block.h" #include "rpc/server.h" #include "stat.h" #include "tinyformat.h" #include "txmempool.h" #include "txorphanpool.h" #include "unlimited.h" #include "util.h" #include "utilstrencodings.h" #include "utiltime.h" #include "validation/validation.h" #include "validationinterface.h" #include "version.h" #include "xversionkeys.h" #include <boost/accumulators/accumulators.hpp> #include <boost/accumulators/statistics/mean.hpp> #include <boost/accumulators/statistics/stats.hpp> #include <boost/accumulators/statistics/variance.hpp> #include <boost/lexical_cast.hpp> #include <inttypes.h> #include <thread> #include "logFile.h" using namespace std; extern CTweak<unsigned int> maxBlocksInTransitPerPeer; extern CTweak<unsigned int> blockDownloadWindow; // Request management extern CRequestManager requester; // Any ping < 25 ms is good unsigned int ACCEPTABLE_PING_USEC = 25 * 1000; // When should I request an object from someone else (in microseconds) unsigned int MIN_TX_REQUEST_RETRY_INTERVAL = DEFAULT_MIN_TX_REQUEST_RETRY_INTERVAL; unsigned int txReqRetryInterval = MIN_TX_REQUEST_RETRY_INTERVAL; // When should I request a block from someone else (in microseconds) unsigned int MIN_BLK_REQUEST_RETRY_INTERVAL = DEFAULT_MIN_BLK_REQUEST_RETRY_INTERVAL; unsigned int blkReqRetryInterval = 
MIN_BLK_REQUEST_RETRY_INTERVAL;

// defined in main.cpp. should be moved into a utilities file but want to make rebasing easier
extern bool CanDirectFetch(const Consensus::Params &consensusParams);

/** Find the last common ancestor two blocks have.
 *  Both pa and pb must be non-nullptr. */
static CBlockIndex *LastCommonAncestor(CBlockIndex *pa, CBlockIndex *pb)
{
    // First bring both walkers down to the same height, then step both
    // back in lockstep until they meet.
    if (pa->nHeight > pb->nHeight)
    {
        pa = pa->GetAncestor(pb->nHeight);
    }
    else if (pb->nHeight > pa->nHeight)
    {
        pb = pb->GetAncestor(pa->nHeight);
    }

    while (pa != pb && pa && pb)
    {
        pa = pa->pprev;
        pb = pb->pprev;
    }

    // Eventually all chain branches meet at the genesis block.
    assert(pa == pb);
    return pa;
}

// True when the inventory item is any of the block-carrying message types
// (full, compact, xthin or graphene block).
static bool IsBlockType(const CInv &obj)
{
    return ((obj.type == MSG_BLOCK) || (obj.type == MSG_CMPCT_BLOCK) || (obj.type == MSG_XTHINBLOCK) ||
            (obj.type == MSG_GRAPHENEBLOCK));
}

// Constructor for CRequestManagerNodeState struct
CRequestManagerNodeState::CRequestManagerNodeState()
{
    nDownloadingSince = 0;
    nBlocksInFlight = 0;
    nNumRequests = 0;
    nLastRequest = 0;
}

CRequestManager::CRequestManager()
    : inFlightTxns("reqMgr/inFlight", STAT_OP_MAX), receivedTxns("reqMgr/received"), rejectedTxns("reqMgr/rejected"),
      droppedTxns("reqMgr/dropped", STAT_KEEP), pendingTxns("reqMgr/pending", STAT_KEEP),
      requestPacer(15000, 10000) // Max and average # of requests that can be made per second
{
    inFlight = 0;
    nOutbound = 0;

    sendIter = mapTxnInfo.end();
    sendBlkIter = mapBlkInfo.end();
}

// Drop every tracked txn and block request and reset all iterators/state.
void CRequestManager::Cleanup()
{
    LOCK(cs_objDownloader);
    sendIter = mapTxnInfo.end();
    sendBlkIter = mapBlkInfo.end();

    MapBlocksInFlightClear();

    OdMap::iterator i = mapTxnInfo.begin();
    while (i != mapTxnInfo.end())
    {
        auto prev = i;
        ++i;
        cleanup(prev); // cleanup erases which is why I need to advance the iterator first
    }

    i = mapBlkInfo.begin();
    while (i != mapBlkInfo.end())
    {
        auto prev = i;
        ++i;
        cleanup(prev); // cleanup erases which is why I need to advance the iterator first
    }
}

// Erase one entry from whichever map (txn or block) owns it, keeping the
// round-robin send iterators valid and the in-flight counters consistent.
// Requires cs_objDownloader.
void CRequestManager::cleanup(OdMap::iterator &itemIt)
{
    CUnknownObj &item = itemIt->second;

    // Because we'll ignore anything deleted from the map, reduce the # of requests in flight by every request we made
    // for this object
    inFlight -= item.outstandingReqs;
    // NOTE(review): when outstandingReqs == 0 this ADDS one to droppedTxns;
    // looks like it assumes at least one request was made — confirm intent.
    droppedTxns -= (item.outstandingReqs - 1);
    pendingTxns -= 1;

    // remove all the source nodes
    item.availableFrom.clear();

    if (item.obj.type == MSG_TX)
    {
        // Keep the round-robin cursor valid if it points at the erased entry.
        if (sendIter == itemIt)
            ++sendIter;
        mapTxnInfo.erase(itemIt);
    }
    else
    {
        if (sendBlkIter == itemIt)
            ++sendBlkIter;
        mapBlkInfo.erase(itemIt);
    }
}

// Get this object from somewhere, asynchronously.
void CRequestManager::AskFor(const CInv &obj, CNode *from, unsigned int priority)
{
    // LOG(REQ, "ReqMgr: Ask for %s.\n", obj.ToString().c_str());
    LOCK(cs_objDownloader);
    if (obj.type == MSG_TX)
    {
        // Don't allow the in flight requests to grow unbounded.
        if (mapTxnInfo.size() >= (size_t)(MAX_INV_SZ * 2 * GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE)))
        {
            LOG(REQ, "Tx request buffer full: Dropping request for %s", obj.hash.ToString());
            return;
        }

        uint256 temp = obj.hash;
        OdMap::value_type v(temp, CUnknownObj());
        std::pair<OdMap::iterator, bool> result = mapTxnInfo.insert(v);
        OdMap::iterator &item = result.first;
        CUnknownObj &data = item->second;
        data.obj = obj;
        if (result.second) // inserted
        {
            pendingTxns += 1;
            // all other fields are zeroed on creation
        }
        // else the txn already existed so nothing to do

        data.priority = max(priority, data.priority);
        // Got the data, now add the node as a source if we're not already processing
        // this txn. If we add more sources here while processing a txn then we could
        // end up with dangling noderefs when the peer tries to disconnect.
        if (!data.fProcessing)
            data.AddSource(from);
    }
    else if (IsBlockType(obj))
    {
        uint256 temp = obj.hash;
        OdMap::value_type v(temp, CUnknownObj());
        std::pair<OdMap::iterator, bool> result = mapBlkInfo.insert(v);
        OdMap::iterator &item = result.first;
        CUnknownObj &data = item->second;
        data.obj = obj;
        // if (result.second)  // means this was inserted rather than already existed
        // { } nothing to do
        data.priority = max(priority, data.priority);
        if (data.AddSource(from))
        {
            // LOG(BLK, "%s available at %s\n", obj.ToString().c_str(), from->addrName.c_str());
        }
    }
    else
    {
        DbgAssert(!"Request manager does not handle objects of this type", return );
    }
}

// Get these objects from somewhere, asynchronously.
void CRequestManager::AskFor(const std::vector<CInv> &objArray, CNode *from, unsigned int priority)
{
    // In order to maintain locking order, we must lock cs_objDownloader first and before possibly taking cs_vNodes.
    // Also, locking here prevents anyone from asking again for any of these objects again before we've notified the
    // request manager of them all. In addition this helps keep blocks batched and requests for batches of blocks
    // in a better order.
    LOCK(cs_objDownloader);
    for (auto &inv : objArray)
    {
        AskFor(inv, from, priority);
    }
}

// Ask for blocks during initial block download: register the preferred peer
// first, then add every other suitable connected peer as a backup source.
void CRequestManager::AskForDuringIBD(const std::vector<CInv> &objArray, CNode *from, unsigned int priority)
{
    // This is block and peer that was selected in FindNextBlocksToDownload() so we want to add it as a block
    // source first so that it gets requested first.
    if (from)
        AskFor(objArray, from, priority);

    // We can't hold cs_vNodes in the for loop below because it is out of order with cs_objDownloader which is
    // taken in ProcessBlockAvailability. We can't take cs_objDownloader earlier because it deadlocks with the
    // CNodeStateAccessor. So make a copy of vNodes here
    std::vector<CNode *> vNodesCopy;
    {
        LOCK(cs_vNodes);
        vNodesCopy = vNodes;
        for (CNode *pnode : vNodesCopy)
        {
            // Take a ref per copied node; released below once processed.
            pnode->AddRef();
        }
    }

    // Add the other peers as potential sources in the event the RequestManager needs to make a re-request
    // for this block. Only add NETWORK nodes that have block availability.
    for (CNode *pnode : vNodesCopy)
    {
        // skip the peer we added above and skip non NETWORK nodes
        if ((pnode == from) || (pnode->fClient))
        {
            pnode->Release();
            continue;
        }

        // Make sure pindexBestKnownBlock is up to date.
        ProcessBlockAvailability(pnode->id);

        // check block availability for this peer and only askfor a block if it is available.
        CNodeStateAccessor state(nodestate, pnode->id);
        if (state != nullptr)
        {
            if (state->pindexBestKnownBlock != nullptr &&
                state->pindexBestKnownBlock->nChainWork > chainActive.Tip()->nChainWork)
            {
                AskFor(objArray, pnode, priority);
            }
        }
        pnode->Release(); // Release the refs we took
    }
}

// True if the block hash is already tracked (i.e. we asked, or will ask, for it).
bool CRequestManager::AlreadyAskedForBlock(const uint256 &hash)
{
    LOCK(cs_objDownloader);
    OdMap::iterator item = mapBlkInfo.find(hash);
    if (item != mapBlkInfo.end())
        return true;

    return false;
}

// Record the request-to-response latency for a txn received from this peer.
void CRequestManager::UpdateTxnResponseTime(const CInv &obj, CNode *pfrom)
{
    int64_t now = GetStopwatchMicros();
    LOCK(cs_objDownloader);

    if (pfrom && obj.type == MSG_TX)
    {
        OdMap::iterator item = mapTxnInfo.find(obj.hash);
        if (item == mapTxnInfo.end())
            return;

        pfrom->txReqLatency << (now - item->second.lastRequestTime);
        receivedTxns += 1;
    }
}

// Mark a txn as entering validation so we stop re-requesting it.
void CRequestManager::ProcessingTxn(const uint256 &hash, CNode *pfrom)
{
    LOCK(cs_objDownloader);
    OdMap::iterator item = mapTxnInfo.find(hash);
    if (item == mapTxnInfo.end())
        return;

    item->second.fProcessing = true;
    LOG(REQ, "ReqMgr: Processing %s (received from %s).\n", item->second.obj.ToString(),
        pfrom ? pfrom->GetLogName() : "unknown");

    // As a last step we must clear all sources to release the noderef's. If we don't do this
    // then if the transaction ends up being a double spend, an orphan that is never reclaimed, or
    // perhaps some other validation failure, it would result in having dangling noderef's which then
    // prevent a node from fully disconnecting and thus preventing the CNode from calling its destructor.
    //
    // However in the case of blocks we don't do this because if a block fails to validate we
    // reset the fProcessing flag to false so that we can get another block and check its validity.
    // This is so that we can prevent a DOS attack where a corrupted block is fed to us in order
    // to prevent us from downloading the good block.
    item->second.availableFrom.clear();
}

// Mark a block as entering validation; sources are deliberately kept (see above).
void CRequestManager::ProcessingBlock(const uint256 &hash, CNode *pfrom)
{
    LOCK(cs_objDownloader);
    OdMap::iterator item = mapBlkInfo.find(hash);
    if (item == mapBlkInfo.end())
        return;

    item->second.fProcessing = true;
    LOG(BLK, "ReqMgr: Processing %s (received from %s).\n", item->second.obj.ToString(),
        pfrom ? pfrom->GetLogName() : "unknown");
}

// This block has failed to be accepted so in case this is some sort of attack block
// we need to set the fProcessing flag back to false.
//
// We don't have to remove the source because it would have already been removed if/when we
// requested the block and if this was an unsolicited block or attack block then the source
// would never have been added to the request manager.
void CRequestManager::BlockRejected(const CInv &obj, CNode *pfrom)
{
    LOCK(cs_objDownloader);
    OdMap::iterator item = mapBlkInfo.find(obj.hash);
    if (item == mapBlkInfo.end())
        return;
    item->second.fProcessing = false;
}

// Indicate that we got this object.
void CRequestManager::Received(const CInv &obj, CNode *pfrom)
{
    LOCK(cs_objDownloader);
    if (obj.type == MSG_TX)
    {
        OdMap::iterator item = mapTxnInfo.find(obj.hash);
        if (item == mapTxnInfo.end())
            return;
        LOG(REQ, "ReqMgr: TX received for %s.\n", item->second.obj.ToString().c_str());
        cleanup(item);
    }
    else if (IsBlockType(obj))
    {
        OdMap::iterator item = mapBlkInfo.find(obj.hash);
        if (item == mapBlkInfo.end())
            return;
        LOG(BLK, "%s removed from request queue (received from %s).\n", item->second.obj.ToString().c_str(),
            pfrom ? pfrom->GetLogName() : "unknown");
        cleanup(item);
    }
}

// Indicate that we got this object.
void CRequestManager::AlreadyReceived(CNode *pnode, const CInv &obj)
{
    LOCK(cs_objDownloader);
    // Look in the txn map first; fall back to the block map.
    OdMap::iterator item = mapTxnInfo.find(obj.hash);
    if (item == mapTxnInfo.end())
    {
        item = mapBlkInfo.find(obj.hash);
        if (item == mapBlkInfo.end())
            return; // Not in any map
    }
    LOG(REQ, "ReqMgr: Already received %s. Removing request.\n", item->second.obj.ToString().c_str());

    // If we have it already make sure to mark it as received here or we'll end up disconnecting this
    // peer later when we think this block download attempt has timed out.
    MarkBlockAsReceived(obj.hash, pnode);

    // will be decremented in the item cleanup: if (inFlight) inFlight--;
    cleanup(item); // remove the item
}

// Indicate that we got this object, from and bytes are optional (for node performance tracking)
void CRequestManager::Rejected(const CInv &obj, CNode *from, unsigned char reason)
{
    LOCK(cs_objDownloader);
    OdMap::iterator item;
    if (obj.type == MSG_TX)
    {
        item = mapTxnInfo.find(obj.hash);
        if (item == mapTxnInfo.end())
        {
            LOG(REQ, "ReqMgr: Item already removed. Unknown txn rejected %s\n", obj.ToString().c_str());
            return;
        }
        if (inFlight)
            inFlight--;
        if (item->second.outstandingReqs)
            item->second.outstandingReqs--;

        rejectedTxns += 1;
    }
    else if (IsBlockType(obj))
    {
        // NOTE(review): unlike the txn path, inFlight/outstandingReqs are NOT
        // decremented for rejected blocks — confirm whether that is handled
        // elsewhere (e.g. in cleanup/MarkBlockAsReceived) or is an oversight.
        item = mapBlkInfo.find(obj.hash);
        if (item == mapBlkInfo.end())
        {
            LOG(REQ, "ReqMgr: Item already removed. Unknown block rejected %s\n", obj.ToString().c_str());
            return;
        }
    }

    // Per-reason handling; most codes intentionally fall through with no action.
    if (reason == REJECT_MALFORMED)
    {
    }
    else if (reason == REJECT_INVALID)
    {
    }
    else if (reason == REJECT_OBSOLETE)
    {
    }
    else if (reason == REJECT_CHECKPOINT)
    {
    }
    else if (reason == REJECT_INSUFFICIENTFEE)
    {
        item->second.rateLimited = true;
    }
    else if (reason == REJECT_DUPLICATE)
    {
        // TODO figure out why this might happen.
    }
    else if (reason == REJECT_NONSTANDARD)
    {
        // Not going to be in any memory pools... does the TX request also look in blocks?
        // TODO remove from request manager (and mark never receivable?)
        // TODO verify that the TX request command also looks in blocks?
    }
    else if (reason == REJECT_DUST)
    {
    }
    else
    {
        LOG(REQ, "ReqMgr: Unknown TX rejection code [0x%x].\n", reason);
        // assert(0); // TODO
    }
}

// Score a candidate source node: thin-block capability is strongly preferred
// once the chain is (nearly) synced, then lower observed latency wins.
CNodeRequestData::CNodeRequestData(CNodeRef n)
{
    noderef = n;
    requestCount = 0;
    desirability = 0;

    const int MaxLatency = 10 * 1000 * 1000; // After 10 seconds latency I don't care

    // Calculate how much we like this node:

    // Prefer thin block nodes over low latency ones when the chain is syncd
    if (noderef.get()->ThinBlockCapable() && IsChainNearlySyncd())
    {
        desirability += MaxLatency;
    }

    // The bigger the latency (in microseconds), the less we want to request from this node
    int latency = noderef.get()->txReqLatency.GetTotalTyped();
    // data has never been requested from this node. Should we encourage investigation into whether this node is fast,
    // or stick with nodes that we do have data on?
    if (latency == 0)
    {
        latency = 80 * 1000; // assign it a reasonably average latency (80ms) for sorting purposes
    }
    if (latency > MaxLatency)
        latency = MaxLatency;
    desirability -= latency;
}

// requires cs_objDownloader
// Insert `from` into availableFrom, keeping the list sorted by descending
// desirability. Returns false if the node is already listed.
bool CUnknownObj::AddSource(CNode *from)
{
    // node is not in the request list
    if (std::find_if(availableFrom.begin(), availableFrom.end(), MatchCNodeRequestData(from)) == availableFrom.end())
    {
        LOG(REQ, "AddSource %s is available at %s.\n", obj.ToString(), from->GetLogName());
        CNodeRef noderef(from);
        CNodeRequestData req(noderef);
        for (ObjectSourceList::iterator i = availableFrom.begin(); i != availableFrom.end(); ++i)
        {
            if (i->desirability < req.desirability)
            {
                availableFrom.insert(i, req);
                return true;
            }
        }
        availableFrom.push_back(req);
        return true;
    }
    return false;
}

// Re-queue a block whose downloaded payload turned out to be corrupt.
void CRequestManager::RequestCorruptedBlock(const uint256 &blockHash)
{
    // set it to MSG_BLOCK here but it should get overwritten in RequestBlock
    CInv obj(MSG_BLOCK, blockHash);
    std::vector<CInv> vGetBlocks;
    vGetBlocks.push_back(obj);
    AskForDuringIBD(vGetBlocks, nullptr);
}

// True when graphene version negotiation with this peer succeeds.
static bool IsGrapheneVersionSupported(CNode *pfrom)
{
    try
    {
        NegotiateGrapheneVersion(pfrom);
        return true;
    }
    catch (const std::runtime_error &error)
    {
        return false;
    }
}

// Send the actual network request for a block, preferring (in order)
// graphene, xthin, compact, and finally a full block once the thin-block
// preference timer expires. Returns true if a request was sent.
bool CRequestManager::RequestBlock(CNode *pfrom, CInv obj)
{
    CInv inv2(obj);
    CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);

    if (IsChainNearlySyncd() &&
        (!thinrelay.HasBlockRelayTimerExpired(obj.hash) || !thinrelay.IsBlockRelayTimerEnabled()))
    {
        // Ask for Graphene blocks
        // Must download a graphene block from a graphene enabled peer.
        if (IsGrapheneBlockEnabled() && pfrom->GrapheneCapable() && IsGrapheneVersionSupported(pfrom))
        {
            if (thinrelay.AddBlockInFlight(pfrom, inv2.hash, NetMsgType::GRAPHENEBLOCK))
            {
                MarkBlockAsInFlight(pfrom->GetId(), obj.hash);

                // Instead of building a bloom filter here as we would for an xthin, we actually
                // just need to fill in CMempoolInfo
                inv2.type = MSG_GRAPHENEBLOCK;
                CMemPoolInfo receiverMemPoolInfo = GetGrapheneMempoolInfo();
                ss << inv2;
                ss << receiverMemPoolInfo;
                graphenedata.UpdateOutBoundMemPoolInfo(
                    ::GetSerializeSize(receiverMemPoolInfo, SER_NETWORK, PROTOCOL_VERSION));

                pfrom->PushMessage(NetMsgType::GET_GRAPHENE, ss);
                LOG(GRAPHENE, "Requesting graphene block %s from peer %s\n", inv2.hash.ToString(),
                    pfrom->GetLogName());
                logFile("GRPHNBLCKREQSENT -- graphene block " + obj.hash.ToString() + " request of size " +
                        std::to_string(::GetSerializeSize(ss, SER_NETWORK, PROTOCOL_VERSION)) +
                        " (bytes), mempool size " + std::to_string(receiverMemPoolInfo.nTx) + " txs sent to peer " +
                        pfrom->GetLogName());
                return true;
            }
        }

        // Ask for an xthin if Graphene is not possible.
        // Must download an xthinblock from a xthin peer.
        if (IsThinBlocksEnabled() && pfrom->ThinBlockCapable())
        {
            if (thinrelay.AddBlockInFlight(pfrom, inv2.hash, NetMsgType::XTHINBLOCK))
            {
                MarkBlockAsInFlight(pfrom->GetId(), obj.hash);

                // Seed the bloom filter with our mempool plus orphan pool so the peer
                // only sends transactions we are missing.
                CBloomFilter filterMemPool;
                inv2.type = MSG_XTHINBLOCK;
                std::vector<uint256> vOrphanHashes;
                {
                    READLOCK(orphanpool.cs_orphanpool);
                    for (auto &mi : orphanpool.mapOrphanTransactions)
                        vOrphanHashes.emplace_back(mi.first);
                }
                BuildSeededBloomFilter(filterMemPool, vOrphanHashes, inv2.hash, pfrom);
                ss << inv2;
                ss << filterMemPool;

                pfrom->PushMessage(NetMsgType::GET_XTHIN, ss);
                LOG(THIN, "Requesting xthinblock %s from peer %s\n", inv2.hash.ToString(), pfrom->GetLogName());
                return true;
            }
        }

        // Ask for a compact block if Graphene or xthin is not possible.
        // Must download a compact block from a compact-block capable peer.
        if (IsCompactBlocksEnabled() && pfrom->CompactBlockCapable())
        {
            if (thinrelay.AddBlockInFlight(pfrom, inv2.hash, NetMsgType::CMPCTBLOCK))
            {
                MarkBlockAsInFlight(pfrom->GetId(), obj.hash);

                std::vector<CInv> vGetData;
                inv2.type = MSG_CMPCT_BLOCK;
                vGetData.push_back(inv2);
                pfrom->PushMessage(NetMsgType::GETDATA, vGetData);
                LOG(CMPCT, "Requesting compact block %s from peer %s\n", inv2.hash.ToString(), pfrom->GetLogName());
                logFile("CMPCTBLCKREQSENT -- compact block " + obj.hash.ToString() + " request of size " +
                        std::to_string(::GetSerializeSize(vGetData, SER_NETWORK, PROTOCOL_VERSION)) +
                        " (bytes) sent to peer " + pfrom->GetLogName());
                return true;
            }
        }
    }

    // Request a full block if the BlockRelayTimer has expired.
    if (!IsChainNearlySyncd() || thinrelay.HasBlockRelayTimerExpired(obj.hash) ||
        !thinrelay.IsBlockRelayTimerEnabled())
    {
        std::vector<CInv> vToFetch;
        inv2.type = MSG_BLOCK;
        vToFetch.push_back(inv2);
        MarkBlockAsInFlight(pfrom->GetId(), obj.hash);

        pfrom->PushMessage(NetMsgType::GETDATA, vToFetch);
        LOG(THIN | GRAPHENE | CMPCT, "Requesting Regular Block %s from peer %s\n", inv2.hash.ToString(),
            pfrom->GetLogName());
        logFile("NORMALBLCKREQSENT -- normal block " + obj.hash.ToString() + " request of size " +
                std::to_string(::GetSerializeSize(vToFetch, SER_NETWORK, PROTOCOL_VERSION)) + " (bytes) sent to peer " +
                pfrom->GetLogName());
        return true;
    }
    return false; // no block was requested
}

// Forget the last request time for a block so SendRequests re-requests it
// immediately on its next pass.
void CRequestManager::ResetLastBlockRequestTime(const uint256 &hash)
{
    LOCK(cs_objDownloader);
    // NOTE(review): the initializer from sendBlkIter is a dead store — it is
    // overwritten by the find() on the next line.
    OdMap::iterator itemIter = sendBlkIter;
    itemIter = mapBlkInfo.find(hash);
    if (itemIter != mapBlkInfo.end())
    {
        CUnknownObj &item = itemIter->second;
        item.outstandingReqs--;
        item.lastRequestTime = 0;
    }
}

// Strict-weak ordering on the underlying CNode pointer, used to key the
// batched-request maps below.
struct CompareIteratorByNodeRef
{
    bool operator()(const CNodeRef &a, const CNodeRef &b) const { return a.get() < b.get(); }
};

// Main request pump: walks the block map then the txn map, issuing (or
// batching) requests and retrying items whose retry interval has elapsed.
void CRequestManager::SendRequests()
{
    int64_t now = 0;

    // TODO: if a node goes offline, rerequest txns from someone else and cleanup references right away
    LOCK(cs_objDownloader);
    if (sendBlkIter == mapBlkInfo.end())
        sendBlkIter = mapBlkInfo.begin();

    // Modify retry interval. If we're doing IBD or if Traffic Shaping is ON we want to have a longer interval because
    // those blocks and txns can take much longer to download.
    unsigned int _blkReqRetryInterval = MIN_BLK_REQUEST_RETRY_INTERVAL;
    unsigned int _txReqRetryInterval = MIN_TX_REQUEST_RETRY_INTERVAL;
    if (IsTrafficShapingEnabled())
    {
        _blkReqRetryInterval *= 6;
        _txReqRetryInterval *= (12 * 2);
    }
    else if ((!IsChainNearlySyncd() && Params().NetworkIDString() != "regtest"))
    {
        _blkReqRetryInterval *= 2;
        _txReqRetryInterval *= 8;
    }

    // When we are still doing an initial sync we want to batch request the blocks instead of just
    // asking for one at time. We can do this because there will be no XTHIN requests possible during
    // this time.
    bool fBatchBlockRequests = IsInitialBlockDownload();
    std::map<CNodeRef, std::vector<CInv>, CompareIteratorByNodeRef> mapBatchBlockRequests;

    // Batch any transaction requests when possible. The process of batching and requesting batched transactions
    // is similar to batched block requests, however, we don't make the distinction of whether we're in the process
    // of syncing the chain, as we do with block requests.
    std::map<CNodeRef, std::vector<CInv>, CompareIteratorByNodeRef> mapBatchTxnRequests;

    // Get Blocks
    while (sendBlkIter != mapBlkInfo.end())
    {
        now = GetStopwatchMicros();
        OdMap::iterator itemIter = sendBlkIter;
        if (itemIter == mapBlkInfo.end())
            break;

        ++sendBlkIter; // move it forward up here in case we need to erase the item we are working with.
        CUnknownObj &item = itemIter->second;

        // If we've already received the item and it's in processing then skip it here so we don't
        // end up re-requesting it again.
        if (item.fProcessing)
            continue;

        // if never requested then lastRequestTime==0 so this will always be true
        if (now - item.lastRequestTime > _blkReqRetryInterval)
        {
            if (!item.availableFrom.empty())
            {
                CNodeRequestData next;
                // Go thru the availableFrom list, looking for the first node that isn't disconnected
                while (!item.availableFrom.empty() && (next.noderef.get() == nullptr))
                {
                    next = item.availableFrom.front(); // Grab the next location where we can find this object.
                    item.availableFrom.pop_front();
                    if (next.noderef.get() != nullptr)
                    {
                        // Do not request from this node if it was disconnected
                        if (next.noderef.get()->fDisconnect)
                        {
                            // NOTE(review): explicit destructor call without placement-new
                            // reconstruction; the object is later assigned/destroyed again.
                            // This pattern is used throughout to drop the ref — verify it is
                            // safe for CNodeRef's implementation.
                            next.noderef.~CNodeRef(); // force the loop to get another node
                        }
                    }
                }

                if (next.noderef.get() != nullptr)
                {
                    // If item.lastRequestTime is true then we've requested at least once and we'll try a re-request
                    if (item.lastRequestTime)
                    {
                        LOG(REQ, "Block request timeout for %s.  Retrying\n", item.obj.ToString().c_str());
                    }

                    CInv obj = item.obj;
                    item.outstandingReqs++;
                    int64_t then = item.lastRequestTime;
                    item.lastRequestTime = now;

                    bool fReqBlkResult = false;
                    if (fBatchBlockRequests)
                    {
                        mapBatchBlockRequests[next.noderef].emplace_back(obj);
                    }
                    else
                    {
                        LEAVE_CRITICAL_SECTION(cs_objDownloader); // item and itemIter are now invalid
                        fReqBlkResult = RequestBlock(next.noderef.get(), obj);
                        ENTER_CRITICAL_SECTION(cs_objDownloader);

                        if (!fReqBlkResult)
                        {
                            // having released cs_objDownloader, item and itemiter may be invalid.
                            // So in the rare case that we could not request the block we need to
                            // find the item again (if it exists) and set the tracking back to what it was
                            itemIter = mapBlkInfo.find(obj.hash);
                            if (itemIter != mapBlkInfo.end())
                            {
                                // NOTE(review): `item` is a reference bound before the lock was
                                // released; this assignment copies the found entry THROUGH that
                                // (possibly dangling) reference rather than rebinding it. Looks
                                // like a latent bug — confirm against upstream.
                                item = itemIter->second;
                                item.outstandingReqs--;
                                item.lastRequestTime = then;
                            }
                        }
                    }

                    // If there was a request then release the ref otherwise put the item back into the list so
                    // we don't lose the block source.
                    if (fReqBlkResult)
                    {
                        next.noderef.~CNodeRef();
                    }
                    else
                    {
                        // We never asked for the block, typically because the graphene block timer hasn't timed out
                        // yet but we only have sources for an xthinblock. When this happens we add the node back to
                        // the end of the list so that we don't lose the source, when/if the graphene timer has
                        // a time out and we are then ready to ask for an xthinblock.
                        item.availableFrom.push_back(next);
                    }
                }
                else
                {
                    // We requested from all available sources so remove the source. This should not
                    // happen and would indicate some other problem.
                    LOG(REQ, "Block %s has no sources. Removing\n", item.obj.ToString());
                    cleanup(itemIter);
                }
            }
            else
            {
                // There can be no block sources because a node dropped out. In this case, nothing can be done so
                // remove the item.
                LOG(REQ, "Block %s has no available sources. Removing\n", item.obj.ToString());
                cleanup(itemIter);
            }
        }
    }

    // send batched requests if any.
    if (fBatchBlockRequests && !mapBatchBlockRequests.empty())
    {
        LEAVE_CRITICAL_SECTION(cs_objDownloader);
        {
            for (auto iter : mapBatchBlockRequests)
            {
                for (auto &inv : iter.second)
                {
                    MarkBlockAsInFlight(iter.first.get()->GetId(), inv.hash);
                }
                iter.first.get()->PushMessage(NetMsgType::GETDATA, iter.second);
                LOG(REQ, "Sent batched request with %d blocks to node %s\n", iter.second.size(),
                    iter.first.get()->GetLogName());
            }
        }
        ENTER_CRITICAL_SECTION(cs_objDownloader);
        mapBatchBlockRequests.clear();
    }

    // Get Transactions
    if (sendIter == mapTxnInfo.end())
        sendIter = mapTxnInfo.begin();
    while ((sendIter != mapTxnInfo.end()) && requestPacer.try_leak(1))
    {
        now = GetStopwatchMicros();
        OdMap::iterator itemIter = sendIter;
        if (itemIter == mapTxnInfo.end())
            break;

        ++sendIter; // move it forward up here in case we need to erase the item we are working with.
        CUnknownObj &item = itemIter->second;

        // If we've already received the item and it's in processing then skip it here so we don't
        // end up re-requesting it again.
        if (item.fProcessing)
            continue;

        // if never requested then lastRequestTime==0 so this will always be true
        if (now - item.lastRequestTime > _txReqRetryInterval)
        {
            if (!item.rateLimited)
            {
                // If item.lastRequestTime is true then we've requested at least once, so this is a rerequest -> a txn
                // request was dropped.
                if (item.lastRequestTime)
                {
                    LOG(REQ, "Request timeout for %s.  Retrying\n", item.obj.ToString().c_str());
                    // Not reducing inFlight; it's still outstanding and will be cleaned up when
                    // item is removed from map.
                    // Note we can never be sure its really dropped verses just delayed for a long
                    // time so this is not authoritative.
                    droppedTxns += 1;
                }

                if (item.availableFrom.empty())
                {
                    // There can be no block sources because a node dropped out. In this case, nothing can be done so
                    // remove the item.
                    LOG(REQ, "Tx has no sources for %s. Removing\n", item.obj.ToString().c_str());
                    cleanup(itemIter);
                }
                else // Ok, we have at least one source so request this item.
                {
                    CNodeRequestData next;
                    // Go thru the availableFrom list, looking for the first node that isn't disconnected
                    while (!item.availableFrom.empty() && (next.noderef.get() == nullptr))
                    {
                        next = item.availableFrom.front(); // Grab the next location where we can find this object.
                        item.availableFrom.pop_front();
                        if (next.noderef.get() != nullptr)
                        {
                            if (next.noderef.get()->fDisconnect) // Node was disconnected so we can't request from it
                            {
                                next.noderef.~CNodeRef(); // force the loop to get another node
                            }
                        }
                    }

                    if (next.noderef.get() != nullptr)
                    {
                        // This commented code skips requesting TX if the node is not synced. The request
                        // manager should not make this decision but rather the caller should not give us the TX.
                        if (1)
                        {
                            item.outstandingReqs++;
                            item.lastRequestTime = now;

                            mapBatchTxnRequests[next.noderef].emplace_back(item.obj);

                            // If we have 1000 requests for this peer then send them right away.
                            if (mapBatchTxnRequests[next.noderef].size() >= 1000)
                            {
                                LEAVE_CRITICAL_SECTION(cs_objDownloader);
                                {
                                    next.noderef.get()->PushMessage(
                                        NetMsgType::GETDATA, mapBatchTxnRequests[next.noderef]);
                                    LOG(REQ, "Sent batched request with %d transations to node %s\n",
                                        mapBatchTxnRequests[next.noderef].size(), next.noderef.get()->GetLogName());
                                }
                                ENTER_CRITICAL_SECTION(cs_objDownloader);

                                mapBatchTxnRequests.erase(next.noderef);
                            }

                            // Now that we've completed setting up our request for this transaction
                            // we're done with this node, for this item, and can delete it.
                            next.noderef.~CNodeRef();
                        }
                        inFlight++;
                        inFlightTxns << inFlight;
                    }
                    else
                    {
                        // We requested from all available sources so remove the source. This should not
                        // happen and would indicate some other problem.
                        LOG(REQ, "Tx has no sources for %s. Removing\n", item.obj.ToString().c_str());
                        cleanup(itemIter);
                    }
                }
            }
        }
    }

    // send batched requests if any.
    if (!mapBatchTxnRequests.empty())
    {
        LEAVE_CRITICAL_SECTION(cs_objDownloader);
        {
            for (auto iter : mapBatchTxnRequests)
            {
                iter.first.get()->PushMessage(NetMsgType::GETDATA, iter.second);
                LOG(REQ, "Sent batched request with %d transations to node %s\n", iter.second.size(),
                    iter.first.get()->GetLogName());
            }
        }
        ENTER_CRITICAL_SECTION(cs_objDownloader);
        mapBatchTxnRequests.clear();
    }
}

// Rate-limit thin-type object requests per peer: too many requests within the
// decay window gets the peer disconnected (mainnet only).
bool CRequestManager::CheckForRequestDOS(CNode *pfrom, const CChainParams &chainparams)
{
    // Check for Misbehaving and DOS
    // If they make more than MAX_THINTYPE_OBJECT_REQUESTS requests in 10 minutes then assign misbehavior points.
    //
    // Other networks have variable mining rates, so only apply these rules to mainnet only.
if (chainparams.NetworkIDString() == "main") { LOCK(cs_objDownloader); std::map<NodeId, CRequestManagerNodeState>::iterator it = mapRequestManagerNodeState.find(pfrom->GetId()); DbgAssert(it != mapRequestManagerNodeState.end(), return false); CRequestManagerNodeState *state = &it->second; // First decay the previous value uint64_t nNow = GetTime(); state->nNumRequests = std::pow(1.0 - 1.0 / 600.0, (double)(nNow - state->nLastRequest)); // Now add one request and update the time state->nNumRequests++; state->nLastRequest = nNow; if (state->nNumRequests >= MAX_THINTYPE_OBJECT_REQUESTS) { pfrom->fDisconnect = true; return error("Disconnecting %s. Making too many (%f) thin object requests.", pfrom->GetLogName(), state->nNumRequests); } } return true; } // Check whether the last unknown block a peer advertised is not yet known. void CRequestManager::ProcessBlockAvailability(NodeId nodeid) { CNodeStateAccessor state(nodestate, nodeid); DbgAssert(state != nullptr, return ); if (!state->hashLastUnknownBlock.IsNull()) { auto *pindex = LookupBlockIndex(state->hashLastUnknownBlock); if (pindex && pindex->nChainWork > 0) { if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = pindex; } state->hashLastUnknownBlock.SetNull(); } } } // Update tracking information about which blocks a peer is assumed to have. void CRequestManager::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { auto *pindex = LookupBlockIndex(hash); CNodeStateAccessor state(nodestate, nodeid); DbgAssert(state != nullptr, return ); ProcessBlockAvailability(nodeid); if (pindex && pindex->nChainWork > 0) { // An actually better block was announced. if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = pindex; } } else { // An unknown block was announced; just assume that the latest one is the best one. 
state->hashLastUnknownBlock = hash; } } void CRequestManager::RequestNextBlocksToDownload(CNode *pto) { AssertLockHeld(cs_main); int nBlocksInFlight = 0; { LOCK(cs_objDownloader); nBlocksInFlight = mapRequestManagerNodeState[pto->GetId()].nBlocksInFlight; } if (!pto->fDisconnectRequest && !pto->fDisconnect && !pto->fClient && nBlocksInFlight < (int)pto->nMaxBlocksInTransit) { std::vector<CBlockIndex *> vToDownload; FindNextBlocksToDownload(pto, pto->nMaxBlocksInTransit.load() - nBlocksInFlight, vToDownload); // LOG(REQ, "IBD AskFor %d blocks from peer=%s\n", vToDownload.size(), pto->GetLogName()); std::vector<CInv> vGetBlocks; for (CBlockIndex *pindex : vToDownload) { CInv inv(MSG_BLOCK, pindex->GetBlockHash()); if (!AlreadyHaveBlock(inv)) { vGetBlocks.emplace_back(inv); // LOG(REQ, "AskFor block %s (%d) peer=%s\n", pindex->GetBlockHash().ToString(), // pindex->nHeight, pto->GetLogName()); } } if (!vGetBlocks.empty()) { std::vector<CInv> vToFetchNew; { LOCK(cs_objDownloader); for (CInv &inv : vGetBlocks) { // If this block is already in flight then don't ask for it again during the IBD process. // // If it's an additional source for a new peer then it would have been added already in // FindNextBlocksToDownload(). std::map<uint256, std::map<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(inv.hash); if (itInFlight != mapBlocksInFlight.end()) { continue; } vToFetchNew.push_back(inv); } } vGetBlocks.swap(vToFetchNew); if (!IsInitialBlockDownload()) { AskFor(vGetBlocks, pto); } else { AskForDuringIBD(vGetBlocks, pto); } } } } // Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has // at most count entries. void CRequestManager::FindNextBlocksToDownload(CNode *node, unsigned int count, std::vector<CBlockIndex *> &vBlocks) { if (count == 0) return; NodeId nodeid = node->GetId(); vBlocks.reserve(vBlocks.size() + count); // Make sure pindexBestKnownBlock is up to date, we'll need it. 
ProcessBlockAvailability(nodeid); CNodeStateAccessor state(nodestate, nodeid); DbgAssert(state != nullptr, return ); LOCK(cs_main); if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) { // This peer has nothing interesting. return; } if (state->pindexLastCommonBlock == nullptr) { // Bootstrap quickly by guessing a parent of our best tip is the forking point. // Guessing wrong in either direction is not a problem. state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())]; } // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor // of its current tip anymore. Go back enough to fix that. state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock); if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) return; std::vector<CBlockIndex *> vToFetch; CBlockIndex *pindexWalk = state->pindexLastCommonBlock; // Never fetch further than the current chain tip + the block download window. We need to ensure // the if running in pruning mode we don't download too many blocks ahead and as a result use to // much disk space to store unconnected blocks. int nWindowEnd = chainActive.Height() + BLOCK_DOWNLOAD_WINDOW.load(); int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); while (pindexWalk->nHeight < nMaxHeight) { // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive // as iterating over ~100 CBlockIndex* entries anyway. 
        // NOTE(review): count - vBlocks.size() is evaluated in unsigned arithmetic; it relies on
        // vBlocks.size() never exceeding count (the loop below returns once they are equal) -- confirm.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        // Walk backwards through pprev to fill in the nToFetch predecessors.
        for (unsigned int i = nToFetch - 1; i > 0; i--)
        {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the mean time, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
        // already part of our chain (and therefore don't need it even if pruned).
        for (CBlockIndex *pindex : vToFetch)
        {
            uint256 blockHash = pindex->GetBlockHash();
            if (AlreadyAskedForBlock(blockHash))
            {
                // Only add a new source if there is a block in flight from a different peer. This prevents
                // us from re-adding a source for the same peer and possibly downloading two duplicate blocks.
                // This edge condition can typically happen when we were only connected to only one peer and we
                // exceed the download timeout causing us to re-request the same block from the same peer.
                std::map<uint256, std::map<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight =
                    mapBlocksInFlight.find(blockHash);
                if (itInFlight != mapBlocksInFlight.end() && !itInFlight->second.count(nodeid))
                {
                    AskFor(CInv(MSG_BLOCK, blockHash), node); // Add another source
                    continue;
                }
            }

            if (!pindex->IsValid(BLOCK_VALID_TREE))
            {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex))
            {
                if (pindex->nChainTx)
                    state->pindexLastCommonBlock = pindex;
            }
            else
            {
                // Return if we've reached the end of the download window.
                if (pindex->nHeight > nWindowEnd)
                {
                    return;
                }

                // Return if we've reached the end of the number of blocks we can download for this peer.
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count)
                {
                    return;
                }
            }
        }
    }
}

// Request a mempool synchronization from this peer, if the peer supports it and
// enough time has elapsed since the last request to that peer.
void CRequestManager::RequestMempoolSync(CNode *pto)
{
    LOCK(cs_mempoolsync);
    NodeId nodeId = pto->GetId();
    if ((mempoolSyncRequested.count(nodeId) == 0 ||
            ((GetStopwatchMicros() - mempoolSyncRequested[nodeId].lastUpdated) > MEMPOOLSYNC_FREQ_US)) &&
        pto->canSyncMempoolWithPeers)
    {
        // Similar to Graphene, receiver must send CMempoolInfo
        CMempoolSyncInfo receiverMemPoolInfo = GetMempoolSyncInfo();
        mempoolSyncRequested[nodeId] = CMempoolSyncState(
            GetStopwatchMicros(), receiverMemPoolInfo.shorttxidk0, receiverMemPoolInfo.shorttxidk1, false);
        if (NegotiateMempoolSyncVersion(pto) > 0)
            pto->PushMessage(NetMsgType::GET_MEMPOOLSYNC, receiverMemPoolInfo);
        else
        {
            // Older negotiated sync versions expect a CInv prefix before the mempool info.
            CInv inv;
            CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
            ss << inv;
            ss << receiverMemPoolInfo;
            pto->PushMessage(NetMsgType::GET_MEMPOOLSYNC, ss);
        }
        LOG(MPOOLSYNC, "Requesting mempool synchronization from peer %s\n", pto->GetLogName());
        lastMempoolSync = GetStopwatchMicros();
    }
}

// Mark a block as in flight for the given node id and start the download timer
// for that node if this is its first in-flight block.
void CRequestManager::MarkBlockAsInFlight(NodeId nodeid, const uint256 &hash)
{
    // If started then clear the timers used for preferential downloading
    thinrelay.ClearBlockRelayTimer(hash);

    // Add to inflight, if it hasn't already been marked inflight for this node id.
    LOCK(cs_objDownloader);
    std::map<uint256, std::map<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight =
        mapBlocksInFlight.find(hash);
    if (itInFlight == mapBlocksInFlight.end() || !itInFlight->second.count(nodeid))
    {
        // Get a request manager nodestate pointer.
        std::map<NodeId, CRequestManagerNodeState>::iterator it = mapRequestManagerNodeState.find(nodeid);
        DbgAssert(it != mapRequestManagerNodeState.end(), return );
        CRequestManagerNodeState *state = &it->second;

        // Add queued block to nodestate and add iterator for queued block to mapBlocksInFlight
        int64_t nNow = GetStopwatchMicros();
        QueuedBlock newentry = {hash, nNow};
        std::list<QueuedBlock>::iterator it2 = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
        mapBlocksInFlight[hash][nodeid] = it2;

        // Increment blocks in flight for this node and if applicable the time we started downloading.
        state->nBlocksInFlight++;
        if (state->nBlocksInFlight == 1)
        {
            // We're starting a block download (batch) from this peer.
            state->nDownloadingSince = GetStopwatchMicros();
        }
    }
}

// Mark a block we requested from this peer as received. Returns true when an
// in-flight entry for (hash, node) existed and was removed; false otherwise.
// As a side effect updates the peer's average block response time and resizes
// its in-transit window accordingly.
bool CRequestManager::MarkBlockAsReceived(const uint256 &hash, CNode *pnode)
{
    if (!pnode)
        return false;

    LOCK(cs_objDownloader);
    NodeId nodeid = pnode->GetId();

    // Check if we have any block in flight, for this hash, that we asked for.
    std::map<uint256, std::map<NodeId, std::list<QueuedBlock>::iterator> >::iterator itHash =
        mapBlocksInFlight.find(hash);
    if (itHash == mapBlocksInFlight.end())
        return false;

    // Lookup this block for this nodeid and if we have one in flight then mark it as received.
    std::map<NodeId, std::list<QueuedBlock>::iterator>::iterator itInFlight = itHash->second.find(nodeid);
    if (itInFlight != itHash->second.end())
    {
        // Get a request manager nodestate pointer.
        std::map<NodeId, CRequestManagerNodeState>::iterator it = mapRequestManagerNodeState.find(nodeid);
        DbgAssert(it != mapRequestManagerNodeState.end(), return false);
        CRequestManagerNodeState *state = &it->second;

        // Response time in seconds for this block, from getdata to receipt.
        int64_t getdataTime = itInFlight->second->nTime;
        int64_t now = GetStopwatchMicros();
        double nResponseTime = (double)(now - getdataTime) / 1000000.0;

        // calculate avg block response time over a range of blocks to be used for IBD tuning.
        // Exponential-moving-average window: the last ~50 blocks dominate the average.
        uint8_t blockRange = 50;
        {
            LOCK(pnode->cs_nAvgBlkResponseTime);
            if (pnode->nAvgBlkResponseTime < 0)
                pnode->nAvgBlkResponseTime = 0.0;
            if (pnode->nAvgBlkResponseTime > 0)
                pnode->nAvgBlkResponseTime -= (pnode->nAvgBlkResponseTime / blockRange);
            pnode->nAvgBlkResponseTime += nResponseTime / blockRange;

            // Protect nOverallAverageResponseTime and nIterations with cs_overallaverage.
            static CCriticalSection cs_overallaverage;
            static double nOverallAverageResponseTime = 00.0;
            static uint32_t nIterations = 0;

            // Get the average value for overall average response time (s) of all nodes.
            {
                LOCK(cs_overallaverage);
                uint32_t nOverallRange = blockRange * nMaxOutConnections;
                if (nIterations <= nOverallRange)
                    nIterations++;

                if (nOverallRange > 0)
                {
                    if (nIterations > nOverallRange)
                    {
                        nOverallAverageResponseTime -= (nOverallAverageResponseTime / nOverallRange);
                    }
                    nOverallAverageResponseTime += nResponseTime / nOverallRange;
                }
                else
                {
                    LOG(IBD, "Calculation of average response time failed and will be inaccurate due to division by "
                             "zero.\n");
                }

                // Request for a disconnect if over the response time limit. We don't do an fDisconnect = true here
                // because we want to drain the queue for any blocks that are still returning. This prevents us from
                // having to re-request all those blocks again.
                //
                // We only check whether to issue a disconnect during initial sync and we only disconnect up to two
                // peers at a time if and only if all our outbound slots have been used to prevent any sudden loss of
                // all peers. We do this for two peers and not one in the event that one of the peers is hung and their
                // block queue does not drain; in that event we would end up waiting for 10 minutes before finally
                // disconnecting.
                //
                // We disconnect a peer only if their average response time is more than 4 times the overall average.
                static int nStartDisconnections GUARDED_BY(cs_overallaverage) = BEGIN_PRUNING_PEERS;
                if (!pnode->fDisconnectRequest &&
                    (nOutbound >= nMaxOutConnections - 1 || nOutbound >= nStartDisconnections) &&
                    IsInitialBlockDownload() && nIterations > nOverallRange &&
                    pnode->nAvgBlkResponseTime > nOverallAverageResponseTime * 4)
                {
                    // NOTE(review): %d is used with double arguments here; presumably the LOG
                    // formatter is tinyformat-style and tolerates this -- confirm before changing.
                    LOG(IBD, "disconnecting %s because too slow , overall avg %d peer avg %d\n", pnode->GetLogName(),
                        nOverallAverageResponseTime, pnode->nAvgBlkResponseTime);
                    pnode->InitiateGracefulDisconnect();
                    // We must not return here but continue in order
                    // to update the vBlocksInFlight stats.

                    // Increment so we start disconnecting at a higher number of peers each time. This
                    // helps to improve the very beginning of IBD such that we don't have to wait for all outbound
                    // connections to be established before we start pruning the slow peers and yet we don't end
                    // up suddenly overpruning.
                    nStartDisconnections = nOutbound;
                    if (nStartDisconnections < nMaxOutConnections)
                        nStartDisconnections++;
                }
            }

            // Dynamically size this peer's in-transit window from its average response time:
            // faster peers are allowed more concurrent block requests.
            if (pnode->nAvgBlkResponseTime < 0.2)
            {
                pnode->nMaxBlocksInTransit.store(64);
            }
            else if (pnode->nAvgBlkResponseTime < 0.5)
            {
                pnode->nMaxBlocksInTransit.store(56);
            }
            else if (pnode->nAvgBlkResponseTime < 0.9)
            {
                pnode->nMaxBlocksInTransit.store(48);
            }
            else if (pnode->nAvgBlkResponseTime < 1.4)
            {
                pnode->nMaxBlocksInTransit.store(32);
            }
            else if (pnode->nAvgBlkResponseTime < 2.0)
            {
                pnode->nMaxBlocksInTransit.store(24);
            }
            else
            {
                pnode->nMaxBlocksInTransit.store(16);
            }

            LOG(THIN | BLK, "Average block response time is %.2f seconds for %s\n", pnode->nAvgBlkResponseTime,
                pnode->GetLogName());
        }

        // if there are no blocks in flight then ask for a few more blocks
        if (state->nBlocksInFlight <= 0)
            pnode->nMaxBlocksInTransit.fetch_add(4);

        // Operator-configured values override the dynamic sizing above.
        if (maxBlocksInTransitPerPeer.Value() != 0)
        {
            pnode->nMaxBlocksInTransit.store(maxBlocksInTransitPerPeer.Value());
        }
        if (blockDownloadWindow.Value() != 0)
        {
            BLOCK_DOWNLOAD_WINDOW.store(blockDownloadWindow.Value());
        }
        LOG(THIN | BLK, "BLOCK_DOWNLOAD_WINDOW 
is %d nMaxBlocksInTransit is %d\n", BLOCK_DOWNLOAD_WINDOW.load(), pnode->nMaxBlocksInTransit.load());

        // Update the appropriate response time based on the type of block received.
        if (IsChainNearlySyncd())
        {
            // Update Thinblock stats
            if (thinrelay.IsBlockInFlight(pnode, NetMsgType::XTHINBLOCK, hash))
            {
                thindata.UpdateResponseTime(nResponseTime);
            }

            // Update Graphene stats
            if (thinrelay.IsBlockInFlight(pnode, NetMsgType::GRAPHENEBLOCK, hash))
            {
                graphenedata.UpdateResponseTime(nResponseTime);
            }

            // Update CompactBlock stats
            if (thinrelay.IsBlockInFlight(pnode, NetMsgType::CMPCTBLOCK, hash))
            {
                compactdata.UpdateResponseTime(nResponseTime);
            }
        }

        if (state->vBlocksInFlight.begin() == itInFlight->second)
        {
            // First block on the queue was received, update the start download time for the next one
            state->nDownloadingSince = std::max(state->nDownloadingSince, (int64_t)GetStopwatchMicros());
        }

        // In order to prevent a dangling iterator we must erase from vBlocksInFlight after mapBlockInFlight
        // however that will invalidate the iterator held by mapBlocksInFlight. Use a temporary to work around this.
        std::list<QueuedBlock>::iterator tmp = itInFlight->second;
        state->nBlocksInFlight--;
        MapBlocksInFlightErase(hash, nodeid);
        state->vBlocksInFlight.erase(tmp);

        return true;
    }
    return false;
}

void CRequestManager::MapBlocksInFlightErase(const uint256 &hash, NodeId nodeid)
{
    // If there are more than one block in flight for the same block hash then we only remove
    // the entry for this particular node, otherwise entirely remove the hash from mapBlocksInFlight.
LOCK(cs_objDownloader); std::map<uint256, std::map<NodeId, std::list<QueuedBlock>::iterator> >::iterator itHash = mapBlocksInFlight.find(hash); if (itHash != mapBlocksInFlight.end()) { itHash->second.erase(nodeid); } } bool CRequestManager::MapBlocksInFlightEmpty() { LOCK(cs_objDownloader); return mapBlocksInFlight.empty(); } void CRequestManager::MapBlocksInFlightClear() { LOCK(cs_objDownloader); mapBlocksInFlight.clear(); } void CRequestManager::GetBlocksInFlight(std::vector<uint256> &vBlocksInFlight, NodeId nodeid) { LOCK(cs_objDownloader); for (auto &iter : mapRequestManagerNodeState[nodeid].vBlocksInFlight) { vBlocksInFlight.emplace_back(iter.hash); } } int CRequestManager::GetNumBlocksInFlight(NodeId nodeid) { LOCK(cs_objDownloader); return mapRequestManagerNodeState[nodeid].nBlocksInFlight; } void CRequestManager::RemoveNodeState(NodeId nodeid) { LOCK(cs_objDownloader); std::vector<uint256> vBlocksInFlight; GetBlocksInFlight(vBlocksInFlight, nodeid); for (const uint256 &hash : vBlocksInFlight) { // Erase mapblocksinflight entries for this node. MapBlocksInFlightErase(hash, nodeid); // Reset all requests times to zero so that we can immediately re-request these blocks ResetLastBlockRequestTime(hash); } mapRequestManagerNodeState.erase(nodeid); } void CRequestManager::DisconnectOnDownloadTimeout(CNode *pnode, const Consensus::Params &consensusParams, int64_t nNow) { // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout. // We compensate for other peers to prevent killing off peers due to our own downstream link // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes // to unreasonably increase our timeout. 
    LOCK(cs_objDownloader);
    NodeId nodeid = pnode->GetId();
    if (!pnode->fDisconnect && mapRequestManagerNodeState[nodeid].vBlocksInFlight.size() > 0)
    {
        // NOTE(review): the timeout below uses a fixed BLOCK_DOWNLOAD_TIMEOUT_PER_PEER term and does
        // not scale it by the number of validated downloading peers as the comment above describes --
        // confirm whether that simplification is intended.
        if (nNow > mapRequestManagerNodeState[nodeid].nDownloadingSince +
                       consensusParams.nPowTargetSpacing *
                           (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER))
        {
            LOGA("Timeout downloading block %s from peer %s, disconnecting\n",
                mapRequestManagerNodeState[nodeid].vBlocksInFlight.front().hash.ToString(), pnode->GetLogName());
            pnode->fDisconnect = true;
        }
    }
}
// MIT License // // shrinkler-gba: Port of the Shrinkler Amiga executable cruncher for the GBA // Copyright (c) 2021 Thomas Mathys // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. #if defined(__CYGWIN__) #if defined(__GNUC__) || defined(__clang__) #pragma GCC diagnostic ignored "-Wunused-parameter" #endif #define _POSIX_C_SOURCE 200112L #endif #define BOOST_TEST_MODULE shrinklergba-unittest #include <boost/test/included/unit_test.hpp>
/* ----------------------------------------------------------------------- *//**
 *
 * @file SystemInformation_impl.hpp
 *
 *//* ----------------------------------------------------------------------- */

#ifndef MADLIB_POSTGRES_SYSTEMINFORMATION_IMPL_HPP
#define MADLIB_POSTGRES_SYSTEMINFORMATION_IMPL_HPP

namespace madlib {

namespace dbconnector {

namespace postgres {

namespace {

/**
 * @brief Initialize an OID-to-data hash table
 *
 * @param ioHashTable Hash table to initialize; no-op if already non-NULL
 * @param inCacheContext Memory context the hash table will be allocated in
 * @param inEntrySize Size (in bytes) of one hash-table entry
 * @param inTabName Name for the table (for debugging purposes)
 * @param inMaxNElems Maximum number of elements expected
 */
inline
void
initializeOidHashTable(HTAB*& ioHashTable, MemoryContext inCacheContext,
    size_t inEntrySize, const char* inTabName, uint32_t inMaxNElems) {

    if (ioHashTable == NULL) {
        HASHCTL ctl;
        ctl.keysize = sizeof(Oid);
        ctl.entrysize = inEntrySize;
        ctl.hash = oid_hash;
        ctl.hcxt = inCacheContext;
        ioHashTable = madlib_hash_create(
            /* tabname -- a name for the table (for debugging purposes) */
            inTabName,
            /* nelem -- maximum number of elements expected */
            inMaxNElems,
            /* info: additional table parameters, as indicated by flags */
            &ctl,
            /* flags -- bitmask indicating which parameters to take from *info */
            HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT
        );
    }
}

/**
 * @brief Store cached system-catalog information in backend function handle
 *
 * @param inFmgrInfo Backend handle to the function
 * @param inSysInfo Our system-catalog information that should be stored
 *
 * A set-returning function uses \c fn_extra to store cross-call information.
 * See, e.g., init_MultiFuncCall() in funcapi.c. Fortunately, it stores a
 * pointer to a <tt>struct FuncCallContext</tt> in \c fn_extra, which in turn
 * allows to store user-defined data.
 */
inline
void
setSystemInformationInFmgrInfo(FmgrInfo* inFmgrInfo,
    SystemInformation* inSysInfo) {

    // The conditional expression yields an lvalue (both operands are void*
    // lvalues), so the assignment targets either user_fctx or fn_extra.
    (inFmgrInfo->fn_retset ? 
        static_cast<FuncCallContext*>(inFmgrInfo->fn_extra)->user_fctx
        : inFmgrInfo->fn_extra) = inSysInfo;
}

/**
 * @brief Get cached system-catalog information from backend function handle
 *
 * @see setSystemInformationInFmgrInfo()
 */
inline
SystemInformation*
getSystemInformationFromFmgrInfo(FmgrInfo* inFmgrInfo) {
    return static_cast<SystemInformation*>(inFmgrInfo->fn_retset
        ? static_cast<FuncCallContext*>(inFmgrInfo->fn_extra)->user_fctx
        : inFmgrInfo->fn_extra);
}

/**
 * @brief Get memory context from backend function handle
 *
 * The memory context returned may be used for storing user-defined data. In
 * SystemInformation::get(), we will use it for allocating a
 * <tt>struct SystemInformation</tt>.
 *
 * @see setSystemInformationInFmgrInfo()
 */
inline
MemoryContext
getMemoryContextFromFmgrInfo(FmgrInfo* inFmgrInfo) {
    return inFmgrInfo->fn_retset
        ? static_cast<FuncCallContext*>(inFmgrInfo->fn_extra)
            ->multi_call_memory_ctx
        : inFmgrInfo->fn_mcxt;
}

} // namespace

/**
 * @brief Get (and cache) information from the PostgreSQL catalog
 *
 * @param inFmgrInfo System-catalog information about the function. If no
 *     SystemInformation is currently available in struct FmgrInfo, this
 *     function will store it (and thus change the FmgrInfo struct).
 */
inline
SystemInformation*
SystemInformation::get(FunctionCallInfo fcinfo) {
    madlib_assert(fcinfo->flinfo,
        std::invalid_argument("Incomplete FunctionCallInfoData."));

    SystemInformation* sysInfo
        = getSystemInformationFromFmgrInfo(fcinfo->flinfo);

    if (!sysInfo) {
        // First call via this function handle: allocate a zeroed cache
        // structure in the handle's memory context and attach it.
        MemoryContext memCtxt = getMemoryContextFromFmgrInfo(fcinfo->flinfo);
        sysInfo = static_cast<SystemInformation*>(
            madlib_MemoryContextAllocZero(memCtxt,
                sizeof(SystemInformation)));
        sysInfo->entryFuncOID = fcinfo->flinfo->fn_oid;
        sysInfo->cacheContext = memCtxt;
        sysInfo->collationOID = PG_GET_COLLATION();
        setSystemInformationInFmgrInfo(fcinfo->flinfo, sysInfo);
    }
    return sysInfo;
}

/**
 * @brief Get (and cache) information about a PostgreSQL type
 *
 * @param inTypeID The OID of the type of interest
 */
inline
TypeInformation*
SystemInformation::typeInformation(Oid inTypeID) {
    TypeInformation* cachedTypeInfo = NULL;
    bool found = true;
    MemoryContext oldContext;
    HeapTuple tup;
    Form_pg_type pgType;

    // We arrange to look up info about types only once per series of
    // calls, assuming the type info doesn't change underneath us.
    initializeOidHashTable(types, cacheContext, sizeof(TypeInformation),
        "C++ AL / TypeInformation hash table", 12);

    // BACKEND: Since we pass HASH_FIND, this function call will never perform
    // an allocation. There is nothing in the code path that would raise an
    // exception (including oid_hash()), so we are *not* wrapping in a PG_TRY()
    // block for performance reasons.
    cachedTypeInfo = static_cast<TypeInformation*>(
        hash_search(types, &inTypeID, HASH_FIND, &found));

    if (!found) {
        cachedTypeInfo = static_cast<TypeInformation*>(
            madlib_hash_search(types, &inTypeID, HASH_ENTER, &found));
        // cachedTypeInfo.oid is already set

        tup = madlib_SearchSysCache1(TYPEOID, ObjectIdGetDatum(inTypeID));
        // BACKEND: HeapTupleIsValid is just a macro
        if (!HeapTupleIsValid(tup)) {
            throw std::runtime_error("Error while looking up a type in the "
                "system catalog.");
        } else {
            // BACKEND: GETSTRUCT is just a macro
            pgType = reinterpret_cast<Form_pg_type>(GETSTRUCT(tup));
            // NOTE(review): strncpy does not NUL-terminate when the source
            // fills the buffer; pg_type.typname is a fixed-size NameData that
            // includes its terminator, so this is presumably safe -- confirm.
            std::strncpy(cachedTypeInfo->name, pgType->typname.data,
                NAMEDATALEN);
            cachedTypeInfo->len = pgType->typlen;
            cachedTypeInfo->byval = pgType->typbyval;
            cachedTypeInfo->type = pgType->typtype;

            if (cachedTypeInfo->type == TYPTYPE_COMPOSITE) {
                // BACKEND: MemoryContextSwitchTo just changes a global
                // variable
                oldContext = MemoryContextSwitchTo(cacheContext);
                // Since type ID != RECORDOID, typmod will not be used and
                // we can set it to -1
                // (RECORDOID is a pseudo type and used for transient record
                // types. They are identified by an index in array
                // RecordCacheArray defined in typcache.c.)
                cachedTypeInfo->tupdesc = madlib_lookup_rowtype_tupdesc_copy(
                    /* type_id */ inTypeID,
                    /* typmod */ -1);
                MemoryContextSwitchTo(oldContext);
            } else {
                cachedTypeInfo->tupdesc = NULL;
            }
            madlib_ReleaseSysCache(tup);
        }
    }
    return cachedTypeInfo;
}

/**
 * @brief Get (and cache) information about a PostgreSQL function
 *
 * @param inFuncID The OID of the function of interest
 * @return Cached information about the function
 */
inline
FunctionInformation*
SystemInformation::functionInformation(Oid inFuncID) {
    FunctionInformation* cachedFuncInfo = NULL;
    bool found = true;
    HeapTuple tup;
    Form_pg_proc pgFunc;

    // We arrange to look up info about functions only once per series of
    // calls, assuming the function info doesn't change underneath us.
    initializeOidHashTable(functions, cacheContext,
        sizeof(FunctionInformation),
        "C++ AL / FunctionInformation hash table", 8);

    // BACKEND: Since we pass HASH_FIND, this function call will never perform
    // an allocation. There is nothing in the code path that would raise an
    // exception (including oid_hash()), so we are *not* wrapping in a PG_TRY()
    // block for performance reasons.
    cachedFuncInfo = static_cast<FunctionInformation*>(
        hash_search(functions, &inFuncID, HASH_FIND, &found));

    if (!found) {
        cachedFuncInfo = static_cast<FunctionInformation*>(
            madlib_hash_search(functions, &inFuncID, HASH_ENTER, &found));
        // cachedFuncInfo.oid is already set
        cachedFuncInfo->mSysInfo = this;

        tup = madlib_SearchSysCache1(PROCOID, ObjectIdGetDatum(inFuncID));
        if (!HeapTupleIsValid(tup)) {
            throw std::runtime_error("Error while looking up a function in the "
                "system catalog.");
        } else {
            // BACKEND: GETSTRUCT is just a macro
            pgFunc = reinterpret_cast<Form_pg_proc>(GETSTRUCT(tup));
            cachedFuncInfo->cxx_func = NULL;
            // InvalidOid marks the per-function FmgrInfo as uninitialized;
            // see FunctionInformation::getFuncMgrInfo().
            cachedFuncInfo->flinfo.fn_oid = InvalidOid;
            // The number of arguments (excluding OUT params)
            cachedFuncInfo->nargs
                = static_cast<uint16_t>(pgFunc->proargtypes.dim1);
            cachedFuncInfo->polymorphic = false;
            cachedFuncInfo->isstrict = pgFunc->proisstrict;
            cachedFuncInfo->secdef = pgFunc->prosecdef;

            Oid* allargs;
            // We could use get_func_arg_info() but unfortunately that also
            // copied names and modes
            bool onlyINArguments = false;
            Datum allargtypes = madlib_SysCacheGetAttr(PROCOID, tup,
                Anum_pg_proc_proallargtypes, &onlyINArguments);
            if (onlyINArguments) {
                allargs = pgFunc->proargtypes.values;
            } else {
                // See get_func_arg_info(). We expect the arrays to be 1-D
                // arrays of the right types; verify that.

                // Ensure that array is not toasted. We do not worry about
                // a possible memory leak here. This code will be run only
                // once per entry function (into the C++ AL) and query, and
                // memory will already be garbage collected once the current
                // entry point is left.
                ArrayType* arr = madlib_DatumGetArrayTypeP(allargtypes);
                int numargs = ARR_DIMS(arr)[0];
                madlib_assert(ARR_NDIM(arr) == 1
                    && ARR_DIMS(arr)[0] >= 0
                    && !ARR_HASNULL(arr)
                    && ARR_ELEMTYPE(arr) == OIDOID
                    && numargs >= pgFunc->pronargs,
                    std::runtime_error("In SystemInformation::"
                        "functionInformation(): proallargtypes is not a vaid "
                        "one-dimensional Oid array"));
                allargs = reinterpret_cast<Oid*>(ARR_DATA_PTR(arr));
            }

            for (int i = 0; i < pgFunc->pronargs; ++i) {
                // Note that pgFunc->pronargs is the number of all arguments
                // (including OUT params)
                if (typeInformation(allargs[i])->getType()
                    == TYPTYPE_PSEUDO) {

                    cachedFuncInfo->polymorphic = true;
                    break;
                }
            }

            if (cachedFuncInfo->nargs == 0) {
                cachedFuncInfo->argtypes = NULL;
            } else {
                // Copy the IN-argument type OIDs into our long-lived cache
                // context so they survive this call.
                cachedFuncInfo->argtypes = static_cast<Oid*>(
                    madlib_MemoryContextAlloc(cacheContext,
                        cachedFuncInfo->nargs * sizeof(Oid)));
                std::memcpy(cachedFuncInfo->argtypes,
                    pgFunc->proargtypes.values,
                    cachedFuncInfo->nargs * sizeof(Oid));
            }
            cachedFuncInfo->rettype = pgFunc->prorettype;
            // If the return type is RECORDOID, we cannot yet determine the
            // tuple description, even if the function is not polymorphic.
            // For that, the expression parse tree is required.
            // If the return type is composite but not RECORDOID, the tuple
            // description will be stored with the type information, so no
            // need to have it here.
            cachedFuncInfo->tupdesc = NULL;
            madlib_ReleaseSysCache(tup);
        }
    }
    return cachedFuncInfo;
}

/**
 * @brief Retrieve tuple description for a composite type
 *
 * @param inTypeMod Transient record types have type ID RECORDOID and are
 *     identified by an index in array RecordCacheArray (defined in
 *     typcache.c). This index is stored in the tdtypmod field of struct
 *     tupleDesc. See the description of \c tupleDesc in tupdesc.h.
 */
inline
TupleDesc
TypeInformation::getTupleDesc(int32_t inTypeMod) {
    if (tupdesc != NULL)
        // We have the tuple description in our cache, so return it
        return tupdesc;
    else if (oid == RECORDOID && inTypeMod >= 0) {
        // It is an anonymous type, but PostgreSQL has it in its own cache

        // In this case lookup_rowtype_tupdesc_noerror(RECORDOID, ...)
        // does currently not perform any actions that could raise an
        // exception. We might not want to rely on that, however (at the
        // expense of performance).
        TupleDesc pgCachedTupDesc = lookup_rowtype_tupdesc_noerror(oid,
            inTypeMod, /* noerror */ true);
        // The tupleDesc is in the cache (RecordCacheArray defined in
        // typcache.c) even before lookup_rowtype... is called. There is no
        // need to release the tupleDesc, though we do in order to avoid any
        // side effect.
        ReleaseTupleDesc(pgCachedTupDesc);
        return pgCachedTupDesc;
    }
    // Not composite, or an anonymous record without a known typmod.
    return NULL;
}

/**
 * @brief Determine whether a specified type is composite
 *
 * This is our cached replacement for type_is_rowtype() in lsyscache.c
 */
inline
bool
TypeInformation::isCompositeType() {
    return oid == RECORDOID || type == TYPTYPE_COMPOSITE;
}

/**
 * @brief Retrieve the name of the specified type
 */
inline
const char*
TypeInformation::getName() {
    return name;
}

/**
 * @brief Determine whether the type is passed by value (pg_type.typbyval)
 */
inline
bool
TypeInformation::isByValue() {
    return byval;
}

/**
 * @brief Retrieve the length of the type (pg_type.typlen)
 */
inline
int16_t
TypeInformation::getLen() {
    return len;
}

/**
 * @brief Retrieve the type category (pg_type.typtype)
 */
inline
char
TypeInformation::getType() {
    return type;
}

/**
 * @brief Retrieve the type of a function argument
 *
 * @param inArgID The ID of the argument (first argument is 0)
 * @param inFmgrInfo System-catalog information about the function. For
 *     polymorphic functions, this should be non-NULL and contain the
 *     expression parse tree.
 * @return The OID of the type of the argument. The type ID of polymorphic
 *     arguments is resolved if possible, i.e., if the expression parse tree
 *     is available in <tt>inFmgrInfo->fn_expr</tt>.
 */
inline
Oid
FunctionInformation::getArgumentType(uint16_t inArgID,
    FmgrInfo* inFmgrInfo) {

    if (!inFmgrInfo)
        inFmgrInfo = getFuncMgrInfo();
    madlib_assert(inFmgrInfo && oid == inFmgrInfo->fn_oid,
        std::runtime_error(
            "Invalid arguments passed to FunctionInformation::getArgumentType()."));

    Oid typeID = argtypes[inArgID];
    TypeInformation* cachedTypeInfo = mSysInfo->typeInformation(typeID);
    if (cachedTypeInfo->getType() == TYPTYPE_PSEUDO
        && inFmgrInfo->fn_expr != NULL) {

        // Type is a pseudotype, so the actual type information is given
        // by the expression parse tree.
        // This would fail if no expression parse tree is present, i.e., if
        // inFmgrInfo->fn_expr == NULL.
        typeID = madlib_get_fn_expr_argtype(inFmgrInfo, inArgID);
    }
    return typeID;
}

/**
 * @brief Retrieve the function return type
 *
 * @param fcinfo Information about function call, including the function OID
 * @return The actual return type (i.e., with resolved pseudo-types)
 */
inline
Oid
FunctionInformation::getReturnType(FunctionCallInfo fcinfo) {
    madlib_assert(fcinfo->flinfo && oid == fcinfo->flinfo->fn_oid,
        std::runtime_error("Invalid arguments passed to "
            "FunctionInformation::getReturnType()."));

    Oid returnType = rettype;
    if (rettype != RECORDOID
        && mSysInfo->typeInformation(rettype)->type == TYPTYPE_PSEUDO) {

        // The function is polymorphic, and the result type thus depends on the
        // expression parse tree. Note that the condition in the if-clause is
        // sufficient condition for cachedFuncInfo->polymorphic, but not a
        // necessary condition. (A function could have input arguments with
        // pseudo types, but a fixed return type.)
        madlib_assert(polymorphic,
            std::logic_error("Logical error: Function returns non-record "
                "pseudo type but is not polymorphic."));

        // This is not a composite type, so no need to pass anything for
        // resultTupleDesc
        madlib_get_call_result_type(fcinfo, &returnType,
            /* resultTupleDesc */ NULL);
    }
    return returnType;
}

/**
 * @brief Retrieve the tuple description of a function's return type
 *
 * @param fcinfo Information about function call, including the function OID
 * @return The tuple description if the return type is composite and NULL
 *     otherwise
 */
inline
TupleDesc
FunctionInformation::getReturnTupleDesc(FunctionCallInfo fcinfo) {
    madlib_assert(fcinfo->flinfo && oid == fcinfo->flinfo->fn_oid,
        std::runtime_error("Invalid arguments passed to "
            "FunctionInformation::getReturnTupleDesc()."));

    TupleDesc returnTupDesc = tupdesc;
    if (returnTupDesc == NULL) {
        if (rettype == RECORDOID) {
            MADLIB_PG_TRY {
                // The return type is known, so no need to pass anything for
                // resultTypeId
                // get_call_result_type() creates the tuple description using
                // lookup_rowtype_tupdesc_copy(), which is not
                // reference-counted. So there is no need to release the
                // TupleDesc
                get_call_result_type(fcinfo, /* resultTypeId */ NULL,
                    &returnTupDesc);
            } MADLIB_PG_DEFAULT_CATCH_AND_END_TRY;

            if (returnTupDesc == NULL) {
                throw std::runtime_error("MADLIB-870: C++ abstract layer has "
                    "not supported UDFs that return RECORD type without "
                    "tuple described at call time");
            }

            MADLIB_PG_TRY {
                if (!polymorphic) {
                    // Since the function is not polymorphic, we can store the
                    // tuple description for other calls
                    MemoryContext oldContext
                        = MemoryContextSwitchTo(mSysInfo->cacheContext);
                    tupdesc = CreateTupleDescCopyConstr(returnTupDesc);
                    MemoryContextSwitchTo(oldContext);
                }
            } MADLIB_PG_DEFAULT_CATCH_AND_END_TRY;
        } else {
            // Non-RECORD composite types carry their tuple description in
            // the cached type information.
            TypeInformation* cachedTypeInfo
                = mSysInfo->typeInformation(rettype);
            if (cachedTypeInfo->type == TYPTYPE_COMPOSITE)
                returnTupDesc = cachedTypeInfo->tupdesc;
        }
    }
    return returnTupDesc;
}

/**
 * @brief 
Retrieve (cross-call) system-catalog information about the function
 *
 * @param inFuncID The OID of the function of interest
 * @return A filled FmgrInfo struct which uses the current SystemInformation
 *     object as system-catalog cache
 *
 * If no FmgrInfo data is stored for the function of interest, this
 * function creates a new struct FmgrInfo and stores it in the current
 * FunctionInformation object. It also links the new FmgrInfo struct to the
 * currently used SystemInformation object.
 *
 * @note Cached FmgrInfo data is *not* used for entry functions (i.e., the
 *     immediate call by the backend). For the entry function, a complete
 *     struct FunctionCallInfoData is passed by the backend itself, which is
 *     used instead (as it also contains the parse tree).
 */
inline
FmgrInfo*
FunctionInformation::getFuncMgrInfo() {
    // Initially flinfo.fn_oid is set to InvalidOid
    if (flinfo.fn_oid != oid) {
        // Check permissions
        if (madlib_pg_proc_aclcheck(oid, GetUserId(), ACL_EXECUTE)
            != ACLCHECK_OK) {

            throw std::invalid_argument(std::string("No privilege to run "
                "function '") + getFullName() + "'.");
        }

        // cacheContext will be set as fn_mcxt.
        madlib_fmgr_info_cxt(oid, &flinfo, mSysInfo->cacheContext);

        if (!secdef) {
            // If the function is SECURITY DEFINER then fmgr_info_cxt() has
            // set up flinfo so that what we will actually
            // call fmgr_security_definer() in fmgr.c, which then calls the
            // "real" function. Because of this additional layer, and since
            // fmgr_security_definer() uses the fn_extra field of
            // struct FmgrInfo in an opaque way (it points to a struct that is
            // local to fmgr.c), we only initialize the cache if the function
            // is *not* SECURITY DEFINER.
            setSystemInformationInFmgrInfo(&flinfo, mSysInfo);
        }
    }
    return &flinfo;
}

/**
 * @brief Retrieve the full function name (including arguments)
 *
 * We currently do not cache this information because we expect this function
 * to be primarily called by error handlers.
 */
inline
const char*
FunctionInformation::getFullName() {
    // NOTE(review): madlib_format_procedure() presumably returns a
    // backend-allocated (palloc'd) string; callers should not free it with
    // delete/free -- confirm against the wrapper's implementation.
    return madlib_format_procedure(oid);
}

} // namespace postgres

} // namespace dbconnector

} // namespace madlib

#endif // defined(MADLIB_POSTGRES_SYSTEMINFORMATION_IMPL_HPP)