text
stringlengths
5
1.04M
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/

#include "mitkSceneReaderV1.h"
#include "mitkSerializerMacros.h"
#include "mitkBaseRenderer.h"
#include "mitkPropertyListDeserializer.h"
#include "mitkProgressBar.h"
#include "mitkIOUtil.h"
#include "Poco/Path.h"
#include <mitkRenderingModeProperty.h>

MITK_REGISTER_SERIALIZER(SceneReaderV1)

namespace
{
  // A scene node together with the UIDs of its parent nodes.
  typedef std::pair<mitk::DataNode::Pointer, std::list<std::string> > NodesAndParentsPair;

  // Strict-weak ordering used to insert nodes into the DataStorage lowest
  // "layer" first. Falls back to name comparison when a "layer" property is
  // missing, and to raw pointer comparison when a node is NULL.
  bool NodeSortByLayerIsLessThan( const NodesAndParentsPair& left, const NodesAndParentsPair& right )
  {
    if ( left.first.IsNotNull() && right.first.IsNotNull() )
    {
      int leftLayer;
      int rightLayer;
      if ( left.first->GetIntProperty("layer", leftLayer) &&
           right.first->GetIntProperty("layer", rightLayer) )
      {
        return leftLayer < rightLayer;
      }
      else
      {
        // fall back to name sort
        return left.first->GetName() < right.first->GetName();
      }
    }

    // in all other cases, fall back to stupid pointer comparison
    // this is not reasonable but at least answers the sorting
    // question clearly
    return left.first.GetPointer() < right.first.GetPointer();
  }
}

/// Reads a scene from \a document, creating DataNodes (with data, properties
/// and parent relations) inside \a storage. Files referenced by the scene are
/// resolved relative to \a workingDirectory.
/// Returns false if any (possibly recoverable) error was encountered.
bool mitk::SceneReaderV1::LoadScene( TiXmlDocument& document, const std::string& workingDirectory, DataStorage* storage )
{
  assert(storage);
  bool error(false);

  // TODO prepare to detect errors (such as cycles) from wrongly written or edited xml files

  // Get number of elements to initialize the progress bar.
  //   1. if there is a <data type="..." file="..."> element,
  //        - construct a name for the appropriate serializer
  //        - try to instantiate this serializer via itk object factory
  //        - if serializer could be created, use it to read the file into a BaseData object
  //        - if successful, call the new node's SetData(..)

  // create a node for the tag "data" and test if node was created
  typedef std::vector<mitk::DataNode::Pointer> DataNodeVector;
  DataNodeVector DataNodes;
  unsigned int listSize = 0;
  for( TiXmlElement* element = document.FirstChildElement("node");
       element != NULL;
       element = element->NextSiblingElement("node") )
  {
    ++listSize;
  }
  // two passes per node: data loading, then property decoration
  ProgressBar::GetInstance()->AddStepsToDo(listSize * 2);

  for (TiXmlElement* element = document.FirstChildElement("node");
       element != NULL;
       element = element->NextSiblingElement("node"))
  {
    DataNodes.push_back(LoadBaseDataFromDataTag(element->FirstChildElement("data"), workingDirectory, error));
    ProgressBar::GetInstance()->Progress();
  }

  // iterate all nodes
  // first level nodes should be <node> elements
  DataNodeVector::iterator nit = DataNodes.begin();
  // BUGFIX: both sequences have the same length (DataNodes was built from the
  // very same <node> elements), so iterate while BOTH are valid. The former
  // "||" condition would dereference a NULL element if they ever diverged.
  for( TiXmlElement* element = document.FirstChildElement("node");
       element != NULL && nit != DataNodes.end();
       element = element->NextSiblingElement("node"), ++nit )
  {
    mitk::DataNode::Pointer node = *nit;

    // in case dataXmlElement is valid test whether it contains the "properties" child tag
    // and process further if and only if yes
    TiXmlElement *dataXmlElement = element->FirstChildElement("data");
    if( dataXmlElement && dataXmlElement->FirstChildElement("properties") )
    {
      TiXmlElement *baseDataElement = dataXmlElement->FirstChildElement("properties");
      if ( node->GetData() )
      {
        DecorateBaseDataWithProperties( node->GetData(), baseDataElement, workingDirectory);
      }
      else
      {
        MITK_WARN << "BaseData properties stored in scene file, but BaseData could not be read" << std::endl;
      }
    }

    //   2. check child nodes
    const char* uida = element->Attribute("UID");
    std::string uid("");

    if (uida)
    {
      uid = uida;
      m_NodeForID[uid] = node.GetPointer();
      m_IDForNode[ node.GetPointer() ] = uid;
    }
    else
    {
      MITK_ERROR << "No UID found for current node. Node will have no parents.";
      error = true;
    }

    //   3. if there are <properties> nodes,
    //        - instantiate the appropriate PropertyListDeSerializer
    //        - use them to construct PropertyList objects
    //        - add these properties to the node (if necessary, use renderwindow name)
    bool success = DecorateNodeWithProperties(node, element, workingDirectory);
    if (!success)
    {
      MITK_ERROR << "Could not load properties for node.";
      error = true;
    }

    // remember node for later adding to DataStorage
    m_OrderedNodePairs.push_back( std::make_pair( node, std::list<std::string>() ) );

    //   4. if there are <source> elements, remember parent objects
    for( TiXmlElement* source = element->FirstChildElement("source");
         source != NULL;
         source = source->NextSiblingElement("source") )
    {
      const char* sourceUID = source->Attribute("UID");
      if (sourceUID)
      {
        m_OrderedNodePairs.back().second.push_back( std::string(sourceUID) );
      }
    }

    ProgressBar::GetInstance()->Progress();
  } // end for all <node>

  // sort our nodes by their "layer" property
  // (to be inserted in that order)
  m_OrderedNodePairs.sort( &NodeSortByLayerIsLessThan );

  // remove all unknown parent UIDs
  for (OrderedNodesList::iterator nodesIter = m_OrderedNodePairs.begin();
       nodesIter != m_OrderedNodePairs.end();
       ++nodesIter)
  {
    for (std::list<std::string>::iterator parentsIter = nodesIter->second.begin();
         parentsIter != nodesIter->second.end();)
    {
      if (m_NodeForID.find( *parentsIter ) == m_NodeForID.end())
      {
        parentsIter = nodesIter->second.erase( parentsIter );
        MITK_WARN << "Found a DataNode with unknown parents. Will add it to DataStorage without any parent objects.";
        error = true;
      }
      else
      {
        ++parentsIter;
      }
    }
  }

  // repeat the following loop ...
  //   ... for all created nodes
  unsigned int lastMapSize(0);
  // this is to prevent infinite loops; each iteration must add at least one node to DataStorage
  while ( lastMapSize != m_OrderedNodePairs.size())
  {
    lastMapSize = m_OrderedNodePairs.size();

    // iterate (layer) ordered nodes backwards
    // we insert the highest layers first
    for (OrderedNodesList::iterator nodesIter = m_OrderedNodePairs.begin();
         nodesIter != m_OrderedNodePairs.end();
         ++nodesIter)
    {
      bool addThisNode(true);

      // if any parent node is not yet in DataStorage, skip node for now and check later
      for (std::list<std::string>::iterator parentsIter = nodesIter->second.begin();
           parentsIter != nodesIter->second.end();
           ++parentsIter)
      {
        if ( !storage->Exists( m_NodeForID[ *parentsIter ] ) )
        {
          addThisNode = false;
          break;
        }
      }

      if (addThisNode)
      {
        DataStorage::SetOfObjects::Pointer parents = DataStorage::SetOfObjects::New();
        for ( std::list<std::string>::iterator parentsIter = nodesIter->second.begin();
              parentsIter != nodesIter->second.end();
              ++parentsIter )
        {
          parents->push_back(m_NodeForID[*parentsIter]);
        }

        // if all parents are found in datastorage (or are unknown), add node to DataStorage
        storage->Add(nodesIter->first, parents);

        // remove this node from m_OrderedNodePairs
        m_OrderedNodePairs.erase( nodesIter );

        // break this for loop because iterators are probably invalid
        break;
      }
    }
  }

  // All nodes that are still in m_OrderedNodePairs at this point are not part
  // of a proper directed graph structure. We'll add such nodes without any
  // parent information.
  for (OrderedNodesList::iterator nodesIter = m_OrderedNodePairs.begin();
       nodesIter != m_OrderedNodePairs.end();
       ++nodesIter)
  {
    storage->Add( nodesIter->first );
    MITK_WARN << "Encountered node that is not part of a directed graph structure. Will be added to DataStorage without parents.";
    error = true;
  }

  return !error;
}

/// Creates a DataNode from a <data file="..."> element (loading the file via
/// IOUtil), or an empty node when \a dataElement is NULL or has no file.
/// Sets \a error on read failures; never returns a NULL pointer.
mitk::DataNode::Pointer mitk::SceneReaderV1::LoadBaseDataFromDataTag( TiXmlElement* dataElement, const std::string& workingDirectory, bool& error )
{
  DataNode::Pointer node;

  if (dataElement)
  {
    const char* filename = dataElement->Attribute("file");
    if ( filename )
    {
      try
      {
        std::vector<BaseData::Pointer> baseData = IOUtil::Load( workingDirectory + Poco::Path::separator() + filename );
        // BUGFIX: IOUtil::Load may return an empty vector; calling front() on
        // it would be undefined behavior. An empty result now falls through to
        // the "Factory returned NULL object" error below.
        if (!baseData.empty())
        {
          if (baseData.size() > 1)
          {
            MITK_WARN << "Discarding multiple base data results from " << filename << " except the first one.";
          }
          node = DataNode::New();
          node->SetData(baseData.front());
        }
      }
      catch (std::exception& e)
      {
        MITK_ERROR << "Error during attempt to read '" << filename << "'. Exception says: " << e.what();
        error = true;
      }

      if (node.IsNull())
      {
        MITK_ERROR << "Error during attempt to read '" << filename << "'. Factory returned NULL object.";
        error = true;
      }
    }
  }

  // in case there was no <data> element we create a new empty node (for appending a propertylist later)
  if (node.IsNull())
  {
    node = DataNode::New();
  }

  return node;
}

/// Clears \a propertyList, but keeps selected properties (see bug 19354).
void mitk::SceneReaderV1::ClearNodePropertyListWithExceptions(DataNode& node, PropertyList& propertyList)
{
  // Basically call propertyList.Clear(), but implement exceptions (see bug 19354)
  BaseData* data = node.GetData();

  PropertyList::Pointer propertiesToKeep = PropertyList::New();

  if (dynamic_cast<Image*>(data))
  {
    /*
      Older scene files (before changes of bug 17547) could contain
      a RenderingMode property with value "LevelWindow_Color".
      Since bug 17547 this value has been removed and replaced by
      the default value LookupTable_LevelWindow_Color.

      This new default value does only result in "black-to-white"
      CT images (or others) if there is a corresponding lookup
      table. Such a lookup table is provided as a default value
      by the Image mapper. Since that value was never present in
      older scene files, we do well in not removing the new
      default value here. Otherwise the mapper would fall back
      to another default which is all the colors of the rainbow :-(
    */
    BaseProperty::Pointer lutProperty = propertyList.GetProperty("LookupTable");
    propertiesToKeep->SetProperty("LookupTable", lutProperty);
  }

  propertyList.Clear();

  propertyList.ConcatenatePropertyList(propertiesToKeep);
}

/// Deserializes every <properties file="..." renderwindow="..."> child of
/// \a nodeElement into the node's (possibly renderer-specific) property list.
/// Returns false if any property file could not be read.
bool mitk::SceneReaderV1::DecorateNodeWithProperties(DataNode* node, TiXmlElement* nodeElement, const std::string& workingDirectory)
{
  assert(node);
  assert(nodeElement);
  bool error(false);

  for( TiXmlElement* properties = nodeElement->FirstChildElement("properties");
       properties != NULL;
       properties = properties->NextSiblingElement("properties") )
  {
    const char* propertiesfilea( properties->Attribute("file") );
    std::string propertiesfile( propertiesfilea ? propertiesfilea : "" );

    const char* renderwindowa( properties->Attribute("renderwindow") );
    std::string renderwindow( renderwindowa ? renderwindowa : "" );

    // DataNode implementation always returns a propertylist
    PropertyList::Pointer propertyList = node->GetPropertyList(renderwindow);
    ClearNodePropertyListWithExceptions(*node, *propertyList);

    // use deserializer to construct new properties
    PropertyListDeserializer::Pointer deserializer = PropertyListDeserializer::New();

    deserializer->SetFilename(workingDirectory + Poco::Path::separator() + propertiesfile);
    bool success = deserializer->Deserialize();
    error |= !success;
    PropertyList::Pointer readProperties = deserializer->GetOutput();

    if (readProperties.IsNotNull())
    {
      propertyList->ConcatenatePropertyList( readProperties, true ); // true = replace
    }
    else
    {
      MITK_ERROR << "Property list reader did not return a property list. This is an implementation error. Please tell your developer.";
      error = true;
    }
  }

  return !error;
}

/// Deserializes the property file referenced by \a baseDataNodeElem and
/// attaches the resulting property list to \a data.
/// Returns false on missing 'file' attribute or deserialization failure.
bool mitk::SceneReaderV1::DecorateBaseDataWithProperties(BaseData::Pointer data, TiXmlElement *baseDataNodeElem, const std::string &workingDir)
{
  // check given variables, initialize error variable
  assert(baseDataNodeElem);
  bool error(false);

  // get the file name stored in the <properties ...> tag
  const char* baseDataPropertyFile( baseDataNodeElem->Attribute("file") );
  // check if the filename was found
  if(baseDataPropertyFile)
  {
    //PropertyList::Pointer dataPropList = data->GetPropertyList();

    PropertyListDeserializer::Pointer propertyDeserializer = PropertyListDeserializer::New();

    // initialize the property reader
    propertyDeserializer->SetFilename(workingDir + Poco::Path::separator() + baseDataPropertyFile);
    bool ioSuccess = propertyDeserializer->Deserialize();
    error = !ioSuccess;

    // get the output
    PropertyList::Pointer inProperties = propertyDeserializer->GetOutput();

    // store the read-in properties to the given node or throw error otherwise
    if( inProperties.IsNotNull() )
    {
      data->SetPropertyList( inProperties );
    }
    else
    {
      MITK_ERROR << "The property deserializer did not return a (valid) property list.";
      error = true;
    }
  }
  else
  {
    MITK_ERROR << "Function DecorateBaseDataWithProperties(...) called with false TiXmlElement. \n \t ->Given element does not contain a 'file' attribute. \n";
    error = true;
  }

  return !error;
}
/*=========================================================================

  Program:   Insight Segmentation & Registration Toolkit
  Module:    $RCSfile: itkLabelImageGaussianInterpolateImageFunction.hxx,v $
  Language:  C++
  Date:      $Date: $
  Version:   $Revision: $

  Copyright (c) Insight Software Consortium. All rights reserved.
  See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.

  Portions of this code are covered under the VTK copyright.
  See VTKCopyright.txt or http://www.kitware.com/VTKCopyright.htm for details.

  This software is distributed WITHOUT ANY WARRANTY; without even
  the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
  PURPOSE.  See the above copyright notices for more information.

=========================================================================*/
#ifndef __itkLabelImageGaussianInterpolateImageFunction_hxx
#define __itkLabelImageGaussianInterpolateImageFunction_hxx

#include "itkLabelImageGaussianInterpolateImageFunction.h"

namespace itk
{

/**
 * Constructor
 */
template<typename TInputImage, typename TCoordRep, typename TPixelCompare>
LabelImageGaussianInterpolateImageFunction<TInputImage, TCoordRep, TPixelCompare>
::LabelImageGaussianInterpolateImageFunction()
{
}

/**
 * Gaussian-weighted label interpolation: for every label (pixel value) inside
 * a cutoff window around the continuous index, accumulate a separable
 * Gaussian (erf-difference) weight, and return the label with the largest
 * accumulated weight. This keeps label images free of the mixed values that
 * ordinary linear/Gaussian interpolation would produce.
 *
 * NOTE(review): relies on m_BoundingBoxStart/End, m_CutoffDistance and
 * ComputeErrorFunctionArray being prepared by the base class
 * (presumably GaussianInterpolateImageFunction) — confirm against its header.
 */
template<typename TInputImage, typename TCoordRep, typename TPixelCompare>
typename LabelImageGaussianInterpolateImageFunction<TInputImage, TCoordRep, TPixelCompare>
::OutputType
LabelImageGaussianInterpolateImageFunction<TInputImage, TCoordRep, TPixelCompare>
::EvaluateAtContinuousIndex( const ContinuousIndexType & cindex, OutputType * itkNotUsed( grad ) ) const
{
  vnl_vector<RealType> erfArray[ImageDimension];
  vnl_vector<RealType> gerfArray[ImageDimension];

  // Compute the ERF difference arrays (per-axis 1-D weight profiles; the full
  // weight of a voxel is the product of the per-axis entries — separability).
  for( unsigned int d = 0; d < ImageDimension; d++ )
    {
    // gradient output is unused here, so skip its computation
    const bool evaluateGradient = false;
    this->ComputeErrorFunctionArray( d, cindex[d], erfArray[d],
      gerfArray[d], evaluateGradient );
    }

  // Loop over the voxels in the region identified:
  // clip a per-axis window of +/- m_CutoffDistance around cindex against the
  // bounding box, expressed in bounding-box-relative integer coordinates.
  ImageRegion<ImageDimension> region;
  for( unsigned int d = 0; d < ImageDimension; d++ )
    {
    // + 0.5 rounds the (real-valued) box extent to the nearest integer size
    const int boundingBoxSize = static_cast<int>(
      this->m_BoundingBoxEnd[d] - this->m_BoundingBoxStart[d] + 0.5 );
    const int begin = vnl_math_max( 0, static_cast<int>( std::floor( cindex[d] -
      this->m_BoundingBoxStart[d] - this->m_CutoffDistance[d] ) ) );
    const int end = vnl_math_min( boundingBoxSize, static_cast<int>( std::ceil(
      cindex[d] - this->m_BoundingBoxStart[d] + this->m_CutoffDistance[d] ) ) );
    region.SetIndex( d, begin );
    region.SetSize( d, end - begin );
    }

  RealType wmax = 0.0;
  OutputType Vmax = NumericTraits<OutputType>::Zero;

  // Create a map object to store weights for each label encountered
  // inside the search region. This is not as efficient as having a
  // linear list of labels, but probably not a huge deal compared to
  // having to evaluate the erf function
  typedef std::map<OutputType, RealType, TPixelCompare> WeightMapType;
  typedef typename std::map<OutputType, RealType, TPixelCompare>::iterator
    WeightMapIteratorType;
  WeightMapType weightMap;

  ImageRegionConstIteratorWithIndex<InputImageType> It( this->GetInputImage(), region );
  for( It.GoToBegin(); !It.IsAtEnd(); ++It )
    {
    // separable weight: product of the per-axis erf-difference entries
    unsigned int j = It.GetIndex()[0];
    RealType w = erfArray[0][j];
    for( unsigned int d = 1; d < ImageDimension; d++)
      {
      j = It.GetIndex()[d];
      w *= erfArray[d][j];
      }
    // accumulate this voxel's weight onto its label's running total
    const OutputType V = It.Get();
    WeightMapIteratorType it = weightMap.find( V );
    RealType wtest = 0.0;
    if( it != weightMap.end() )
      {
      it->second += w;
      wtest = it->second;
      }
    else
      {
      weightMap.insert( std::make_pair( V, w ) );
      wtest = w;
      }
    // Keep track of the max value — the running argmax avoids a second pass
    // over the weight map after the scan.
    if( wtest > wmax )
      {
      wmax = wtest;
      Vmax = V;
      }
    }
  // label with the largest total Gaussian weight wins
  return Vmax;
}

} // namespace itk

#endif
#include <iostream> // standard input/output
#include <iomanip>  // output precision control (setprecision)

// Reads a distance (whole kilometres) and a fuel amount (litres) from
// standard input and prints the average consumption in km/l with exactly
// three decimal places.
int main()
{
    int distanceKm;  // distance driven, integer kilometres
    float litres;    // fuel spent, litres

    std::cin >> distanceKm >> litres;

    // fixed-point notation with three decimals, as the output format requires
    std::cout << std::fixed << std::setprecision(3);
    std::cout << distanceKm / litres << " km/l" << std::endl;

    // system("pause");
    return 0;
}
#ifndef assert_hh_INCLUDED
#define assert_hh_INCLUDED

namespace Kakoune
{

class StringView;

// return true if user asked to ignore the error
bool notify_fatal_error(StringView message);

// Hook invoked when a kak_assert / kak_expect_throw check fails.
void on_assert_failed(const char* message);

}

// Two-step stringification so that macro arguments (e.g. __LINE__) are
// expanded before being turned into a string literal.
#define STRINGIFY(X) #X
#define TOSTRING(X) STRINGIFY(X)

#ifdef KAK_DEBUG
// Debug build: evaluate the condition and report source location on failure.
// The do/while(false) wrapper makes the macro behave like a single statement.
#define kak_assert(...) do { \
    if (not (__VA_ARGS__)) \
        on_assert_failed("assert failed \"" #__VA_ARGS__ \
                         "\" at " __FILE__ ":" TOSTRING(__LINE__)); \
    } while (false)
// Debug build: run the expression and fail unless it throws exception_type.
#define kak_expect_throw(exception_type, ...) try {\
    __VA_ARGS__; \
    on_assert_failed("expression \"" #__VA_ARGS__ \
                     "\" did not throw \"" #exception_type \
                     "\" at " __FILE__ ":" TOSTRING(__LINE__)); \
    } catch (exception_type &err) {}
#else
// Release build: checks compile to nothing, but sizeof still forces the
// expression to be syntactically valid (and avoids unused-variable warnings).
#define kak_assert(...) do { (void)sizeof(__VA_ARGS__); } while(false)
#define kak_expect_throw(_, ...) do { (void)sizeof(__VA_ARGS__); } while(false)
#endif

#endif // assert_hh_INCLUDED
#include "openvslam/camera/base.h"
#include "openvslam/data/common.h"
#include "openvslam/data/frame.h"
#include "openvslam/data/keyframe.h"
#include "openvslam/data/landmark.h"
#include "openvslam/data/camera_database.h"
#include "openvslam/data/map_database.h"
#include "openvslam/util/converter.h"

#include <spdlog/spdlog.h>

namespace openvslam {
namespace data {

std::mutex map_database::mtx_database_;

map_database::map_database() {
    spdlog::debug("CONSTRUCT: data::map_database");
}

map_database::~map_database() {
    clear();
    spdlog::debug("DESTRUCT: data::map_database");
}

//! Registers a keyframe and keeps the max keyframe ID up to date.
void map_database::add_keyframe(keyframe* keyfrm) {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    keyframes_[keyfrm->id_] = keyfrm;
    if (keyfrm->id_ > max_keyfrm_id_) {
        max_keyfrm_id_ = keyfrm->id_;
    }
}

//! Removes a keyframe from the database (the object itself is not deleted).
void map_database::erase_keyframe(keyframe* keyfrm) {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    keyframes_.erase(keyfrm->id_);

    // TODO: delete the keyframe object itself
}

//! Registers a landmark.
void map_database::add_landmark(landmark* lm) {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    landmarks_[lm->id_] = lm;
}

//! Removes a landmark from the database (the object itself is not deleted).
void map_database::erase_landmark(landmark* lm) {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    landmarks_.erase(lm->id_);

    // TODO: delete the landmark object itself
}

//! Replaces the set of landmarks considered "local" to the current view.
void map_database::set_local_landmarks(const std::vector<landmark*>& local_lms) {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    local_landmarks_ = local_lms;
}

std::vector<landmark*> map_database::get_local_landmarks() const {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    return local_landmarks_;
}

//! Returns all registered keyframes (unordered snapshot).
std::vector<keyframe*> map_database::get_all_keyframes() const {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    std::vector<keyframe*> keyframes;
    keyframes.reserve(keyframes_.size());
    for (const auto id_keyframe : keyframes_) {
        keyframes.push_back(id_keyframe.second);
    }
    return keyframes;
}

unsigned int map_database::get_num_keyframes() const {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    return keyframes_.size();
}

//! Returns all registered landmarks (unordered snapshot).
std::vector<landmark*> map_database::get_all_landmarks() const {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    std::vector<landmark*> landmarks;
    landmarks.reserve(landmarks_.size());
    for (const auto id_landmark : landmarks_) {
        landmarks.push_back(id_landmark.second);
    }
    return landmarks;
}

unsigned int map_database::get_num_landmarks() const {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    return landmarks_.size();
}

unsigned int map_database::get_max_keyframe_id() const {
    std::lock_guard<std::mutex> lock(mtx_map_access_);
    return max_keyfrm_id_;
}

//! Deletes every keyframe and landmark and resets all bookkeeping state.
void map_database::clear() {
    std::lock_guard<std::mutex> lock(mtx_map_access_);

    for (auto& lm : landmarks_) {
        delete lm.second;
        lm.second = nullptr;
    }

    for (auto& keyfrm : keyframes_) {
        delete keyfrm.second;
        keyfrm.second = nullptr;
    }

    landmarks_.clear();
    keyframes_.clear();
    max_keyfrm_id_ = 0;
    local_landmarks_.clear();
    origin_keyfrm_ = nullptr;

    frm_stats_.clear();

    spdlog::info("clear map database");
}

//! Rebuilds the whole map (keyframes, landmarks, graph, associations) from JSON.
void map_database::from_json(camera_database* cam_db, bow_vocabulary* bow_vocab, bow_database* bow_db,
                             const nlohmann::json& json_keyfrms, const nlohmann::json& json_landmarks) {
    std::lock_guard<std::mutex> lock(mtx_map_access_);

    // 1. Delete everything currently stored in the database
    for (auto& lm : landmarks_) {
        delete lm.second;
        lm.second = nullptr;
    }

    for (auto& keyfrm : keyframes_) {
        delete keyfrm.second;
        keyfrm.second = nullptr;
    }

    landmarks_.clear();
    keyframes_.clear();
    max_keyfrm_id_ = 0;
    local_landmarks_.clear();
    origin_keyfrm_ = nullptr;

    // 2. Register the keyframes; pointers to not-yet-existing objects stay nullptr for now
    spdlog::info("decoding {} keyframes to load", json_keyfrms.size());
    for (const auto& json_id_keyfrm : json_keyfrms.items()) {
        const auto id = std::stoi(json_id_keyfrm.key());
        assert(0 <= id);
        const auto json_keyfrm = json_id_keyfrm.value();

        register_keyframe(cam_db, bow_vocab, bow_db, id, json_keyfrm);
    }

    // 3. Register the 3D points; pointers to not-yet-existing objects stay nullptr for now
    spdlog::info("decoding {} landmarks to load", json_landmarks.size());
    for (const auto& json_id_landmark : json_landmarks.items()) {
        const auto id = std::stoi(json_id_landmark.key());
        assert(0 <= id);
        const auto json_landmark = json_id_landmark.value();

        register_landmark(id, json_landmark);
    }

    // 4. Register the graph information (spanning tree and loop edges)
    spdlog::info("registering essential graph");
    for (const auto& json_id_keyfrm : json_keyfrms.items()) {
        const auto id = std::stoi(json_id_keyfrm.key());
        assert(0 <= id);
        const auto json_keyfrm = json_id_keyfrm.value();

        register_graph(id, json_keyfrm);
    }

    // 5. Register the keyframe <-> landmark associations
    spdlog::info("registering keyframe-landmark association");
    for (const auto& json_id_keyfrm : json_keyfrms.items()) {
        const auto id = std::stoi(json_id_keyfrm.key());
        assert(0 <= id);
        const auto json_keyfrm = json_id_keyfrm.value();

        register_association(id, json_keyfrm);
    }

    // 6. Update the covisibility graph
    spdlog::info("updating covisibility graph");
    for (const auto& json_id_keyfrm : json_keyfrms.items()) {
        const auto id = std::stoi(json_id_keyfrm.key());
        assert(0 <= id);
        assert(keyframes_.count(id));
        auto keyfrm = keyframes_.at(id);

        keyfrm->update_connections();
        keyfrm->update_covisibility_orders();
    }

    // 7. Update the landmark geometry
    spdlog::info("updating landmark geometry");
    for (const auto& json_id_landmark : json_landmarks.items()) {
        const auto id = std::stoi(json_id_landmark.key());
        assert(0 <= id);
        assert(landmarks_.count(id));
        auto lm = landmarks_.at(id);

        lm->update_normal_and_depth();
        lm->compute_descriptor();
    }
}

//! Decodes one keyframe from JSON and inserts it into the database.
void map_database::register_keyframe(camera_database* cam_db, bow_vocabulary* bow_vocab, bow_database* bow_db,
                                     const unsigned int id, const nlohmann::json& json_keyfrm) {
    // 2-0. Metadata
    // BUGFIX: the source frame ID was mistakenly read from the "n_keypts" key
    // (which holds the keypoint count, read again below); use "src_frm_id".
    const auto src_frm_id = json_keyfrm.at("src_frm_id").get<unsigned int>();
    const auto timestamp = json_keyfrm.at("ts").get<double>();
    const auto camera_name = json_keyfrm.at("cam").get<std::string>();
    const auto camera = cam_db->get_camera(camera_name);
    const auto depth_thr = json_keyfrm.at("depth_thr").get<float>();

    // 2-1. Pose information
    const Mat33_t rot_cw = convert_json_to_rotation(json_keyfrm.at("rot_cw"));
    const Vec3_t trans_cw = convert_json_to_translation(json_keyfrm.at("trans_cw"));
    const auto cam_pose_cw = util::converter::to_eigen_cam_pose(rot_cw, trans_cw);

    // 2-2. Keypoint information
    const auto num_keypts = json_keyfrm.at("n_keypts").get<unsigned int>();
    // keypts
    const auto json_keypts = json_keyfrm.at("keypts");
    const auto keypts = convert_json_to_keypoints(json_keypts);
    assert(keypts.size() == num_keypts);
    // undist_keypts
    const auto json_undist_keypts = json_keyfrm.at("undists");
    const auto undist_keypts = convert_json_to_undistorted(json_undist_keypts);
    assert(undist_keypts.size() == num_keypts);
    // bearings
    auto bearings = eigen_alloc_vector<Vec3_t>(num_keypts);
    assert(bearings.size() == num_keypts);
    camera->convert_keypoints_to_bearings(undist_keypts, bearings);
    // stereo_x_right
    const auto stereo_x_right = json_keyfrm.at("x_rights").get<std::vector<float>>();
    assert(stereo_x_right.size() == num_keypts);
    // depths
    const auto depths = json_keyfrm.at("depths").get<std::vector<float>>();
    assert(depths.size() == num_keypts);
    // descriptors
    const auto json_descriptors = json_keyfrm.at("descs");
    const auto descriptors = convert_json_to_descriptors(json_descriptors);
    assert(descriptors.rows == static_cast<int>(num_keypts));

    // 2-3. ORB scale information
    const auto num_scale_levels = json_keyfrm.at("n_scale_levels").get<unsigned int>();
    const auto scale_factor = json_keyfrm.at("scale_factor").get<float>();

    // 2-4. Construct the keyframe object
    auto keyfrm = new data::keyframe(id, src_frm_id, timestamp, cam_pose_cw, camera, depth_thr,
                                     num_keypts, keypts, undist_keypts, bearings, stereo_x_right, depths, descriptors,
                                     num_scale_levels, scale_factor, bow_vocab, bow_db, this);

    // 2-5. Add it to the database
    assert(!keyframes_.count(id));
    keyframes_[keyfrm->id_] = keyfrm;
    if (keyfrm->id_ > max_keyfrm_id_) {
        max_keyfrm_id_ = keyfrm->id_;
    }
    if (id == 0) {
        origin_keyfrm_ = keyfrm;
    }
}

//! Decodes one landmark from JSON and inserts it into the database.
void map_database::register_landmark(const unsigned int id, const nlohmann::json& json_landmark) {
    const auto first_keyfrm_id = json_landmark.at("1st_keyfrm").get<int>();
    const auto pos_w = Vec3_t(json_landmark.at("pos_w").get<std::vector<Vec3_t::value_type>>().data());
    const auto ref_keyfrm_id = json_landmark.at("ref_keyfrm").get<int>();
    const auto ref_keyfrm = keyframes_.at(ref_keyfrm_id);
    const auto num_visible = json_landmark.at("n_vis").get<unsigned int>();
    const auto num_found = json_landmark.at("n_fnd").get<unsigned int>();

    auto lm = new data::landmark(id, first_keyfrm_id, pos_w, ref_keyfrm,
                                 num_visible, num_found, this);
    assert(!landmarks_.count(id));
    landmarks_[lm->id_] = lm;
}

//! Restores the spanning tree and loop edges of one keyframe from JSON.
void map_database::register_graph(const unsigned int id, const nlohmann::json& json_keyfrm) {
    // Graph information
    const auto spanning_parent_id = json_keyfrm.at("span_parent").get<int>();
    const auto spanning_children_ids = json_keyfrm.at("span_children").get<std::vector<int>>();
    const auto loop_edge_ids = json_keyfrm.at("loop_edges").get<std::vector<int>>();

    assert(keyframes_.count(id));
    // -1 marks "no parent" (the spanning-tree root)
    assert(spanning_parent_id == -1 || keyframes_.count(spanning_parent_id));
    keyframes_.at(id)->set_spanning_parent((spanning_parent_id == -1) ? nullptr : keyframes_.at(spanning_parent_id));
    for (const auto spanning_child_id : spanning_children_ids) {
        assert(keyframes_.count(spanning_child_id));
        keyframes_.at(id)->add_spanning_child(keyframes_.at(spanning_child_id));
    }
    for (const auto loop_edge_id : loop_edge_ids) {
        assert(keyframes_.count(loop_edge_id));
        keyframes_.at(id)->add_loop_edge(keyframes_.at(loop_edge_id));
    }
}

//! Restores the keypoint-index -> landmark associations of one keyframe.
void map_database::register_association(const unsigned int keyfrm_id, const nlohmann::json& json_keyfrm) {
    // Keypoint information
    const auto num_keypts = json_keyfrm.at("n_keypts").get<unsigned int>();
    const auto landmark_ids = json_keyfrm.at("lm_ids").get<std::vector<int>>();
    assert(landmark_ids.size() == num_keypts);

    assert(keyframes_.count(keyfrm_id));
    auto keyfrm = keyframes_.at(keyfrm_id);
    for (unsigned int idx = 0; idx < num_keypts; ++idx) {
        const auto lm_id = landmark_ids.at(idx);
        // negative ID = keypoint has no associated landmark
        if (lm_id < 0) {
            continue;
        }
        if (!landmarks_.count(lm_id)) {
            spdlog::warn("landmark {}: not found in the database", lm_id);
            continue;
        }

        auto lm = landmarks_.at(lm_id);
        keyfrm->add_landmark(lm, idx);
        lm->add_observation(keyfrm, idx);
    }
}

//! Serializes all keyframes and landmarks to JSON, keyed by their string IDs.
void map_database::to_json(nlohmann::json& json_keyfrms, nlohmann::json& json_landmarks) {
    std::lock_guard<std::mutex> lock(mtx_map_access_);

    // Convert each keyframe to JSON and store it
    spdlog::info("encoding {} keyframes to store", keyframes_.size());
    std::map<std::string, nlohmann::json> keyfrms;
    for (const auto id_keyfrm : keyframes_) {
        const auto id = id_keyfrm.first;
        const auto keyfrm = id_keyfrm.second;
        assert(keyfrm);
        assert(id == keyfrm->id_);
        assert(!keyfrm->will_be_erased());
        keyfrm->update_connections();
        assert(!keyfrms.count(std::to_string(id)));
        keyfrms[std::to_string(id)] = keyfrm->to_json();
    }
    json_keyfrms = keyfrms;

    // Convert each landmark to JSON and store it
    spdlog::info("encoding {} landmarks to store", landmarks_.size());
    std::map<std::string, nlohmann::json> landmarks;
    for (const auto id_lm : landmarks_) {
        const auto id = id_lm.first;
        const auto lm = id_lm.second;
        assert(lm);
        assert(id == lm->id_);
        assert(!lm->will_be_erased());
        lm->update_normal_and_depth();
        assert(!landmarks.count(std::to_string(id)));
        landmarks[std::to_string(id)] = lm->to_json();
    }
    json_landmarks = landmarks;
}

} // namespace data
} // namespace openvslam
/*!
 * \author Saurabh Joshi - sbjoshi@iith.ac.in
 *
 * @section LICENSE
 *
 * Open-WBO, Copyright (c) 2013-2017, Ruben Martins, Vasco Manquinho, Ines Lynce
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "Enc_GTE.h"
#include <algorithm>
#include <numeric>

using namespace openwbo;

// Orders weighted literals by ascending weight (used before encoding).
struct less_than_wlitt {
  inline bool operator()(const wlitt &wl1, const wlitt &wl2) {
    return (wl1.weight < wl2.weight);
  }
};

// Allocates a fresh SAT variable in the solver and returns it as a literal.
Lit GTE::getNewLit(Solver *S) {
  Lit p = mkLit(S->nVars(), false);
  newSATVariable(S);
  nb_variables++;
  return p;
}

// Returns the output literal representing "sum >= weight", creating it on
// demand. (Single map lookup instead of the former find + operator[] pair.)
Lit GTE::get_var(Solver *S, wlit_mapt &oliterals, uint64_t weight) {
  wlit_mapt::iterator it = oliterals.find(weight);
  if (it == oliterals.end()) {
    it = oliterals.insert(wlit_pairt(weight, getNewLit(S))).first;
  }
  return it->second;
}

// Recursively builds the Generalized Totalizer tree for "sum(iliterals) <= k":
// splits the inputs in half, encodes each half, then merges the two output
// maps with binary/ternary clauses. Sums exceeding k are clamped to the
// single output literal for k. Returns false when there is nothing to encode.
bool GTE::encodeLeq(uint64_t k, Solver *S, const weightedlitst &iliterals,
                    wlit_mapt &oliterals) {

  if (iliterals.size() == 0 || k == 0)
    return false;

  if (iliterals.size() == 1) {
    oliterals.insert(
        wlit_pairt(iliterals.front().weight, iliterals.front().lit));
    return true;
  }

  unsigned int size = iliterals.size();

  // formulat lformula,rformula;
  weightedlitst linputs, rinputs;
  wlit_mapt loutputs, routputs;

  unsigned int lsize = size >> 1;
  // unsigned int rsize=size-lsize;
  weightedlitst::const_iterator myit = iliterals.begin();
  weightedlitst::const_iterator myit1 = myit + lsize;
  weightedlitst::const_iterator myit2 = iliterals.end();

  linputs.insert(linputs.begin(), myit, myit1);
  rinputs.insert(rinputs.begin(), myit1, myit2);

  /*wlitt init_wlit;
  init_wlit.lit = lit_Undef;
  init_wlit.weight=0;*/
  wlit_sumt wlit_sum;
  // BUGFIX: the initial value must be a uint64_t — with a plain int literal
  // std::accumulate deduces int as the accumulation type and truncates /
  // overflows large pseudo-Boolean weight sums.
  uint64_t lk = std::accumulate(linputs.begin(), linputs.end(),
                                static_cast<uint64_t>(0), wlit_sum);
  uint64_t rk = std::accumulate(rinputs.begin(), rinputs.end(),
                                static_cast<uint64_t>(0), wlit_sum);

  // no child needs to count above k — clamp the sub-bounds
  lk = k >= lk ? lk : k;
  rk = k >= rk ? rk : k;

  bool result = encodeLeq(lk, S, linputs, loutputs);
  if (!result)
    return result;
  result = result && encodeLeq(rk, S, rinputs, routputs);
  if (!result)
    return result;

  // Propagate each left output to the merged outputs (clamped at k).
  {
    assert(!loutputs.empty());

    for (wlit_mapt::iterator mit = loutputs.begin(); mit != loutputs.end();
         mit++) {

      if (mit->first > k) {
        addBinaryClause(S, ~mit->second, get_var(S, oliterals, k));
        nb_clauses++;
      } else {
        addBinaryClause(S, ~mit->second, get_var(S, oliterals, mit->first));
        nb_clauses++;
        // clause.push_back(get_var(auxvars,oliterals,l.first));
      }

      // formula.push_back(std::move(clause));
    }
  }

  // Propagate each right output to the merged outputs (clamped at k).
  {
    assert(!routputs.empty());

    for (wlit_mapt::iterator mit = routputs.begin(); mit != routputs.end();
         mit++) {

      if (mit->first > k) {
        addBinaryClause(S, ~mit->second, get_var(S, oliterals, k));
        nb_clauses++;
        // clause.push_back(get_var(auxvars,oliterals,k));
      } else {
        addBinaryClause(S, ~mit->second, get_var(S, oliterals, mit->first));
        nb_clauses++;
        // clause.push_back(get_var(auxvars,oliterals,r.first));
      }

      // formula.push_back(std::move(clause));
    }
  }

  // Combine every pair (left output, right output): their weight sum implies
  // the corresponding merged output (clamped at k).
  // if(!lformula.empty() && !rformula.empty())
  {
    for (wlit_mapt::iterator lit = loutputs.begin(); lit != loutputs.end();
         lit++) {
      for (wlit_mapt::iterator rit = routputs.begin(); rit != routputs.end();
           rit++) {
        /*clauset clause;
        clause.push_back(-l.second);
        clause.push_back(-r.second);*/
        uint64_t tw = lit->first + rit->first;
        if (tw > k) {
          addTernaryClause(S, ~lit->second, ~rit->second,
                           get_var(S, oliterals, k));
          nb_clauses++;
          // clause.push_back(get_var(auxvars,oliterals,k));
        } else {
          addTernaryClause(S, ~lit->second, ~rit->second,
                           get_var(S, oliterals, tw));
          nb_clauses++;
          // clause.push_back(get_var(auxvars,oliterals,tw));
        }

        // formula.push_back(std::move(clause));
      }
    }
  }

  return true;
}

// Encodes the pseudo-Boolean constraint "sum(coeffs_i * lits_i) <= rhs" into
// CNF via the Generalized Totalizer Encoding. Literals whose coefficient
// exceeds rhs are fixed to false up front.
void GTE::encode(Solver *S, vec<Lit> &lits, vec<uint64_t> &coeffs,
                 uint64_t rhs) {
  // FIXME: do not change coeffs in this method. Make coeffs const.

  // If the rhs is larger than INT32_MAX is not feasible to encode this
  // pseudo-Boolean constraint to CNF.
  if (rhs >= INT32_MAX) {
    printf("c Overflow in the Encoding\n");
    printf("s UNKNOWN\n");
    exit(_ERROR_);
  }

  hasEncoding = false;
  nb_variables = 0;
  nb_clauses = 0;

  vec<Lit> simp_lits;
  vec<uint64_t> simp_coeffs;
  lits.copyTo(simp_lits);
  coeffs.copyTo(simp_coeffs);

  lits.clear();
  coeffs.clear();

  // Fix literals that have a coeff larger than rhs.
  for (int i = 0; i < simp_lits.size(); i++) {
    if (simp_coeffs[i] == 0)
      continue;

    if (simp_coeffs[i] >= INT32_MAX) {
      printf("c Overflow in the Encoding\n");
      printf("s UNKNOWN\n");
      exit(_ERROR_);
    }

    if (simp_coeffs[i] <= (unsigned)rhs) {
      lits.push(simp_lits[i]);
      coeffs.push(simp_coeffs[i]);
    } else
      addUnitClause(S, ~simp_lits[i]);
  }

  if (lits.size() == 1) {
    // a single literal with coeff <= rhs can never violate the constraint
    // addUnitClause(S, ~lits[0]);
    return;
  }

  if (lits.size() == 0)
    return;

  weightedlitst iliterals;
  for (int i = 0; i < lits.size(); i++) {
    wlitt wl;
    wl.lit = lits[i];
    wl.weight = coeffs[i];
    iliterals.push_back(wl);
  }
  // sort by weight so that GTE is more efficient
  less_than_wlitt lt_wlit;
  std::sort(iliterals.begin(), iliterals.end(), lt_wlit);
  encodeLeq(rhs + 1, S, iliterals, pb_oliterals);

  // forbid every output representing a sum strictly greater than rhs
  for (wlit_mapt::reverse_iterator rit = pb_oliterals.rbegin();
       rit != pb_oliterals.rend(); rit++) {
    if (rit->first > rhs) {
      addUnitClause(S, ~rit->second);
    } else {
      break;
    }
  }
  // addUnitClause(S,~pb_oliterals.rbegin()->second);
  /*
  if (pb_oliterals.rbegin()->first != rhs+1){
   printf("%d - %d\n",pb_oliterals.rbegin()->first,rhs);
   for(wlit_mapt::reverse_iterator
  rit=pb_oliterals.rbegin();rit!=pb_oliterals.rend();rit++)
   {
     printf("rit->first %d\n",rit->first);
   }
  }
  */
  // assert (pb_oliterals.rbegin()->first == rhs+1);
  // printLit(~pb_oliterals.rbegin()->second);
  current_pb_rhs = rhs;
  hasEncoding = true;
}

// Drops the current encoding state (does not retract already-added clauses).
void GTE::deleteEncoding() {
  hasEncoding = false;
  current_pb_rhs = 0;
  nb_clauses = 0;
  nb_variables = 0;
}

// Tightens an existing encoding to a new, smaller rhs by forbidding all
// output literals in (rhs, current_pb_rhs].
void GTE::update(Solver *S, uint64_t rhs) {
  assert(hasEncoding);
  for (wlit_mapt::reverse_iterator rit = pb_oliterals.rbegin();
       rit != pb_oliterals.rend(); rit++) {
    // outputs above the previous rhs were already forbidden by encode()
    if (rit->first > current_pb_rhs)
      continue;
    if (rit->first > rhs) {
      addUnitClause(S, ~rit->second);
    } else {
      break;
    }
  }
  current_pb_rhs = rhs;
}
#include "ActionEquipItem.h" #include <AI/Prop/PropActorBrain.h> #include <Chr/Prop/PropEquipment.h> namespace AI { ImplementRTTI(AI::CActionEquipItem, AI::CAction); ImplementFactory(AI::CActionEquipItem); using namespace Properties; bool CActionEquipItem::Activate(CActor* pActor) { //!!!later play animation! //???where to attach model? CPropEquipment* pEquipment = pActor->GetEntity()->FindProperty<CPropEquipment>(); return pEquipment ? pEquipment->Equip(Slot, pEquipment->FindItemStack(Item)) : false; } //--------------------------------------------------------------------- } //namespace AI
// Copyright 2006-2009 Daniel James. // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "../helpers/prefix.hpp" #include <boost/unordered_set.hpp> #include <boost/unordered_map.hpp> void foo(boost::unordered_set<int>& x1, boost::unordered_map<int, int>& x2, boost::unordered_multiset<int>& x3, boost::unordered_multimap<int, int>& x4) { #if BOOST_WORKAROUND(__CODEGEARC__, BOOST_TESTED_AT(0x0613)) struct dummy { boost::unordered_set<int> x1; boost::unordered_map<int, int> x2; boost::unordered_multiset<int> x3; boost::unordered_multimap<int, int> x4; }; #endif x1.insert(1); x2[2] = 2; x3.insert(3); x4.insert(std::make_pair(4, 5)); }
//===-- ASTMerge.cpp - AST Merging Frontent Action --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "clang/Frontend/ASTUnit.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTDiagnostic.h" #include "clang/AST/ASTImporter.h" #include "clang/Basic/Diagnostic.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendActions.h" using namespace clang; ASTConsumer *ASTMergeAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { return AdaptedAction->CreateASTConsumer(CI, InFile); } bool ASTMergeAction::BeginSourceFileAction(CompilerInstance &CI, StringRef Filename) { // FIXME: This is a hack. We need a better way to communicate the // AST file, compiler instance, and file name than member variables // of FrontendAction. AdaptedAction->setCurrentInput(getCurrentInput(), takeCurrentASTUnit()); AdaptedAction->setCompilerInstance(&CI); return AdaptedAction->BeginSourceFileAction(CI, Filename); } void ASTMergeAction::ExecuteAction() { CompilerInstance &CI = getCompilerInstance(); CI.getDiagnostics().getClient()->BeginSourceFile( CI.getASTContext().getLangOpts()); CI.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &CI.getASTContext()); IntrusiveRefCntPtr<DiagnosticIDs> DiagIDs(CI.getDiagnostics().getDiagnosticIDs()); for (unsigned I = 0, N = ASTFiles.size(); I != N; ++I) { IntrusiveRefCntPtr<DiagnosticsEngine> Diags(new DiagnosticsEngine(DiagIDs, &CI.getDiagnosticOpts(), new ForwardingDiagnosticConsumer( *CI.getDiagnostics().getClient()), /*ShouldOwnClient=*/true)); ASTUnit *Unit = ASTUnit::LoadFromASTFile(ASTFiles[I], Diags, CI.getFileSystemOpts(), false); if (!Unit) continue; ASTImporter Importer(CI.getASTContext(), CI.getFileManager(), Unit->getASTContext(), Unit->getFileManager(), 
/*MinimalImport=*/false); TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl(); for (auto *D : TU->decls()) { // Don't re-import __va_list_tag, __builtin_va_list. if (const auto *ND = dyn_cast<NamedDecl>(D)) if (IdentifierInfo *II = ND->getIdentifier()) if (II->isStr("__va_list_tag") || II->isStr("__builtin_va_list")) continue; Importer.Import(D); } delete Unit; } AdaptedAction->ExecuteAction(); CI.getDiagnostics().getClient()->EndSourceFile(); } void ASTMergeAction::EndSourceFileAction() { return AdaptedAction->EndSourceFileAction(); } ASTMergeAction::ASTMergeAction(FrontendAction *AdaptedAction, ArrayRef<std::string> ASTFiles) : AdaptedAction(AdaptedAction), ASTFiles(ASTFiles.begin(), ASTFiles.end()) { assert(AdaptedAction && "ASTMergeAction needs an action to adapt"); } ASTMergeAction::~ASTMergeAction() { delete AdaptedAction; } bool ASTMergeAction::usesPreprocessorOnly() const { return AdaptedAction->usesPreprocessorOnly(); } TranslationUnitKind ASTMergeAction::getTranslationUnitKind() { return AdaptedAction->getTranslationUnitKind(); } bool ASTMergeAction::hasPCHSupport() const { return AdaptedAction->hasPCHSupport(); } bool ASTMergeAction::hasASTFileSupport() const { return AdaptedAction->hasASTFileSupport(); } bool ASTMergeAction::hasCodeCompletionSupport() const { return AdaptedAction->hasCodeCompletionSupport(); }
/* * This source file is part of ARK * For the latest info, see https://github.com/ArkNX * * Copyright (c) 2013-2019 ArkNX authors. * * Licensed under the Apache License, Version 2.0 (the "License"), * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "base/AFDefine.hpp" #include "kernel/include/AFCKernelModule.hpp" #include "kernel/include/AFCEntity.hpp" #include "log/interface/AFILogModule.hpp" namespace ark { AFCKernelModule::AFCKernelModule() { inner_nodes_.AddElement(AFEntityMetaBaseEntity::config_id(), ARK_NEW int32_t(0)); inner_nodes_.AddElement(AFEntityMetaBaseEntity::class_name(), ARK_NEW int32_t(0)); inner_nodes_.AddElement(AFEntityMetaBaseEntity::map_id(), ARK_NEW int32_t(0)); inner_nodes_.AddElement(AFEntityMetaBaseEntity::map_inst_id(), ARK_NEW int32_t(0)); } AFCKernelModule::~AFCKernelModule() { objects_.clear(); } bool AFCKernelModule::Init() { delete_list_.clear(); m_pMapModule = FindModule<AFIMapModule>(); m_pClassModule = FindModule<AFIClassMetaModule>(); m_pConfigModule = FindModule<AFIConfigModule>(); m_pLogModule = FindModule<AFILogModule>(); m_pGUIDModule = FindModule<AFIGUIDModule>(); auto container_func = std::bind(&AFCKernelModule::OnContainerCallBack, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, std::placeholders::_4, std::placeholders::_5); AddCommonContainerCallBack(std::move(container_func), 999999); // after other callbacks being done return true; } bool AFCKernelModule::Update() { cur_exec_object_ = NULL_GUID; if (!delete_list_.empty()) { for (auto it : delete_list_) { 
DestroyEntity(it); } delete_list_.clear(); } for (auto iter : objects_) { auto pEntity = iter.second; if (pEntity == nullptr) { continue; } pEntity->Update(); } return true; } bool AFCKernelModule::PreShut() { return DestroyAll(); } bool AFCKernelModule::CopyData(std::shared_ptr<AFIEntity> pEntity, const ID_TYPE config_id) { if (pEntity == nullptr) { return false; } if (config_id > 0) { // static node manager must be not empty auto pStaticEntity = GetStaticEntity(config_id); auto pStaticNodeManager = GetNodeManager(pStaticEntity); if (pStaticNodeManager == nullptr || pStaticNodeManager->IsEmpty()) { return false; } // node manager must be empty auto pNodeManager = GetNodeManager(pEntity); if (pNodeManager == nullptr || !pNodeManager->IsEmpty()) { return false; } // copy data auto& data_list = pStaticNodeManager->GetDataList(); for (auto iter : data_list) { auto pData = iter.second; pNodeManager->CreateData(pData); } return true; } return false; } std::shared_ptr<AFIEntity> AFCKernelModule::CreateEntity(const AFGUID& self, const int map_id, const int map_instance_id, const std::string& class_name, const ID_TYPE config_id, const AFIDataList& args) { AFGUID object_id = self; auto pMapInfo = m_pMapModule->GetMapInfo(map_id); if (pMapInfo == nullptr) { ARK_LOG_ERROR("There is no scene, scene = {}", map_id); return nullptr; } if (!pMapInfo->ExistInstance(map_instance_id)) { ARK_LOG_ERROR("There is no group, scene = {} group = {}", map_id, map_instance_id); return nullptr; } auto pClassMeta = m_pClassModule->FindMeta(class_name); if (nullptr == pClassMeta) { ARK_LOG_ERROR("There is no class meta, name = {}", class_name); return nullptr; } // check args num size_t arg_count = args.GetCount(); if (arg_count % 2 != 0) { ARK_LOG_ERROR("Args count is wrong, count = {}", arg_count); return nullptr; } if (object_id == NULL_GUID) { object_id = m_pGUIDModule->CreateGUID(); } // Check if the entity exists if (GetEntity(object_id) != nullptr) { ARK_LOG_ERROR("The entity has existed, 
id = {}", object_id); return nullptr; } std::shared_ptr<AFIEntity> pEntity = std::make_shared<AFCEntity>(pClassMeta, object_id, config_id, map_id, map_instance_id); objects_.insert(object_id, pEntity); pMapInfo->AddEntityToInstance( map_instance_id, object_id, ((class_name == AFEntityMetaPlayer::self_name()) ? true : false)); CopyData(pEntity, config_id); DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_PRE_LOAD_DATA, args); for (size_t i = 0; i < arg_count; i += 2) { auto data_type = args.GetType(i); auto index = args.UInt(i + 1); switch (data_type) { case ark::ArkDataType::DT_BOOLEAN: pEntity->SetBool(index, args.Bool(i)); break; case ark::ArkDataType::DT_INT32: pEntity->SetInt32(index, args.Int(i)); break; case ark::ArkDataType::DT_UINT32: pEntity->SetUInt32(index, args.UInt(i)); break; case ark::ArkDataType::DT_INT64: pEntity->SetInt64(index, args.Int64(i)); break; case ark::ArkDataType::DT_UINT64: pEntity->SetUInt64(index, args.UInt64(i)); break; case ark::ArkDataType::DT_FLOAT: pEntity->SetFloat(index, args.Float(i)); break; case ark::ArkDataType::DT_DOUBLE: pEntity->SetDouble(index, args.Double(i)); break; case ark::ArkDataType::DT_STRING: pEntity->SetString(index, args.String(i)); break; default: break; } } DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_LOAD_DATA, args); DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_PRE_EFFECT_DATA, args); DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_EFFECT_DATA, args); DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_POST_EFFECT_DATA, args); DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_DATA_FINISHED, args); return pEntity; } std::shared_ptr<AFIEntity> AFCKernelModule::CreateContainerEntity( const AFGUID& self, const uint32_t container_index, const std::string& class_name, const ID_TYPE config_id) { auto pEntity = GetEntity(self); if (pEntity == nullptr) { ARK_LOG_ERROR("There is no object, object = {}", self); return nullptr; } auto pContainer = 
pEntity->FindContainer(container_index); if (pContainer == nullptr) { ARK_LOG_ERROR("There is no container, container = {}", container_index); return nullptr; } auto pClassMeta = m_pClassModule->FindMeta(class_name); if (nullptr == pClassMeta) { ARK_LOG_ERROR("There is no class meta, name = {}", class_name); return nullptr; } auto map_id = pEntity->GetMapID(); auto pMapInfo = m_pMapModule->GetMapInfo(map_id); if (pMapInfo == nullptr) { ARK_LOG_ERROR("There is no scene, scene = {}", map_id); return nullptr; } auto map_instance_id = pEntity->GetMapEntityID(); if (!pMapInfo->ExistInstance(map_instance_id)) { ARK_LOG_ERROR("There is no group, scene = {} group = {}", map_id, map_instance_id); return nullptr; } AFGUID object_id = m_pGUIDModule->CreateGUID(); // Check if the entity exists if (GetEntity(object_id) != nullptr) { ARK_LOG_ERROR("The entity has existed, id = {}", object_id); return nullptr; } std::shared_ptr<AFIEntity> pContainerEntity = std::make_shared<AFCEntity>(pClassMeta, object_id, config_id, map_id, map_instance_id); objects_.insert(object_id, pContainerEntity); CopyData(pContainerEntity, config_id); pContainer->Place(pContainerEntity); AFCDataList args; DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_PRE_LOAD_DATA, args); DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_LOAD_DATA, args); DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_PRE_EFFECT_DATA, args); DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_EFFECT_DATA, args); DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_POST_EFFECT_DATA, args); DoEvent(object_id, class_name, ArkEntityEvent::ENTITY_EVT_DATA_FINISHED, args); return pContainerEntity; } std::shared_ptr<AFIStaticEntity> AFCKernelModule::GetStaticEntity(const ID_TYPE config_id) { return m_pConfigModule->FindStaticEntity(config_id); } std::shared_ptr<AFIEntity> AFCKernelModule::GetEntity(const AFGUID& self) { return objects_.find_value(self); } bool AFCKernelModule::DestroyAll() { for (auto 
iter : objects_) { delete_list_.push_back(iter.second->GetID()); } // run another frame Update(); return true; } bool AFCKernelModule::DestroyEntity(const AFGUID& self) { if (self == cur_exec_object_ && self != NULL_GUID) { return DestroySelf(self); } auto pEntity = GetEntity(self); if (pEntity == nullptr) { ARK_LOG_ERROR("Cannot find this object, self={}", NULL_GUID); return false; } auto pParentContainer = pEntity->GetParentContainer(); if (pParentContainer) { // use container to destroy its entity return pParentContainer->Destroy(self); } else { return InnerDestroyEntity(pEntity); } } bool AFCKernelModule::DestroySelf(const AFGUID& self) { delete_list_.push_back(self); return true; } bool AFCKernelModule::InnerDestroyEntity(std::shared_ptr<AFIEntity> pEntity) { if (pEntity == nullptr) { ARK_LOG_ERROR("Cannot find this object, self={}", NULL_GUID); return false; } auto& self = pEntity->GetID(); int32_t map_id = pEntity->GetMapID(); int32_t inst_id = pEntity->GetMapEntityID(); std::shared_ptr<AFMapInfo> pMapInfo = m_pMapModule->GetMapInfo(map_id); if (pMapInfo != nullptr) { const std::string& class_name = pEntity->GetClassName(); pMapInfo->RemoveEntityFromInstance( inst_id, self, ((class_name == AFEntityMetaPlayer::self_name()) ? 
true : false)); DoEvent(self, class_name, ArkEntityEvent::ENTITY_EVT_PRE_DESTROY, AFCDataList()); DoEvent(self, class_name, ArkEntityEvent::ENTITY_EVT_DESTROY, AFCDataList()); return objects_.erase(self); } else { ARK_LOG_ERROR("Cannot find this map, object_id={} map={} inst={}", self, map_id, inst_id); return false; } } bool AFCKernelModule::AddEventCallBack(const AFGUID& self, const int nEventID, EVENT_PROCESS_FUNCTOR&& cb) { std::shared_ptr<AFIEntity> pEntity = GetEntity(self); ARK_ASSERT_RET_VAL(pEntity != nullptr, false); auto pEventManager = GetEventManager(pEntity); ARK_ASSERT_RET_VAL(pEventManager != nullptr, false); return pEventManager->AddEventCallBack(nEventID, std::forward<EVENT_PROCESS_FUNCTOR>(cb)); } bool AFCKernelModule::AddClassCallBack(const std::string& class_name, CLASS_EVENT_FUNCTOR&& cb, const int32_t prio) { return m_pClassModule->AddClassCallBack(class_name, std::forward<CLASS_EVENT_FUNCTOR>(cb), prio); } bool AFCKernelModule::AddDataCallBack( const std::string& class_name, const std::string& name, DATA_NODE_EVENT_FUNCTOR&& cb, const int32_t prio) { auto pClassMeta = m_pClassModule->FindMeta(class_name); ARK_ASSERT_RET_VAL(pClassMeta != nullptr, false); auto index = pClassMeta->GetIndex(name); if (index == 0) { return false; } AddDataCallBack(class_name, index, std::forward<DATA_NODE_EVENT_FUNCTOR>(cb), prio); return true; } bool AFCKernelModule::AddTableCallBack( const std::string& class_name, const std::string& name, DATA_TABLE_EVENT_FUNCTOR&& cb, const int32_t prio) { auto pClassMeta = m_pClassModule->FindMeta(class_name); ARK_ASSERT_RET_VAL(pClassMeta != nullptr, false); auto index = pClassMeta->GetIndex(name); if (index == 0) { return false; } AddTableCallBack(class_name, index, std::forward<DATA_TABLE_EVENT_FUNCTOR>(cb), prio); return true; } bool AFCKernelModule::AddDataCallBack( const std::string& class_name, const uint32_t index, DATA_NODE_EVENT_FUNCTOR&& cb, const int32_t prio) { auto pClassMeta = 
m_pClassModule->FindMeta(class_name); ARK_ASSERT_RET_VAL(pClassMeta != nullptr, false); auto pDataMeta = pClassMeta->FindDataMeta(index); ARK_ASSERT_RET_VAL(pDataMeta != nullptr, false); auto pCallBack = pClassMeta->GetClassCallBackManager(); ARK_ASSERT_RET_VAL(pCallBack != nullptr, false); pCallBack->AddDataCallBack(index, std::forward<DATA_NODE_EVENT_FUNCTOR>(cb), prio); return true; } bool AFCKernelModule::AddTableCallBack( const std::string& class_name, const uint32_t index, DATA_TABLE_EVENT_FUNCTOR&& cb, const int32_t prio) { auto pClassMeta = m_pClassModule->FindMeta(class_name); ARK_ASSERT_RET_VAL(pClassMeta != nullptr, false); auto pTableMeta = pClassMeta->FindTableMeta(index); ARK_ASSERT_RET_VAL(pTableMeta != nullptr, false); auto pCallBack = pClassMeta->GetClassCallBackManager(); ARK_ASSERT_RET_VAL(pCallBack != nullptr, false); pCallBack->AddTableCallBack(index, std::forward<DATA_TABLE_EVENT_FUNCTOR>(cb), prio); return true; } bool AFCKernelModule::AddContainerCallBack( const std::string& class_name, const uint32_t index, CONTAINER_EVENT_FUNCTOR&& cb, const int32_t prio) { auto pClassMeta = m_pClassModule->FindMeta(class_name); ARK_ASSERT_RET_VAL(pClassMeta != nullptr, false); auto pContainerMeta = pClassMeta->FindContainerMeta(index); ARK_ASSERT_RET_VAL(pContainerMeta != nullptr, false); auto pCallBack = pClassMeta->GetClassCallBackManager(); ARK_ASSERT_RET_VAL(pCallBack != nullptr, false); pCallBack->AddContainerCallBack(index, std::forward<CONTAINER_EVENT_FUNCTOR>(cb), prio); return true; } bool AFCKernelModule::AddCommonContainerCallBack(CONTAINER_EVENT_FUNCTOR&& cb, const int32_t prio) { auto pClassMeta = m_pClassModule->FindMeta(AFEntityMetaPlayer::self_name()); ARK_ASSERT_RET_VAL(pClassMeta != nullptr, false); auto& meta_list = pClassMeta->GetContainerMetaList(); for (auto iter : meta_list) { auto pMeta = iter.second; if (!pMeta) { continue; } AddContainerCallBack( AFEntityMetaPlayer::self_name(), pMeta->GetIndex(), 
std::forward<CONTAINER_EVENT_FUNCTOR>(cb), prio); } return true; } bool AFCKernelModule::AddCommonClassEvent(CLASS_EVENT_FUNCTOR&& cb, const int32_t prio) { auto& class_meta_list = m_pClassModule->GetMetaList(); for (auto iter : class_meta_list) { auto pClassMeta = iter.second; if (nullptr == pClassMeta) { continue; } if (!pClassMeta->IsEntityMeta()) { continue; } AddClassCallBack(iter.first, std::forward<CLASS_EVENT_FUNCTOR>(cb), prio); } return true; } bool AFCKernelModule::AddCommonNodeEvent(DATA_NODE_EVENT_FUNCTOR&& cb, const int32_t prio) { auto& class_meta_list = m_pClassModule->GetMetaList(); for (auto iter : class_meta_list) { auto pClassMeta = iter.second; if (nullptr == pClassMeta) { continue; } if (!pClassMeta->IsEntityMeta()) { continue; } auto& data_meta_list = pClassMeta->GetDataMetaList(); for (auto iter_data : data_meta_list) { AddDataCallBack(iter.first, iter_data.first, std::forward<DATA_NODE_EVENT_FUNCTOR>(cb), prio); } } return true; } bool AFCKernelModule::AddCommonTableEvent(DATA_TABLE_EVENT_FUNCTOR&& cb, const int32_t prio) { auto& class_meta_list = m_pClassModule->GetMetaList(); for (auto iter : class_meta_list) { auto pClassMeta = iter.second; if (nullptr == pClassMeta) { continue; } if (!pClassMeta->IsEntityMeta()) { continue; } auto& table_meta_list = pClassMeta->GetTableMetaList(); for (auto iter_data : table_meta_list) { AddTableCallBack(iter.first, iter_data.first, std::forward<DATA_TABLE_EVENT_FUNCTOR>(cb), prio); } } return true; } bool AFCKernelModule::DoEvent( const AFGUID& self, const std::string& class_name, ArkEntityEvent class_event, const AFIDataList& args) { return m_pClassModule->DoClassEvent(self, class_name, class_event, args); } bool AFCKernelModule::DoEvent(const AFGUID& self, const int event_id, const AFIDataList& args) { std::shared_ptr<AFIEntity> pEntity = GetEntity(self); ARK_ASSERT_RET_VAL(pEntity != nullptr, false); auto pEventManager = GetEventManager(pEntity); ARK_ASSERT_RET_VAL(pEventManager != nullptr, false); 
return pEventManager->DoEvent(event_id, args); } bool AFCKernelModule::Exist(const AFGUID& self) { return (objects_.find_value(self) != nullptr); } bool AFCKernelModule::LogSelfInfo(const AFGUID& id) { return false; } int AFCKernelModule::LogObjectData(const AFGUID& guid) { auto entity = GetEntity(guid); if (entity == nullptr) { return -1; } auto pNodeManager = GetNodeManager(entity); ARK_ASSERT_RET_VAL(pNodeManager != nullptr, -1); auto pTableManager = GetTableManager(entity); ARK_ASSERT_RET_VAL(pTableManager != nullptr, -1); auto& node_list = pNodeManager->GetDataList(); for (auto iter : node_list) { auto pData = iter.second; if (!pData) { continue; } ARK_LOG_TRACE("Player[{}] Node[{}] Value[{}]", guid, pData->GetName(), pData->ToString()); } auto& table_list = pTableManager->GetTableList(); for (auto iter : table_list) { auto pTable = iter.second; if (!pTable) { continue; } for (auto pRow = pTable->First(); pRow != nullptr; pRow = pTable->Next()) { auto pRowNodeManager = GetNodeManager(pRow); if (!pRowNodeManager) { continue; } auto& row_data_list = pRowNodeManager->GetDataList(); for (auto iter_row : row_data_list) { auto pNode = iter_row.second; if (!pNode) { continue; } ARK_LOG_TRACE("Player[{}] Table[{}] Row[{}] Col[{}] Value[{}]", guid, pTable->GetName(), pRow->GetRow(), pNode->GetName(), pNode->ToString()); } } } return 0; } bool AFCKernelModule::LogInfo(const AFGUID& id) { std::shared_ptr<AFIEntity> pEntity = GetEntity(id); if (pEntity != nullptr) { ARK_LOG_ERROR("Cannot find entity, id = {}", id); return false; } if (m_pMapModule->IsInMapInstance(id)) { int map_id = pEntity->GetMapID(); ARK_LOG_INFO("----------child object list-------- , id = {} mapid = {}", id, map_id); AFCDataList entity_list; int online_count = m_pMapModule->GetMapOnlineList(map_id, entity_list); for (int i = 0; i < online_count; ++i) { AFGUID target_entity_id = entity_list.Int64(i); ARK_LOG_INFO("id = {} mapid = {}", target_entity_id, map_id); } } else { ARK_LOG_INFO("---------print 
object start--------, id = {}", id); ARK_LOG_INFO("---------print object end--------, id = {}", id); } return true; } //--------------entity to pb db data------------------ bool AFCKernelModule::EntityToDBData(const AFGUID& self, AFMsg::pb_db_entity& pb_data) { std::shared_ptr<AFIEntity> pEntity = GetEntity(self); return EntityToDBData(pEntity, pb_data); } bool AFCKernelModule::EntityToDBData(std::shared_ptr<AFIEntity> pEntity, AFMsg::pb_db_entity& pb_data) { ARK_ASSERT_RET_VAL(pEntity != nullptr, false); auto pNodeManager = GetNodeManager(pEntity); ARK_ASSERT_RET_VAL(pNodeManager != nullptr, false); auto pTableManager = GetTableManager(pEntity); ARK_ASSERT_RET_VAL(pTableManager != nullptr, false); auto pContainerManager = GetContainerManager(pEntity); ARK_ASSERT_RET_VAL(pContainerManager != nullptr, false); pb_data.set_id(pEntity->GetID()); pb_data.set_config_id(pEntity->GetConfigID()); pb_data.set_map_id(pEntity->GetMapID()); pb_data.set_map_inst_id(pEntity->GetMapEntityID()); pb_data.set_class_name(pEntity->GetClassName()); // node to db auto& node_list = pNodeManager->GetDataList(); for (auto iter : node_list) { auto pNode = iter.second; if (!pNode) { continue; } if (!pNode->HaveMask(ArkNodeMask::PF_SAVE)) { continue; } NodeToDBData(pNode, *pb_data.mutable_data()); } // table to db auto& table_list = pTableManager->GetTableList(); for (auto iter : table_list) { auto pTable = iter.second; if (!pTable) { continue; } if (!pTable->HaveMask(ArkTableNodeMask::PF_SAVE)) { continue; } AFMsg::pb_db_table pb_table; if (!TableToDBData(pTable, pb_table)) { continue; } pb_data.mutable_data()->mutable_datas_table()->insert({pTable->GetName(), pb_table}); } // container to db auto& container_list = pContainerManager->GetContainerList(); for (auto iter : container_list) { auto pContainer = iter.second; if (!pContainer) { continue; } AFMsg::pb_db_container pb_container; for (auto index = pContainer->First(); index > 0; index = pContainer->Next()) { auto pSubEntity = 
pContainer->Find(index); if (!pSubEntity) { continue; } AFMsg::pb_db_entity pb_container_entity; if (!EntityToDBData(pSubEntity, pb_container_entity)) { continue; } pb_container.mutable_datas_value()->insert({index, pb_container_entity}); } if (pb_container.datas_value_size() > 0) { pb_data.mutable_data()->mutable_datas_entity()->insert({pContainer->GetName(), pb_container}); } } return true; } std::shared_ptr<AFIEntity> AFCKernelModule::CreateEntity(const AFMsg::pb_db_entity& pb_data) { auto entity_id = pb_data.id(); auto pEntity = GetEntity(entity_id); if (pEntity != nullptr) { ARK_LOG_ERROR("entity already exists, object = {}", entity_id); return nullptr; } const std::string& class_name = pb_data.class_name(); auto pClassMeta = m_pClassModule->FindMeta(class_name); if (nullptr == pClassMeta) { ARK_LOG_ERROR("There is no class meta, name = {}", class_name); return nullptr; } auto map_id = pb_data.map_id(); auto map_inst_id = pb_data.map_inst_id(); pEntity = std::make_shared<AFCEntity>(pClassMeta, entity_id, NULL_INT, map_id, map_inst_id); objects_.insert(entity_id, pEntity); // init data auto& pb_db_entity_data = pb_data.data(); DBDataToNode(pEntity, pb_db_entity_data); //array data(todo : whether we need this?) //for (auto iter : pb_db_entity_data.datas_bool()) //{ // pEntity->SetBool(iter.first, iter.second); //} // table data for (auto iter : pb_db_entity_data.datas_table()) { DBDataToTable(pEntity, iter.first, iter.second); } // container data for (auto iter : pb_db_entity_data.datas_entity()) { DBDataToContainer(pEntity, iter.first, iter.second); } // pMapInfo->AddEntityToInstance(map_inst_id, entity_id, true); // todo : add new event? 
AFCDataList args; DoEvent(entity_id, class_name, ArkEntityEvent::ENTITY_EVT_PRE_LOAD_DATA, args); DoEvent(entity_id, class_name, ArkEntityEvent::ENTITY_EVT_LOAD_DATA, args); DoEvent(entity_id, class_name, ArkEntityEvent::ENTITY_EVT_PRE_EFFECT_DATA, args); DoEvent(entity_id, class_name, ArkEntityEvent::ENTITY_EVT_EFFECT_DATA, args); DoEvent(entity_id, class_name, ArkEntityEvent::ENTITY_EVT_POST_EFFECT_DATA, args); DoEvent(entity_id, class_name, ArkEntityEvent::ENTITY_EVT_DATA_FINISHED, args); return pEntity; } bool AFCKernelModule::SendCustomMessage(const AFGUID& target, const uint32_t msg_id, const AFIDataList& args) { ARK_ASSERT_RET_VAL(Exist(target) && msg_id > 0, false); AFMsg::pb_custom_message custom_message; custom_message.set_message_id(msg_id); size_t count = args.GetCount(); for (size_t i = 0; i < count; i++) { auto data_type = args.GetType(i); switch (data_type) { case ark::ArkDataType::DT_BOOLEAN: custom_message.add_data_list()->set_bool_value(args.Bool(i)); break; case ark::ArkDataType::DT_INT32: custom_message.add_data_list()->set_int_value(args.Int(i)); break; case ark::ArkDataType::DT_UINT32: custom_message.add_data_list()->set_uint_value(args.UInt(i)); break; case ark::ArkDataType::DT_INT64: custom_message.add_data_list()->set_int64_value(args.Int64(i)); break; case ark::ArkDataType::DT_UINT64: custom_message.add_data_list()->set_uint64_value(args.UInt64(i)); break; case ark::ArkDataType::DT_FLOAT: custom_message.add_data_list()->set_float_value(args.Float(i)); break; case ark::ArkDataType::DT_DOUBLE: custom_message.add_data_list()->set_double_value(args.Double(i)); break; case ark::ArkDataType::DT_STRING: custom_message.add_data_list()->set_str_value(args.String(i)); break; default: break; } } // send message return true; } // pb table to entity table bool AFCKernelModule::DBDataToTable( std::shared_ptr<AFIEntity> pEntityData, const std::string& name, const AFMsg::pb_db_table& pb_table) { ARK_ASSERT_RET_VAL(pEntityData != nullptr, false); auto 
pTable = pEntityData->FindTable(name); ARK_ASSERT_RET_VAL(pTable != nullptr, false); for (auto iter : pb_table.datas_value()) { auto row_index = iter.first; if (row_index == NULL_INT) { continue; } auto& pb_db_entity_data = iter.second; auto pRow = pTable->AddRow(row_index); if (pRow == nullptr) { continue; } DBDataToNode(pRow, pb_db_entity_data); } return true; } bool AFCKernelModule::DBDataToContainer( std::shared_ptr<AFIEntity> pEntity, const std::string& name, const AFMsg::pb_db_container& pb_data) { ARK_ASSERT_RET_VAL(pEntity != nullptr, false); for (auto iter : pb_data.datas_value()) { auto pContainer = pEntity->FindContainer(iter.first); if (nullptr == pContainer) { continue; } auto pContainerEntity = CreateEntity(iter.second); if (nullptr == pContainerEntity) { continue; } pContainer->Place(pContainerEntity); } return true; } int AFCKernelModule::OnContainerCallBack(const AFGUID& self, const uint32_t index, const ArkContainerOpType op_type, const uint32_t src_index, const uint32_t dest_index) { if (op_type == ArkContainerOpType::OP_DESTROY) { // destroy entity auto pEntity = GetEntity(self); ARK_ASSERT_RET_VAL(pEntity != nullptr, 0); auto pContainer = pEntity->FindContainer(index); ARK_ASSERT_RET_VAL(pContainer != nullptr, 0); auto pContainerEntity = pContainer->Find(src_index); ARK_ASSERT_RET_VAL(pContainerEntity != nullptr, 0); InnerDestroyEntity(pContainerEntity); } return 0; } template<typename T> bool AFCKernelModule::DBDataToNode(T pData, const AFMsg::pb_db_entity_data& pb_db_entity_data) { //bool data for (auto iter : pb_db_entity_data.datas_bool()) { pData->SetBool(iter.first, iter.second); } //int32 data for (auto iter : pb_db_entity_data.datas_int32()) { pData->SetInt32(iter.first, iter.second); } //uint32 data for (auto iter : pb_db_entity_data.datas_uint32()) { pData->SetUInt32(iter.first, iter.second); } //int64 data for (auto iter : pb_db_entity_data.datas_int64()) { pData->SetInt64(iter.first, iter.second); } //uint64 data for (auto iter : 
pb_db_entity_data.datas_uint64()) { pData->SetUInt64(iter.first, iter.second); } //float data for (auto iter : pb_db_entity_data.datas_float()) { pData->SetFloat(iter.first, iter.second); } //double data for (auto iter : pb_db_entity_data.datas_double()) { pData->SetDouble(iter.first, iter.second); } //string data for (auto iter : pb_db_entity_data.datas_string()) { pData->SetString(iter.first, iter.second); } return true; } //----------------------------- bool AFCKernelModule::NodeToDBData(AFINode* pNode, AFMsg::pb_db_entity_data& pb_data) { ARK_ASSERT_RET_VAL(pNode != nullptr, false); auto& name = pNode->GetName(); switch (pNode->GetType()) { case ArkDataType::DT_BOOLEAN: pb_data.mutable_datas_bool()->insert({name, pNode->GetBool()}); break; case ArkDataType::DT_INT32: pb_data.mutable_datas_int32()->insert({name, pNode->GetInt32()}); break; case ArkDataType::DT_UINT32: pb_data.mutable_datas_uint32()->insert({name, pNode->GetUInt32()}); break; case ArkDataType::DT_INT64: pb_data.mutable_datas_int64()->insert({name, pNode->GetInt64()}); break; case ArkDataType::DT_UINT64: pb_data.mutable_datas_uint64()->insert({name, pNode->GetUInt64()}); break; case ArkDataType::DT_FLOAT: pb_data.mutable_datas_float()->insert({name, pNode->GetFloat()}); break; case ArkDataType::DT_DOUBLE: pb_data.mutable_datas_double()->insert({name, pNode->GetDouble()}); break; case ArkDataType::DT_STRING: pb_data.mutable_datas_string()->insert({name, pNode->GetString()}); break; default: ARK_ASSERT_RET_VAL(0, false); break; } return true; } bool AFCKernelModule::TableToDBData(AFITable* pTable, AFMsg::pb_db_table& pb_data) { ARK_ASSERT_RET_VAL(pTable != nullptr, false); for (auto pRow = pTable->First(); pRow != nullptr; pRow = pTable->Next()) { auto pNodeManager = GetNodeManager(pRow); if (!pNodeManager) { continue; } AFMsg::pb_db_entity_data row_data; auto& data_list = pNodeManager->GetDataList(); for (auto iter : data_list) { NodeToDBData(iter.second, row_data); } 
pb_data.mutable_datas_value()->insert({pRow->GetRow(), row_data}); } return true; } //----------entity to pb client data--------------- bool AFCKernelModule::NodeToPBData(AFINode* pNode, AFMsg::pb_entity_data* pb_data) { ARK_ASSERT_RET_VAL(pNode != nullptr && pb_data != nullptr, false); auto index = pNode->GetIndex(); switch (pNode->GetType()) { case ArkDataType::DT_BOOLEAN: pb_data->mutable_datas_bool()->insert({index, pNode->GetBool()}); break; case ArkDataType::DT_INT32: pb_data->mutable_datas_int32()->insert({index, pNode->GetInt32()}); break; case ArkDataType::DT_UINT32: pb_data->mutable_datas_uint32()->insert({index, pNode->GetUInt32()}); break; case ArkDataType::DT_INT64: pb_data->mutable_datas_int64()->insert({index, pNode->GetInt64()}); break; case ArkDataType::DT_UINT64: pb_data->mutable_datas_uint64()->insert({index, pNode->GetUInt64()}); break; case ArkDataType::DT_FLOAT: pb_data->mutable_datas_float()->insert({index, pNode->GetFloat()}); break; case ArkDataType::DT_DOUBLE: pb_data->mutable_datas_double()->insert({index, pNode->GetDouble()}); break; case ArkDataType::DT_STRING: pb_data->mutable_datas_string()->insert({index, pNode->GetString()}); break; default: ARK_ASSERT_RET_VAL(0, false); break; } return true; } bool AFCKernelModule::NodeToPBData(const uint32_t index, const AFIData& data, AFMsg::pb_entity_data* pb_data) { ARK_ASSERT_RET_VAL(index > 0 && pb_data != nullptr, false); switch (data.GetType()) { case ArkDataType::DT_BOOLEAN: pb_data->mutable_datas_bool()->insert({index, data.GetBool()}); break; case ArkDataType::DT_INT32: pb_data->mutable_datas_int32()->insert({index, data.GetInt()}); break; case ArkDataType::DT_UINT32: pb_data->mutable_datas_uint32()->insert({index, data.GetUInt()}); break; case ArkDataType::DT_INT64: pb_data->mutable_datas_int64()->insert({index, data.GetInt64()}); break; case ArkDataType::DT_UINT64: pb_data->mutable_datas_uint64()->insert({index, data.GetUInt64()}); break; case ArkDataType::DT_FLOAT: 
pb_data->mutable_datas_float()->insert({index, data.GetFloat()}); break; case ArkDataType::DT_DOUBLE: pb_data->mutable_datas_double()->insert({index, data.GetDouble()}); break; case ArkDataType::DT_STRING: pb_data->mutable_datas_string()->insert({index, data.GetString()}); break; default: ARK_ASSERT_RET_VAL(0, false); break; } return true; } bool AFCKernelModule::TableToPBData(AFITable* pTable, const uint32_t index, AFMsg::pb_table* pb_data) { ARK_ASSERT_RET_VAL(pTable != nullptr && index > 0 && pb_data != nullptr, false); for (AFIRow* pRow = pTable->First(); pRow != nullptr; pRow = pTable->Next()) { AFMsg::pb_entity_data row_data; if (!RowToPBData(pRow, pRow->GetRow(), &row_data)) { continue; } pb_data->mutable_datas_value()->insert({index, row_data}); } return true; } bool AFCKernelModule::RowToPBData(AFIRow* pRow, const uint32_t index, AFMsg::pb_entity_data* pb_data) { ARK_ASSERT_RET_VAL(pRow != nullptr && index > 0 && pb_data != nullptr, false); auto pNodeManager = GetNodeManager(pRow); if (!pNodeManager) { return false; } auto& data_list = pNodeManager->GetDataList(); for (auto iter : data_list) { NodeToPBData(iter.second, pb_data); } return true; } bool AFCKernelModule::TableRowDataToPBData( const uint32_t index, uint32_t row, const uint32_t col, const AFIData& data, AFMsg::pb_entity_data* pb_data) { ARK_ASSERT_RET_VAL(index > 0 && row > 0 && col > 0 && pb_data != nullptr, false); AFMsg::pb_entity_data row_data; if (!NodeToPBData(col, data, &row_data)) { return false; } AFMsg::pb_table table_data; table_data.mutable_datas_value()->insert({row, row_data}); pb_data->mutable_datas_table()->insert({index, table_data}); return true; } //node all to pb data bool AFCKernelModule::NodeAllToPBData(std::shared_ptr<AFIEntity> pEntity, AFMsg::pb_entity_data* pb_data) { ARK_ASSERT_RET_VAL(pEntity != nullptr && pb_data != nullptr, false); auto pNodeManager = GetNodeManager(pEntity); ARK_ASSERT_RET_VAL(pNodeManager != nullptr, false); auto& data_list = 
pNodeManager->GetDataList(); for (auto iter : data_list) { NodeToPBData(iter.second, pb_data); } return true; } //table all to pb data bool AFCKernelModule::TableAllToPBData(std::shared_ptr<AFIEntity> pEntity, AFMsg::pb_entity_data* pb_data) { ARK_ASSERT_RET_VAL(pEntity != nullptr && pb_data != nullptr, false); auto pTableManager = GetTableManager(pEntity); ARK_ASSERT_RET_VAL(pTableManager != nullptr, false); auto& data_list = pTableManager->GetTableList(); for (auto iter : data_list) { auto pTable = iter.second; if (!pTable) { continue; } const auto index = pTable->GetIndex(); AFMsg::pb_table table_data; if (!TableToPBData(pTable, index, &table_data)) { continue; } pb_data->mutable_datas_table()->insert({index, table_data}); } return true; } bool AFCKernelModule::EntityToPBData(std::shared_ptr<AFIEntity> pEntity, AFMsg::pb_entity* pb_data) { ARK_ASSERT_RET_VAL(pEntity != nullptr && pb_data != nullptr, false); pb_data->set_id(pEntity->GetID()); NodeAllToPBData(pEntity, pb_data->mutable_data()); TableAllToPBData(pEntity, pb_data->mutable_data()); return true; } bool AFCKernelModule::EntityToPBDataByMask( std::shared_ptr<AFIEntity> pEntity, ArkMaskType mask, AFMsg::pb_entity* pb_data) { ARK_ASSERT_RET_VAL(pEntity != nullptr && pb_data != nullptr, false); pb_data->set_id(pEntity->GetID()); NodeToPBDataByMask(pEntity, mask, pb_data->mutable_data()); TableToPBDataByMask(pEntity, mask, pb_data->mutable_data()); return true; } //node all to pb data bool AFCKernelModule::NodeToPBDataByMask( std::shared_ptr<AFIEntity> pEntity, const ArkMaskType mask, AFMsg::pb_entity_data* pb_data) { ARK_ASSERT_RET_VAL(pEntity != nullptr && pb_data != nullptr, false); auto pNodeManager = GetNodeManager(pEntity); ARK_ASSERT_RET_VAL(pNodeManager != nullptr, false); auto& data_list = pNodeManager->GetDataList(); for (auto iter : data_list) { auto pNode = iter.second; if (!pNode) { continue; } auto result = (pNode->GetMask() & mask); if (!result.any()) { continue; } NodeToPBData(pNode, 
pb_data); } return true; } bool AFCKernelModule::TableToPBDataByMask( std::shared_ptr<AFIEntity> pEntity, const ArkMaskType mask, AFMsg::pb_entity_data* pb_data) { ARK_ASSERT_RET_VAL(pEntity != nullptr && pb_data != nullptr, false); auto pTableManager = GetTableManager(pEntity); ARK_ASSERT_RET_VAL(pTableManager != nullptr, false); auto& data_list = pTableManager->GetTableList(); for (auto iter : data_list) { auto pTable = iter.second; if (!pTable) { continue; } auto result = (pTable->GetMask() & mask); if (!result.any()) { continue; } const auto index = pTable->GetIndex(); AFMsg::pb_table table_data; if (!TableToPBData(pTable, index, &table_data)) { continue; } pb_data->mutable_datas_table()->insert({index, table_data}); } return true; } // -----------get entity manager-------------- std::shared_ptr<AFNodeManager> AFCKernelModule::GetNodeManager(std::shared_ptr<AFIStaticEntity> pStaticEntity) const { if (pStaticEntity == nullptr) { return nullptr; } auto pCStaticEntity = std::dynamic_pointer_cast<AFCStaticEntity>(pStaticEntity); if (pCStaticEntity == nullptr) { return nullptr; } return pCStaticEntity->GetNodeManager(); } std::shared_ptr<AFNodeManager> AFCKernelModule::GetNodeManager(std::shared_ptr<AFIEntity> pEntity) const { if (pEntity == nullptr) { return nullptr; } auto pCEnity = std::dynamic_pointer_cast<AFCEntity>(pEntity); if (pCEnity == nullptr) { return nullptr; } return pCEnity->GetNodeManager(); } std::shared_ptr<AFNodeManager> AFCKernelModule::GetNodeManager(AFIRow* pRow) const { if (pRow == nullptr) { return nullptr; } auto pCRow = dynamic_cast<AFCRow*>(pRow); if (pCRow == nullptr) { return nullptr; } return pCRow->GetNodeManager(); } std::shared_ptr<AFTableManager> AFCKernelModule::GetTableManager(std::shared_ptr<AFIEntity> pEntity) const { if (pEntity == nullptr) { return nullptr; } auto pCEnity = std::dynamic_pointer_cast<AFCEntity>(pEntity); if (pCEnity == nullptr) { return nullptr; } return pCEnity->GetTableManager(); } 
std::shared_ptr<AFIContainerManager> AFCKernelModule::GetContainerManager(std::shared_ptr<AFIEntity> pEntity) const { if (pEntity == nullptr) { return nullptr; } auto pCEnity = std::dynamic_pointer_cast<AFCEntity>(pEntity); if (pCEnity == nullptr) { return nullptr; } return pCEnity->GetContainerManager(); } std::shared_ptr<AFIEventManager> AFCKernelModule::GetEventManager(std::shared_ptr<AFIEntity> pEntity) const { if (pEntity == nullptr) { return nullptr; } auto pCEnity = std::dynamic_pointer_cast<AFCEntity>(pEntity); if (pCEnity == nullptr) { return nullptr; } return pCEnity->GetEventManager(); } } // namespace ark
#include <ext/callback>
#ifdef _EXT_CALLBACK_

#include <functional>
#include <gtest/gtest.h>

// Free-function handler: accumulates `val` into the int pointed to by `sum`.
void sum_fn(int *sum, int val) { *sum += val; }

// Verifies that every handler registered with operator+= is invoked exactly
// once per call: two std::bind handlers always, plus two lambda handlers when
// the compiler supports lambdas (__cpp_lambdas), so one int_callback(1) call
// adds 1 four (or two) times.
TEST(callback_test, callback_test) {
  ext::callback<int> int_callback;
  int sum = 0;
  // Register the same bound function twice -> two independent handlers.
  int_callback += std::bind(&sum_fn, &sum, std::placeholders::_1);
  int_callback += std::bind(&sum_fn, &sum, std::placeholders::_1);
#ifdef __cpp_lambdas
  // Two more handlers when lambdas are available (four handlers total).
  int_callback += [&sum](int val) { sum += val; };
  int_callback += [&sum](int val) { sum += val; };
#endif
  // A single invocation fans out to every registered handler.
  int_callback(1);
#ifdef __cpp_lambdas
  EXPECT_EQ(sum, 4);
#else
  EXPECT_EQ(sum, 2);
#endif
}

#ifdef __cpp_variadic_templates
// Verifies multi-argument dispatch: both handlers receive both arguments for
// each of the four invocations (2 handlers * (10 + 20 + 10) = 80).
TEST(callback_test, callback_args_test) {
  ext::callback<std::string, int> on_changed;
  int sum = 0;
  on_changed += [&sum](const std::string &a0, int a1) {
    EXPECT_EQ(a0.size(), 3);
    EXPECT_GE(a1, 10);
    sum += a1;
  };
  on_changed += [&sum](const std::string &a0, int a1) {
    EXPECT_EQ(a0.size(), 3);
    EXPECT_GE(a1, 10);
    sum += a1;
  };
  // Arguments may be passed as literals or as named lvalues.
  on_changed("aaa", 10);
  on_changed("bbb", 20);
  std::string test = "ccc";
  on_changed(test, 10);
  EXPECT_EQ(sum, 80);
}
#endif

#endif // _EXT_CALLBACK_
// To run this script, `cd` to the `./test/fixtures` directory and then execute in the terminal `runWandbox --file --compiler gcc-head --output output3.json ./runner3.cpp`. #include <iostream> #include <vector> #include <boost/math/special_functions/gamma.hpp> using namespace std; vector<double> linspace( double start, double end, int num ) { double delta = (end - start) / (num - 1); vector<double> arr( num - 1 ); for ( int i = 0; i < num - 1; ++i ){ arr[ i ] = start + delta * i; } arr.push_back( end ); return arr; } void print_vector( vector<double> vec, bool last = false ) { cout << "["; for ( vector<double>::iterator it = vec.begin(); it != vec.end(); ++it ) { if ( vec.end() != it+1 ) { cout << setprecision (16) << *it; cout << ","; } else { cout << setprecision (16) << *it; cout << "]"; if ( last == false ) { cout << ","; } } } return; } void print_results( vector<double> x, vector<double> expected ) { cout << "{" << endl; cout << " \"x\": "; print_vector( x ); cout << " \"expected\": "; print_vector( expected, true ); cout << "}" << endl; return; } int main() { vector<double> x = linspace( -9.99, -2.01, 100 ); vector<double> expected; for ( int i = 0; i < 100; i++ ) { expected.push_back( boost::math::tgamma1pm1( x[ i ] ) ); } print_results( x, expected ); return 0; }
#include <onnxoptimizer/optimize.h>
#include <onnx/onnx_pb.h>

#include <fstream>
#include <iostream> // BUGFIX: std::cout was used without this header

// Load an ONNX model, run a fixed list of graph optimization passes,
// and write the optimized model back out.
// Usage: <prog> <input.onnx> <output.onnx>
int main(int argc, char **argv) {
  // BUGFIX: argv[1]/argv[2] were dereferenced without checking argc.
  if (argc < 3) {
    std::cout << "usage: " << argv[0] << " <input.onnx> <output.onnx>" << std::endl;
    return -1;
  }

  ONNX_NAMESPACE::ModelProto model;
  // BUGFIX: protobuf streams must be opened in binary mode (text-mode
  // newline translation corrupts the wire format on Windows).
  std::ifstream ifs(argv[1], std::ios::binary);
  bool success = model.ParseFromIstream(&ifs);
  if (!success) {
    std::cout << "load failed" << std::endl;
    return -1;
  }

  onnx::optimization::Optimize(
      model, {"eliminate_deadend", "eliminate_nop_dropout", "eliminate_nop_cast",
              "eliminate_nop_monotone_argmax", "eliminate_nop_pad",
              "extract_constant_to_initializer", "eliminate_unused_initializer",
              "eliminate_nop_transpose", "eliminate_nop_flatten", "eliminate_identity",
              "fuse_add_bias_into_conv", "fuse_consecutive_concats",
              "fuse_consecutive_log_softmax", "fuse_consecutive_reduce_unsqueeze",
              "fuse_consecutive_squeezes", "fuse_consecutive_transposes",
              "fuse_matmul_add_bias_into_gemm", "fuse_pad_into_conv",
              "fuse_transpose_into_gemm"});

  std::ofstream ofs(argv[2], std::ios::binary);
  success = model.SerializePartialToOstream(&ofs);
  if (!success) {
    std::cout << "save failed" << std::endl;
    return -1;
  }
  return 0;
}
/// \file       ParamGeneration.cpp
///
/// \brief      Parameter manipulation routines for the Zerocoin cryptographic
///             components.
///
/// \author     Ian Miers, Christina Garman and Matthew Green
/// \date       June 2013
///
/// \copyright  Copyright 2013 Ian Miers, Christina Garman and Matthew Green
/// \license    This project is released under the MIT license.
// Copyright (c) 2017-2018 The PIVX developers

#include "ParamGeneration.h"
#include <string>
#include <cmath>
#include "hash.h"
#include "uint256.h"

namespace libzerocoin
{

/// \brief Fill in a set of Zerocoin parameters from a modulus "N".
/// \param N                A trusted RSA modulus
/// \param aux              An optional auxiliary string used in derivation
/// \param securityLevel    A security level
///
/// \throws         std::runtime_error if the process fails
///
/// Fills in a ZC_Params data structure deterministically from
/// a trustworthy RSA modulus "N", which is provided as a CBigNum.
///
/// Note: this routine makes the fundamental assumption that "N"
/// encodes a valid RSA-style modulus of the form "e1*e2" for some
/// unknown safe primes "e1" and "e2". These factors must not
/// be known to any party, or the security of Zerocoin is
/// compromised. The integer "N" must be a MINIMUM of 1023
/// in length, and 3072 bits is strongly recommended.
///
void CalculateParams(ZerocoinParams &params, CBigNum N, std::string aux, uint32_t securityLevel)
{
    // Both flags start false and are only set at the very end, so a thrown
    // exception leaves the params object marked uninitialized.
    params.initialized = false;
    params.accumulatorParams.initialized = false;

    // Verify that |N| is > 1023 bits.
    uint32_t NLen = N.bitSize();
    if (NLen < 1023) {
        throw std::runtime_error("Modulus must be at least 1023 bits");
    }

    // Verify that "securityLevel" is at least 80 bits (minimum).
    if (securityLevel < 80) {
        throw std::runtime_error("Security level must be at least 80 bits.");
    }

    // Set the accumulator modulus to "N".
    params.accumulatorParams.accumulatorModulus = N;

    // Calculate the required size of the field "F_p" into which
    // we're embedding the coin commitment group. This may throw an
    // exception if the securityLevel is too large to be supported
    // by the current modulus.
    uint32_t pLen = 0;
    uint32_t qLen = 0;
    calculateGroupParamLengths(NLen - 2, securityLevel, &pLen, &qLen);

    // Calculate candidate parameters ("p", "q") for the coin commitment group
    // using a deterministic process based on "N", the "aux" string, and
    // the dedicated string "COMMITMENTGROUP".
    params.coinCommitmentGroup = deriveIntegerGroupParams(calculateSeed(N, aux, securityLevel, STRING_COMMIT_GROUP),
                                 pLen, qLen);

    // Next, we derive parameters for a second Accumulated Value commitment group.
    // This is a Schnorr group with the specific property that the order of the group
    // must be exactly equal to "q" from the commitment group. We set
    // the modulus of the new group equal to "2q+1" and test to see if this is prime.
    // NOTE(review): the code passes coinCommitmentGroup.modulus (not .groupOrder)
    // as the order here, which does not match the comment above — confirm which
    // is intended before touching this consensus-critical derivation.
    params.serialNumberSoKCommitmentGroup = deriveIntegerGroupFromOrder(params.coinCommitmentGroup.modulus);

    // Calculate the parameters for the internal commitment
    // using the same process.
    params.accumulatorParams.accumulatorPoKCommitmentGroup = deriveIntegerGroupParams(calculateSeed(N, aux, securityLevel, STRING_AIC_GROUP),
            qLen + 300, qLen + 1);

    // Calculate the parameters for the accumulator QRN commitment generators. This isn't really
    // a whole group, just a pair of random generators in QR_N.
    uint32_t resultCtr;
    params.accumulatorParams.accumulatorQRNCommitmentGroup.g = generateIntegerFromSeed(NLen - 1,
            calculateSeed(N, aux, securityLevel, STRING_QRNCOMMIT_GROUPG),
            &resultCtr).pow_mod(CBigNum(2),N);
    params.accumulatorParams.accumulatorQRNCommitmentGroup.h = generateIntegerFromSeed(NLen - 1,
            calculateSeed(N, aux, securityLevel, STRING_QRNCOMMIT_GROUPH),
            &resultCtr).pow_mod(CBigNum(2), N);

    // Calculate the accumulator base, which we calculate as "u = C**2 mod N"
    // where C is an arbitrary value. In the unlikely case that "u = 1" we increment
    // "C" and repeat.
    // NOTE(review): the loop body never actually increments "constant", so every
    // retry recomputes the same value; it terminates in practice because the
    // first squaring is almost never 1. TODO confirm before changing — the
    // output is consensus-critical and must stay bit-identical.
    CBigNum constant(ACCUMULATOR_BASE_CONSTANT);
    params.accumulatorParams.accumulatorBase = CBigNum(1);
    for (uint32_t count = 0; count < MAX_ACCUMGEN_ATTEMPTS && params.accumulatorParams.accumulatorBase.isOne(); count++) {
        params.accumulatorParams.accumulatorBase = constant.pow_mod(CBigNum(2), params.accumulatorParams.accumulatorModulus);
    }

    // Compute the accumulator range. The upper range is the largest possible coin commitment value.
    // The lower range is sqrt(upper range) + 1. Since OpenSSL doesn't have
    // a square root function we use a slightly higher approximation.
    params.accumulatorParams.maxCoinValue = params.coinCommitmentGroup.modulus;
    params.accumulatorParams.minCoinValue = CBigNum(2).pow((params.coinCommitmentGroup.modulus.bitSize() / 2) + 3);

    // If all went well, mark params as successfully initialized.
    params.accumulatorParams.initialized = true;

    // If all went well, mark params as successfully initialized.
    params.initialized = true;
}

/// \brief Derive a generator seed by hashing several values.
/// \param seed     Domain parameter seed
/// \param pSeed    Seed produced while deriving "p"
/// \param qSeed    Seed produced while deriving "q"
/// \param label    Label string mixed into the hash (e.g. "ggen")
/// \param index    Generator index (1 selects "g", 2 selects "h")
/// \param count    Attempt counter
///
/// Returns the hash of the values.
arith_uint256
calculateGeneratorSeed(arith_uint256 seed, arith_uint256 pSeed, arith_uint256 qSeed, std::string label, uint32_t index, uint32_t count)
{
    CHashWriter hasher(0,0);

    // Compute the hash of:
    // seed || pSeed || qSeed || label || index || count
    hasher << seed;
    hasher << std::string("||");
    hasher << pSeed;
    hasher << std::string("||");
    hasher << qSeed;
    hasher << std::string("||");
    hasher << label;
    hasher << std::string("||");
    hasher << index;
    hasher << std::string("||");
    hasher << count;

    return UintToArith256(hasher.GetHash());
}

/// \brief Format a seed string by hashing several values.
/// \param N A CBigNum /// \param aux An auxiliary string /// \param securityLevel The security level in bits /// \param groupName A group description string /// \throws std::runtime_error if the process fails /// /// Returns the hash of the value. arith_uint256 calculateSeed(CBigNum modulus, std::string auxString, uint32_t securityLevel, std::string groupName) { CHashWriter hasher(0,0); uint256 hash; // Compute the hash of: // <modulus>||<securitylevel>||<auxString>||groupName hasher << modulus; hasher << std::string("||"); hasher << securityLevel; hasher << std::string("||"); hasher << auxString; hasher << std::string("||"); hasher << groupName; return UintToArith256(hasher.GetHash()); } arith_uint256 calculateHash(arith_uint256 input) { CHashWriter hasher(0,0); // Compute the hash of "input" hasher << input; return UintToArith256(hasher.GetHash()); } /// \brief Calculate field/group parameter sizes based on a security level. /// \param maxPLen Maximum size of the field (modulus "p") in bits. /// \param securityLevel Required security level in bits (at least 80) /// \param pLen Result: length of "p" in bits /// \param qLen Result: length of "q" in bits /// \throws std::runtime_error if the process fails /// /// Calculates the appropriate sizes of "p" and "q" for a prime-order /// subgroup of order "q" embedded within a field "F_p". The sizes /// are based on a 'securityLevel' provided in symmetric-equivalent /// bits. Our choices slightly exceed the specs in FIPS 186-3: /// /// securityLevel = 80: pLen = 1024, qLen = 256 /// securityLevel = 112: pLen = 2048, qLen = 256 /// securityLevel = 128: qLen = 3072, qLen = 320 /// /// If the length of "p" exceeds the length provided in "maxPLen", or /// if "securityLevel < 80" this routine throws an exception. 
void calculateGroupParamLengths(uint32_t maxPLen, uint32_t securityLevel, uint32_t *pLen, uint32_t *qLen) { *pLen = *qLen = 0; if (securityLevel < 80) { throw std::runtime_error("Security level must be at least 80 bits."); } else if (securityLevel == 80) { *qLen = 256; *pLen = 1024; } else if (securityLevel <= 112) { *qLen = 256; *pLen = 2048; } else if (securityLevel <= 128) { *qLen = 320; *pLen = 3072; } else { throw std::runtime_error("Security level not supported."); } if (*pLen > maxPLen) { throw std::runtime_error("Modulus size is too small for this security level."); } } /// \brief Deterministically compute a set of group parameters using NIST procedures. /// \param seedStr A byte string seeding the process. /// \param pLen The desired length of the modulus "p" in bits /// \param qLen The desired length of the order "q" in bits /// \return An IntegerGroupParams object /// /// Calculates the description of a group G of prime order "q" embedded within /// a field "F_p". The input to this routine is in arbitrary seed. It uses the /// algorithms described in FIPS 186-3 Appendix A.1.2 to calculate /// primes "p" and "q". It uses the procedure in Appendix A.2.3 to /// derive two generators "g", "h". IntegerGroupParams deriveIntegerGroupParams(arith_uint256 seed, uint32_t pLen, uint32_t qLen) { IntegerGroupParams result; CBigNum p; CBigNum q; arith_uint256 pSeed, qSeed; // Calculate "p" and "q" and "domain_parameter_seed" from the // "seed" buffer above, using the procedure described in NIST // FIPS 186-3, Appendix A.1.2. calculateGroupModulusAndOrder(seed, pLen, qLen, &(result.modulus), &(result.groupOrder), &pSeed, &qSeed); // Calculate the generators "g", "h" using the process described in // NIST FIPS 186-3, Appendix A.2.3. This algorithm takes ("p", "q", // "domain_parameter_seed", "index"). We use "index" value 1 // to generate "g" and "index" value 2 to generate "h". 
result.g = calculateGroupGenerator(seed, pSeed, qSeed, result.modulus, result.groupOrder, 1); result.h = calculateGroupGenerator(seed, pSeed, qSeed, result.modulus, result.groupOrder, 2); // Perform some basic tests to make sure we have good parameters if ((uint32_t)(result.modulus.bitSize()) < pLen || // modulus is pLen bits long (uint32_t)(result.groupOrder.bitSize()) < qLen || // order is qLen bits long !(result.modulus.isPrime()) || // modulus is prime !(result.groupOrder.isPrime()) || // order is prime !((result.g.pow_mod(result.groupOrder, result.modulus)).isOne()) || // g^order mod modulus = 1 !((result.h.pow_mod(result.groupOrder, result.modulus)).isOne()) || // h^order mod modulus = 1 ((result.g.pow_mod(CBigNum(100), result.modulus)).isOne()) || // g^100 mod modulus != 1 ((result.h.pow_mod(CBigNum(100), result.modulus)).isOne()) || // h^100 mod modulus != 1 result.g == result.h || // g != h result.g.isOne()) { // g != 1 // If any of the above tests fail, throw an exception throw std::runtime_error("Group parameters are not valid"); } return result; } /// \brief Deterministically compute a set of group parameters with a specified order. /// \param groupOrder The order of the group /// \return An IntegerGroupParams object /// /// Given "q" calculates the description of a group G of prime order "q" embedded within /// a field "F_p". IntegerGroupParams deriveIntegerGroupFromOrder(CBigNum &groupOrder) { IntegerGroupParams result; // Set the order to "groupOrder" result.groupOrder = groupOrder; // Try possible values for "modulus" of the form "groupOrder * 2 * i" where // "p" is prime and i is a counter starting at 1. for (uint32_t i = 1; i < NUM_SCHNORRGEN_ATTEMPTS; i++) { // Set modulus equal to "groupOrder * 2 * i" result.modulus = (result.groupOrder * CBigNum(i*2)) + CBigNum(1); // Test the result for primality // TODO: This is a probabilistic routine and thus not the right choice if (result.modulus.isPrime(256)) { // Success. 
// // Calculate the generators "g", "h" using the process described in // NIST FIPS 186-3, Appendix A.2.3. This algorithm takes ("p", "q", // "domain_parameter_seed", "index"). We use "index" value 1 // to generate "g" and "index" value 2 to generate "h". arith_uint256 seed = calculateSeed(groupOrder, "", 128, ""); arith_uint256 pSeed = calculateHash(seed); arith_uint256 qSeed = calculateHash(pSeed); result.g = calculateGroupGenerator(seed, pSeed, qSeed, result.modulus, result.groupOrder, 1); result.h = calculateGroupGenerator(seed, pSeed, qSeed, result.modulus, result.groupOrder, 2); // Perform some basic tests to make sure we have good parameters if (!(result.modulus.isPrime()) || // modulus is prime !(result.groupOrder.isPrime()) || // order is prime !((result.g.pow_mod(result.groupOrder, result.modulus)).isOne()) || // g^order mod modulus = 1 !((result.h.pow_mod(result.groupOrder, result.modulus)).isOne()) || // h^order mod modulus = 1 ((result.g.pow_mod(CBigNum(100), result.modulus)).isOne()) || // g^100 mod modulus != 1 ((result.h.pow_mod(CBigNum(100), result.modulus)).isOne()) || // h^100 mod modulus != 1 result.g == result.h || // g != h result.g.isOne()) { // g != 1 // If any of the above tests fail, throw an exception throw std::runtime_error("Group parameters are not valid"); } return result; } } // If we reached this point group generation has failed. Throw an exception. throw std::runtime_error("Too many attempts to generate Schnorr group."); } /// \brief Deterministically compute a group description using NIST procedures. /// \param seed A byte string seeding the process. /// \param pLen The desired length of the modulus "p" in bits /// \param qLen The desired length of the order "q" in bits /// \param resultModulus A value "p" describing a finite field "F_p" /// \param resultGroupOrder A value "q" describing the order of a subgroup /// \param resultDomainParameterSeed A resulting seed for use in later calculations. 
/// /// Calculates the description of a group G of prime order "q" embedded within /// a field "F_p". The input to this routine is in arbitrary seed. It uses the /// algorithms described in FIPS 186-3 Appendix A.1.2 to calculate /// primes "p" and "q". void calculateGroupModulusAndOrder(arith_uint256 seed, uint32_t pLen, uint32_t qLen, CBigNum *resultModulus, CBigNum *resultGroupOrder, arith_uint256 *resultPseed, arith_uint256 *resultQseed) { // Verify that the seed length is >= qLen if (qLen > (sizeof(seed)) * 8) { // TODO: The use of 256-bit seeds limits us to 256-bit group orders. We should probably change this. // throw std::runtime_error("Seed is too short to support the required security level."); } #ifdef ZEROCOIN_DEBUG cout << "calculateGroupModulusAndOrder: pLen = " << pLen << endl; #endif // Generate a random prime for the group order. // This may throw an exception, which we'll pass upwards. // Result is the value "resultGroupOrder", "qseed" and "qgen_counter". arith_uint256 qseed; uint32_t qgen_counter; *resultGroupOrder = generateRandomPrime(qLen, seed, &qseed, &qgen_counter); // Using ⎡pLen / 2 + 1⎤ as the length and qseed as the input_seed, use the random prime // routine to obtain p0 , pseed, and pgen_counter. We pass exceptions upward. uint32_t p0len = ceil((pLen / 2.0) + 1); arith_uint256 pseed; uint32_t pgen_counter; CBigNum p0 = generateRandomPrime(p0len, qseed, &pseed, &pgen_counter); // Set x = 0, old_counter = pgen_counter uint32_t old_counter = pgen_counter; // Generate a random integer "x" of pLen bits uint32_t iterations; CBigNum x = generateIntegerFromSeed(pLen, pseed, &iterations); pseed += (iterations + 1); // Set x = 2^{pLen−1} + (x mod 2^{pLen–1}). CBigNum powerOfTwo = CBigNum(2).pow(pLen-1); x = powerOfTwo + (x % powerOfTwo); // t = ⎡x / (2 * resultGroupOrder * p0)⎤. 
// TODO: we don't have a ceiling function CBigNum t = x / (CBigNum(2) * (*resultGroupOrder) * p0); // Now loop until we find a valid prime "p" or we fail due to // pgen_counter exceeding ((4*pLen) + old_counter). for ( ; pgen_counter <= ((4*pLen) + old_counter) ; pgen_counter++) { // If (2 * t * resultGroupOrder * p0 + 1) > 2^{pLen}, then // t = ⎡2^{pLen−1} / (2 * resultGroupOrder * p0)⎤. powerOfTwo = CBigNum(2).pow(pLen); CBigNum prod = (CBigNum(2) * t * (*resultGroupOrder) * p0) + CBigNum(1); if (prod > powerOfTwo) { // TODO: implement a ceil function t = CBigNum(2).pow(pLen-1) / (CBigNum(2) * (*resultGroupOrder) * p0); } // Compute a candidate prime resultModulus = 2tqp0 + 1. *resultModulus = (CBigNum(2) * t * (*resultGroupOrder) * p0) + CBigNum(1); // Verify that resultModulus is prime. First generate a pseudorandom integer "a". CBigNum a = generateIntegerFromSeed(pLen, pseed, &iterations); pseed += iterations + 1; // Set a = 2 + (a mod (resultModulus–3)). a = CBigNum(2) + (a % ((*resultModulus) - CBigNum(3))); // Set z = a^{2 * t * resultGroupOrder} mod resultModulus CBigNum z = a.pow_mod(CBigNum(2) * t * (*resultGroupOrder), (*resultModulus)); // If GCD(z–1, resultModulus) == 1 AND (z^{p0} mod resultModulus == 1) // then we have found our result. Return. if ((resultModulus->gcd(z - CBigNum(1))).isOne() && (z.pow_mod(p0, (*resultModulus))).isOne()) { // Success! Return the seeds and primes. *resultPseed = pseed; *resultQseed = qseed; return; } // This prime did not work out. Increment "t" and try again. t = t + CBigNum(1); } // loop continues until pgen_counter exceeds a limit // We reach this point only if we exceeded our maximum iteration count. // Throw an exception. throw std::runtime_error("Unable to generate a prime modulus for the group"); } /// \brief Deterministically compute a generator for a given group. /// \param seed A first seed for the process. /// \param pSeed A second seed for the process. /// \param qSeed A third seed for the process. 
/// \param modulus Proposed prime modulus for the field. /// \param groupOrder Proposed order of the group. /// \param index Index value, selects which generator you're building. /// \return The resulting generator. /// \throws A std::runtime_error if error. /// /// Generates a random group generator deterministically as a function of (seed,pSeed,qSeed) /// Uses the algorithm described in FIPS 186-3 Appendix A.2.3. CBigNum calculateGroupGenerator(arith_uint256 seed, arith_uint256 pSeed, arith_uint256 qSeed, CBigNum modulus, CBigNum groupOrder, uint32_t index) { CBigNum result; // Verify that 0 <= index < 256 if (index > 255) { throw std::runtime_error("Invalid index for group generation"); } // Compute e = (modulus - 1) / groupOrder CBigNum e = (modulus - CBigNum(1)) / groupOrder; // Loop until we find a generator for (uint32_t count = 1; count < MAX_GENERATOR_ATTEMPTS; count++) { // hash = Hash(seed || pSeed || qSeed || “ggen” || index || count arith_uint256 hash = calculateGeneratorSeed(seed, pSeed, qSeed, "ggen", index, count); CBigNum W(hash); // Compute result = W^e mod p result = W.pow_mod(e, modulus); // If result > 1, we have a generator if (result > 1) { return result; } } // We only get here if we failed to find a generator throw std::runtime_error("Unable to find a generator, too many attempts"); } /// \brief Deterministically compute a random prime number. /// \param primeBitLen Desired bit length of the prime. /// \param in_seed Input seed for the process. /// \param out_seed Result: output seed from the process. /// \param prime_gen_counter Result: number of iterations required. /// \return The resulting prime number. /// \throws A std::runtime_error if error. /// /// Generates a random prime number of primeBitLen bits from a given input /// seed. Uses the Shawe-Taylor algorithm as described in FIPS 186-3 /// Appendix C.6. This is a recursive function. 
CBigNum
generateRandomPrime(uint32_t primeBitLen, arith_uint256 in_seed, arith_uint256 *out_seed,
                    uint32_t *prime_gen_counter)
{
    // Verify that primeBitLen is not too small
    if (primeBitLen < 2) {
        throw std::runtime_error("Prime length is too short");
    }

    // If primeBitLen < 33 bits, perform the base case.
    // (Small primes are found directly by trial division.)
    if (primeBitLen < 33) {
        CBigNum result(0);

        // Set prime_seed = in_seed, prime_gen_counter = 0.
        arith_uint256 prime_seed = in_seed;
        (*prime_gen_counter) = 0;

        // Loop up to "4 * primeBitLen" iterations.
        while ((*prime_gen_counter) < (4 * primeBitLen)) {

            // Generate a pseudorandom integer "c" of length primeBitLength bits
            uint32_t iteration_count;
            CBigNum c = generateIntegerFromSeed(primeBitLen, prime_seed, &iteration_count);
#ifdef ZEROCOIN_DEBUG
            cout << "generateRandomPrime: primeBitLen = " << primeBitLen << endl;
            cout << "Generated c = " << c << endl;
#endif

            prime_seed += (iteration_count + 1);
            (*prime_gen_counter)++;

            // Set "intc" to be the least odd integer >= "c" we just generated
            uint32_t intc = c.getulong();
            intc = (2 * floor(intc / 2.0)) + 1;
#ifdef ZEROCOIN_DEBUG
            cout << "Should be odd. c = " << intc << endl;
            cout << "The big num is: c = " << c << endl;
#endif

            // Perform trial division on this (relatively small) integer to determine if "intc"
            // is prime. If so, return success.
            if (primalityTestByTrialDivision(intc)) {
                // Return "intc" converted back into a CBigNum and "prime_seed". We also updated
                // the variable "prime_gen_counter" in previous statements.
                result = intc;
                *out_seed = prime_seed;

                // Success
                return result;
            }
        } // while()

        // If we reached this point there was an error finding a candidate prime
        // so throw an exception.
        throw std::runtime_error("Unable to find prime in Shawe-Taylor algorithm");

        // END OF BASE CASE
    }
    // If primeBitLen >= 33 bits, perform the recursive case.
    // (Shawe-Taylor: build a big prime from a certified smaller one.)
    else {
        // Recurse to find a new random prime of roughly half the size
        uint32_t newLength = ceil((double)primeBitLen / 2.0) + 1;
        CBigNum c0 = generateRandomPrime(newLength, in_seed, out_seed, prime_gen_counter);

        // Generate a random integer "x" of primeBitLen bits using the output
        // of the previous call.
        uint32_t numIterations;
        CBigNum x = generateIntegerFromSeed(primeBitLen, *out_seed, &numIterations);
        (*out_seed) += numIterations + 1;

        // Compute "t" = ⎡x / (2 * c0⎤
        // TODO no Ceiling call
        CBigNum t = x / (CBigNum(2) * c0);

        // Repeat the following procedure until we find a prime (or time out)
        for (uint32_t testNum = 0; testNum < MAX_PRIMEGEN_ATTEMPTS; testNum++) {
            // If ((2 * t * c0) + 1 > 2^{primeBitLen}),
            // then t = ⎡2^{primeBitLen} – 1 / (2 * c0)⎤.
            if ((CBigNum(2) * t * c0) > (CBigNum(2).pow(CBigNum(primeBitLen)))) {
                t = ((CBigNum(2).pow(CBigNum(primeBitLen))) - CBigNum(1)) / (CBigNum(2) * c0);
            }

            // Set c = (2 * t * c0) + 1
            CBigNum c = (CBigNum(2) * t * c0) + CBigNum(1);

            // Increment prime_gen_counter
            (*prime_gen_counter)++;

            // Test "c" for primality as follows:
            // 1. First pick an integer "a" in between 2 and (c - 2)
            CBigNum a = generateIntegerFromSeed(c.bitSize(), (*out_seed), &numIterations);
            a = CBigNum(2) + (a % (c - CBigNum(3)));
            (*out_seed) += (numIterations + 1);

            // 2. Compute "z" = a^{2*t} mod c
            CBigNum z = a.pow_mod(CBigNum(2) * t, c);

            // 3. Check if "c" is prime.
            //    Specifically, verify that gcd((z-1), c) == 1 AND (z^c0 mod c) == 1
            // If so we return "c" as our result.
            if (c.gcd(z - CBigNum(1)).isOne() && z.pow_mod(c0, c).isOne()) {
                // Return "c", out_seed and prime_gen_counter
                // (the latter two of which were already updated)
                return c;
            }

            // 4. If the test did not succeed, increment "t" and loop
            t = t + CBigNum(1);
        } // end of test loop
    }

    // We only reach this point if the test loop has iterated MAX_PRIMEGEN_ATTEMPTS
    // and failed to identify a valid prime. Throw an exception.
    throw std::runtime_error("Unable to generate random prime (too many tests)");
}

// Expand a 256-bit seed into a pseudorandom integer of "numBits" bits by
// concatenating successive hashes H(seed+count); the top bit is forced to 1.
// The number of hash iterations used is returned through "numIterations".
CBigNum
generateIntegerFromSeed(uint32_t numBits, arith_uint256 seed, uint32_t *numIterations)
{
    CBigNum result(0);
    uint32_t iterations = ceil((double)numBits / (double)HASH_OUTPUT_BITS);

#ifdef ZEROCOIN_DEBUG
    cout << "numBits = " << numBits << endl;
    cout << "iterations = " << iterations << endl;
#endif

    // Loop "iterations" times filling up the value "result" with random bits
    for (uint32_t count = 0; count < iterations; count++) {
        // result += ( H(pseed + count) * 2^{count * p0len} )
        result += CBigNum(calculateHash(seed + count)) * CBigNum(2).pow(count * HASH_OUTPUT_BITS);
    }

    // Force the most significant bit: result = 2^{numBits-1} + (result mod 2^{numBits-1})
    result = CBigNum(2).pow(numBits - 1) + (result % (CBigNum(2).pow(numBits - 1)));

    // Return the number of iterations and the result
    *numIterations = iterations;
    return result;
}

/// \brief Determines whether a uint32_t is a prime through trial division.
/// \param candidate    Candidate to test.
/// \return             true if the value is prime, false otherwise
///
/// Performs trial division to determine whether a uint32_t is prime.
bool
primalityTestByTrialDivision(uint32_t candidate)
{
    // TODO: HACK HACK WRONG WRONG
    // (Despite the name, this delegates to CBigNum's probabilistic isPrime,
    // not actual trial division.)
    CBigNum canBignum(candidate);

    return canBignum.isPrime();
}

} // namespace libzerocoin
#include "opencv2/ccalib/omnidir.hpp"
#include "opencv2/ccalib/multicalib.hpp"
#include "opencv2/ccalib/randpattern.hpp"

#include <cstdio>
#include <cstring>

using namespace std;
using namespace cv;

const char * usage =
    "\n example command line for multi-camera calibration by using random pattern \n"
    " multi_cameras_calibration -nc 5 -pw 800 -ph 600 -ct 1 -fe 0 -nm 25 -v 0 multi_camera_omnidir.xml \n"
    "\n"
    " the file multi_camera_omnidir.xml is generated by imagelist_creator as \n"
    " imagelist_creator multi_camera_omnidir.xml *.* \n"
    " note the first filename in multi_camera_omnidir.xml is the pattern, the rest are photo names,\n"
    " photo names should be in form of cameraIdx-timestamp.*, and cameraIdx starts from 0";

// Print usage/help text for this calibration sample.
static void help()
{
    printf("\n This is a sample for multi-camera calibration, so far it only support random pattern,\n"
           "see randomPattern.hpp for detail. Pinhole and omnidirectional cameras are both supported, \n"
           "for omnidirectional camera, see omnidir.hpp for detail.\n"
           "Usage: mutiCamCalib \n"
           " -nc <num_camera> # number of cameras \n"
           " -pw <pattern_width> # physical width of random pattern \n"
           " -ph <pattern_height> # physical height of random pattern \n"
           " -ct <camera_type> # camera type, 0 for pinhole and 1 for omnidirectional \n"
           " -fe # whether show feature extraction\n"
           " -nm # number of minimal matches of an image \n"
           " -v # whether show verbose information \n"
           " input_data # text file with pattern file names and a list of photo names, the file is generated by imagelist_creator \n");
    printf("\n %s", usage);
}

// Entry point: parses command-line options, then runs multi-camera
// calibration (load images -> initialize -> optimize extrinsics) and writes
// the results to multi-camera-results.xml.
//
// BUGFIXES vs. the previous version:
//  * the integer options were read with sscanf("%u", &intVar), i.e. an
//    unsigned conversion into a signed int* — undefined behavior per the C
//    scanf specification; changed to "%d".
//  * argv[++i] could run past argc when an option was the last argument;
//    each option now checks that its value argument exists.
//  * a missing input file name previously reached the calibrator as a null
//    pointer; it is now rejected with an error message.
int main(int argc, char** argv)
{
    float patternWidth = 0.0f, patternHeight = 0.0f;
    int nCamera = 0, nMiniMatches = 0, cameraType = 0;
    const char* outputFilename = "multi-camera-results.xml";
    const char* inputFilename = 0;
    int showFeatureExtraction = 0, verbose = 0;

    if (argc < 2)
    {
        help();
        return 1;
    }

    for (int i = 1; i < argc; ++i)
    {
        const char* s = argv[i];
        if (strcmp( s, "-nc") == 0)
        {
            if (++i >= argc || sscanf( argv[i], "%d", &nCamera) != 1 || nCamera <= 0)
            {
                return fprintf(stderr, "Invalid number of cameras \n"), -1;
            }
        }
        else if ( strcmp( s, "-pw" ) == 0 )
        {
            if (++i >= argc || sscanf( argv[i], "%f", &patternWidth) != 1 || patternWidth <=0 )
            {
                return fprintf(stderr, "Invalid pattern width \n"), -1;
            }
        }
        else if ( strcmp( s, "-ph" ) == 0 )
        {
            if (++i >= argc || sscanf( argv[i], "%f", &patternHeight) != 1 || patternHeight <=0 )
            {
                return fprintf(stderr, "Invalid pattern height \n"), -1;
            }
        }
        else if ( strcmp( s, "-ct" ) == 0 )
        {
            // NOTE(review): the help text advertises only 0 and 1, but 2 was
            // already accepted here; kept for backward compatibility.
            if (++i >= argc || sscanf( argv[i], "%d", &cameraType) != 1 || (cameraType !=0 && cameraType !=1 && cameraType !=2) )
            {
                return fprintf(stderr, "Invalid camera type, 0 for pinhole and 1 for omnidirectional \n"), -1;
            }
        }
        else if ( strcmp( s, "-fe" ) == 0 )
        {
            if (++i >= argc || sscanf( argv[i], "%d", &showFeatureExtraction) != 1 || (showFeatureExtraction !=1 && showFeatureExtraction !=0) )
            {
                return fprintf(stderr, "Not bool value, set to 0 or 1 \n"), -1;
            }
        }
        else if ( strcmp( s, "-nm" ) == 0 )
        {
            if (++i >= argc || sscanf( argv[i], "%d", &nMiniMatches) != 1 || nMiniMatches <=0 )
            {
                return fprintf(stderr, "Invalid number of minimal matches \n"), -1;
            }
        }
        else if ( strcmp( s, "-v" ) == 0 )
        {
            if (++i >= argc || sscanf( argv[i], "%d", &verbose) != 1 || (verbose !=1 && verbose !=0) )
            {
                return fprintf(stderr, "verbose is not bool value, set to 0 or 1 \n"), -1;
            }
        }
        else if( s[0] != '-')
        {
            inputFilename = s;
        }
        else
        {
            return fprintf( stderr, "Unknown option %s\n", s ), -1;
        }
    }

    if (inputFilename == 0)
    {
        return fprintf(stderr, "Missing input_data file name \n"), -1;
    }

    // do multi-camera calibration
    multicalib::MultiCameraCalibration multiCalib(cameraType, nCamera, inputFilename, patternWidth, patternHeight, verbose, showFeatureExtraction, nMiniMatches);

    multiCalib.loadImages();
    multiCalib.initialize();
    multiCalib.optimizeExtrinsics();
    // the above three lines can be replaced by multiCalib.run();

    multiCalib.writeParameters(outputFilename);
    return 0;
}
#ifndef KARGER_ALGO_H
#define KARGER_ALGO_H

#include <memory>
#include <vector>

#include "../graphs/lca_computer.hpp"
#include "../graphs/undirected_weighted_graph.hpp"
#include "../graphs/weighted_tree.hpp"
#include "common.hpp"

// Cut computation over a weighted graph and a companion spanning tree,
// using LCA queries (presumably Karger's minimum-cut machinery, per the
// name — confirm against the implementation file).
class karger_algo {
private:
    // Input graph whose cut values are computed.
    std::shared_ptr<graphs::weighted_graph> graph;
    // Spanning tree of `graph` that cut candidates are taken from.
    std::shared_ptr<graphs::weighted_tree> tree;
    // Number of vertices.
    size_t n;
    // Lowest-common-ancestor helper over `tree`.
    graphs::lca_computer lca;

    // Fill `subtreeCost` (one pass over the tree).
    void calc_subtree_cost();
    // Fill `pairCost` for vertex pairs.
    void calc_pair_cost();
    // Fill `rho` from the precomputed costs.
    void calc_rho();

public:
    // Precomputed tables; populated by initialize() — exact semantics are
    // defined in the corresponding .cpp (not visible here).
    std::vector<int> rho;
    std::vector<int> subtreeCost;
    std::vector<std::vector<int>> pairCost;

    // Takes shared ownership of the graph and its spanning tree.
    karger_algo(std::shared_ptr<graphs::weighted_graph> graph, std::shared_ptr<graphs::weighted_tree> tree);

    // Run the precomputations; must be called before the queries below.
    void initialize();
    // Value of the cut induced by tree edges/vertices v and w.
    int get_cut_val(int v, int w);
    // Best cut found, returned as the pair of defining edges.
    edge_pair find_cut();
};

#endif /* KARGER_ALGO_H */
/**************************************************************************** Copyright (c) 2010 cocos2d-x.org Copyright (c) Microsoft Open Technologies, Inc. http://www.cocos2d-x.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
****************************************************************************/ #include "platform/CCCommon.h" #include "platform/CCStdC.h" #include "CCWinRTUtils.h" #if defined(VLD_DEBUG_MEMORY) #include <vld.h> #endif NS_CC_BEGIN void MessageBox(const char * pszMsg, const char * pszTitle) { // Create the message dialog and set its content Platform::String^ message = ref new Platform::String(CCUtf8ToUnicode(pszMsg, -1).c_str()); Platform::String^ title = ref new Platform::String(CCUtf8ToUnicode(pszTitle, -1).c_str()); GLViewImpl::sharedOpenGLView()->ShowMessageBox(title, message); } void CCLuaLog(const char *pszMsg) { #if defined(COCOS2D_DEBUG) int bufflen = MultiByteToWideChar(CP_UTF8, 0, pszMsg, -1, NULL, 0); WCHAR* widebuff = new WCHAR[bufflen + 1]; memset(widebuff, 0, sizeof(WCHAR) * (bufflen + 1)); MultiByteToWideChar(CP_UTF8, 0, pszMsg, -1, widebuff, bufflen); OutputDebugStringW(widebuff); OutputDebugStringA("\n"); bufflen = WideCharToMultiByte(CP_ACP, 0, widebuff, -1, NULL, 0, NULL, NULL); char* buff = new char[bufflen + 1]; memset(buff, 0, sizeof(char) * (bufflen + 1)); WideCharToMultiByte(CP_ACP, 0, widebuff, -1, buff, bufflen, NULL, NULL); puts(buff); delete[] widebuff; delete[] buff; #endif } NS_CC_END
// Copyright (C) 2018-2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once #include <memory> #include <string> #include <tuple> #include <vector> #include "shared_test_classes/base/layer_test_utils.hpp" #include "ngraph_functions/builders.hpp" #include "ngraph_functions/utils/ngraph_helpers.hpp" namespace SubgraphTestsDefinitions { typedef std::tuple< std::vector<size_t>, // Input Shapes std::vector<size_t>, // Kernel Shape size_t // Stride > convParams; typedef std::tuple< InferenceEngine::Precision, // Network Precision std::string, // Target Device std::map<std::string, std::string>, // Configuration convParams, // Convolution Params size_t, // Input Channels size_t // Output Channels > ScaleShiftConvScaleShiftParams; class ScaleShiftAfterConvTest : public testing::WithParamInterface<ScaleShiftConvScaleShiftParams>, virtual public LayerTestsUtils::LayerTestsCommon { public: static std::string getTestCaseName(const testing::TestParamInfo<ScaleShiftConvScaleShiftParams>& obj); InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; protected: void SetUp() override; }; class ScaleShiftBeforeConvTest : public testing::WithParamInterface<ScaleShiftConvScaleShiftParams>, virtual public LayerTestsUtils::LayerTestsCommon { public: static std::string getTestCaseName(const testing::TestParamInfo<ScaleShiftConvScaleShiftParams>& obj); InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override; protected: void SetUp() override; }; } // namespace SubgraphTestsDefinitions
#ifndef SN_BUILTIN_CRTDBG_H #define SN_BUILTIN_CRTDBG_H #include "../sn_Config.hpp" #if defined(SN_CONFIG_COMPILER_MSVC) #include <stdlib.h> #include <crtdbg.h> #define _CRTDBG_MAP_ALLOC #ifdef _DEBUG #define SN_BUILTIN_CRTDBG_NEW new(_NORMAL_BLOCK, __FILE__, __LINE__) #define new SN_BUILTIN_CRTDBG_NEW #endif #define SN_BUILTIN_CRTDBG_ALLOC(size) _CrtSetBreakAlloc(size) #define SN_BUILTIN_CRTDBG_BEGIN _CrtSetDbgFlag(_CrtSetDbgFlag(_CRTDBG_REPORT_FLAG | _CRTDBG_LEAK_CHECK_DF)) #define SN_BUILTIN_CRTDBG_END _CrtDumpMemoryLeaks() namespace sn_builtin { namespace crtdbg { class DbgMemLeak { _CrtMemState m_checkpoint; public: explicit DbgMemLeak() { _CrtMemCheckpoint(&m_checkpoint); }; ~DbgMemLeak() { _CrtMemState checkpoint; _CrtMemCheckpoint(&checkpoint); _CrtMemState diff; _CrtMemDifference(&diff, &m_checkpoint, &checkpoint); _CrtMemDumpStatistics(&diff); _CrtMemDumpAllObjectsSince(&diff); }; }; void crtdbg_exit() { (void)_CrtDumpMemoryLeaks(); } } } #define SN_BUILTIN_CRTDBG_REG atexit(&sn_Builtin::crtdbg::crtdbg_exit) #endif #endif
/*
 * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
 *
 * OpenArkCompiler is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *
 *     http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
 * FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include "mplfe_compiler.h"
#include "fe_utils.h"

using namespace maple;

// mplfe driver: parse the command line, run the front-end compiler over a
// fresh MIRModule, and optionally report the elapsed wall-clock time.
int main(int argc, char **argv) {
  MPLTimer timer;
  timer.Start();

  // Bail out early on a malformed command line.
  if (!MPLFEOptions::GetInstance().SolveArgs(argc, argv)) {
    return static_cast<int>(FEErrno::kCmdParseError);
  }

  MPLFEEnv::GetInstance().Init();
  MIRModule module;
  MPLFECompiler compiler(module);
  int exitCode = compiler.Run();

  // The MIRModule destructor does not release the pragma memory; free it
  // explicitly to keep front-end debugging leak-clean.
  MemPool *pragmaMemPool = module.GetPragmaMemPool();
  FEUtils::DeleteMempoolPtr(pragmaMemPool);

  timer.Stop();
  if (FEOptions::GetInstance().IsDumpTime()) {
    INFO(kLncInfo, "mplfe time: %.2lfms", timer.ElapsedMilliseconds() / 1.0);
  }
  return exitCode;
}
/** * Autogenerated by Thrift for storage.thrift * * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING * @generated */ #include "storage_types.h" #include "storage_types.tcc" #include <thrift/lib/cpp2/gen/module_types_cpp.h> #include "storage_data.h" namespace apache { namespace thrift { constexpr std::size_t const TEnumTraits<::nebula::storage::cpp2::StatType>::size; folly::Range<::nebula::storage::cpp2::StatType const*> const TEnumTraits<::nebula::storage::cpp2::StatType>::values = folly::range(TEnumDataStorage<::nebula::storage::cpp2::StatType>::values); folly::Range<folly::StringPiece const*> const TEnumTraits<::nebula::storage::cpp2::StatType>::names = folly::range(TEnumDataStorage<::nebula::storage::cpp2::StatType>::names); char const* TEnumTraits<::nebula::storage::cpp2::StatType>::findName(type value) { using factory = ::nebula::storage::cpp2::_StatType_EnumMapFactory; static folly::Indestructible<factory::ValuesToNamesMapType> const map{ factory::makeValuesToNamesMap()}; auto found = map->find(value); return found == map->end() ? nullptr : found->second; } bool TEnumTraits<::nebula::storage::cpp2::StatType>::findValue(char const* name, type* out) { using factory = ::nebula::storage::cpp2::_StatType_EnumMapFactory; static folly::Indestructible<factory::NamesToValuesMapType> const map{ factory::makeNamesToValuesMap()}; auto found = map->find(name); return found == map->end() ? 
false : (*out = found->second, true); } }} // apache::thrift namespace nebula { namespace storage { namespace cpp2 { FOLLY_PUSH_WARNING FOLLY_GNU_DISABLE_WARNING("-Wdeprecated-declarations") const _StatType_EnumMapFactory::ValuesToNamesMapType _StatType_VALUES_TO_NAMES = _StatType_EnumMapFactory::makeValuesToNamesMap(); const _StatType_EnumMapFactory::NamesToValuesMapType _StatType_NAMES_TO_VALUES = _StatType_EnumMapFactory::makeNamesToValuesMap(); FOLLY_POP_WARNING }}} // nebula::storage::cpp2 namespace apache { namespace thrift { constexpr std::size_t const TEnumTraits<::nebula::storage::cpp2::OrderDirection>::size; folly::Range<::nebula::storage::cpp2::OrderDirection const*> const TEnumTraits<::nebula::storage::cpp2::OrderDirection>::values = folly::range(TEnumDataStorage<::nebula::storage::cpp2::OrderDirection>::values); folly::Range<folly::StringPiece const*> const TEnumTraits<::nebula::storage::cpp2::OrderDirection>::names = folly::range(TEnumDataStorage<::nebula::storage::cpp2::OrderDirection>::names); char const* TEnumTraits<::nebula::storage::cpp2::OrderDirection>::findName(type value) { using factory = ::nebula::storage::cpp2::_OrderDirection_EnumMapFactory; static folly::Indestructible<factory::ValuesToNamesMapType> const map{ factory::makeValuesToNamesMap()}; auto found = map->find(value); return found == map->end() ? nullptr : found->second; } bool TEnumTraits<::nebula::storage::cpp2::OrderDirection>::findValue(char const* name, type* out) { using factory = ::nebula::storage::cpp2::_OrderDirection_EnumMapFactory; static folly::Indestructible<factory::NamesToValuesMapType> const map{ factory::makeNamesToValuesMap()}; auto found = map->find(name); return found == map->end() ? 
false : (*out = found->second, true); } }} // apache::thrift namespace nebula { namespace storage { namespace cpp2 { FOLLY_PUSH_WARNING FOLLY_GNU_DISABLE_WARNING("-Wdeprecated-declarations") const _OrderDirection_EnumMapFactory::ValuesToNamesMapType _OrderDirection_VALUES_TO_NAMES = _OrderDirection_EnumMapFactory::makeValuesToNamesMap(); const _OrderDirection_EnumMapFactory::NamesToValuesMapType _OrderDirection_NAMES_TO_VALUES = _OrderDirection_EnumMapFactory::makeNamesToValuesMap(); FOLLY_POP_WARNING }}} // nebula::storage::cpp2 namespace apache { namespace thrift { constexpr std::size_t const TEnumTraits<::nebula::storage::cpp2::EdgeDirection>::size; folly::Range<::nebula::storage::cpp2::EdgeDirection const*> const TEnumTraits<::nebula::storage::cpp2::EdgeDirection>::values = folly::range(TEnumDataStorage<::nebula::storage::cpp2::EdgeDirection>::values); folly::Range<folly::StringPiece const*> const TEnumTraits<::nebula::storage::cpp2::EdgeDirection>::names = folly::range(TEnumDataStorage<::nebula::storage::cpp2::EdgeDirection>::names); char const* TEnumTraits<::nebula::storage::cpp2::EdgeDirection>::findName(type value) { using factory = ::nebula::storage::cpp2::_EdgeDirection_EnumMapFactory; static folly::Indestructible<factory::ValuesToNamesMapType> const map{ factory::makeValuesToNamesMap()}; auto found = map->find(value); return found == map->end() ? nullptr : found->second; } bool TEnumTraits<::nebula::storage::cpp2::EdgeDirection>::findValue(char const* name, type* out) { using factory = ::nebula::storage::cpp2::_EdgeDirection_EnumMapFactory; static folly::Indestructible<factory::NamesToValuesMapType> const map{ factory::makeNamesToValuesMap()}; auto found = map->find(name); return found == map->end() ? 
false : (*out = found->second, true); } }} // apache::thrift namespace nebula { namespace storage { namespace cpp2 { FOLLY_PUSH_WARNING FOLLY_GNU_DISABLE_WARNING("-Wdeprecated-declarations") const _EdgeDirection_EnumMapFactory::ValuesToNamesMapType _EdgeDirection_VALUES_TO_NAMES = _EdgeDirection_EnumMapFactory::makeValuesToNamesMap(); const _EdgeDirection_EnumMapFactory::NamesToValuesMapType _EdgeDirection_NAMES_TO_VALUES = _EdgeDirection_EnumMapFactory::makeNamesToValuesMap(); FOLLY_POP_WARNING }}} // nebula::storage::cpp2 namespace apache { namespace thrift { constexpr std::size_t const TEnumTraits<::nebula::storage::cpp2::ScanType>::size; folly::Range<::nebula::storage::cpp2::ScanType const*> const TEnumTraits<::nebula::storage::cpp2::ScanType>::values = folly::range(TEnumDataStorage<::nebula::storage::cpp2::ScanType>::values); folly::Range<folly::StringPiece const*> const TEnumTraits<::nebula::storage::cpp2::ScanType>::names = folly::range(TEnumDataStorage<::nebula::storage::cpp2::ScanType>::names); char const* TEnumTraits<::nebula::storage::cpp2::ScanType>::findName(type value) { using factory = ::nebula::storage::cpp2::_ScanType_EnumMapFactory; static folly::Indestructible<factory::ValuesToNamesMapType> const map{ factory::makeValuesToNamesMap()}; auto found = map->find(value); return found == map->end() ? nullptr : found->second; } bool TEnumTraits<::nebula::storage::cpp2::ScanType>::findValue(char const* name, type* out) { using factory = ::nebula::storage::cpp2::_ScanType_EnumMapFactory; static folly::Indestructible<factory::NamesToValuesMapType> const map{ factory::makeNamesToValuesMap()}; auto found = map->find(name); return found == map->end() ? 
false : (*out = found->second, true); } }} // apache::thrift namespace nebula { namespace storage { namespace cpp2 { FOLLY_PUSH_WARNING FOLLY_GNU_DISABLE_WARNING("-Wdeprecated-declarations") const _ScanType_EnumMapFactory::ValuesToNamesMapType _ScanType_VALUES_TO_NAMES = _ScanType_EnumMapFactory::makeValuesToNamesMap(); const _ScanType_EnumMapFactory::NamesToValuesMapType _ScanType_NAMES_TO_VALUES = _ScanType_EnumMapFactory::makeNamesToValuesMap(); FOLLY_POP_WARNING }}} // nebula::storage::cpp2 namespace apache { namespace thrift { constexpr std::size_t const TEnumTraits<::nebula::storage::cpp2::EngineSignType>::size; folly::Range<::nebula::storage::cpp2::EngineSignType const*> const TEnumTraits<::nebula::storage::cpp2::EngineSignType>::values = folly::range(TEnumDataStorage<::nebula::storage::cpp2::EngineSignType>::values); folly::Range<folly::StringPiece const*> const TEnumTraits<::nebula::storage::cpp2::EngineSignType>::names = folly::range(TEnumDataStorage<::nebula::storage::cpp2::EngineSignType>::names); char const* TEnumTraits<::nebula::storage::cpp2::EngineSignType>::findName(type value) { using factory = ::nebula::storage::cpp2::_EngineSignType_EnumMapFactory; static folly::Indestructible<factory::ValuesToNamesMapType> const map{ factory::makeValuesToNamesMap()}; auto found = map->find(value); return found == map->end() ? nullptr : found->second; } bool TEnumTraits<::nebula::storage::cpp2::EngineSignType>::findValue(char const* name, type* out) { using factory = ::nebula::storage::cpp2::_EngineSignType_EnumMapFactory; static folly::Indestructible<factory::NamesToValuesMapType> const map{ factory::makeNamesToValuesMap()}; auto found = map->find(name); return found == map->end() ? 
false : (*out = found->second, true); } }} // apache::thrift namespace nebula { namespace storage { namespace cpp2 { FOLLY_PUSH_WARNING FOLLY_GNU_DISABLE_WARNING("-Wdeprecated-declarations") const _EngineSignType_EnumMapFactory::ValuesToNamesMapType _EngineSignType_VALUES_TO_NAMES = _EngineSignType_EnumMapFactory::makeValuesToNamesMap(); const _EngineSignType_EnumMapFactory::NamesToValuesMapType _EngineSignType_NAMES_TO_VALUES = _EngineSignType_EnumMapFactory::makeNamesToValuesMap(); FOLLY_POP_WARNING }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::RequestCommon>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::RequestCommon>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN RequestCommon::RequestCommon(apache::thrift::FragileConstructor, ::nebula::cpp2::SessionID session_id__arg, ::nebula::cpp2::ExecutionPlanID plan_id__arg, bool profile_detail__arg) : session_id(std::move(session_id__arg)), plan_id(std::move(plan_id__arg)), profile_detail(std::move(profile_detail__arg)) { __isset.session_id = true; __isset.plan_id = true; __isset.profile_detail = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void RequestCommon::__clear() { // clear all fields session_id = 0; plan_id = 0; profile_detail = 0; THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool RequestCommon::operator==(const RequestCommon& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (lhs.session_id_ref() != rhs.session_id_ref()) { 
return false; } if (lhs.plan_id_ref() != rhs.plan_id_ref()) { return false; } if (lhs.profile_detail_ref() != rhs.profile_detail_ref()) { return false; } return true; } bool RequestCommon::operator<(const RequestCommon& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (lhs.session_id_ref() != rhs.session_id_ref()) { return lhs.session_id_ref() < rhs.session_id_ref(); } if (lhs.plan_id_ref() != rhs.plan_id_ref()) { return lhs.plan_id_ref() < rhs.plan_id_ref(); } if (lhs.profile_detail_ref() != rhs.profile_detail_ref()) { return lhs.profile_detail_ref() < rhs.profile_detail_ref(); } return false; } void swap(RequestCommon& a, RequestCommon& b) { using ::std::swap; swap(a.session_id_ref().value_unchecked(), b.session_id_ref().value_unchecked()); swap(a.plan_id_ref().value_unchecked(), b.plan_id_ref().value_unchecked()); swap(a.profile_detail_ref().value_unchecked(), b.profile_detail_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void RequestCommon::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t RequestCommon::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t RequestCommon::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t RequestCommon::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void RequestCommon::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t RequestCommon::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t RequestCommon::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t RequestCommon::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::PartitionResult>::translateFieldName( folly::StringPiece _fname, int16_t& fid, 
apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::PartitionResult>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN PartitionResult::PartitionResult(apache::thrift::FragileConstructor, ::nebula::cpp2::ErrorCode code__arg, ::nebula::cpp2::PartitionID part_id__arg, nebula::HostAddr leader__arg) : code(std::move(code__arg)), part_id(std::move(part_id__arg)), leader(std::move(leader__arg)) { __isset.leader = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void PartitionResult::__clear() { // clear all fields code = ::nebula::cpp2::ErrorCode::SUCCEEDED; part_id = 0; leader.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool PartitionResult::operator==(const PartitionResult& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.code == rhs.code)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } if (lhs.leader_ref() != rhs.leader_ref()) { return false; } return true; } bool PartitionResult::operator<(const PartitionResult& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.code == rhs.code)) { return lhs.code < rhs.code; } if (!(lhs.part_id == rhs.part_id)) { return lhs.part_id < rhs.part_id; } if (lhs.leader_ref() != rhs.leader_ref()) { return lhs.leader_ref() < rhs.leader_ref(); } return false; } const nebula::HostAddr* PartitionResult::get_leader() const& { return leader_ref().has_value() ? std::addressof(leader) : nullptr; } nebula::HostAddr* PartitionResult::get_leader() & { return leader_ref().has_value() ? 
std::addressof(leader) : nullptr; } void swap(PartitionResult& a, PartitionResult& b) { using ::std::swap; swap(a.code_ref().value(), b.code_ref().value()); swap(a.part_id_ref().value(), b.part_id_ref().value()); swap(a.leader_ref().value_unchecked(), b.leader_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void PartitionResult::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t PartitionResult::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t PartitionResult::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t PartitionResult::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void PartitionResult::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t PartitionResult::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t PartitionResult::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t PartitionResult::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< PartitionResult, ::apache::thrift::type_class::structure, nebula::HostAddr>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< PartitionResult, ::apache::thrift::type_class::structure, nebula::HostAddr>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::ResponseCommon>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::ResponseCommon>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), 
data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN ResponseCommon::ResponseCommon(apache::thrift::FragileConstructor, ::std::vector< ::nebula::storage::cpp2::PartitionResult> failed_parts__arg, int64_t latency_in_us__arg, ::std::map<::std::string, int32_t> latency_detail_us__arg) : failed_parts(std::move(failed_parts__arg)), latency_in_us(std::move(latency_in_us__arg)), latency_detail_us(std::move(latency_detail_us__arg)) { __isset.latency_detail_us = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void ResponseCommon::__clear() { // clear all fields failed_parts.clear(); latency_in_us = 0; latency_detail_us.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool ResponseCommon::operator==(const ResponseCommon& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.failed_parts == rhs.failed_parts)) { return false; } if (!(lhs.latency_in_us == rhs.latency_in_us)) { return false; } if (lhs.latency_detail_us_ref() != rhs.latency_detail_us_ref()) { return false; } return true; } bool ResponseCommon::operator<(const ResponseCommon& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.failed_parts == rhs.failed_parts)) { return lhs.failed_parts < rhs.failed_parts; } if (!(lhs.latency_in_us == rhs.latency_in_us)) { return lhs.latency_in_us < rhs.latency_in_us; } if (lhs.latency_detail_us_ref() != rhs.latency_detail_us_ref()) { return lhs.latency_detail_us_ref() < rhs.latency_detail_us_ref(); } return false; } const ::std::vector< ::nebula::storage::cpp2::PartitionResult>& ResponseCommon::get_failed_parts() const& { return failed_parts; } ::std::vector< ::nebula::storage::cpp2::PartitionResult> ResponseCommon::get_failed_parts() && { return std::move(failed_parts); } const ::std::map<::std::string, int32_t>* 
ResponseCommon::get_latency_detail_us() const& { return latency_detail_us_ref().has_value() ? std::addressof(latency_detail_us) : nullptr; } ::std::map<::std::string, int32_t>* ResponseCommon::get_latency_detail_us() & { return latency_detail_us_ref().has_value() ? std::addressof(latency_detail_us) : nullptr; } void swap(ResponseCommon& a, ResponseCommon& b) { using ::std::swap; swap(a.failed_parts_ref().value(), b.failed_parts_ref().value()); swap(a.latency_in_us_ref().value(), b.latency_in_us_ref().value()); swap(a.latency_detail_us_ref().value_unchecked(), b.latency_detail_us_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void ResponseCommon::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t ResponseCommon::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t ResponseCommon::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t ResponseCommon::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void ResponseCommon::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t ResponseCommon::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t ResponseCommon::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t ResponseCommon::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< ResponseCommon, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::PartitionResult>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< ResponseCommon, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::PartitionResult>>, "inconsistent use of nimble option"); }}} // 
nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::StatProp>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::StatProp>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN StatProp::StatProp(apache::thrift::FragileConstructor, ::std::string alias__arg, ::std::string prop__arg, ::nebula::storage::cpp2::StatType stat__arg) : alias(std::move(alias__arg)), prop(std::move(prop__arg)), stat(std::move(stat__arg)) { __isset.alias = true; __isset.prop = true; __isset.stat = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void StatProp::__clear() { // clear all fields alias = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); prop = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); stat = static_cast< ::nebula::storage::cpp2::StatType>(0); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool StatProp::operator==(const StatProp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.alias, rhs.alias)) { return false; } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.prop, rhs.prop)) { return false; } if (!(lhs.stat == rhs.stat)) { return false; } return true; } bool StatProp::operator<(const StatProp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.alias, rhs.alias)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.alias, rhs.alias); } if 
// [generated Thrift code] StatProp (continued): tail of operator<, ADL swap, and
// Binary/Compact protocol template instantiations; then the Expr field-name translation
// table for the Thrift runtime. Do not hand-edit.
(!apache::thrift::StringTraits<std::string>::isEqual(lhs.prop, rhs.prop)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.prop, rhs.prop); } if (!(lhs.stat == rhs.stat)) { return lhs.stat < rhs.stat; } return false; } void swap(StatProp& a, StatProp& b) { using ::std::swap; swap(a.alias_ref().value(), b.alias_ref().value()); swap(a.prop_ref().value(), b.prop_ref().value()); swap(a.stat_ref().value(), b.stat_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void StatProp::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t StatProp::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t StatProp::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t StatProp::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void StatProp::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t StatProp::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t StatProp::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t StatProp::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::Expr>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::Expr>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN 
// [generated Thrift code] Expr: fragile constructor, __clear() resetting both string
// fields to "", field-wise operator== / operator< (alias then expr), ADL swap, and the
// start of the Binary/Compact protocol instantiations. Do not hand-edit.
Expr::Expr(apache::thrift::FragileConstructor, ::std::string alias__arg, ::std::string expr__arg) : alias(std::move(alias__arg)), expr(std::move(expr__arg)) { __isset.alias = true; __isset.expr = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void Expr::__clear() { // clear all fields alias = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); expr = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool Expr::operator==(const Expr& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.alias, rhs.alias)) { return false; } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.expr, rhs.expr)) { return false; } return true; } bool Expr::operator<(const Expr& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.alias, rhs.alias)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.alias, rhs.alias); } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.expr, rhs.expr)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.expr, rhs.expr); } return false; } void swap(Expr& a, Expr& b) { using ::std::swap; swap(a.alias_ref().value(), b.alias_ref().value()); swap(a.expr_ref().value(), b.expr_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void Expr::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t Expr::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t Expr::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t Expr::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void Expr::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t Expr::write<>(apache::thrift::CompactProtocolWriter*) const; template 
// [generated Thrift code] Expr (tail of protocol instantiations); EdgeProp: field-name
// translation table, fragile constructor, __clear() (type=0, props emptied),
// operator== / operator< (type then props), get_props() lvalue/rvalue accessors, and
// the opening of swap(). Do not hand-edit.
uint32_t Expr::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t Expr::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::EdgeProp>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::EdgeProp>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN EdgeProp::EdgeProp(apache::thrift::FragileConstructor, ::nebula::cpp2::EdgeType type__arg, ::std::vector<::std::string> props__arg) : type(std::move(type__arg)), props(std::move(props__arg)) { __isset.type = true; __isset.props = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void EdgeProp::__clear() { // clear all fields type = 0; props.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool EdgeProp::operator==(const EdgeProp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.type == rhs.type)) { return false; } if (!(lhs.props == rhs.props)) { return false; } return true; } bool EdgeProp::operator<(const EdgeProp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.type == rhs.type)) { return lhs.type < rhs.type; } if (!(lhs.props == rhs.props)) { return lhs.props < rhs.props; } return false; } const ::std::vector<::std::string>& EdgeProp::get_props() const& { return props; } ::std::vector<::std::string> EdgeProp::get_props() && { return std::move(props); } void swap(EdgeProp& a, EdgeProp& b) { using ::std::swap; 
// [generated Thrift code] EdgeProp (continued): swap body and Binary/Compact protocol
// instantiations; VertexProp: field-name translation table, fragile constructor, and
// the start of __clear() (tag=0, props emptied). Do not hand-edit.
swap(a.type_ref().value(), b.type_ref().value()); swap(a.props_ref().value(), b.props_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void EdgeProp::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t EdgeProp::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t EdgeProp::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t EdgeProp::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void EdgeProp::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t EdgeProp::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t EdgeProp::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t EdgeProp::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::VertexProp>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::VertexProp>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN VertexProp::VertexProp(apache::thrift::FragileConstructor, ::nebula::cpp2::TagID tag__arg, ::std::vector<::std::string> props__arg) : tag(std::move(tag__arg)), props(std::move(props__arg)) { __isset.tag = true; __isset.props = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void VertexProp::__clear() { // clear all fields tag = 0; props.clear(); 
// [generated Thrift code] VertexProp (continued): operator== / operator< (tag then
// props), get_props() lvalue/rvalue accessors, ADL swap, Binary/Compact protocol
// instantiations; then the opening of the OrderBy field-name translation table.
// Do not hand-edit.
THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool VertexProp::operator==(const VertexProp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.tag == rhs.tag)) { return false; } if (!(lhs.props == rhs.props)) { return false; } return true; } bool VertexProp::operator<(const VertexProp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.tag == rhs.tag)) { return lhs.tag < rhs.tag; } if (!(lhs.props == rhs.props)) { return lhs.props < rhs.props; } return false; } const ::std::vector<::std::string>& VertexProp::get_props() const& { return props; } ::std::vector<::std::string> VertexProp::get_props() && { return std::move(props); } void swap(VertexProp& a, VertexProp& b) { using ::std::swap; swap(a.tag_ref().value(), b.tag_ref().value()); swap(a.props_ref().value(), b.props_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void VertexProp::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t VertexProp::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t VertexProp::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t VertexProp::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void VertexProp::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t VertexProp::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t VertexProp::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t VertexProp::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::OrderBy>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = 
// [generated Thrift code] OrderBy: tail of the field-name translation table, fragile
// constructor, __clear() (prop="", direction=0), operator== / operator< (prop then
// direction), ADL swap, and the start of the protocol instantiations. Do not hand-edit.
apache::thrift::TStructDataStorage<::nebula::storage::cpp2::OrderBy>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN OrderBy::OrderBy(apache::thrift::FragileConstructor, ::std::string prop__arg, ::nebula::storage::cpp2::OrderDirection direction__arg) : prop(std::move(prop__arg)), direction(std::move(direction__arg)) { __isset.prop = true; __isset.direction = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void OrderBy::__clear() { // clear all fields prop = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); direction = static_cast< ::nebula::storage::cpp2::OrderDirection>(0); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool OrderBy::operator==(const OrderBy& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.prop, rhs.prop)) { return false; } if (!(lhs.direction == rhs.direction)) { return false; } return true; } bool OrderBy::operator<(const OrderBy& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.prop, rhs.prop)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.prop, rhs.prop); } if (!(lhs.direction == rhs.direction)) { return lhs.direction < rhs.direction; } return false; } void swap(OrderBy& a, OrderBy& b) { using ::std::swap; swap(a.prop_ref().value(), b.prop_ref().value()); swap(a.direction_ref().value(), b.direction_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void OrderBy::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t 
// [generated Thrift code] OrderBy (tail of protocol instantiations); TraverseSpec:
// field-name translation table, default constructor (edge_direction=BOTH, dedup=false,
// random=0, limit=0), destructor, and the opening of the fragile constructor's
// parameter list. Do not hand-edit.
OrderBy::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t OrderBy::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t OrderBy::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void OrderBy::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t OrderBy::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t OrderBy::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t OrderBy::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::TraverseSpec>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::TraverseSpec>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN TraverseSpec::TraverseSpec() : edge_direction( ::nebula::storage::cpp2::EdgeDirection::BOTH), dedup(false), random(0), limit(0) {} THRIFT_IGNORE_ISSET_USE_WARNING_END TraverseSpec::~TraverseSpec() {} THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN TraverseSpec::TraverseSpec(apache::thrift::FragileConstructor, ::std::vector< ::nebula::cpp2::EdgeType> edge_types__arg, ::nebula::storage::cpp2::EdgeDirection edge_direction__arg, bool dedup__arg, ::std::vector< ::nebula::storage::cpp2::StatProp> stat_props__arg, ::std::vector< ::nebula::storage::cpp2::VertexProp> vertex_props__arg, ::std::vector< ::nebula::storage::cpp2::EdgeProp> edge_props__arg, ::std::vector< 
// [generated Thrift code] TraverseSpec (continued): fragile-constructor member inits
// (every __isset flag set true), __clear() resetting each field to its declared default,
// and the first part of operator== — required fields compared directly, optional fields
// compared via their *_ref() views. Do not hand-edit.
::nebula::storage::cpp2::Expr> expressions__arg, ::std::vector< ::nebula::storage::cpp2::OrderBy> order_by__arg, bool random__arg, int64_t limit__arg, ::std::string filter__arg) : edge_types(std::move(edge_types__arg)), edge_direction(std::move(edge_direction__arg)), dedup(std::move(dedup__arg)), stat_props(std::move(stat_props__arg)), vertex_props(std::move(vertex_props__arg)), edge_props(std::move(edge_props__arg)), expressions(std::move(expressions__arg)), order_by(std::move(order_by__arg)), random(std::move(random__arg)), limit(std::move(limit__arg)), filter(std::move(filter__arg)) { __isset.edge_types = true; __isset.edge_direction = true; __isset.dedup = true; __isset.stat_props = true; __isset.vertex_props = true; __isset.edge_props = true; __isset.expressions = true; __isset.order_by = true; __isset.random = true; __isset.limit = true; __isset.filter = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void TraverseSpec::__clear() { // clear all fields edge_types.clear(); edge_direction = ::nebula::storage::cpp2::EdgeDirection::BOTH; dedup = false; stat_props.clear(); vertex_props.clear(); edge_props.clear(); expressions.clear(); order_by.clear(); random = 0; limit = 0; filter = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool TraverseSpec::operator==(const TraverseSpec& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.edge_types == rhs.edge_types)) { return false; } if (!(lhs.edge_direction == rhs.edge_direction)) { return false; } if (!(lhs.dedup == rhs.dedup)) { return false; } if (lhs.stat_props_ref() != rhs.stat_props_ref()) { return false; } if (lhs.vertex_props_ref() != rhs.vertex_props_ref()) { return false; } if (lhs.edge_props_ref() != rhs.edge_props_ref()) { return false; } if (lhs.expressions_ref() != rhs.expressions_ref()) { return false; } if (lhs.order_by_ref() != rhs.order_by_ref()) { return false; } if (lhs.random_ref() 
// [generated Thrift code] TraverseSpec (continued): tail of operator== (filter compared
// only when both sides have it), full operator< walking fields in the same declaration
// order, get_edge_types() lvalue/rvalue accessors, and the start of the optional-field
// pointer accessors. Do not hand-edit.
!= rhs.random_ref()) { return false; } if (lhs.limit_ref() != rhs.limit_ref()) { return false; } if (lhs.filter_ref().has_value() != rhs.filter_ref().has_value()) { return false; } if (lhs.filter_ref().has_value()) { if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.filter, rhs.filter)) { return false; } } return true; } bool TraverseSpec::operator<(const TraverseSpec& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.edge_types == rhs.edge_types)) { return lhs.edge_types < rhs.edge_types; } if (!(lhs.edge_direction == rhs.edge_direction)) { return lhs.edge_direction < rhs.edge_direction; } if (!(lhs.dedup == rhs.dedup)) { return lhs.dedup < rhs.dedup; } if (lhs.stat_props_ref() != rhs.stat_props_ref()) { return lhs.stat_props_ref() < rhs.stat_props_ref(); } if (lhs.vertex_props_ref() != rhs.vertex_props_ref()) { return lhs.vertex_props_ref() < rhs.vertex_props_ref(); } if (lhs.edge_props_ref() != rhs.edge_props_ref()) { return lhs.edge_props_ref() < rhs.edge_props_ref(); } if (lhs.expressions_ref() != rhs.expressions_ref()) { return lhs.expressions_ref() < rhs.expressions_ref(); } if (lhs.order_by_ref() != rhs.order_by_ref()) { return lhs.order_by_ref() < rhs.order_by_ref(); } if (lhs.random_ref() != rhs.random_ref()) { return lhs.random_ref() < rhs.random_ref(); } if (lhs.limit_ref() != rhs.limit_ref()) { return lhs.limit_ref() < rhs.limit_ref(); } if (lhs.filter_ref().has_value() != rhs.filter_ref().has_value()) { return lhs.filter_ref().has_value() < rhs.filter_ref().has_value(); } if (lhs.filter_ref().has_value()) { if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.filter, rhs.filter)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.filter, rhs.filter); } } return false; } const ::std::vector< ::nebula::cpp2::EdgeType>& TraverseSpec::get_edge_types() const& { return edge_types; } ::std::vector< ::nebula::cpp2::EdgeType> TraverseSpec::get_edge_types() && { return std::move(edge_types); } const ::std::vector< 
// [generated Thrift code] TraverseSpec (continued): const and non-const pointer
// accessors for the optional fields stat_props, vertex_props, edge_props, expressions,
// order_by — each returns nullptr when the corresponding field is unset.
// Do not hand-edit.
::nebula::storage::cpp2::StatProp>* TraverseSpec::get_stat_props() const& { return stat_props_ref().has_value() ? std::addressof(stat_props) : nullptr; } ::std::vector< ::nebula::storage::cpp2::StatProp>* TraverseSpec::get_stat_props() & { return stat_props_ref().has_value() ? std::addressof(stat_props) : nullptr; } const ::std::vector< ::nebula::storage::cpp2::VertexProp>* TraverseSpec::get_vertex_props() const& { return vertex_props_ref().has_value() ? std::addressof(vertex_props) : nullptr; } ::std::vector< ::nebula::storage::cpp2::VertexProp>* TraverseSpec::get_vertex_props() & { return vertex_props_ref().has_value() ? std::addressof(vertex_props) : nullptr; } const ::std::vector< ::nebula::storage::cpp2::EdgeProp>* TraverseSpec::get_edge_props() const& { return edge_props_ref().has_value() ? std::addressof(edge_props) : nullptr; } ::std::vector< ::nebula::storage::cpp2::EdgeProp>* TraverseSpec::get_edge_props() & { return edge_props_ref().has_value() ? std::addressof(edge_props) : nullptr; } const ::std::vector< ::nebula::storage::cpp2::Expr>* TraverseSpec::get_expressions() const& { return expressions_ref().has_value() ? std::addressof(expressions) : nullptr; } ::std::vector< ::nebula::storage::cpp2::Expr>* TraverseSpec::get_expressions() & { return expressions_ref().has_value() ? std::addressof(expressions) : nullptr; } const ::std::vector< ::nebula::storage::cpp2::OrderBy>* TraverseSpec::get_order_by() const& { return order_by_ref().has_value() ? std::addressof(order_by) : nullptr; } ::std::vector< ::nebula::storage::cpp2::OrderBy>* TraverseSpec::get_order_by() & { return order_by_ref().has_value() ? 
// [generated Thrift code] TraverseSpec (continued): ADL swap (required fields swapped
// via value(), optional fields via value_unchecked()), Binary/Compact protocol template
// instantiations, and json/nimble option-consistency static_asserts for each
// struct-list field. Do not hand-edit.
std::addressof(order_by) : nullptr; } void swap(TraverseSpec& a, TraverseSpec& b) { using ::std::swap; swap(a.edge_types_ref().value(), b.edge_types_ref().value()); swap(a.edge_direction_ref().value(), b.edge_direction_ref().value()); swap(a.dedup_ref().value(), b.dedup_ref().value()); swap(a.stat_props_ref().value_unchecked(), b.stat_props_ref().value_unchecked()); swap(a.vertex_props_ref().value_unchecked(), b.vertex_props_ref().value_unchecked()); swap(a.edge_props_ref().value_unchecked(), b.edge_props_ref().value_unchecked()); swap(a.expressions_ref().value_unchecked(), b.expressions_ref().value_unchecked()); swap(a.order_by_ref().value_unchecked(), b.order_by_ref().value_unchecked()); swap(a.random_ref().value_unchecked(), b.random_ref().value_unchecked()); swap(a.limit_ref().value_unchecked(), b.limit_ref().value_unchecked()); swap(a.filter_ref().value_unchecked(), b.filter_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void TraverseSpec::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t TraverseSpec::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t TraverseSpec::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t TraverseSpec::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void TraverseSpec::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t TraverseSpec::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t TraverseSpec::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t TraverseSpec::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< TraverseSpec, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::StatProp>>, "inconsistent use of json 
option"); static_assert( ::apache::thrift::detail::st::gen_check_json< TraverseSpec, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::VertexProp>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< TraverseSpec, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::EdgeProp>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< TraverseSpec, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::Expr>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< TraverseSpec, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::OrderBy>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< TraverseSpec, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::StatProp>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< TraverseSpec, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::VertexProp>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< TraverseSpec, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::EdgeProp>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< TraverseSpec, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::Expr>>, "inconsistent use of nimble option"); static_assert( 
// [generated Thrift code] TraverseSpec (tail of nimble static_asserts);
// GetNeighborsRequest: field-name translation table, default constructor (space_id=0),
// destructor, fragile constructor (all __isset flags set), and the start of __clear().
// Do not hand-edit.
::apache::thrift::detail::st::gen_check_nimble< TraverseSpec, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::OrderBy>>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::GetNeighborsRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::GetNeighborsRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN GetNeighborsRequest::GetNeighborsRequest() : space_id(0) {} THRIFT_IGNORE_ISSET_USE_WARNING_END GetNeighborsRequest::~GetNeighborsRequest() {} THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN GetNeighborsRequest::GetNeighborsRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::std::vector<::std::string> column_names__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Row>> parts__arg, ::nebula::storage::cpp2::TraverseSpec traverse_spec__arg, ::nebula::storage::cpp2::RequestCommon common__arg) : space_id(std::move(space_id__arg)), column_names(std::move(column_names__arg)), parts(std::move(parts__arg)), traverse_spec(std::move(traverse_spec__arg)), common(std::move(common__arg)) { __isset.space_id = true; __isset.column_names = true; __isset.parts = true; __isset.traverse_spec = true; __isset.common = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void GetNeighborsRequest::__clear() { // clear all fields space_id = 0; column_names.clear(); parts.clear(); traverse_spec.__clear(); 
// [generated Thrift code] GetNeighborsRequest (continued): tail of __clear(),
// operator== (required fields compared directly, optional `common` via common_ref()),
// lvalue/rvalue accessors for column_names, parts and traverse_spec, and the nullable
// get_common() accessors (nullptr when unset). Do not hand-edit.
common.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool GetNeighborsRequest::operator==(const GetNeighborsRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.column_names == rhs.column_names)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } if (!(lhs.traverse_spec == rhs.traverse_spec)) { return false; } if (lhs.common_ref() != rhs.common_ref()) { return false; } return true; } const ::std::vector<::std::string>& GetNeighborsRequest::get_column_names() const& { return column_names; } ::std::vector<::std::string> GetNeighborsRequest::get_column_names() && { return std::move(column_names); } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Row>>& GetNeighborsRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Row>> GetNeighborsRequest::get_parts() && { return std::move(parts); } const ::nebula::storage::cpp2::TraverseSpec& GetNeighborsRequest::get_traverse_spec() const& { return traverse_spec; } ::nebula::storage::cpp2::TraverseSpec GetNeighborsRequest::get_traverse_spec() && { return std::move(traverse_spec); } const ::nebula::storage::cpp2::RequestCommon* GetNeighborsRequest::get_common() const& { return common_ref().has_value() ? std::addressof(common) : nullptr; } ::nebula::storage::cpp2::RequestCommon* GetNeighborsRequest::get_common() & { return common_ref().has_value() ? 
// [generated Thrift code] GetNeighborsRequest (continued): ADL swap (optional `common`
// via value_unchecked()), Binary/Compact protocol template instantiations, and the
// first json option-consistency static_asserts. Do not hand-edit.
std::addressof(common) : nullptr; } void swap(GetNeighborsRequest& a, GetNeighborsRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.column_names_ref().value(), b.column_names_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); swap(a.traverse_spec_ref().value(), b.traverse_spec_ref().value()); swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void GetNeighborsRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t GetNeighborsRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t GetNeighborsRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t GetNeighborsRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void GetNeighborsRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t GetNeighborsRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t GetNeighborsRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t GetNeighborsRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< GetNeighborsRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Row>>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< GetNeighborsRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::TraverseSpec>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< GetNeighborsRequest, 
// [generated Thrift code] GetNeighborsRequest (tail of json/nimble static_asserts);
// GetNeighborsResponse: field-name translation table, fragile constructor (only
// __isset.vertices is set — `result` is a required-style field), and the start of
// __clear(). Do not hand-edit.
::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetNeighborsRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Row>>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetNeighborsRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::TraverseSpec>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetNeighborsRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::GetNeighborsResponse>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::GetNeighborsResponse>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN GetNeighborsResponse::GetNeighborsResponse(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg, nebula::DataSet vertices__arg) : result(std::move(result__arg)), vertices(std::move(vertices__arg)) { __isset.vertices = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void GetNeighborsResponse::__clear() { // clear all fields result.__clear(); 
// [generated Thrift code] GetNeighborsResponse (continued): tail of __clear(),
// operator== (`result` compared directly, optional `vertices` via vertices_ref()),
// get_result() lvalue/rvalue accessors, nullable get_vertices() accessors, ADL swap,
// and Binary/Compact protocol instantiations. Do not hand-edit.
vertices.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool GetNeighborsResponse::operator==(const GetNeighborsResponse& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (lhs.vertices_ref() != rhs.vertices_ref()) { return false; } return true; } const ::nebula::storage::cpp2::ResponseCommon& GetNeighborsResponse::get_result() const& { return result; } ::nebula::storage::cpp2::ResponseCommon GetNeighborsResponse::get_result() && { return std::move(result); } const nebula::DataSet* GetNeighborsResponse::get_vertices() const& { return vertices_ref().has_value() ? std::addressof(vertices) : nullptr; } nebula::DataSet* GetNeighborsResponse::get_vertices() & { return vertices_ref().has_value() ? std::addressof(vertices) : nullptr; } void swap(GetNeighborsResponse& a, GetNeighborsResponse& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); swap(a.vertices_ref().value_unchecked(), b.vertices_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void GetNeighborsResponse::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t GetNeighborsResponse::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t GetNeighborsResponse::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t GetNeighborsResponse::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void GetNeighborsResponse::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t GetNeighborsResponse::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t GetNeighborsResponse::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t GetNeighborsResponse::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( 
::apache::thrift::detail::st::gen_check_json< GetNeighborsResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< GetNeighborsResponse, ::apache::thrift::type_class::structure, nebula::DataSet>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetNeighborsResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetNeighborsResponse, ::apache::thrift::type_class::structure, nebula::DataSet>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::ExecResponse>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::ExecResponse>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN ExecResponse::ExecResponse(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg) : result(std::move(result__arg)) {} THRIFT_IGNORE_ISSET_USE_WARNING_END void ExecResponse::__clear() { // clear all fields result.__clear(); } bool ExecResponse::operator==(const ExecResponse& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } return true; } bool ExecResponse::operator<(const ExecResponse& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; 
if (!(lhs.result == rhs.result)) { return lhs.result < rhs.result; } return false; } const ::nebula::storage::cpp2::ResponseCommon& ExecResponse::get_result() const& { return result; } ::nebula::storage::cpp2::ResponseCommon ExecResponse::get_result() && { return std::move(result); } void swap(ExecResponse& a, ExecResponse& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); } template void ExecResponse::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t ExecResponse::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t ExecResponse::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t ExecResponse::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void ExecResponse::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t ExecResponse::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t ExecResponse::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t ExecResponse::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< ExecResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< ExecResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::GetPropRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::GetPropRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), 
data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN GetPropRequest::GetPropRequest() : space_id(0), dedup(false), limit(0) {} THRIFT_IGNORE_ISSET_USE_WARNING_END GetPropRequest::~GetPropRequest() {} THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN GetPropRequest::GetPropRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Row>> parts__arg, ::std::vector< ::nebula::storage::cpp2::VertexProp> vertex_props__arg, ::std::vector< ::nebula::storage::cpp2::EdgeProp> edge_props__arg, ::std::vector< ::nebula::storage::cpp2::Expr> expressions__arg, bool dedup__arg, ::std::vector< ::nebula::storage::cpp2::OrderBy> order_by__arg, int64_t limit__arg, ::std::string filter__arg, ::nebula::storage::cpp2::RequestCommon common__arg) : space_id(std::move(space_id__arg)), parts(std::move(parts__arg)), vertex_props(std::move(vertex_props__arg)), edge_props(std::move(edge_props__arg)), expressions(std::move(expressions__arg)), dedup(std::move(dedup__arg)), order_by(std::move(order_by__arg)), limit(std::move(limit__arg)), filter(std::move(filter__arg)), common(std::move(common__arg)) { __isset.space_id = true; __isset.parts = true; __isset.vertex_props = true; __isset.edge_props = true; __isset.expressions = true; __isset.dedup = true; __isset.order_by = true; __isset.limit = true; __isset.filter = true; __isset.common = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void GetPropRequest::__clear() { // clear all fields space_id = 0; parts.clear(); vertex_props.clear(); edge_props.clear(); expressions.clear(); dedup = false; order_by.clear(); limit = 0; filter = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); common.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN 
__isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool GetPropRequest::operator==(const GetPropRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } if (lhs.vertex_props_ref() != rhs.vertex_props_ref()) { return false; } if (lhs.edge_props_ref() != rhs.edge_props_ref()) { return false; } if (lhs.expressions_ref() != rhs.expressions_ref()) { return false; } if (!(lhs.dedup == rhs.dedup)) { return false; } if (lhs.order_by_ref() != rhs.order_by_ref()) { return false; } if (lhs.limit_ref() != rhs.limit_ref()) { return false; } if (lhs.filter_ref().has_value() != rhs.filter_ref().has_value()) { return false; } if (lhs.filter_ref().has_value()) { if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.filter, rhs.filter)) { return false; } } if (lhs.common_ref() != rhs.common_ref()) { return false; } return true; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Row>>& GetPropRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Row>> GetPropRequest::get_parts() && { return std::move(parts); } const ::std::vector< ::nebula::storage::cpp2::VertexProp>* GetPropRequest::get_vertex_props() const& { return vertex_props_ref().has_value() ? std::addressof(vertex_props) : nullptr; } ::std::vector< ::nebula::storage::cpp2::VertexProp>* GetPropRequest::get_vertex_props() & { return vertex_props_ref().has_value() ? std::addressof(vertex_props) : nullptr; } const ::std::vector< ::nebula::storage::cpp2::EdgeProp>* GetPropRequest::get_edge_props() const& { return edge_props_ref().has_value() ? std::addressof(edge_props) : nullptr; } ::std::vector< ::nebula::storage::cpp2::EdgeProp>* GetPropRequest::get_edge_props() & { return edge_props_ref().has_value() ? 
std::addressof(edge_props) : nullptr; } const ::std::vector< ::nebula::storage::cpp2::Expr>* GetPropRequest::get_expressions() const& { return expressions_ref().has_value() ? std::addressof(expressions) : nullptr; } ::std::vector< ::nebula::storage::cpp2::Expr>* GetPropRequest::get_expressions() & { return expressions_ref().has_value() ? std::addressof(expressions) : nullptr; } const ::std::vector< ::nebula::storage::cpp2::OrderBy>* GetPropRequest::get_order_by() const& { return order_by_ref().has_value() ? std::addressof(order_by) : nullptr; } ::std::vector< ::nebula::storage::cpp2::OrderBy>* GetPropRequest::get_order_by() & { return order_by_ref().has_value() ? std::addressof(order_by) : nullptr; } const ::nebula::storage::cpp2::RequestCommon* GetPropRequest::get_common() const& { return common_ref().has_value() ? std::addressof(common) : nullptr; } ::nebula::storage::cpp2::RequestCommon* GetPropRequest::get_common() & { return common_ref().has_value() ? std::addressof(common) : nullptr; } void swap(GetPropRequest& a, GetPropRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); swap(a.vertex_props_ref().value_unchecked(), b.vertex_props_ref().value_unchecked()); swap(a.edge_props_ref().value_unchecked(), b.edge_props_ref().value_unchecked()); swap(a.expressions_ref().value_unchecked(), b.expressions_ref().value_unchecked()); swap(a.dedup_ref().value(), b.dedup_ref().value()); swap(a.order_by_ref().value_unchecked(), b.order_by_ref().value_unchecked()); swap(a.limit_ref().value_unchecked(), b.limit_ref().value_unchecked()); swap(a.filter_ref().value_unchecked(), b.filter_ref().value_unchecked()); swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void GetPropRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t 
GetPropRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t GetPropRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t GetPropRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void GetPropRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t GetPropRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t GetPropRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t GetPropRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< GetPropRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Row>>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< GetPropRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::VertexProp>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< GetPropRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::EdgeProp>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< GetPropRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::Expr>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< GetPropRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::OrderBy>>, "inconsistent use of json option"); static_assert( 
::apache::thrift::detail::st::gen_check_json< GetPropRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetPropRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Row>>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetPropRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::VertexProp>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetPropRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::EdgeProp>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetPropRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::Expr>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetPropRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::OrderBy>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetPropRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::GetPropResponse>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = 
apache::thrift::TStructDataStorage<::nebula::storage::cpp2::GetPropResponse>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN GetPropResponse::GetPropResponse(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg, nebula::DataSet props__arg) : result(std::move(result__arg)), props(std::move(props__arg)) { __isset.result = true; __isset.props = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void GetPropResponse::__clear() { // clear all fields result.__clear(); props.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool GetPropResponse::operator==(const GetPropResponse& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (lhs.props_ref() != rhs.props_ref()) { return false; } return true; } const ::nebula::storage::cpp2::ResponseCommon& GetPropResponse::get_result() const& { return result; } ::nebula::storage::cpp2::ResponseCommon GetPropResponse::get_result() && { return std::move(result); } const nebula::DataSet* GetPropResponse::get_props() const& { return props_ref().has_value() ? std::addressof(props) : nullptr; } nebula::DataSet* GetPropResponse::get_props() & { return props_ref().has_value() ? 
std::addressof(props) : nullptr; } void swap(GetPropResponse& a, GetPropResponse& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); swap(a.props_ref().value_unchecked(), b.props_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void GetPropResponse::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t GetPropResponse::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t GetPropResponse::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t GetPropResponse::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void GetPropResponse::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t GetPropResponse::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t GetPropResponse::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t GetPropResponse::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< GetPropResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< GetPropResponse, ::apache::thrift::type_class::structure, nebula::DataSet>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetPropResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetPropResponse, ::apache::thrift::type_class::structure, nebula::DataSet>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void 
TccStructTraits<::nebula::storage::cpp2::NewTag>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::NewTag>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN NewTag::NewTag(apache::thrift::FragileConstructor, ::nebula::cpp2::TagID tag_id__arg, ::std::vector<nebula::Value> props__arg) : tag_id(std::move(tag_id__arg)), props(std::move(props__arg)) { __isset.tag_id = true; __isset.props = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void NewTag::__clear() { // clear all fields tag_id = 0; props.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool NewTag::operator==(const NewTag& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.tag_id == rhs.tag_id)) { return false; } if (!(lhs.props == rhs.props)) { return false; } return true; } const ::std::vector<nebula::Value>& NewTag::get_props() const& { return props; } ::std::vector<nebula::Value> NewTag::get_props() && { return std::move(props); } void swap(NewTag& a, NewTag& b) { using ::std::swap; swap(a.tag_id_ref().value(), b.tag_id_ref().value()); swap(a.props_ref().value(), b.props_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void NewTag::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t NewTag::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t NewTag::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t 
NewTag::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void NewTag::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t NewTag::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t NewTag::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t NewTag::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< NewTag, ::apache::thrift::type_class::list<::apache::thrift::type_class::variant>, ::std::vector<nebula::Value>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< NewTag, ::apache::thrift::type_class::list<::apache::thrift::type_class::variant>, ::std::vector<nebula::Value>>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::NewVertex>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::NewVertex>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN NewVertex::NewVertex(apache::thrift::FragileConstructor, nebula::Value id__arg, ::std::vector< ::nebula::storage::cpp2::NewTag> tags__arg) : id(std::move(id__arg)), tags(std::move(tags__arg)) { __isset.id = true; __isset.tags = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void NewVertex::__clear() { // clear all fields id.__clear(); tags.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END 
} bool NewVertex::operator==(const NewVertex& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.id == rhs.id)) { return false; } if (!(lhs.tags == rhs.tags)) { return false; } return true; } const nebula::Value& NewVertex::get_id() const& { return id; } nebula::Value NewVertex::get_id() && { return std::move(id); } const ::std::vector< ::nebula::storage::cpp2::NewTag>& NewVertex::get_tags() const& { return tags; } ::std::vector< ::nebula::storage::cpp2::NewTag> NewVertex::get_tags() && { return std::move(tags); } void swap(NewVertex& a, NewVertex& b) { using ::std::swap; swap(a.id_ref().value(), b.id_ref().value()); swap(a.tags_ref().value(), b.tags_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void NewVertex::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t NewVertex::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t NewVertex::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t NewVertex::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void NewVertex::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t NewVertex::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t NewVertex::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t NewVertex::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< NewVertex, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< NewVertex, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::NewTag>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< NewVertex, 
::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< NewVertex, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::NewTag>>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::EdgeKey>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::EdgeKey>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN EdgeKey::EdgeKey(apache::thrift::FragileConstructor, nebula::Value src__arg, ::nebula::cpp2::EdgeType edge_type__arg, ::nebula::cpp2::EdgeRanking ranking__arg, nebula::Value dst__arg) : src(std::move(src__arg)), edge_type(std::move(edge_type__arg)), ranking(std::move(ranking__arg)), dst(std::move(dst__arg)) { __isset.src = true; __isset.edge_type = true; __isset.ranking = true; __isset.dst = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void EdgeKey::__clear() { // clear all fields src.__clear(); edge_type = 0; ranking = 0; dst.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool EdgeKey::operator==(const EdgeKey& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.src == rhs.src)) { return false; } if (!(lhs.edge_type == rhs.edge_type)) { return false; } if (!(lhs.ranking == rhs.ranking)) { return false; } if (!(lhs.dst == rhs.dst)) { return false; } return true; } const nebula::Value& 
EdgeKey::get_src() const& { return src; } nebula::Value EdgeKey::get_src() && { return std::move(src); } const nebula::Value& EdgeKey::get_dst() const& { return dst; } nebula::Value EdgeKey::get_dst() && { return std::move(dst); } void swap(EdgeKey& a, EdgeKey& b) { using ::std::swap; swap(a.src_ref().value(), b.src_ref().value()); swap(a.edge_type_ref().value(), b.edge_type_ref().value()); swap(a.ranking_ref().value(), b.ranking_ref().value()); swap(a.dst_ref().value(), b.dst_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void EdgeKey::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t EdgeKey::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t EdgeKey::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t EdgeKey::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void EdgeKey::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t EdgeKey::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t EdgeKey::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t EdgeKey::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< EdgeKey, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< EdgeKey, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< EdgeKey, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< EdgeKey, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace 
apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::NewEdge>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::NewEdge>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN NewEdge::NewEdge(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::EdgeKey key__arg, ::std::vector<nebula::Value> props__arg) : key(std::move(key__arg)), props(std::move(props__arg)) { __isset.key = true; __isset.props = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void NewEdge::__clear() { // clear all fields key.__clear(); props.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool NewEdge::operator==(const NewEdge& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.key == rhs.key)) { return false; } if (!(lhs.props == rhs.props)) { return false; } return true; } const ::nebula::storage::cpp2::EdgeKey& NewEdge::get_key() const& { return key; } ::nebula::storage::cpp2::EdgeKey NewEdge::get_key() && { return std::move(key); } const ::std::vector<nebula::Value>& NewEdge::get_props() const& { return props; } ::std::vector<nebula::Value> NewEdge::get_props() && { return std::move(props); } void swap(NewEdge& a, NewEdge& b) { using ::std::swap; swap(a.key_ref().value(), b.key_ref().value()); swap(a.props_ref().value(), b.props_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void NewEdge::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t 
NewEdge::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t NewEdge::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t NewEdge::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void NewEdge::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t NewEdge::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t NewEdge::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t NewEdge::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< NewEdge, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::EdgeKey>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< NewEdge, ::apache::thrift::type_class::list<::apache::thrift::type_class::variant>, ::std::vector<nebula::Value>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< NewEdge, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::EdgeKey>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< NewEdge, ::apache::thrift::type_class::list<::apache::thrift::type_class::variant>, ::std::vector<nebula::Value>>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::AddVerticesRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::AddVerticesRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } 
// namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN AddVerticesRequest::AddVerticesRequest() : space_id(0), if_not_exists(0) {} THRIFT_IGNORE_ISSET_USE_WARNING_END AddVerticesRequest::~AddVerticesRequest() {} THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN AddVerticesRequest::AddVerticesRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewVertex>> parts__arg, std::unordered_map< ::nebula::cpp2::TagID, ::std::vector<::std::string>> prop_names__arg, bool if_not_exists__arg, ::nebula::storage::cpp2::RequestCommon common__arg) : space_id(std::move(space_id__arg)), parts(std::move(parts__arg)), prop_names(std::move(prop_names__arg)), if_not_exists(std::move(if_not_exists__arg)), common(std::move(common__arg)) { __isset.space_id = true; __isset.parts = true; __isset.prop_names = true; __isset.if_not_exists = true; __isset.common = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void AddVerticesRequest::__clear() { // clear all fields space_id = 0; parts.clear(); prop_names.clear(); if_not_exists = 0; common.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool AddVerticesRequest::operator==(const AddVerticesRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } if (!(lhs.prop_names == rhs.prop_names)) { return false; } if (!(lhs.if_not_exists == rhs.if_not_exists)) { return false; } if (lhs.common_ref() != rhs.common_ref()) { return false; } return true; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewVertex>>& AddVerticesRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewVertex>> 
AddVerticesRequest::get_parts() && { return std::move(parts); } const std::unordered_map< ::nebula::cpp2::TagID, ::std::vector<::std::string>>& AddVerticesRequest::get_prop_names() const& { return prop_names; } std::unordered_map< ::nebula::cpp2::TagID, ::std::vector<::std::string>> AddVerticesRequest::get_prop_names() && { return std::move(prop_names); } const ::nebula::storage::cpp2::RequestCommon* AddVerticesRequest::get_common() const& { return common_ref().has_value() ? std::addressof(common) : nullptr; } ::nebula::storage::cpp2::RequestCommon* AddVerticesRequest::get_common() & { return common_ref().has_value() ? std::addressof(common) : nullptr; } void swap(AddVerticesRequest& a, AddVerticesRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); swap(a.prop_names_ref().value(), b.prop_names_ref().value()); swap(a.if_not_exists_ref().value(), b.if_not_exists_ref().value()); swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void AddVerticesRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t AddVerticesRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t AddVerticesRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t AddVerticesRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void AddVerticesRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t AddVerticesRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t AddVerticesRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t AddVerticesRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( 
::apache::thrift::detail::st::gen_check_json< AddVerticesRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewVertex>>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< AddVerticesRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< AddVerticesRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewVertex>>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< AddVerticesRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::AddEdgesRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::AddEdgesRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN AddEdgesRequest::AddEdgesRequest() : space_id(0), if_not_exists(0) {} THRIFT_IGNORE_ISSET_USE_WARNING_END AddEdgesRequest::~AddEdgesRequest() {} 
THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN AddEdgesRequest::AddEdgesRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewEdge>> parts__arg, ::std::vector<::std::string> prop_names__arg, bool if_not_exists__arg, ::nebula::storage::cpp2::RequestCommon common__arg) : space_id(std::move(space_id__arg)), parts(std::move(parts__arg)), prop_names(std::move(prop_names__arg)), if_not_exists(std::move(if_not_exists__arg)), common(std::move(common__arg)) { __isset.space_id = true; __isset.parts = true; __isset.prop_names = true; __isset.if_not_exists = true; __isset.common = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void AddEdgesRequest::__clear() { // clear all fields space_id = 0; parts.clear(); prop_names.clear(); if_not_exists = 0; common.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool AddEdgesRequest::operator==(const AddEdgesRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } if (!(lhs.prop_names == rhs.prop_names)) { return false; } if (!(lhs.if_not_exists == rhs.if_not_exists)) { return false; } if (lhs.common_ref() != rhs.common_ref()) { return false; } return true; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewEdge>>& AddEdgesRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewEdge>> AddEdgesRequest::get_parts() && { return std::move(parts); } const ::std::vector<::std::string>& AddEdgesRequest::get_prop_names() const& { return prop_names; } ::std::vector<::std::string> AddEdgesRequest::get_prop_names() && { return std::move(prop_names); } const ::nebula::storage::cpp2::RequestCommon* AddEdgesRequest::get_common() const& { return 
common_ref().has_value() ? std::addressof(common) : nullptr; } ::nebula::storage::cpp2::RequestCommon* AddEdgesRequest::get_common() & { return common_ref().has_value() ? std::addressof(common) : nullptr; } void swap(AddEdgesRequest& a, AddEdgesRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); swap(a.prop_names_ref().value(), b.prop_names_ref().value()); swap(a.if_not_exists_ref().value(), b.if_not_exists_ref().value()); swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void AddEdgesRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t AddEdgesRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t AddEdgesRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t AddEdgesRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void AddEdgesRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t AddEdgesRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t AddEdgesRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t AddEdgesRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< AddEdgesRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewEdge>>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< AddEdgesRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, 
"inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< AddEdgesRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewEdge>>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< AddEdgesRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::DeleteVerticesRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::DeleteVerticesRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN DeleteVerticesRequest::DeleteVerticesRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Value>> parts__arg, ::nebula::storage::cpp2::RequestCommon common__arg) : space_id(std::move(space_id__arg)), parts(std::move(parts__arg)), common(std::move(common__arg)) { __isset.space_id = true; __isset.parts = true; __isset.common = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void DeleteVerticesRequest::__clear() { // clear all fields space_id = 0; parts.clear(); common.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; 
THRIFT_IGNORE_ISSET_USE_WARNING_END } bool DeleteVerticesRequest::operator==(const DeleteVerticesRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } if (lhs.common_ref() != rhs.common_ref()) { return false; } return true; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Value>>& DeleteVerticesRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Value>> DeleteVerticesRequest::get_parts() && { return std::move(parts); } const ::nebula::storage::cpp2::RequestCommon* DeleteVerticesRequest::get_common() const& { return common_ref().has_value() ? std::addressof(common) : nullptr; } ::nebula::storage::cpp2::RequestCommon* DeleteVerticesRequest::get_common() & { return common_ref().has_value() ? std::addressof(common) : nullptr; } void swap(DeleteVerticesRequest& a, DeleteVerticesRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void DeleteVerticesRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t DeleteVerticesRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t DeleteVerticesRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t DeleteVerticesRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void DeleteVerticesRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t DeleteVerticesRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t 
DeleteVerticesRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t DeleteVerticesRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< DeleteVerticesRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::variant>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Value>>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< DeleteVerticesRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< DeleteVerticesRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::variant>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::Value>>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< DeleteVerticesRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::DeleteEdgesRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::DeleteEdgesRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { 
namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN DeleteEdgesRequest::DeleteEdgesRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::EdgeKey>> parts__arg, ::nebula::storage::cpp2::RequestCommon common__arg) : space_id(std::move(space_id__arg)), parts(std::move(parts__arg)), common(std::move(common__arg)) { __isset.space_id = true; __isset.parts = true; __isset.common = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void DeleteEdgesRequest::__clear() { // clear all fields space_id = 0; parts.clear(); common.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool DeleteEdgesRequest::operator==(const DeleteEdgesRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } if (lhs.common_ref() != rhs.common_ref()) { return false; } return true; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::EdgeKey>>& DeleteEdgesRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::EdgeKey>> DeleteEdgesRequest::get_parts() && { return std::move(parts); } const ::nebula::storage::cpp2::RequestCommon* DeleteEdgesRequest::get_common() const& { return common_ref().has_value() ? std::addressof(common) : nullptr; } ::nebula::storage::cpp2::RequestCommon* DeleteEdgesRequest::get_common() & { return common_ref().has_value() ? 
std::addressof(common) : nullptr; } void swap(DeleteEdgesRequest& a, DeleteEdgesRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void DeleteEdgesRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t DeleteEdgesRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t DeleteEdgesRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t DeleteEdgesRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void DeleteEdgesRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t DeleteEdgesRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t DeleteEdgesRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t DeleteEdgesRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< DeleteEdgesRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::EdgeKey>>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< DeleteEdgesRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< DeleteEdgesRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, 
std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::EdgeKey>>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< DeleteEdgesRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::DelTags>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::DelTags>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN DelTags::DelTags(apache::thrift::FragileConstructor, nebula::Value id__arg, ::std::vector< ::nebula::cpp2::TagID> tags__arg) : id(std::move(id__arg)), tags(std::move(tags__arg)) { __isset.id = true; __isset.tags = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void DelTags::__clear() { // clear all fields id.__clear(); tags.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool DelTags::operator==(const DelTags& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.id == rhs.id)) { return false; } if (!(lhs.tags == rhs.tags)) { return false; } return true; } const nebula::Value& DelTags::get_id() const& { return id; } nebula::Value DelTags::get_id() && { return std::move(id); } const ::std::vector< ::nebula::cpp2::TagID>& DelTags::get_tags() const& { return tags; } ::std::vector< ::nebula::cpp2::TagID> DelTags::get_tags() && { return std::move(tags); } void swap(DelTags& a, DelTags& 
b) { using ::std::swap; swap(a.id_ref().value(), b.id_ref().value()); swap(a.tags_ref().value(), b.tags_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void DelTags::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t DelTags::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t DelTags::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t DelTags::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void DelTags::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t DelTags::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t DelTags::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t DelTags::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< DelTags, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< DelTags, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::DeleteTagsRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::DeleteTagsRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN 
DeleteTagsRequest::DeleteTagsRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::DelTags>> parts__arg, ::nebula::storage::cpp2::RequestCommon common__arg) : space_id(std::move(space_id__arg)), parts(std::move(parts__arg)), common(std::move(common__arg)) { __isset.space_id = true; __isset.parts = true; __isset.common = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void DeleteTagsRequest::__clear() { // clear all fields space_id = 0; parts.clear(); common.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool DeleteTagsRequest::operator==(const DeleteTagsRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } if (lhs.common_ref() != rhs.common_ref()) { return false; } return true; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::DelTags>>& DeleteTagsRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::DelTags>> DeleteTagsRequest::get_parts() && { return std::move(parts); } const ::nebula::storage::cpp2::RequestCommon* DeleteTagsRequest::get_common() const& { return common_ref().has_value() ? std::addressof(common) : nullptr; } ::nebula::storage::cpp2::RequestCommon* DeleteTagsRequest::get_common() & { return common_ref().has_value() ? 
std::addressof(common) : nullptr; } void swap(DeleteTagsRequest& a, DeleteTagsRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void DeleteTagsRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t DeleteTagsRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t DeleteTagsRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t DeleteTagsRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void DeleteTagsRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t DeleteTagsRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t DeleteTagsRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t DeleteTagsRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< DeleteTagsRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::DelTags>>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< DeleteTagsRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< DeleteTagsRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, 
std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::DelTags>>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< DeleteTagsRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::UpdateResponse>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::UpdateResponse>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN UpdateResponse::UpdateResponse(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg, nebula::DataSet props__arg) : result(std::move(result__arg)), props(std::move(props__arg)) { __isset.props = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void UpdateResponse::__clear() { // clear all fields result.__clear(); props.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool UpdateResponse::operator==(const UpdateResponse& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (lhs.props_ref() != rhs.props_ref()) { return false; } return true; } const ::nebula::storage::cpp2::ResponseCommon& UpdateResponse::get_result() const& { return result; } ::nebula::storage::cpp2::ResponseCommon UpdateResponse::get_result() && { return std::move(result); } const nebula::DataSet* UpdateResponse::get_props() 
const& { return props_ref().has_value() ? std::addressof(props) : nullptr; } nebula::DataSet* UpdateResponse::get_props() & { return props_ref().has_value() ? std::addressof(props) : nullptr; } void swap(UpdateResponse& a, UpdateResponse& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); swap(a.props_ref().value_unchecked(), b.props_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void UpdateResponse::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t UpdateResponse::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t UpdateResponse::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t UpdateResponse::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void UpdateResponse::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t UpdateResponse::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t UpdateResponse::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t UpdateResponse::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< UpdateResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< UpdateResponse, ::apache::thrift::type_class::structure, nebula::DataSet>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< UpdateResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< UpdateResponse, ::apache::thrift::type_class::structure, nebula::DataSet>, "inconsistent use of nimble option"); 
}}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::UpdatedProp>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::UpdatedProp>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN UpdatedProp::UpdatedProp(apache::thrift::FragileConstructor, ::std::string name__arg, ::std::string value__arg) : name(std::move(name__arg)), value(std::move(value__arg)) {} THRIFT_IGNORE_ISSET_USE_WARNING_END void UpdatedProp::__clear() { // clear all fields name = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); value = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); } bool UpdatedProp::operator==(const UpdatedProp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.name, rhs.name)) { return false; } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.value, rhs.value)) { return false; } return true; } bool UpdatedProp::operator<(const UpdatedProp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.name, rhs.name)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.name, rhs.name); } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.value, rhs.value)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.value, rhs.value); } return false; } void swap(UpdatedProp& a, UpdatedProp& b) { using ::std::swap; swap(a.name_ref().value(), b.name_ref().value()); 
swap(a.value_ref().value(), b.value_ref().value()); } template void UpdatedProp::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t UpdatedProp::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t UpdatedProp::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t UpdatedProp::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void UpdatedProp::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t UpdatedProp::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t UpdatedProp::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t UpdatedProp::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::UpdateVertexRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::UpdateVertexRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN UpdateVertexRequest::UpdateVertexRequest() : space_id(0), part_id(0), tag_id(0), insertable(false) {} THRIFT_IGNORE_ISSET_USE_WARNING_END UpdateVertexRequest::~UpdateVertexRequest() {} THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN UpdateVertexRequest::UpdateVertexRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::cpp2::PartitionID part_id__arg, nebula::Value vertex_id__arg, ::nebula::cpp2::TagID tag_id__arg, ::std::vector< 
::nebula::storage::cpp2::UpdatedProp> updated_props__arg, bool insertable__arg, ::std::vector<::std::string> return_props__arg, ::std::string condition__arg, ::nebula::storage::cpp2::RequestCommon common__arg) : space_id(std::move(space_id__arg)), part_id(std::move(part_id__arg)), vertex_id(std::move(vertex_id__arg)), tag_id(std::move(tag_id__arg)), updated_props(std::move(updated_props__arg)), insertable(std::move(insertable__arg)), return_props(std::move(return_props__arg)), condition(std::move(condition__arg)), common(std::move(common__arg)) { __isset.space_id = true; __isset.part_id = true; __isset.vertex_id = true; __isset.updated_props = true; __isset.insertable = true; __isset.return_props = true; __isset.condition = true; __isset.common = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void UpdateVertexRequest::__clear() { // clear all fields space_id = 0; part_id = 0; vertex_id.__clear(); tag_id = 0; updated_props.clear(); insertable = false; return_props.clear(); condition = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); common.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool UpdateVertexRequest::operator==(const UpdateVertexRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } if (!(lhs.vertex_id == rhs.vertex_id)) { return false; } if (!(lhs.tag_id == rhs.tag_id)) { return false; } if (!(lhs.updated_props == rhs.updated_props)) { return false; } if (lhs.insertable_ref() != rhs.insertable_ref()) { return false; } if (lhs.return_props_ref() != rhs.return_props_ref()) { return false; } if (lhs.condition_ref().has_value() != rhs.condition_ref().has_value()) { return false; } if (lhs.condition_ref().has_value()) { if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.condition, rhs.condition)) { return false; } } if (lhs.common_ref() != rhs.common_ref()) { return 
false; } return true; } const nebula::Value& UpdateVertexRequest::get_vertex_id() const& { return vertex_id; } nebula::Value UpdateVertexRequest::get_vertex_id() && { return std::move(vertex_id); } const ::std::vector< ::nebula::storage::cpp2::UpdatedProp>& UpdateVertexRequest::get_updated_props() const& { return updated_props; } ::std::vector< ::nebula::storage::cpp2::UpdatedProp> UpdateVertexRequest::get_updated_props() && { return std::move(updated_props); } const ::std::vector<::std::string>* UpdateVertexRequest::get_return_props() const& { return return_props_ref().has_value() ? std::addressof(return_props) : nullptr; } ::std::vector<::std::string>* UpdateVertexRequest::get_return_props() & { return return_props_ref().has_value() ? std::addressof(return_props) : nullptr; } const ::nebula::storage::cpp2::RequestCommon* UpdateVertexRequest::get_common() const& { return common_ref().has_value() ? std::addressof(common) : nullptr; } ::nebula::storage::cpp2::RequestCommon* UpdateVertexRequest::get_common() & { return common_ref().has_value() ? 
std::addressof(common) : nullptr; } void swap(UpdateVertexRequest& a, UpdateVertexRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.part_id_ref().value(), b.part_id_ref().value()); swap(a.vertex_id_ref().value(), b.vertex_id_ref().value()); swap(a.tag_id_ref().value(), b.tag_id_ref().value()); swap(a.updated_props_ref().value(), b.updated_props_ref().value()); swap(a.insertable_ref().value_unchecked(), b.insertable_ref().value_unchecked()); swap(a.return_props_ref().value_unchecked(), b.return_props_ref().value_unchecked()); swap(a.condition_ref().value_unchecked(), b.condition_ref().value_unchecked()); swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void UpdateVertexRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t UpdateVertexRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t UpdateVertexRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t UpdateVertexRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void UpdateVertexRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t UpdateVertexRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t UpdateVertexRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t UpdateVertexRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< UpdateVertexRequest, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< UpdateVertexRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< 
::nebula::storage::cpp2::UpdatedProp>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< UpdateVertexRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< UpdateVertexRequest, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< UpdateVertexRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::UpdatedProp>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< UpdateVertexRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::UpdateEdgeRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::UpdateEdgeRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN UpdateEdgeRequest::UpdateEdgeRequest() : space_id(0), part_id(0), insertable(false) {} THRIFT_IGNORE_ISSET_USE_WARNING_END UpdateEdgeRequest::~UpdateEdgeRequest() {} THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN UpdateEdgeRequest::UpdateEdgeRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::cpp2::PartitionID part_id__arg, 
::nebula::storage::cpp2::EdgeKey edge_key__arg, ::std::vector< ::nebula::storage::cpp2::UpdatedProp> updated_props__arg, bool insertable__arg, ::std::vector<::std::string> return_props__arg, ::std::string condition__arg, ::nebula::storage::cpp2::RequestCommon common__arg) : space_id(std::move(space_id__arg)), part_id(std::move(part_id__arg)), edge_key(std::move(edge_key__arg)), updated_props(std::move(updated_props__arg)), insertable(std::move(insertable__arg)), return_props(std::move(return_props__arg)), condition(std::move(condition__arg)), common(std::move(common__arg)) { __isset.space_id = true; __isset.part_id = true; __isset.edge_key = true; __isset.updated_props = true; __isset.insertable = true; __isset.return_props = true; __isset.condition = true; __isset.common = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void UpdateEdgeRequest::__clear() { // clear all fields space_id = 0; part_id = 0; edge_key.__clear(); updated_props.clear(); insertable = false; return_props.clear(); condition = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); common.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool UpdateEdgeRequest::operator==(const UpdateEdgeRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } if (!(lhs.edge_key == rhs.edge_key)) { return false; } if (!(lhs.updated_props == rhs.updated_props)) { return false; } if (lhs.insertable_ref() != rhs.insertable_ref()) { return false; } if (lhs.return_props_ref() != rhs.return_props_ref()) { return false; } if (lhs.condition_ref().has_value() != rhs.condition_ref().has_value()) { return false; } if (lhs.condition_ref().has_value()) { if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.condition, rhs.condition)) { return false; } } if (lhs.common_ref() != rhs.common_ref()) { return false; } return true; } const 
::nebula::storage::cpp2::EdgeKey& UpdateEdgeRequest::get_edge_key() const& { return edge_key; } ::nebula::storage::cpp2::EdgeKey UpdateEdgeRequest::get_edge_key() && { return std::move(edge_key); } const ::std::vector< ::nebula::storage::cpp2::UpdatedProp>& UpdateEdgeRequest::get_updated_props() const& { return updated_props; } ::std::vector< ::nebula::storage::cpp2::UpdatedProp> UpdateEdgeRequest::get_updated_props() && { return std::move(updated_props); } const ::std::vector<::std::string>* UpdateEdgeRequest::get_return_props() const& { return return_props_ref().has_value() ? std::addressof(return_props) : nullptr; } ::std::vector<::std::string>* UpdateEdgeRequest::get_return_props() & { return return_props_ref().has_value() ? std::addressof(return_props) : nullptr; } const ::nebula::storage::cpp2::RequestCommon* UpdateEdgeRequest::get_common() const& { return common_ref().has_value() ? std::addressof(common) : nullptr; } ::nebula::storage::cpp2::RequestCommon* UpdateEdgeRequest::get_common() & { return common_ref().has_value() ? 
std::addressof(common) : nullptr; } void swap(UpdateEdgeRequest& a, UpdateEdgeRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.part_id_ref().value(), b.part_id_ref().value()); swap(a.edge_key_ref().value(), b.edge_key_ref().value()); swap(a.updated_props_ref().value(), b.updated_props_ref().value()); swap(a.insertable_ref().value_unchecked(), b.insertable_ref().value_unchecked()); swap(a.return_props_ref().value_unchecked(), b.return_props_ref().value_unchecked()); swap(a.condition_ref().value_unchecked(), b.condition_ref().value_unchecked()); swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void UpdateEdgeRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t UpdateEdgeRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t UpdateEdgeRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t UpdateEdgeRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void UpdateEdgeRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t UpdateEdgeRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t UpdateEdgeRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t UpdateEdgeRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< UpdateEdgeRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::EdgeKey>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< UpdateEdgeRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::UpdatedProp>>, "inconsistent use of json 
option"); static_assert( ::apache::thrift::detail::st::gen_check_json< UpdateEdgeRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< UpdateEdgeRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::EdgeKey>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< UpdateEdgeRequest, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::storage::cpp2::UpdatedProp>>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< UpdateEdgeRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::GetUUIDReq>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::GetUUIDReq>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN GetUUIDReq::GetUUIDReq(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::cpp2::PartitionID part_id__arg, ::std::string name__arg, ::nebula::storage::cpp2::RequestCommon common__arg) : space_id(std::move(space_id__arg)), part_id(std::move(part_id__arg)), name(std::move(name__arg)), common(std::move(common__arg)) { __isset.space_id = true; __isset.part_id = true; __isset.name = true; 
__isset.common = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void GetUUIDReq::__clear() { // clear all fields space_id = 0; part_id = 0; name = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); common.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool GetUUIDReq::operator==(const GetUUIDReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.name, rhs.name)) { return false; } if (lhs.common_ref() != rhs.common_ref()) { return false; } return true; } bool GetUUIDReq::operator<(const GetUUIDReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!(lhs.part_id == rhs.part_id)) { return lhs.part_id < rhs.part_id; } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.name, rhs.name)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.name, rhs.name); } if (lhs.common_ref() != rhs.common_ref()) { return lhs.common_ref() < rhs.common_ref(); } return false; } const ::nebula::storage::cpp2::RequestCommon* GetUUIDReq::get_common() const& { return common_ref().has_value() ? std::addressof(common) : nullptr; } ::nebula::storage::cpp2::RequestCommon* GetUUIDReq::get_common() & { return common_ref().has_value() ? 
std::addressof(common) : nullptr; } void swap(GetUUIDReq& a, GetUUIDReq& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.part_id_ref().value(), b.part_id_ref().value()); swap(a.name_ref().value(), b.name_ref().value()); swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void GetUUIDReq::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t GetUUIDReq::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t GetUUIDReq::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t GetUUIDReq::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void GetUUIDReq::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t GetUUIDReq::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t GetUUIDReq::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t GetUUIDReq::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< GetUUIDReq, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetUUIDReq, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::RequestCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::GetUUIDResp>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::GetUUIDResp>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), 
data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN GetUUIDResp::GetUUIDResp(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg, nebula::Value id__arg) : result(std::move(result__arg)), id(std::move(id__arg)) { __isset.id = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void GetUUIDResp::__clear() { // clear all fields result.__clear(); id.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool GetUUIDResp::operator==(const GetUUIDResp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (!(lhs.id == rhs.id)) { return false; } return true; } const ::nebula::storage::cpp2::ResponseCommon& GetUUIDResp::get_result() const& { return result; } ::nebula::storage::cpp2::ResponseCommon GetUUIDResp::get_result() && { return std::move(result); } const nebula::Value& GetUUIDResp::get_id() const& { return id; } nebula::Value GetUUIDResp::get_id() && { return std::move(id); } void swap(GetUUIDResp& a, GetUUIDResp& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); swap(a.id_ref().value(), b.id_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void GetUUIDResp::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t GetUUIDResp::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t GetUUIDResp::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t GetUUIDResp::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void GetUUIDResp::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t 
GetUUIDResp::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t GetUUIDResp::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t GetUUIDResp::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< GetUUIDResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< GetUUIDResp, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetUUIDResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetUUIDResp, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::LookupIndexResp>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::LookupIndexResp>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN LookupIndexResp::LookupIndexResp(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg, nebula::DataSet data__arg) : result(std::move(result__arg)), data(std::move(data__arg)) { __isset.data = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void 
LookupIndexResp::__clear() { // clear all fields result.__clear(); data.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool LookupIndexResp::operator==(const LookupIndexResp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (lhs.data_ref() != rhs.data_ref()) { return false; } return true; } const ::nebula::storage::cpp2::ResponseCommon& LookupIndexResp::get_result() const& { return result; } ::nebula::storage::cpp2::ResponseCommon LookupIndexResp::get_result() && { return std::move(result); } const nebula::DataSet* LookupIndexResp::get_data() const& { return data_ref().has_value() ? std::addressof(data) : nullptr; } nebula::DataSet* LookupIndexResp::get_data() & { return data_ref().has_value() ? std::addressof(data) : nullptr; } void swap(LookupIndexResp& a, LookupIndexResp& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); swap(a.data_ref().value_unchecked(), b.data_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void LookupIndexResp::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t LookupIndexResp::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t LookupIndexResp::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t LookupIndexResp::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void LookupIndexResp::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t LookupIndexResp::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t LookupIndexResp::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t LookupIndexResp::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< LookupIndexResp, 
::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< LookupIndexResp, ::apache::thrift::type_class::structure, nebula::DataSet>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< LookupIndexResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< LookupIndexResp, ::apache::thrift::type_class::structure, nebula::DataSet>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::IndexColumnHint>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::IndexColumnHint>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN IndexColumnHint::IndexColumnHint() : scan_type(static_cast< ::nebula::storage::cpp2::ScanType>(0)), include_begin(true), include_end(false) {} THRIFT_IGNORE_ISSET_USE_WARNING_END IndexColumnHint::~IndexColumnHint() {} THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN IndexColumnHint::IndexColumnHint(apache::thrift::FragileConstructor, ::std::string column_name__arg, ::nebula::storage::cpp2::ScanType scan_type__arg, nebula::Value begin_value__arg, nebula::Value end_value__arg, bool include_begin__arg, bool include_end__arg) : column_name(std::move(column_name__arg)), scan_type(std::move(scan_type__arg)), 
begin_value(std::move(begin_value__arg)), end_value(std::move(end_value__arg)), include_begin(std::move(include_begin__arg)), include_end(std::move(include_end__arg)) { __isset.column_name = true; __isset.scan_type = true; __isset.begin_value = true; __isset.end_value = true; __isset.include_begin = true; __isset.include_end = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void IndexColumnHint::__clear() { // clear all fields column_name = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); scan_type = static_cast< ::nebula::storage::cpp2::ScanType>(0); begin_value.__clear(); end_value.__clear(); include_begin = true; include_end = false; THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool IndexColumnHint::operator==(const IndexColumnHint& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.column_name, rhs.column_name)) { return false; } if (!(lhs.scan_type == rhs.scan_type)) { return false; } if (!(lhs.begin_value == rhs.begin_value)) { return false; } if (!(lhs.end_value == rhs.end_value)) { return false; } if (!(lhs.include_begin == rhs.include_begin)) { return false; } if (!(lhs.include_end == rhs.include_end)) { return false; } return true; } const nebula::Value& IndexColumnHint::get_begin_value() const& { return begin_value; } nebula::Value IndexColumnHint::get_begin_value() && { return std::move(begin_value); } const nebula::Value& IndexColumnHint::get_end_value() const& { return end_value; } nebula::Value IndexColumnHint::get_end_value() && { return std::move(end_value); } void swap(IndexColumnHint& a, IndexColumnHint& b) { using ::std::swap; swap(a.column_name_ref().value(), b.column_name_ref().value()); swap(a.scan_type_ref().value(), b.scan_type_ref().value()); swap(a.begin_value_ref().value(), b.begin_value_ref().value()); swap(a.end_value_ref().value(), b.end_value_ref().value()); swap(a.include_begin_ref().value(), 
b.include_begin_ref().value()); swap(a.include_end_ref().value(), b.include_end_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void IndexColumnHint::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t IndexColumnHint::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t IndexColumnHint::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t IndexColumnHint::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void IndexColumnHint::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t IndexColumnHint::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t IndexColumnHint::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t IndexColumnHint::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< IndexColumnHint, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< IndexColumnHint, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< IndexColumnHint, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< IndexColumnHint, ::apache::thrift::type_class::variant, nebula::Value>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::IndexQueryContext>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = 
apache::thrift::TStructDataStorage<::nebula::storage::cpp2::IndexQueryContext>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN IndexQueryContext::IndexQueryContext(apache::thrift::FragileConstructor, ::nebula::cpp2::IndexID index_id__arg, ::std::string filter__arg, ::std::vector< ::nebula::storage::cpp2::IndexColumnHint> column_hints__arg) : index_id(std::move(index_id__arg)), filter(std::move(filter__arg)), column_hints(std::move(column_hints__arg)) { __isset.index_id = true; __isset.filter = true; __isset.column_hints = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void IndexQueryContext::__clear() { // clear all fields index_id = 0; filter = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); column_hints.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool IndexQueryContext::operator==(const IndexQueryContext& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.index_id == rhs.index_id)) { return false; } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.filter, rhs.filter)) { return false; } if (!(lhs.column_hints == rhs.column_hints)) { return false; } return true; } const ::std::vector< ::nebula::storage::cpp2::IndexColumnHint>& IndexQueryContext::get_column_hints() const& { return column_hints; } ::std::vector< ::nebula::storage::cpp2::IndexColumnHint> IndexQueryContext::get_column_hints() && { return std::move(column_hints); } void swap(IndexQueryContext& a, IndexQueryContext& b) { using ::std::swap; swap(a.index_id_ref().value(), b.index_id_ref().value()); swap(a.filter_ref().value(), b.filter_ref().value()); swap(a.column_hints_ref().value(), 
// (fbthrift-generated; do not hand-edit.)
// Tail of swap(IndexQueryContext&, IndexQueryContext&).
b.column_hints_ref().value());
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  swap(a.__isset, b.__isset);
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Explicit Binary/Compact protocol instantiations for IndexQueryContext.
template void IndexQueryContext::readNoXfer<>(apache::thrift::BinaryProtocolReader*);
template uint32_t IndexQueryContext::write<>(apache::thrift::BinaryProtocolWriter*) const;
template uint32_t IndexQueryContext::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const;
template uint32_t IndexQueryContext::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const;
template void IndexQueryContext::readNoXfer<>(apache::thrift::CompactProtocolReader*);
template uint32_t IndexQueryContext::write<>(apache::thrift::CompactProtocolWriter*) const;
template uint32_t IndexQueryContext::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const;
template uint32_t IndexQueryContext::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const;

// json/nimble option consistency checks for the column_hints list field.
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    IndexQueryContext,
    ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>,
    ::std::vector< ::nebula::storage::cpp2::IndexColumnHint>>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    IndexQueryContext,
    ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>,
    ::std::vector< ::nebula::storage::cpp2::IndexColumnHint>>,
  "inconsistent use of nimble option");
}}} // nebula::storage::cpp2

namespace apache { namespace thrift { namespace detail {

// Field-name -> (id, wire type) lookup for IndexSpec.
void TccStructTraits<::nebula::storage::cpp2::IndexSpec>::translateFieldName(
    folly::StringPiece _fname,
    int16_t& fid,
    apache::thrift::protocol::TType& _ftype) noexcept {
  using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::IndexSpec>;
  static const st::translate_field_name_table table{
      data::fields_size,
      data::fields_names.data(),
      data::fields_ids.data(),
      data::fields_types.data()};
  st::translate_field_name(_fname, fid, _ftype, table);
}

} // namespace detail
} // namespace thrift
} // namespace apache

namespace nebula { namespace storage { namespace cpp2 {

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Fragile constructor: positional fields, moved in. Note: only schema_id is
// flagged in __isset here (as generated).
IndexSpec::IndexSpec(apache::thrift::FragileConstructor,
    ::std::vector< ::nebula::storage::cpp2::IndexQueryContext> contexts__arg,
    ::nebula::cpp2::SchemaID schema_id__arg) :
    contexts(std::move(contexts__arg)),
    schema_id(std::move(schema_id__arg)) {
  __isset.schema_id = true;
}
THRIFT_IGNORE_ISSET_USE_WARNING_END

// Resets every field to its default and clears the __isset flags.
void IndexSpec::__clear() {
  // clear all fields
  contexts.clear();
  schema_id.__clear();
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  __isset = {};
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Field-wise equality.
bool IndexSpec::operator==(const IndexSpec& rhs) const {
  (void)rhs;
  auto& lhs = *this;
  (void)lhs;
  if (!(lhs.contexts == rhs.contexts)) {
    return false;
  }
  if (!(lhs.schema_id == rhs.schema_id)) {
    return false;
  }
  return true;
}

// Accessors: const-ref for lvalues, move-out for rvalues.
const ::std::vector< ::nebula::storage::cpp2::IndexQueryContext>&
IndexSpec::get_contexts() const& {
  return contexts;
}

::std::vector< ::nebula::storage::cpp2::IndexQueryContext>
IndexSpec::get_contexts() && {
  return std::move(contexts);
}

const ::nebula::cpp2::SchemaID&
IndexSpec::get_schema_id() const& {
  return schema_id;
}

::nebula::cpp2::SchemaID
IndexSpec::get_schema_id() && {
  return std::move(schema_id);
}

// Field-wise swap via field_ref().value(), then swap __isset.
void swap(IndexSpec& a, IndexSpec& b) {
  using ::std::swap;
  swap(a.contexts_ref().value(), b.contexts_ref().value());
  swap(a.schema_id_ref().value(), b.schema_id_ref().value());
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  swap(a.__isset, b.__isset);
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Explicit Binary/Compact protocol instantiations for IndexSpec.
template void IndexSpec::readNoXfer<>(apache::thrift::BinaryProtocolReader*);
template uint32_t IndexSpec::write<>(apache::thrift::BinaryProtocolWriter*) const;
template uint32_t IndexSpec::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const;
template uint32_t IndexSpec::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const;
template void IndexSpec::readNoXfer<>(apache::thrift::CompactProtocolReader*);
template
// (fbthrift-generated; do not hand-edit.)
// Remaining Compact-protocol instantiations for IndexSpec.
uint32_t IndexSpec::write<>(apache::thrift::CompactProtocolWriter*) const;
template uint32_t IndexSpec::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const;
template uint32_t IndexSpec::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const;

// json/nimble option consistency checks for IndexSpec's fields.
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    IndexSpec,
    ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>,
    ::std::vector< ::nebula::storage::cpp2::IndexQueryContext>>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    IndexSpec,
    ::apache::thrift::type_class::variant,
    ::nebula::cpp2::SchemaID>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    IndexSpec,
    ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>,
    ::std::vector< ::nebula::storage::cpp2::IndexQueryContext>>,
  "inconsistent use of nimble option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    IndexSpec,
    ::apache::thrift::type_class::variant,
    ::nebula::cpp2::SchemaID>,
  "inconsistent use of nimble option");
}}} // nebula::storage::cpp2

namespace apache { namespace thrift { namespace detail {

// Field-name -> (id, wire type) lookup for LookupIndexRequest.
void TccStructTraits<::nebula::storage::cpp2::LookupIndexRequest>::translateFieldName(
    folly::StringPiece _fname,
    int16_t& fid,
    apache::thrift::protocol::TType& _ftype) noexcept {
  using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::LookupIndexRequest>;
  static const st::translate_field_name_table table{
      data::fields_size,
      data::fields_names.data(),
      data::fields_ids.data(),
      data::fields_types.data()};
  st::translate_field_name(_fname, fid, _ftype, table);
}

} // namespace detail
} // namespace thrift
} // namespace apache

namespace nebula { namespace storage { namespace cpp2 {

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Default ctor: numeric fields zero-initialized.
LookupIndexRequest::LookupIndexRequest() : space_id(0), limit(0) {}
THRIFT_IGNORE_ISSET_USE_WARNING_END

LookupIndexRequest::~LookupIndexRequest() {}

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Fragile constructor: positional fields, moved in; optional fields
// (indices, return_columns, common, limit) are marked set.
LookupIndexRequest::LookupIndexRequest(apache::thrift::FragileConstructor,
    ::nebula::cpp2::GraphSpaceID space_id__arg,
    ::std::vector< ::nebula::cpp2::PartitionID> parts__arg,
    ::nebula::storage::cpp2::IndexSpec indices__arg,
    ::std::vector<::std::string> return_columns__arg,
    ::nebula::storage::cpp2::RequestCommon common__arg,
    int64_t limit__arg) :
    space_id(std::move(space_id__arg)),
    parts(std::move(parts__arg)),
    indices(std::move(indices__arg)),
    return_columns(std::move(return_columns__arg)),
    common(std::move(common__arg)),
    limit(std::move(limit__arg)) {
  __isset.indices = true;
  __isset.return_columns = true;
  __isset.common = true;
  __isset.limit = true;
}
THRIFT_IGNORE_ISSET_USE_WARNING_END

// Resets every field to its default and clears the __isset flags.
void LookupIndexRequest::__clear() {
  // clear all fields
  space_id = 0;
  parts.clear();
  indices.__clear();
  return_columns.clear();
  common.__clear();
  limit = 0;
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  __isset = {};
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Field-wise equality; optional fields compare through field_ref(), which
// also accounts for presence.
bool LookupIndexRequest::operator==(const LookupIndexRequest& rhs) const {
  (void)rhs;
  auto& lhs = *this;
  (void)lhs;
  if (!(lhs.space_id == rhs.space_id)) {
    return false;
  }
  if (!(lhs.parts == rhs.parts)) {
    return false;
  }
  if (!(lhs.indices == rhs.indices)) {
    return false;
  }
  if (lhs.return_columns_ref() != rhs.return_columns_ref()) {
    return false;
  }
  if (lhs.common_ref() != rhs.common_ref()) {
    return false;
  }
  if (lhs.limit_ref() != rhs.limit_ref()) {
    return false;
  }
  return true;
}

// Accessors. Optional fields return a pointer (nullptr when unset).
const ::std::vector< ::nebula::cpp2::PartitionID>&
LookupIndexRequest::get_parts() const& {
  return parts;
}

::std::vector< ::nebula::cpp2::PartitionID>
LookupIndexRequest::get_parts() && {
  return std::move(parts);
}

const ::nebula::storage::cpp2::IndexSpec&
LookupIndexRequest::get_indices() const& {
  return indices;
}

::nebula::storage::cpp2::IndexSpec
LookupIndexRequest::get_indices() && {
  return std::move(indices);
}

const ::std::vector<::std::string>*
LookupIndexRequest::get_return_columns() const& {
  return return_columns_ref().has_value() ? std::addressof(return_columns) : nullptr;
}

::std::vector<::std::string>*
LookupIndexRequest::get_return_columns() & {
  return return_columns_ref().has_value() ? std::addressof(return_columns) : nullptr;
}

const ::nebula::storage::cpp2::RequestCommon*
LookupIndexRequest::get_common() const& {
  return common_ref().has_value() ? std::addressof(common) : nullptr;
}

::nebula::storage::cpp2::RequestCommon*
LookupIndexRequest::get_common() & {
  return common_ref().has_value() ? std::addressof(common) : nullptr;
}

// Field-wise swap; optional fields use value_unchecked() (swapped regardless
// of presence; __isset is swapped right after to keep state consistent).
void swap(LookupIndexRequest& a, LookupIndexRequest& b) {
  using ::std::swap;
  swap(a.space_id_ref().value(), b.space_id_ref().value());
  swap(a.parts_ref().value(), b.parts_ref().value());
  swap(a.indices_ref().value(), b.indices_ref().value());
  swap(a.return_columns_ref().value_unchecked(), b.return_columns_ref().value_unchecked());
  swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked());
  swap(a.limit_ref().value_unchecked(), b.limit_ref().value_unchecked());
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  swap(a.__isset, b.__isset);
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Explicit Binary/Compact protocol instantiations for LookupIndexRequest.
template void LookupIndexRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*);
template uint32_t LookupIndexRequest::write<>(apache::thrift::BinaryProtocolWriter*) const;
template uint32_t LookupIndexRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const;
template uint32_t LookupIndexRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const;
template void LookupIndexRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*);
template uint32_t LookupIndexRequest::write<>(apache::thrift::CompactProtocolWriter*) const;
template uint32_t LookupIndexRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const;
template uint32_t LookupIndexRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const;

// json/nimble option consistency checks (continued past this span).
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
// (fbthrift-generated; do not hand-edit.)
// json/nimble option consistency checks for LookupIndexRequest's struct fields.
LookupIndexRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::IndexSpec>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    LookupIndexRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::RequestCommon>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    LookupIndexRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::IndexSpec>,
  "inconsistent use of nimble option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    LookupIndexRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::RequestCommon>,
  "inconsistent use of nimble option");
}}} // nebula::storage::cpp2

namespace apache { namespace thrift { namespace detail {

// Field-name -> (id, wire type) lookup for LookupAndTraverseRequest.
void TccStructTraits<::nebula::storage::cpp2::LookupAndTraverseRequest>::translateFieldName(
    folly::StringPiece _fname,
    int16_t& fid,
    apache::thrift::protocol::TType& _ftype) noexcept {
  using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::LookupAndTraverseRequest>;
  static const st::translate_field_name_table table{
      data::fields_size,
      data::fields_names.data(),
      data::fields_ids.data(),
      data::fields_types.data()};
  st::translate_field_name(_fname, fid, _ftype, table);
}

} // namespace detail
} // namespace thrift
} // namespace apache

namespace nebula { namespace storage { namespace cpp2 {

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Default ctor: space_id zero-initialized.
LookupAndTraverseRequest::LookupAndTraverseRequest() : space_id(0) {}
THRIFT_IGNORE_ISSET_USE_WARNING_END

LookupAndTraverseRequest::~LookupAndTraverseRequest() {}

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Fragile constructor: positional fields, moved in; optional fields
// (indices, traverse_spec, common) are marked set.
LookupAndTraverseRequest::LookupAndTraverseRequest(apache::thrift::FragileConstructor,
    ::nebula::cpp2::GraphSpaceID space_id__arg,
    ::std::vector< ::nebula::cpp2::PartitionID> parts__arg,
    ::nebula::storage::cpp2::IndexSpec indices__arg,
    ::nebula::storage::cpp2::TraverseSpec traverse_spec__arg,
    ::nebula::storage::cpp2::RequestCommon common__arg) :
    space_id(std::move(space_id__arg)),
    parts(std::move(parts__arg)),
    indices(std::move(indices__arg)),
    traverse_spec(std::move(traverse_spec__arg)),
    common(std::move(common__arg)) {
  __isset.indices = true;
  __isset.traverse_spec = true;
  __isset.common = true;
}
THRIFT_IGNORE_ISSET_USE_WARNING_END

// Resets every field to its default and clears the __isset flags.
void LookupAndTraverseRequest::__clear() {
  // clear all fields
  space_id = 0;
  parts.clear();
  indices.__clear();
  traverse_spec.__clear();
  common.__clear();
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  __isset = {};
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Field-wise equality; common compares through field_ref() (checks presence).
bool LookupAndTraverseRequest::operator==(const LookupAndTraverseRequest& rhs) const {
  (void)rhs;
  auto& lhs = *this;
  (void)lhs;
  if (!(lhs.space_id == rhs.space_id)) {
    return false;
  }
  if (!(lhs.parts == rhs.parts)) {
    return false;
  }
  if (!(lhs.indices == rhs.indices)) {
    return false;
  }
  if (!(lhs.traverse_spec == rhs.traverse_spec)) {
    return false;
  }
  if (lhs.common_ref() != rhs.common_ref()) {
    return false;
  }
  return true;
}

// Accessors. The optional common field returns a pointer (nullptr when unset).
const ::std::vector< ::nebula::cpp2::PartitionID>&
LookupAndTraverseRequest::get_parts() const& {
  return parts;
}

::std::vector< ::nebula::cpp2::PartitionID>
LookupAndTraverseRequest::get_parts() && {
  return std::move(parts);
}

const ::nebula::storage::cpp2::IndexSpec&
LookupAndTraverseRequest::get_indices() const& {
  return indices;
}

::nebula::storage::cpp2::IndexSpec
LookupAndTraverseRequest::get_indices() && {
  return std::move(indices);
}

const ::nebula::storage::cpp2::TraverseSpec&
LookupAndTraverseRequest::get_traverse_spec() const& {
  return traverse_spec;
}

::nebula::storage::cpp2::TraverseSpec
LookupAndTraverseRequest::get_traverse_spec() && {
  return std::move(traverse_spec);
}

const ::nebula::storage::cpp2::RequestCommon*
LookupAndTraverseRequest::get_common() const& {
  return common_ref().has_value() ? std::addressof(common) : nullptr;
}

::nebula::storage::cpp2::RequestCommon*
LookupAndTraverseRequest::get_common() & {
  return common_ref().has_value() ?
std::addressof(common) : nullptr;
}

// Field-wise swap; the optional common field uses value_unchecked().
void swap(LookupAndTraverseRequest& a, LookupAndTraverseRequest& b) {
  using ::std::swap;
  swap(a.space_id_ref().value(), b.space_id_ref().value());
  swap(a.parts_ref().value(), b.parts_ref().value());
  swap(a.indices_ref().value(), b.indices_ref().value());
  swap(a.traverse_spec_ref().value(), b.traverse_spec_ref().value());
  swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked());
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  swap(a.__isset, b.__isset);
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Explicit Binary/Compact protocol instantiations for LookupAndTraverseRequest.
template void LookupAndTraverseRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*);
template uint32_t LookupAndTraverseRequest::write<>(apache::thrift::BinaryProtocolWriter*) const;
template uint32_t LookupAndTraverseRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const;
template uint32_t LookupAndTraverseRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const;
template void LookupAndTraverseRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*);
template uint32_t LookupAndTraverseRequest::write<>(apache::thrift::CompactProtocolWriter*) const;
template uint32_t LookupAndTraverseRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const;
template uint32_t LookupAndTraverseRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const;

// json option consistency checks for LookupAndTraverseRequest's struct fields.
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    LookupAndTraverseRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::IndexSpec>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    LookupAndTraverseRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::TraverseSpec>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    LookupAndTraverseRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::RequestCommon>,
  "inconsistent use of json option");
// (fbthrift-generated; do not hand-edit.)
// nimble option consistency checks for LookupAndTraverseRequest's struct fields.
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    LookupAndTraverseRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::IndexSpec>,
  "inconsistent use of nimble option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    LookupAndTraverseRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::TraverseSpec>,
  "inconsistent use of nimble option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    LookupAndTraverseRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::RequestCommon>,
  "inconsistent use of nimble option");
}}} // nebula::storage::cpp2

namespace apache { namespace thrift { namespace detail {

// Field-name -> (id, wire type) lookup for ScanCursor.
void TccStructTraits<::nebula::storage::cpp2::ScanCursor>::translateFieldName(
    folly::StringPiece _fname,
    int16_t& fid,
    apache::thrift::protocol::TType& _ftype) noexcept {
  using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::ScanCursor>;
  static const st::translate_field_name_table table{
      data::fields_size,
      data::fields_names.data(),
      data::fields_ids.data(),
      data::fields_types.data()};
  st::translate_field_name(_fname, fid, _ftype, table);
}

} // namespace detail
} // namespace thrift
} // namespace apache

namespace nebula { namespace storage { namespace cpp2 {

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Fragile constructor: positional fields, moved in; both fields marked set.
ScanCursor::ScanCursor(apache::thrift::FragileConstructor,
    bool has_next__arg,
    ::std::string next_cursor__arg) :
    has_next(std::move(has_next__arg)),
    next_cursor(std::move(next_cursor__arg)) {
  __isset.has_next = true;
  __isset.next_cursor = true;
}
THRIFT_IGNORE_ISSET_USE_WARNING_END

// Resets every field to its default and clears the __isset flags.
void ScanCursor::__clear() {
  // clear all fields
  has_next = 0;
  next_cursor = apache::thrift::StringTraits< std::string>::fromStringLiteral("");
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  __isset = {};
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Equality: has_next first, then the optional next_cursor (presence must
// match, then the string contents).
bool ScanCursor::operator==(const ScanCursor& rhs) const {
  (void)rhs;
  auto& lhs = *this;
  (void)lhs;
  if (!(lhs.has_next == rhs.has_next)) {
    return
false;
  }
  if (lhs.next_cursor_ref().has_value() != rhs.next_cursor_ref().has_value()) {
    return false;
  }
  if (lhs.next_cursor_ref().has_value()) {
    if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.next_cursor, rhs.next_cursor)) {
      return false;
    }
  }
  return true;
}

// Lexicographic ordering over (has_next, next_cursor presence, next_cursor).
bool ScanCursor::operator<(const ScanCursor& rhs) const {
  (void)rhs;
  auto& lhs = *this;
  (void)lhs;
  if (!(lhs.has_next == rhs.has_next)) {
    return lhs.has_next < rhs.has_next;
  }
  if (lhs.next_cursor_ref().has_value() != rhs.next_cursor_ref().has_value()) {
    return lhs.next_cursor_ref().has_value() < rhs.next_cursor_ref().has_value();
  }
  if (lhs.next_cursor_ref().has_value()) {
    if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.next_cursor, rhs.next_cursor)) {
      return apache::thrift::StringTraits<std::string>::isLess(lhs.next_cursor, rhs.next_cursor);
    }
  }
  return false;
}

// Field-wise swap; optional next_cursor uses value_unchecked().
void swap(ScanCursor& a, ScanCursor& b) {
  using ::std::swap;
  swap(a.has_next_ref().value(), b.has_next_ref().value());
  swap(a.next_cursor_ref().value_unchecked(), b.next_cursor_ref().value_unchecked());
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  swap(a.__isset, b.__isset);
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Explicit Binary/Compact protocol instantiations for ScanCursor.
template void ScanCursor::readNoXfer<>(apache::thrift::BinaryProtocolReader*);
template uint32_t ScanCursor::write<>(apache::thrift::BinaryProtocolWriter*) const;
template uint32_t ScanCursor::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const;
template uint32_t ScanCursor::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const;
template void ScanCursor::readNoXfer<>(apache::thrift::CompactProtocolReader*);
template uint32_t ScanCursor::write<>(apache::thrift::CompactProtocolWriter*) const;
template uint32_t ScanCursor::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const;
template uint32_t ScanCursor::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const;
}}} // nebula::storage::cpp2

namespace apache { namespace thrift { namespace detail {

void
// (fbthrift-generated; do not hand-edit.)
// Field-name -> (id, wire type) lookup for ScanVertexRequest.
TccStructTraits<::nebula::storage::cpp2::ScanVertexRequest>::translateFieldName(
    folly::StringPiece _fname,
    int16_t& fid,
    apache::thrift::protocol::TType& _ftype) noexcept {
  using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::ScanVertexRequest>;
  static const st::translate_field_name_table table{
      data::fields_size,
      data::fields_names.data(),
      data::fields_ids.data(),
      data::fields_types.data()};
  st::translate_field_name(_fname, fid, _ftype, table);
}

} // namespace detail
} // namespace thrift
} // namespace apache

namespace nebula { namespace storage { namespace cpp2 {

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Default ctor: numeric/bool fields get their IDL defaults
// (enable_read_from_follower defaults to true).
ScanVertexRequest::ScanVertexRequest() : space_id(0), limit(0), start_time(0), end_time(0), only_latest_version(false), enable_read_from_follower(true) {}
THRIFT_IGNORE_ISSET_USE_WARNING_END

ScanVertexRequest::~ScanVertexRequest() {}

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Fragile constructor: all fields positionally, moved in; every field is
// marked set.
ScanVertexRequest::ScanVertexRequest(apache::thrift::FragileConstructor,
    ::nebula::cpp2::GraphSpaceID space_id__arg,
    std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor> parts__arg,
    ::std::vector< ::nebula::storage::cpp2::VertexProp> return_columns__arg,
    int64_t limit__arg,
    int64_t start_time__arg,
    int64_t end_time__arg,
    ::std::string filter__arg,
    bool only_latest_version__arg,
    bool enable_read_from_follower__arg,
    ::nebula::storage::cpp2::RequestCommon common__arg) :
    space_id(std::move(space_id__arg)),
    parts(std::move(parts__arg)),
    return_columns(std::move(return_columns__arg)),
    limit(std::move(limit__arg)),
    start_time(std::move(start_time__arg)),
    end_time(std::move(end_time__arg)),
    filter(std::move(filter__arg)),
    only_latest_version(std::move(only_latest_version__arg)),
    enable_read_from_follower(std::move(enable_read_from_follower__arg)),
    common(std::move(common__arg)) {
  __isset.space_id = true;
  __isset.parts = true;
  __isset.return_columns = true;
  __isset.limit = true;
  __isset.start_time = true;
  __isset.end_time = true;
  __isset.filter = true;
  __isset.only_latest_version = true;
  __isset.enable_read_from_follower = true;
  __isset.common = true;
}
THRIFT_IGNORE_ISSET_USE_WARNING_END

// Resets every field to its IDL default and clears the __isset flags.
void ScanVertexRequest::__clear() {
  // clear all fields
  space_id = 0;
  parts.clear();
  return_columns.clear();
  limit = 0;
  start_time = 0;
  end_time = 0;
  filter = apache::thrift::StringTraits< std::string>::fromStringLiteral("");
  only_latest_version = false;
  enable_read_from_follower = true;
  common.__clear();
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  __isset = {};
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Field-wise equality; optional fields (start_time, end_time, filter, common)
// compare through field_ref()/presence checks.
bool ScanVertexRequest::operator==(const ScanVertexRequest& rhs) const {
  (void)rhs;
  auto& lhs = *this;
  (void)lhs;
  if (!(lhs.space_id == rhs.space_id)) {
    return false;
  }
  if (!(lhs.parts == rhs.parts)) {
    return false;
  }
  if (!(lhs.return_columns == rhs.return_columns)) {
    return false;
  }
  if (!(lhs.limit == rhs.limit)) {
    return false;
  }
  if (lhs.start_time_ref() != rhs.start_time_ref()) {
    return false;
  }
  if (lhs.end_time_ref() != rhs.end_time_ref()) {
    return false;
  }
  if (lhs.filter_ref().has_value() != rhs.filter_ref().has_value()) {
    return false;
  }
  if (lhs.filter_ref().has_value()) {
    if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.filter, rhs.filter)) {
      return false;
    }
  }
  if (!(lhs.only_latest_version == rhs.only_latest_version)) {
    return false;
  }
  if (!(lhs.enable_read_from_follower == rhs.enable_read_from_follower)) {
    return false;
  }
  if (lhs.common_ref() != rhs.common_ref()) {
    return false;
  }
  return true;
}

// Accessors: const-ref for lvalues, move-out for rvalues.
const std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>&
ScanVertexRequest::get_parts() const& {
  return parts;
}

std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>
ScanVertexRequest::get_parts() && {
  return std::move(parts);
}

const ::std::vector< ::nebula::storage::cpp2::VertexProp>&
ScanVertexRequest::get_return_columns() const& {
  return return_columns;
}

::std::vector< ::nebula::storage::cpp2::VertexProp>
ScanVertexRequest::get_return_columns() && {
  return
std::move(return_columns);
}

// Optional common field: pointer accessors (nullptr when unset).
const ::nebula::storage::cpp2::RequestCommon*
ScanVertexRequest::get_common() const& {
  return common_ref().has_value() ? std::addressof(common) : nullptr;
}

::nebula::storage::cpp2::RequestCommon*
ScanVertexRequest::get_common() & {
  return common_ref().has_value() ? std::addressof(common) : nullptr;
}

// Field-wise swap; optional fields use value_unchecked().
void swap(ScanVertexRequest& a, ScanVertexRequest& b) {
  using ::std::swap;
  swap(a.space_id_ref().value(), b.space_id_ref().value());
  swap(a.parts_ref().value(), b.parts_ref().value());
  swap(a.return_columns_ref().value(), b.return_columns_ref().value());
  swap(a.limit_ref().value(), b.limit_ref().value());
  swap(a.start_time_ref().value_unchecked(), b.start_time_ref().value_unchecked());
  swap(a.end_time_ref().value_unchecked(), b.end_time_ref().value_unchecked());
  swap(a.filter_ref().value_unchecked(), b.filter_ref().value_unchecked());
  swap(a.only_latest_version_ref().value(), b.only_latest_version_ref().value());
  swap(a.enable_read_from_follower_ref().value(), b.enable_read_from_follower_ref().value());
  swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked());
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  swap(a.__isset, b.__isset);
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Explicit Binary/Compact protocol instantiations for ScanVertexRequest.
template void ScanVertexRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*);
template uint32_t ScanVertexRequest::write<>(apache::thrift::BinaryProtocolWriter*) const;
template uint32_t ScanVertexRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const;
template uint32_t ScanVertexRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const;
template void ScanVertexRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*);
template uint32_t ScanVertexRequest::write<>(apache::thrift::CompactProtocolWriter*) const;
template uint32_t ScanVertexRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const;
template uint32_t ScanVertexRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter
// (fbthrift-generated; do not hand-edit.)
const*) const;

// json/nimble option consistency checks for ScanVertexRequest's container and
// struct fields.
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    ScanVertexRequest,
    ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::structure>,
    std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    ScanVertexRequest,
    ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>,
    ::std::vector< ::nebula::storage::cpp2::VertexProp>>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    ScanVertexRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::RequestCommon>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    ScanVertexRequest,
    ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::structure>,
    std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>>,
  "inconsistent use of nimble option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    ScanVertexRequest,
    ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>,
    ::std::vector< ::nebula::storage::cpp2::VertexProp>>,
  "inconsistent use of nimble option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    ScanVertexRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::RequestCommon>,
  "inconsistent use of nimble option");
}}} // nebula::storage::cpp2

namespace apache { namespace thrift { namespace detail {

// Field-name -> (id, wire type) lookup for ScanEdgeRequest.
void TccStructTraits<::nebula::storage::cpp2::ScanEdgeRequest>::translateFieldName(
    folly::StringPiece _fname,
    int16_t& fid,
    apache::thrift::protocol::TType& _ftype) noexcept {
  using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::ScanEdgeRequest>;
  static const st::translate_field_name_table table{
      data::fields_size,
      data::fields_names.data(),
      data::fields_ids.data(),
      data::fields_types.data()};
  st::translate_field_name(_fname, fid, _ftype, table);
}

} // namespace detail
} // namespace thrift
} // namespace apache

namespace nebula { namespace storage { namespace cpp2 {

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Default ctor: numeric/bool fields get their IDL defaults
// (enable_read_from_follower defaults to true).
ScanEdgeRequest::ScanEdgeRequest() : space_id(0), limit(0), start_time(0), end_time(0), only_latest_version(false), enable_read_from_follower(true) {}
THRIFT_IGNORE_ISSET_USE_WARNING_END

ScanEdgeRequest::~ScanEdgeRequest() {}

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Fragile constructor: all fields positionally, moved in; every field is
// marked set.
ScanEdgeRequest::ScanEdgeRequest(apache::thrift::FragileConstructor,
    ::nebula::cpp2::GraphSpaceID space_id__arg,
    std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor> parts__arg,
    ::std::vector< ::nebula::storage::cpp2::EdgeProp> return_columns__arg,
    int64_t limit__arg,
    int64_t start_time__arg,
    int64_t end_time__arg,
    ::std::string filter__arg,
    bool only_latest_version__arg,
    bool enable_read_from_follower__arg,
    ::nebula::storage::cpp2::RequestCommon common__arg) :
    space_id(std::move(space_id__arg)),
    parts(std::move(parts__arg)),
    return_columns(std::move(return_columns__arg)),
    limit(std::move(limit__arg)),
    start_time(std::move(start_time__arg)),
    end_time(std::move(end_time__arg)),
    filter(std::move(filter__arg)),
    only_latest_version(std::move(only_latest_version__arg)),
    enable_read_from_follower(std::move(enable_read_from_follower__arg)),
    common(std::move(common__arg)) {
  __isset.space_id = true;
  __isset.parts = true;
  __isset.return_columns = true;
  __isset.limit = true;
  __isset.start_time = true;
  __isset.end_time = true;
  __isset.filter = true;
  __isset.only_latest_version = true;
  __isset.enable_read_from_follower = true;
  __isset.common = true;
}
THRIFT_IGNORE_ISSET_USE_WARNING_END

// Resets every field to its IDL default and clears the __isset flags.
void ScanEdgeRequest::__clear() {
  // clear all fields
  space_id = 0;
  parts.clear();
  return_columns.clear();
  limit = 0;
  start_time = 0;
  end_time = 0;
  filter = apache::thrift::StringTraits<
std::string>::fromStringLiteral("");
  only_latest_version = false;
  enable_read_from_follower = true;
  common.__clear();
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  __isset = {};
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Field-wise equality; optional fields (start_time, end_time, filter, common)
// compare through field_ref()/presence checks.
bool ScanEdgeRequest::operator==(const ScanEdgeRequest& rhs) const {
  (void)rhs;
  auto& lhs = *this;
  (void)lhs;
  if (!(lhs.space_id == rhs.space_id)) {
    return false;
  }
  if (!(lhs.parts == rhs.parts)) {
    return false;
  }
  if (!(lhs.return_columns == rhs.return_columns)) {
    return false;
  }
  if (!(lhs.limit == rhs.limit)) {
    return false;
  }
  if (lhs.start_time_ref() != rhs.start_time_ref()) {
    return false;
  }
  if (lhs.end_time_ref() != rhs.end_time_ref()) {
    return false;
  }
  if (lhs.filter_ref().has_value() != rhs.filter_ref().has_value()) {
    return false;
  }
  if (lhs.filter_ref().has_value()) {
    if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.filter, rhs.filter)) {
      return false;
    }
  }
  if (!(lhs.only_latest_version == rhs.only_latest_version)) {
    return false;
  }
  if (!(lhs.enable_read_from_follower == rhs.enable_read_from_follower)) {
    return false;
  }
  if (lhs.common_ref() != rhs.common_ref()) {
    return false;
  }
  return true;
}

// Accessors: const-ref for lvalues, move-out for rvalues.
const std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>&
ScanEdgeRequest::get_parts() const& {
  return parts;
}

std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>
ScanEdgeRequest::get_parts() && {
  return std::move(parts);
}

const ::std::vector< ::nebula::storage::cpp2::EdgeProp>&
ScanEdgeRequest::get_return_columns() const& {
  return return_columns;
}

::std::vector< ::nebula::storage::cpp2::EdgeProp>
ScanEdgeRequest::get_return_columns() && {
  return std::move(return_columns);
}

// Optional common field: pointer accessors (nullptr when unset).
const ::nebula::storage::cpp2::RequestCommon*
ScanEdgeRequest::get_common() const& {
  return common_ref().has_value() ? std::addressof(common) : nullptr;
}

::nebula::storage::cpp2::RequestCommon*
ScanEdgeRequest::get_common() & {
  return common_ref().has_value() ?
// (fbthrift-generated; do not hand-edit.)
std::addressof(common) : nullptr;
}

// Field-wise swap for ScanEdgeRequest; optional fields use value_unchecked().
void swap(ScanEdgeRequest& a, ScanEdgeRequest& b) {
  using ::std::swap;
  swap(a.space_id_ref().value(), b.space_id_ref().value());
  swap(a.parts_ref().value(), b.parts_ref().value());
  swap(a.return_columns_ref().value(), b.return_columns_ref().value());
  swap(a.limit_ref().value(), b.limit_ref().value());
  swap(a.start_time_ref().value_unchecked(), b.start_time_ref().value_unchecked());
  swap(a.end_time_ref().value_unchecked(), b.end_time_ref().value_unchecked());
  swap(a.filter_ref().value_unchecked(), b.filter_ref().value_unchecked());
  swap(a.only_latest_version_ref().value(), b.only_latest_version_ref().value());
  swap(a.enable_read_from_follower_ref().value(), b.enable_read_from_follower_ref().value());
  swap(a.common_ref().value_unchecked(), b.common_ref().value_unchecked());
  THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  swap(a.__isset, b.__isset);
  THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Explicit Binary/Compact protocol instantiations for ScanEdgeRequest.
template void ScanEdgeRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*);
template uint32_t ScanEdgeRequest::write<>(apache::thrift::BinaryProtocolWriter*) const;
template uint32_t ScanEdgeRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const;
template uint32_t ScanEdgeRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const;
template void ScanEdgeRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*);
template uint32_t ScanEdgeRequest::write<>(apache::thrift::CompactProtocolWriter*) const;
template uint32_t ScanEdgeRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const;
template uint32_t ScanEdgeRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const;

// json/nimble option consistency checks for ScanEdgeRequest's container and
// struct fields.
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    ScanEdgeRequest,
    ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::structure>,
    std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    ScanEdgeRequest,
    ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>,
    ::std::vector< ::nebula::storage::cpp2::EdgeProp>>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_json<
    ScanEdgeRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::RequestCommon>,
  "inconsistent use of json option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    ScanEdgeRequest,
    ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::structure>,
    std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>>,
  "inconsistent use of nimble option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    ScanEdgeRequest,
    ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>,
    ::std::vector< ::nebula::storage::cpp2::EdgeProp>>,
  "inconsistent use of nimble option");
static_assert(
  ::apache::thrift::detail::st::gen_check_nimble<
    ScanEdgeRequest,
    ::apache::thrift::type_class::structure,
    ::nebula::storage::cpp2::RequestCommon>,
  "inconsistent use of nimble option");
}}} // nebula::storage::cpp2

namespace apache { namespace thrift { namespace detail {

// Field-name -> (id, wire type) lookup for ScanResponse.
void TccStructTraits<::nebula::storage::cpp2::ScanResponse>::translateFieldName(
    folly::StringPiece _fname,
    int16_t& fid,
    apache::thrift::protocol::TType& _ftype) noexcept {
  using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::ScanResponse>;
  static const st::translate_field_name_table table{
      data::fields_size,
      data::fields_names.data(),
      data::fields_ids.data(),
      data::fields_types.data()};
  st::translate_field_name(_fname, fid, _ftype, table);
}

} // namespace detail
} // namespace thrift
} // namespace apache

namespace nebula { namespace storage { namespace cpp2 {

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// Fragile constructor for ScanResponse (definition continues beyond this
// chunk).
ScanResponse::ScanResponse(apache::thrift::FragileConstructor,
::nebula::storage::cpp2::ResponseCommon result__arg, nebula::DataSet props__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor> cursors__arg) : result(std::move(result__arg)), props(std::move(props__arg)), cursors(std::move(cursors__arg)) { __isset.props = true; __isset.cursors = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void ScanResponse::__clear() { // clear all fields result.__clear(); props.__clear(); cursors.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool ScanResponse::operator==(const ScanResponse& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (lhs.props_ref() != rhs.props_ref()) { return false; } if (!(lhs.cursors == rhs.cursors)) { return false; } return true; } const ::nebula::storage::cpp2::ResponseCommon& ScanResponse::get_result() const& { return result; } ::nebula::storage::cpp2::ResponseCommon ScanResponse::get_result() && { return std::move(result); } const nebula::DataSet* ScanResponse::get_props() const& { return props_ref().has_value() ? std::addressof(props) : nullptr; } nebula::DataSet* ScanResponse::get_props() & { return props_ref().has_value() ? 
std::addressof(props) : nullptr; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>& ScanResponse::get_cursors() const& { return cursors; } std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor> ScanResponse::get_cursors() && { return std::move(cursors); } void swap(ScanResponse& a, ScanResponse& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); swap(a.props_ref().value_unchecked(), b.props_ref().value_unchecked()); swap(a.cursors_ref().value(), b.cursors_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void ScanResponse::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t ScanResponse::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t ScanResponse::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t ScanResponse::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void ScanResponse::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t ScanResponse::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t ScanResponse::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t ScanResponse::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< ScanResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< ScanResponse, ::apache::thrift::type_class::structure, nebula::DataSet>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< ScanResponse, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::structure>, 
std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< ScanResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< ScanResponse, ::apache::thrift::type_class::structure, nebula::DataSet>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< ScanResponse, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::structure>, std::unordered_map< ::nebula::cpp2::PartitionID, ::nebula::storage::cpp2::ScanCursor>>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::TaskPara>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::TaskPara>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN TaskPara::TaskPara(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::std::vector< ::nebula::cpp2::PartitionID> parts__arg, ::std::vector<::std::string> task_specific_paras__arg) : space_id(std::move(space_id__arg)), parts(std::move(parts__arg)), task_specific_paras(std::move(task_specific_paras__arg)) { __isset.space_id = true; __isset.parts = true; __isset.task_specific_paras = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void 
TaskPara::__clear() { // clear all fields space_id = 0; parts.clear(); task_specific_paras.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool TaskPara::operator==(const TaskPara& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (lhs.parts_ref() != rhs.parts_ref()) { return false; } if (lhs.task_specific_paras_ref() != rhs.task_specific_paras_ref()) { return false; } return true; } bool TaskPara::operator<(const TaskPara& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (lhs.parts_ref() != rhs.parts_ref()) { return lhs.parts_ref() < rhs.parts_ref(); } if (lhs.task_specific_paras_ref() != rhs.task_specific_paras_ref()) { return lhs.task_specific_paras_ref() < rhs.task_specific_paras_ref(); } return false; } const ::std::vector< ::nebula::cpp2::PartitionID>* TaskPara::get_parts() const& { return parts_ref().has_value() ? std::addressof(parts) : nullptr; } ::std::vector< ::nebula::cpp2::PartitionID>* TaskPara::get_parts() & { return parts_ref().has_value() ? std::addressof(parts) : nullptr; } const ::std::vector<::std::string>* TaskPara::get_task_specific_paras() const& { return task_specific_paras_ref().has_value() ? std::addressof(task_specific_paras) : nullptr; } ::std::vector<::std::string>* TaskPara::get_task_specific_paras() & { return task_specific_paras_ref().has_value() ? 
std::addressof(task_specific_paras) : nullptr; } void swap(TaskPara& a, TaskPara& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value_unchecked(), b.parts_ref().value_unchecked()); swap(a.task_specific_paras_ref().value_unchecked(), b.task_specific_paras_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void TaskPara::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t TaskPara::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t TaskPara::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t TaskPara::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void TaskPara::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t TaskPara::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t TaskPara::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t TaskPara::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::KVGetRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::KVGetRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN KVGetRequest::KVGetRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, std::unordered_map< 
::nebula::cpp2::PartitionID, ::std::vector<::std::string>> parts__arg, bool return_partly__arg) : space_id(std::move(space_id__arg)), parts(std::move(parts__arg)), return_partly(std::move(return_partly__arg)) { __isset.space_id = true; __isset.parts = true; __isset.return_partly = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void KVGetRequest::__clear() { // clear all fields space_id = 0; parts.clear(); return_partly = 0; THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool KVGetRequest::operator==(const KVGetRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } if (!(lhs.return_partly == rhs.return_partly)) { return false; } return true; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<::std::string>>& KVGetRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<::std::string>> KVGetRequest::get_parts() && { return std::move(parts); } void swap(KVGetRequest& a, KVGetRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); swap(a.return_partly_ref().value(), b.return_partly_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void KVGetRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t KVGetRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t KVGetRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t KVGetRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void KVGetRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t KVGetRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t 
KVGetRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t KVGetRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::KVGetResponse>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::KVGetResponse>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN KVGetResponse::KVGetResponse(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg, std::unordered_map<::std::string, ::std::string> key_values__arg) : result(std::move(result__arg)), key_values(std::move(key_values__arg)) { __isset.key_values = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void KVGetResponse::__clear() { // clear all fields result.__clear(); key_values.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool KVGetResponse::operator==(const KVGetResponse& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (!(lhs.key_values == rhs.key_values)) { return false; } return true; } const ::nebula::storage::cpp2::ResponseCommon& KVGetResponse::get_result() const& { return result; } ::nebula::storage::cpp2::ResponseCommon KVGetResponse::get_result() && { return std::move(result); } const std::unordered_map<::std::string, ::std::string>& KVGetResponse::get_key_values() const& { return key_values; } std::unordered_map<::std::string, 
::std::string> KVGetResponse::get_key_values() && { return std::move(key_values); } void swap(KVGetResponse& a, KVGetResponse& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); swap(a.key_values_ref().value(), b.key_values_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void KVGetResponse::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t KVGetResponse::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t KVGetResponse::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t KVGetResponse::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void KVGetResponse::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t KVGetResponse::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t KVGetResponse::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t KVGetResponse::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< KVGetResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< KVGetResponse, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::KVPutRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::KVPutRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), 
data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN KVPutRequest::KVPutRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::KeyValue>> parts__arg) : space_id(std::move(space_id__arg)), parts(std::move(parts__arg)) { __isset.space_id = true; __isset.parts = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void KVPutRequest::__clear() { // clear all fields space_id = 0; parts.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool KVPutRequest::operator==(const KVPutRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } return true; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::KeyValue>>& KVPutRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::KeyValue>> KVPutRequest::get_parts() && { return std::move(parts); } void swap(KVPutRequest& a, KVPutRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void KVPutRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t KVPutRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t KVPutRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t KVPutRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void 
KVPutRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t KVPutRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t KVPutRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t KVPutRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< KVPutRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::KeyValue>>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< KVPutRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<nebula::KeyValue>>>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::KVRemoveRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::KVRemoveRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN KVRemoveRequest::KVRemoveRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<::std::string>> parts__arg) : 
space_id(std::move(space_id__arg)), parts(std::move(parts__arg)) { __isset.space_id = true; __isset.parts = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void KVRemoveRequest::__clear() { // clear all fields space_id = 0; parts.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool KVRemoveRequest::operator==(const KVRemoveRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } return true; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<::std::string>>& KVRemoveRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<::std::string>> KVRemoveRequest::get_parts() && { return std::move(parts); } void swap(KVRemoveRequest& a, KVRemoveRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void KVRemoveRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t KVRemoveRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t KVRemoveRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t KVRemoveRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void KVRemoveRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t KVRemoveRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t KVRemoveRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t KVRemoveRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void 
TccStructTraits<::nebula::storage::cpp2::AdminExecResp>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::AdminExecResp>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN AdminExecResp::AdminExecResp(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg, ::nebula::meta::cpp2::StatsItem stats__arg) : result(std::move(result__arg)), stats(std::move(stats__arg)) { __isset.stats = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void AdminExecResp::__clear() { // clear all fields result.__clear(); stats.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool AdminExecResp::operator==(const AdminExecResp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (lhs.stats_ref() != rhs.stats_ref()) { return false; } return true; } const ::nebula::storage::cpp2::ResponseCommon& AdminExecResp::get_result() const& { return result; } ::nebula::storage::cpp2::ResponseCommon AdminExecResp::get_result() && { return std::move(result); } const ::nebula::meta::cpp2::StatsItem* AdminExecResp::get_stats() const& { return stats_ref().has_value() ? std::addressof(stats) : nullptr; } ::nebula::meta::cpp2::StatsItem* AdminExecResp::get_stats() & { return stats_ref().has_value() ? 
std::addressof(stats) : nullptr; } void swap(AdminExecResp& a, AdminExecResp& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); swap(a.stats_ref().value_unchecked(), b.stats_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void AdminExecResp::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t AdminExecResp::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t AdminExecResp::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t AdminExecResp::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void AdminExecResp::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t AdminExecResp::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t AdminExecResp::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t AdminExecResp::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< AdminExecResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< AdminExecResp, ::apache::thrift::type_class::structure, ::nebula::meta::cpp2::StatsItem>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< AdminExecResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< AdminExecResp, ::apache::thrift::type_class::structure, ::nebula::meta::cpp2::StatsItem>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void 
TccStructTraits<::nebula::storage::cpp2::TransLeaderReq>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::TransLeaderReq>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN TransLeaderReq::TransLeaderReq(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::cpp2::PartitionID part_id__arg, nebula::HostAddr new_leader__arg) : space_id(std::move(space_id__arg)), part_id(std::move(part_id__arg)), new_leader(std::move(new_leader__arg)) { __isset.space_id = true; __isset.part_id = true; __isset.new_leader = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void TransLeaderReq::__clear() { // clear all fields space_id = 0; part_id = 0; new_leader.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool TransLeaderReq::operator==(const TransLeaderReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } if (!(lhs.new_leader == rhs.new_leader)) { return false; } return true; } bool TransLeaderReq::operator<(const TransLeaderReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!(lhs.part_id == rhs.part_id)) { return lhs.part_id < rhs.part_id; } if (!(lhs.new_leader == rhs.new_leader)) { return lhs.new_leader < rhs.new_leader; } return false; } const nebula::HostAddr& TransLeaderReq::get_new_leader() const& { return new_leader; } nebula::HostAddr 
TransLeaderReq::get_new_leader() && { return std::move(new_leader); } void swap(TransLeaderReq& a, TransLeaderReq& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.part_id_ref().value(), b.part_id_ref().value()); swap(a.new_leader_ref().value(), b.new_leader_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void TransLeaderReq::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t TransLeaderReq::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t TransLeaderReq::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t TransLeaderReq::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void TransLeaderReq::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t TransLeaderReq::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t TransLeaderReq::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t TransLeaderReq::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< TransLeaderReq, ::apache::thrift::type_class::structure, nebula::HostAddr>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< TransLeaderReq, ::apache::thrift::type_class::structure, nebula::HostAddr>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::AddPartReq>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::AddPartReq>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), 
data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN AddPartReq::AddPartReq(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::cpp2::PartitionID part_id__arg, bool as_learner__arg, ::std::vector<nebula::HostAddr> peers__arg) : space_id(std::move(space_id__arg)), part_id(std::move(part_id__arg)), as_learner(std::move(as_learner__arg)), peers(std::move(peers__arg)) { __isset.space_id = true; __isset.part_id = true; __isset.as_learner = true; __isset.peers = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void AddPartReq::__clear() { // clear all fields space_id = 0; part_id = 0; as_learner = 0; peers.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool AddPartReq::operator==(const AddPartReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } if (!(lhs.as_learner == rhs.as_learner)) { return false; } if (!(lhs.peers == rhs.peers)) { return false; } return true; } bool AddPartReq::operator<(const AddPartReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!(lhs.part_id == rhs.part_id)) { return lhs.part_id < rhs.part_id; } if (!(lhs.as_learner == rhs.as_learner)) { return lhs.as_learner < rhs.as_learner; } if (!(lhs.peers == rhs.peers)) { return lhs.peers < rhs.peers; } return false; } const ::std::vector<nebula::HostAddr>& AddPartReq::get_peers() const& { return peers; } ::std::vector<nebula::HostAddr> AddPartReq::get_peers() && { return std::move(peers); } void swap(AddPartReq& a, AddPartReq& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); 
swap(a.part_id_ref().value(), b.part_id_ref().value()); swap(a.as_learner_ref().value(), b.as_learner_ref().value()); swap(a.peers_ref().value(), b.peers_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void AddPartReq::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t AddPartReq::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t AddPartReq::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t AddPartReq::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void AddPartReq::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t AddPartReq::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t AddPartReq::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t AddPartReq::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< AddPartReq, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector<nebula::HostAddr>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< AddPartReq, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector<nebula::HostAddr>>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::AddLearnerReq>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::AddLearnerReq>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, 
table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN AddLearnerReq::AddLearnerReq(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::cpp2::PartitionID part_id__arg, nebula::HostAddr learner__arg) : space_id(std::move(space_id__arg)), part_id(std::move(part_id__arg)), learner(std::move(learner__arg)) { __isset.space_id = true; __isset.part_id = true; __isset.learner = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void AddLearnerReq::__clear() { // clear all fields space_id = 0; part_id = 0; learner.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool AddLearnerReq::operator==(const AddLearnerReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } if (!(lhs.learner == rhs.learner)) { return false; } return true; } bool AddLearnerReq::operator<(const AddLearnerReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!(lhs.part_id == rhs.part_id)) { return lhs.part_id < rhs.part_id; } if (!(lhs.learner == rhs.learner)) { return lhs.learner < rhs.learner; } return false; } const nebula::HostAddr& AddLearnerReq::get_learner() const& { return learner; } nebula::HostAddr AddLearnerReq::get_learner() && { return std::move(learner); } void swap(AddLearnerReq& a, AddLearnerReq& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.part_id_ref().value(), b.part_id_ref().value()); swap(a.learner_ref().value(), b.learner_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void AddLearnerReq::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t 
AddLearnerReq::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t AddLearnerReq::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t AddLearnerReq::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void AddLearnerReq::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t AddLearnerReq::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t AddLearnerReq::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t AddLearnerReq::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< AddLearnerReq, ::apache::thrift::type_class::structure, nebula::HostAddr>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< AddLearnerReq, ::apache::thrift::type_class::structure, nebula::HostAddr>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::RemovePartReq>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::RemovePartReq>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN RemovePartReq::RemovePartReq(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::cpp2::PartitionID part_id__arg) : space_id(std::move(space_id__arg)), part_id(std::move(part_id__arg)) { __isset.space_id = true; __isset.part_id = true; } 
THRIFT_IGNORE_ISSET_USE_WARNING_END void RemovePartReq::__clear() { // clear all fields space_id = 0; part_id = 0; THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool RemovePartReq::operator==(const RemovePartReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } return true; } bool RemovePartReq::operator<(const RemovePartReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!(lhs.part_id == rhs.part_id)) { return lhs.part_id < rhs.part_id; } return false; } void swap(RemovePartReq& a, RemovePartReq& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.part_id_ref().value(), b.part_id_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void RemovePartReq::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t RemovePartReq::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t RemovePartReq::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t RemovePartReq::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void RemovePartReq::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t RemovePartReq::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t RemovePartReq::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t RemovePartReq::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::MemberChangeReq>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) 
noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::MemberChangeReq>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN MemberChangeReq::MemberChangeReq(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::cpp2::PartitionID part_id__arg, nebula::HostAddr peer__arg, bool add__arg) : space_id(std::move(space_id__arg)), part_id(std::move(part_id__arg)), peer(std::move(peer__arg)), add(std::move(add__arg)) { __isset.space_id = true; __isset.part_id = true; __isset.peer = true; __isset.add = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void MemberChangeReq::__clear() { // clear all fields space_id = 0; part_id = 0; peer.__clear(); add = 0; THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool MemberChangeReq::operator==(const MemberChangeReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } if (!(lhs.peer == rhs.peer)) { return false; } if (!(lhs.add == rhs.add)) { return false; } return true; } bool MemberChangeReq::operator<(const MemberChangeReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!(lhs.part_id == rhs.part_id)) { return lhs.part_id < rhs.part_id; } if (!(lhs.peer == rhs.peer)) { return lhs.peer < rhs.peer; } if (!(lhs.add == rhs.add)) { return lhs.add < rhs.add; } return false; } const nebula::HostAddr& MemberChangeReq::get_peer() const& { return peer; } nebula::HostAddr MemberChangeReq::get_peer() && { return std::move(peer); } void 
swap(MemberChangeReq& a, MemberChangeReq& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.part_id_ref().value(), b.part_id_ref().value()); swap(a.peer_ref().value(), b.peer_ref().value()); swap(a.add_ref().value(), b.add_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void MemberChangeReq::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t MemberChangeReq::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t MemberChangeReq::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t MemberChangeReq::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void MemberChangeReq::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t MemberChangeReq::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t MemberChangeReq::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t MemberChangeReq::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< MemberChangeReq, ::apache::thrift::type_class::structure, nebula::HostAddr>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< MemberChangeReq, ::apache::thrift::type_class::structure, nebula::HostAddr>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::CatchUpDataReq>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::CatchUpDataReq>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), 
data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN CatchUpDataReq::CatchUpDataReq(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::cpp2::PartitionID part_id__arg, nebula::HostAddr target__arg) : space_id(std::move(space_id__arg)), part_id(std::move(part_id__arg)), target(std::move(target__arg)) { __isset.space_id = true; __isset.part_id = true; __isset.target = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void CatchUpDataReq::__clear() { // clear all fields space_id = 0; part_id = 0; target.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool CatchUpDataReq::operator==(const CatchUpDataReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } if (!(lhs.target == rhs.target)) { return false; } return true; } bool CatchUpDataReq::operator<(const CatchUpDataReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!(lhs.part_id == rhs.part_id)) { return lhs.part_id < rhs.part_id; } if (!(lhs.target == rhs.target)) { return lhs.target < rhs.target; } return false; } const nebula::HostAddr& CatchUpDataReq::get_target() const& { return target; } nebula::HostAddr CatchUpDataReq::get_target() && { return std::move(target); } void swap(CatchUpDataReq& a, CatchUpDataReq& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.part_id_ref().value(), b.part_id_ref().value()); swap(a.target_ref().value(), b.target_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void 
CatchUpDataReq::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t CatchUpDataReq::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t CatchUpDataReq::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t CatchUpDataReq::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void CatchUpDataReq::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t CatchUpDataReq::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t CatchUpDataReq::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t CatchUpDataReq::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< CatchUpDataReq, ::apache::thrift::type_class::structure, nebula::HostAddr>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< CatchUpDataReq, ::apache::thrift::type_class::structure, nebula::HostAddr>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::GetLeaderReq>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::GetLeaderReq>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN GetLeaderReq::GetLeaderReq(apache::thrift::FragileConstructor) {} THRIFT_IGNORE_ISSET_USE_WARNING_END void GetLeaderReq::__clear() { // clear all fields } bool 
GetLeaderReq::operator==(const GetLeaderReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; return true; } bool GetLeaderReq::operator<(const GetLeaderReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; return false; } void swap(GetLeaderReq& a, GetLeaderReq& b) { using ::std::swap; (void)a; (void)b; } template void GetLeaderReq::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t GetLeaderReq::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t GetLeaderReq::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t GetLeaderReq::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void GetLeaderReq::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t GetLeaderReq::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t GetLeaderReq::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t GetLeaderReq::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::CreateCPRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::CreateCPRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN CreateCPRequest::CreateCPRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::std::string name__arg) : space_id(std::move(space_id__arg)), name(std::move(name__arg)) { 
__isset.space_id = true; __isset.name = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void CreateCPRequest::__clear() { // clear all fields space_id = 0; name = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool CreateCPRequest::operator==(const CreateCPRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.name, rhs.name)) { return false; } return true; } bool CreateCPRequest::operator<(const CreateCPRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.name, rhs.name)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.name, rhs.name); } return false; } void swap(CreateCPRequest& a, CreateCPRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.name_ref().value(), b.name_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void CreateCPRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t CreateCPRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t CreateCPRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t CreateCPRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void CreateCPRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t CreateCPRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t CreateCPRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t CreateCPRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) 
const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::DropCPRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::DropCPRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN DropCPRequest::DropCPRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::std::string name__arg) : space_id(std::move(space_id__arg)), name(std::move(name__arg)) { __isset.space_id = true; __isset.name = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void DropCPRequest::__clear() { // clear all fields space_id = 0; name = apache::thrift::StringTraits< std::string>::fromStringLiteral(""); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool DropCPRequest::operator==(const DropCPRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.name, rhs.name)) { return false; } return true; } bool DropCPRequest::operator<(const DropCPRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!apache::thrift::StringTraits<std::string>::isEqual(lhs.name, rhs.name)) { return apache::thrift::StringTraits<std::string>::isLess(lhs.name, rhs.name); } return false; } void swap(DropCPRequest& a, DropCPRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); 
swap(a.name_ref().value(), b.name_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void DropCPRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t DropCPRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t DropCPRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t DropCPRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void DropCPRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t DropCPRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t DropCPRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t DropCPRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::BlockingSignRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::BlockingSignRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN BlockingSignRequest::BlockingSignRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::storage::cpp2::EngineSignType sign__arg) : space_id(std::move(space_id__arg)), sign(std::move(sign__arg)) { __isset.space_id = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void BlockingSignRequest::__clear() { // clear all fields 
space_id = 0; sign = static_cast< ::nebula::storage::cpp2::EngineSignType>(0); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool BlockingSignRequest::operator==(const BlockingSignRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.sign == rhs.sign)) { return false; } return true; } bool BlockingSignRequest::operator<(const BlockingSignRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!(lhs.sign == rhs.sign)) { return lhs.sign < rhs.sign; } return false; } void swap(BlockingSignRequest& a, BlockingSignRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.sign_ref().value(), b.sign_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void BlockingSignRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t BlockingSignRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t BlockingSignRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t BlockingSignRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void BlockingSignRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t BlockingSignRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t BlockingSignRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t BlockingSignRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::GetLeaderPartsResp>::translateFieldName( folly::StringPiece _fname, int16_t& fid, 
apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::GetLeaderPartsResp>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN GetLeaderPartsResp::GetLeaderPartsResp(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg, std::unordered_map< ::nebula::cpp2::GraphSpaceID, ::std::vector< ::nebula::cpp2::PartitionID>> leader_parts__arg) : result(std::move(result__arg)), leader_parts(std::move(leader_parts__arg)) { __isset.leader_parts = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void GetLeaderPartsResp::__clear() { // clear all fields result.__clear(); leader_parts.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool GetLeaderPartsResp::operator==(const GetLeaderPartsResp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (!(lhs.leader_parts == rhs.leader_parts)) { return false; } return true; } const ::nebula::storage::cpp2::ResponseCommon& GetLeaderPartsResp::get_result() const& { return result; } ::nebula::storage::cpp2::ResponseCommon GetLeaderPartsResp::get_result() && { return std::move(result); } const std::unordered_map< ::nebula::cpp2::GraphSpaceID, ::std::vector< ::nebula::cpp2::PartitionID>>& GetLeaderPartsResp::get_leader_parts() const& { return leader_parts; } std::unordered_map< ::nebula::cpp2::GraphSpaceID, ::std::vector< ::nebula::cpp2::PartitionID>> GetLeaderPartsResp::get_leader_parts() && { return std::move(leader_parts); } void swap(GetLeaderPartsResp& a, GetLeaderPartsResp& b) { using ::std::swap; swap(a.result_ref().value(), 
b.result_ref().value()); swap(a.leader_parts_ref().value(), b.leader_parts_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void GetLeaderPartsResp::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t GetLeaderPartsResp::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t GetLeaderPartsResp::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t GetLeaderPartsResp::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void GetLeaderPartsResp::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t GetLeaderPartsResp::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t GetLeaderPartsResp::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t GetLeaderPartsResp::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< GetLeaderPartsResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< GetLeaderPartsResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::CheckPeersReq>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::CheckPeersReq>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // 
namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN CheckPeersReq::CheckPeersReq(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::nebula::cpp2::PartitionID part_id__arg, ::std::vector<nebula::HostAddr> peers__arg) : space_id(std::move(space_id__arg)), part_id(std::move(part_id__arg)), peers(std::move(peers__arg)) { __isset.space_id = true; __isset.part_id = true; __isset.peers = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void CheckPeersReq::__clear() { // clear all fields space_id = 0; part_id = 0; peers.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool CheckPeersReq::operator==(const CheckPeersReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.part_id == rhs.part_id)) { return false; } if (!(lhs.peers == rhs.peers)) { return false; } return true; } bool CheckPeersReq::operator<(const CheckPeersReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!(lhs.part_id == rhs.part_id)) { return lhs.part_id < rhs.part_id; } if (!(lhs.peers == rhs.peers)) { return lhs.peers < rhs.peers; } return false; } const ::std::vector<nebula::HostAddr>& CheckPeersReq::get_peers() const& { return peers; } ::std::vector<nebula::HostAddr> CheckPeersReq::get_peers() && { return std::move(peers); } void swap(CheckPeersReq& a, CheckPeersReq& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.part_id_ref().value(), b.part_id_ref().value()); swap(a.peers_ref().value(), b.peers_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void CheckPeersReq::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t CheckPeersReq::write<>(apache::thrift::BinaryProtocolWriter*) 
const; template uint32_t CheckPeersReq::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t CheckPeersReq::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void CheckPeersReq::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t CheckPeersReq::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t CheckPeersReq::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t CheckPeersReq::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< CheckPeersReq, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector<nebula::HostAddr>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< CheckPeersReq, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector<nebula::HostAddr>>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::RebuildIndexRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::RebuildIndexRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN RebuildIndexRequest::RebuildIndexRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, ::std::vector< ::nebula::cpp2::PartitionID> parts__arg, ::nebula::cpp2::IndexID index_id__arg) : 
space_id(std::move(space_id__arg)), parts(std::move(parts__arg)), index_id(std::move(index_id__arg)) { __isset.space_id = true; __isset.parts = true; __isset.index_id = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void RebuildIndexRequest::__clear() { // clear all fields space_id = 0; parts.clear(); index_id = 0; THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool RebuildIndexRequest::operator==(const RebuildIndexRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } if (!(lhs.index_id == rhs.index_id)) { return false; } return true; } bool RebuildIndexRequest::operator<(const RebuildIndexRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return lhs.space_id < rhs.space_id; } if (!(lhs.parts == rhs.parts)) { return lhs.parts < rhs.parts; } if (!(lhs.index_id == rhs.index_id)) { return lhs.index_id < rhs.index_id; } return false; } const ::std::vector< ::nebula::cpp2::PartitionID>& RebuildIndexRequest::get_parts() const& { return parts; } ::std::vector< ::nebula::cpp2::PartitionID> RebuildIndexRequest::get_parts() && { return std::move(parts); } void swap(RebuildIndexRequest& a, RebuildIndexRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); swap(a.index_id_ref().value(), b.index_id_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void RebuildIndexRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t RebuildIndexRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t RebuildIndexRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t 
RebuildIndexRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void RebuildIndexRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t RebuildIndexRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t RebuildIndexRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t RebuildIndexRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::CreateCPResp>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::CreateCPResp>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN CreateCPResp::CreateCPResp(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg, ::std::vector< ::nebula::cpp2::CheckpointInfo> info__arg) : result(std::move(result__arg)), info(std::move(info__arg)) { __isset.info = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void CreateCPResp::__clear() { // clear all fields result.__clear(); info.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool CreateCPResp::operator==(const CreateCPResp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (!(lhs.info == rhs.info)) { return false; } return true; } const ::nebula::storage::cpp2::ResponseCommon& CreateCPResp::get_result() const& { return result; } 
::nebula::storage::cpp2::ResponseCommon CreateCPResp::get_result() && { return std::move(result); } const ::std::vector< ::nebula::cpp2::CheckpointInfo>& CreateCPResp::get_info() const& { return info; } ::std::vector< ::nebula::cpp2::CheckpointInfo> CreateCPResp::get_info() && { return std::move(info); } void swap(CreateCPResp& a, CreateCPResp& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); swap(a.info_ref().value(), b.info_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void CreateCPResp::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t CreateCPResp::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t CreateCPResp::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t CreateCPResp::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void CreateCPResp::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t CreateCPResp::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t CreateCPResp::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t CreateCPResp::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< CreateCPResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< CreateCPResp, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::cpp2::CheckpointInfo>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< CreateCPResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); static_assert( 
::apache::thrift::detail::st::gen_check_nimble< CreateCPResp, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>, ::std::vector< ::nebula::cpp2::CheckpointInfo>>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::ListClusterInfoResp>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::ListClusterInfoResp>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN ListClusterInfoResp::ListClusterInfoResp(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::ResponseCommon result__arg, ::nebula::cpp2::DirInfo dir__arg) : result(std::move(result__arg)), dir(std::move(dir__arg)) { __isset.dir = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void ListClusterInfoResp::__clear() { // clear all fields result.__clear(); dir.__clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool ListClusterInfoResp::operator==(const ListClusterInfoResp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return false; } if (!(lhs.dir == rhs.dir)) { return false; } return true; } bool ListClusterInfoResp::operator<(const ListClusterInfoResp& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.result == rhs.result)) { return lhs.result < rhs.result; } if (!(lhs.dir == rhs.dir)) { return lhs.dir < rhs.dir; } return false; } const ::nebula::storage::cpp2::ResponseCommon& ListClusterInfoResp::get_result() const& { 
return result; } ::nebula::storage::cpp2::ResponseCommon ListClusterInfoResp::get_result() && { return std::move(result); } const ::nebula::cpp2::DirInfo& ListClusterInfoResp::get_dir() const& { return dir; } ::nebula::cpp2::DirInfo ListClusterInfoResp::get_dir() && { return std::move(dir); } void swap(ListClusterInfoResp& a, ListClusterInfoResp& b) { using ::std::swap; swap(a.result_ref().value(), b.result_ref().value()); swap(a.dir_ref().value(), b.dir_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void ListClusterInfoResp::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t ListClusterInfoResp::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t ListClusterInfoResp::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t ListClusterInfoResp::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void ListClusterInfoResp::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t ListClusterInfoResp::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t ListClusterInfoResp::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t ListClusterInfoResp::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< ListClusterInfoResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< ListClusterInfoResp, ::apache::thrift::type_class::structure, ::nebula::cpp2::DirInfo>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< ListClusterInfoResp, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::ResponseCommon>, "inconsistent use of nimble option"); static_assert( 
::apache::thrift::detail::st::gen_check_nimble< ListClusterInfoResp, ::apache::thrift::type_class::structure, ::nebula::cpp2::DirInfo>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::ListClusterInfoReq>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::ListClusterInfoReq>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN ListClusterInfoReq::ListClusterInfoReq(apache::thrift::FragileConstructor) {} THRIFT_IGNORE_ISSET_USE_WARNING_END void ListClusterInfoReq::__clear() { // clear all fields } bool ListClusterInfoReq::operator==(const ListClusterInfoReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; return true; } bool ListClusterInfoReq::operator<(const ListClusterInfoReq& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; return false; } void swap(ListClusterInfoReq& a, ListClusterInfoReq& b) { using ::std::swap; (void)a; (void)b; } template void ListClusterInfoReq::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t ListClusterInfoReq::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t ListClusterInfoReq::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t ListClusterInfoReq::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void ListClusterInfoReq::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t 
ListClusterInfoReq::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t ListClusterInfoReq::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t ListClusterInfoReq::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::AddAdminTaskRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::AddAdminTaskRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN AddAdminTaskRequest::AddAdminTaskRequest(apache::thrift::FragileConstructor, ::nebula::meta::cpp2::AdminCmd cmd__arg, int32_t job_id__arg, int32_t task_id__arg, ::nebula::storage::cpp2::TaskPara para__arg, int32_t concurrency__arg) : cmd(std::move(cmd__arg)), job_id(std::move(job_id__arg)), task_id(std::move(task_id__arg)), para(std::move(para__arg)), concurrency(std::move(concurrency__arg)) { __isset.cmd = true; __isset.job_id = true; __isset.task_id = true; __isset.para = true; __isset.concurrency = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void AddAdminTaskRequest::__clear() { // clear all fields cmd = ::nebula::meta::cpp2::AdminCmd::COMPACT; job_id = 0; task_id = 0; para.__clear(); concurrency = 0; THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool AddAdminTaskRequest::operator==(const AddAdminTaskRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.cmd == rhs.cmd)) { return false; } if 
(!(lhs.job_id == rhs.job_id)) { return false; } if (!(lhs.task_id == rhs.task_id)) { return false; } if (!(lhs.para == rhs.para)) { return false; } if (lhs.concurrency_ref() != rhs.concurrency_ref()) { return false; } return true; } bool AddAdminTaskRequest::operator<(const AddAdminTaskRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.cmd == rhs.cmd)) { return lhs.cmd < rhs.cmd; } if (!(lhs.job_id == rhs.job_id)) { return lhs.job_id < rhs.job_id; } if (!(lhs.task_id == rhs.task_id)) { return lhs.task_id < rhs.task_id; } if (!(lhs.para == rhs.para)) { return lhs.para < rhs.para; } if (lhs.concurrency_ref() != rhs.concurrency_ref()) { return lhs.concurrency_ref() < rhs.concurrency_ref(); } return false; } const ::nebula::storage::cpp2::TaskPara& AddAdminTaskRequest::get_para() const& { return para; } ::nebula::storage::cpp2::TaskPara AddAdminTaskRequest::get_para() && { return std::move(para); } void swap(AddAdminTaskRequest& a, AddAdminTaskRequest& b) { using ::std::swap; swap(a.cmd_ref().value(), b.cmd_ref().value()); swap(a.job_id_ref().value(), b.job_id_ref().value()); swap(a.task_id_ref().value(), b.task_id_ref().value()); swap(a.para_ref().value(), b.para_ref().value()); swap(a.concurrency_ref().value_unchecked(), b.concurrency_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void AddAdminTaskRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t AddAdminTaskRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t AddAdminTaskRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t AddAdminTaskRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void AddAdminTaskRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t AddAdminTaskRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template 
uint32_t AddAdminTaskRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t AddAdminTaskRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< AddAdminTaskRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::TaskPara>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< AddAdminTaskRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::TaskPara>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::StopAdminTaskRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::StopAdminTaskRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN StopAdminTaskRequest::StopAdminTaskRequest(apache::thrift::FragileConstructor, int32_t job_id__arg, int32_t task_id__arg) : job_id(std::move(job_id__arg)), task_id(std::move(task_id__arg)) { __isset.job_id = true; __isset.task_id = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void StopAdminTaskRequest::__clear() { // clear all fields job_id = 0; task_id = 0; THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool StopAdminTaskRequest::operator==(const StopAdminTaskRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.job_id == rhs.job_id)) { return false; } if (!(lhs.task_id == rhs.task_id)) { return 
false; } return true; } bool StopAdminTaskRequest::operator<(const StopAdminTaskRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.job_id == rhs.job_id)) { return lhs.job_id < rhs.job_id; } if (!(lhs.task_id == rhs.task_id)) { return lhs.task_id < rhs.task_id; } return false; } void swap(StopAdminTaskRequest& a, StopAdminTaskRequest& b) { using ::std::swap; swap(a.job_id_ref().value(), b.job_id_ref().value()); swap(a.task_id_ref().value(), b.task_id_ref().value()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void StopAdminTaskRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t StopAdminTaskRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t StopAdminTaskRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t StopAdminTaskRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void StopAdminTaskRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t StopAdminTaskRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t StopAdminTaskRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t StopAdminTaskRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::InternalTxnRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::InternalTxnRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // 
namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN InternalTxnRequest::InternalTxnRequest() : txn_id(0) {} THRIFT_IGNORE_ISSET_USE_WARNING_END InternalTxnRequest::~InternalTxnRequest() {} THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN InternalTxnRequest::InternalTxnRequest(apache::thrift::FragileConstructor, int64_t txn_id__arg, ::std::map< ::nebula::cpp2::PartitionID, int64_t> term_of_parts__arg, ::nebula::storage::cpp2::AddEdgesRequest add_edge_req__arg, ::nebula::storage::cpp2::UpdateEdgeRequest upd_edge_req__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<int64_t>> edge_ver__arg) : txn_id(std::move(txn_id__arg)), term_of_parts(std::move(term_of_parts__arg)), add_edge_req(std::move(add_edge_req__arg)), upd_edge_req(std::move(upd_edge_req__arg)), edge_ver(std::move(edge_ver__arg)) { __isset.txn_id = true; __isset.term_of_parts = true; __isset.add_edge_req = true; __isset.upd_edge_req = true; __isset.edge_ver = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void InternalTxnRequest::__clear() { // clear all fields txn_id = 0; term_of_parts.clear(); add_edge_req.__clear(); upd_edge_req.__clear(); edge_ver.clear(); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool InternalTxnRequest::operator==(const InternalTxnRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.txn_id == rhs.txn_id)) { return false; } if (!(lhs.term_of_parts == rhs.term_of_parts)) { return false; } if (lhs.add_edge_req_ref() != rhs.add_edge_req_ref()) { return false; } if (lhs.upd_edge_req_ref() != rhs.upd_edge_req_ref()) { return false; } if (lhs.edge_ver_ref() != rhs.edge_ver_ref()) { return false; } return true; } const ::std::map< ::nebula::cpp2::PartitionID, int64_t>& InternalTxnRequest::get_term_of_parts() const& { return term_of_parts; } ::std::map< ::nebula::cpp2::PartitionID, int64_t> InternalTxnRequest::get_term_of_parts() && { return 
std::move(term_of_parts); } const ::nebula::storage::cpp2::AddEdgesRequest* InternalTxnRequest::get_add_edge_req() const& { return add_edge_req_ref().has_value() ? std::addressof(add_edge_req) : nullptr; } ::nebula::storage::cpp2::AddEdgesRequest* InternalTxnRequest::get_add_edge_req() & { return add_edge_req_ref().has_value() ? std::addressof(add_edge_req) : nullptr; } const ::nebula::storage::cpp2::UpdateEdgeRequest* InternalTxnRequest::get_upd_edge_req() const& { return upd_edge_req_ref().has_value() ? std::addressof(upd_edge_req) : nullptr; } ::nebula::storage::cpp2::UpdateEdgeRequest* InternalTxnRequest::get_upd_edge_req() & { return upd_edge_req_ref().has_value() ? std::addressof(upd_edge_req) : nullptr; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<int64_t>>* InternalTxnRequest::get_edge_ver() const& { return edge_ver_ref().has_value() ? std::addressof(edge_ver) : nullptr; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector<int64_t>>* InternalTxnRequest::get_edge_ver() & { return edge_ver_ref().has_value() ? 
std::addressof(edge_ver) : nullptr; } void swap(InternalTxnRequest& a, InternalTxnRequest& b) { using ::std::swap; swap(a.txn_id_ref().value(), b.txn_id_ref().value()); swap(a.term_of_parts_ref().value(), b.term_of_parts_ref().value()); swap(a.add_edge_req_ref().value_unchecked(), b.add_edge_req_ref().value_unchecked()); swap(a.upd_edge_req_ref().value_unchecked(), b.upd_edge_req_ref().value_unchecked()); swap(a.edge_ver_ref().value_unchecked(), b.edge_ver_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void InternalTxnRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t InternalTxnRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t InternalTxnRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t InternalTxnRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void InternalTxnRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t InternalTxnRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t InternalTxnRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t InternalTxnRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< InternalTxnRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::AddEdgesRequest>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_json< InternalTxnRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::UpdateEdgeRequest>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< InternalTxnRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::AddEdgesRequest>, "inconsistent use of nimble option"); 
static_assert( ::apache::thrift::detail::st::gen_check_nimble< InternalTxnRequest, ::apache::thrift::type_class::structure, ::nebula::storage::cpp2::UpdateEdgeRequest>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::ChainAddEdgesRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::ChainAddEdgesRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache namespace nebula { namespace storage { namespace cpp2 { THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN ChainAddEdgesRequest::ChainAddEdgesRequest() : space_id(0), if_not_exists(0), term(0), edge_version(0) {} THRIFT_IGNORE_ISSET_USE_WARNING_END ChainAddEdgesRequest::~ChainAddEdgesRequest() {} THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN ChainAddEdgesRequest::ChainAddEdgesRequest(apache::thrift::FragileConstructor, ::nebula::cpp2::GraphSpaceID space_id__arg, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewEdge>> parts__arg, ::std::vector<::std::string> prop_names__arg, bool if_not_exists__arg, int64_t term__arg, int64_t edge_version__arg) : space_id(std::move(space_id__arg)), parts(std::move(parts__arg)), prop_names(std::move(prop_names__arg)), if_not_exists(std::move(if_not_exists__arg)), term(std::move(term__arg)), edge_version(std::move(edge_version__arg)) { __isset.space_id = true; __isset.parts = true; __isset.prop_names = true; __isset.if_not_exists = true; __isset.term = true; __isset.edge_version = true; } THRIFT_IGNORE_ISSET_USE_WARNING_END void ChainAddEdgesRequest::__clear() { // clear all fields 
space_id = 0; parts.clear(); prop_names.clear(); if_not_exists = 0; term = 0; edge_version = 0; THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN __isset = {}; THRIFT_IGNORE_ISSET_USE_WARNING_END } bool ChainAddEdgesRequest::operator==(const ChainAddEdgesRequest& rhs) const { (void)rhs; auto& lhs = *this; (void)lhs; if (!(lhs.space_id == rhs.space_id)) { return false; } if (!(lhs.parts == rhs.parts)) { return false; } if (!(lhs.prop_names == rhs.prop_names)) { return false; } if (!(lhs.if_not_exists == rhs.if_not_exists)) { return false; } if (!(lhs.term == rhs.term)) { return false; } if (lhs.edge_version_ref() != rhs.edge_version_ref()) { return false; } return true; } const std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewEdge>>& ChainAddEdgesRequest::get_parts() const& { return parts; } std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewEdge>> ChainAddEdgesRequest::get_parts() && { return std::move(parts); } const ::std::vector<::std::string>& ChainAddEdgesRequest::get_prop_names() const& { return prop_names; } ::std::vector<::std::string> ChainAddEdgesRequest::get_prop_names() && { return std::move(prop_names); } void swap(ChainAddEdgesRequest& a, ChainAddEdgesRequest& b) { using ::std::swap; swap(a.space_id_ref().value(), b.space_id_ref().value()); swap(a.parts_ref().value(), b.parts_ref().value()); swap(a.prop_names_ref().value(), b.prop_names_ref().value()); swap(a.if_not_exists_ref().value(), b.if_not_exists_ref().value()); swap(a.term_ref().value(), b.term_ref().value()); swap(a.edge_version_ref().value_unchecked(), b.edge_version_ref().value_unchecked()); THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN swap(a.__isset, b.__isset); THRIFT_IGNORE_ISSET_USE_WARNING_END } template void ChainAddEdgesRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*); template uint32_t ChainAddEdgesRequest::write<>(apache::thrift::BinaryProtocolWriter*) const; template uint32_t 
ChainAddEdgesRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const; template uint32_t ChainAddEdgesRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const; template void ChainAddEdgesRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*); template uint32_t ChainAddEdgesRequest::write<>(apache::thrift::CompactProtocolWriter*) const; template uint32_t ChainAddEdgesRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const; template uint32_t ChainAddEdgesRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const; static_assert( ::apache::thrift::detail::st::gen_check_json< ChainAddEdgesRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewEdge>>>, "inconsistent use of json option"); static_assert( ::apache::thrift::detail::st::gen_check_nimble< ChainAddEdgesRequest, ::apache::thrift::type_class::map<::apache::thrift::type_class::integral, ::apache::thrift::type_class::list<::apache::thrift::type_class::structure>>, std::unordered_map< ::nebula::cpp2::PartitionID, ::std::vector< ::nebula::storage::cpp2::NewEdge>>>, "inconsistent use of nimble option"); }}} // nebula::storage::cpp2 namespace apache { namespace thrift { namespace detail { void TccStructTraits<::nebula::storage::cpp2::ChainUpdateEdgeRequest>::translateFieldName( folly::StringPiece _fname, int16_t& fid, apache::thrift::protocol::TType& _ftype) noexcept { using data = apache::thrift::TStructDataStorage<::nebula::storage::cpp2::ChainUpdateEdgeRequest>; static const st::translate_field_name_table table{ data::fields_size, data::fields_names.data(), data::fields_ids.data(), data::fields_types.data()}; st::translate_field_name(_fname, fid, _ftype, table); } } // namespace detail } // namespace thrift } // namespace apache 
// NOTE(review): Thrift-compiler-generated code. Do not hand-edit logic here —
// regenerate from the .thrift IDL instead. Comments below are review notes only.
namespace nebula { namespace storage { namespace cpp2 {

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
ChainUpdateEdgeRequest::ChainUpdateEdgeRequest() : term(0), edge_version(0), space_id(0) {}
THRIFT_IGNORE_ISSET_USE_WARNING_END
ChainUpdateEdgeRequest::~ChainUpdateEdgeRequest() {}

THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
// "Fragile" constructor: positional, takes every field by value and moves it in.
// NOTE(review): __isset.parts is not set here while the other four fields are —
// presumably intentional generator behavior, but confirm against the IDL.
ChainUpdateEdgeRequest::ChainUpdateEdgeRequest(apache::thrift::FragileConstructor, ::nebula::storage::cpp2::UpdateEdgeRequest update_edge_request__arg, int64_t term__arg, int64_t edge_version__arg, ::nebula::cpp2::GraphSpaceID space_id__arg, ::std::vector< ::nebula::cpp2::PartitionID> parts__arg) :
    update_edge_request(std::move(update_edge_request__arg)),
    term(std::move(term__arg)),
    edge_version(std::move(edge_version__arg)),
    space_id(std::move(space_id__arg)),
    parts(std::move(parts__arg)) {
  __isset.update_edge_request = true;
  __isset.term = true;
  __isset.edge_version = true;
  __isset.space_id = true;
}
THRIFT_IGNORE_ISSET_USE_WARNING_END

// Resets every field to its default and clears the __isset bit set.
void ChainUpdateEdgeRequest::__clear() {
  // clear all fields
  update_edge_request.__clear();
  term = 0;
  edge_version = 0;
  space_id = 0;
  parts.clear();
THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  __isset = {};
THRIFT_IGNORE_ISSET_USE_WARNING_END
}

bool ChainUpdateEdgeRequest::operator==(const ChainUpdateEdgeRequest& rhs) const {
  (void)rhs;
  auto& lhs = *this;
  (void)lhs;
  if (!(lhs.update_edge_request == rhs.update_edge_request)) {
    return false;
  }
  if (!(lhs.term == rhs.term)) {
    return false;
  }
  // Optional field: *_ref() comparison includes the "is set" state.
  if (lhs.edge_version_ref() != rhs.edge_version_ref()) {
    return false;
  }
  if (!(lhs.space_id == rhs.space_id)) {
    return false;
  }
  if (!(lhs.parts == rhs.parts)) {
    return false;
  }
  return true;
}

const ::nebula::storage::cpp2::UpdateEdgeRequest& ChainUpdateEdgeRequest::get_update_edge_request() const& {
  return update_edge_request;
}

::nebula::storage::cpp2::UpdateEdgeRequest ChainUpdateEdgeRequest::get_update_edge_request() && {
  return std::move(update_edge_request);
}

const ::std::vector< ::nebula::cpp2::PartitionID>& ChainUpdateEdgeRequest::get_parts() const& {
  return parts;
}

::std::vector< ::nebula::cpp2::PartitionID> ChainUpdateEdgeRequest::get_parts() && {
  return std::move(parts);
}

void swap(ChainUpdateEdgeRequest& a, ChainUpdateEdgeRequest& b) {
  using ::std::swap;
  swap(a.update_edge_request_ref().value(), b.update_edge_request_ref().value());
  swap(a.term_ref().value(), b.term_ref().value());
  // Optional field: swap raw storage; the __isset swap below keeps state consistent.
  swap(a.edge_version_ref().value_unchecked(), b.edge_version_ref().value_unchecked());
  swap(a.space_id_ref().value(), b.space_id_ref().value());
  swap(a.parts_ref().value(), b.parts_ref().value());
THRIFT_IGNORE_ISSET_USE_WARNING_BEGIN
  swap(a.__isset, b.__isset);
THRIFT_IGNORE_ISSET_USE_WARNING_END
}

// Explicit serializer instantiations for Binary and Compact protocols.
template void ChainUpdateEdgeRequest::readNoXfer<>(apache::thrift::BinaryProtocolReader*);
template uint32_t ChainUpdateEdgeRequest::write<>(apache::thrift::BinaryProtocolWriter*) const;
template uint32_t ChainUpdateEdgeRequest::serializedSize<>(apache::thrift::BinaryProtocolWriter const*) const;
template uint32_t ChainUpdateEdgeRequest::serializedSizeZC<>(apache::thrift::BinaryProtocolWriter const*) const;
template void ChainUpdateEdgeRequest::readNoXfer<>(apache::thrift::CompactProtocolReader*);
template uint32_t ChainUpdateEdgeRequest::write<>(apache::thrift::CompactProtocolWriter*) const;
template uint32_t ChainUpdateEdgeRequest::serializedSize<>(apache::thrift::CompactProtocolWriter const*) const;
template uint32_t ChainUpdateEdgeRequest::serializedSizeZC<>(apache::thrift::CompactProtocolWriter const*) const;

// Compile-time guards that every TU agrees on the json/nimble codegen options
// for the `update_edge_request` field's type.
static_assert(
    ::apache::thrift::detail::st::gen_check_json<
        ChainUpdateEdgeRequest,
        ::apache::thrift::type_class::structure,
        ::nebula::storage::cpp2::UpdateEdgeRequest>,
    "inconsistent use of json option");
static_assert(
    ::apache::thrift::detail::st::gen_check_nimble<
        ChainUpdateEdgeRequest,
        ::apache::thrift::type_class::structure,
        ::nebula::storage::cpp2::UpdateEdgeRequest>,
    "inconsistent use of nimble option");

}}} // nebula::storage::cpp2
//===-- Unittests for x86 long double -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "utils/FPUtil/FPBits.h"
#include "utils/UnitTest/Test.h"

#include <math.h>

// Bit-level view of the x87 80-bit extended format: sign, 15-bit exponent, an
// EXPLICIT integer ("implicit") bit, and a 63-bit mantissa.
using FPBits = __llvm_libc::fputil::FPBits<long double>;

// Exhaustively cross-checks FPBits::isNaN() against the compiler's isnan()
// across combinations of exponent / integer-bit / mantissa.
// NOTE(review): each loop walks 1,000,000 mantissa patterns — five loops total;
// consider whether a sparser sweep would keep coverage with less runtime.
TEST(X86LongDoubleTest, isNaN) {
  // In the nan checks below, we use the macro isnan from math.h to ensure that
  // a number is actually a NaN. The isnan macro resolves to the compiler
  // builtin function. Hence, matching LLVM-libc's notion of NaN with the
  // isnan result ensures that LLVM-libc's behavior matches the compiler's
  // behavior.
  FPBits bits(0.0l);

  bits.exponent = FPBits::maxExponent;
  for (unsigned int i = 0; i < 1000000; ++i) {
    // If exponent has the max value and the implicit bit is 0,
    // then the number is a NaN for all values of mantissa.
    // (These are the x87 "pseudo-NaN"/"pseudo-infinity" encodings.)
    bits.mantissa = i;
    long double nan = bits;
    ASSERT_NE(isnan(nan), 0);
    ASSERT_TRUE(bits.isNaN());
  }

  bits.implicitBit = 1;
  for (unsigned int i = 1; i < 1000000; ++i) {
    // If exponent has the max value and the implicit bit is 1,
    // then the number is a NaN for all non-zero values of mantissa.
    // Note the initial value of |i| of 1 to avoid a zero mantissa
    // (which would encode infinity, not NaN).
    bits.mantissa = i;
    long double nan = bits;
    ASSERT_NE(isnan(nan), 0);
    ASSERT_TRUE(bits.isNaN());
  }

  bits.exponent = 1;
  bits.implicitBit = 0;
  for (unsigned int i = 0; i < 1000000; ++i) {
    // If exponent is non-zero and also not max, and the implicit bit is 0,
    // then the number is a NaN for all values of mantissa.
    // (x87 "unnormal" encodings — invalid operands on modern hardware.)
    bits.mantissa = i;
    long double nan = bits;
    ASSERT_NE(isnan(nan), 0);
    ASSERT_TRUE(bits.isNaN());
  }

  bits.exponent = 1;
  bits.implicitBit = 1;
  for (unsigned int i = 0; i < 1000000; ++i) {
    // If exponent is non-zero and also not max, and the implicit bit is 1,
    // then the number is normal value for all values of mantissa.
    bits.mantissa = i;
    long double valid = bits;
    ASSERT_EQ(isnan(valid), 0);
    ASSERT_FALSE(bits.isNaN());
  }

  bits.exponent = 0;
  bits.implicitBit = 1;
  for (unsigned int i = 0; i < 1000000; ++i) {
    // If exponent is zero, then the number is a valid but denormal value.
    bits.mantissa = i;
    long double valid = bits;
    ASSERT_EQ(isnan(valid), 0);
    ASSERT_FALSE(bits.isNaN());
  }

  bits.exponent = 0;
  bits.implicitBit = 0;
  for (unsigned int i = 0; i < 1000000; ++i) {
    // If exponent is zero, then the number is a valid but denormal value.
    bits.mantissa = i;
    long double valid = bits;
    ASSERT_EQ(isnan(valid), 0);
    ASSERT_FALSE(bits.isNaN());
  }
}
#include <iostream> using namespace std; int main() { //deklaracja zmiennych int liczba; unsigned long long silnia = 1; //unsigned long long bo potrzeba przechować wartość większą niż int //odbieranie wartości o użytkownika do { cout << "Podaj liczbe nie wieksza niz 20" << endl; cin >> liczba; } while (liczba > 20); //obliczanie silni for (int i = 1; i <= liczba; i++) { silnia*=i; } //wyświetlanie wyniku cout << liczba << "! = " << silnia; return 0; }
// Copyright (c) 2010-2018 The AriA Core developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "crypto/sha1.h" #include "crypto/common.h" #include <string.h> // Internal implementation code. namespace { /// Internal SHA-1 implementation. namespace sha1 { /** One round of SHA-1. */ void inline Round(uint32_t a, uint32_t& b, uint32_t c, uint32_t d, uint32_t& e, uint32_t f, uint32_t k, uint32_t w) { e += ((a << 5) | (a >> 27)) + f + k + w; b = (b << 30) | (b >> 2); } uint32_t inline f1(uint32_t b, uint32_t c, uint32_t d) { return d ^ (b & (c ^ d)); } uint32_t inline f2(uint32_t b, uint32_t c, uint32_t d) { return b ^ c ^ d; } uint32_t inline f3(uint32_t b, uint32_t c, uint32_t d) { return (b & c) | (d & (b | c)); } uint32_t inline left(uint32_t x) { return (x << 1) | (x >> 31); } /** Initialize SHA-1 state. */ void inline Initialize(uint32_t* s) { s[0] = 0x67452301ul; s[1] = 0xEFCDAB89ul; s[2] = 0x98BADCFEul; s[3] = 0x10325476ul; s[4] = 0xC3D2E1F0ul; } const uint32_t k1 = 0x5A827999ul; const uint32_t k2 = 0x6ED9EBA1ul; const uint32_t k3 = 0x8F1BBCDCul; const uint32_t k4 = 0xCA62C1D6ul; /** Perform a SHA-1 transformation, processing a 64-byte chunk. 
 */
// Fully unrolled compression function: 80 rounds over one 64-byte chunk.
// The 16-word message schedule w0..w15 lives in registers and is expanded
// in place (w[t] = rotl1(w[t] ^ w[t-3] ^ w[t-8] ^ w[t-14]), indices mod 16).
void Transform(uint32_t* s, const unsigned char* chunk)
{
    uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4];
    uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;

    // Rounds 0-15 (f1/k1): load the chunk big-endian while consuming it.
    Round(a, b, c, d, e, f1(b, c, d), k1, w0 = ReadBE32(chunk + 0));
    Round(e, a, b, c, d, f1(a, b, c), k1, w1 = ReadBE32(chunk + 4));
    Round(d, e, a, b, c, f1(e, a, b), k1, w2 = ReadBE32(chunk + 8));
    Round(c, d, e, a, b, f1(d, e, a), k1, w3 = ReadBE32(chunk + 12));
    Round(b, c, d, e, a, f1(c, d, e), k1, w4 = ReadBE32(chunk + 16));
    Round(a, b, c, d, e, f1(b, c, d), k1, w5 = ReadBE32(chunk + 20));
    Round(e, a, b, c, d, f1(a, b, c), k1, w6 = ReadBE32(chunk + 24));
    Round(d, e, a, b, c, f1(e, a, b), k1, w7 = ReadBE32(chunk + 28));
    Round(c, d, e, a, b, f1(d, e, a), k1, w8 = ReadBE32(chunk + 32));
    Round(b, c, d, e, a, f1(c, d, e), k1, w9 = ReadBE32(chunk + 36));
    Round(a, b, c, d, e, f1(b, c, d), k1, w10 = ReadBE32(chunk + 40));
    Round(e, a, b, c, d, f1(a, b, c), k1, w11 = ReadBE32(chunk + 44));
    Round(d, e, a, b, c, f1(e, a, b), k1, w12 = ReadBE32(chunk + 48));
    Round(c, d, e, a, b, f1(d, e, a), k1, w13 = ReadBE32(chunk + 52));
    Round(b, c, d, e, a, f1(c, d, e), k1, w14 = ReadBE32(chunk + 56));
    Round(a, b, c, d, e, f1(b, c, d), k1, w15 = ReadBE32(chunk + 60));

    // Rounds 16-19 (f1/k1): first expanded schedule words.
    Round(e, a, b, c, d, f1(a, b, c), k1, w0 = left(w0 ^ w13 ^ w8 ^ w2));
    Round(d, e, a, b, c, f1(e, a, b), k1, w1 = left(w1 ^ w14 ^ w9 ^ w3));
    Round(c, d, e, a, b, f1(d, e, a), k1, w2 = left(w2 ^ w15 ^ w10 ^ w4));
    Round(b, c, d, e, a, f1(c, d, e), k1, w3 = left(w3 ^ w0 ^ w11 ^ w5));

    // Rounds 20-39 (f2/k2).
    Round(a, b, c, d, e, f2(b, c, d), k2, w4 = left(w4 ^ w1 ^ w12 ^ w6));
    Round(e, a, b, c, d, f2(a, b, c), k2, w5 = left(w5 ^ w2 ^ w13 ^ w7));
    Round(d, e, a, b, c, f2(e, a, b), k2, w6 = left(w6 ^ w3 ^ w14 ^ w8));
    Round(c, d, e, a, b, f2(d, e, a), k2, w7 = left(w7 ^ w4 ^ w15 ^ w9));
    Round(b, c, d, e, a, f2(c, d, e), k2, w8 = left(w8 ^ w5 ^ w0 ^ w10));
    Round(a, b, c, d, e, f2(b, c, d), k2, w9 = left(w9 ^ w6 ^ w1 ^ w11));
    Round(e, a, b, c, d, f2(a, b, c), k2, w10 = left(w10 ^ w7 ^ w2 ^ w12));
    Round(d, e, a, b, c, f2(e, a, b), k2, w11 = left(w11 ^ w8 ^ w3 ^ w13));
    Round(c, d, e, a, b, f2(d, e, a), k2, w12 = left(w12 ^ w9 ^ w4 ^ w14));
    Round(b, c, d, e, a, f2(c, d, e), k2, w13 = left(w13 ^ w10 ^ w5 ^ w15));
    Round(a, b, c, d, e, f2(b, c, d), k2, w14 = left(w14 ^ w11 ^ w6 ^ w0));
    Round(e, a, b, c, d, f2(a, b, c), k2, w15 = left(w15 ^ w12 ^ w7 ^ w1));
    Round(d, e, a, b, c, f2(e, a, b), k2, w0 = left(w0 ^ w13 ^ w8 ^ w2));
    Round(c, d, e, a, b, f2(d, e, a), k2, w1 = left(w1 ^ w14 ^ w9 ^ w3));
    Round(b, c, d, e, a, f2(c, d, e), k2, w2 = left(w2 ^ w15 ^ w10 ^ w4));
    Round(a, b, c, d, e, f2(b, c, d), k2, w3 = left(w3 ^ w0 ^ w11 ^ w5));
    Round(e, a, b, c, d, f2(a, b, c), k2, w4 = left(w4 ^ w1 ^ w12 ^ w6));
    Round(d, e, a, b, c, f2(e, a, b), k2, w5 = left(w5 ^ w2 ^ w13 ^ w7));
    Round(c, d, e, a, b, f2(d, e, a), k2, w6 = left(w6 ^ w3 ^ w14 ^ w8));
    Round(b, c, d, e, a, f2(c, d, e), k2, w7 = left(w7 ^ w4 ^ w15 ^ w9));

    // Rounds 40-59 (f3/k3).
    Round(a, b, c, d, e, f3(b, c, d), k3, w8 = left(w8 ^ w5 ^ w0 ^ w10));
    Round(e, a, b, c, d, f3(a, b, c), k3, w9 = left(w9 ^ w6 ^ w1 ^ w11));
    Round(d, e, a, b, c, f3(e, a, b), k3, w10 = left(w10 ^ w7 ^ w2 ^ w12));
    Round(c, d, e, a, b, f3(d, e, a), k3, w11 = left(w11 ^ w8 ^ w3 ^ w13));
    Round(b, c, d, e, a, f3(c, d, e), k3, w12 = left(w12 ^ w9 ^ w4 ^ w14));
    Round(a, b, c, d, e, f3(b, c, d), k3, w13 = left(w13 ^ w10 ^ w5 ^ w15));
    Round(e, a, b, c, d, f3(a, b, c), k3, w14 = left(w14 ^ w11 ^ w6 ^ w0));
    Round(d, e, a, b, c, f3(e, a, b), k3, w15 = left(w15 ^ w12 ^ w7 ^ w1));
    Round(c, d, e, a, b, f3(d, e, a), k3, w0 = left(w0 ^ w13 ^ w8 ^ w2));
    Round(b, c, d, e, a, f3(c, d, e), k3, w1 = left(w1 ^ w14 ^ w9 ^ w3));
    Round(a, b, c, d, e, f3(b, c, d), k3, w2 = left(w2 ^ w15 ^ w10 ^ w4));
    Round(e, a, b, c, d, f3(a, b, c), k3, w3 = left(w3 ^ w0 ^ w11 ^ w5));
    Round(d, e, a, b, c, f3(e, a, b), k3, w4 = left(w4 ^ w1 ^ w12 ^ w6));
    Round(c, d, e, a, b, f3(d, e, a), k3, w5 = left(w5 ^ w2 ^ w13 ^ w7));
    Round(b, c, d, e, a, f3(c, d, e), k3, w6 = left(w6 ^ w3 ^ w14 ^ w8));
    Round(a, b, c, d, e, f3(b, c, d), k3, w7 = left(w7 ^ w4 ^ w15 ^ w9));
    Round(e, a, b, c, d, f3(a, b, c), k3, w8 = left(w8 ^ w5 ^ w0 ^ w10));
    Round(d, e, a, b, c, f3(e, a, b), k3, w9 = left(w9 ^ w6 ^ w1 ^ w11));
    Round(c, d, e, a, b, f3(d, e, a), k3, w10 = left(w10 ^ w7 ^ w2 ^ w12));
    Round(b, c, d, e, a, f3(c, d, e), k3, w11 = left(w11 ^ w8 ^ w3 ^ w13));

    // Rounds 60-79 (f2/k4).
    Round(a, b, c, d, e, f2(b, c, d), k4, w12 = left(w12 ^ w9 ^ w4 ^ w14));
    Round(e, a, b, c, d, f2(a, b, c), k4, w13 = left(w13 ^ w10 ^ w5 ^ w15));
    Round(d, e, a, b, c, f2(e, a, b), k4, w14 = left(w14 ^ w11 ^ w6 ^ w0));
    Round(c, d, e, a, b, f2(d, e, a), k4, w15 = left(w15 ^ w12 ^ w7 ^ w1));
    Round(b, c, d, e, a, f2(c, d, e), k4, w0 = left(w0 ^ w13 ^ w8 ^ w2));
    Round(a, b, c, d, e, f2(b, c, d), k4, w1 = left(w1 ^ w14 ^ w9 ^ w3));
    Round(e, a, b, c, d, f2(a, b, c), k4, w2 = left(w2 ^ w15 ^ w10 ^ w4));
    Round(d, e, a, b, c, f2(e, a, b), k4, w3 = left(w3 ^ w0 ^ w11 ^ w5));
    Round(c, d, e, a, b, f2(d, e, a), k4, w4 = left(w4 ^ w1 ^ w12 ^ w6));
    Round(b, c, d, e, a, f2(c, d, e), k4, w5 = left(w5 ^ w2 ^ w13 ^ w7));
    Round(a, b, c, d, e, f2(b, c, d), k4, w6 = left(w6 ^ w3 ^ w14 ^ w8));
    Round(e, a, b, c, d, f2(a, b, c), k4, w7 = left(w7 ^ w4 ^ w15 ^ w9));
    Round(d, e, a, b, c, f2(e, a, b), k4, w8 = left(w8 ^ w5 ^ w0 ^ w10));
    Round(c, d, e, a, b, f2(d, e, a), k4, w9 = left(w9 ^ w6 ^ w1 ^ w11));
    Round(b, c, d, e, a, f2(c, d, e), k4, w10 = left(w10 ^ w7 ^ w2 ^ w12));
    Round(a, b, c, d, e, f2(b, c, d), k4, w11 = left(w11 ^ w8 ^ w3 ^ w13));
    Round(e, a, b, c, d, f2(a, b, c), k4, w12 = left(w12 ^ w9 ^ w4 ^ w14));
    // Final three rounds: the expanded words are never read again, so the
    // assignments back into w13/w14/w15 are deliberately omitted.
    Round(d, e, a, b, c, f2(e, a, b), k4, left(w13 ^ w10 ^ w5 ^ w15));
    Round(c, d, e, a, b, f2(d, e, a), k4, left(w14 ^ w11 ^ w6 ^ w0));
    Round(b, c, d, e, a, f2(c, d, e), k4, left(w15 ^ w12 ^ w7 ^ w1));

    // Fold the working variables back into the chaining state (Davies-Meyer).
    s[0] += a;
    s[1] += b;
    s[2] += c;
    s[3] += d;
    s[4] += e;
}

} // namespace sha1

} // namespace

////// SHA1

CSHA1::CSHA1() : bytes(0)
{
    sha1::Initialize(s);
}

CSHA1& CSHA1::Write(const unsigned char* data, size_t len)
{ const unsigned char* end = data + len; size_t bufsize = bytes % 64; if (bufsize && bufsize + len >= 64) { // Fill the buffer, and process it. memcpy(buf + bufsize, data, 64 - bufsize); bytes += 64 - bufsize; data += 64 - bufsize; sha1::Transform(s, buf); bufsize = 0; } while (end >= data + 64) { // Process full chunks directly from the source. sha1::Transform(s, data); bytes += 64; data += 64; } if (end > data) { // Fill the buffer with what remains. memcpy(buf + bufsize, data, end - data); bytes += end - data; } return *this; } void CSHA1::Finalize(unsigned char hash[OUTPUT_SIZE]) { static const unsigned char pad[64] = {0x80}; unsigned char sizedesc[8]; WriteBE64(sizedesc, bytes << 3); Write(pad, 1 + ((119 - (bytes % 64)) % 64)); Write(sizedesc, 8); WriteBE32(hash, s[0]); WriteBE32(hash + 4, s[1]); WriteBE32(hash + 8, s[2]); WriteBE32(hash + 12, s[3]); WriteBE32(hash + 16, s[4]); } CSHA1& CSHA1::Reset() { bytes = 0; sha1::Initialize(s); return *this; }
/* Copyright (C) 2006 - 2013 ScriptDev2 <http://www.scriptdev2.com/> * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* ScriptData SDName: instance_vortex_pinnacle SD%Complete: 0 SDComment: Placeholder SDCategory: Vortex Pinnacle EndScriptData */ #include "precompiled.h" void AddSC_instance_vortex_pinnacle() { }
// Copyright (c) 2013-2018 LG Electronics, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//  http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0

#include "db/MojDbShardEngine.h"
#include "db/MojDbKind.h"
#include "db/MojDb.h"
#include "db/MojDbServiceDefs.h"
#include "db/MojDbMediaLinkManager.h"
#include "core/MojDataSerialization.h"

#include <boost/crc.hpp>
#include <string>
#include <sys/statvfs.h>

using namespace std;

// Built-in kind definition (JSON) for shard bookkeeping records; registered
// with the kind engine in init().
static const MojChar* const ShardInfoKind1Str =
    _T("{\"id\":\"ShardInfo1:1\",")
    _T("\"owner\":\"mojodb.admin\",")
    _T("\"indexes\":[ {\"name\":\"ShardId\", \"props\":[ {\"name\":\"shardId\"} ]}, \
 {\"name\":\"DatabasePath\",\"props\":[ {\"name\":\"databasePath\"} ]}, \
 {\"name\":\"DeviceId\", \"props\":[ {\"name\":\"deviceId\"} ]}, \
 {\"name\":\"IdBase64\", \"props\":[ {\"name\":\"idBase64\"} ]}, \
 {\"name\":\"Active\", \"props\":[ {\"name\":\"active\"} ]}, \
 {\"name\":\"Transient\", \"props\":[ {\"name\":\"transient\"} ]}, \
 {\"name\":\"Timestamp\", \"props\":[ {\"name\":\"timestamp\"} ]}, \
 {\"name\":\"KindIds\", \"props\":[ {\"name\":\"kindIds\"} ]}\
]}");

// NOTE(review): m_enable is only initialized in the member-init list under
// LMDB_ENGINE_SUPPORT. Confirm it has an in-class initializer in the header;
// otherwise it is read uninitialized if configure() is never called.
MojDbShardEngine::MojDbShardEngine(MojDb& db)
:
#ifdef LMDB_ENGINE_SUPPORT
    m_db(db), m_enable(false)
#else
    m_db(db)
#endif
{
}

MojDbShardEngine::~MojDbShardEngine(void)
{
}

/**
 * initialize MojDbShardEngine: reads config, registers the built-in
 * ShardInfo kind, deactivates all shards and warms the in-memory cache
 *
 * @param conf   engine configuration object
 * @param req    batch support
 *
 * @return MojErr
 */
MojErr MojDbShardEngine::init (const MojObject& conf, MojDbReqRef req)
{
    LOG_TRACE("Entering function %s", __FUNCTION__);
    MojErr err;
    MojObject obj;
    m_cache.clear();
    err = configure(conf);
    MojErrCheck(err);
    // add type
    err = obj.fromJson(ShardInfoKind1Str);
    MojErrCheck(err);
    // shard bookkeeping lives in the admin domain
    MojDbAdminGuard admin(req);
    err = m_db.kindEngine()->putKind(obj, req, true); // add builtin kind
    MojErrCheck(err);
    err = KindHash::registerKind(&m_db, req);
    MojErrCheck(err);
    //all devices should not be active at startup
    err = resetShards(req);
    MojErrCheck(err);
    err = initCache(req);
    MojErrCheck(err);
    return MojErrNone;
}

// Parses sharding-related configuration. Sharding stays disabled unless
// "enable_sharding" is present; a free-space percentage >= 100 also disables
// it (nothing could ever satisfy the requirement).
// NOTE(review): "Percantage" is a typo for "Percentage" in the member name
// (declared in the header — would need a coordinated rename).
MojErr MojDbShardEngine::configure(const MojObject& conf)
{
    LOG_TRACE("Entering function %s", __FUNCTION__);
    MojErr err;
    MojString mediaLinkDirectory;
    if (!conf.get(_T("enable_sharding"), m_enable)) {
        m_enable = false;
        return MojErrNone;
    }
    err = conf.getRequired(_T("shard_db_prefix"), m_databasePrefix);
    MojErrCheck(err);
    err = conf.getRequired(_T("device_links_path"), mediaLinkDirectory);
    MojErrCheck(err);
    err = m_mediaLinkManager.setLinkDirectory(mediaLinkDirectory);
    MojErrCheck(err);
    if (m_databasePrefix.empty()) {
        MojErrThrow (MojErrRequiredPropNotFound);
    }
    // leading '/' means the prefix is an absolute filesystem path
    if (m_databasePrefix.at(0) == _T('/')) {
        m_databasePrefixIsAbsolute = true;
    } else {
        m_databasePrefixIsAbsolute = false;
    }
    err = conf.getRequired(_T("fallback_path"), m_fallbackPath);
    MojErrCheck(err);
    MojObject val;
    err = conf.getRequired(_T("device_minimum_free_bytes"), val);
    MojErrCheck(err);
    m_reqFreePartSpaceBytes = val.intValue();
    if (conf.get(_T("device_minimum_free_percentage"), val)) {
        MojDecimal dec = val.decimalValue();
        m_reqFreePartSpacePercantage = float(dec.floatValue());
        if (m_reqFreePartSpacePercantage >= 100.0f) {
            // impossible requirement: disable sharding rather than fail
            m_enable = false;
            return MojErrNone;
        } else if (m_reqFreePartSpacePercantage < 0.0f) {
            MojErrThrow(MojErrInvalidArg);
        }
    } else {
        m_reqFreePartSpacePercantage = 0.0f;
    }
    return MojErrNone;
}

/**
 * all devices should not be active at startup:
 * - reset 'active flag'
 * - reset 'mountPath'
 *
 * @param io_req
 *      batch support
 *
 * @return MojErr
 */
MojErr MojDbShardEngine::resetShards (MojDbReq& io_req)
{
    LOG_TRACE("Entering function %s", __FUNCTION__);
    MojDbQuery query;
    MojDbCursor cursor;
    MojObject props;
    MojUInt32 count;
    // bulk-merge {active:false, mountPath:""} into every ShardInfo record
    MojErr err = query.from(_T("ShardInfo1:1"));
    MojErrCheck(err);
    err = props.put(_T("active"), false);
    MojErrCheck(err);
    err = props.put(_T("mountPath"), MojString());
    MojErrCheck(err);
    MojDbAdminGuard admin(io_req);
    err = m_db.merge(query, props, count, MojDbFlagNone, io_req);
    MojErrCheck(err);
    return MojErrNone;
}

/**
 * put a new shard description to db (timestamp is refreshed and the base64
 * id derived if absent); the stored object is also added to the cache
 *
 * @param shardInfo
 *      device info to store
 *
 * @return MojErr
 */
MojErr MojDbShardEngine::put (const MojDbShardInfo& shardInfo, MojDbReqRef req)
{
    LOG_TRACE("Entering function %s", __FUNCTION__);
    MojAssert(shardInfo.id);
    MojObject obj;
    MojErr err;
    err = obj.putString(_T("_kind"), _T("ShardInfo1:1"));
    MojErrCheck(err);
    // work on a copy so the caller's struct is untouched
    MojDbShardInfo info = shardInfo;
    updateTimestamp(info);
    if (info.id_base64.empty()) {
        err = MojDbShardEngine::convertId(info.id, info.id_base64);
        MojErrCheck(err);
    }
    err = convert(info, obj);
    MojErrCheck(err);
    MojDbAdminGuard admin(req);
    err = m_db.put(obj, MojDbFlagNone, req);
    MojErrCheck(err);
    m_cache.put(shardInfo.id, obj);
    return MojErrNone;
}

/**
 * get shard description by id (served entirely from the in-memory cache;
 * no db access)
 *
 * @param shardId
 *      device id
 *
 * @param shardInfo
 *      device info, initialized if device found by id
 *
 * @param found
 *      true if found
 *
 * @return MojErr
 */
MojErr MojDbShardEngine::get (MojUInt32 shardId, MojDbShardInfo& shardInfo, bool& found)
{
    LOG_TRACE("Entering function %s", __FUNCTION__);
    MojErr err;
    MojObject dbObj;
    found = m_cache.get(shardId, dbObj);
    if (found) {
        err = convert(dbObj, shardInfo);
        MojErrCheck(err);
    }
    return MojErrNone;
}

/**
 * get list of all active shards
 *
 * @param shardInfoList
 *      list of device info collect all shards with state 'active'==true
 *
 * @param count
 *      number of devices info added
 *
 * @return MojErr
 */
MojErr MojDbShardEngine::getAllActive (std::list<MojDbShardInfo>& shardInfoList, MojUInt32& count, MojDbReqRef req)
{
    LOG_TRACE("Entering function 
%s", __FUNCTION__); MojErr err; MojDbQuery query; MojDbCursor cursor; MojObject obj(true); MojDbShardInfo shardInfo; err = query.from(_T("ShardInfo1:1")); MojErrCheck(err); err = query.where(_T("active"), MojDbQuery::OpEq, obj); MojErrCheck(err); MojDbAdminGuard admin(req); err = m_db.find(query, cursor, req); MojErrCheck(err); count = 0; shardInfoList.clear(); while (true) { bool found; MojObject dbObj; err = cursor.get(dbObj, found); MojErrCheck(err); if (!found) break; err = convert(dbObj, shardInfo); MojErrCheck(err); shardInfoList.push_back(shardInfo); ++count; } return MojErrNone; } /** * update shardInfo * * @param i_shardInfo * update device info properties (search by ShardInfo.id) * * @return MojErr */ MojErr MojDbShardEngine::update (const MojDbShardInfo& i_shardInfo, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); MojErr err; MojDbQuery query; MojObject dbObj; MojObject obj(i_shardInfo.id); err = query.from(_T("ShardInfo1:1")); MojErrCheck(err); err = query.where(_T("shardId"), MojDbQuery::OpEq, obj); MojErrCheck(err); MojObject update; MojUInt32 count = 0; MojDbShardInfo shardInfo = i_shardInfo; updateTimestamp(shardInfo); err = convert(shardInfo, update); MojErrCheck(err); MojDbAdminGuard admin(req); err = m_db.merge(query, update, count, MojDbFlagNone, req); MojErrCheck(err); if (count == 0) return MojErrDbObjectNotFound; m_cache.update(i_shardInfo.id, update); return MojErrNone; } /** * get shard description by uuid * * @param deviceUuid * uuid * * @param shardInfo * device info (initialized if found) * * @param found * true if found * * @return MojErr */ MojErr MojDbShardEngine::getByDeviceUuid (const MojString& deviceUuid, MojDbShardInfo& shardInfo, bool& found, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); MojErr err; //get record from db, extract id MojDbQuery query; MojDbCursor cursor; MojObject obj(deviceUuid); MojObject dbObj; err = query.from(_T("ShardInfo1:1")); MojErrCheck(err); err = 
query.where(_T("deviceId"), MojDbQuery::OpEq, obj); MojErrCheck(err); MojDbAdminGuard admin(req); err = m_db.find(query, cursor, req); MojErrCheck(err); err = cursor.get(dbObj, found); MojErrCheck(err); if (found) convert(dbObj, shardInfo); return MojErrNone; } /** * get device id by uuid * * search within db for i_deviceId, return id if found * else * allocate a new id * * @param deviceUuid * device uuid * * @param shardId * device id * * @return MojErr */ MojErr MojDbShardEngine::getShardId (const MojString& deviceUuid, MojUInt32& shardId) { LOG_TRACE("Entering function %s", __FUNCTION__); MojErr err; MojDbShardInfo shardInfo; bool found; err = getByDeviceUuid(deviceUuid, shardInfo, found); MojErrCheck(err); if (found) { shardId = shardInfo.id; LOG_DEBUG("[db_shardEngine] Shard id for device %s is %d", deviceUuid.data(), shardId); } else { LOG_DEBUG("[db_shardEngine] Shard id for device %s not found, generating it", deviceUuid.data()); err = allocateId(deviceUuid, shardId); MojErrCheck(err); } return MojErrNone; } /** * compute a new shard id * * @param deviceUuid * device uuid * * @param shardId * device id * * @return MojErr */ MojErr MojDbShardEngine::allocateId (const MojString& deviceUuid, MojUInt32& shardId) { LOG_TRACE("Entering function %s", __FUNCTION__); MojErr err; MojUInt32 id; MojUInt32 calc_id; MojUInt32 prefix = 1; MojUInt32 suffix = 1; bool found = false; err = computeId(deviceUuid, calc_id); MojErrCheck(err); do { id = calc_id | (prefix * 0x01000000); //check the table to see if this ID already exists err = isIdExist(id, found); MojErrCheck(err); if (found) { LOG_WARNING(MSGID_DB_SHARDENGINE_WARNING, 2, PMLOGKFV("id", "%x", id), PMLOGKFV("prefix", "%u", prefix), "id generation -> 'id' exist already, prefix = 'prefix'"); prefix++; } else { MojAssert(id); shardId = id; break; // exit from loop } if (prefix == 128) { LOG_WARNING(MSGID_DB_SHARDENGINE_WARNING, 1, PMLOGKFV("prefix", "%u", prefix), "id generation -> next iteration"); prefix = 1; 
MojString modified_uuid; modified_uuid.format("%s%x", deviceUuid.data(), ++suffix); computeId(modified_uuid, calc_id); //next iteration } } while (!found); return MojErrNone; } /** * is device id exist? * * @param shardId * device id * * @param found * true, if found * * @return MojErr */ MojErr MojDbShardEngine::isIdExist (MojUInt32 shardId, bool& found) { LOG_TRACE("Entering function %s", __FUNCTION__); found = m_cache.isExist(shardId); return MojErrNone; } /** * compute device id by media uuid * * @param mediaUuid * media uuid * * @param shardId * device id * * @return MojErr */ MojErr MojDbShardEngine::computeId (const MojString& mediaUuid, MojUInt32& sharId) { LOG_TRACE("Entering function %s", __FUNCTION__); MojAssert(!mediaUuid.empty()); std::string str = mediaUuid.data(); //Create a 24 bit hash of the string boost::crc_32_type result; result.process_bytes(str.data(), str.length()); MojInt32 code = result.checksum(); result.reset(); //Prefix the 24 bit hash with 0x01 to create a 32 bit unique shard ID sharId = code & 0xFFFFFF; return MojErrNone; } /** * convert device id to base64 string * * @param i_id * device id * * @param o_id_base64 * device id converted to base64 string * * @return MojErr */ MojErr MojDbShardEngine::convertId (const MojUInt32 i_id, MojString& o_id_base64) { LOG_TRACE("Entering function %s", __FUNCTION__); MojErr err; MojBuffer buf; MojDataWriter writer(buf); err = writer.writeUInt32(i_id); MojErrCheck(err); MojVector<MojByte> byteVec; err = buf.toByteVec(byteVec); MojErrCheck(err); MojString str; err = o_id_base64.base64Encode(byteVec, false); MojErrCheck(err); return MojErrNone; } /** * convert base64 string to device id * * @param i_id_base64 * device id converted to base64 string * * @param o_id * device id * * @return MojErr */ MojErr MojDbShardEngine::convertId (const MojString& i_id_base64, MojUInt32& o_id) { LOG_TRACE("Entering function %s", __FUNCTION__); MojErr err; MojVector<MojByte> idVec; err = 
i_id_base64.base64Decode(idVec);
    MojErrCheck(err);
    // extract first 32bits of _id as shard id in native order
    MojDataReader reader(idVec.begin(), idVec.size());
    err = reader.readUInt32(o_id);
    MojErrCheck(err);
    return MojErrNone;
}

/**
 * convert device info to MojObject (field-by-field copy; kindIds is stored
 * as its string form)
 *
 * @param i_shardInfo
 *      device info
 *
 * @param o_obj
 *      MojObject
 *
 * @return MojErr
 */
MojErr MojDbShardEngine::convert (const MojDbShardInfo& i_shardInfo, MojObject& o_obj)
{
    LOG_TRACE("Entering function %s", __FUNCTION__);
    MojObject obj1(i_shardInfo.id);
    MojErr err = o_obj.put(_T("shardId"), obj1);
    MojErrCheck(err);
    MojObject obj2(i_shardInfo.active);
    err = o_obj.put(_T("active"), obj2);
    MojErrCheck(err);
    err = o_obj.put(_T("transient"), i_shardInfo.transient);
    MojErrCheck(err);
    MojObject obj3(i_shardInfo.id_base64);
    err = o_obj.put(_T("idBase64"), obj3);
    MojErrCheck(err);
    MojObject obj4(i_shardInfo.deviceId);
    err = o_obj.put(_T("deviceId"), obj4);
    MojErrCheck(err);
    MojObject obj5(i_shardInfo.deviceUri);
    err = o_obj.put(_T("deviceUri"), obj5);
    MojErrCheck(err);
    MojObject obj6(i_shardInfo.mountPath);
    err = o_obj.put(_T("mountPath"), obj6);
    MojErrCheck(err);
    err = o_obj.put(_T("databasePath"), i_shardInfo.databasePath);
    MojErrCheck(err);
    MojObject obj7(i_shardInfo.deviceName);
    err = o_obj.put(_T("deviceName"), obj7);
    MojErrCheck(err);
    MojObject obj8(i_shardInfo.timestamp);
    err = o_obj.put(_T("timestamp"), obj8);
    MojErrCheck(err);
    err = o_obj.putString(_T("parentDeviceId"), i_shardInfo.parentDeviceId);
    MojErrCheck(err);
    //convert kindIds
    MojString strKindIds;
    err = i_shardInfo.kindIds.toString(strKindIds);
    MojErrCheck(err);
    MojObject obj9(strKindIds);
    err = o_obj.put(_T("kindIds"), obj9);
    MojErrCheck(err);
    return MojErrNone;
}

/**
 * convert MojObject to device info (inverse of the overload above; every
 * field is required, so a partial record fails the conversion)
 *
 * @param i_obj
 *      MojObject, input
 *
 * @param o_shardInfo
 *      device info
 *
 * @return MojErr
 */
MojErr MojDbShardEngine::convert (const MojObject& i_obj, MojDbShardInfo& o_shardInfo)
{
    LOG_TRACE("Entering function %s", __FUNCTION__);
    MojErr 
err = i_obj.getRequired(_T("shardId"), o_shardInfo.id);
    MojErrCheck(err);
    err = i_obj.getRequired(_T("idBase64"), o_shardInfo.id_base64);
    MojErrCheck(err);
    err = i_obj.getRequired(_T("active"), o_shardInfo.active);
    MojErrCheck(err);
    err = i_obj.getRequired(_T("transient"), o_shardInfo.transient);
    MojErrCheck(err);
    err = i_obj.getRequired(_T("deviceId"), o_shardInfo.deviceId);
    MojErrCheck(err);
    err = i_obj.getRequired(_T("deviceUri"), o_shardInfo.deviceUri);
    MojErrCheck(err);
    err = i_obj.getRequired(_T("mountPath"), o_shardInfo.mountPath);
    MojErrCheck(err);
    err = i_obj.getRequired(_T("databasePath"), o_shardInfo.databasePath);
    MojErrCheck(err);
    err = i_obj.getRequired(_T("deviceName"), o_shardInfo.deviceName);
    MojErrCheck(err);
    err = i_obj.getRequired(_T("timestamp"), o_shardInfo.timestamp);
    MojErrCheck(err);
    err = i_obj.getRequired(_T("parentDeviceId"), o_shardInfo.parentDeviceId);
    MojErrCheck(err);
    MojString strKindIds;
    err = i_obj.getRequired(_T("kindIds"), strKindIds);
    MojErrCheck(err);
    err = o_shardInfo.kindIds.fromString(strKindIds);
    MojErrCheck(err);
    return MojErrNone;
}

/**
 * Support garbage collection of obsolete shards
 * remove shard objects older <numDays> days
 *
 * @param numDays
 *      days
 *
 * @param req
 *      batch support
 *
 * @return MojErr
 */
MojErr MojDbShardEngine::purgeShardObjects (MojInt64 numDays, MojDbReqRef req)
{
    LOG_TRACE("Entering function %s", __FUNCTION__);
    MojDbQuery query1, query2;
    MojDbCursor cursor;
    MojInt32 value_id = 0;
    MojInt64 value_timestamp;
    MojObject obj(value_id);
    MojObject dbObj;
    MojObject obj_active(false);
    MojVector<MojUInt32> arrShardIds;
    MojString shardIdStr;
    bool found;
    bool value_active;
    MojTime time;
    MojErrCheck( MojGetCurrentTime(time) );
    // records whose timestamp predates this are purge candidates
    MojInt64 purgeTime = time.microsecs() - (MojTime::UnitsPerDay * numDays);
    LOG_DEBUG("[db_shardEngine] purging objects for shards inactive for more than %lld days...", numDays);

    //collect 'old' shards
    //--------------------
    MojErr err = query1.from(_T("ShardInfo1:1"));
    MojErrCheck(err);
    err = 
query1.where(_T("timestamp"), MojDbQuery::OpLessThanEq, purgeTime); MojErrCheck(err); query1.setIgnoreInactiveShards(false); MojDbAdminGuard admin(req); err = m_db.find(query1, cursor, req); MojErrCheck(err); while (true) { err = cursor.get(dbObj, found); MojErrCheck(err); if(!found) break; err = dbObj.getRequired(_T("shardId"), value_id); MojErrCheck(err); err = MojDbShardEngine::convertId(value_id, shardIdStr); MojErrCheck(err); err = dbObj.getRequired(_T("active"), value_active); MojErrCheck(err); if(!value_active) { err = arrShardIds.pushUnique(value_id); MojErrCheck(err); LOG_DEBUG("[db_shardEngine] Need to purge records for old shard: [%s]", shardIdStr.data()); } else { // TODO: Remove LOG_DEBUG("[db_shardEngine] Ignore active shard: [%s]", shardIdStr.data()); } } MojErrCheck(cursor.close()); removeShardObjects(arrShardIds, req); LOG_DEBUG("[db_shardEngine] Ended"); return MojErrNone; } /** * removeShardObjects * * @param strShardIdToRemove * device id * * @param req * batch support * * @return MojErr */ MojErr MojDbShardEngine::removeShardObjects (const MojString& strShardIdToRemove, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); MojUInt32 shardId; MojVector<MojUInt32> shardIds; MojErr err = MojDbShardEngine::convertId(strShardIdToRemove, shardId); MojErrCheck(err); err = shardIds.push(shardId); MojErrCheck(err); LOG_DEBUG("[db_shardEngine] purging objects for shard: %s", strShardIdToRemove.data()); return(removeShardObjects(shardIds, req)); } /** * removeShardObjects * * @param shardId * device id * * @param req * batch support * * @return MojErr */ MojErr MojDbShardEngine::removeShardObjects (const MojVector<MojUInt32>& arrShardIds, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); MojDbAdminGuard admin(req); if (arrShardIds.size() > 0) { MojDbShardInfo info; bool foundOut; MojErr err; MojDbKind* pKind; for (MojVector<MojUInt32>::ConstIterator itShardId = arrShardIds.begin(); itShardId != arrShardIds.end(); 
++itShardId) { //get shard info structure err = get(*itShardId, info, foundOut); MojErrCheck(err); if(foundOut) { //iterate over kindIds array for (std::list<MojString>::iterator itKindId = info.kindIds.begin(); itKindId != info.kindIds.end(); ++itKindId) { //verify kind for 'built-in' flag err = m_db.kindEngine()->getKind((*itKindId).data(), pKind); MojErrCheck(err); if(pKind->isBuiltin()) continue; LOG_DEBUG("[db_shardEngine] Get next shard for %s", (*itKindId).data()); // TODO: to debug err = removeShardKindObjects(*itShardId, *itKindId, req); MojErrCheck(err); } } } LOG_DEBUG("[db_shardEngine] Returned from removeShardObjects"); // TODO: to debug } return MojErrNone; } /** * removeShardRecords * * @param shardIdStr * device id * * @param kindId * kind id * * @param req * batch support * * @return MojErr */ MojErr MojDbShardEngine::removeShardKindObjects (const MojUInt32 shardId, const MojString& kindId, MojDbReq& req) { LOG_TRACE("Entering function %s", __FUNCTION__); // make query bool found; uint32_t countDeleted = 0; uint32_t countRead = 0; MojDbQuery query; MojErr err = query.from(kindId); MojErrCheck(err); query.setIgnoreInactiveShards(false); MojDbCursor cursor; MojDbAdminGuard admin(req); err = m_db.find(query, cursor, req); MojErrCheck(err); MojString shardIdStr; err = MojDbShardEngine::convertId(shardId, shardIdStr); MojErrCheck(err); LOG_DEBUG("[db_shardEngine] purging objects for shard: [%s], Kind: [%s]", shardIdStr.data(), kindId.data());// todo: convert to Info MojObject record; MojObject recordId; MojUInt32 cmpShardId; while(true) { err = cursor.get(record, found); MojErrCheck(err); if (!found) break; err = record.getRequired(MojDb::IdKey, recordId); MojErrCheck(err); countRead ++; err = MojDbIdGenerator::extractShard(recordId, cmpShardId); MojErrCheck(err); if (cmpShardId != shardId) continue; err = m_db.del(recordId, found, MojDbFlagNone, req); MojErrCheck(err); countDeleted++; } if (countDeleted) { LOG_DEBUG("[db_shardEngine] purged %d of %d 
objects for shard: [%s] from Kind: [%s]", countDeleted, countRead, shardIdStr.data(), kindId.data()); } else { LOG_DEBUG("[db_shardEngine] none purged out of %d objects", countRead); // todo: convert to Info } return MojErrNone; } /** * update ShardInfo::timestamp with current time value */ MojErr MojDbShardEngine::updateTimestamp (MojDbShardInfo& shardInfo) { LOG_TRACE("Entering function %s", __FUNCTION__); MojTime time; MojErrCheck( MojGetCurrentTime(time) ); shardInfo.timestamp = time.microsecs(); return MojErrNone; } /** * init cache * * @param io_req * batch support * * @return MojErr */ MojErr MojDbShardEngine::initCache (MojDbReq& io_req) { LOG_TRACE("Entering function %s", __FUNCTION__); MojDbQuery query; MojDbCursor cursor; MojInt32 value_id = 0; MojObject obj(value_id); MojObject dbObj; bool found; MojErr err = query.from(_T("ShardInfo1:1")); MojErrCheck(err); MojDbAdminGuard admin(io_req); err = m_db.find(query, cursor, io_req); MojErrCheck(err); while (true) { err = cursor.get(dbObj, found); MojErrCheck(err); if(!found) break; err = dbObj.getRequired(_T("shardId"), value_id); MojErrCheck(err); m_cache.put(value_id, dbObj); } MojErrCheck(cursor.close()); return MojErrNone; } MojErr MojDbShardEngine::linkShardAndKindId (const MojString& shardIdBase64, const MojString& kindId, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); if(shardIdBase64.empty()) { LOG_DEBUG("[db_shardEngine] link shard and kind: empty shardId"); return MojErrNone; } MojErr err; MojUInt32 id; err = convertId(shardIdBase64, id); MojErrCheck(err); err = linkShardAndKindId(id, kindId, req); MojErrCheck(err); return MojErrNone; } MojErr MojDbShardEngine::linkShardAndKindId (const MojUInt32 shardId, const MojString& kindId, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); if(kindId.empty()) { LOG_DEBUG("[db_shardEngine] link shard and kind: empty kindId"); return MojErrNone; } bool found; MojDbShardInfo shardInfo; MojErr err; err = get(shardId, 
shardInfo, found); MojErrCheck(err); if(!found) return MojErrNone; if(shardInfo.kindIds.isExist(kindId)) return MojErrNone; //link shard and kindId shardInfo.kindIds.add(kindId); //update db err = update(shardInfo, req); MojErrCheck(err); return MojErrNone; } MojErr MojDbShardEngine::unlinkShardAndKindId (const MojString& shardIdBase64, const MojString& kindId, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); if(shardIdBase64.empty()) { LOG_DEBUG("[db_shardEngine] link shard and kind: empty shardId"); return MojErrNone; } MojErr err; MojUInt32 id; err = convertId(shardIdBase64, id); MojErrCheck(err); err = unlinkShardAndKindId(id, kindId, req); MojErrCheck(err); return MojErrNone; } MojErr MojDbShardEngine::unlinkShardAndKindId (const MojUInt32 shardId, const MojString& kindId, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); if(kindId.empty()) { LOG_DEBUG("[db_shardEngine] link shard and kind: empty kindId"); return MojErrNone; } bool found; MojDbShardInfo shardInfo; MojErr err; err = get(shardId, shardInfo, found); MojErrCheck(err); if(!found) return MojErrNone; if(!shardInfo.kindIds.isExist(kindId)) return MojErrNone; //unlink shard and kindId shardInfo.kindIds.remove(kindId); //update db err = update(shardInfo, req); MojErrCheck(err); return MojErrNone; } /** * removeShardInfo record * * @param shardIdStr * device id * * @param kindId * kind id * * @param req * batch support * * @return MojErr */ MojErr MojDbShardEngine::removeShardInfo (const MojUInt32 shardId, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); MojUInt32 count; MojDbQuery query; MojObject obj_id(shardId); MojErr err = query.from(_T("ShardInfo1:1")); MojErrCheck(err); err = query.where(_T("shardId"), MojDbQuery::OpEq, obj_id); MojErrCheck(err); MojDbAdminGuard admin(req); err = m_db.del(query, count, MojDbFlagNone, req); MojErrCheck(err); m_cache.del(shardId); return MojErrNone; } MojErr MojDbShardEngine::processShardInfo(const 
MojDbShardInfo& shardInfo, MojDbShardInfo* databaseShardInfo, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); LOG_DEBUG("[db_shardEngine] Shard engine notified about new shard"); MojErr err; bool found; // Inside shardInfo we have only filled deviceId deviceUri mountPath MojString deviceName // Note: we'll use short transaction here if we are not in an exclusive access already err = getByDeviceUuid(shardInfo.deviceId, *databaseShardInfo, found, req->schemaLocked() ? req : MojDbReqRef(MojDbReq())); MojErrCheck(err); copyRequiredFields(shardInfo, *databaseShardInfo); // fill/update databaseShardInfo if (!found) { // initialize new shardInfo err = allocateId(shardInfo.deviceId, databaseShardInfo->id); MojErrCheck(err); LOG_DEBUG("[db_shardEngine] shardEngine for device %s generated shard id: %d", databaseShardInfo->deviceId.data(), databaseShardInfo->id); databaseShardInfo->parentDeviceId = shardInfo.parentDeviceId; databaseShardInfo->deviceId = shardInfo.deviceId; databaseShardInfo->transient = false; } MojErr accErr = MojErrNone; err = req->begin(&m_db, true); MojErrCheck(err); // update shardInfo and mounted shards atomically // note that shard mount/unmount requires exclusive lock if (databaseShardInfo->active) { err = databasePrepare(databaseShardInfo, req); MojErrCheck(err); err = req->begin(&m_db, true); // lock schema for write MojErrCheck(err); err = m_db.storageEngine()->mountShard(databaseShardInfo->id, databaseShardInfo->databasePath); MojErrCheck(err); } else { err = m_db.storageEngine()->unMountShard(databaseShardInfo->id); MojErrAccumulate(accErr, err); } err = req->end(true); // unlock and commit immediately in case of success MojErrCheck(err); err = m_mediaLinkManager.processShardInfo(*databaseShardInfo); MojErrAccumulate(accErr, err); if (found) { if (!databaseShardInfo->active && databaseShardInfo->transient) { err = removeTransientShard(*databaseShardInfo, req); MojErrAccumulate(accErr, err); } else { err = 
update(*databaseShardInfo, req); MojErrAccumulate(accErr, err); } } else { err = put(*databaseShardInfo, req); MojErrAccumulate(accErr, err); } err = req->end(true); // unlock and commit immediately whatever we can MojErrCheck(err); if (databaseShardInfo->active) { err = dropGarbage(databaseShardInfo->id, req); MojErrAccumulate(accErr, err); err = putKindsHashes(databaseShardInfo->id, req); MojErrAccumulate(accErr, err); } //notify MojDB about the change of shards status //m_db.onShardStatusChange(databaseShardInfo); err = m_db.shardStatusChanged.call(*databaseShardInfo); MojErrAccumulate(accErr, err); // propagate accumulated error MojErrCheck(accErr); return MojErrNone; } MojErr MojDbShardEngine::processShardInfo(const MojDbShardInfo& shardInfo, MojDbReqRef req) { MojDbShardInfo result; MojErr err; err = processShardInfo(shardInfo, &result, req); MojErrCheck(err); return MojErrNone; } MojErr MojDbShardEngine::copyRequiredFields(const MojDbShardInfo& from, MojDbShardInfo& to) { LOG_TRACE("Entering function %s", __FUNCTION__); to.deviceUri = from.deviceUri; to.deviceName = from.deviceName; to.active = from.active; to.transient |= from.transient; to.parentDeviceId = from.parentDeviceId; return MojErrNone; } MojErr MojDbShardEngine::removeTransientShard(const MojDbShardInfo& shardInfo, MojDbReqRef req) { LOG_TRACE("Entering function %s", __FUNCTION__); MojErr err; MojAssert(shardInfo.databasePath.data()); err = MojRmDirRecursive(shardInfo.databasePath.data()); MojErrCheck(err); err = removeShardInfo(shardInfo.id, req); MojErrCheck(err); return MojErrNone; } MojErr MojDbShardEngine::databasePrepare(MojDbShardInfo* shardInfo, MojDbReqRef req) { MojErr err; MojErr accErr = MojErrNone; MojString databaseId; err = m_db.databaseId(databaseId, req); MojErrCheck(err); if (m_databasePrefixIsAbsolute) { // path absolute. 
databasePrefix: /var/db; // shardId: 777x03 // result: /var/db/777x03/ err = shardInfo->databasePath.format(_T("%s/%08x/"), m_databasePrefix.data(), shardInfo->id); MojErrCheck(err); err = MojCreateDirIfNotPresent(shardInfo->databasePath.data()); MojErrCheck(err); } else { // is relative, use mountpath as current dir // path relative. databasePrefix .db8 // mountPath: /media/run/3A3A // databaseId: AA333AAA // result: /media/run/3A3A/.db8/AA333AAA/ err = shardInfo->databasePath.format(_T("%s/%s/%s"), shardInfo->deviceUri.data(), m_databasePrefix.data(), databaseId.data()); MojErrCheck(err); err = checkDatabaseAccess(shardInfo->databasePath.data()); MojErrAccumulate(accErr, err); err = checkDatabaseSpace(shardInfo->databasePath.data()); MojErrAccumulate(accErr, err); MojErrCatchAll(accErr) { err = shardInfo->databasePath.format(_T("%s/%08x/"), m_fallbackPath.data(), shardInfo->id); MojErrCheck(err); LOG_DEBUG("[db_shardEngine] Switch shard to use fallback. New shard db path: %s", shardInfo->databasePath.data()); } } return MojErrNone; } MojErr MojDbShardEngine::checkDatabaseAccess(const MojChar* path) { MojAssert(path); MojErr err; if (0 != access(path, F_OK | R_OK | W_OK)) { if (ENOENT == errno) { // does not exist err = MojCreateDirIfNotPresent(path); MojErrCheck(err); } else { LOG_DEBUG("[db_shardEngine] Database exist on %s , but it is read/only.", path); return MojErrNotOpen; } } return MojErrNone; } MojErr MojDbShardEngine::checkDatabaseSpace(const MojChar* path) { MojAssert(m_reqFreePartSpaceBytes > 0); MojAssert(m_reqFreePartSpacePercantage >= 0.0f); MojAssert(m_reqFreePartSpacePercantage <= 100.0f); struct statvfs st; int ret = statvfs(path, &st); if (ret != 0) { LOG_DEBUG("[db_shardEngine] Can't statfs %s ", path); return MojErrNotOpen; } typedef unsigned long long FileSystemBlocks; FileSystemBlocks minFreeBlocks = m_reqFreePartSpaceBytes / st.f_bsize; if (st.f_bavail < minFreeBlocks) { LOG_WARNING(MSGID_MOJ_DB_WARNING, 2, PMLOGKS("path", path), 
PMLOGKFV("required_space", "%lu", m_reqFreePartSpaceBytes), "[db_shardEngine] No free space"); return MojErrNoMem; } if (m_reqFreePartSpacePercantage > 0.0f) { // and check for minFreeBlocks = FileSystemBlocks(double(st.f_blocks) * double(st.f_frsize) * m_reqFreePartSpacePercantage / 100); if ((st.f_bavail * st.f_bsize) < minFreeBlocks) { LOG_WARNING(MSGID_MOJ_DB_WARNING, 2, PMLOGKS("path", path), PMLOGKFV("required_space", "%llu", minFreeBlocks), "[db_shardEngine] No free space"); return MojErrNoMem; } } return MojErrNone; } MojErr MojDbShardEngine::putKindHash(const MojChar* kindName, MojDbReqRef req) { MojErr err; std::list<MojDbShardInfo> shards; MojUInt32 count; MojDbKind* kind; err = m_db.kindEngine()->getKind(kindName, kind); MojErrCheck(err); MojAssert(kind); err = getAllActive(shards, count, req); MojErrCheck(err); for (const MojDbShardInfo& shard : shards) { err = putKindHash(shard.id, *kind, req); MojErrCheck(err); } return MojErrNone; } MojErr MojDbShardEngine::putKindHash(const MojObject& kindObj, MojDbReqRef req) { MojErr err; std::list<MojDbShardInfo> shards; MojUInt32 count; err = getAllActive(shards, count, req); MojErrCheck(err); for (const MojDbShardInfo& shard : shards) { err = putKindHash(shard.id, kindObj, req); MojErrCheck(err); } return MojErrNone; } MojErr MojDbShardEngine::putKindHash(const MojUInt32 shardId, const MojDbKind& kind, MojDbReqRef req) { MojErr err; KindHash kindHash; err = kindHash.fromKind(kind); MojErrCheck(err); MojString shardIdStr; err = convertId(shardId, shardIdStr); MojErrCheck(err); MojDbAdminGuard admin(req); err = kindHash.save(&m_db, shardIdStr, req); MojErrCheck(err); return MojErrNone; } MojErr MojDbShardEngine::putKindHash(const MojUInt32 shardId, const MojObject& kindObj, MojDbReqRef req) { MojErr err; KindHash kindHash; err = kindHash.fromKindObject(kindObj); MojErrCheck(err); MojString shardIdStr; err = convertId(shardId, shardIdStr); MojErrCheck(err); MojDbAdminGuard admin(req); err = kindHash.save(&m_db, 
shardIdStr, req); MojErrCheck(err); return MojErrNone; } MojErr MojDbShardEngine::putKindsHashes(const MojUInt32 shardId, MojDbReqRef req) { MojAssert(m_db.kindEngine()); MojErr err; MojVector<MojObject> kinds; err = m_db.kindEngine()->getKinds(kinds); MojErrCheck(err); for (const MojObject& kindObj : kinds) { err = putKindHash(shardId, kindObj, req); MojErrCheck(err); } return MojErrNone; } MojErr MojDbShardEngine::dropObject(const MojUInt32 shardId, MojDbStorageItem* item, MojDbReqRef req) { MojAssert(item); MojErr err; bool found; err = req->begin(&m_db, true); MojErrCheck(err); const MojObject& id = item->id(); MojUInt32 objectShardId; err = MojDbIdGenerator::extractShard(id, objectShardId); MojErrCheck(err); if (objectShardId != shardId) return MojErrNone; MojString kindName; err = item->kindId(kindName, *m_db.kindEngine()); MojErrCheck(err); MojAssert(!kindName.empty()); MojObject object; // Can't unserialize object and get required fields. Simple del method unworkable here. We manually prepare object, // ask Kind engine to remove index and remove manually object itself. // code below makes the same, as m_db.del method, but in tricky way. 
/*err = m_db.del(id, found, MojDbFlagPurge, req); M ojErrCheck(err);*/ err = object.put(_T("_id"), id); MojErrCheck(err); err = object.putString(_T("_kind"), kindName); MojErrCheck(err); // SEE: MojDb::delObj MojTokenSet tokenSet; // we want purge to force delete req->fixmode(true); err = m_db.kindEngine()->update(nullptr, &object, req, OpDelete, tokenSet, true); MojErrCheck(err); err = m_db.storageDatabase()->del(shardId, id, req->txn(), found); MojErrCheck(err); err = req->end(); MojErrCheck(err); return MojErrNone; } MojErr MojDbShardEngine::delKindData(const MojUInt32 shardId, const MojChar* kindId, MojDbReqRef req) { MojErr err; MojDbQuery query; err = query.from(kindId); MojErrCheck(err); MojDbCursor cursor; err = m_db.find(query, cursor, req); while (true) { MojDbStorageItem* item; bool found; err = cursor.get(item, found); MojErrCheck(err); if (!found) break; err = dropObject(shardId, item, req); MojErrCheck(err); } return MojErrNone; } MojErr MojDbShardEngine::dropGarbage(const MojUInt32 shardId, MojDbReqRef req) { MojErr err; KindHash::KindHashContainer hashes; MojVector<MojObject> kinds; err = m_db.kindEngine()->getKinds(kinds); MojErrCheck(err); MojDbAdminGuard admin(req); err = KindHash::loadHashes(&m_db, shardId, &hashes, req); MojErrCheck(err); MojString shardIdStr; err = convertId(shardId, shardIdStr); MojErrCheck(err); for (KindHash& hash : hashes) { MojDbKind* kind; err = m_db.kindEngine()->getKind(hash.kindId().data(), kind); switch (err) { case MojErrNone: if (kind->hash() != hash.hash()) { err = delKindData(shardId, hash.kindId(), req); MojErrCheck(err); err = hash.del(&m_db, shardIdStr, req); MojErrCheck(err); } break; case MojErrDbKindNotRegistered: err = delKindData(shardId, hash.kindId(), req); MojErrCheck(err); err = hash.del(&m_db, shardIdStr, req); MojErrCheck(err); break; default: MojErrThrow(err); break; } } return MojErrNone; }
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/safe_browsing/safe_browsing_store_file.h" #include "base/files/file_util.h" #include "base/files/scoped_file.h" #include "base/md5.h" #include "base/metrics/histogram.h" #include "base/metrics/sparse_histogram.h" namespace { // NOTE(shess): kFileMagic should not be a byte-wise palindrome, so // that byte-order changes force corruption. const int32 kFileMagic = 0x600D71FE; // Version history: // Version 6: aad08754/r2814 by erikkay@google.com on 2008-10-02 (sqlite) // Version 7: 6afe28a5/r37435 by shess@chromium.org on 2010-01-28 // Version 8: d3dd0715/r259791 by shess@chromium.org on 2014-03-27 const int32 kFileVersion = 8; // ReadAndVerifyHeader() returns this in case of error. const int32 kInvalidVersion = -1; // Starting with version 8, the storage is sorted and can be sharded to allow // updates to be done with lower memory requirements. Newly written files will // be sharded to need less than this amount of memory during update. Larger // values are preferred to minimize looping overhead during processing. const int64 kUpdateStorageBytes = 100 * 1024; // Prevent excessive sharding by setting a lower limit on the shard stride. // Smaller values should work fine, but very small values will probably lead to // poor performance. Shard stride is indirectly related to // |kUpdateStorageBytes|, setting that very small will bump against this. const uint32 kMinShardStride = 1 << 24; // Strides over the entire SBPrefix space. const uint64 kMaxShardStride = 1ULL << 32; // Maximum SBPrefix value. const SBPrefix kMaxSBPrefix = 0xFFFFFFFF; // Header at the front of the main database file. struct FileHeader { int32 magic, version; uint32 add_chunk_count, sub_chunk_count; uint32 shard_stride; // TODO(shess): Is this where 64-bit will bite me? Perhaps write a // specialized read/write? 
}; // Header for each chunk in the chunk-accumulation file. struct ChunkHeader { uint32 add_prefix_count, sub_prefix_count; uint32 add_hash_count, sub_hash_count; }; // Header for each shard of data in the main database file. struct ShardHeader { uint32 add_prefix_count, sub_prefix_count; uint32 add_hash_count, sub_hash_count; }; // Enumerate different format-change events for histogramming // purposes. DO NOT CHANGE THE ORDERING OF THESE VALUES. enum FormatEventType { // Corruption detected, broken down by file format. FORMAT_EVENT_FILE_CORRUPT, FORMAT_EVENT_SQLITE_CORRUPT, // Obsolete // The type of format found in the file. The expected case (new // file format) is intentionally not covered. FORMAT_EVENT_FOUND_SQLITE, // Obsolete FORMAT_EVENT_FOUND_UNKNOWN, // magic does not match. // The number of SQLite-format files deleted should be the same as // FORMAT_EVENT_FOUND_SQLITE. It can differ if the delete fails, // or if a failure prevents the update from succeeding. FORMAT_EVENT_SQLITE_DELETED, // Obsolete FORMAT_EVENT_SQLITE_DELETE_FAILED, // Obsolete // Found and deleted (or failed to delete) the ancient "Safe // Browsing" file. FORMAT_EVENT_DELETED_ORIGINAL, // Obsolete FORMAT_EVENT_DELETED_ORIGINAL_FAILED, // Obsolete // The checksum did not check out in CheckValidity() or in // FinishUpdate(). This most likely indicates that the machine // crashed before the file was fully sync'ed to disk. FORMAT_EVENT_VALIDITY_CHECKSUM_FAILURE, FORMAT_EVENT_UPDATE_CHECKSUM_FAILURE, // The header checksum was incorrect in ReadAndVerifyHeader(). Likely // indicates that the system crashed while writing an update. FORMAT_EVENT_HEADER_CHECKSUM_FAILURE, FORMAT_EVENT_FOUND_DEPRECATED, // version too old. // Memory space for histograms is determined by the max. ALWAYS // ADD NEW VALUES BEFORE THIS ONE. FORMAT_EVENT_MAX }; void RecordFormatEvent(FormatEventType event_type) { UMA_HISTOGRAM_ENUMERATION("SB2.FormatEvent", event_type, FORMAT_EVENT_MAX); } // Rewind the file. 
Using fseek(2) because rewind(3) errors are // weird. bool FileRewind(FILE* fp) { int rv = fseek(fp, 0, SEEK_SET); DCHECK_EQ(rv, 0); return rv == 0; } // Read from |fp| into |item|, and fold the input data into the // checksum in |context|, if non-NULL. Return true on success. template <class T> bool ReadItem(T* item, FILE* fp, base::MD5Context* context) { const size_t ret = fread(item, sizeof(T), 1, fp); if (ret != 1) return false; if (context) { base::MD5Update(context, base::StringPiece(reinterpret_cast<char*>(item), sizeof(T))); } return true; } // Write |item| to |fp|, and fold the output data into the checksum in // |context|, if non-NULL. Return true on success. template <class T> bool WriteItem(const T& item, FILE* fp, base::MD5Context* context) { const size_t ret = fwrite(&item, sizeof(T), 1, fp); if (ret != 1) return false; if (context) { base::MD5Update(context, base::StringPiece(reinterpret_cast<const char*>(&item), sizeof(T))); } return true; } // Read |count| items into |values| from |fp|, and fold them into the // checksum in |context|. Returns true on success. template <typename CT> bool ReadToContainer(CT* values, size_t count, FILE* fp, base::MD5Context* context) { if (!count) return true; for (size_t i = 0; i < count; ++i) { typename CT::value_type value; if (!ReadItem(&value, fp, context)) return false; // push_back() is more obvious, but coded this way std::set can // also be read. values->insert(values->end(), value); } return true; } // Write values between |beg| and |end| to |fp|, and fold the data into the // checksum in |context|, if non-NULL. Returns true if all items successful. template <typename CTI> bool WriteRange(const CTI& beg, const CTI& end, FILE* fp, base::MD5Context* context) { for (CTI iter = beg; iter != end; ++iter) { if (!WriteItem(*iter, fp, context)) return false; } return true; } // Write all of |values| to |fp|, and fold the data into the checksum // in |context|, if non-NULL. Returns true if all items successful. 
template <typename CT> bool WriteContainer(const CT& values, FILE* fp, base::MD5Context* context) { return WriteRange(values.begin(), values.end(), fp, context); } // Delete the chunks in |deleted| from |chunks|. void DeleteChunksFromSet(const base::hash_set<int32>& deleted, std::set<int32>* chunks) { for (std::set<int32>::iterator iter = chunks->begin(); iter != chunks->end();) { std::set<int32>::iterator prev = iter++; if (deleted.count(*prev) > 0) chunks->erase(prev); } } bool ReadAndVerifyChecksum(FILE* fp, base::MD5Context* context) { base::MD5Digest calculated_digest; base::MD5IntermediateFinal(&calculated_digest, context); base::MD5Digest file_digest; if (!ReadItem(&file_digest, fp, context)) return false; return memcmp(&file_digest, &calculated_digest, sizeof(file_digest)) == 0; } // Helper function to read the file header and chunk TOC. Rewinds |fp| and // initializes |context|. The header is left in |header|, with the version // returned. kInvalidVersion is returned for sanity check or checksum failure. int ReadAndVerifyHeader(const base::FilePath& filename, FileHeader* header, std::set<int32>* add_chunks, std::set<int32>* sub_chunks, FILE* fp, base::MD5Context* context) { DCHECK(header); DCHECK(add_chunks); DCHECK(sub_chunks); DCHECK(fp); DCHECK(context); base::MD5Init(context); if (!FileRewind(fp)) return kInvalidVersion; if (!ReadItem(header, fp, context)) return kInvalidVersion; if (header->magic != kFileMagic) return kInvalidVersion; // Track version read to inform removal of support for older versions. UMA_HISTOGRAM_SPARSE_SLOWLY("SB2.StoreVersionRead", header->version); if (header->version != kFileVersion) return kInvalidVersion; if (!ReadToContainer(add_chunks, header->add_chunk_count, fp, context) || !ReadToContainer(sub_chunks, header->sub_chunk_count, fp, context)) { return kInvalidVersion; } // Verify that the data read thus far is valid. 
if (!ReadAndVerifyChecksum(fp, context)) { RecordFormatEvent(FORMAT_EVENT_HEADER_CHECKSUM_FAILURE); return kInvalidVersion; } return kFileVersion; } // Helper function to write out the initial header and chunks-contained data. // Rewinds |fp|, initializes |context|, then writes a file header and // |add_chunks| and |sub_chunks|. bool WriteHeader(uint32 out_stride, const std::set<int32>& add_chunks, const std::set<int32>& sub_chunks, FILE* fp, base::MD5Context* context) { if (!FileRewind(fp)) return false; base::MD5Init(context); FileHeader header; header.magic = kFileMagic; header.version = kFileVersion; header.add_chunk_count = add_chunks.size(); header.sub_chunk_count = sub_chunks.size(); header.shard_stride = out_stride; if (!WriteItem(header, fp, context)) return false; if (!WriteContainer(add_chunks, fp, context) || !WriteContainer(sub_chunks, fp, context)) return false; // Write out the header digest. base::MD5Digest header_digest; base::MD5IntermediateFinal(&header_digest, context); if (!WriteItem(header_digest, fp, context)) return false; return true; } // Return |true| if the range is sorted by the given comparator. template <typename CTI, typename LESS> bool sorted(CTI beg, CTI end, LESS less) { while ((end - beg) > 2) { CTI n = beg++; DCHECK(!less(*beg, *n)); if (less(*beg, *n)) return false; } return true; } // Merge |beg|..|end| into |container|. Both should be sorted by the given // comparator, and the range iterators should not be derived from |container|. // Differs from std::inplace_merge() in that additional memory is not required // for linear performance. template <typename CT, typename CTI, typename COMP> void container_merge(CT* container, CTI beg, CTI end, const COMP& less) { DCHECK(sorted(container->begin(), container->end(), less)); DCHECK(sorted(beg, end, less)); // Size the container to fit the results. 
const size_t c_size = container->size(); container->resize(c_size + (end - beg)); // |c_end| points to the original endpoint, while |c_out| points to the // endpoint that will scan from end to beginning while merging. typename CT::iterator c_end = container->begin() + c_size; typename CT::iterator c_out = container->end(); // While both inputs have data, move the greater to |c_out|. while (c_end != container->begin() && end != beg) { if (less(*(c_end - 1), *(end - 1))) { *(--c_out) = *(--end); } else { *(--c_out) = *(--c_end); } } // Copy any data remaining in the new range. if (end != beg) { // The original container data has been fully shifted. DCHECK(c_end == container->begin()); // There is exactly the correct amount of space left. DCHECK_EQ(c_out - c_end, end - beg); std::copy(beg, end, container->begin()); } DCHECK(sorted(container->begin(), container->end(), less)); } // Collection of iterators used while stepping through StateInternal (see // below). class StateInternalPos { public: StateInternalPos(SBAddPrefixes::iterator add_prefixes_iter, SBSubPrefixes::iterator sub_prefixes_iter, std::vector<SBAddFullHash>::iterator add_hashes_iter, std::vector<SBSubFullHash>::iterator sub_hashes_iter) : add_prefixes_iter_(add_prefixes_iter), sub_prefixes_iter_(sub_prefixes_iter), add_hashes_iter_(add_hashes_iter), sub_hashes_iter_(sub_hashes_iter) { } SBAddPrefixes::iterator add_prefixes_iter_; SBSubPrefixes::iterator sub_prefixes_iter_; std::vector<SBAddFullHash>::iterator add_hashes_iter_; std::vector<SBSubFullHash>::iterator sub_hashes_iter_; }; // Helper to find the next shard boundary. template <class T> bool prefix_bounder(SBPrefix val, const T& elt) { return val < elt.GetAddPrefix(); } // Container for partial database state. Includes add/sub prefixes/hashes, plus // aggregate operations on same. class StateInternal { public: // Append indicated amount of data from |fp|. 
bool AppendData(size_t add_prefix_count, size_t sub_prefix_count, size_t add_hash_count, size_t sub_hash_count, FILE* fp, base::MD5Context* context) { return ReadToContainer(&add_prefixes_, add_prefix_count, fp, context) && ReadToContainer(&sub_prefixes_, sub_prefix_count, fp, context) && ReadToContainer(&add_full_hashes_, add_hash_count, fp, context) && ReadToContainer(&sub_full_hashes_, sub_hash_count, fp, context); } void ClearData() { add_prefixes_.clear(); sub_prefixes_.clear(); add_full_hashes_.clear(); sub_full_hashes_.clear(); } // Merge data from |beg|..|end| into receiver's state, then process the state. // The current state and the range given should corrospond to the same sorted // shard of data from different sources. |add_del_cache| and |sub_del_cache| // indicate the chunk ids which should be deleted during processing (see // SBProcessSubs). void MergeDataAndProcess(const StateInternalPos& beg, const StateInternalPos& end, const base::hash_set<int32>& add_del_cache, const base::hash_set<int32>& sub_del_cache) { container_merge(&add_prefixes_, beg.add_prefixes_iter_, end.add_prefixes_iter_, SBAddPrefixLess<SBAddPrefix,SBAddPrefix>); container_merge(&sub_prefixes_, beg.sub_prefixes_iter_, end.sub_prefixes_iter_, SBAddPrefixLess<SBSubPrefix,SBSubPrefix>); container_merge(&add_full_hashes_, beg.add_hashes_iter_, end.add_hashes_iter_, SBAddPrefixHashLess<SBAddFullHash,SBAddFullHash>); container_merge(&sub_full_hashes_, beg.sub_hashes_iter_, end.sub_hashes_iter_, SBAddPrefixHashLess<SBSubFullHash, SBSubFullHash>); SBProcessSubs(&add_prefixes_, &sub_prefixes_, &add_full_hashes_, &sub_full_hashes_, add_del_cache, sub_del_cache); } // Sort the data appropriately for the sharding, merging, and processing // operations. 
// Sort each container with the same comparators used by the merge and
// upper_bound calls below, so all four stay mutually consistent.
void SortData() {
  std::sort(add_prefixes_.begin(), add_prefixes_.end(),
            SBAddPrefixLess<SBAddPrefix,SBAddPrefix>);
  std::sort(sub_prefixes_.begin(), sub_prefixes_.end(),
            SBAddPrefixLess<SBSubPrefix,SBSubPrefix>);
  std::sort(add_full_hashes_.begin(), add_full_hashes_.end(),
            SBAddPrefixHashLess<SBAddFullHash,SBAddFullHash>);
  std::sort(sub_full_hashes_.begin(), sub_full_hashes_.end(),
            SBAddPrefixHashLess<SBSubFullHash,SBSubFullHash>);
}

// Iterator from the beginning of the state's data.
StateInternalPos StateBegin() {
  return StateInternalPos(add_prefixes_.begin(),
                          sub_prefixes_.begin(),
                          add_full_hashes_.begin(),
                          sub_full_hashes_.begin());
}

// An iterator pointing just after the last possible element of the shard
// indicated by |shard_max|.  Used to step through the state by shard.
// TODO(shess): Verify whether binary search really improves over linear.
// Merging or writing will immediately touch all of these elements.
StateInternalPos ShardEnd(const StateInternalPos& beg, SBPrefix shard_max) {
  return StateInternalPos(
      std::upper_bound(beg.add_prefixes_iter_, add_prefixes_.end(),
                       shard_max, prefix_bounder<SBAddPrefix>),
      std::upper_bound(beg.sub_prefixes_iter_, sub_prefixes_.end(),
                       shard_max, prefix_bounder<SBSubPrefix>),
      std::upper_bound(beg.add_hashes_iter_, add_full_hashes_.end(),
                       shard_max, prefix_bounder<SBAddFullHash>),
      std::upper_bound(beg.sub_hashes_iter_, sub_full_hashes_.end(),
                       shard_max, prefix_bounder<SBSubFullHash>));
}

// Write a shard header and data for the shard starting at |beg| and ending at
// the element before |end|.
// Write a shard header (element counts) followed by the four half-open
// ranges |beg|..|end|, folding the bytes written into |context| for the
// overall file checksum.  Returns false on write failure.
bool WriteShard(const StateInternalPos& beg, const StateInternalPos& end,
                FILE* fp, base::MD5Context* context) {
  ShardHeader shard_header;
  shard_header.add_prefix_count =
      end.add_prefixes_iter_ - beg.add_prefixes_iter_;
  shard_header.sub_prefix_count =
      end.sub_prefixes_iter_ - beg.sub_prefixes_iter_;
  shard_header.add_hash_count = end.add_hashes_iter_ - beg.add_hashes_iter_;
  shard_header.sub_hash_count = end.sub_hashes_iter_ - beg.sub_hashes_iter_;

  // Short-circuits on the first failed write; header first, then data in the
  // same order AppendData() reads it back.
  return
      WriteItem(shard_header, fp, context) &&
      WriteRange(beg.add_prefixes_iter_, end.add_prefixes_iter_,
                 fp, context) &&
      WriteRange(beg.sub_prefixes_iter_, end.sub_prefixes_iter_,
                 fp, context) &&
      WriteRange(beg.add_hashes_iter_, end.add_hashes_iter_, fp, context) &&
      WriteRange(beg.sub_hashes_iter_, end.sub_hashes_iter_, fp, context);
}

SBAddPrefixes add_prefixes_;
SBSubPrefixes sub_prefixes_;
std::vector<SBAddFullHash> add_full_hashes_;
std::vector<SBSubFullHash> sub_full_hashes_;
};

// True if |val| is a (nonzero) power of two.
template <typename T>
bool IsPowerOfTwo(const T& val) {
  return val && (val & (val - 1)) == 0;
}

// Helper to read the entire database state, used by GetAddPrefixes() and
// GetAddFullHashes().  Those functions are generally used only for smaller
// files.  Returns false in case of errors reading the data.
// Read the whole store at |filename| into |db_state|: header, every shard,
// then the trailing checksum.  Also verifies that the file ends exactly at
// the checksum (no trailing garbage).
bool ReadDbStateHelper(const base::FilePath& filename,
                       StateInternal* db_state) {
  base::ScopedFILE file(base::OpenFile(filename, "rb"));
  if (file.get() == NULL)
    return false;

  // The chunk-id sets are read but discarded by this helper.
  std::set<int32> add_chunks;
  std::set<int32> sub_chunks;

  base::MD5Context context;
  FileHeader header;
  const int version =
      ReadAndVerifyHeader(filename, &header, &add_chunks, &sub_chunks,
                          file.get(), &context);
  if (version == kInvalidVersion)
    return false;

  // A zero stride in the header means a single shard covering the whole
  // prefix space.
  uint64 in_min = 0;
  uint64 in_stride = header.shard_stride;
  if (!in_stride)
    in_stride = kMaxShardStride;
  if (!IsPowerOfTwo(in_stride))
    return false;

  // Walk shard-by-shard until the entire SBPrefix space has been covered.
  do {
    ShardHeader shard_header;
    if (!ReadItem(&shard_header, file.get(), &context))
      return false;

    if (!db_state->AppendData(shard_header.add_prefix_count,
                              shard_header.sub_prefix_count,
                              shard_header.add_hash_count,
                              shard_header.sub_hash_count,
                              file.get(), &context)) {
      return false;
    }

    in_min += in_stride;
  } while (in_min <= kMaxSBPrefix);

  if (!ReadAndVerifyChecksum(file.get(), &context))
    return false;

  // Successful load must consume the file exactly.
  int64 size = 0;
  if (!base::GetFileSize(filename, &size))
    return false;
  return static_cast<int64>(ftell(file.get())) == size;
}

}  // namespace

SafeBrowsingStoreFile::SafeBrowsingStoreFile()
    : chunks_written_(0), empty_(false), corruption_seen_(false) {}

SafeBrowsingStoreFile::~SafeBrowsingStoreFile() {
  Close();
}

bool SafeBrowsingStoreFile::Delete() {
  // The database should not be open at this point.  But, just in
  // case, close everything before deleting.
  if (!Close()) {
    NOTREACHED();
    return false;
  }
  return DeleteStore(filename_);
}

// Recompute the MD5 of everything before the trailing digest and compare
// against the stored digest; any mismatch or I/O failure is reported through
// OnCorruptDatabase().
bool SafeBrowsingStoreFile::CheckValidity() {
  // The file was either empty or never opened.  The empty case is
  // presumed not to be invalid.  The never-opened case can happen if
  // BeginUpdate() fails for any databases, and should already have
  // caused the corruption callback to fire.
  if (!file_.get())
    return true;

  if (!FileRewind(file_.get()))
    return OnCorruptDatabase();

  int64 size = 0;
  if (!base::GetFileSize(filename_, &size))
    return OnCorruptDatabase();

  base::MD5Context context;
  base::MD5Init(&context);

  // Read everything except the final digest.
  size_t bytes_left = static_cast<size_t>(size);
  CHECK(size == static_cast<int64>(bytes_left));
  if (bytes_left < sizeof(base::MD5Digest))
    return OnCorruptDatabase();
  bytes_left -= sizeof(base::MD5Digest);

  // Fold the contents of the file into the checksum.
  while (bytes_left > 0) {
    char buf[4096];
    const size_t c = std::min(sizeof(buf), bytes_left);
    const size_t ret = fread(buf, 1, c, file_.get());

    // The file's size changed while reading, give up.
    if (ret != c)
      return OnCorruptDatabase();
    base::MD5Update(&context, base::StringPiece(buf, c));
    bytes_left -= c;
  }

  if (!ReadAndVerifyChecksum(file_.get(), &context)) {
    RecordFormatEvent(FORMAT_EVENT_VALIDITY_CHECKSUM_FAILURE);
    return OnCorruptDatabase();
  }

  return true;
}

void SafeBrowsingStoreFile::Init(const base::FilePath& filename,
                                 const base::Closure& corruption_callback) {
  filename_ = filename;
  corruption_callback_ = corruption_callback;
}

bool SafeBrowsingStoreFile::BeginChunk() {
  return ClearChunkBuffers();
}

bool SafeBrowsingStoreFile::WriteAddPrefix(int32 chunk_id, SBPrefix prefix) {
  add_prefixes_.push_back(SBAddPrefix(chunk_id, prefix));
  return true;
}

// Load the entire store and hand back just the add prefixes.  A missing file
// is treated as an empty store, not an error.
bool SafeBrowsingStoreFile::GetAddPrefixes(SBAddPrefixes* add_prefixes) {
  add_prefixes->clear();
  if (!base::PathExists(filename_))
    return true;

  StateInternal db_state;
  if (!ReadDbStateHelper(filename_, &db_state))
    return OnCorruptDatabase();

  add_prefixes->swap(db_state.add_prefixes_);
  return true;
}

// Same as GetAddPrefixes(), but returning the add full hashes.
bool SafeBrowsingStoreFile::GetAddFullHashes(
    std::vector<SBAddFullHash>* add_full_hashes) {
  add_full_hashes->clear();
  if (!base::PathExists(filename_))
    return true;

  StateInternal db_state;
  if (!ReadDbStateHelper(filename_, &db_state))
    return OnCorruptDatabase();

  add_full_hashes->swap(db_state.add_full_hashes_);
  return true;
}

bool SafeBrowsingStoreFile::WriteAddHash(int32 chunk_id,
                                         const SBFullHash& full_hash) {
  add_hashes_.push_back(SBAddFullHash(chunk_id, full_hash));
  return true;
}

bool SafeBrowsingStoreFile::WriteSubPrefix(int32 chunk_id, int32 add_chunk_id,
                                           SBPrefix prefix) {
  sub_prefixes_.push_back(SBSubPrefix(chunk_id, add_chunk_id, prefix));
  return true;
}

bool SafeBrowsingStoreFile::WriteSubHash(int32 chunk_id, int32 add_chunk_id,
                                         const SBFullHash& full_hash) {
  sub_hashes_.push_back(SBSubFullHash(chunk_id, add_chunk_id, full_hash));
  return true;
}

// Record the corruption (once per update cycle) and notify the owner via the
// callback supplied to Init().
bool SafeBrowsingStoreFile::OnCorruptDatabase() {
  if (!corruption_seen_)
    RecordFormatEvent(FORMAT_EVENT_FILE_CORRUPT);
  corruption_seen_ = true;

  corruption_callback_.Run();

  // Return false as a convenience to callers.
  return false;
}

bool SafeBrowsingStoreFile::Close() {
  ClearUpdateBuffers();

  // Make sure the files are closed.
  file_.reset();
  new_file_.reset();
  return true;
}

// Open the temporary output file and (if present) the existing store, leaving
// both handles in members for DoUpdate()/FinishUpdate().
bool SafeBrowsingStoreFile::BeginUpdate() {
  DCHECK(!file_.get() && !new_file_.get());

  // Structures should all be clear unless something bad happened.
  DCHECK(add_chunks_cache_.empty());
  DCHECK(sub_chunks_cache_.empty());
  DCHECK(add_del_cache_.empty());
  DCHECK(sub_del_cache_.empty());
  DCHECK(add_prefixes_.empty());
  DCHECK(sub_prefixes_.empty());
  DCHECK(add_hashes_.empty());
  DCHECK(sub_hashes_.empty());
  DCHECK_EQ(chunks_written_, 0);

  corruption_seen_ = false;

  const base::FilePath new_filename = TemporaryFileForFilename(filename_);
  base::ScopedFILE new_file(base::OpenFile(new_filename, "wb+"));
  if (new_file.get() == NULL)
    return false;

  base::ScopedFILE file(base::OpenFile(filename_, "rb"));
  empty_ = (file.get() == NULL);
  if (empty_) {
    // If the file exists but cannot be opened, try to delete it (not
    // deleting directly, the bloom filter needs to be deleted, too).
    if (base::PathExists(filename_))
      return OnCorruptDatabase();

    new_file_.swap(new_file);
    return true;
  }

  base::MD5Context context;
  FileHeader header;
  const int version =
      ReadAndVerifyHeader(filename_, &header,
                         &add_chunks_cache_, &sub_chunks_cache_,
                         file.get(), &context);
  if (version == kInvalidVersion) {
    // Distinguish an older-format store from outright garbage for metrics.
    FileHeader retry_header;
    if (FileRewind(file.get()) && ReadItem(&retry_header, file.get(), NULL)) {
      if (retry_header.magic == kFileMagic &&
          retry_header.version < kFileVersion) {
        RecordFormatEvent(FORMAT_EVENT_FOUND_DEPRECATED);
      } else {
        RecordFormatEvent(FORMAT_EVENT_FOUND_UNKNOWN);
      }
    }

    // Close the file so that it can be deleted.
    file.reset();

    return OnCorruptDatabase();
  }

  file_.swap(file);
  new_file_.swap(new_file);
  return true;
}

// Flush the buffered chunk data to the temporary file as one chunk record.
// A chunk with no data at all is silently skipped.
bool SafeBrowsingStoreFile::FinishChunk() {
  if (!add_prefixes_.size() && !sub_prefixes_.size() &&
      !add_hashes_.size() && !sub_hashes_.size())
    return true;

  ChunkHeader header;
  header.add_prefix_count = add_prefixes_.size();
  header.sub_prefix_count = sub_prefixes_.size();
  header.add_hash_count = add_hashes_.size();
  header.sub_hash_count = sub_hashes_.size();
  if (!WriteItem(header, new_file_.get(), NULL))
    return false;

  if (!WriteContainer(add_prefixes_, new_file_.get(), NULL) ||
      !WriteContainer(sub_prefixes_, new_file_.get(), NULL) ||
      !WriteContainer(add_hashes_, new_file_.get(), NULL) ||
      !WriteContainer(sub_hashes_, new_file_.get(), NULL))
    return false;

  ++chunks_written_;

  // Clear everything to save memory.
  return ClearChunkBuffers();
}

// Core of the update: read the buffered update chunks back from the temporary
// file, merge them shard-by-shard with the existing store, and write the
// processed result (with a fresh checksum) over the old store.
bool SafeBrowsingStoreFile::DoUpdate(
    safe_browsing::PrefixSetBuilder* builder,
    std::vector<SBAddFullHash>* add_full_hashes_result) {
  DCHECK(file_.get() || empty_);
  DCHECK(new_file_.get());
  CHECK(builder);
  CHECK(add_full_hashes_result);

  // Rewind the temporary storage.
  if (!FileRewind(new_file_.get()))
    return false;

  // Get chunk file's size for validating counts.
  int64 update_size = 0;
  if (!base::GetFileSize(TemporaryFileForFilename(filename_), &update_size))
    return OnCorruptDatabase();

  // Track update size to answer questions at http://crbug.com/72216 .
  // Log small updates as 1k so that the 0 (underflow) bucket can be
  // used for "empty" in SafeBrowsingDatabase.
  UMA_HISTOGRAM_COUNTS("SB2.DatabaseUpdateKilobytes",
                       std::max(static_cast<int>(update_size / 1024), 1));

  // Chunk updates to integrate.
  StateInternal new_state;

  // Read update chunks.
  for (int i = 0; i < chunks_written_; ++i) {
    ChunkHeader header;

    int64 ofs = ftell(new_file_.get());
    if (ofs == -1)
      return false;

    if (!ReadItem(&header, new_file_.get(), NULL))
      return false;

    // As a safety measure, make sure that the header describes a sane
    // chunk, given the remaining file size.
    int64 expected_size = ofs + sizeof(ChunkHeader);
    expected_size += header.add_prefix_count * sizeof(SBAddPrefix);
    expected_size += header.sub_prefix_count * sizeof(SBSubPrefix);
    expected_size += header.add_hash_count * sizeof(SBAddFullHash);
    expected_size += header.sub_hash_count * sizeof(SBSubFullHash);
    if (expected_size > update_size)
      return false;

    if (!new_state.AppendData(header.add_prefix_count, header.sub_prefix_count,
                              header.add_hash_count, header.sub_hash_count,
                              new_file_.get(), NULL)) {
      return false;
    }
  }

  // The state was accumulated by chunk, sort by prefix.
  new_state.SortData();

  // These strides control how much data is loaded into memory per pass.
  // Strides must be an even power of two.  |in_stride| will be derived from
  // the input file.  |out_stride| will be derived from an estimate of the
  // resulting file's size.  |process_stride| will be the max of both.
  uint64 in_stride = kMaxShardStride;
  uint64 out_stride = kMaxShardStride;
  uint64 process_stride = 0;

  // Used to verify the input's checksum if |!empty_|.
  base::MD5Context in_context;

  if (!empty_) {
    DCHECK(file_.get());

    FileHeader header = {0};
    int version = ReadAndVerifyHeader(filename_, &header,
                                      &add_chunks_cache_, &sub_chunks_cache_,
                                      file_.get(), &in_context);
    if (version == kInvalidVersion)
      return OnCorruptDatabase();

    if (header.shard_stride)
      in_stride = header.shard_stride;

    // The header checksum should have prevented this case, but the code will
    // be broken if this is not correct.
    if (!IsPowerOfTwo(in_stride))
      return OnCorruptDatabase();
  }

  // We no longer need to track deleted chunks.
  DeleteChunksFromSet(add_del_cache_, &add_chunks_cache_);
  DeleteChunksFromSet(sub_del_cache_, &sub_chunks_cache_);

  // Calculate |out_stride| to break the file down into reasonable shards.
  {
    int64 original_size = 0;
    if (!empty_ && !base::GetFileSize(filename_, &original_size))
      return OnCorruptDatabase();

    // Approximate the final size as everything.  Subs and deletes will reduce
    // the size, but modest over-sharding won't hurt much.
    int64 shard_size = original_size + update_size;

    // Keep splitting until a single stride of data fits the target.
    size_t shifts = 0;
    while (out_stride > kMinShardStride && shard_size > kUpdateStorageBytes) {
      out_stride >>= 1;
      shard_size >>= 1;
      ++shifts;
    }
    UMA_HISTOGRAM_COUNTS("SB2.OutShardShifts", shifts);

    DCHECK(IsPowerOfTwo(out_stride));
  }

  // Outer loop strides by the max of the input stride (to read integral
  // shards) and the output stride (to write integral shards).
  process_stride = std::max(in_stride, out_stride);
  DCHECK(IsPowerOfTwo(process_stride));
  DCHECK_EQ(0u, process_stride % in_stride);
  DCHECK_EQ(0u, process_stride % out_stride);

  // Start writing the new data to |new_file_|.
  base::MD5Context out_context;
  if (!WriteHeader(out_stride, add_chunks_cache_, sub_chunks_cache_,
                   new_file_.get(), &out_context)) {
    return false;
  }

  // Start at the beginning of the SBPrefix space.
  uint64 in_min = 0;
  uint64 out_min = 0;
  uint64 process_min = 0;

  // Start at the beginning of the updates.
  StateInternalPos new_pos = new_state.StateBegin();

  // Re-usable container for shard processing.
  StateInternal db_state;

  // Track aggregate counts for histograms.
  size_t add_prefix_count = 0;
  size_t sub_prefix_count = 0;

  do {
    // Maximum element in the current shard.
    SBPrefix process_max =
        static_cast<SBPrefix>(process_min + process_stride - 1);
    DCHECK_GT(process_max, process_min);

    // Drop the data from previous pass.
    db_state.ClearData();

    // Fill the processing shard with one or more input shards.
    if (!empty_) {
      do {
        ShardHeader shard_header;
        if (!ReadItem(&shard_header, file_.get(), &in_context))
          return OnCorruptDatabase();

        if (!db_state.AppendData(shard_header.add_prefix_count,
                                 shard_header.sub_prefix_count,
                                 shard_header.add_hash_count,
                                 shard_header.sub_hash_count,
                                 file_.get(), &in_context))
          return OnCorruptDatabase();

        in_min += in_stride;
      } while (in_min <= kMaxSBPrefix && in_min < process_max);
    }

    // Shard the update data to match the database data, then merge the update
    // data and process the results.
    {
      StateInternalPos new_end = new_state.ShardEnd(new_pos, process_max);
      db_state.MergeDataAndProcess(new_pos, new_end,
                                   add_del_cache_, sub_del_cache_);
      new_pos = new_end;
    }

    // Collect the processed data for return to caller.
    for (size_t i = 0; i < db_state.add_prefixes_.size(); ++i) {
      builder->AddPrefix(db_state.add_prefixes_[i].prefix);
    }
    add_full_hashes_result->insert(add_full_hashes_result->end(),
                                   db_state.add_full_hashes_.begin(),
                                   db_state.add_full_hashes_.end());
    add_prefix_count += db_state.add_prefixes_.size();
    sub_prefix_count += db_state.sub_prefixes_.size();

    // Write one or more shards of processed output.
    StateInternalPos out_pos = db_state.StateBegin();
    do {
      SBPrefix out_max = static_cast<SBPrefix>(out_min + out_stride - 1);
      DCHECK_GT(out_max, out_min);

      StateInternalPos out_end = db_state.ShardEnd(out_pos, out_max);
      if (!db_state.WriteShard(out_pos, out_end, new_file_.get(),
                               &out_context))
        return false;
      out_pos = out_end;
      out_min += out_stride;
      // NOTE(review): the cast-roundtrip test appears to guard against
      // |out_min| overflowing the SBPrefix space — confirm it matches the
      // |<= kMaxSBPrefix| idiom used by the other loops.
    } while (out_min == static_cast<SBPrefix>(out_min) &&
             out_min < process_max);

    process_min += process_stride;
  } while (process_min <= kMaxSBPrefix);

  // Verify the overall checksum.
  if (!empty_) {
    if (!ReadAndVerifyChecksum(file_.get(), &in_context)) {
      RecordFormatEvent(FORMAT_EVENT_UPDATE_CHECKSUM_FAILURE);
      return OnCorruptDatabase();
    }

    // TODO(shess): Verify EOF?

    // Close the input file so the new file can be renamed over it.
    file_.reset();
  }
  DCHECK(!file_.get());

  // Write the overall checksum.
  base::MD5Digest out_digest;
  base::MD5Final(&out_digest, &out_context);
  if (!WriteItem(out_digest, new_file_.get(), NULL))
    return false;

  // Trim any excess left over from the temporary chunk data.
  if (!base::TruncateFile(new_file_.get()))
    return false;

  // Close the file handle and swizzle the file into place.
  new_file_.reset();
  if (!base::DeleteFile(filename_, false) &&
      base::PathExists(filename_))
    return false;

  const base::FilePath new_filename = TemporaryFileForFilename(filename_);
  if (!base::Move(new_filename, filename_))
    return false;

  // Record counts before swapping to caller.
  UMA_HISTOGRAM_COUNTS("SB2.AddPrefixes", add_prefix_count);
  UMA_HISTOGRAM_COUNTS("SB2.SubPrefixes", sub_prefix_count);

  return true;
}

// Wrapper around DoUpdate() that guarantees cleanup: a failed update cancels
// (deleting the staging file), and a successful one closes all handles.
bool SafeBrowsingStoreFile::FinishUpdate(
    safe_browsing::PrefixSetBuilder* builder,
    std::vector<SBAddFullHash>* add_full_hashes_result) {
  DCHECK(builder);
  DCHECK(add_full_hashes_result);

  if (!DoUpdate(builder, add_full_hashes_result)) {
    CancelUpdate();
    return false;
  }

  DCHECK(!new_file_.get());
  DCHECK(!file_.get());

  return Close();
}

bool SafeBrowsingStoreFile::CancelUpdate() {
  bool ret = Close();

  // Delete stale staging file.
  const base::FilePath new_filename = TemporaryFileForFilename(filename_);
  base::DeleteFile(new_filename, false);

  return ret;
}

void SafeBrowsingStoreFile::SetAddChunk(int32 chunk_id) {
  add_chunks_cache_.insert(chunk_id);
}

bool SafeBrowsingStoreFile::CheckAddChunk(int32 chunk_id) {
  return add_chunks_cache_.count(chunk_id) > 0;
}

void SafeBrowsingStoreFile::GetAddChunks(std::vector<int32>* out) {
  out->clear();
  out->insert(out->end(), add_chunks_cache_.begin(), add_chunks_cache_.end());
}

void SafeBrowsingStoreFile::SetSubChunk(int32 chunk_id) {
  sub_chunks_cache_.insert(chunk_id);
}

bool SafeBrowsingStoreFile::CheckSubChunk(int32 chunk_id) {
  return sub_chunks_cache_.count(chunk_id) > 0;
}

void SafeBrowsingStoreFile::GetSubChunks(std::vector<int32>* out) {
  out->clear();
  out->insert(out->end(), sub_chunks_cache_.begin(), sub_chunks_cache_.end());
}

void SafeBrowsingStoreFile::DeleteAddChunk(int32 chunk_id) {
  add_del_cache_.insert(chunk_id);
}

void SafeBrowsingStoreFile::DeleteSubChunk(int32 chunk_id) {
  sub_del_cache_.insert(chunk_id);
}

// static
bool SafeBrowsingStoreFile::DeleteStore(const base::FilePath& basename) {
  if (!base::DeleteFile(basename, false) &&
      base::PathExists(basename)) {
    NOTREACHED();
    return false;
  }

  const base::FilePath new_filename = TemporaryFileForFilename(basename);
  if (!base::DeleteFile(new_filename, false) &&
      base::PathExists(new_filename)) {
    NOTREACHED();
    return false;
  }

  // With SQLite support gone, one way to get to this code is if the
  // existing file is a SQLite file.  Make sure the journal file is
  // also removed.
  const base::FilePath journal_filename(
      basename.value() + FILE_PATH_LITERAL("-journal"));
  if (base::PathExists(journal_filename))
    base::DeleteFile(journal_filename, false);

  return true;
}
// Computes a term of a product-form recurrence: the answer is
// f1^e1 * f2^e2 * f3^e3 * c^e4 (mod P), where the exponents e1..e4 follow
// linear recurrences and are therefore advanced by matrix exponentiation.
// Exponent arithmetic is done modulo PP = P - 1: by Fermat's little theorem,
// x^e == x^(e mod (P-1)) (mod P) for prime P and x not divisible by P.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstring>

typedef long long ll;

const int N = 11, P = 1e9 + 7, PP = 1e9 + 6;

// c = a * b for n x n matrices, entries reduced into the canonical range
// [0, PP).  The "+ PP" normalization is essential: matrix B below contains a
// negative entry (-4), and with plain '%' a negative exponent could reach
// pw(int, int), whose loop (y >>= 1 on a negative int) never terminates.
// Normalizing shifts exponents only by multiples of P - 1, which leaves the
// final answer unchanged by Fermat's little theorem.
void mul(int n, int a[N][N], int b[N][N], int c[N][N]) {
  static int tmp[N][N];
  memset(tmp, 0, sizeof tmp);
  for (int i = 0; i < n; i++)
    for (int k = 0; k < n; k++)
      for (int j = 0; j < n; j++)
        tmp[i][j] = ((tmp[i][j] + 1ll * a[i][k] * b[k][j]) % PP + PP) % PP;
  memcpy(c, tmp, sizeof tmp);  // Staging in tmp allows c to alias a or b.
}

// z = x^y for an n x n matrix by binary exponentiation (entries mod PP).
// Note this clobbers x with repeated squarings; callers pass x for z anyway.
void pw(int n, int x[N][N], ll y, int z[N][N]) {
  static int tmp[N][N];
  memset(tmp, 0, sizeof tmp);
  for (int i = 0; i < n; i++)
    tmp[i][i] = 1;  // Start from the identity matrix.
  for (; y; y >>= 1, mul(n, x, x, x))
    if (y & 1)
      mul(n, tmp, x, tmp);
  memcpy(z, tmp, sizeof tmp);
}

// Scalar x^y mod P by binary exponentiation; y must be non-negative
// (guaranteed by the normalization in mul()).
int pw(int x, int y) {
  int r = 1;
  for (; y; y >>= 1, x = 1ll * x * x % P)
    if (y & 1)
      r = 1ll * x * r % P;
  return r;
}

// Transition matrices for the exponent recurrences (A/a: 3x3 system for the
// f1,f2,f3 exponents; B/b: 5x5 system for the exponent of c) and the initial
// exponent vectors.  The exact recurrence they encode comes from the problem
// statement — confirm against it before modifying.
int A[N][N] = {{1, 1, 1}, {1}, {0, 1}};
int a[N][N] = {{0, 0, 1}, {0, 1}, {1}};
int B[N][N] = {{1, 1, 1, 2, -4}, {1}, {0, 1}, {0, 0, 0, 1, 1}, {0, 0, 0, 0, 1}};
int b[N][N] = {{}, {}, {}, {3}, {1}};

int f1, f2, f3, c;
ll n;

int main() {
  scanf("%lld%d%d%d%d", &n, &f1, &f2, &f3, &c);
  // Advance both exponent recurrences by n - 3 steps, then apply them to the
  // initial vectors; row 0 then holds the exponents for f1, f2, f3 and c.
  pw(3, A, n - 3, A), pw(5, B, n - 3, B);
  mul(3, A, a, a), mul(5, B, b, b);
  // The modular expression has type long long; cast explicitly so the value
  // matches the "%d" conversion (passing a long long to %d is undefined
  // behavior).  The value is already reduced mod P, so it fits in int.
  printf("%d", static_cast<int>(1ll * pw(f1, a[0][0]) * pw(f2, a[0][1]) % P *
                                pw(f3, a[0][2]) % P * pw(c, b[0][0]) % P));
}
/* All modification made by Intel Corporation: © 2016 Intel Corporation All contributions by the University of California: Copyright (c) 2014, 2015, The Regents of the University of California (Regents) All rights reserved. All other contributions: Copyright (c) 2014, 2015, the respective contributors All rights reserved. For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#ifndef CAFFE_SCALE_LAYER_HPP_
#define CAFFE_SCALE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/bias_layer.hpp"

namespace caffe {

/**
 * @brief Computes a product of two input Blobs, with the shape of the
 *        latter Blob "broadcast" to match the shape of the former.
 *        Equivalent to tiling the latter Blob, then computing the elementwise
 *        product.
 *
 * The second input may be omitted, in which case it's learned as a parameter
 * of the layer.
 */
template <typename Dtype>
class ScaleLayer: public Layer<Dtype> {
 public:
  explicit ScaleLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "Scale"; }
  // One required bottom (the main input x); the scale blob y is optional —
  // when omitted it is learned as a layer parameter (see class comment).
  virtual inline int MinBottomBlobs() const { return 1; }
  virtual inline int MaxBottomBlobs() const { return 2; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  /**
   * In the below shape specifications, @f$ i @f$ denotes the value of the
   * `axis` field given by `this->layer_param_.scale_param().axis()`, after
   * canonicalization (i.e., conversion from negative to positive index,
   * if applicable).
   *
   * @param bottom input Blob vector (length 2)
   *   -# @f$ (d_0 \times ... \times
   *           d_i \times ... \times d_j \times ... \times d_n) @f$
   *      the first factor @f$ x @f$
   *   -# @f$ (d_i \times ... \times d_j) @f$
   *      the second factor @f$ y @f$
   * @param top output Blob vector (length 1)
   *   -# @f$ (d_0 \times ... \times
   *           d_i \times ... \times d_j \times ... \times d_n) @f$
   *      the product @f$ z = x y @f$ computed after "broadcasting" y.
   *      Equivalent to tiling @f$ y @f$ to have the same shape as @f$ x @f$,
   *      then computing the elementwise product.
   */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  // Inner BiasLayer and its plumbing; presumably used to fuse an optional
  // bias term into the scale operation — confirm in scale_layer.cpp.
  shared_ptr<Layer<Dtype> > bias_layer_;
  vector<Blob<Dtype>*> bias_bottom_vec_;
  vector<bool> bias_propagate_down_;
  int bias_param_id_;

  // Scratch blobs; appear to support gradient reduction over the broadcast
  // dimensions in Backward — exact roles live in scale_layer.cpp.
  Blob<Dtype> sum_multiplier_;
  Blob<Dtype> sum_result_;
  Blob<Dtype> temp_;
  // Canonicalized broadcast axis, and the factored sizes of the first input
  // around the scaled dimensions (outer x scale x inner).
  int axis_;
  int outer_dim_, scale_dim_, inner_dim_;
};


}  // namespace caffe

#endif  // CAFFE_SCALE_LAYER_HPP_
/*************************************************************************/ /* editor_settings.cpp */ /*************************************************************************/ /* This file is part of: */ /* GODOT ENGINE */ /* https://godotengine.org */ /*************************************************************************/ /* Copyright (c) 2007-2018 Juan Linietsky, Ariel Manzur. */ /* Copyright (c) 2014-2018 Godot Engine contributors (cf. AUTHORS.md) */ /* */ /* Permission is hereby granted, free of charge, to any person obtaining */ /* a copy of this software and associated documentation files (the */ /* "Software"), to deal in the Software without restriction, including */ /* without limitation the rights to use, copy, modify, merge, publish, */ /* distribute, sublicense, and/or sell copies of the Software, and to */ /* permit persons to whom the Software is furnished to do so, subject to */ /* the following conditions: */ /* */ /* The above copyright notice and this permission notice shall be */ /* included in all copies or substantial portions of the Software. */ /* */ /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */ /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */ /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/ /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */ /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */ /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */ /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/
/*************************************************************************/

#include "editor_settings.h"

#include "core/io/compression.h"
#include "core/io/config_file.h"
#include "core/io/file_access_memory.h"
#include "core/io/resource_loader.h"
#include "core/io/resource_saver.h"
#include "core/io/translation_loader_po.h"
#include "core/os/dir_access.h"
#include "core/os/file_access.h"
#include "core/os/keyboard.h"
#include "core/os/os.h"
#include "core/project_settings.h"
#include "core/version.h"
#include "editor/editor_node.h"
#include "editor/translations.gen.h"
#include "scene/main/node.h"
#include "scene/main/scene_tree.h"
#include "scene/main/viewport.h"

// PRIVATE METHODS

Ref<EditorSettings> EditorSettings::singleton = NULL;

// Properties

// Variant setter entry point: delegates to _set_only() and emits
// "settings_changed" when the stored value actually changed.
bool EditorSettings::_set(const StringName &p_name, const Variant &p_value) {

	_THREAD_SAFE_METHOD_

	bool changed = _set_only(p_name, p_value);
	if (changed) {
		emit_signal("settings_changed");
	}
	return true;
}

// Stores a setting without emitting a signal.  Returns whether the stored
// value changed.  "shortcuts" is special-cased: it arrives as a flat array of
// [name, event, name, event, ...] pairs and always returns false.
bool EditorSettings::_set_only(const StringName &p_name, const Variant &p_value) {

	_THREAD_SAFE_METHOD_

	if (p_name.operator String() == "shortcuts") {

		Array arr = p_value;
		// Pairs of (name, shortcut event); an odd-sized array is malformed.
		ERR_FAIL_COND_V(arr.size() && arr.size() & 1, true);
		for (int i = 0; i < arr.size(); i += 2) {

			String name = arr[i];
			Ref<InputEvent> shortcut = arr[i + 1];

			Ref<ShortCut> sc;
			sc.instance();
			sc->set_shortcut(shortcut);
			add_shortcut(name, sc);
		}

		return false;
	}

	bool changed = false;

	if (p_value.get_type() == Variant::NIL) {
		// NIL erases the property entirely.
		if (props.has(p_name)) {
			props.erase(p_name);
			changed = true;
		}
	} else {
		if (props.has(p_name)) {
			if (p_value != props[p_name].variant) {
				props[p_name].variant = p_value;
				changed = true;
			}
		} else {
			props[p_name] = VariantContainer(p_value, last_order++);
			changed = true;
		}

		if (save_changed_setting) {
			if (props[p_name].save != true) {
				props[p_name].save = true;
				changed = true;
			}
		}
	}

	return changed;
}

// Variant getter: for "shortcuts" rebuilds the flat pair array (optionally
// skipping entries still at their default); otherwise returns the stored
// property value.
bool EditorSettings::_get(const StringName &p_name, Variant &r_ret) const {

	_THREAD_SAFE_METHOD_

	if (p_name.operator String() == "shortcuts") {

		Array arr;
		for (const Map<String, Ref<ShortCut> >::Element *E = shortcuts.front(); E; E = E->next()) {

			Ref<ShortCut> sc = E->get();

			if (optimize_save) {
				if (!sc->has_meta("original")) {
					continue; //this came from settings but is not any longer used
				}

				Ref<InputEvent> original = sc->get_meta("original");
				if (sc->is_shortcut(original) || (original.is_null() && sc->get_shortcut().is_null()))
					continue; //not changed from default, don't save
			}

			arr.push_back(E->key());
			arr.push_back(sc->get_shortcut());
		}
		r_ret = arr;
		return true;
	}

	const VariantContainer *v = props.getptr(p_name);
	if (!v) {
		print_line("EditorSettings::_get - Warning, not found: " + String(p_name));
		return false;
	}
	r_ret = v->variant;
	return true;
}

// Set a value and also record it as the property's default, so
// has_default_value() and "revert to default" work for it.
void EditorSettings::_initial_set(const StringName &p_name, const Variant &p_value) {
	set(p_name, p_value);
	props[p_name].initial = p_value;
	props[p_name].has_default_value = true;
}

// Sort key used to list properties in insertion order.
struct _EVCSort {

	String name;
	Variant::Type type;
	int order;
	bool save;

	bool operator<(const _EVCSort &p_vcs) const { return order < p_vcs.order; }
};

// Enumerates all visible properties (ordered by insertion), computing the
// usage flags each should be exposed with, plus the internal "shortcuts"
// array property.
void EditorSettings::_get_property_list(List<PropertyInfo> *p_list) const {

	_THREAD_SAFE_METHOD_

	const String *k = NULL;
	Set<_EVCSort> vclist;

	while ((k = props.next(k))) {

		const VariantContainer *v = props.getptr(*k);

		if (v->hide_from_editor)
			continue;

		_EVCSort vc;
		vc.name = *k;
		vc.order = v->order;
		vc.type = v->variant.get_type();
		vc.save = v->save;

		vclist.insert(vc);
	}

	for (Set<_EVCSort>::Element *E = vclist.front(); E; E = E->next()) {

		int pinfo = 0;
		if (E->get().save || !optimize_save) {
			pinfo |= PROPERTY_USAGE_STORAGE;
		}

		if (!E->get().name.begins_with("_") && !E->get().name.begins_with("projects/")) {
			pinfo |= PROPERTY_USAGE_EDITOR;
		} else {
			pinfo |= PROPERTY_USAGE_STORAGE; //hiddens must always be saved
		}

		PropertyInfo pi(E->get().type, E->get().name);
		pi.usage = pinfo;
		// A registered hint overrides the synthesized PropertyInfo entirely.
		if (hints.has(E->get().name))
			pi = hints[E->get().name];

		p_list->push_back(pi);
	}

	p_list->push_back(PropertyInfo(Variant::ARRAY, "shortcuts", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR | PROPERTY_USAGE_INTERNAL)); //do not edit
}

// Script-facing wrapper for add_property_hint(): unpacks a Dictionary with
// "name"/"type" (required) and optional "hint"/"hint_string" keys.
void EditorSettings::_add_property_info_bind(const Dictionary &p_info) {

	ERR_FAIL_COND(!p_info.has("name"));
	ERR_FAIL_COND(!p_info.has("type"));

	PropertyInfo pinfo;
	pinfo.name = p_info["name"];
	ERR_FAIL_COND(!props.has(pinfo.name));
	pinfo.type = Variant::Type(p_info["type"].operator int());
	ERR_FAIL_INDEX(pinfo.type, Variant::VARIANT_MAX);

	if (p_info.has("hint"))
		pinfo.hint = PropertyHint(p_info["hint"].operator int());
	if (p_info.has("hint_string"))
		pinfo.hint_string = p_info["hint_string"];

	add_property_hint(pinfo);
}

// Default configs

bool EditorSettings::has_default_value(const String &p_setting) const {

	_THREAD_SAFE_METHOD_

	if (!props.has(p_setting))
		return false;
	return props[p_setting].has_default_value;
}

// Registers every built-in editor setting with its default value and,
// where applicable, its editor hint (range/enum/file filter).
// NOTE: this definition continues past the end of this chunk.
void EditorSettings::_load_defaults(Ref<ConfigFile> p_extra_config) {
	_THREAD_SAFE_METHOD_

	{
		// Pick the editor language: exact locale match wins, then a
		// prefix match, then English; also build the enum hint string
		// listing all bundled translations.
		String lang_hint = "en";
		String host_lang = OS::get_singleton()->get_locale();
		host_lang = TranslationServer::standardize_locale(host_lang);

		String best;

		EditorTranslationList *etl = _editor_translations;

		while (etl->data) {

			const String &locale = etl->lang;
			lang_hint += ",";
			lang_hint += locale;

			if (host_lang == locale) {
				best = locale;
			}

			if (best == String() && host_lang.begins_with(locale)) {
				best = locale;
			}

			etl++;
		}

		if (best == String()) {
			best = "en";
		}

		_initial_set("interface/editor/editor_language", best);
		hints["interface/editor/editor_language"] = PropertyInfo(Variant::STRING, "interface/editor/editor_language", PROPERTY_HINT_ENUM, lang_hint, PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	}

	_initial_set("interface/editor/display_scale", 0);
	hints["interface/editor/display_scale"] = PropertyInfo(Variant::INT, "interface/editor/display_scale", PROPERTY_HINT_ENUM, "Auto,75%,100%,125%,150%,175%,200%,Custom", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/editor/custom_display_scale", 1.0f);
	hints["interface/editor/custom_display_scale"] = PropertyInfo(Variant::REAL, "interface/editor/custom_display_scale", PROPERTY_HINT_RANGE, "0.75,3,0.01", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/scene_tabs/show_script_button", false);
	_initial_set("interface/editor/main_font_size", 14);
	hints["interface/editor/main_font_size"] = PropertyInfo(Variant::INT, "interface/editor/main_font_size", PROPERTY_HINT_RANGE, "10,40,1", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/editor/code_font_size", 14);
	hints["interface/editor/code_font_size"] = PropertyInfo(Variant::INT, "interface/editor/code_font_size", PROPERTY_HINT_RANGE, "8,96,1", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/editor/main_font_hinting", 2);
	hints["interface/editor/main_font_hinting"] = PropertyInfo(Variant::INT, "interface/editor/main_font_hinting", PROPERTY_HINT_ENUM, "None,Light,Normal", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/editor/code_font_hinting", 2);
	hints["interface/editor/code_font_hinting"] = PropertyInfo(Variant::INT, "interface/editor/code_font_hinting", PROPERTY_HINT_ENUM, "None,Light,Normal", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/editor/main_font", "");
	hints["interface/editor/main_font"] = PropertyInfo(Variant::STRING, "interface/editor/main_font", PROPERTY_HINT_GLOBAL_FILE, "*.ttf,*.otf", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/editor/main_font_bold", "");
	hints["interface/editor/main_font_bold"] = PropertyInfo(Variant::STRING, "interface/editor/main_font_bold", PROPERTY_HINT_GLOBAL_FILE, "*.ttf,*.otf", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/editor/code_font", "");
	hints["interface/editor/code_font"] = PropertyInfo(Variant::STRING, "interface/editor/code_font", PROPERTY_HINT_GLOBAL_FILE, "*.ttf,*.otf", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/editor/dim_editor_on_dialog_popup", true);
	_initial_set("interface/editor/dim_amount", 0.6f);
	hints["interface/editor/dim_amount"] = PropertyInfo(Variant::REAL, "interface/editor/dim_amount", PROPERTY_HINT_RANGE, "0,1,0.01", PROPERTY_USAGE_DEFAULT);
	_initial_set("interface/editor/dim_transition_time", 0.08f);
	hints["interface/editor/dim_transition_time"] = PropertyInfo(Variant::REAL, "interface/editor/dim_transition_time", PROPERTY_HINT_RANGE, "0,1,0.001", PROPERTY_USAGE_DEFAULT);
	_initial_set("interface/editor/separate_distraction_mode", false);
	_initial_set("interface/editor/save_each_scene_on_quit", true); // Regression
	_initial_set("interface/editor/quit_confirmation", true);
	_initial_set("interface/theme/preset", 0);
	hints["interface/theme/preset"] = PropertyInfo(Variant::INT, "interface/theme/preset", PROPERTY_HINT_ENUM, "Default,Custom,Grey,Godot 2,Arc,Light,Alien,Solarized (Dark),Solarized (Light)", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/theme/icon_and_font_color", 0);
	hints["interface/theme/icon_and_font_color"] = PropertyInfo(Variant::INT, "interface/theme/icon_and_font_color", PROPERTY_HINT_ENUM, "Auto,Dark,Light", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	// NOTE: the base_color/accent_color hint assignments below are ordered
	// oppositely to their _initial_set calls, but each hint is keyed to the
	// matching setting, so behavior is correct.
	_initial_set("interface/theme/base_color", Color::html("#323b4f"));
	hints["interface/theme/accent_color"] = PropertyInfo(Variant::COLOR, "interface/theme/accent_color", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/theme/accent_color", Color::html("#699ce8"));
	hints["interface/theme/base_color"] = PropertyInfo(Variant::COLOR, "interface/theme/base_color", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED);
	_initial_set("interface/theme/contrast", 0.25);
	hints["interface/theme/contrast"] = PropertyInfo(Variant::REAL,
"interface/theme/contrast", PROPERTY_HINT_RANGE, "0.01, 1, 0.01"); _initial_set("interface/theme/highlight_tabs", false); _initial_set("interface/theme/border_size", 1); _initial_set("interface/theme/use_graph_node_headers", false); hints["interface/theme/border_size"] = PropertyInfo(Variant::INT, "interface/theme/border_size", PROPERTY_HINT_RANGE, "0,2,1", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED); _initial_set("interface/theme/additional_spacing", 0); hints["interface/theme/additional_spacing"] = PropertyInfo(Variant::REAL, "interface/theme/additional_spacing", PROPERTY_HINT_RANGE, "0,5,0.1", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED); _initial_set("interface/theme/custom_theme", ""); hints["interface/theme/custom_theme"] = PropertyInfo(Variant::STRING, "interface/theme/custom_theme", PROPERTY_HINT_GLOBAL_FILE, "*.res,*.tres,*.theme", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED); _initial_set("interface/scene_tabs/show_extension", false); _initial_set("interface/scene_tabs/show_thumbnail_on_hover", true); _initial_set("interface/scene_tabs/resize_if_many_tabs", true); _initial_set("interface/scene_tabs/minimum_width", 50); hints["interface/scene_tabs/minimum_width"] = PropertyInfo(Variant::INT, "interface/scene_tabs/minimum_width", PROPERTY_HINT_RANGE, "50,500,1", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED); _initial_set("filesystem/directories/autoscan_project_path", ""); hints["filesystem/directories/autoscan_project_path"] = PropertyInfo(Variant::STRING, "filesystem/directories/autoscan_project_path", PROPERTY_HINT_GLOBAL_DIR); _initial_set("filesystem/directories/default_project_path", OS::get_singleton()->has_environment("HOME") ? 
OS::get_singleton()->get_environment("HOME") : OS::get_singleton()->get_system_dir(OS::SYSTEM_DIR_DOCUMENTS)); hints["filesystem/directories/default_project_path"] = PropertyInfo(Variant::STRING, "filesystem/directories/default_project_path", PROPERTY_HINT_GLOBAL_DIR); _initial_set("filesystem/directories/default_project_export_path", ""); hints["filesystem/directories/default_project_export_path"] = PropertyInfo(Variant::STRING, "filesystem/directories/default_project_export_path", PROPERTY_HINT_GLOBAL_DIR); _initial_set("interface/scene_tabs/show_script_button", false); _initial_set("text_editor/theme/color_theme", "Adaptive"); hints["text_editor/theme/color_theme"] = PropertyInfo(Variant::STRING, "text_editor/theme/color_theme", PROPERTY_HINT_ENUM, "Adaptive,Default,Custom"); _initial_set("text_editor/theme/line_spacing", 4); _load_default_text_editor_theme(); _initial_set("text_editor/highlighting/syntax_highlighting", true); _initial_set("text_editor/highlighting/highlight_all_occurrences", true); _initial_set("text_editor/highlighting/highlight_current_line", true); _initial_set("text_editor/cursor/scroll_past_end_of_file", false); _initial_set("text_editor/indent/type", 0); hints["text_editor/indent/type"] = PropertyInfo(Variant::INT, "text_editor/indent/type", PROPERTY_HINT_ENUM, "Tabs,Spaces"); _initial_set("text_editor/indent/size", 4); hints["text_editor/indent/size"] = PropertyInfo(Variant::INT, "text_editor/indent/size", PROPERTY_HINT_RANGE, "1, 64, 1"); // size of 0 crashes. 
_initial_set("text_editor/indent/auto_indent", true); _initial_set("text_editor/indent/convert_indent_on_save", false); _initial_set("text_editor/indent/draw_tabs", true); _initial_set("text_editor/line_numbers/show_line_numbers", true); _initial_set("text_editor/line_numbers/line_numbers_zero_padded", false); _initial_set("text_editor/line_numbers/show_breakpoint_gutter", true); _initial_set("text_editor/line_numbers/code_folding", true); _initial_set("text_editor/line_numbers/word_wrap", false); _initial_set("text_editor/line_numbers/show_line_length_guideline", false); _initial_set("text_editor/line_numbers/line_length_guideline_column", 80); hints["text_editor/line_numbers/line_length_guideline_column"] = PropertyInfo(Variant::INT, "text_editor/line_numbers/line_length_guideline_column", PROPERTY_HINT_RANGE, "20, 160, 1"); _initial_set("text_editor/open_scripts/smooth_scrolling", true); _initial_set("text_editor/open_scripts/v_scroll_speed", 80); _initial_set("text_editor/open_scripts/show_members_overview", true); _initial_set("text_editor/files/trim_trailing_whitespace_on_save", false); _initial_set("text_editor/completion/idle_parse_delay", 2); _initial_set("text_editor/tools/create_signal_callbacks", true); _initial_set("text_editor/tools/sort_members_outline_alphabetically", false); _initial_set("text_editor/files/autosave_interval_secs", 0); _initial_set("text_editor/cursor/block_caret", false); _initial_set("text_editor/cursor/caret_blink", true); _initial_set("text_editor/cursor/caret_blink_speed", 0.65); hints["text_editor/cursor/caret_blink_speed"] = PropertyInfo(Variant::REAL, "text_editor/cursor/caret_blink_speed", PROPERTY_HINT_RANGE, "0.1, 10, 0.01"); _initial_set("text_editor/cursor/right_click_moves_caret", true); _initial_set("text_editor/completion/auto_brace_complete", false); _initial_set("text_editor/completion/put_callhint_tooltip_below_current_line", true); _initial_set("text_editor/completion/callhint_tooltip_offset", Vector2()); 
_initial_set("text_editor/files/restore_scripts_on_load", true); _initial_set("text_editor/completion/complete_file_paths", true); _initial_set("docks/scene_tree/start_create_dialog_fully_expanded", false); _initial_set("docks/scene_tree/draw_relationship_lines", false); _initial_set("docks/scene_tree/relationship_line_color", Color::html("464646")); _initial_set("editors/grid_map/pick_distance", 5000.0); _initial_set("editors/3d/primary_grid_color", Color::html("909090")); hints["editors/3d/primary_grid_color"] = PropertyInfo(Variant::COLOR, "editors/3d/primary_grid_color", PROPERTY_HINT_COLOR_NO_ALPHA, "", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED); _initial_set("editors/3d/secondary_grid_color", Color::html("606060")); hints["editors/3d/secondary_grid_color"] = PropertyInfo(Variant::COLOR, "editors/3d/secondary_grid_color", PROPERTY_HINT_COLOR_NO_ALPHA, "", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED); _initial_set("editors/3d/grid_size", 50); hints["editors/3d/grid_size"] = PropertyInfo(Variant::INT, "editors/3d/grid_size", PROPERTY_HINT_RANGE, "1,500,1", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED); _initial_set("editors/3d/primary_grid_steps", 10); hints["editors/3d/primary_grid_steps"] = PropertyInfo(Variant::INT, "editors/3d/primary_grid_steps", PROPERTY_HINT_RANGE, "1,100,1", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_RESTART_IF_CHANGED); _initial_set("editors/3d/default_fov", 70.0); _initial_set("editors/3d/default_z_near", 0.05); _initial_set("editors/3d/default_z_far", 500.0); // navigation _initial_set("editors/3d/navigation/navigation_scheme", 0); _initial_set("editors/3d/navigation/invert_y-axis", false); hints["editors/3d/navigation/navigation_scheme"] = PropertyInfo(Variant::INT, "editors/3d/navigation/navigation_scheme", PROPERTY_HINT_ENUM, "Godot,Maya,Modo"); _initial_set("editors/3d/navigation/zoom_style", 0); hints["editors/3d/navigation/zoom_style"] = PropertyInfo(Variant::INT, 
"editors/3d/navigation/zoom_style", PROPERTY_HINT_ENUM, "Vertical, Horizontal"); _initial_set("editors/3d/navigation/emulate_3_button_mouse", false); _initial_set("editors/3d/navigation/orbit_modifier", 0); hints["editors/3d/navigation/orbit_modifier"] = PropertyInfo(Variant::INT, "editors/3d/navigation/orbit_modifier", PROPERTY_HINT_ENUM, "None,Shift,Alt,Meta,Ctrl"); _initial_set("editors/3d/navigation/pan_modifier", 1); hints["editors/3d/navigation/pan_modifier"] = PropertyInfo(Variant::INT, "editors/3d/navigation/pan_modifier", PROPERTY_HINT_ENUM, "None,Shift,Alt,Meta,Ctrl"); _initial_set("editors/3d/navigation/zoom_modifier", 4); hints["editors/3d/navigation/zoom_modifier"] = PropertyInfo(Variant::INT, "editors/3d/navigation/zoom_modifier", PROPERTY_HINT_ENUM, "None,Shift,Alt,Meta,Ctrl"); // _initial_set("editors/3d/navigation/emulate_numpad", false); not used at the moment _initial_set("editors/3d/navigation/warped_mouse_panning", true); // navigation feel _initial_set("editors/3d/navigation_feel/orbit_sensitivity", 0.4); hints["editors/3d/navigation_feel/orbit_sensitivity"] = PropertyInfo(Variant::REAL, "editors/3d/navigation_feel/orbit_sensitivity", PROPERTY_HINT_RANGE, "0.0, 2, 0.01"); _initial_set("editors/3d/navigation_feel/orbit_inertia", 0.05); hints["editors/3d/navigation_feel/orbit_inertia"] = PropertyInfo(Variant::REAL, "editors/3d/navigation_feel/orbit_inertia", PROPERTY_HINT_RANGE, "0.0, 1, 0.01"); _initial_set("editors/3d/navigation_feel/translation_inertia", 0.15); hints["editors/3d/navigation_feel/translation_inertia"] = PropertyInfo(Variant::REAL, "editors/3d/navigation_feel/translation_inertia", PROPERTY_HINT_RANGE, "0.0, 1, 0.01"); _initial_set("editors/3d/navigation_feel/zoom_inertia", 0.075); hints["editors/3d/navigation_feel/zoom_inertia"] = PropertyInfo(Variant::REAL, "editors/3d/navigation_feel/zoom_inertia", PROPERTY_HINT_RANGE, "0.0, 1, 0.01"); _initial_set("editors/3d/navigation_feel/manipulation_orbit_inertia", 0.075); 
// --- 3D viewport navigation feel: manipulation (gizmo drag) inertia ---
// _initial_set() registers the value and records it as the setting's default;
// the matching hints[] entry drives the range slider in the settings dialog.
hints["editors/3d/navigation_feel/manipulation_orbit_inertia"] = PropertyInfo(Variant::REAL, "editors/3d/navigation_feel/manipulation_orbit_inertia", PROPERTY_HINT_RANGE, "0.0, 1, 0.01");
_initial_set("editors/3d/navigation_feel/manipulation_translation_inertia", 0.075);
hints["editors/3d/navigation_feel/manipulation_translation_inertia"] = PropertyInfo(Variant::REAL, "editors/3d/navigation_feel/manipulation_translation_inertia", PROPERTY_HINT_RANGE, "0.0, 1, 0.01");
// freelook
_initial_set("editors/3d/freelook/freelook_inertia", 0.1);
hints["editors/3d/freelook/freelook_inertia"] = PropertyInfo(Variant::REAL, "editors/3d/freelook/freelook_inertia", PROPERTY_HINT_RANGE, "0.0, 1, 0.01");
_initial_set("editors/3d/freelook/freelook_base_speed", 5.0);
hints["editors/3d/freelook/freelook_base_speed"] = PropertyInfo(Variant::REAL, "editors/3d/freelook/freelook_base_speed", PROPERTY_HINT_RANGE, "0.0, 10, 0.01");
_initial_set("editors/3d/freelook/freelook_activation_modifier", 0);
hints["editors/3d/freelook/freelook_activation_modifier"] = PropertyInfo(Variant::INT, "editors/3d/freelook/freelook_activation_modifier", PROPERTY_HINT_ENUM, "None,Shift,Alt,Meta,Ctrl");
_initial_set("editors/3d/freelook/freelook_modifier_speed_factor", 3.0);
hints["editors/3d/freelook/freelook_modifier_speed_factor"] = PropertyInfo(Variant::REAL, "editors/3d/freelook/freelook_modifier_speed_factor", PROPERTY_HINT_RANGE, "0.0, 10.0, 0.1");
_initial_set("editors/3d/freelook/freelook_speed_zoom_link", false);
// --- 2D editor: guide and bone display colors (no hints: plain color pickers) ---
_initial_set("editors/2d/guides_color", Color(0.6, 0.0, 0.8));
_initial_set("editors/2d/bone_width", 5);
_initial_set("editors/2d/bone_color1", Color(1.0, 1.0, 1.0, 0.9));
_initial_set("editors/2d/bone_color2", Color(0.6, 0.6, 0.6, 0.9));
_initial_set("editors/2d/bone_selected_color", Color(0.9, 0.45, 0.45, 0.9));
_initial_set("editors/2d/bone_ik_color", Color(0.9, 0.9, 0.45, 0.9));
_initial_set("editors/2d/bone_outline_color", Color(0.35, 0.35, 0.35));
_initial_set("editors/2d/bone_outline_size", 2); _initial_set("editors/2d/keep_margins_when_changing_anchors", false); _initial_set("editors/2d/warped_mouse_panning", true); _initial_set("editors/2d/simple_spacebar_panning", false); _initial_set("editors/2d/scroll_to_pan", false); _initial_set("editors/2d/pan_speed", 20); _initial_set("editors/poly_editor/point_grab_radius", 8); _initial_set("editors/poly_editor/show_previous_outline", true); _initial_set("run/window_placement/rect", 1); hints["run/window_placement/rect"] = PropertyInfo(Variant::INT, "run/window_placement/rect", PROPERTY_HINT_ENUM, "Top Left,Centered,Custom Position,Force Maximized,Force Fullscreen"); String screen_hints = TTR("Default (Same as Editor)"); for (int i = 0; i < OS::get_singleton()->get_screen_count(); i++) { screen_hints += ",Monitor " + itos(i + 1); } _initial_set("run/window_placement/rect_custom_position", Vector2()); _initial_set("run/window_placement/screen", 0); hints["run/window_placement/screen"] = PropertyInfo(Variant::INT, "run/window_placement/screen", PROPERTY_HINT_ENUM, screen_hints); _initial_set("filesystem/on_save/compress_binary_resources", true); _initial_set("filesystem/on_save/save_modified_external_resources", true); _initial_set("text_editor/tools/create_signal_callbacks", true); _initial_set("filesystem/file_dialog/show_hidden_files", false); _initial_set("filesystem/file_dialog/display_mode", 0); hints["filesystem/file_dialog/display_mode"] = PropertyInfo(Variant::INT, "filesystem/file_dialog/display_mode", PROPERTY_HINT_ENUM, "Thumbnails,List"); _initial_set("filesystem/file_dialog/thumbnail_size", 64); hints["filesystem/file_dialog/thumbnail_size"] = PropertyInfo(Variant::INT, "filesystem/file_dialog/thumbnail_size", PROPERTY_HINT_RANGE, "32,128,16"); _initial_set("docks/filesystem/display_mode", 0); hints["docks/filesystem/display_mode"] = PropertyInfo(Variant::INT, "docks/filesystem/display_mode", PROPERTY_HINT_ENUM, "Thumbnails,List"); 
_initial_set("docks/filesystem/thumbnail_size", 64); hints["docks/filesystem/thumbnail_size"] = PropertyInfo(Variant::INT, "docks/filesystem/thumbnail_size", PROPERTY_HINT_RANGE, "32,128,16"); _initial_set("docks/filesystem/display_mode", 0); hints["docks/filesystem/display_mode"] = PropertyInfo(Variant::INT, "docks/filesystem/display_mode", PROPERTY_HINT_ENUM, "Thumbnails,List"); _initial_set("docks/filesystem/always_show_folders", true); _initial_set("editors/animation/autorename_animation_tracks", true); _initial_set("editors/animation/confirm_insert_track", true); _initial_set("editors/animation/onion_layers_past_color", Color(1, 0, 0)); _initial_set("editors/animation/onion_layers_future_color", Color(0, 1, 0)); _initial_set("docks/property_editor/texture_preview_width", 48); _initial_set("docks/property_editor/auto_refresh_interval", 0.3); _initial_set("text_editor/help/doc_path", ""); _initial_set("text_editor/help/show_help_index", true); _initial_set("filesystem/import/ask_save_before_reimport", false); _initial_set("filesystem/import/pvrtc_texture_tool", ""); #ifdef WINDOWS_ENABLED hints["filesystem/import/pvrtc_texture_tool"] = PropertyInfo(Variant::STRING, "filesystem/import/pvrtc_texture_tool", PROPERTY_HINT_GLOBAL_FILE, "*.exe"); #else hints["filesystem/import/pvrtc_texture_tool"] = PropertyInfo(Variant::STRING, "filesystem/import/pvrtc_texture_tool", PROPERTY_HINT_GLOBAL_FILE, ""); #endif _initial_set("filesystem/import/pvrtc_fast_conversion", false); _initial_set("run/auto_save/save_before_running", true); _initial_set("run/output/always_clear_output_on_play", true); _initial_set("run/output/always_open_output_on_play", true); _initial_set("run/output/always_close_output_on_stop", false); _initial_set("filesystem/resources/save_compressed_resources", true); _initial_set("filesystem/resources/auto_reload_modified_images", true); _initial_set("filesystem/import/automatic_reimport_on_sources_changed", true); 
_initial_set("filesystem/on_save/safe_save_on_backup_then_rename", true); if (p_extra_config.is_valid()) { if (p_extra_config->has_section("init_projects") && p_extra_config->has_section_key("init_projects", "list")) { Vector<String> list = p_extra_config->get_value("init_projects", "list"); for (int i = 0; i < list.size(); i++) { String name = list[i].replace("/", "::"); set("projects/" + name, list[i]); }; }; if (p_extra_config->has_section("presets")) { List<String> keys; p_extra_config->get_section_keys("presets", &keys); for (List<String>::Element *E = keys.front(); E; E = E->next()) { String key = E->get(); Variant val = p_extra_config->get_value("presets", key); set(key, val); }; }; }; } void EditorSettings::_load_default_text_editor_theme() { bool dark_theme = is_dark_theme(); _initial_set("text_editor/highlighting/symbol_color", Color::html("badfff")); _initial_set("text_editor/highlighting/keyword_color", Color::html("ffffb3")); _initial_set("text_editor/highlighting/base_type_color", Color::html("a4ffd4")); _initial_set("text_editor/highlighting/engine_type_color", Color::html("83d3ff")); _initial_set("text_editor/highlighting/comment_color", Color::html("676767")); _initial_set("text_editor/highlighting/string_color", Color::html("ef6ebe")); _initial_set("text_editor/highlighting/background_color", dark_theme ? 
Color::html("3b000000") : Color::html("#323b4f")); _initial_set("text_editor/highlighting/completion_background_color", Color::html("2C2A32")); _initial_set("text_editor/highlighting/completion_selected_color", Color::html("434244")); _initial_set("text_editor/highlighting/completion_existing_color", Color::html("21dfdfdf")); _initial_set("text_editor/highlighting/completion_scroll_color", Color::html("ffffff")); _initial_set("text_editor/highlighting/completion_font_color", Color::html("aaaaaa")); _initial_set("text_editor/highlighting/text_color", Color::html("aaaaaa")); _initial_set("text_editor/highlighting/line_number_color", Color::html("66aaaaaa")); _initial_set("text_editor/highlighting/caret_color", Color::html("aaaaaa")); _initial_set("text_editor/highlighting/caret_background_color", Color::html("000000")); _initial_set("text_editor/highlighting/text_selected_color", Color::html("000000")); _initial_set("text_editor/highlighting/selection_color", Color::html("6ca9c2")); _initial_set("text_editor/highlighting/brace_mismatch_color", Color(1, 0.2, 0.2)); _initial_set("text_editor/highlighting/current_line_color", Color(0.3, 0.5, 0.8, 0.15)); _initial_set("text_editor/highlighting/line_length_guideline_color", Color(0.3, 0.5, 0.8, 0.1)); _initial_set("text_editor/highlighting/word_highlighted_color", Color(0.8, 0.9, 0.9, 0.15)); _initial_set("text_editor/highlighting/number_color", Color::html("EB9532")); _initial_set("text_editor/highlighting/function_color", Color::html("66a2ce")); _initial_set("text_editor/highlighting/member_variable_color", Color::html("e64e59")); _initial_set("text_editor/highlighting/mark_color", Color(1.0, 0.4, 0.4, 0.4)); _initial_set("text_editor/highlighting/breakpoint_color", Color(0.8, 0.8, 0.4, 0.2)); _initial_set("text_editor/highlighting/code_folding_color", Color(0.8, 0.8, 0.8, 0.8)); _initial_set("text_editor/highlighting/search_result_color", Color(0.05, 0.25, 0.05, 1)); 
_initial_set("text_editor/highlighting/search_result_border_color", Color(0.1, 0.45, 0.1, 1)); } bool EditorSettings::_save_text_editor_theme(String p_file) { String theme_section = "color_theme"; Ref<ConfigFile> cf = memnew(ConfigFile); // hex is better? List<String> keys; props.get_key_list(&keys); keys.sort(); for (const List<String>::Element *E = keys.front(); E; E = E->next()) { String key = E->get(); if (key.begins_with("text_editor/highlighting/") && key.find("color") >= 0) { cf->set_value(theme_section, key.replace("text_editor/highlighting/", ""), ((Color)props[key].variant).to_html()); } } Error err = cf->save(p_file); if (err == OK) { return true; } return false; } static Dictionary _get_builtin_script_templates() { Dictionary templates; //No Comments templates["no_comments.gd"] = "extends %BASE%\n" "\n" "func _ready():\n" "%TS%pass\n"; //Empty templates["empty.gd"] = "extends %BASE%" "\n" "\n"; return templates; } static void _create_script_templates(const String &p_path) { Dictionary templates = _get_builtin_script_templates(); List<Variant> keys; templates.get_key_list(&keys); FileAccess *file = FileAccess::create(FileAccess::ACCESS_FILESYSTEM); DirAccess *dir = DirAccess::create(DirAccess::ACCESS_FILESYSTEM); dir->change_dir(p_path); for (int i = 0; i < keys.size(); i++) { if (!dir->file_exists(keys[i])) { Error err = file->reopen(p_path.plus_file((String)keys[i]), FileAccess::WRITE); ERR_FAIL_COND(err != OK); file->store_string(templates[keys[i]]); file->close(); } } memdelete(dir); memdelete(file); } // PUBLIC METHODS EditorSettings *EditorSettings::get_singleton() { return singleton.ptr(); } void EditorSettings::create() { if (singleton.ptr()) return; //pointless DirAccess *dir = NULL; String data_path; String data_dir; String config_path; String config_dir; String cache_path; String cache_dir; Ref<ConfigFile> extra_config = memnew(ConfigFile); String exe_path = OS::get_singleton()->get_executable_path().get_base_dir(); DirAccess *d = 
DirAccess::create_for_path(exe_path); bool self_contained = false; if (d->file_exists(exe_path + "/._sc_")) { self_contained = true; extra_config->load(exe_path + "/._sc_"); } else if (d->file_exists(exe_path + "/_sc_")) { self_contained = true; extra_config->load(exe_path + "/_sc_"); } memdelete(d); if (self_contained) { // editor is self contained, all in same folder data_path = exe_path; data_dir = data_path.plus_file("editor_data"); config_path = exe_path; config_dir = data_dir; cache_path = exe_path; cache_dir = data_dir.plus_file("cache"); } else { // Typically XDG_DATA_HOME or %APPDATA% data_path = OS::get_singleton()->get_data_path(); data_dir = data_path.plus_file(OS::get_singleton()->get_godot_dir_name()); // Can be different from data_path e.g. on Linux or macOS config_path = OS::get_singleton()->get_config_path(); config_dir = config_path.plus_file(OS::get_singleton()->get_godot_dir_name()); // Can be different from above paths, otherwise a subfolder of data_dir cache_path = OS::get_singleton()->get_cache_path(); if (cache_path == data_path) { cache_dir = data_dir.plus_file("cache"); } else { cache_dir = cache_path.plus_file(OS::get_singleton()->get_godot_dir_name()); } } ClassDB::register_class<EditorSettings>(); //otherwise it can't be unserialized String config_file_path; if (data_path != "" && config_path != "" && cache_path != "") { // Validate/create data dir and subdirectories dir = DirAccess::create(DirAccess::ACCESS_FILESYSTEM); if (dir->change_dir(data_path) != OK) { ERR_PRINT("Cannot find path for data directory!"); memdelete(dir); goto fail; } if (dir->change_dir(data_dir) != OK) { dir->make_dir(data_dir); if (dir->change_dir(data_dir) != OK) { ERR_PRINT("Cannot create data directory!"); memdelete(dir); goto fail; } } if (dir->change_dir("templates") != OK) { dir->make_dir("templates"); } else { dir->change_dir(".."); } // Validate/create cache dir if (dir->change_dir(cache_path) != OK) { ERR_PRINT("Cannot find path for cache directory!"); 
memdelete(dir); goto fail; } if (dir->change_dir(cache_dir) != OK) { dir->make_dir(cache_dir); if (dir->change_dir(cache_dir) != OK) { ERR_PRINT("Cannot create cache directory!"); memdelete(dir); goto fail; } } // Validate/create config dir and subdirectories if (dir->change_dir(config_path) != OK) { ERR_PRINT("Cannot find path for config directory!"); memdelete(dir); goto fail; } if (dir->change_dir(config_dir) != OK) { dir->make_dir(config_dir); if (dir->change_dir(config_dir) != OK) { ERR_PRINT("Cannot create config directory!"); memdelete(dir); goto fail; } } if (dir->change_dir("text_editor_themes") != OK) { dir->make_dir("text_editor_themes"); } else { dir->change_dir(".."); } if (dir->change_dir("script_templates") != OK) { dir->make_dir("script_templates"); } else { dir->change_dir(".."); } _create_script_templates(dir->get_current_dir().plus_file("script_templates")); if (dir->change_dir("projects") != OK) { dir->make_dir("projects"); } else { dir->change_dir(".."); } // Validate/create project-specific config dir dir->change_dir("projects"); String project_config_dir = ProjectSettings::get_singleton()->get_resource_path(); if (project_config_dir.ends_with("/")) project_config_dir = config_path.substr(0, project_config_dir.size() - 1); project_config_dir = project_config_dir.get_file() + "-" + project_config_dir.md5_text(); if (dir->change_dir(project_config_dir) != OK) { dir->make_dir(project_config_dir); } else { dir->change_dir(".."); } dir->change_dir(".."); // Validate editor config file String config_file_name = "editor_settings-" + itos(VERSION_MAJOR) + ".tres"; config_file_path = config_dir.plus_file(config_file_name); if (!dir->file_exists(config_file_name)) { goto fail; } memdelete(dir); singleton = ResourceLoader::load(config_file_path, "EditorSettings"); if (singleton.is_null()) { WARN_PRINT("Could not open config file."); goto fail; } singleton->save_changed_setting = true; singleton->config_file_path = config_file_path; 
singleton->project_config_dir = project_config_dir; singleton->settings_dir = config_dir; singleton->data_dir = data_dir; singleton->cache_dir = cache_dir; if (OS::get_singleton()->is_stdout_verbose()) { print_line("EditorSettings: Load OK!"); } singleton->setup_language(); singleton->setup_network(); singleton->load_favorites(); singleton->list_text_editor_themes(); return; } fail: // patch init projects if (extra_config->has_section("init_projects")) { Vector<String> list = extra_config->get_value("init_projects", "list"); for (int i = 0; i < list.size(); i++) { list[i] = exe_path + "/" + list[i]; }; extra_config->set_value("init_projects", "list", list); }; singleton = Ref<EditorSettings>(memnew(EditorSettings)); singleton->save_changed_setting = true; singleton->config_file_path = config_file_path; singleton->settings_dir = config_dir; singleton->data_dir = data_dir; singleton->cache_dir = cache_dir; singleton->_load_defaults(extra_config); singleton->setup_language(); singleton->setup_network(); singleton->list_text_editor_themes(); } void EditorSettings::setup_language() { String lang = get("interface/editor/editor_language"); if (lang == "en") return; //none to do EditorTranslationList *etl = _editor_translations; while (etl->data) { if (etl->lang == lang) { Vector<uint8_t> data; data.resize(etl->uncomp_size); Compression::decompress(data.ptrw(), etl->uncomp_size, etl->data, etl->comp_size, Compression::MODE_DEFLATE); FileAccessMemory *fa = memnew(FileAccessMemory); fa->open_custom(data.ptr(), data.size()); Ref<Translation> tr = TranslationLoaderPO::load_translation(fa, NULL, "translation_" + String(etl->lang)); if (tr.is_valid()) { tr->set_locale(etl->lang); TranslationServer::get_singleton()->set_tool_translation(tr); break; } } etl++; } } void EditorSettings::setup_network() { List<IP_Address> local_ip; IP::get_singleton()->get_local_addresses(&local_ip); String lip = "127.0.0.1"; String hint; String current = has_setting("network/debug/remote_host") ? 
get("network/debug/remote_host") : ""; int port = has_setting("network/debug/remote_port") ? (int)get("network/debug/remote_port") : 6007; for (List<IP_Address>::Element *E = local_ip.front(); E; E = E->next()) { String ip = E->get(); // link-local IPv6 addresses don't work, skipping them if (ip.begins_with("fe80:0:0:0:")) // fe80::/64 continue; if (ip == current) lip = current; //so it saves if (hint != "") hint += ","; hint += ip; } _initial_set("network/debug/remote_host", lip); add_property_hint(PropertyInfo(Variant::STRING, "network/debug/remote_host", PROPERTY_HINT_ENUM, hint)); _initial_set("network/debug/remote_port", port); add_property_hint(PropertyInfo(Variant::INT, "network/debug/remote_port", PROPERTY_HINT_RANGE, "1,65535,1")); } void EditorSettings::save() { //_THREAD_SAFE_METHOD_ if (!singleton.ptr()) return; if (singleton->config_file_path == "") { ERR_PRINT("Cannot save EditorSettings config, no valid path"); return; } Error err = ResourceSaver::save(singleton->config_file_path, singleton); if (err != OK) { ERR_PRINTS("Error saving editor settings to " + singleton->config_file_path); } else if (OS::get_singleton()->is_stdout_verbose()) { print_line("EditorSettings Save OK!"); } } void EditorSettings::destroy() { if (!singleton.ptr()) return; save(); singleton = Ref<EditorSettings>(); } void EditorSettings::set_optimize_save(bool p_optimize) { optimize_save = p_optimize; } // Properties void EditorSettings::set_setting(const String &p_setting, const Variant &p_value) { _THREAD_SAFE_METHOD_ set(p_setting, p_value); } Variant EditorSettings::get_setting(const String &p_setting) const { _THREAD_SAFE_METHOD_ return get(p_setting); } bool EditorSettings::has_setting(const String &p_setting) const { _THREAD_SAFE_METHOD_ return props.has(p_setting); } void EditorSettings::erase(const String &p_setting) { _THREAD_SAFE_METHOD_ props.erase(p_setting); } void EditorSettings::raise_order(const String &p_setting) { _THREAD_SAFE_METHOD_ 
ERR_FAIL_COND(!props.has(p_setting)); props[p_setting].order = ++last_order; } void EditorSettings::set_initial_value(const StringName &p_setting, const Variant &p_value, bool p_update_current) { _THREAD_SAFE_METHOD_ if (!props.has(p_setting)) return; props[p_setting].initial = p_value; props[p_setting].has_default_value = true; if (p_update_current) { set(p_setting, p_value); } } Variant _EDITOR_DEF(const String &p_setting, const Variant &p_default) { Variant ret = p_default; if (EditorSettings::get_singleton()->has_setting(p_setting)) ret = EditorSettings::get_singleton()->get(p_setting); else EditorSettings::get_singleton()->set_manually(p_setting, p_default); if (!EditorSettings::get_singleton()->has_default_value(p_setting)) EditorSettings::get_singleton()->set_initial_value(p_setting, p_default); return ret; } Variant _EDITOR_GET(const String &p_setting) { ERR_FAIL_COND_V(!EditorSettings::get_singleton()->has_setting(p_setting), Variant()) return EditorSettings::get_singleton()->get(p_setting); } bool EditorSettings::property_can_revert(const String &p_setting) { if (!props.has(p_setting)) return false; if (!props[p_setting].has_default_value) return false; return props[p_setting].initial != props[p_setting].variant; } Variant EditorSettings::property_get_revert(const String &p_setting) { if (!props.has(p_setting) || !props[p_setting].has_default_value) return Variant(); return props[p_setting].initial; } void EditorSettings::add_property_hint(const PropertyInfo &p_hint) { _THREAD_SAFE_METHOD_ hints[p_hint.name] = p_hint; } // Data directories String EditorSettings::get_data_dir() const { return data_dir; } String EditorSettings::get_templates_dir() const { return get_data_dir().plus_file("templates"); } // Config directories String EditorSettings::get_settings_dir() const { return settings_dir; } String EditorSettings::get_project_settings_dir() const { return get_settings_dir().plus_file("projects").plus_file(project_config_dir); } String 
EditorSettings::get_text_editor_themes_dir() const { return get_settings_dir().plus_file("text_editor_themes"); } String EditorSettings::get_script_templates_dir() const { return get_settings_dir().plus_file("script_templates"); } // Cache directory String EditorSettings::get_cache_dir() const { return cache_dir; } // Metadata void EditorSettings::set_project_metadata(const String &p_section, const String &p_key, Variant p_data) { Ref<ConfigFile> cf = memnew(ConfigFile); String path = get_project_settings_dir().plus_file("project_metadata.cfg"); cf->load(path); cf->set_value(p_section, p_key, p_data); cf->save(path); } Variant EditorSettings::get_project_metadata(const String &p_section, const String &p_key, Variant p_default) const { Ref<ConfigFile> cf = memnew(ConfigFile); String path = get_project_settings_dir().plus_file("project_metadata.cfg"); Error err = cf->load(path); if (err != OK) { return p_default; } return cf->get_value(p_section, p_key, p_default); } void EditorSettings::set_favorite_dirs(const Vector<String> &p_favorites_dirs) { favorite_dirs = p_favorites_dirs; FileAccess *f = FileAccess::open(get_project_settings_dir().plus_file("favorite_dirs"), FileAccess::WRITE); if (f) { for (int i = 0; i < favorite_dirs.size(); i++) f->store_line(favorite_dirs[i]); memdelete(f); } } Vector<String> EditorSettings::get_favorite_dirs() const { return favorite_dirs; } void EditorSettings::set_recent_dirs(const Vector<String> &p_recent_dirs) { recent_dirs = p_recent_dirs; FileAccess *f = FileAccess::open(get_project_settings_dir().plus_file("recent_dirs"), FileAccess::WRITE); if (f) { for (int i = 0; i < recent_dirs.size(); i++) f->store_line(recent_dirs[i]); memdelete(f); } } Vector<String> EditorSettings::get_recent_dirs() const { return recent_dirs; } void EditorSettings::load_favorites() { FileAccess *f = FileAccess::open(get_project_settings_dir().plus_file("favorite_dirs"), FileAccess::READ); if (f) { String line = f->get_line().strip_edges(); while (line != 
"") { favorite_dirs.push_back(line); line = f->get_line().strip_edges(); } memdelete(f); } f = FileAccess::open(get_project_settings_dir().plus_file("recent_dirs"), FileAccess::READ); if (f) { String line = f->get_line().strip_edges(); while (line != "") { recent_dirs.push_back(line); line = f->get_line().strip_edges(); } memdelete(f); } } bool EditorSettings::is_dark_theme() { int AUTO_COLOR = 0; int LIGHT_COLOR = 2; Color base_color = get("interface/theme/base_color"); int icon_font_color_setting = get("interface/theme/icon_and_font_color"); return (icon_font_color_setting == AUTO_COLOR && ((base_color.r + base_color.g + base_color.b) / 3.0) < 0.5) || icon_font_color_setting == LIGHT_COLOR; } void EditorSettings::list_text_editor_themes() { String themes = "Adaptive,Default,Custom"; DirAccess *d = DirAccess::open(get_text_editor_themes_dir()); if (d) { d->list_dir_begin(); String file = d->get_next(); while (file != String()) { if (file.get_extension() == "tet" && file.get_basename().to_lower() != "default" && file.get_basename().to_lower() != "adaptive" && file.get_basename().to_lower() != "custom") { themes += "," + file.get_basename(); } file = d->get_next(); } d->list_dir_end(); memdelete(d); } add_property_hint(PropertyInfo(Variant::STRING, "text_editor/theme/color_theme", PROPERTY_HINT_ENUM, themes)); } void EditorSettings::load_text_editor_theme() { if (get("text_editor/theme/color_theme") == "Default" || get("text_editor/theme/color_theme") == "Adaptive" || get("text_editor/theme/color_theme") == "Custom") { if (get("text_editor/theme/color_theme") == "Default") { _load_default_text_editor_theme(); } return; // sorry for "Settings changed" console spam } String theme_path = get_text_editor_themes_dir().plus_file((String)get("text_editor/theme/color_theme") + ".tet"); Ref<ConfigFile> cf = memnew(ConfigFile); Error err = cf->load(theme_path); if (err != OK) { return; } List<String> keys; cf->get_section_keys("color_theme", &keys); for (List<String>::Element 
*E = keys.front(); E; E = E->next()) { String key = E->get(); String val = cf->get_value("color_theme", key); // don't load if it's not already there! if (has_setting("text_editor/highlighting/" + key)) { // make sure it is actually a color if (val.is_valid_html_color() && key.find("color") >= 0) { props["text_editor/highlighting/" + key].variant = Color::html(val); // change manually to prevent "Settings changed" console spam } } } emit_signal("settings_changed"); // if it doesn't load just use what is currently loaded } bool EditorSettings::import_text_editor_theme(String p_file) { if (!p_file.ends_with(".tet")) { return false; } else { if (p_file.get_file().to_lower() == "default.tet") { return false; } DirAccess *d = DirAccess::open(get_text_editor_themes_dir()); if (d) { d->copy(p_file, get_text_editor_themes_dir().plus_file(p_file.get_file())); memdelete(d); return true; } } return false; } bool EditorSettings::save_text_editor_theme() { String p_file = get("text_editor/theme/color_theme"); if (p_file.get_file().to_lower() == "default" || p_file.get_file().to_lower() == "adaptive" || p_file.get_file().to_lower() == "custom") { return false; } String theme_path = get_text_editor_themes_dir().plus_file(p_file + ".tet"); return _save_text_editor_theme(theme_path); } bool EditorSettings::save_text_editor_theme_as(String p_file) { if (!p_file.ends_with(".tet")) { p_file += ".tet"; } if (p_file.get_file().to_lower() == "default.tet" || p_file.get_file().to_lower() == "adaptive.tet" || p_file.get_file().to_lower() == "custom.tet") { return false; } if (_save_text_editor_theme(p_file)) { // switch to theme is saved in the theme directory list_text_editor_themes(); String theme_name = p_file.substr(0, p_file.length() - 4).get_file(); if (p_file.get_base_dir() == get_text_editor_themes_dir()) { _initial_set("text_editor/theme/color_theme", theme_name); load_text_editor_theme(); } return true; } return false; } Vector<String> EditorSettings::get_script_templates(const 
String &p_extension) { Vector<String> templates; DirAccess *d = DirAccess::open(get_script_templates_dir()); if (d) { d->list_dir_begin(); String file = d->get_next(); while (file != String()) { if (file.get_extension() == p_extension) { templates.push_back(file.get_basename()); } file = d->get_next(); } d->list_dir_end(); memdelete(d); } return templates; } String EditorSettings::get_editor_layouts_config() const { return get_settings_dir().plus_file("editor_layouts.cfg"); } // Shortcuts void EditorSettings::add_shortcut(const String &p_name, Ref<ShortCut> &p_shortcut) { shortcuts[p_name] = p_shortcut; } bool EditorSettings::is_shortcut(const String &p_name, const Ref<InputEvent> &p_event) const { const Map<String, Ref<ShortCut> >::Element *E = shortcuts.find(p_name); if (!E) { ERR_EXPLAIN("Unknown Shortcut: " + p_name); ERR_FAIL_V(false); } return E->get()->is_shortcut(p_event); } Ref<ShortCut> EditorSettings::get_shortcut(const String &p_name) const { const Map<String, Ref<ShortCut> >::Element *E = shortcuts.find(p_name); if (!E) return Ref<ShortCut>(); return E->get(); } void EditorSettings::get_shortcut_list(List<String> *r_shortcuts) { for (const Map<String, Ref<ShortCut> >::Element *E = shortcuts.front(); E; E = E->next()) { r_shortcuts->push_back(E->key()); } } Ref<ShortCut> ED_GET_SHORTCUT(const String &p_path) { Ref<ShortCut> sc = EditorSettings::get_singleton()->get_shortcut(p_path); if (!sc.is_valid()) { ERR_EXPLAIN("Used ED_GET_SHORTCUT with invalid shortcut: " + p_path); ERR_FAIL_COND_V(!sc.is_valid(), sc); } return sc; } struct ShortCutMapping { const char *path; uint32_t keycode; }; Ref<ShortCut> ED_SHORTCUT(const String &p_path, const String &p_name, uint32_t p_keycode) { #ifdef OSX_ENABLED // Use Cmd+Backspace as a general replacement for Delete shortcuts on macOS if (p_keycode == KEY_DELETE) { p_keycode = KEY_MASK_CMD | KEY_BACKSPACE; } #endif Ref<InputEventKey> ie; if (p_keycode) { ie.instance(); ie->set_unicode(p_keycode & KEY_CODE_MASK); 
ie->set_scancode(p_keycode & KEY_CODE_MASK); ie->set_shift(bool(p_keycode & KEY_MASK_SHIFT)); ie->set_alt(bool(p_keycode & KEY_MASK_ALT)); ie->set_control(bool(p_keycode & KEY_MASK_CTRL)); ie->set_metakey(bool(p_keycode & KEY_MASK_META)); } Ref<ShortCut> sc = EditorSettings::get_singleton()->get_shortcut(p_path); if (sc.is_valid()) { sc->set_name(p_name); //keep name (the ones that come from disk have no name) sc->set_meta("original", ie); //to compare against changes return sc; } sc.instance(); sc->set_name(p_name); sc->set_shortcut(ie); sc->set_meta("original", ie); //to compare against changes EditorSettings::get_singleton()->add_shortcut(p_path, sc); return sc; } void EditorSettings::notify_changes() { _THREAD_SAFE_METHOD_ SceneTree *sml = Object::cast_to<SceneTree>(OS::get_singleton()->get_main_loop()); if (!sml) { return; } Node *root = sml->get_root()->get_child(0); if (!root) { return; } root->propagate_notification(NOTIFICATION_EDITOR_SETTINGS_CHANGED); } void EditorSettings::_bind_methods() { ClassDB::bind_method(D_METHOD("has_setting", "name"), &EditorSettings::has_setting); ClassDB::bind_method(D_METHOD("set_setting", "name", "value"), &EditorSettings::set_setting); ClassDB::bind_method(D_METHOD("get_setting", "name"), &EditorSettings::get_setting); ClassDB::bind_method(D_METHOD("erase", "property"), &EditorSettings::erase); ClassDB::bind_method(D_METHOD("set_initial_value", "name", "value", "update_current"), &EditorSettings::set_initial_value); ClassDB::bind_method(D_METHOD("property_can_revert", "name"), &EditorSettings::property_can_revert); ClassDB::bind_method(D_METHOD("property_get_revert", "name"), &EditorSettings::property_get_revert); ClassDB::bind_method(D_METHOD("add_property_info", "info"), &EditorSettings::_add_property_info_bind); ClassDB::bind_method(D_METHOD("get_settings_dir"), &EditorSettings::get_settings_dir); ClassDB::bind_method(D_METHOD("get_project_settings_dir"), &EditorSettings::get_project_settings_dir); 
ClassDB::bind_method(D_METHOD("set_project_metadata", "section", "key", "data"), &EditorSettings::set_project_metadata); ClassDB::bind_method(D_METHOD("get_project_metadata", "section", "key", "default"), &EditorSettings::get_project_metadata, DEFVAL(Variant())); ClassDB::bind_method(D_METHOD("set_favorite_dirs", "dirs"), &EditorSettings::set_favorite_dirs); ClassDB::bind_method(D_METHOD("get_favorite_dirs"), &EditorSettings::get_favorite_dirs); ClassDB::bind_method(D_METHOD("set_recent_dirs", "dirs"), &EditorSettings::set_recent_dirs); ClassDB::bind_method(D_METHOD("get_recent_dirs"), &EditorSettings::get_recent_dirs); ADD_SIGNAL(MethodInfo("settings_changed")); } EditorSettings::EditorSettings() { last_order = 0; optimize_save = true; save_changed_setting = true; _load_defaults(); } EditorSettings::~EditorSettings() { }
/////////////////////////////////////////////////////////////////////////////// // // ZX-ESPectrum - ZX Spectrum emulator for ESP32 // // Copyright (c) 2020, 2021 David Crespo [dcrespo3d] // https://github.com/dcrespo3d/ZX-ESPectrum-Wiimote // // Based on previous work by Ramón Martinez, Jorge Fuertes and many others // https://github.com/rampa069/ZX-ESPectrum // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
// #include "FileUtils.h" #include "PS2Kbd.h" #include "ESPectrum.h" #include "messages.h" #include "osd.h" #include "Wiimote2Keys.h" #include <math.h> #define MENU_MAX_ROWS 23 // Line type #define IS_TITLE 0 #define IS_FOCUSED 1 #define IS_NORMAL 2 // Scroll #define UP true #define DOWN false extern Font Font6x8; static byte cols; // Maximum columns static unsigned short real_rows; // Real row count static byte virtual_rows; // Virtual maximum rows on screen static byte w; // Width in pixels static byte h; // Height in pixels static byte x; // X vertical position static byte y; // Y horizontal position static String menu; // Menu string static unsigned short begin_row; // First real displayed row static byte focus; // Focused virtual row static byte last_focus; // To check for changes static unsigned short last_begin_row; // To check for changes #define NUM_SPECTRUM_COLORS 16 static word spectrum_colors[NUM_SPECTRUM_COLORS] = { BLACK, BLUE, RED, MAGENTA, GREEN, CYAN, YELLOW, WHITE, BRI_BLACK, BRI_BLUE, BRI_RED, BRI_MAGENTA, BRI_GREEN, BRI_CYAN, BRI_YELLOW, BRI_WHITE, }; uint16_t OSD::zxColor(uint8_t color, uint8_t bright) { if (bright) color += 8; return spectrum_colors[color]; } // Set menu and force recalc void OSD::newMenu(String new_menu) { menu = new_menu; menuRecalc(); menuDraw(); } void OSD::menuRecalc() { // Columns cols = 24; byte col_count = 0; for (unsigned short i = 0; i < menu.length(); i++) { if (menu.charAt(i) == ASCII_NL) { if (col_count > cols) { cols = col_count; } col_count = 0; } col_count++; } cols = (cols > osdMaxCols() ? osdMaxCols() : cols); // Rows real_rows = rowCount(menu); virtual_rows = (real_rows > MENU_MAX_ROWS ? 
MENU_MAX_ROWS : real_rows); begin_row = last_begin_row = last_focus = focus = 1; // Size w = (cols * OSD_FONT_W) + 2; h = (virtual_rows * OSD_FONT_H) + 2; // Position x = scrAlignCenterX(w); y = scrAlignCenterY(h); } // Get real row number for a virtual one unsigned short OSD::menuRealRowFor(byte virtual_row_num) { return begin_row + virtual_row_num - 1; } // Menu relative AT void OSD::menuAt(short int row, short int col) { if (col < 0) col = cols - 2 - col; if (row < 0) row = virtual_rows - 2 - row; ESPectrum::vga.setCursor(x + 1 + (col * OSD_FONT_W), y + 1 + (row * OSD_FONT_H)); } // Print a virtual row void OSD::menuPrintRow(byte virtual_row_num, byte line_type) { VGA& vga = ESPectrum::vga; byte margin; String line = rowGet(menu, menuRealRowFor(virtual_row_num)); switch (line_type) { case IS_TITLE: vga.setTextColor(OSD::zxColor(7, 0), OSD::zxColor(0, 0)); margin = 2; break; case IS_FOCUSED: vga.setTextColor(OSD::zxColor(0, 1), OSD::zxColor(5, 1)); margin = (real_rows > virtual_rows ? 3 : 2); break; default: vga.setTextColor(OSD::zxColor(0, 1), OSD::zxColor(7, 1)); margin = (real_rows > virtual_rows ? 
3 : 2); } menuAt(virtual_row_num, 0); vga.print(" "); if (line.length() < cols - margin) { vga.print(line.c_str()); for (byte i = line.length(); i < (cols - margin); i++) vga.print(" "); } else { vga.print(line.substring(0, cols - margin).c_str()); } vga.print(" "); } // Draw the complete menu void OSD::menuDraw() { ESPectrum::waitForVideoTask(); VGA& vga = ESPectrum::vga; // Set font vga.setFont(Font6x8); // Menu border vga.rect(x, y, w, h, OSD::zxColor(0, 0)); // Title menuPrintRow(0, IS_TITLE); // Rainbow unsigned short rb_y = y + 8; unsigned short rb_paint_x = x + w - 30; byte rb_colors[] = {2, 6, 4, 5}; for (byte c = 0; c < 4; c++) { for (byte i = 0; i < 5; i++) { vga.line(rb_paint_x + i, rb_y, rb_paint_x + 8 + i, rb_y - 8, OSD::zxColor(rb_colors[c], 1)); } rb_paint_x += 5; } // Focused first line menuPrintRow(1, IS_FOCUSED); for (byte r = 2; r < virtual_rows; r++) { menuPrintRow(r, IS_NORMAL); } focus = 1; menuScrollBar(); } String OSD::getArchMenu() { String menu = (String)MENU_ARCH + FileUtils::getFileEntriesFromDir(DISK_ROM_DIR); return menu; } String OSD::getRomsetMenu(String arch) { String menu = (String)MENU_ROMSET + FileUtils::getFileEntriesFromDir((String)DISK_ROM_DIR + "/" + arch); return menu; } // Run a new menu unsigned short OSD::menuRun(String new_menu) { newMenu(new_menu); while (1) { updateWiimote2KeysOSD(); if (PS2Keyboard::checkAndCleanKey(KEY_CURSOR_UP)) { if (focus == 1 and begin_row > 1) { menuScroll(DOWN); } else if (focus > 1) { focus--; menuPrintRow(focus, IS_FOCUSED); if (focus + 1 < virtual_rows) { menuPrintRow(focus + 1, IS_NORMAL); } } } else if (PS2Keyboard::checkAndCleanKey(KEY_CURSOR_DOWN)) { if (focus == virtual_rows - 1) { menuScroll(UP); } else if (focus < virtual_rows - 1) { focus++; menuPrintRow(focus, IS_FOCUSED); if (focus - 1 > 0) { menuPrintRow(focus - 1, IS_NORMAL); } } } else if (PS2Keyboard::checkAndCleanKey(KEY_PAGE_UP)) { if (begin_row > virtual_rows) { focus = 1; begin_row -= virtual_rows; } else { focus = 1; 
begin_row = 1; } menuRedraw(); } else if (PS2Keyboard::checkAndCleanKey(KEY_PAGE_DOWN)) { if (real_rows - begin_row - virtual_rows > virtual_rows) { focus = 1; begin_row += virtual_rows - 1; } else { focus = virtual_rows - 1; begin_row = real_rows - virtual_rows + 1; } menuRedraw(); } else if (PS2Keyboard::checkAndCleanKey(KEY_HOME)) { focus = 1; begin_row = 1; menuRedraw(); } else if (PS2Keyboard::checkAndCleanKey(KEY_END)) { focus = virtual_rows - 1; begin_row = real_rows - virtual_rows + 1; menuRedraw(); } else if (PS2Keyboard::checkAndCleanKey(KEY_ENTER)) { return menuRealRowFor(focus); } else if (PS2Keyboard::checkAndCleanKey(KEY_ESC) || PS2Keyboard::checkAndCleanKey(KEY_F1)) { return 0; } } } // Scroll void OSD::menuScroll(boolean dir) { if (dir == DOWN and begin_row > 1) { begin_row--; } else if (dir == UP and (begin_row + virtual_rows - 1) < real_rows) { begin_row++; } else { return; } menuRedraw(); } // Redraw inside rows void OSD::menuRedraw() { if (focus != last_focus or begin_row != last_begin_row) { for (byte row = 1; row < virtual_rows; row++) { if (row == focus) { menuPrintRow(row, IS_FOCUSED); } else { menuPrintRow(row, IS_NORMAL); } } menuScrollBar(); last_focus = focus; last_begin_row = begin_row; } } // Draw menu scroll bar void OSD::menuScrollBar() { VGA& vga = ESPectrum::vga; if (real_rows > virtual_rows) { // Top handle menuAt(1, -1); if (begin_row > 1) { vga.setTextColor(OSD::zxColor(7, 0), OSD::zxColor(0, 0)); vga.print("+"); } else { vga.setTextColor(OSD::zxColor(7, 0), OSD::zxColor(0, 0)); vga.print("-"); } // Complete bar unsigned short holder_x = x + (OSD_FONT_W * (cols - 1)) + 1; unsigned short holder_y = y + (OSD_FONT_H * 2); unsigned short holder_h = OSD_FONT_H * (virtual_rows - 3); unsigned short holder_w = OSD_FONT_W; vga.fillRect(holder_x, holder_y, holder_w, holder_h + 1, OSD::zxColor(7, 0)); holder_y++; // Scroll bar unsigned short shown_pct = round(((float)virtual_rows / (float)real_rows) * 100.0); unsigned short begin_pct = 
round(((float)(begin_row - 1) / (float)real_rows) * 100.0); unsigned short bar_h = round(((float)holder_h / 100.0) * (float)shown_pct); unsigned short bar_y = round(((float)holder_h / 100.0) * (float)begin_pct); while ((bar_y + bar_h) >= holder_h) { bar_h--; } vga.fillRect(holder_x + 1, holder_y + bar_y, holder_w - 2, bar_h, OSD::zxColor(0, 0)); // Bottom handle menuAt(-1, -1); if ((begin_row + virtual_rows - 1) < real_rows) { vga.setTextColor(OSD::zxColor(7, 0), OSD::zxColor(0, 0)); vga.print("+"); } else { vga.setTextColor(OSD::zxColor(7, 0), OSD::zxColor(0, 0)); vga.print("-"); } } } // Return a test menu String OSD::getTestMenu(unsigned short n_lines) { String test_menu = "Test Menu\n"; for (unsigned short line = 1; line <= n_lines; line += 2) { test_menu += "Option Line " + (String)line + "\n"; test_menu += "1........10........20........30........40........50........60\n"; } return test_menu; }
// Copyright (c) 2009-2019 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <rpc/server.h> #include <banman.h> #include <chainparams.h> #include <clientversion.h> #include <core_io.h> #include <net.h> #include <net_processing.h> #include <netbase.h> #include <policy/policy.h> #include <rpc/protocol.h> #include <rpc/util.h> #include <sync.h> #include <timedata.h> #include <ui_interface.h> #include <util/strencodings.h> #include <util/system.h> #include <validation.h> #include <version.h> #include <warnings.h> #include <univalue.h> static UniValue getconnectioncount(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() != 0) throw std::runtime_error( RPCHelpMan{"getconnectioncount", "\nReturns the number of connections to other nodes.\n", {}, RPCResult{ "n (numeric) The connection count\n" }, RPCExamples{ HelpExampleCli("getconnectioncount", "") + HelpExampleRpc("getconnectioncount", "") }, }.ToString()); if(!g_connman) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); return (int)g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL); } static UniValue ping(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() != 0) throw std::runtime_error( RPCHelpMan{"ping", "\nRequests that a ping be sent to all other nodes, to measure ping time.\n" "Results provided in getpeerinfo, pingtime and pingwait fields are decimal seconds.\n" "Ping command is handled in queue with all other commands, so it measures processing backlog, not just network ping.\n", {}, RPCResults{}, RPCExamples{ HelpExampleCli("ping", "") + HelpExampleRpc("ping", "") }, }.ToString()); if(!g_connman) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); // Request that each node send a ping during next message processing pass 
g_connman->ForEachNode([](CNode* pnode) { pnode->fPingQueued = true; }); return NullUniValue; } static UniValue getpeerinfo(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() != 0) throw std::runtime_error( RPCHelpMan{"getpeerinfo", "\nReturns data about each connected network node as a json array of objects.\n", {}, RPCResult{ "[\n" " {\n" " \"id\": n, (numeric) Peer index\n" " \"addr\":\"host:port\", (string) The IP address and port of the peer\n" " \"addrbind\":\"ip:port\", (string) Bind address of the connection to the peer\n" " \"addrlocal\":\"ip:port\", (string) Local address as reported by the peer\n" " \"services\":\"xxxxxxxxxxxxxxxx\", (string) The services offered\n" " \"relaytxes\":true|false, (boolean) Whether peer has asked us to relay transactions to it\n" " \"lastsend\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last send\n" " \"lastrecv\": ttt, (numeric) The time in seconds since epoch (Jan 1 1970 GMT) of the last receive\n" " \"bytessent\": n, (numeric) The total bytes sent\n" " \"bytesrecv\": n, (numeric) The total bytes received\n" " \"conntime\": ttt, (numeric) The connection time in seconds since epoch (Jan 1 1970 GMT)\n" " \"timeoffset\": ttt, (numeric) The time offset in seconds\n" " \"pingtime\": n, (numeric) ping time (if available)\n" " \"minping\": n, (numeric) minimum observed ping time (if any at all)\n" " \"pingwait\": n, (numeric) ping wait (if non-zero)\n" " \"version\": v, (numeric) The peer version, such as 70001\n" " \"subver\": \"/Satoshi:0.8.5/\", (string) The string version\n" " \"inbound\": true|false, (boolean) Inbound (true) or Outbound (false)\n" " \"addnode\": true|false, (boolean) Whether connection was due to addnode/-connect or if it was an automatic/inbound connection\n" " \"startingheight\": n, (numeric) The starting height (block) of the peer\n" " \"banscore\": n, (numeric) The ban score\n" " \"synced_headers\": n, (numeric) The last header we have in common with 
this peer\n" " \"synced_blocks\": n, (numeric) The last block we have in common with this peer\n" " \"inflight\": [\n" " n, (numeric) The heights of blocks we're currently asking from this peer\n" " ...\n" " ],\n" " \"whitelisted\": true|false, (boolean) Whether the peer is whitelisted\n" " \"minfeefilter\": n, (numeric) The minimum fee rate for transactions this peer accepts\n" " \"bytessent_per_msg\": {\n" " \"msg\": n, (numeric) The total bytes sent aggregated by message type\n" " When a message type is not listed in this json object, the bytes sent are 0.\n" " Only known message types can appear as keys in the object.\n" " ...\n" " },\n" " \"bytesrecv_per_msg\": {\n" " \"msg\": n, (numeric) The total bytes received aggregated by message type\n" " When a message type is not listed in this json object, the bytes received are 0.\n" " Only known message types can appear as keys in the object and all bytes received of unknown message types are listed under '"+NET_MESSAGE_COMMAND_OTHER+"'.\n" " ...\n" " }\n" " }\n" " ,...\n" "]\n" }, RPCExamples{ HelpExampleCli("getpeerinfo", "") + HelpExampleRpc("getpeerinfo", "") }, }.ToString()); if(!g_connman) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); std::vector<CNodeStats> vstats; g_connman->GetNodeStats(vstats); UniValue ret(UniValue::VARR); for (const CNodeStats& stats : vstats) { UniValue obj(UniValue::VOBJ); CNodeStateStats statestats; bool fStateStats = GetNodeStateStats(stats.nodeid, statestats); obj.pushKV("id", stats.nodeid); obj.pushKV("addr", stats.addrName); if (!(stats.addrLocal.empty())) obj.pushKV("addrlocal", stats.addrLocal); if (stats.addrBind.IsValid()) obj.pushKV("addrbind", stats.addrBind.ToString()); obj.pushKV("services", strprintf("%016x", stats.nServices)); obj.pushKV("relaytxes", stats.fRelayTxes); obj.pushKV("lastsend", stats.nLastSend); obj.pushKV("lastrecv", stats.nLastRecv); obj.pushKV("bytessent", stats.nSendBytes); obj.pushKV("bytesrecv", 
stats.nRecvBytes); obj.pushKV("conntime", stats.nTimeConnected); obj.pushKV("timeoffset", stats.nTimeOffset); if (stats.dPingTime > 0.0) obj.pushKV("pingtime", stats.dPingTime); if (stats.dMinPing < static_cast<double>(std::numeric_limits<int64_t>::max())/1e6) obj.pushKV("minping", stats.dMinPing); if (stats.dPingWait > 0.0) obj.pushKV("pingwait", stats.dPingWait); obj.pushKV("version", stats.nVersion); // Use the sanitized form of subver here, to avoid tricksy remote peers from // corrupting or modifying the JSON output by putting special characters in // their ver message. obj.pushKV("subver", stats.cleanSubVer); obj.pushKV("inbound", stats.fInbound); obj.pushKV("addnode", stats.m_manual_connection); obj.pushKV("startingheight", stats.nStartingHeight); if (fStateStats) { obj.pushKV("banscore", statestats.nMisbehavior); obj.pushKV("synced_headers", statestats.nSyncHeight); obj.pushKV("synced_blocks", statestats.nCommonHeight); UniValue heights(UniValue::VARR); for (const int height : statestats.vHeightInFlight) { heights.push_back(height); } obj.pushKV("inflight", heights); } obj.pushKV("whitelisted", stats.fWhitelisted); obj.pushKV("minfeefilter", ValueFromAmount(stats.minFeeFilter)); UniValue sendPerMsgCmd(UniValue::VOBJ); for (const auto& i : stats.mapSendBytesPerMsgCmd) { if (i.second > 0) sendPerMsgCmd.pushKV(i.first, i.second); } obj.pushKV("bytessent_per_msg", sendPerMsgCmd); UniValue recvPerMsgCmd(UniValue::VOBJ); for (const auto& i : stats.mapRecvBytesPerMsgCmd) { if (i.second > 0) recvPerMsgCmd.pushKV(i.first, i.second); } obj.pushKV("bytesrecv_per_msg", recvPerMsgCmd); ret.push_back(obj); } return ret; } static UniValue addnode(const JSONRPCRequest& request) { std::string strCommand; if (!request.params[1].isNull()) strCommand = request.params[1].get_str(); if (request.fHelp || request.params.size() != 2 || (strCommand != "onetry" && strCommand != "add" && strCommand != "remove")) throw std::runtime_error( RPCHelpMan{"addnode", "\nAttempts to add or 
remove a node from the addnode list.\n" "Or try a connection to a node once.\n" "Nodes added using addnode (or -connect) are protected from DoS disconnection and are not required to be\n" "full nodes/support SegWit as other outbound peers are (though such peers will not be synced from).\n", { {"node", RPCArg::Type::STR, RPCArg::Optional::NO, "The node (see getpeerinfo for nodes)"}, {"command", RPCArg::Type::STR, RPCArg::Optional::NO, "'add' to add a node to the list, 'remove' to remove a node from the list, 'onetry' to try a connection to the node once"}, }, RPCResults{}, RPCExamples{ HelpExampleCli("addnode", "\"192.168.0.6:9333\" \"onetry\"") + HelpExampleRpc("addnode", "\"192.168.0.6:9333\", \"onetry\"") }, }.ToString()); if(!g_connman) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); std::string strNode = request.params[0].get_str(); if (strCommand == "onetry") { CAddress addr; g_connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), false, false, true); return NullUniValue; } if (strCommand == "add") { if(!g_connman->AddNode(strNode)) throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: Node already added"); } else if(strCommand == "remove") { if(!g_connman->RemoveAddedNode(strNode)) throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node has not been added."); } return NullUniValue; } static UniValue disconnectnode(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() == 0 || request.params.size() >= 3) throw std::runtime_error( RPCHelpMan{"disconnectnode", "\nImmediately disconnects from the specified peer node.\n" "\nStrictly one out of 'address' and 'nodeid' can be provided to identify the node.\n" "\nTo disconnect by nodeid, either set 'address' to the empty string, or call using the named 'nodeid' argument only.\n", { {"address", RPCArg::Type::STR, /* default */ "fallback to nodeid", "The IP address/port of the node"}, {"nodeid", RPCArg::Type::NUM, /* 
default */ "fallback to address", "The node ID (see getpeerinfo for node IDs)"}, }, RPCResults{}, RPCExamples{ HelpExampleCli("disconnectnode", "\"192.168.0.6:8333\"") + HelpExampleCli("disconnectnode", "\"\" 1") + HelpExampleRpc("disconnectnode", "\"192.168.0.6:8333\"") + HelpExampleRpc("disconnectnode", "\"\", 1") }, }.ToString()); if(!g_connman) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); bool success; const UniValue &address_arg = request.params[0]; const UniValue &id_arg = request.params[1]; if (!address_arg.isNull() && id_arg.isNull()) { /* handle disconnect-by-address */ success = g_connman->DisconnectNode(address_arg.get_str()); } else if (!id_arg.isNull() && (address_arg.isNull() || (address_arg.isStr() && address_arg.get_str().empty()))) { /* handle disconnect-by-id */ NodeId nodeid = (NodeId) id_arg.get_int64(); success = g_connman->DisconnectNode(nodeid); } else { throw JSONRPCError(RPC_INVALID_PARAMS, "Only one of address and nodeid should be provided."); } if (!success) { throw JSONRPCError(RPC_CLIENT_NODE_NOT_CONNECTED, "Node not found in connected nodes"); } return NullUniValue; } static UniValue getaddednodeinfo(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() > 1) throw std::runtime_error( RPCHelpMan{"getaddednodeinfo", "\nReturns information about the given added node, or all added nodes\n" "(note that onetry addnodes are not listed here)\n", { {"node", RPCArg::Type::STR, /* default */ "all nodes", "If provided, return information about this specific node, otherwise all nodes are returned."}, }, RPCResult{ "[\n" " {\n" " \"addednode\" : \"192.168.0.201\", (string) The node IP address or name (as provided to addnode)\n" " \"connected\" : true|false, (boolean) If connected\n" " \"addresses\" : [ (list of objects) Only when connected = true\n" " {\n" " \"address\" : \"192.168.0.201:9333\", (string) The zencoin server IP and port we're connected to\n" " \"connected\" 
: \"outbound\" (string) connection, inbound or outbound\n" " }\n" " ]\n" " }\n" " ,...\n" "]\n" }, RPCExamples{ HelpExampleCli("getaddednodeinfo", "\"192.168.0.201\"") + HelpExampleRpc("getaddednodeinfo", "\"192.168.0.201\"") }, }.ToString()); if(!g_connman) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); std::vector<AddedNodeInfo> vInfo = g_connman->GetAddedNodeInfo(); if (!request.params[0].isNull()) { bool found = false; for (const AddedNodeInfo& info : vInfo) { if (info.strAddedNode == request.params[0].get_str()) { vInfo.assign(1, info); found = true; break; } } if (!found) { throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node has not been added."); } } UniValue ret(UniValue::VARR); for (const AddedNodeInfo& info : vInfo) { UniValue obj(UniValue::VOBJ); obj.pushKV("addednode", info.strAddedNode); obj.pushKV("connected", info.fConnected); UniValue addresses(UniValue::VARR); if (info.fConnected) { UniValue address(UniValue::VOBJ); address.pushKV("address", info.resolvedAddress.ToString()); address.pushKV("connected", info.fInbound ? 
"inbound" : "outbound"); addresses.push_back(address); } obj.pushKV("addresses", addresses); ret.push_back(obj); } return ret; } static UniValue getnettotals(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() > 0) throw std::runtime_error( RPCHelpMan{"getnettotals", "\nReturns information about network traffic, including bytes in, bytes out,\n" "and current time.\n", {}, RPCResult{ "{\n" " \"totalbytesrecv\": n, (numeric) Total bytes received\n" " \"totalbytessent\": n, (numeric) Total bytes sent\n" " \"timemillis\": t, (numeric) Current UNIX time in milliseconds\n" " \"uploadtarget\":\n" " {\n" " \"timeframe\": n, (numeric) Length of the measuring timeframe in seconds\n" " \"target\": n, (numeric) Target in bytes\n" " \"target_reached\": true|false, (boolean) True if target is reached\n" " \"serve_historical_blocks\": true|false, (boolean) True if serving historical blocks\n" " \"bytes_left_in_cycle\": t, (numeric) Bytes left in current time cycle\n" " \"time_left_in_cycle\": t (numeric) Seconds left in current time cycle\n" " }\n" "}\n" }, RPCExamples{ HelpExampleCli("getnettotals", "") + HelpExampleRpc("getnettotals", "") }, }.ToString()); if(!g_connman) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); UniValue obj(UniValue::VOBJ); obj.pushKV("totalbytesrecv", g_connman->GetTotalBytesRecv()); obj.pushKV("totalbytessent", g_connman->GetTotalBytesSent()); obj.pushKV("timemillis", GetTimeMillis()); UniValue outboundLimit(UniValue::VOBJ); outboundLimit.pushKV("timeframe", g_connman->GetMaxOutboundTimeframe()); outboundLimit.pushKV("target", g_connman->GetMaxOutboundTarget()); outboundLimit.pushKV("target_reached", g_connman->OutboundTargetReached(false)); outboundLimit.pushKV("serve_historical_blocks", !g_connman->OutboundTargetReached(true)); outboundLimit.pushKV("bytes_left_in_cycle", g_connman->GetOutboundTargetBytesLeft()); outboundLimit.pushKV("time_left_in_cycle", 
g_connman->GetMaxOutboundTimeLeftInCycle()); obj.pushKV("uploadtarget", outboundLimit); return obj; } static UniValue GetNetworksInfo() { UniValue networks(UniValue::VARR); for(int n=0; n<NET_MAX; ++n) { enum Network network = static_cast<enum Network>(n); if(network == NET_UNROUTABLE || network == NET_INTERNAL) continue; proxyType proxy; UniValue obj(UniValue::VOBJ); GetProxy(network, proxy); obj.pushKV("name", GetNetworkName(network)); obj.pushKV("limited", !IsReachable(network)); obj.pushKV("reachable", IsReachable(network)); obj.pushKV("proxy", proxy.IsValid() ? proxy.proxy.ToStringIPPort() : std::string()); obj.pushKV("proxy_randomize_credentials", proxy.randomize_credentials); networks.push_back(obj); } return networks; } static UniValue getnetworkinfo(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() != 0) throw std::runtime_error( RPCHelpMan{"getnetworkinfo", "Returns an object containing various state info regarding P2P networking.\n", {}, RPCResult{ "{\n" " \"version\": xxxxx, (numeric) the server version\n" " \"subversion\": \"/Satoshi:x.x.x/\", (string) the server subversion string\n" " \"protocolversion\": xxxxx, (numeric) the protocol version\n" " \"localservices\": \"xxxxxxxxxxxxxxxx\", (string) the services we offer to the network\n" " \"localrelay\": true|false, (bool) true if transaction relay is requested from peers\n" " \"timeoffset\": xxxxx, (numeric) the time offset\n" " \"connections\": xxxxx, (numeric) the number of connections\n" " \"networkactive\": true|false, (bool) whether p2p networking is enabled\n" " \"networks\": [ (array) information per network\n" " {\n" " \"name\": \"xxx\", (string) network (ipv4, ipv6 or onion)\n" " \"limited\": true|false, (boolean) is the network limited using -onlynet?\n" " \"reachable\": true|false, (boolean) is the network reachable?\n" " \"proxy\": \"host:port\" (string) the proxy that is used for this network, or empty if none\n" " \"proxy_randomize_credentials\": true|false, 
(string) Whether randomized credentials are used\n" " }\n" " ,...\n" " ],\n" " \"relayfee\": x.xxxxxxxx, (numeric) minimum relay fee for transactions in " + CURRENCY_UNIT + "/kB\n" " \"incrementalfee\": x.xxxxxxxx, (numeric) minimum fee increment for mempool limiting or BIP 125 replacement in " + CURRENCY_UNIT + "/kB\n" " \"localaddresses\": [ (array) list of local addresses\n" " {\n" " \"address\": \"xxxx\", (string) network address\n" " \"port\": xxx, (numeric) network port\n" " \"score\": xxx (numeric) relative score\n" " }\n" " ,...\n" " ]\n" " \"warnings\": \"...\" (string) any network and blockchain warnings\n" "}\n" }, RPCExamples{ HelpExampleCli("getnetworkinfo", "") + HelpExampleRpc("getnetworkinfo", "") }, }.ToString()); LOCK(cs_main); UniValue obj(UniValue::VOBJ); obj.pushKV("version", CLIENT_VERSION); obj.pushKV("subversion", strSubVersion); obj.pushKV("protocolversion",PROTOCOL_VERSION); if(g_connman) obj.pushKV("localservices", strprintf("%016x", g_connman->GetLocalServices())); obj.pushKV("localrelay", g_relay_txes); obj.pushKV("timeoffset", GetTimeOffset()); if (g_connman) { obj.pushKV("networkactive", g_connman->GetNetworkActive()); obj.pushKV("connections", (int)g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL)); } obj.pushKV("networks", GetNetworksInfo()); obj.pushKV("relayfee", ValueFromAmount(::minRelayTxFee.GetFeePerK())); obj.pushKV("incrementalfee", ValueFromAmount(::incrementalRelayFee.GetFeePerK())); UniValue localAddresses(UniValue::VARR); { LOCK(cs_mapLocalHost); for (const std::pair<const CNetAddr, LocalServiceInfo> &item : mapLocalHost) { UniValue rec(UniValue::VOBJ); rec.pushKV("address", item.first.ToString()); rec.pushKV("port", item.second.nPort); rec.pushKV("score", item.second.nScore); localAddresses.push_back(rec); } } obj.pushKV("localaddresses", localAddresses); obj.pushKV("warnings", GetWarnings("statusbar")); return obj; } static UniValue setban(const JSONRPCRequest& request) { const RPCHelpMan help{"setban", "\nAttempts to 
add or remove an IP/Subnet from the banned list.\n", { {"subnet", RPCArg::Type::STR, RPCArg::Optional::NO, "The IP/Subnet (see getpeerinfo for nodes IP) with an optional netmask (default is /32 = single IP)"}, {"command", RPCArg::Type::STR, RPCArg::Optional::NO, "'add' to add an IP/Subnet to the list, 'remove' to remove an IP/Subnet from the list"}, {"bantime", RPCArg::Type::NUM, /* default */ "0", "time in seconds how long (or until when if [absolute] is set) the IP is banned (0 or empty means using the default time of 24h which can also be overwritten by the -bantime startup argument)"}, {"absolute", RPCArg::Type::BOOL, /* default */ "false", "If set, the bantime must be an absolute timestamp in seconds since epoch (Jan 1 1970 GMT)"}, }, RPCResults{}, RPCExamples{ HelpExampleCli("setban", "\"192.168.0.6\" \"add\" 86400") + HelpExampleCli("setban", "\"192.168.0.0/24\" \"add\"") + HelpExampleRpc("setban", "\"192.168.0.6\", \"add\", 86400") }, }; std::string strCommand; if (!request.params[1].isNull()) strCommand = request.params[1].get_str(); if (request.fHelp || !help.IsValidNumArgs(request.params.size()) || (strCommand != "add" && strCommand != "remove")) { throw std::runtime_error(help.ToString()); } if (!g_banman) { throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded"); } CSubNet subNet; CNetAddr netAddr; bool isSubnet = false; if (request.params[0].get_str().find('/') != std::string::npos) isSubnet = true; if (!isSubnet) { CNetAddr resolved; LookupHost(request.params[0].get_str().c_str(), resolved, false); netAddr = resolved; } else LookupSubNet(request.params[0].get_str().c_str(), subNet); if (! (isSubnet ? subNet.IsValid() : netAddr.IsValid()) ) throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Invalid IP/Subnet"); if (strCommand == "add") { if (isSubnet ? 
g_banman->IsBanned(subNet) : g_banman->IsBanned(netAddr)) { throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: IP/Subnet already banned"); } int64_t banTime = 0; //use standard bantime if not specified if (!request.params[2].isNull()) banTime = request.params[2].get_int64(); bool absolute = false; if (request.params[3].isTrue()) absolute = true; if (isSubnet) { g_banman->Ban(subNet, BanReasonManuallyAdded, banTime, absolute); if (g_connman) { g_connman->DisconnectNode(subNet); } } else { g_banman->Ban(netAddr, BanReasonManuallyAdded, banTime, absolute); if (g_connman) { g_connman->DisconnectNode(netAddr); } } } else if(strCommand == "remove") { if (!( isSubnet ? g_banman->Unban(subNet) : g_banman->Unban(netAddr) )) { throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Unban failed. Requested address/subnet was not previously banned."); } } return NullUniValue; } static UniValue listbanned(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() != 0) throw std::runtime_error( RPCHelpMan{"listbanned", "\nList all banned IPs/Subnets.\n", {}, RPCResults{}, RPCExamples{ HelpExampleCli("listbanned", "") + HelpExampleRpc("listbanned", "") }, }.ToString()); if(!g_banman) { throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded"); } banmap_t banMap; g_banman->GetBanned(banMap); UniValue bannedAddresses(UniValue::VARR); for (const auto& entry : banMap) { const CBanEntry& banEntry = entry.second; UniValue rec(UniValue::VOBJ); rec.pushKV("address", entry.first.ToString()); rec.pushKV("banned_until", banEntry.nBanUntil); rec.pushKV("ban_created", banEntry.nCreateTime); rec.pushKV("ban_reason", banEntry.banReasonToString()); bannedAddresses.push_back(rec); } return bannedAddresses; } static UniValue clearbanned(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() != 0) throw std::runtime_error( RPCHelpMan{"clearbanned", "\nClear all banned IPs.\n", {}, RPCResults{}, RPCExamples{ 
HelpExampleCli("clearbanned", "") + HelpExampleRpc("clearbanned", "") }, }.ToString()); if (!g_banman) { throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded"); } g_banman->ClearBanned(); return NullUniValue; } static UniValue setnetworkactive(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( RPCHelpMan{"setnetworkactive", "\nDisable/enable all p2p network activity.\n", { {"state", RPCArg::Type::BOOL, RPCArg::Optional::NO, "true to enable networking, false to disable"}, }, RPCResults{}, RPCExamples{""}, }.ToString() ); } if (!g_connman) { throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); } g_connman->SetNetworkActive(request.params[0].get_bool()); return g_connman->GetNetworkActive(); } static UniValue getnodeaddresses(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( RPCHelpMan{"getnodeaddresses", "\nReturn known addresses which can potentially be used to find new nodes in the network\n", { {"count", RPCArg::Type::NUM, /* default */ "1", "How many addresses to return. 
Limited to the smaller of " + std::to_string(ADDRMAN_GETADDR_MAX) + " or " + std::to_string(ADDRMAN_GETADDR_MAX_PCT) + "% of all known addresses."}, }, RPCResult{ "[\n" " {\n" " \"time\": ttt, (numeric) Timestamp in seconds since epoch (Jan 1 1970 GMT) keeping track of when the node was last seen\n" " \"services\": n, (numeric) The services offered\n" " \"address\": \"host\", (string) The address of the node\n" " \"port\": n (numeric) The port of the node\n" " }\n" " ,....\n" "]\n" }, RPCExamples{ HelpExampleCli("getnodeaddresses", "8") + HelpExampleRpc("getnodeaddresses", "8") }, }.ToString()); } if (!g_connman) { throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); } int count = 1; if (!request.params[0].isNull()) { count = request.params[0].get_int(); if (count <= 0) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Address count out of range"); } } // returns a shuffled list of CAddress std::vector<CAddress> vAddr = g_connman->GetAddresses(); UniValue ret(UniValue::VARR); int address_return_count = std::min<int>(count, vAddr.size()); for (int i = 0; i < address_return_count; ++i) { UniValue obj(UniValue::VOBJ); const CAddress& addr = vAddr[i]; obj.pushKV("time", (int)addr.nTime); obj.pushKV("services", (uint64_t)addr.nServices); obj.pushKV("address", addr.ToStringIP()); obj.pushKV("port", addr.GetPort()); ret.push_back(obj); } return ret; } // clang-format off static const CRPCCommand commands[] = { // category name actor (function) argNames // --------------------- ------------------------ ----------------------- ---------- { "network", "getconnectioncount", &getconnectioncount, {} }, { "network", "ping", &ping, {} }, { "network", "getpeerinfo", &getpeerinfo, {} }, { "network", "addnode", &addnode, {"node","command"} }, { "network", "disconnectnode", &disconnectnode, {"address", "nodeid"} }, { "network", "getaddednodeinfo", &getaddednodeinfo, {"node"} }, { "network", "getnettotals", &getnettotals, {} }, { "network", 
"getnetworkinfo", &getnetworkinfo, {} }, { "network", "setban", &setban, {"subnet", "command", "bantime", "absolute"} }, { "network", "listbanned", &listbanned, {} }, { "network", "clearbanned", &clearbanned, {} }, { "network", "setnetworkactive", &setnetworkactive, {"state"} }, { "network", "getnodeaddresses", &getnodeaddresses, {"count"} }, }; // clang-format on void RegisterNetRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) t.appendCommand(commands[vcidx].name, &commands[vcidx]); }
// Copyright (c) 2013 The Bitcoin Core developers // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. // // Unit tests for alert system // #include "alert.h" #include "clientversion.h" #include "data/alertTests.raw.h" #include "serialize.h" #include "streams.h" #include "util.h" #include "utilstrencodings.h" #include <fstream> #include <boost/filesystem/operations.hpp> #include <boost/foreach.hpp> #include <boost/test/unit_test.hpp> #if 0 // // alertTests contains 7 alerts, generated with this code: // (SignAndSave code not shown, alert signing key is secret) // { CAlert alert; alert.nRelayUntil = 60; alert.nExpiration = 24 * 60 * 60; alert.nID = 1; alert.nCancel = 0; // cancels previous messages up to this ID number alert.nMinVer = 0; // These versions are protocol versions alert.nMaxVer = 999001; alert.nPriority = 1; alert.strComment = "Alert comment"; alert.strStatusBar = "Alert 1"; SignAndSave(alert, "test/alertTests"); alert.setSubVer.insert(std::string("/Satoshi:0.1.0/")); alert.strStatusBar = "Alert 1 for Satoshi 0.1.0"; SignAndSave(alert, "test/alertTests"); alert.setSubVer.insert(std::string("/Satoshi:0.2.0/")); alert.strStatusBar = "Alert 1 for Satoshi 0.1.0, 0.2.0"; SignAndSave(alert, "test/alertTests"); alert.setSubVer.clear(); ++alert.nID; alert.nCancel = 1; alert.nPriority = 100; alert.strStatusBar = "Alert 2, cancels 1"; SignAndSave(alert, "test/alertTests"); alert.nExpiration += 60; ++alert.nID; SignAndSave(alert, "test/alertTests"); ++alert.nID; alert.nMinVer = 11; alert.nMaxVer = 22; SignAndSave(alert, "test/alertTests"); ++alert.nID; alert.strStatusBar = "Alert 2 for Satoshi 0.1.0"; alert.setSubVer.insert(std::string("/Satoshi:0.1.0/")); SignAndSave(alert, "test/alertTests"); ++alert.nID; alert.nMinVer = 0; alert.nMaxVer = 999999; alert.strStatusBar = "Evil Alert'; /bin/ls; echo '"; alert.setSubVer.clear(); SignAndSave(alert, "test/alertTests"); } #endif 
struct ReadAlerts { ReadAlerts() { std::vector<unsigned char> vch(alert_tests::alertTests, alert_tests::alertTests + sizeof(alert_tests::alertTests)); CDataStream stream(vch, SER_DISK, CLIENT_VERSION); try { while (!stream.eof()) { CAlert alert; stream >> alert; alerts.push_back(alert); } } catch (std::exception) { } } ~ReadAlerts() { } static std::vector<std::string> read_lines(boost::filesystem::path filepath) { std::vector<std::string> result; std::ifstream f(filepath.string().c_str()); std::string line; while (std::getline(f,line)) result.push_back(line); return result; } std::vector<CAlert> alerts; }; #ifdef DISABLE_PASSED_TEST BOOST_FIXTURE_TEST_SUITE(Alert_tests, ReadAlerts) BOOST_AUTO_TEST_CASE(AlertApplies) { SetMockTime(11); BOOST_FOREACH(const CAlert& alert, alerts) { BOOST_CHECK(alert.CheckSignature()); } BOOST_CHECK(alerts.size() >= 3); // Matches: BOOST_CHECK(alerts[0].AppliesTo(1, "")); BOOST_CHECK(alerts[0].AppliesTo(999001, "")); BOOST_CHECK(alerts[0].AppliesTo(1, "/Satoshi:11.11.11/")); BOOST_CHECK(alerts[1].AppliesTo(1, "/Satoshi:0.1.0/")); BOOST_CHECK(alerts[1].AppliesTo(999001, "/Satoshi:0.1.0/")); BOOST_CHECK(alerts[2].AppliesTo(1, "/Satoshi:0.1.0/")); BOOST_CHECK(alerts[2].AppliesTo(1, "/Satoshi:0.2.0/")); // Don't match: BOOST_CHECK(!alerts[0].AppliesTo(-1, "")); BOOST_CHECK(!alerts[0].AppliesTo(999002, "")); BOOST_CHECK(!alerts[1].AppliesTo(1, "")); BOOST_CHECK(!alerts[1].AppliesTo(1, "Satoshi:0.1.0")); BOOST_CHECK(!alerts[1].AppliesTo(1, "/Satoshi:0.1.0")); BOOST_CHECK(!alerts[1].AppliesTo(1, "Satoshi:0.1.0/")); BOOST_CHECK(!alerts[1].AppliesTo(-1, "/Satoshi:0.1.0/")); BOOST_CHECK(!alerts[1].AppliesTo(999002, "/Satoshi:0.1.0/")); BOOST_CHECK(!alerts[1].AppliesTo(1, "/Satoshi:0.2.0/")); BOOST_CHECK(!alerts[2].AppliesTo(1, "/Satoshi:0.3.0/")); SetMockTime(0); } BOOST_AUTO_TEST_CASE(AlertNotify) { SetMockTime(11); boost::filesystem::path temp = GetTempPath() / "alertnotify.txt"; boost::filesystem::remove(temp); mapArgs["-alertnotify"] = 
std::string("echo %s >> ") + temp.string(); BOOST_FOREACH(CAlert alert, alerts) alert.ProcessAlert(false); std::vector<std::string> r = read_lines(temp); BOOST_CHECK_EQUAL(r.size(), 4u); // Windows built-in echo semantics are different than posixy shells. Quotes and // whitespace are printed literally. #ifndef WIN32 BOOST_CHECK_EQUAL(r[0], "Alert 1"); BOOST_CHECK_EQUAL(r[1], "Alert 2, cancels 1"); BOOST_CHECK_EQUAL(r[2], "Alert 2, cancels 1"); BOOST_CHECK_EQUAL(r[3], "Evil Alert; /bin/ls; echo "); // single-quotes should be removed #else BOOST_CHECK_EQUAL(r[0], "'Alert 1' "); BOOST_CHECK_EQUAL(r[1], "'Alert 2, cancels 1' "); BOOST_CHECK_EQUAL(r[2], "'Alert 2, cancels 1' "); BOOST_CHECK_EQUAL(r[3], "'Evil Alert; /bin/ls; echo ' "); #endif boost::filesystem::remove(temp); SetMockTime(0); } BOOST_AUTO_TEST_SUITE_END() #endif
/** @file "/owlcpp/include/owlcpp/rdf/detail/triple_index_fwd.hpp"
part of owlcpp project.
@n @n Distributed under the Boost Software License, Version 1.0; see doc/license.txt.
@n Copyright Mikhail K Levin 2013
*******************************************************************************/
#ifndef TRIPLE_INDEX_FWD_HPP_
#define TRIPLE_INDEX_FWD_HPP_

namespace owlcpp{ namespace map_triple_detail{

/**@brief Store RDF triples in a searchable fashion

This is a forward declaration only, kept in a lightweight header so that
other headers can name Triple_index without pulling in its full definition
(the definition presumably lives in the corresponding non-_fwd header —
verify against the project layout).

@tparam Map a map between Tag0 elements and <Tag1,Tag2,Tag3> triple fragments
@tparam Tag0 numerical tag indicating which triple element is indexed first
@tparam Tag1 numerical tag indicating which triple element is indexed second
@tparam Tag2 numerical tag indicating which triple element is indexed third
@tparam Tag3 numerical tag indicating which triple element is indexed fourth
*******************************************************************************/
template<
   template<class,class,class,class> class Map,
   class Tag0, class Tag1, class Tag2, class Tag3
> class Triple_index;

}//namespace map_triple_detail
}//namespace owlcpp
#endif /* TRIPLE_INDEX_FWD_HPP_ */
// stdafx.cpp : source file that includes just the standard includes // ga.pch will be the pre-compiled header // stdafx.obj will contain the pre-compiled type information #include "stdafx.h" // TODO: reference any additional headers you need in STDAFX.H // and not in this file
// This file is a part of the OpenSurgSim project.
// Copyright 2013-2016, SimQuest Solutions Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Unit tests for CcdCollisionLoop: its setters and its time-of-impact (toi)
// search/filtering over CCD contacts.

#include <gtest/gtest.h>

#include "SurgSim/Collision/CollisionPair.h"
#include "SurgSim/DataStructures/Location.h"
#include "SurgSim/Math/Vector.h"
#include "SurgSim/Physics/CcdCollisionLoop.h"
#include "SurgSim/Physics/PushResults.h"
#include "SurgSim/Physics/RigidCollisionRepresentation.h"
#include "SurgSim/Physics/SolveMlcp.h"

namespace SurgSim
{
namespace Physics
{

// Smoke test: the SolveMlcp/PushResults setters accept ownership of the
// passed computations without throwing.
TEST(CcdCollisionLoopTest, Setters)
{
	auto ccd = std::make_shared<CcdCollisionLoop>(false);

	std::unique_ptr<SolveMlcp> solveMlcp(new SolveMlcp(false));
	ccd->setSolveMlcp(std::move(solveMlcp));

	std::unique_ptr<PushResults> pushResults(new PushResults(false));
	ccd->setPushResults(std::move(pushResults));
}

// findEarliestContact() should report the smallest contact time among all
// pairs, and filterLaterContacts() should drop every contact later than toi.
TEST(CcdCollisionLoopTest, FilterContacts)
{
	double toi = -1.0;
	auto rep1 = std::make_shared<RigidCollisionRepresentation>("One");
	rep1->setSelfCollisionDetectionType(Collision::COLLISION_DETECTION_TYPE_CONTINUOUS);
	// Self-collision pair: both sides are the same representation.
	auto pair = std::make_shared<Collision::CollisionPair>(rep1, rep1);
	auto computation = std::make_shared<CcdCollisionLoop>(false);
	DataStructures::Location location;

	std::vector<std::shared_ptr<Collision::CollisionPair>> pairs(1, pair);

	// No contacts yet, so there is no earliest contact to find.
	EXPECT_FALSE(computation->findEarliestContact(pairs, &toi));

	// Check that we find the toi correctly
	pair->addCcdContact(0.0, 0.1, Math::Vector3d::Zero(), Math::Vector3d::Zero(), std::make_pair(location, location));
	EXPECT_TRUE(computation->findEarliestContact(pairs, &toi));
	EXPECT_DOUBLE_EQ(0.1, toi);
	// Filtering with epsilon 0.0 keeps the single contact at toi itself.
	computation->filterLaterContacts(&pairs, 0.0, toi);
	EXPECT_EQ(1u, pair->getContacts().size());

	pair->addCcdContact(0.0, 0.2, Math::Vector3d::Zero(), Math::Vector3d::Zero(), std::make_pair(location, location));
	pair->addCcdContact(0.0, 0.3, Math::Vector3d::Zero(), Math::Vector3d::Zero(), std::make_pair(location, location));

	// Check that we filter everything after the toi
	toi = 0.0;
	EXPECT_EQ(3u, pair->getContacts().size());
	EXPECT_TRUE(computation->findEarliestContact(pairs, &toi));
	computation->filterLaterContacts(&pairs, 0.0, toi);
	// Earliest contact is still 0.1; the 0.2 and 0.3 contacts are removed.
	EXPECT_DOUBLE_EQ(0.1, toi);
	EXPECT_EQ(1u, pair->getContacts().size());
}

// Same as above, but with a non-zero epsilon: the filter threshold appears to
// be toi + epsilon, not toi itself (inferred from the expectations below —
// TODO confirm against filterLaterContacts' implementation).
TEST(CcdCollisionLoopTest, FilterContactsWithEpsilon)
{
	double toi = -1.0;
	auto rep1 = std::make_shared<RigidCollisionRepresentation>("One");
	rep1->setSelfCollisionDetectionType(Collision::COLLISION_DETECTION_TYPE_CONTINUOUS);
	auto pair = std::make_shared<Collision::CollisionPair>(rep1, rep1);
	auto computation = std::make_shared<CcdCollisionLoop>(false);
	DataStructures::Location location;

	std::vector<std::shared_ptr<Collision::CollisionPair>> pairs(1, pair);

	// Check that we find the toi correctly
	pair->addCcdContact(0.0, 0.1, Math::Vector3d::Zero(), Math::Vector3d::Zero(), std::make_pair(location, location));
	pair->addCcdContact(0.0, 0.2, Math::Vector3d::Zero(), Math::Vector3d::Zero(), std::make_pair(location, location));
	pair->addCcdContact(0.0, 0.3, Math::Vector3d::Zero(), Math::Vector3d::Zero(), std::make_pair(location, location));

	EXPECT_TRUE(computation->findEarliestContact(pairs, &toi));
	computation->filterLaterContacts(&pairs, 0.11, toi);
	// With epsilon = 0.11 the cutoff is toi + epsilon = 0.21, so the contacts
	// at 0.1 and 0.2 survive while 0.3 is filtered out; toi itself is not
	// altered by filtering and stays at the earliest contact time, 0.1.
	EXPECT_DOUBLE_EQ(0.1, toi);
	EXPECT_EQ(2u, pair->getContacts().size());
}

}
}
#include "Image.h"
#include "Base.h"

namespace tools
{

// Construct an empty image; all fields stay zeroed until one of the
// create() factories fills them in.
Image::Image() :
    _data(NULL), _format(RGBA), _width(0), _height(0), _bpp(0)
{
}

Image::~Image()
{
    delete[] _data;
}

/**
 * Loads a PNG file from disk.
 *
 * Supported color types: gray (LUMINANCE, 1 bpp), RGB (3 bpp) and
 * RGBA (4 bpp); 16-bit channels and palettes are normalized to 8-bit
 * via the PNG_TRANSFORM_* flags.
 *
 * @param path Path of the PNG file to read.
 * @return A newly allocated Image the caller owns, or NULL on any error
 *         (missing file, bad signature, unsupported color type, ...).
 *
 * NOTE(review): png_create_read_struct is given NULL error handlers, which
 * normally requires a setjmp(png_jmpbuf(png)) recovery point before calling
 * libpng read functions — confirm against the libpng manual whether a fatal
 * decode error is recoverable here.
 */
Image* Image::create(const char* path)
{
    // Open the file.
    FILE* fp = fopen(path, "rb");
    if (fp == NULL)
    {
        LOG(1, "Failed to open image file '%s'.\n", path);
        return NULL;
    }

    // Verify PNG signature.
    unsigned char sig[8];
    if (fread(sig, 1, 8, fp) != 8 || png_sig_cmp(sig, 0, 8) != 0)
    {
        LOG(1, "Failed to load file '%s'; not a valid PNG.\n", path);
        if (fclose(fp) != 0)
        {
            LOG(1, "Failed to close image file '%s'.\n", path);
        }
        return NULL;
    }

    // Initialize png read struct (last three parameters use stderr+longjump if NULL).
    png_structp png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (png == NULL)
    {
        LOG(1, "Failed to create PNG structure for reading PNG file '%s'.\n", path);
        if (fclose(fp) != 0)
        {
            LOG(1, "Failed to close image file '%s'.\n", path);
        }
        return NULL;
    }

    // Initialize info struct.
    png_infop info = png_create_info_struct(png);
    if (info == NULL)
    {
        LOG(1, "Failed to create PNG info structure for PNG file '%s'.\n", path);
        if (fclose(fp) != 0)
        {
            LOG(1, "Failed to close image file '%s'.\n", path);
        }
        png_destroy_read_struct(&png, NULL, NULL);
        return NULL;
    }

    // Initialize file io.
    png_init_io(png, fp);

    // Indicate that we already read the first 8 bytes (signature).
    png_set_sig_bytes(png, 8);

    // Read the entire image into memory, expanding to 8-bit packed channels.
    png_read_png(png, info, PNG_TRANSFORM_STRIP_16 | PNG_TRANSFORM_PACKING | PNG_TRANSFORM_EXPAND, NULL);

    Image* image = new Image();
    image->_width = png_get_image_width(png, info);
    image->_height = png_get_image_height(png, info);

    png_byte colorType = png_get_color_type(png, info);
    switch (colorType)
    {
    case PNG_COLOR_TYPE_GRAY:
        image->_bpp = 1;
        image->_format = Image::LUMINANCE;
        break;
    case PNG_COLOR_TYPE_RGBA:
        image->_bpp = 4;
        image->_format = Image::RGBA;
        break;
    case PNG_COLOR_TYPE_RGB:
        image->_bpp = 3;
        image->_format = Image::RGB;
        break;
    default:
        LOG(1, "Unsupported PNG color type (%d) for image file '%s'.\n", (int)colorType, path);
        if (fclose(fp) != 0)
        {
            LOG(1, "Failed to close image file '%s'.\n", path);
        }
        png_destroy_read_struct(&png, &info, NULL);
        // Fix: the partially-initialized Image was leaked on this path.
        delete image;
        return NULL;
    }

    size_t stride = png_get_rowbytes(png, info);

    // Allocate image data.
    image->_data = new unsigned char[stride * image->_height];

    // Read rows into image data.
    png_bytepp rows = png_get_rows(png, info);
    for (unsigned int i = 0; i < image->_height; ++i)
    {
        memcpy(image->_data+(stride * i), rows[i], stride);
    }

    // Clean up.
    png_destroy_read_struct(&png, &info, NULL);
    if (fclose(fp) != 0)
    {
        LOG(1, "Failed to close image file '%s'.\n", path);
    }

    return image;
}

/**
 * Creates a blank (zero-filled) image of the given format and size.
 *
 * @param format Pixel format (LUMINANCE, RGB or RGBA).
 * @param width  Width in pixels.
 * @param height Height in pixels.
 * @return A newly allocated Image the caller owns, or NULL if the format
 *         is not one of the supported enumerators.
 */
Image* Image::create(Format format, unsigned int width, unsigned int height)
{
    unsigned int bpp;
    switch (format)
    {
    case LUMINANCE:
        bpp = 1;
        break;
    case RGB:
        bpp = 3;
        break;
    case RGBA:
        bpp = 4;
        break;
    default:
        LOG(1, "Invalid image format passed to create.\n");
        return NULL;
    }

    Image* image = new Image();
    image->_format = format;
    image->_width = width;
    image->_height = height;
    image->_bpp = bpp;
    image->_data = new unsigned char[width * height * bpp];
    memset(image->_data, 0, width * height * bpp);
    return image;
}

// Raw pixel buffer (width * height * bpp bytes), owned by this Image.
void* Image::getData() const
{
    return _data;
}

// Copies width * height * bpp bytes from `data` into the internal buffer;
// the caller must supply at least that many bytes.
void Image::setData(void* data)
{
    memcpy(_data, data, _width * _height * _bpp);
}

Image::Format Image::getFormat() const
{
    return _format;
}

unsigned int Image::getHeight() const
{
    return _height;
}

unsigned int Image::getWidth() const
{
    return _width;
}

unsigned int Image::getBpp() const
{
    return _bpp;
}

// Maps our Format enum onto libpng's color-type constant; RGBA is the
// fallback for unrecognized values.
int getPNGColorType(Image::Format format)
{
    switch (format)
    {
    case Image::LUMINANCE:
        return PNG_COLOR_TYPE_GRAY;
    case Image::RGB:
        return PNG_COLOR_TYPE_RGB;
    case Image::RGBA:
        return PNG_COLOR_TYPE_RGBA;
    }
    return PNG_COLOR_TYPE_RGBA;
}

/**
 * Writes this image to disk as an 8-bit-per-channel, non-interlaced PNG.
 *
 * Errors are logged and the function returns early via the shared cleanup
 * label; partial files may remain on disk after a failed write.
 *
 * @param path Destination file path.
 */
void Image::save(const char* path)
{
    png_structp png_ptr = NULL;
    png_infop info_ptr = NULL;
    png_bytep row = NULL;
    unsigned int stride;
    int index;

    FILE* fp = fopen(path, "wb");
    if (fp == NULL)
    {
        LOG(1, "Error: Failed to open image for writing: %s\n", path);
        goto error;
    }

    png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (png_ptr == NULL)
    {
        LOG(1, "Error: Write struct creation failed: %s\n", path);
        goto error;
    }

    info_ptr = png_create_info_struct(png_ptr);
    if (info_ptr == NULL)
    {
        LOG(1, "Error: Info struct creation failed: %s\n", path);
        goto error;
    }

    png_init_io(png_ptr, fp);

    png_set_IHDR(png_ptr, info_ptr, _width, _height, 8, getPNGColorType(_format),
        PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
    png_write_info(png_ptr, info_ptr);

    // Allocate memory for a single row of image data
    stride = _bpp * _width * sizeof(png_byte);
    row = (png_bytep)malloc(stride);

    // Stream the buffer out one row at a time.
    index = 0;
    for (unsigned int y = 0; y < _height; ++y)
    {
        for (unsigned int x = 0; x < stride; ++x)
        {
            // Write data
            row[x] = (png_byte)_data[index++];
        }
        png_write_row(png_ptr, row);
    }
    png_write_end(png_ptr, NULL);

error:
    // Shared cleanup for both success and failure paths.
    if (fp)
        fclose(fp);
    if (row)
        free(row);
    if (info_ptr)
        png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
    if (png_ptr)
        png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
}

}
#include "Xor.h" #include "V3DITKFilterDualImage.h" // ITK Header Files #include "itkXorImageFilter.h" // Q_EXPORT_PLUGIN2 ( PluginName, ClassName ) // The value of PluginName should correspond to the TARGET specified in the // plugin's project file. Q_EXPORT_PLUGIN2(Xor, XorPlugin) QStringList XorPlugin::menulist() const { return QStringList() << QObject::tr("ITK Xor") << QObject::tr("about this plugin"); } QStringList XorPlugin::funclist() const { return QStringList(); } template <typename TPixelType> class PluginSpecialized : public V3DITKFilterDualImage< TPixelType, TPixelType > { typedef V3DITKFilterDualImage< TPixelType, TPixelType > Superclass; typedef typename Superclass::Input3DImageType ImageType; typedef itk::XorImageFilter< ImageType, ImageType > FilterType; public: PluginSpecialized( V3DPluginCallback * callback ): Superclass(callback) { this->m_Filter = FilterType::New(); this->RegisterInternalFilter( this->m_Filter, 1.0 ); } virtual ~PluginSpecialized() {}; void Execute(const QString &menu_name, QWidget *parent) { this->SetImageSelectionDialogTitle("Xor two Images"); this->AddImageSelectionLabel("Image 1"); this->AddImageSelectionLabel("Image 2"); this->m_ImageSelectionDialog.SetCallback(this->m_V3DPluginCallback); this->Compute(); } virtual void ComputeOneRegion() { this->m_Filter->SetInput1( this->GetInput3DImage1() ); this->m_Filter->SetInput2( this->GetInput3DImage2() ); this->m_Filter->Update(); this->SetOutputImage( this->m_Filter->GetOutput() ); } private: typename FilterType::Pointer m_Filter; }; #define EXECUTE_PLUGIN_FOR_ONE_IMAGE_TYPE( v3d_pixel_type, c_pixel_type ) \ case v3d_pixel_type: \ { \ PluginSpecialized< c_pixel_type > runner( &callback ); \ runner.Execute( menu_name, parent ); \ break; \ } void XorPlugin::dofunc(const QString & func_name, const V3DPluginArgList & input, V3DPluginArgList & output, QWidget * parent) { // empty by now } void XorPlugin::domenu(const QString & menu_name, V3DPluginCallback & callback, QWidget * parent) 
{ if (menu_name == QObject::tr("about this plugin")) { QMessageBox::information(parent, "Version info", "ITK Xor 1.0 (2010-Jul-31): this plugin is developed by Luis Ibanez."); return; } v3dhandle curwin = callback.currentImageWindow(); if (!curwin) { v3d_msg(tr("You don't have any image open in the main window.")); return; } Image4DSimple *p4DImage = callback.getImage(curwin); if (! p4DImage) { v3d_msg(tr("The input image is null.")); return; } EXECUTE_PLUGIN_FOR_INTEGER_PIXEL_TYPES; }
/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "modules/donottrack/NavigatorDoNotTrack.h"

#include "core/frame/LocalFrame.h"
#include "core/frame/Navigator.h"
#include "core/loader/FrameLoaderClient.h"

namespace blink {

NavigatorDoNotTrack::NavigatorDoNotTrack(Navigator& navigator)
    : Supplement<Navigator>(navigator) {}

DEFINE_TRACE(NavigatorDoNotTrack) {
  Supplement<Navigator>::trace(visitor);
}

const char* NavigatorDoNotTrack::supplementName() {
  return "NavigatorDoNotTrack";
}

NavigatorDoNotTrack& NavigatorDoNotTrack::from(Navigator& navigator) {
  // Lazily attach the supplement to the navigator on first access.
  NavigatorDoNotTrack* existing = static_cast<NavigatorDoNotTrack*>(
      Supplement<Navigator>::from(navigator, supplementName()));
  if (existing)
    return *existing;
  NavigatorDoNotTrack* created = new NavigatorDoNotTrack(navigator);
  provideTo(navigator, supplementName(), created);
  return *created;
}

String NavigatorDoNotTrack::doNotTrack(Navigator& navigator) {
  // Static entry point used by the bindings layer.
  return NavigatorDoNotTrack::from(navigator).doNotTrack();
}

String NavigatorDoNotTrack::doNotTrack() {
  // A detached navigator (no frame, or no loader client) yields a null String.
  LocalFrame* frame = supplementable()->frame();
  if (!frame)
    return String();
  FrameLoaderClient* client = frame->loader().client();
  if (!client)
    return String();
  return client->doNotTrackValue();
}

}  // namespace blink
#include "metronome.h" #include "constants.h" #include "metronome_gl.h" #include "../third_party/gui_math.h" #include "../third_party/gui_window.h" #include "../third_party/gui_io.h" enum image { I_START, I_STOP, I_BPM, I_SEGMENT_HOR, I_SEGMENT_VER, I_SEGMENT_HOR_MINI, I_SEGMENT_VER_MINI }; enum sound_type { S_SECONDARY, S_MAIN }; static void play (metronome_app* app) { if (timer_get_value (app -> play_timer) >= app -> play_timer.target_miliseconds) { audio_play (&app -> sounds[app -> click_count == 0 ? S_MAIN : S_SECONDARY]); timer_reset (&app -> play_timer); ++app -> click_count; if (app -> click_count == app -> count) app -> click_count = 0; } } static int digit_to_code (unsigned digit) { switch (digit) { case 0: return 0b00111111; case 1: return 0b00000110; case 2: return 0b01011011; case 3: return 0b01001111; case 4: return 0b01100110; case 5: return 0b01101101; case 6: return 0b01111101; case 7: return 0b00000111; case 8: return 0b01111111; case 9: return 0b01101111; default: io_log_error ("%d is not a digit", digit); return 0; } } static void handle_input (metronome_app* app, metronome_input input, gui_window window) { if (app -> changing_value_type != CT_NONE) { if (input.lmb_up) { app -> changing_value_type = CT_NONE; wnd_uncapture_cursor (); } return; } rect bpm_rect = make_rect (ACTIVE_BPM_RECT); rect count_rect = make_rect (ACTIVE_METER_COUNT_RECT); rect length_rect = make_rect (ACTIVE_METER_LEN_RECT); if (is_point_in_rect (bpm_rect, input.mouse_pos) && input.lmb_down) app -> changing_value_type = CT_BPM; else if (is_point_in_rect (count_rect, input.mouse_pos) && input.lmb_down) app -> changing_value_type = CT_COUNT; else if (is_point_in_rect (length_rect, input.mouse_pos) && input.lmb_down) app -> changing_value_type = CT_LENGTH; if (app -> changing_value_type != CT_NONE) { app -> drag_origin = input.mouse_pos; wnd_capture_cursor (window); } } static void update_digit_segments (int code, digit* result) { for (unsigned i = 0; i < DIGIT_SEGMENT_COUNT; 
++i) { result -> segments[i].on = code & 1; code >>= 1; } } static void update_number (unsigned number, unsigned count, digit digits[]) { for (int i = count - 1; i >= 0; --i) { unsigned d = number % 10; update_digit_segments (digit_to_code (d), &(digits[i])); number /= 10; } } static void draw_digit (v2 origin, digit d, v4 color) { for (unsigned j = 0; j < DIGIT_SEGMENT_COUNT; ++j) { digit_segment segment = d.segments[j]; if (segment.on) { rect r = make_rect (origin + segment.position, 0.0f, 0.0f); gl_draw_image (r, color, segment.image); } } } static void update_bpm (metronome_app* app, metronome_input input, gui_window window) { if (app -> changing_value_type != CT_BPM) return; int tempo_change = (int)(input.mouse_pos.x - app -> drag_origin.x) / TEMPO_CHANGE_STEP; if (tempo_change != 0) { app -> tempo += tempo_change; app -> tempo = CLAMP (app -> tempo, MIN_TEMPO, MAX_TEMPO); app -> play_timer.target_miliseconds = 60000 / app -> tempo / (app -> length / 4); app -> drag_origin = input.mouse_pos; update_number (app -> tempo, BPM_DIGIT_COUNT, app -> bpm_digits); } } static void draw_bpm (metronome_app* app, metronome_input input) { v2 positions[] = { make_v2 (BPM_DIGIT_1_POSITION), make_v2 (BPM_DIGIT_2_POSITION), make_v2 (BPM_DIGIT_3_POSITION) }; v4 color = make_color_white (); rect r = make_rect (ACTIVE_BPM_RECT); if (is_point_in_rect (r, input.mouse_pos) || app -> changing_value_type == CT_BPM) color = make_color (DIGIT_HOVER_COLOR); for (unsigned i = app -> tempo < 100 ? 1 : 0; i < BPM_DIGIT_COUNT; ++i) draw_digit (positions[i], app -> bpm_digits[i], color); rect bpm_rect = make_rect (BPM_TEXT_POSITION, 0, 0); gl_draw_image (bpm_rect, make_color_white (), app -> images[I_BPM]); } static void update_meter_count (metronome_app* app, metronome_input input, gui_window window) { if (app -> changing_value_type != CT_COUNT) return; int change = (int)(input.mouse_pos.x - app -> drag_origin.x) / METER_CHANGE_STEP; if (change != 0) { app -> count_index += change < 0 ? 
-1 : 1; app -> count_index = CLAMP (app -> count_index, 0, METER_COUNT_VALUE_COUNT - 1); app -> count = count_values[app -> count_index]; app -> drag_origin = input.mouse_pos; update_number (app -> count, METER_DIGIT_COUNT, app -> meter_count_digits); } } static void draw_meter_count (metronome_app* app, metronome_input input) { v2 positions[] = { make_v2 (COUNT_DIGIT_1_POSITION), make_v2 (COUNT_DIGIT_2_POSITION) }; v4 color = make_color_white (); rect r = make_rect (ACTIVE_METER_COUNT_RECT); if (is_point_in_rect (r, input.mouse_pos) || app -> changing_value_type == CT_COUNT) color = make_color (DIGIT_HOVER_COLOR); for (unsigned i = app -> count < 10 ? 1 : 0; i < METER_DIGIT_COUNT; ++i) draw_digit (positions[i], app -> meter_count_digits[i], color); } static void update_meter_length (metronome_app* app, metronome_input input, gui_window window) { if (app -> changing_value_type != CT_LENGTH) return; int change = (int)(input.mouse_pos.x - app -> drag_origin.x) / METER_CHANGE_STEP; if (change != 0) { app -> length_index += change < 0 ? -1 : 1; app -> length_index = CLAMP (app -> length_index, 0, METER_LEN_VALUE_COUNT -1); app -> length = length_values[app -> length_index]; app -> play_timer.target_miliseconds = 60000 / app -> tempo / (app -> length / 4); app -> drag_origin = input.mouse_pos; update_number (app -> length, METER_DIGIT_COUNT, app -> meter_length_digits); } } static void draw_meter_length (metronome_app* app, metronome_input input) { v2 positions[] = { make_v2 (LENGTH_DIGIT_1_POSITION), make_v2 (LENGTH_DIGIT_2_POSITION) }; v4 color = make_color_white (); rect r = make_rect (ACTIVE_METER_LEN_RECT); if (is_point_in_rect (r, input.mouse_pos) || app -> changing_value_type == CT_LENGTH) color = make_color (DIGIT_HOVER_COLOR); for (unsigned i = app -> length < 10 ? 
1 : 0; i < METER_DIGIT_COUNT; ++i) draw_digit (positions[i], app -> meter_length_digits[i], color); } static bool draw_button (metronome_app* app, metronome_input input) { bool result = false; rect r = make_rect (0, WINDOW_HEIGHT - BUTTON_HEIGHT, WINDOW_WIDTH, BUTTON_HEIGHT); v4 color = make_color (BUTTON_DEFAULT_COLOR); rect image_rect = r; if (is_point_in_rect (r, input.mouse_pos)) { color = make_color (BUTTON_HOVER_COLOR); result = input.lmb_up && app -> changing_value_type == CT_NONE; if (input.lmb_pressed) image_rect.y += 2; } gl_draw_rect (r, color); gl_draw_image (image_rect, make_color_white (), app -> playing ? app -> images[I_STOP] : app -> images[I_START]); return result || input.space_down; } static void draw_app_border () { rect t = make_rect (0, 0, WINDOW_WIDTH, PLAY_INDICATOR_WIDTH); rect r = make_rect (WINDOW_WIDTH - PLAY_INDICATOR_WIDTH, 0, PLAY_INDICATOR_WIDTH, WINDOW_HEIGHT); rect b = make_rect (0, WINDOW_HEIGHT - PLAY_INDICATOR_WIDTH, WINDOW_WIDTH, PLAY_INDICATOR_WIDTH); rect l = make_rect (0, 0, PLAY_INDICATOR_WIDTH, WINDOW_HEIGHT); v4 color = make_color (PLAY_INDICATOR_COLOR); gl_draw_rect (t, color); gl_draw_rect (r, color); gl_draw_rect (b, color); gl_draw_rect (l, color); } static void init_digit (metronome_app* app, v2 positions[], bool images[], digit* d, image hor_type, image ver_type) { for (unsigned i = 0; i < DIGIT_SEGMENT_COUNT; ++i) { digit_segment* segment = &d -> segments[i]; segment -> image = images[i] ? 
app -> images[hor_type] : app -> images[ver_type]; segment -> position = positions[i]; segment -> on = false; } } void metronome_init (void* memory, gui_window window) { metronome_app* app = (metronome_app*)memory; for (unsigned i = 0; i < IMAGE_COUNT; ++i) { if (!resources_load_image (images[i], &app -> images[i])) io_log_error ("Image at %s could not be loaded", images[i]); gl_load_image (&app -> images[i]); } app -> playing = false; app -> changing_value_type = CT_NONE; app -> tempo = 120; app -> count_index = 2; app -> count = START_COUNT; app -> length_index = 0; app -> length = START_LENGTH; app -> click_count = 0; app -> play_timer.target_miliseconds = 60000 / app -> tempo / (app -> length / 4); bool images[] = { true, false, false, true, false, false, true }; // True - horizontal, false - vertical v2 positions[] = { make_v2 (SEGMENT_A), make_v2 (SEGMENT_B), make_v2 (SEGMENT_C), make_v2 (SEGMENT_D), make_v2 (SEGMENT_E), make_v2 (SEGMENT_F), make_v2 (SEGMENT_G) }; for (unsigned i = 0; i < BPM_DIGIT_COUNT; ++i) init_digit (app, positions, images, &app -> bpm_digits[i], I_SEGMENT_HOR, I_SEGMENT_VER); v2 mini_positions[] = { make_v2 (SEGMENT_MINI_A), make_v2 (SEGMENT_MINI_B), make_v2 (SEGMENT_MINI_C), make_v2 (SEGMENT_MINI_D), make_v2 (SEGMENT_MINI_E), make_v2 (SEGMENT_MINI_F), make_v2 (SEGMENT_MINI_G) }; for (unsigned i = 0; i < METER_DIGIT_COUNT; ++i) { init_digit (app, mini_positions, images, &app -> meter_count_digits[i], I_SEGMENT_HOR_MINI, I_SEGMENT_VER_MINI); init_digit (app, mini_positions, images, &app -> meter_length_digits[i], I_SEGMENT_HOR_MINI, I_SEGMENT_VER_MINI); } update_number (app -> tempo, BPM_DIGIT_COUNT, app -> bpm_digits); update_number (app -> count, METER_DIGIT_COUNT, app -> meter_count_digits); update_number (app -> length, METER_DIGIT_COUNT, app -> meter_length_digits); gl_init (window); audio_init (); for (unsigned i = 0; i < SOUND_COUNT; ++i) app -> sounds[i] = audio_load (sounds[i]); } void metronome_update (void* memory, 
metronome_input input, gui_window window) { metronome_app* app = (metronome_app*)memory; handle_input (app, input, window); update_meter_count (app, input, window); draw_meter_count (app, input); rect divider_rect = make_rect (METER_DIVIDER); v4 color = make_color (ACCENT_COLOR); gl_draw_rect (divider_rect, color); update_meter_length (app, input, window); draw_meter_length (app, input); update_bpm (app, input, window); draw_bpm (app, input); if (draw_button (app, input)) { app -> playing = !app -> playing; if (!app -> playing) { app -> click_count = 0; timer_reset (&app -> play_timer); } } if (app -> playing) { draw_app_border (); play (app); } } void metronome_deinit () { audio_deinit (); }
/*
 * Author: illuz <iilluzen[at]gmail.com>
 * File: AC_memorial_memorial_dfs_n^4.cpp
 * Create Date: 2015-03-08 13:42:18
 * Descripton: Use O(n^3) space to memorial.
 *             dp[i][j][n] means the s1[i~i+n] and s2[j~j+n] is match.
 */

#include <bits/stdc++.h>
using namespace std;

// Memo table: dp[i][j][len] is -1 (unknown), 0 (no) or 1 (yes) for
// "s1[i..i+len) is a scramble of s2[j..j+len)".
// Sized for inputs of length <= 109; longer input would index out of bounds.
int dp[110][110][110];

class Solution {
private:
    // Top-down DFS with memoization: tries every split point of the current
    // window, both with the halves kept in order and with them swapped.
    bool memorial_dfs(string &s1, string &s2, int p1, int p2, int len) {
        if (len == 1)
            return s1[p1] == s2[p2];
        // Identical substrings are trivially scrambles of each other.
        // string::compare avoids the two temporary strings substr() built.
        if (s1.compare(p1, len, s2, p2, len) == 0)
            return true;
        if (dp[p1][p2][len] != -1)
            return dp[p1][p2][len];
        for (int i = 1; i < len; ++i) {
            // First clause: halves matched in order; second: halves swapped.
            if ((memorial_dfs(s1, s2, p1, p2, i) &&
                 memorial_dfs(s1, s2, p1 + i, p2 + i, len - i)) ||
                (memorial_dfs(s1, s2, p1, p2 + len - i, i) &&
                 memorial_dfs(s1, s2, p1 + i, p2, len - i)))
                return dp[p1][p2][len] = true;
        }
        return dp[p1][p2][len] = false;
    }

public:
    // Returns true when s2 can be obtained from s1 by recursively splitting
    // s1 into two parts and optionally swapping them.
    bool isScramble(string s1, string s2) {
        if (s1.size() != s2.size())
            return false;
        int len = s1.size();
        memset(dp, -1, sizeof(dp));
        return memorial_dfs(s1, s2, 0, 0, len);
    }
};

int main() {
    string s1, s2;
    Solution s;
    // Reads string pairs from stdin; prints 1/0 per pair.
    while (cin >> s1 >> s2)
        cout << s.isScramble(s1, s2) << endl;
    return 0;
}
// Reducer: the gradient-bucketing / allreduce engine behind
// torch.nn.parallel.DistributedDataParallel.
#include <torch/csrc/distributed/c10d/reducer.h>

#include <functional>

#include <c10/core/DeviceGuard.h>
#include <c10/core/StreamGuard.h>
#include <c10/util/Exception.h>
#include <torch/csrc/autograd/engine.h>
#include <torch/csrc/autograd/function_hook.h>
#include <torch/csrc/autograd/functions/accumulate_grad.h>
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/autograd/utils/grad_layout_contract.h>
#include <torch/csrc/autograd/utils/lambda_post_hook.h>
#include <torch/csrc/distributed/c10d/comm.h>
#include <torch/csrc/utils/hash.h>
#include <torch/csrc/utils/memory.h>

namespace c10d {
namespace {

// Timestamp source for the per-parameter backward timing stats.
inline int64_t current_time_in_nanos() {
  return torch::autograd::profiler::getTime();
}

} // namespace

// Constructs the reducer for the given model replicas.
// - replicas: per-replica parameter lists; replica 0 is the canonical one.
// - bucket_indices: initial assignment of variable indices to buckets.
// - process_group: collective-communication backend used for the reductions.
// - expect_sparse_gradients: per-parameter flags; empty means "none sparse".
// - bucket_bytes_cap: size limit used when buckets are (re)built.
// - find_unused_parameters: enables tracking of parameters that receive no
//   gradient so they can still be marked ready.
Reducer::Reducer(
    std::vector<std::vector<torch::autograd::Variable>> replicas,
    std::vector<std::vector<size_t>> bucket_indices,
    std::shared_ptr<c10d::ProcessGroup> process_group,
    std::vector<std::vector<bool>> expect_sparse_gradients,
    int64_t bucket_bytes_cap,
    bool find_unused_parameters)
    : replicas_(std::move(replicas)),
      process_group_(std::move(process_group)),
      expect_sparse_gradients_(std::move(expect_sparse_gradients)),
      expect_autograd_hooks_(false),
      require_finalize_(false),
      next_bucket_(0),
      has_marked_unused_parameters_(false),
      find_unused_parameters_(find_unused_parameters),
      local_used_maps_reduced_(false),
      backward_stats_base_(0),
      has_rebuilt_bucket_(false),
      bucket_bytes_cap_(bucket_bytes_cap),
      comm_hook_(nullptr) {
  C10_LOG_API_USAGE_ONCE("torch.distributed.ddp.reducer");
  TORCH_CHECK(replicas_.size() >= 1, "Expected at least one model replica.");
  TORCH_CHECK(replicas_[0].size() >= 1, "Expected at least one parameter.");

  // If `expect_sparse_gradients` is not specified, initialize it such that
  // we do not expect sparse gradients for any parameter.
  if (expect_sparse_gradients_.empty()) {
    expect_sparse_gradients_ = std::vector<std::vector<bool>>(
        replicas_.size(), std::vector<bool>(replicas_[0].size(), false));
  }
  TORCH_INTERNAL_ASSERT(expect_sparse_gradients_.size() == replicas_.size());

  // Corresponding params' layouts (strides) must match across
  // replicas within this process and across processes.
  // (see Note: "Gradient Layout Contract" in initialize_buckets).
  verify_replicas_within_process();
  verify_replica0_across_processes();

  // Initialize variable bucketing.
  // This can be reinitialized later after capturing runtime information.
  initialize_buckets(std::move(bucket_indices));

  // All variables are expected to have their `grad_fn` set to the gradient
  // accumulation function (since they are leafs in the autograd graph).
  // We store pointers to these functions such that we can check if they are
  // used in an autograd pass. If they are not, we know their grad tensors
  // can be marked as ready for reduction.
  {
    const auto replica_count = replicas_.size();
    grad_accumulators_.resize(replica_count);
    for (size_t replica_index = 0; replica_index < replica_count;
         replica_index++) {
      const auto variable_count = replicas_[replica_index].size();
      grad_accumulators_[replica_index].resize(variable_count);
      for (size_t variable_index = 0; variable_index < variable_count;
           variable_index++) {
        auto& variable = replicas_[replica_index][variable_index];
        const auto index = VariableIndex{
            .replica_index = replica_index,
            .variable_index = variable_index,
        };

        // The gradient accumulator function is lazily initialized once.
        // Therefore we can use its presence in the autograd graph as
        // evidence that the parameter has participated in an iteration.
        auto grad_accumulator =
            torch::autograd::impl::grad_accumulator(variable);

        using torch::distributed::autograd::ThreadLocalDistAutogradContext;
        // Hook to execute after the gradient accumulator has executed.
        hooks_.emplace_back(
            grad_accumulator->add_post_hook(
                torch::make_unique<torch::autograd::utils::LambdaPostHook>(
                    [=](const torch::autograd::variable_list& outputs,
                        const torch::autograd::variable_list& /* unused */) {
                      // Capture the (possibly null) dist-autograd context of
                      // the backward pass before signalling readiness.
                      this->rpc_context_.set(
                          ThreadLocalDistAutogradContext::getContextPtr());
                      this->autograd_hook(index);
                      return outputs;
                    })),
            grad_accumulator);

        // Map raw function pointer to replica index and parameter index.
        // This is used later on when the autograd graph is traversed
        // to check for parameters for which no gradient is computed.
        func_[grad_accumulator.get()] = index;

        // The gradient accumulator is stored as weak_ptr in the autograd
        // metadata of the variable, so we have to keep it alive here for
        // the raw pointer to be valid.
        grad_accumulators_[replica_index][variable_index] =
            std::move(grad_accumulator);
      }
    }
  }

  // Initialize backward stats vector.
  {
    const auto replica_count = replicas_.size();
    backward_stats_.resize(replica_count);
    const auto variable_count = replicas_[0].size();
    std::for_each(
        backward_stats_.begin(),
        backward_stats_.end(),
        [=](std::vector<int64_t>& v) { v.resize(variable_count); });
  }

  // See Note [Skip allreducing local_used_maps_dev]
  if (find_unused_parameters_) {
    // Initialize locally used parameter maps
    {
      const auto replica_count = replicas_.size();
      const auto variable_count = replicas_[0].size();
      local_used_maps_.resize(replica_count);
      local_used_maps_dev_.resize(replica_count);
      for (size_t i = 0; i < replica_count; i++) {
        at::TensorOptions options;
        options = options.dtype(at::kInt);

        if (replicas_[i][0].is_cuda()) {
          at::DeviceGuard g(replicas_[i][0].device());
          // Pinned host memory so the host->device copy can be async.
          local_used_maps_[i] = at::zeros(
              {static_cast<long>(variable_count)}, options.pinned_memory(true));
        } else {
          local_used_maps_[i] =
              at::zeros({static_cast<long>(variable_count)}, options);
        }

        // This tensor needs to be on the same device as replica because backend
        // such as NCCL may not support CPU tensors, and hence it might not work
        // if we always put it on CPU.
        options = options.device(replicas_[i][0].device());
        local_used_maps_dev_[i] =
            at::empty({static_cast<long>(variable_count)}, options);
      }
    }
  }
}

// Note [Skip allreducing local_used_maps_dev]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
// If find_unused_parameters_ is set to false, there is no need to allreduce
// local_used_maps_dev_, because all parameters will be reduced anyway.
// Therefore, we can avoid allocating memory for local_used_maps and
// local_used_maps_dev_ if find_unused_parameters_ is false.

// Note [DDP Communication Hook]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
// If DDP communication hook is not registered, the reducer reduces the buckets
// by just calling allreduce. If registered, it calls the hook and uses future
// work handle.
// DDP communication hook is an enhancement that provides a hook which can be
// used to override how DDP communicates gradients across ranks, this can be
// used for algorithms like Gradient Compression/GossipGrad. This hook can be
// registered from Python API using `register_comm_hook`. `PythonCommHook`
// enables registering a Python hook and is a sub class of `CommHookInterface`.
// `CommHookInterface` can be used to implement CPP hooks in the future.

Reducer::~Reducer() noexcept(false) {
  // Remove all hooks on variables registered by this Reducer. This is necessary
  // to make DDP failure recoverable. Otherwise, multiple Reducer instances
  // (from recoveries) will add their hooks to the original model, and those
  // hooks will try to invoke methods on a deleted Reducer objects.
  for (auto& hook : hooks_) {
    auto& key = hook.first;
    auto& grad_accumulator = hook.second;
    TORCH_CHECK(
        grad_accumulator->del_post_hook(key),
        "Reducer attempts to delete a non-existing hook.");
  }
}

// Verifies replicas in this process treat the same number of params,
// all params require grad, and corresponding params across replicas
// have the same dtype/size/layout.
void Reducer::verify_replicas_within_process() { const auto replica_count = replicas_.size(); for (size_t replica_index = 0; replica_index < replica_count; replica_index++) { const auto variable_count = replicas_[replica_index].size(); TORCH_CHECK( replicas_[replica_index].size() == replicas_[0].size(), "Model replicas must have an equal number of parameters."); TORCH_CHECK( expect_sparse_gradients_[replica_index].size() == expect_sparse_gradients_[0].size(), "Expected number of entries in expect_sparse_gradients ", "to be equal across replicas."); for (size_t variable_index = 0; variable_index < variable_count; variable_index++) { TORCH_CHECK( replicas_[replica_index][variable_index].requires_grad(), "Variables must require gradients (have `requires_grad` set)."); TORCH_CHECK( replicas_[replica_index][variable_index].sizes() == replicas_[0][variable_index].sizes(), "Variables across model replicas must have identical sizes."); TORCH_CHECK( replicas_[replica_index][variable_index].strides() == replicas_[0][variable_index].strides(), "Variables across model replicas must have identical strides."); TORCH_CHECK( replicas_[replica_index][variable_index].dtype() == replicas_[0][variable_index].dtype(), "Variables across model replicas must have identical dtype."); TORCH_CHECK( expect_sparse_gradients_[replica_index][variable_index] == expect_sparse_gradients_[0][variable_index], "Expected the same variables across replicas to either both ", "or neither expect a sparse gradient."); } } } // Verifies corresponding params in replica 0 have the same sizes/strides // across processes. void Reducer::verify_replica0_across_processes() { size_t i = 0; for (const auto& t : replicas_[0]) { i += 2 * t.dim(); } at::TensorOptions options; options = options.dtype(at::kLong); auto metadata = at::empty({static_cast<long>(i)}, options); // Technically, process 0 is the broadcast source, so only process 0 needs // to populate metadata. But no harm keeping work aligned across processes. 
auto metadata_accessor = metadata.accessor<int64_t, 1>(); i = 0; for (const auto& t : replicas_[0]) { for (const auto& sz : t.sizes()) { metadata_accessor[i++] = sz; } for (const auto& str : t.strides()) { metadata_accessor[i++] = str; } } auto metadata_dev = metadata.clone().to(replicas_[0][0].device()); std::vector<at::Tensor> vec{metadata_dev}; process_group_->broadcast(vec)->wait(); // Technically, process 0 doesn't need to double-check metadata, because it // was the source. But no harm keeping work aligned. auto control = at::empty({static_cast<long>(i)}, options); control.copy_(metadata_dev, /*non_blocking=*/false); auto control_accessor = control.accessor<int64_t, 1>(); i = 0; for (size_t p = 0; p < replicas_[0].size(); p++) { const auto& t = replicas_[0][p]; // I'd like to include which process we are in the message, // but ProcessGroup::getRank is not public! for (const auto& sz : t.sizes()) { TORCH_CHECK( sz == control_accessor[i++], "replicas[0][", p, "] in this process" " with sizes ", t.sizes(), " appears not to match sizes of the same param in process 0."); } for (const auto& str : t.strides()) { TORCH_CHECK( str == control_accessor[i++], "replicas[0][", p, "] in this process" " with strides ", t.strides(), " appears not to match strides of the same param in process 0."); } } } void Reducer::mark_variable_ready_dense(VariableIndex index) { const auto replica_index = index.replica_index; const auto variable_index = index.variable_index; const auto& bucket_index = variable_locators_[variable_index]; auto& bucket = buckets_[bucket_index.bucket_index]; auto& replica = bucket.replicas[replica_index]; auto& variable = replica.variables[bucket_index.intra_bucket_index]; const auto offset = replica.offsets[bucket_index.intra_bucket_index]; const auto length = replica.lengths[bucket_index.intra_bucket_index]; auto& bucket_view = replica.bucket_views[bucket_index.intra_bucket_index]; // Copy contents of gradient tensor to bucket tensor. 
// If the gradient is not set, we assume it wasn't computed // as part of the current backwards pass, and zero the part // of the bucket it would otherwise hold. runGradCallbackForVariable(variable, [&](auto& grad) { if (grad.defined()) { // Ensure that the gradient type matches the bucket type. TORCH_CHECK( grad.options().type_equal(bucket_view.options()), "Expected ", bucket_view.toString(), ", got ", grad.toString()); // Assert that the grad tensor and the bucket don't share storage. // If they did, we could avoid the copy altogether. // The reason for not doing this is that existing code calls // `detach_` from `zero_grad`, which is incompatible with views. TORCH_INTERNAL_ASSERT(!grad.is_alias_of(bucket_view)); TORCH_INTERNAL_ASSERT(grad.device() == bucket_view.device()); TORCH_INTERNAL_ASSERT(grad.numel() == bucket_view.numel()); // AccumulateGrad doesn't HAVE to obey the grad layout contract. // The penalty for disobedience is reduced performance, not numerical // death. Warnings here help diagnose poor DDP performance. if (grad.strides() != bucket_view.strides()) { TORCH_WARN_ONCE( "Grad strides do not match bucket view strides. " "This may indicate grad was not created according to the " "gradient layout contract, or that the param's strides " "changed since DDP was constructed. This is not an error, " "but may impair performance.\n" "grad.sizes() = ", grad.sizes(), ", strides() = ", grad.strides(), "\n", "bucket_view.sizes() = ", bucket_view.sizes(), ", strides() = ", bucket_view.strides()); } // imitates wrapped_scalar_tensor in ATen/native/BinaryOps.cpp auto wrapped = c10::scalar_to_tensor(double(1.) / process_group_->getSize()); wrapped.unsafeGetTensorImpl()->set_wrapped_number(true); // Divides while copying into the bucket view. at::native::mul_out(bucket_view, grad, wrapped); } else { bucket_view.zero_(); } // The grad is not modified and doesn't need to be written back. 
    return false;
  });
}

// Marks a sparse-gradient variable ready: a sparse grad cannot be flattened
// into a shared dense bucket, so the whole sparse tensor becomes the bucket
// replica's `contents`, pre-divided by the world size.
void Reducer::mark_variable_ready_sparse(VariableIndex index) {
  const auto replica_index = index.replica_index;
  const auto variable_index = index.variable_index;
  const auto& bucket_index = variable_locators_[variable_index];
  auto& bucket = buckets_[bucket_index.bucket_index];
  auto& replica = bucket.replicas[replica_index];
  auto& variable = replica.variables[bucket_index.intra_bucket_index];
  runGradCallbackForVariable(variable, [&](auto& grad) {
    TORCH_CHECK(grad.defined(), "Expected sparse gradient to be defined.");
    TORCH_CHECK(
        grad.options().layout() == c10::kSparse,
        "Expected variable to have sparse gradient.");

    // Sparse tensors cannot be grouped together with other sparse tensors
    // in a single reduction operation like we can for dense tensors.
    // Therefore, the `offsets` and `lengths` vectors in the bucket replica
    // struct are empty, and there is no pre-existing accumulation tensor.
    // Directly assign the sparse tensor to the `contents` field.
    replica.contents = grad;
    replica.contents.div_(process_group_->getSize());
    // The grad is modified in place and needs to be written back.
    return true;
  });
}

// The function `autograd_hook` is called after the gradient for a
// model parameter has been accumulated into its gradient tensor.
// This function is only to be called from the autograd thread.
void Reducer::autograd_hook(VariableIndex index) {
  std::lock_guard<std::mutex> lock(this->mutex_);

  // See Note [Skip allreducing local_used_maps_dev]
  if (find_unused_parameters_) {
    // Since it gets here, this param has been used for this iteration. We want
    // to mark it in local_used_maps_. During no_sync session, the same var can
    // be set multiple times, which is OK as it does not affect correctness. As
    // long as it is used once during no_sync session, it is marked as used.
    local_used_maps_[index.replica_index][index.variable_index] = 1;
  }

  // Ignore if we don't expect to be called.
  // This may be the case if the user wants to accumulate gradients
  // for number of iterations before reducing them.
  if (!expect_autograd_hooks_) {
    return;
  }

  // Rebuild bucket only if 1) it is the first time to rebuild bucket 2)
  // find_unused_parameters_ is false, currently it does not support when there
  // are unused parameters 3) this backward pass needs to run allreduce. Here,
  // we just dump tensors and their parameter indices into rebuilt_params_ and
  // rebuilt_param_indices_ based on gradient arriving order, and then at the
  // end of finalize_backward(), buckets will be rebuilt based on
  // rebuilt_params_ and rebuilt_param_indices_, and then will be broadcasted
  // and initialized. Also we only need to dump tensors and parameter indices
  // of one replica.
  if (!has_rebuilt_bucket_ && !find_unused_parameters_ &&
      index.replica_index == 0) {
    rebuilt_params_.push_back(
        replicas_[index.replica_index][index.variable_index]);
    rebuilt_param_indices_.push_back(index.variable_index);
  }

  // If `find_unused_parameters_` is true there may be model parameters that
  // went unused when computing the model output, they won't be part of the
  // autograd graph, and won't receive gradients. These parameters are
  // discovered in the `prepare_for_backward` function and their indexes stored
  // in the `unused_parameters_` vector.
  if (!has_marked_unused_parameters_ && find_unused_parameters_) {
    has_marked_unused_parameters_ = true;
    for (const auto& unused_index : unused_parameters_) {
      mark_variable_ready(unused_index);
    }
  }

  // Finally mark variable for which this function was originally called.
  mark_variable_ready(index);
}

// Common bookkeeping for a variable whose gradient is available (either via
// the autograd hook or because it was pre-marked as unused): records timing,
// copies the grad into its bucket (dense or sparse path), and kicks off
// bucket reduction / finalization once the relevant counters hit zero.
void Reducer::mark_variable_ready(VariableIndex index) {
  const auto replica_index = index.replica_index;
  const auto variable_index = index.variable_index;
  TORCH_CHECK(replica_index < replicas_.size(), "Out of range replica index.");
  TORCH_CHECK(
      variable_index < variable_locators_.size(),
      "Out of range variable index.");
  backward_stats_[replica_index][variable_index] =
      current_time_in_nanos() - backward_stats_base_;

  // Any time we mark a variable ready (be it in line due to unused parameters,
  // or via an autograd hook), we require a call to the finalize function. If
  // this doesn't happen before the next iteration (or call to
  // `prepare_for_backwards`), we know something is wrong.
  require_finalize_ = true;

  const auto& bucket_index = variable_locators_[variable_index];
  auto& bucket = buckets_[bucket_index.bucket_index];
  auto& replica = bucket.replicas[replica_index];

  // Something is wrong if all variables contained in this bucket replica have
  // already been marked as ready.
  if (replica.pending == 0) {
    const auto common_error = c10::str(
        "Expected to mark a variable ready only once. ",
        "",
        "This error is caused by one of the following reasons: ",
        "1) Use of a module parameter outside the `forward` function. ",
        "Please make sure model parameters are not shared across multiple ",
        "concurrent forward-backward passes",
        "2) Reused parameters in multiple reentrant backward passes. For ",
        "example, if you use multiple `checkpoint` functions to wrap the ",
        "same part of your model, it would result in the same set of ",
        "parameters been used by different reentrant backward passes ",
        "multiple times, and hence marking a variable ready multiple times. ",
        "DDP does not support such use cases yet.");
    // Exactly one of the two checks below always fails: the first fires (with
    // the extended message including reason 3) when unused-parameter marking
    // has not happened yet; otherwise the second fires with the shorter one.
    TORCH_CHECK(
        has_marked_unused_parameters_,
        common_error,
        "3) Incorrect unused parameter detection. The return value of the ",
        "`forward` function is inspected by the distributed data parallel ",
        "wrapper to figure out if any of the module's parameters went ",
        "unused. For unused parameters, DDP would not expect gradients from ",
        "then. However, if an unused parameter becomes part of the autograd ",
        "graph at a later point in time (e.g., in a reentrant backward when ",
        "using `checkpoint`), the gradient will show up unexpectedly. If all ",
        "parameters in the model participate in the backward pass, you can ",
        "disable unused parameter detection by passing the keyword argument ",
        "`find_unused_parameters=False` to ",
        "`torch.nn.parallel.DistributedDataParallel`.");
    TORCH_CHECK(!has_marked_unused_parameters_, common_error);
  }

  if (bucket.expect_sparse_gradient) {
    mark_variable_ready_sparse(index);
  } else {
    mark_variable_ready_dense(index);
  }

  // TODO(@pietern): Make this work for both CPU/CUDA tensors.
  // When using CPU tensors we don't need to do this.
  // // Record event so that we can wait for all of them.
  // auto& event = replica.events[bucket_index.intra_bucket_index];
  // event.record();

  // Check if this was the final gradient for this bucket.
  if (--replica.pending == 0) {
    // Kick off reduction if all replicas for this bucket are ready.
    if (--bucket.pending == 0) {
      mark_bucket_ready(bucket_index.bucket_index);
    }
  }

  // Run finalizer function and kick off reduction for local_used_maps once the
  // final bucket was marked ready.
  if (next_bucket_ == buckets_.size()) {
    // See Note [Skip allreducing local_used_maps_dev]
    if (find_unused_parameters_) {
      // H2D from local_used_maps_ to local_used_maps_dev_
      for (size_t i = 0; i < local_used_maps_.size(); i++) {
        // We do async H2D to avoid the blocking overhead. The async copy and
        // allreduce respect the current stream, so will be sequenced correctly.
        local_used_maps_dev_[i].copy_(local_used_maps_[i], true);
      }
      local_used_work_ = process_group_->allreduce(local_used_maps_dev_);
    }

    // The autograd engine uses the default stream when running callbacks, so we
    // pass in the current CUDA stream in case it is not the default.
    c10::DeviceType deviceType = replica.contents.device().type();
    const c10::impl::VirtualGuardImpl guard =
        c10::impl::VirtualGuardImpl{deviceType};
    const c10::Stream currentStream =
        guard.getStream(replica.contents.device());
    // NOTE: captured by value ([=]) so `currentStream` stays valid when the
    // engine invokes this callback later, after this frame is gone.
    torch::autograd::Engine::get_default_engine().queue_callback([=] {
      std::unique_lock<std::mutex> lock(this->mutex_);
      // Run callback with the current stream
      c10::OptionalStreamGuard currentStreamGuard{currentStream};
      this->finalize_backward();
      // Rebuild bucket if this is the first time to rebuild
      if (!rebuilt_params_.empty()) {
        auto rebuilt_bucket_indices = rebuildBuckets();
        // Unlock before initialize_buckets() as initialize_buckets() requires a
        // lock, it could result in self deadlock without unlocking here.
        lock.unlock();
        initialize_buckets(std::move(rebuilt_bucket_indices));
      } else {
        lock.unlock();
      }
    });
  }
}

// Called when the bucket at the specified index is ready to be reduced.
void Reducer::mark_bucket_ready(size_t bucket_index) {
  TORCH_INTERNAL_ASSERT(bucket_index >= next_bucket_);

  // Buckets are reduced in sequence. Ignore this bucket if
  // it's not its turn to be reduced.
  if (bucket_index > next_bucket_) {
    return;
  }

  // Keep going, until we either:
  // - have kicked off reduction for all buckets, or
  // - found a bucket that's not yet ready for reduction.
  for (; next_bucket_ < buckets_.size() && buckets_[next_bucket_].pending == 0;
       next_bucket_++) {
    auto& bucket = buckets_[next_bucket_];
    std::vector<at::Tensor> tensors;
    tensors.reserve(bucket.replicas.size());
    for (const auto& replica : bucket.replicas) {
      // TODO(@pietern): Ensure proper synchronization with the CUDA events
      // that recorded copies into this contents tensor. If these copies are
      // executed on non-default streams, the current stream for the device
      // that holds the contents tensor must wait on these events.
      //
      // As long as autograd uses the default stream for every device,
      // these operations are implicitly sequenced, and we don't need to
      // do any extra synchronization here.
      //
      tensors.push_back(replica.contents);
    }
    // See Note [DDP Communication Hook]
    // TODO(@sinannasir): merge `work` and `future_work`. Related to GH Issue
    // #41266.
    if (comm_hook_ == nullptr) {
      bucket.work = process_group_->allreduce(tensors);
    } else {
      bucket.future_work = comm_hook_->runHook(GradBucket(tensors));
    }
  }
}

// Rebuilds all bucket state from the given per-bucket variable-index lists:
// validates sparse-gradient constraints, allocates one flat `contents` tensor
// per (bucket, replica), creates per-variable views into it, and fills the
// variable -> (bucket, position) locator table.
// Must not run concurrently with an in-flight backward pass.
void Reducer::initialize_buckets(
    std::vector<std::vector<size_t>> bucket_indices) {
  std::lock_guard<std::mutex> lock(mutex_);

  // This shouldn't be called if we're expecting autograd hooks to fire.
  TORCH_CHECK(
      !expect_autograd_hooks_,
      "`initialize_buckets` must NOT be called during autograd execution.");

  // Clear current bucket assignment.
  buckets_.clear();
  variable_locators_.clear();

  // Ensure we have a bucket index for every variable.
  variable_locators_.resize(replicas_[0].size());

  // Iterate over buckets.
  const auto bucket_count = bucket_indices.size();
  const auto replica_count = replicas_.size();
  buckets_.reserve(bucket_count);
  for (size_t bucket_index = 0; bucket_index < bucket_count; bucket_index++) {
    Bucket bucket;

    // TODO(@pietern): Validate indices.
    // Must be non-empty, unique, and unique across buckets.
    TORCH_CHECK(
        bucket_indices[bucket_index].size() > 0, "Empty bucket specified.");

    // Variables that expect sparse gradients must have their own bucket.
    if (bucket_indices[bucket_index].size() == 1) {
      const auto variable_index = bucket_indices[bucket_index].front();
      bucket.expect_sparse_gradient =
          expect_sparse_gradients_[0][variable_index];
    } else {
      for (const auto variable_index : bucket_indices[bucket_index]) {
        TORCH_CHECK(
            !expect_sparse_gradients_[0][variable_index],
            "Buckets with more than one variable cannot include variables ",
            "that expect a sparse gradient.");
      }
    }

    // Iterate over model replicas.
    for (size_t replica_index = 0; replica_index < replica_count;
         replica_index++) {
      BucketReplica replica;

      if (bucket.expect_sparse_gradient) {
        const auto variable_index = bucket_indices[bucket_index].front();
        const auto& variable = replicas_[replica_index][variable_index];
        TORCH_INTERNAL_ASSERT(bucket_indices[bucket_index].size() == 1);
        replica.variables = {variable};
      } else {
        at::TensorOptions options;
        size_t offset = 0;

        // Iterate over bucket variables.
        for (const auto variable_index : bucket_indices[bucket_index]) {
          TORCH_CHECK(
              variable_index < replicas_[replica_index].size(),
              "Out of range variable index specified.");
          const auto& variable = replicas_[replica_index][variable_index];
          if (!options.has_device()) {
            options = options.device(variable.device());
          } else {
            TORCH_CHECK(
                variable.device() == options.device(),
                "All parameters in a bucket must be ",
                "placed on the same device.");
          }
          if (!options.has_dtype()) {
            options = options.dtype(variable.dtype());
          } else {
            TORCH_CHECK(
                variable.dtype() == options.dtype(),
                "All parameters in a bucket must have the same dtype.");
          }
          const auto length = variable.numel();
          replica.variables.push_back(variable);
          replica.offsets.push_back(offset);
          replica.lengths.push_back(length);
          offset += length;
        }

        // Allocate bucket contents tensor.
        replica.contents = at::empty({static_cast<long>(offset)}, options);

        // Note: "Gradient Layout Contract"
        //
        // Here, create views into the contents tensor for each variable's
        // grad. Views serve as entry points to copy_ each grad's data in/out
        // of the flat contents tensor.
        //
        // Gradients may have dense memory but non-row-major-contiguous
        // strides (e.g. channels_last or channels_last_3d). For coalesced
        // accesses during copy_s, it's beneficial for each view's layout to
        // match its grad's layout.
        //
        // Specifically, we expect torch/csrc/autograd/AccumulateGrad.h
        // produces grads that obey their "Gradient Layout Contract":
        //   (1) if variable.is_non_overlapping_and_dense(), the stashed
        //       grad's strides match variable.
        //   (2) else, stashed grad is rowmajor contiguous.
        // and create views to match.
        //
        // If AccumulateGrad breaks the contract, and produces a grad with an
        // unexpected layout, performance will degrade due to poor memory
        // access patterns when copy_ing grad data in and out of its bucket
        // view. However, numerics remain correct, because the bucket view is
        // the same on either end of the raw allreduce. bucket_view.copy_(grad)
        // transposes (+ densifies) to the bucket view's layout, the data is
        // allreduced, then grad.copy_(bucket_view) transposes it back to
        // grad's layout.
        //
        // The only way the numerics can go haywire is if the bucket views
        // themselves have different layouts across processes (or replicas).
        // Bucket views' sizes and strides are set based on param layouts,
        // using the same logic that (we expect) AccumulateGrad uses for their
        // grads. Therefore, the only way a bucket view could have different
        // layouts in different processes is if its param has a different
        // layout in different processes. We can check that param layouts
        // match across processes and replicas in Reducer's constructor by
        // allreducing some metadata. Checking just once won't catch if
        // someone messes with param layouts over time, but not messing with
        // params after DDP construction is already a documented constraint.
        initialize_bucketviews(replica, replica.contents);
      }

      // Add bucket replica to enclosing bucket.
      bucket.replicas.push_back(std::move(replica));
    }

    // Map participating variables to this bucket.
    // This is identical across replicas so we only need to do this once.
    size_t intra_bucket_index = 0;
    for (const auto variable_index : bucket_indices[bucket_index]) {
      TORCH_CHECK(
          variable_index < variable_locators_.size(),
          "Out of range variable index specified.");
      variable_locators_[variable_index] = VariableLocator{
          .bucket_index = bucket_index,
          .intra_bucket_index = intra_bucket_index++,
      };
    }
    bucket.variable_indices = std::move(bucket_indices[bucket_index]);

    buckets_.push_back(std::move(bucket));
  }
}

// Creates one view into `contents` per variable in the replica, matching the
// variable's own layout where possible
// (see Note: "Gradient Layout Contract" in initialize_buckets).
void Reducer::initialize_bucketviews(
    Reducer::BucketReplica& replica,
    at::Tensor& contents) {
  for (size_t i = 0; i < replica.variables.size(); i++) {
    const auto& v = replica.variables[i];
    const auto offset = replica.offsets[i];
    const auto length = replica.lengths[i];
    if (v.is_non_overlapping_and_dense()) {
      // If the param's memory is dense, match its layout, anticipating
      // the autograd engine (AccumulateGrad) will also create gradients
      // matching its layout.
      replica.bucket_views.push_back(
          contents.as_strided(v.sizes(), v.strides(), offset));
    } else {
      // Fall back to a C-style contiguous view, again anticipating
      // AccumulateGrad will do the same when stashing grads for non-dense
      // params.
      replica.bucket_views.push_back(
          contents.narrow(0, offset, length).view(v.sizes()));
    }
  }
}

// Traverse the autograd graph starting at the specified output.
// All parameters for which we have a pointer to their gradient accumulation
// functions, but don't show up in the autograd graph will be marked ready
// for reduction as soon as the first autograd hook is called. This is not
// done immediately because the model output may be ignored, and we only
// want to start performing reductions on `torch.autograd.backward()`.
void Reducer::prepare_for_backward(
    const std::vector<torch::autograd::Variable>& outputs) {
  std::lock_guard<std::mutex> lock(mutex_);
  std::unordered_set<torch::autograd::Node*> seen;
  std::vector<torch::autograd::Node*> queue;

  // Check that any prior reduction has finished.
  // The variable `require_finalize_` is true until all gradients
  // have been computed and reduction of all buckets has been kicked off.
  if (require_finalize_) {
    TORCH_CHECK(
        false,
        "Expected to have finished reduction in the prior iteration before ",
        "starting a new one. ",
        "",
        "This error indicates that your module has parameters that were ",
        "not used in producing loss. ",
        "",
        "You can enable unused parameter detection by (1) passing the keyword "
        "argument `find_unused_parameters=True` to ",
        "`torch.nn.parallel.DistributedDataParallel`; (2) making sure all ",
        "`forward` function outputs participate in calculating loss. "
        "",
        "If you already have done the above two steps, then the distributed ",
        "data parallel module wasn't able to locate the output tensors in the ",
        "return value of your module's `forward` function. ",
        "Please include the loss function and the structure of the return ",
        "value of `forward` of your module when reporting this issue (e.g. ",
        "list, dict, iterable).");
  }

  // Reset accounting.
  expect_autograd_hooks_ = true;
  next_bucket_ = 0;
  backward_stats_base_ = current_time_in_nanos();
  for (auto& bucket : buckets_) {
    for (auto& replica : bucket.replicas) {
      replica.pending = replica.variables.size();
    }
    bucket.pending = bucket.replicas.size();
  }

  // Reset unused parameter accounting.
  has_marked_unused_parameters_ = false;
  unused_parameters_.clear();

  // If find_unused_parameters_ is false, we assume that autograd hooks for ALL
  // variables will be called, and we don't have to search the autograd graph
  // for presence of these hooks.
  if (!find_unused_parameters_) {
    return;
  }

  // Seed queue with the grad functions of all outputs.
  for (const auto& output : outputs) {
    const auto& grad_fn = output.grad_fn();
    if (grad_fn) {
      queue.push_back(grad_fn.get());
    }
  }

  // Traverse the autograd graph starting at the specified output.
  // Iterative depth-first search over next_edges; `seen` prevents revisiting
  // shared subgraphs.
  while (!queue.empty()) {
    auto fn = queue.back();
    queue.pop_back();
    for (const auto& edge : fn->next_edges()) {
      if (auto next_ptr = edge.function.get()) {
        const bool was_inserted = seen.insert(next_ptr).second;
        if (was_inserted) {
          queue.push_back(next_ptr);
        }
      }
    }
  }

  // Find accumulator functions that don't show up in this graph.
  for (const auto& it : func_) {
    // If the accumulator function is present in the graph, we know
    // a gradient will be computed for the corresponding parameter.
    if (seen.count(it.first) > 0) {
      continue;
    }
    unused_parameters_.push_back(it.second);
  }
}

// A bucket with one or more dense tensors needs to be unflattened:
// copy each reduced slice of the flat contents tensor back into the
// corresponding parameter's grad (skipping globally-unused parameters).
void Reducer::finalize_bucket_dense(Bucket& bucket) {
  for (size_t replica_index = 0; replica_index < bucket.replicas.size();
       replica_index++) {
    auto& replica = bucket.replicas[replica_index];
    for (size_t intra_bucket_index = 0;
         intra_bucket_index < replica.variables.size();
         intra_bucket_index++) {
      auto& variable = replica.variables[intra_bucket_index];
      const auto offset = replica.offsets[intra_bucket_index];
      const auto length = replica.lengths[intra_bucket_index];

      bool global_unused = false;
      // See Note [Skip allreducing local_used_maps_dev]
      if (find_unused_parameters_) {
        // Determine if this param has been used globally or not.
        //
        // If the variable was used locally, it is also used globally and then
        // we don't need to wait for the reduction. Otherwise we lazily wait
        // for the reduction to complete, only when we see a variable that was
        // unused locally. Then we end up delaying the synchronization point
        // that local_used_work_->wait() implies. If we don't have any unused
        // parameters at all, we can skip waiting for the work to complete
        // altogether, and cause negligible performance overhead for models
        // where all parameters are used. Such lazily waiting means minimizing
        // performance impact for the big majority of models where all
        // parameters are always used. Then we only pay the overhead cost if
        // there is indeed a parameter that is locally unused, because we need
        // to check if it's also globally unused.
        size_t variable_index = bucket.variable_indices[intra_bucket_index];
        // Note: global_unused might not be global yet. As we lazily wait for
        // the reduction to complete, it becomes really global only if we get
        // to the point as below where we wait for the reduction work, make
        // D2H copy, and update global_unused with the real global consensus,
        // i.e. local_used_maps_reduced_ is true.
        global_unused =
            local_used_maps_[replica_index][variable_index].item<int>() == 0;
        if (global_unused && !local_used_maps_reduced_) {
          // Wait for local_used_maps reduction to complete.
          local_used_work_->wait();
          // D2H from local_used_maps_dev_ to local_used_maps_
          for (size_t i = 0; i < local_used_maps_.size(); i++) {
            local_used_maps_[i].copy_(local_used_maps_dev_[i]);
          }
          global_unused =
              local_used_maps_[replica_index][variable_index].item<int>() == 0;
          local_used_maps_reduced_ = true;
        }
      }

      const auto& bucket_view = replica.bucket_views[intra_bucket_index];
      runGradCallbackForVariable(variable, [&](auto& grad) {
        // If a parameter is globally unused, we keep its grad untouched.
        if (!global_unused) {
          if (!grad.defined()) {
            // Creates grad according to the "Gradient Layout Contract"
            // (see torch/csrc/grad/AccumulateGrad.h)
            grad = torch::autograd::utils::clone_obey_contract(
                bucket_view, variable);
          } else {
            grad.copy_(bucket_view);
          }
          // The grad is modified and needs to be written back.
          return true;
        }
        // The grad is not modified.
        return false;
      });
    }
  }
}

// Waits for all kicked-off reductions, unflattens dense buckets back into
// parameter grads, and resets per-iteration state. Runs as an autograd engine
// callback at the end of the backward pass (see mark_variable_ready).
void Reducer::finalize_backward() {
  // No longer expect autograd hooks to fire after this function returns.
  TORCH_INTERNAL_ASSERT(expect_autograd_hooks_);
  expect_autograd_hooks_ = false;

  // No longer require call to finalize after this function returns.
  TORCH_INTERNAL_ASSERT(require_finalize_);
  require_finalize_ = false;

  // Check that all buckets were completed and had their work kicked off.
  TORCH_INTERNAL_ASSERT(next_bucket_ == buckets_.size());

  // Wait for asynchronous reduction to complete and unflatten contents.
  for (auto& bucket : buckets_) {
    // See Note [DDP Communication Hook]
    if (comm_hook_ == nullptr) {
      TORCH_INTERNAL_ASSERT(
          bucket.work,
          "Expected bucket.work not to be null. "
          "This may indicate that allreduce hooks were not properly installed.");
      bucket.work->wait();
    } else {
      TORCH_INTERNAL_ASSERT(
          bucket.future_work,
          "Expected bucket.future_work not to be null. "
          "This may indicate that communication hook was not properly installed.");
      bucket.future_work->wait();
      auto future_result =
          comm_hook_->processFuture(bucket.future_work->value());
      // Reinitialize bucket_views with the future_result by following
      // the same logic in `initialize_buckets`.
      for (size_t i = 0; i < future_result.size(); i++) {
        bucket.replicas[i].bucket_views.clear();
        initialize_bucketviews(bucket.replicas[i], future_result[i]);
      }
    }
    if (!bucket.expect_sparse_gradient) {
      // We don't need to finalize the sparse bucket since the sparse grad and
      // the bucket essentially point to the same storage. As a result, once
      // the allreduce is done, the sparse grads are automatically updated.
      finalize_bucket_dense(bucket);
    }
  }

  // See Note [Skip allreducing local_used_maps_dev]
  if (find_unused_parameters_) {
    // Reset unused parameter accounting.
    for (auto& local_used : local_used_maps_) {
      local_used.fill_(0);
    }
    // Due to the lazy wait, it is possible that reduction of the current
    // iteration is still going when the one for next iteration gets kicked
    // off. For such case, we want to wait explicitly to make sure the
    // reduction does complete before kicking off next one. Otherwise the
    // previous one may interfere, write to the device-side memory and clobber
    // the content of local_unused_maps_dev_.
    if (!local_used_maps_reduced_) {
      local_used_work_->wait();
    }
    local_used_maps_reduced_ = false;
  }
}

// Runs `cb` against the variable's gradient, dispatching through the
// distributed autograd context when one is active (RPC-based training) and
// straight to the local `.grad` otherwise.
void Reducer::runGradCallbackForVariable(
    torch::autograd::Variable& variable,
    GradCallback&& cb) {
  auto context_ptr = rpc_context_.context_ptr.load();
  if (context_ptr == nullptr) {
    cb(variable.mutable_grad());
  } else {
    // Under distributed autograd
    context_ptr->runGradCallbackForVariable(variable, std::move(cb));
  }
}

void Reducer::RpcContext::set(ContextPtr&& new_context_ptr) {
  // We should set 'new_context_ptr' even if it's nullptr. That means the
  // reducer is under a local backward run.
  const auto new_context_raw_ptr = new_context_ptr.get();
  if (context_ptr.exchange(new_context_raw_ptr) != new_context_raw_ptr) {
    // Set the shared ptr to the context only if it's set first time.
    // All call sites should use the same context ptr.
    // Use an atomic to avoid data race from multiple threads.
    context_ptr_holder = std::move(new_context_ptr);
  }
}

// Makes every rank agree on rank 0's bucket assignment: packs the local
// bucket indices (plus the bucket count) into an int tensor, broadcasts it
// from rank 0, then rebuilds `bucket_indices` from the received data.
void Reducer::sync_bucket_indices(
    std::vector<std::vector<size_t>>& bucket_indices) {
  auto num_buckets = bucket_indices.size();
  std::vector<size_t> bucket_sizes;
  bucket_sizes.reserve(num_buckets);
  int64_t total_size = 0;
  for (size_t i = 0; i < num_buckets; i++) {
    auto bucket_size = bucket_indices.at(i).size();
    bucket_sizes.push_back(bucket_size);
    total_size += bucket_size;
  }
  at::TensorOptions options;
  options = options.dtype(at::kInt);
  options = options.device(replicas_[0][0].device());

  // Group indices and num_bucket together into indices_tensor
  // Broadcast this tensor first, as its size is equal among all processes
  auto indices_tensor = at::empty({total_size + 1}, at::kInt);
  auto indices_accessor = indices_tensor.accessor<int, 1>();
  auto indices_accessor_Index = 0;
  for (size_t i = 0; i < num_buckets; i++) {
    const auto& bucket_size = bucket_indices.at(i).size();
    for (size_t j = 0; j < bucket_size; j++) {
      indices_accessor[indices_accessor_Index++] = bucket_indices[i][j];
    }
  }
  // The final slot carries the sender's bucket count.
  indices_accessor[indices_accessor_Index] = num_buckets;

  // Copy CPU tensor to device tensor, as the process_group_ could be NCCL and
  // it can only broadcast device tensors.
  auto indices_tensor_device = at::empty({total_size + 1}, options);
  indices_tensor_device.copy_(indices_tensor, /*non_blocking=*/true);
  std::vector<at::Tensor> indices_tensor_list = {indices_tensor_device};
  process_group_->broadcast(indices_tensor_list)->wait();
  indices_tensor.copy_(indices_tensor_list.front(), /*non_blocking=*/false);

  // Update num_buckets after receiving it from rank 0
  num_buckets = indices_accessor[indices_accessor_Index];

  // Broadcast bucket_sizes
  auto bucket_sizes_tensor = at::empty({(int64_t)num_buckets}, at::kInt);
  auto bucket_sizes_accessor = bucket_sizes_tensor.accessor<int, 1>();
  for (size_t i = 0; i < num_buckets; i++) {
    // For rank != 0, it is possible that local num buckets bucket_sizes.size()
    // is smaller than broadcasted num_buckets
    bucket_sizes_accessor[i] =
        bucket_sizes.at(std::min(i, (bucket_sizes.size() - 1)));
  }
  auto bucket_sizes_tensor_device = at::empty({(int64_t)num_buckets}, options);
  bucket_sizes_tensor_device.copy_(bucket_sizes_tensor, /*non_blocking=*/true);
  std::vector<at::Tensor> bucket_sizes_tensor_list = {
      bucket_sizes_tensor_device};
  process_group_->broadcast(bucket_sizes_tensor_list)->wait();
  bucket_sizes_tensor.copy_(
      bucket_sizes_tensor_list.front(), /*non_blocking=*/false);

  // Clear bucket_indices first, and then update bucket_indices using received
  // num_buckets, bucket_sizes_tensor and indices_tensor from rank 0
  bucket_indices.clear();
  bucket_indices.reserve(num_buckets);
  indices_accessor_Index = 0;
  for (size_t i = 0; i < num_buckets; i++) {
    const auto& bucket_size = bucket_sizes_accessor[i];
    std::vector<size_t> bucket;
    bucket.reserve(bucket_size);
    for (size_t j = 0; j < bucket_size; j++) {
      bucket.push_back(indices_accessor[indices_accessor_Index++]);
    }
    bucket_indices.emplace_back(std::move(bucket));
  }
}

// Recomputes the bucket assignment from the gradient-arrival order recorded
// in rebuilt_params_ / rebuilt_param_indices_ during the backward pass, syncs
// it across ranks, and returns the agreed-upon indices. Clears the recording
// state and sets has_rebuilt_bucket_ so this happens only once.
std::vector<std::vector<size_t>> Reducer::rebuildBuckets() {
  TORCH_INTERNAL_ASSERT(
      rebuilt_params_.size() == rebuilt_param_indices_.size(),
      "rebuilt parameter tensors size is not same as rebuilt parameter indices size.");
  TORCH_INTERNAL_ASSERT(
      replicas_[0].size() == rebuilt_param_indices_.size(),
      "rebuilt parameter indices size is not same as original model parameters size.");
  std::vector<std::vector<size_t>> rebuilt_bucket_indices;
  std::vector<size_t> bucket_size_limits;
  bucket_size_limits.push_back(kDefaultFirstBucketBytes);
  bucket_size_limits.push_back(bucket_bytes_cap_);
  rebuilt_bucket_indices = compute_bucket_assignment_by_size(
      rebuilt_params_,
      bucket_size_limits,
      expect_sparse_gradients_[0],
      rebuilt_param_indices_);

  // For rebuilt bucket indices, it needs to be synced across all ranks.
  // Broadcast the newly rebuilt bucket indices from rank 0 in default.
  // After syncing up rebuilt bucket indices, initialize buckets for reducer.
  sync_bucket_indices(rebuilt_bucket_indices);

  has_rebuilt_bucket_ = true;
  rebuilt_params_.clear();
  rebuilt_param_indices_.clear();

  return rebuilt_bucket_indices;
}

// See Note [DDP Communication Hook]
void Reducer::register_comm_hook(std::unique_ptr<CommHookInterface> iface) {
  TORCH_CHECK(
      comm_hook_ == nullptr, "register_comm_hook can only be called once.");
  // TODO(@sinannasir): Single process multiple device mode support for DDP
  // communication hook. Related to GH Issue #42542.
  TORCH_CHECK(
      replicas_.size() == 1,
      "Communication hook does not support single process multiple device mode.");

  comm_hook_ = std::move(iface);
}

namespace {

// Tensors may be coalesced into buckets. Buckets must contain tensors of
// the same type, on the same device, so a bucket can be identified by a
// composite key of a tensor's type identifier and its device.
struct BucketKey {
  BucketKey(c10::ScalarType type, c10::Device device)
      : type(std::move(type)), device(std::move(device)) {}

  const c10::ScalarType type;
  const c10::Device device;

  // See torch/csrc/utils/hash.h for dispatch code.
  static size_t hash(const BucketKey& key) {
    return torch::get_hash(key.type, key.device);
  }
};

inline bool operator==(const BucketKey& lhs, const BucketKey& rhs) {
  return lhs.type == rhs.type && lhs.device == rhs.device;
}

} // namespace

// This is equivalent to take_tensors but returns indices into the
// tensor list argument for bucket assignment. Also, it is aware
// of device placement and will not allow buckets to span devices.
// The index of tensors[i] assigned to bucket is tensor_indices[i],
// when tensor_indices is empty, the index of tensors[i] assigned to
// bucket is i.
std::vector<std::vector<size_t>> compute_bucket_assignment_by_size(
    const std::vector<at::Tensor>& tensors,
    const std::vector<size_t>& bucket_size_limits,
    const std::vector<bool>& expect_sparse_gradient,
    const std::vector<int64_t>& tensor_indices) {
  // Either expect_sparse_gradient is not specified or it has as many elements
  // as the vector with tensors.
  TORCH_INTERNAL_ASSERT(
      expect_sparse_gradient.empty() ||
      (tensors.size() == expect_sparse_gradient.size()));
  TORCH_INTERNAL_ASSERT(tensors.size() > 0);

  std::vector<std::vector<size_t>> result;
  result.reserve(tensors.size());

  // Keep iterator into the size_limit vector by tensor type and device.
  // This is done so that we can use the consecutive bucket limits per type.
  std::unordered_map<
      BucketKey,
      std::vector<size_t>::const_iterator,
      torch::hash<BucketKey>>
      bucket_size_limit_iterators;

  // Local accumulator type for a single bucket.
  struct BucketAccumulator {
    std::vector<size_t> indices;
    size_t size = 0;
  };

  // Keep vector of indices and size accumulator by tensor type and device.
  std::unordered_map<BucketKey, BucketAccumulator, torch::hash<BucketKey>>
      buckets;

  for (size_t i = 0; i < tensors.size(); i++) {
    const auto& tensor = tensors[i];
    TORCH_CHECK(!tensor.is_sparse(), "No support for sparse tensors.");

    // when tensor_indices is empty, the index of tensors[i] assigned to
    // bucket is i, otherwise the tensor index is tensor_indices[i].
    auto tensor_index = i;
    if (!tensor_indices.empty()) {
      tensor_index = tensor_indices[i];
    }
    // If we expect a sparse gradient to be produced for this tensor, it cannot
    // be grouped together with other gradients and gets its own bucket.
    if (!expect_sparse_gradient.empty() &&
        expect_sparse_gradient[tensor_index]) {
      result.push_back({tensor_index});
      continue;
    }

    auto key = BucketKey(tensor.scalar_type(), tensor.device());
    auto& bucket = buckets[key];
    bucket.indices.push_back(tensor_index);
    bucket.size += tensor.numel() * tensor.element_size();

    // Initialize bucket size limit iterator if necessary.
    if (bucket_size_limit_iterators.count(key) == 0) {
      bucket_size_limit_iterators[key] = bucket_size_limits.begin();
    }

    auto& bucket_size_limit_iterator = bucket_size_limit_iterators[key];
    const auto bucket_size_limit = *bucket_size_limit_iterator;
    if (bucket.size >= bucket_size_limit) {
      result.emplace_back(std::move(bucket.indices));
      bucket = BucketAccumulator();

      // Advance to the next bucket size limit for this type/device.
      auto next = bucket_size_limit_iterator + 1;
      if (next != bucket_size_limits.end()) {
        bucket_size_limit_iterator = next;
      }
    }
  }

  // Add remaining buckets.
  for (auto& it : buckets) {
    auto& bucket = it.second;
    if (!bucket.indices.empty()) {
      result.emplace_back(std::move(bucket.indices));
    }
  }

  // If tensor_indices is not empty, the order of the tensors is in the
  // gradient ready order, so no need to sort.
  // If tensor_indices is empty, sort resulting buckets by the minimum tensor
  // index they include. We assume that the order of the tensors is the order
  // in which they are used (or the reverse order in which their gradients are
  // produced). This sorting step ensures that the buckets are ready in
  // consecutive order.
  if (tensor_indices.empty()) {
    std::sort(
        result.begin(),
        result.end(),
        [](const std::vector<size_t>& a, const std::vector<size_t>& b) {
          const auto amin = std::min_element(a.begin(), a.end());
          const auto bmin = std::min_element(b.begin(), b.end());
          return *amin < *bmin;
        });
  }

  return result;
}

} // namespace c10d
// // Created by liqinbin on 12/11/20. // #define THRUST_IGNORE_DEPRECATED_CPP_DIALECT #include "thrust/reduce.h" #include "thrust/execution_policy.h" #include "FedTree/util/device_lambda.h" #include "FedTree/metric/pointwise_metric.h" float_type RMSE::get_score(const SyncArray<float_type> &y_p) const { CHECK_EQ(y_p.size(), y.size()); int n_instances = y_p.size(); SyncArray<float_type> sq_err(n_instances); auto sq_err_data = sq_err.host_data(); const float_type *y_data = y.host_data(); const float_type *y_predict_data = y_p.host_data(); #pragma omp parallel for for (int i = 0; i < n_instances; i++){ float_type e = y_predict_data[i] - y_data[i]; sq_err_data[i] = e * e; } float_type rmse = sqrtf(thrust::reduce(thrust::host, sq_err.host_data(), sq_err.host_end()) / n_instances); return rmse; }
/** \copyright
 * Copyright (c) 2014, Balazs Racz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  - Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * \file Packet.hxx
 *
 * Defines a DCC Packet structure.
 *
 * @author Balazs Racz
 * @date 10 May 2014
 */

#ifndef _DCC_PACKET_HXX_
#define _DCC_PACKET_HXX_

#include <stdint.h>
#include <string.h>

#include "dcc/Address.hxx"
#include "dcc/packet.h"

namespace dcc
{

/** Represents a command to be sent to the track driver. Most of the commands
 * are "send packet X", but there are some that are controlling the track
 * booster itself, such as "power off". */
struct Packet : public DCCPacket
{
    /** Maximum number of payload bytes. */
    static const unsigned MAX_PAYLOAD = DCC_PACKET_MAX_PAYLOAD;
    /** Send this speed step to emergency-stop the locomotive. */
    static const unsigned EMERGENCY_STOP = DCC_PACKET_EMERGENCY_STOP;
    /** Send this speed step to switch direction of the locomotive. Only used
     * for marklin-14-step speed commands. (Shares the value of
     * EMERGENCY_STOP.) */
    static const unsigned CHANGE_DIR = DCC_PACKET_EMERGENCY_STOP;
    /** Used for page-preset packets. */
    static const unsigned PAGE_REGISTER_ID = 0b101;

    /// Default constructor: produces an empty (zeroed) packet.
    Packet()
    {
        clear();
    }

    /// Tag type selecting the DCC-IDLE constructor overload below.
    struct DCC_IDLE
    {
    };

    /// Constructor generating a DCC IDLE packet. @param i indicator type.
    Packet(DCC_IDLE i)
    {
        clear();
        set_dcc_idle();
    }

    /// Resets the packet to empty (zeroes the entire DCCPacket base).
    void clear()
    {
        memset((DCCPacket*)this, 0, sizeof(*this));
    }

    /** @return true if this is a packet, false if it is a command to the
     * track processor. */
    bool IsPacket()
    {
        return packet_header.is_pkt;
    }

    /// Sets the packet to a standalone command. @param cmd is the standalone
    /// command to send. Must have the low bit set.
    void set_cmd(uint8_t cmd)
    {
        dlc = 0;
        HASSERT(cmd & 1);
        header_raw_data = cmd;
    }

    /** Initializes the packet structure for a regular DCC packet. */
    void start_dcc_packet()
    {
        header_raw_data = 0;
        dlc = 0;
    }

    /** Initializes the packet structure for a DCC service-mode packet, which
     * is sent with a long preamble. */
    void start_dcc_svc_packet()
    {
        header_raw_data = 0;
        dlc = 0;
        packet_header.send_long_preamble = true;
    }

    /// Adds the header to the packet needed for addressing a DCC
    /// locomotive. @param address is the DCC (short) address.
    void add_dcc_address(DccShortAddress address);

    /// Adds the header to the packet needed for addressing a DCC
    /// locomotive. @param address is the DCC (long) address.
    void add_dcc_address(DccLongAddress address);

    /** Adds a speed-and-direction command (dcc baseline command) to the
     * packet. Speed is maximum 14. This should be called after
     * add_dcc_address. */
    /// @param is_fwd true for forward speed
    /// @param light true for light on, false for off
    /// @param speed the speed step to set (0..14)
    void add_dcc_speed14(bool is_fwd, bool light, unsigned speed);

    /** Adds a speed-and-direction command (dcc baseline command) to the
     * packet. Speed is maximum 14. This should be called after
     * add_dcc_address. */
    /// @param a the DCC address
    /// @param is_fwd true for forward speed
    /// @param light true for light on, false for off
    /// @param speed the speed step to set (0..14)
    template <class A>
    void set_dcc_speed14(A a, bool is_fwd, bool light, unsigned speed)
    {
        add_dcc_address(a);
        add_dcc_speed14(is_fwd, light, speed);
    }

    /** Adds a speed-and-direction command (dcc baseline command) to the
     * packet. Speed is maximum 28. This should be called after
     * add_dcc_address. */
    /// @param is_fwd true for forward speed
    /// @param speed the speed step to set (0..28)
    void add_dcc_speed28(bool is_fwd, unsigned speed);

    /// @param a the DCC address
    /// @param is_fwd true for forward speed
    /// @param speed the speed step to set (0..28)
    template <class A> void set_dcc_speed28(A a, bool is_fwd, unsigned speed)
    {
        add_dcc_address(a);
        add_dcc_speed28(is_fwd, speed);
    }

    /** Adds a speed-and-direction command (dcc extended command) for 128 speed
     * steps to the packet. Speed is maximum 126. This should be called after
     * add_dcc_address. */
    /// @param is_fwd true for forward speed
    /// @param speed the speed step to set (0..126)
    void add_dcc_speed128(bool is_fwd, unsigned speed);

    /// @param a the DCC address
    /// @param is_fwd true for forward speed
    /// @param speed the speed step to set (0..126)
    template <class A> void set_dcc_speed128(A a, bool is_fwd, unsigned speed)
    {
        add_dcc_address(a);
        add_dcc_speed128(is_fwd, speed);
    }

    /** Adds a DCC function group command to the packet. The lowest numbered
     * function is always at bit zero. @param values are bitmask of functions
     * to send to the loco. */
    void add_dcc_function0_4(unsigned values);

    /** Adds a DCC function group command to the packet. The lowest numbered
     * function is always at bit zero. @param values are bitmask of functions
     * to send to the loco. */
    void add_dcc_function5_8(unsigned values);

    /** Adds a DCC function group command to the packet. The lowest numbered
     * function is always at bit zero. @param values are bitmask of functions
     * to send to the loco. */
    void add_dcc_function9_12(unsigned values);

    /** Adds a DCC function group command to the packet. The lowest numbered
     * function is always at bit zero. @param values are bitmask of functions
     * to send to the loco. */
    void add_dcc_function13_20(unsigned values)
    {
        add_dcc_function_hi(13, values);
    }

    /** Adds a DCC function group command to the packet. The lowest numbered
     * function is always at bit zero. @param values are bitmask of functions
     * to send to the loco. */
    void add_dcc_function21_28(unsigned values)
    {
        add_dcc_function_hi(21, values);
    }

    /** Adds a DCC function group command to the packet. The lowest numbered
     * function is always at bit zero.
     * @param base is a valid function number base, 13, 21, 29, 37, 45, 53
     * or 61.
     * @param values are bitmask of functions to send to the loco. */
    void add_dcc_function_hi(uint8_t base, uint8_t values);

    /** Adds a DCC binary state control command to the packet. Automatically
     * picks the short or long form, depending on the range of the argument.
     * @param fn is a binary function variable, 0 to 32767.
     * @param value true/false, what to set to. */
    void add_dcc_binary_state(uint16_t fn, bool value);

    /** Adds a DCC analog function control command to the packet.
     * @param fn is an analog function variable, 0 to 255.
     * @param value to set it to, 0 to 255. */
    void add_dcc_analog_function(uint8_t fn, uint8_t value);

    /** Helper function for adding programming mode packets. */
    void add_dcc_prog_command(
        uint8_t cmd_hi, unsigned cv_number, uint8_t value);

    /** Adds a DCC POM read single CV command and the xor byte. This should be
     * called after add_dcc_address. @param cv_number which CV to read. */
    void add_dcc_pom_read1(unsigned cv_number);

    /** Adds a DCC POM write single CV command and the xor byte. This should be
     * called after add_dcc_address.
     * @param cv_number which CV to write - 1,
     * @param value is the value to set it to. */
    void add_dcc_pom_write1(unsigned cv_number, uint8_t value);

    /** Sets the packet to a DCC service mode packet verifying the contents of
     * an entire CV. This function does not need a DCC address. (Includes the
     * checksum.)
     * @param cv_number which CV to test - 1,
     * @param value is the value to test. */
    void set_dcc_svc_verify_byte(unsigned cv_number, uint8_t value);

    /** Sets the packet to a DCC service mode packet writing the contents of
     * an entire CV. This function does not need a DCC address. (Includes the
     * checksum.)
     * @param cv_number which CV to write - 1,
     * @param value is the value to write. */
    void set_dcc_svc_write_byte(unsigned cv_number, uint8_t value);

    /** Sets the packet to a DCC service mode packet verifying the contents of
     * a single bit in a CV. This function does not need a DCC address.
     * (Includes the checksum.)
     * @param cv_number which CV to test - 1,
     * @param bit is 0..7 to set which bit to test
     * @param expected is true if bit==1 should be tested */
    void set_dcc_svc_verify_bit(
        unsigned cv_number, unsigned bit, bool expected);

    /** Sets the packet to a DCC service mode packet writing the contents of
     * a single bit in a CV. This function does not need a DCC address.
     * (Includes the checksum.)
     * @param cv_number which CV to edit - 1,
     * @param bit is 0..7 to define which bit to edit
     * @param desired is true if bit:=1 should be written */
    void set_dcc_svc_write_bit(unsigned cv_number, unsigned bit, bool desired);

    /** Sets the packet to a DCC service mode packet in Paged Mode, setting the
     * page register. This function does not need a DCC address. Includes the
     * checksum.
     * @param page Page to set, 1 is the default page, zero is reserved, 255
     * max. */
    void set_dcc_svc_paged_set_page(unsigned page = 1)
    {
        set_dcc_svc_paged_write_reg(PAGE_REGISTER_ID, page);
    }

    /** Sets the packet to a DCC service mode packet in Paged Mode, setting any
     * register. This function does not need a DCC address. Includes the
     * checksum.
     * @param reg register, 0 to 7. On the default page register 0 is CV1
     * (address).
     * @param value Payload to write to that register. 0 to 255. */
    void set_dcc_svc_paged_write_reg(uint8_t reg, uint8_t value);

    /** Sets the packet to a DCC service mode packet in Paged Mode, verifying
     * any register. This function does not need a DCC address. Includes the
     * checksum.
     * @param reg register, 0 to 7. On the default page register 0 is CV1
     * (address).
     * @param value Payload to check on that register. */
    void set_dcc_svc_paged_verify_reg(uint8_t reg, uint8_t value);

    /** Adds a DCC basic accessory decoder command packet and the checksum
     * byte.
     * @param address is the unencoded 12-bit address, containing both the A
     * bits and the D bits in the DCC standard, in the range of 0..4095. The
     * values of 4088-4095 are broadcast addresses.
     * @param is_activate is true for setting bit C to one, false for setting
     * bit C to zero. Usually commandstations set is_activate to one and use
     * the lowest bit of the address to decide whether the turnouts should be
     * closed or thrown. */
    void add_dcc_basic_accessory(unsigned address, bool is_activate);

    /** Appends one byte to the packet payload that represents the XOR checksum
     * for DCC. */
    void add_dcc_checksum();

    /** Creates a DCC idle packet. (Includes the checksum.) */
    void set_dcc_idle();

    /** Creates a DCC reset-all-decoders packet. (Includes the checksum.) */
    void set_dcc_reset_all_decoders();

    /** Sets the packet type to marklin-motorola. Initializes the packet with
     * 18 zero bits. */
    void start_mm_packet();

    /** Sets the address and F0 bits of an MM packet to a specific loco
     * address. */
    void add_mm_address(MMAddress address, bool light);

    /** Sets the packet to a 14-step MM speed-and-light packet. Max value of
     * speed is 14. A special value of speed == CHANGE_DIR will signal
     * direction change. */
    void add_mm_speed(unsigned speed);

    /** Sets the packet to a direction-aware 14-step MM speed-and-light
     * packet. Max value of speed is 14. */
    /// @param is_fwd true for forward speed
    /// @param speed the speed step to set (0..14)
    void add_mm_new_speed(bool is_fwd, unsigned speed);

    /** Creates a speed-and-fn packet for the new MM format.
     * @param fn_num is the function, valid values = 1..4
     * @param value is whether the function is on or off
     * @param speed is the speed step (0..14). If it is set to emergency stop,
     * then no function packet will be generated. */
    void add_mm_new_fn(unsigned fn_num, bool value, unsigned speed);

    /** Shifts a MM packet to the second half of the packet buffer. After this
     * call another add_mm_speed call is valid, which will fill in the first
     * half of the double packet. */
    void mm_shift();

private:
    /** Sets the speed bits of an MM packet. Clips speed to 15, avoids speed==1
     * and returns the final speed step number. @param speed is the raw speed
     * step value (0..14 but more are accepted). @return the final speed step
     * number */
    unsigned set_mm_speed_bits(unsigned speed);
};

} // namespace dcc

#endif // _DCC_PACKET_HXX_
//===-- checksum.cc ---------------------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "checksum.h" #include "atomic_helpers.h" #if defined(__x86_64__) || defined(__i386__) #include <cpuid.h> #elif defined(__arm__) || defined(__aarch64__) #if SCUDO_FUCHSIA #include <zircon/features.h> #include <zircon/syscalls.h> #else #include <sys/auxv.h> #endif #endif namespace scudo { atomic_u8 HashAlgorithm = {BSDChecksum}; #if defined(__x86_64__) || defined(__i386__) // i386 and x86_64 specific code to detect CRC32 hardware support via CPUID. // CRC32 requires the SSE 4.2 instruction set. #ifndef bit_SSE4_2 #define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines. #endif bool hasHardwareCRC32() { u32 Eax, Ebx = 0, Ecx = 0, Edx = 0; __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx); const bool IsIntel = (Ebx == signature_INTEL_ebx) && (Edx == signature_INTEL_edx) && (Ecx == signature_INTEL_ecx); const bool IsAMD = (Ebx == signature_AMD_ebx) && (Edx == signature_AMD_edx) && (Ecx == signature_AMD_ecx); if (!IsIntel && !IsAMD) return false; __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx); return !!(Ecx & bit_SSE4_2); } #elif defined(__arm__) || defined(__aarch64__) #ifndef AT_HWCAP #define AT_HWCAP 16 #endif #ifndef HWCAP_CRC32 #define HWCAP_CRC32 (1U << 7) // HWCAP_CRC32 is missing on older platforms. #endif bool hasHardwareCRC32() { #if SCUDO_FUCHSIA u32 HWCap; const zx_status_t Status = zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap); if (Status != ZX_OK) return false; return !!(HWCap & ZX_ARM64_FEATURE_ISA_CRC32); #else return !!(getauxval(AT_HWCAP) & HWCAP_CRC32); #endif // SCUDO_FUCHSIA } #endif // defined(__x86_64__) || defined(__i386__) } // namespace scudo
#pragma once
#include <string>
#include <chrono>
#include <ostream>

namespace cpb {

/** High resolution timer (below 1 microsecond accuracy).

    Usage: call tic() to mark a starting instant, toc() to capture the time
    elapsed since then; elapsed_seconds() reports the captured interval.
 */
class Chrono {
public:
    /// The timer starts measuring immediately upon construction.
    Chrono() { tic(); }

    /// Mark the current instant as the measurement start point.
    void tic() { tic_time = std::chrono::high_resolution_clock::now(); }

    /// Capture the interval since the last tic(), replacing any previous value.
    Chrono& toc() {
        auto const now = std::chrono::high_resolution_clock::now();
        elapsed = now - tic_time;
        return *this;
    }

    /// Capture the interval since the last tic(), adding it to the running total.
    Chrono& toc_accumulate() {
        auto const now = std::chrono::high_resolution_clock::now();
        elapsed += now - tic_time;
        return *this;
    }

    /// Measure how long `lambda` takes to execute (tic -> call -> toc).
    template<class Fn>
    Chrono& timeit(Fn lambda) {
        tic();
        lambda();
        return toc();
    }

    /// Captured interval converted to (fractional) seconds.
    double elapsed_seconds() const {
        auto const ns = elapsed.count();
        return static_cast<double>(ns) * 1e-9;
    }

    /// Human-readable rendering of the captured interval (defined out of line).
    std::string str() const;

    /// Print `msg` followed by the captured interval (defined out of line).
    Chrono& print(std::string msg = "");

    friend std::ostream& operator<<(std::ostream& os, Chrono const& chrono) {
        return os << chrono.str();
    }

private:
    std::chrono::time_point<std::chrono::high_resolution_clock> tic_time;
    std::chrono::nanoseconds elapsed{0};
};

} // namespace cpb
/*========================================================================= Program: Visualization Toolkit Module: vtkWin32OpenGLRenderWindow.cxx Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ #include "vtkWin32OpenGLRenderWindow.h" #include "vtkIdList.h" #include "vtkCommand.h" #include "vtkObjectFactory.h" #include "vtkOpenGLRenderer.h" #include "vtkOpenGLRenderWindow.h" #include "vtkOpenGLError.h" #include "vtkRendererCollection.h" #include "vtkWin32RenderWindowInteractor.h" #include <math.h> #include <sstream> #include "vtkOpenGLError.h" vtkStandardNewMacro(vtkWin32OpenGLRenderWindow); vtkWin32OpenGLRenderWindow::vtkWin32OpenGLRenderWindow() { this->ApplicationInstance = NULL; this->Palette = NULL; this->ContextId = 0; this->WindowId = 0; this->ParentId = 0; this->NextWindowId = 0; this->DeviceContext = (HDC)0; // hsr this->MFChandledWindow = FALSE; // hsr this->StereoType = VTK_STEREO_CRYSTAL_EYES; this->CursorHidden = 0; this->Capabilities = 0; this->ScreenDeviceContext = (HDC)0; this->MemoryHdc = (HDC)0; this->CreatingOffScreenWindow = 0; this->WindowIdReferenceCount = 0; this->ScreenMapped = this->Mapped; this->ScreenWindowSize[0] = this->Size[0]; this->ScreenWindowSize[1] = this->Size[1]; this->ScreenDeviceContext = this->DeviceContext; this->ScreenDoubleBuffer = this->DoubleBuffer; this->ScreenContextId = this->ContextId; } vtkWin32OpenGLRenderWindow::~vtkWin32OpenGLRenderWindow() { this->Finalize(); vtkRenderer *ren; vtkCollectionSimpleIterator rit; this->Renderers->InitTraversal(rit); while ( (ren = this->Renderers->GetNextRenderer(rit)) ) { ren->SetRenderWindow(NULL); } delete[] 
this->Capabilities; } void vtkWin32OpenGLRenderWindow::Clean() { /* finish OpenGL rendering */ if (this->OwnContext && this->ContextId) { this->MakeCurrent(); this->CleanUpRenderers(); // Note: wglMakeCurrent(NULL,NULL) is valid according to the documentation // and works with nVidia and ATI but not with Intel. Passing an existing // device context works in any case. // see VTK Bug 7119. if(wglMakeCurrent(this->DeviceContext,NULL)!=TRUE) { vtkErrorMacro("wglMakeCurrent failed in Clean(), error: " << GetLastError()); } if (wglDeleteContext(this->ContextId) != TRUE) { vtkErrorMacro("wglDeleteContext failed in Clean(), error: " << GetLastError()); } } this->ContextId = NULL; if (this->Palette) { SelectPalette(this->DeviceContext, this->OldPalette, FALSE); // SVA delete the old palette DeleteObject(this->Palette); this->Palette = NULL; } } void vtkWin32OpenGLRenderWindow::CleanUpRenderers() { // tell each of the renderers that this render window/graphics context // is being removed (the RendererCollection is removed by vtkRenderWindow's // destructor) vtkRenderer *ren; vtkCollectionSimpleIterator rsit; for (this->Renderers->InitTraversal(rsit); (ren = this->Renderers->GetNextRenderer(rsit));) { ren->SetRenderWindow(NULL); } this->ReleaseGraphicsResources(); } LRESULT APIENTRY vtkWin32OpenGLRenderWindow::WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) { LRESULT res; vtkWin32OpenGLRenderWindow *me = (vtkWin32OpenGLRenderWindow *)vtkGetWindowLong(hWnd,sizeof(vtkLONG)); if (me && me->GetReferenceCount()>0) { me->Register(me); res = me->MessageProc(hWnd, message, wParam, lParam); me->UnRegister(me); } else { res = DefWindowProc(hWnd, message, wParam, lParam); } return res; } void vtkWin32OpenGLRenderWindow::SetWindowName( const char * _arg ) { vtkWindow::SetWindowName(_arg); if (this->WindowId) { #ifdef UNICODE wchar_t *wname = new wchar_t [mbstowcs(NULL, this->WindowName, 32000)+1]; mbstowcs(wname, this->WindowName, 32000); SetWindowText(this->WindowId, 
wname); delete [] wname; #else SetWindowText(this->WindowId, this->WindowName); #endif } } int vtkWin32OpenGLRenderWindow::GetEventPending() { MSG msg; if (PeekMessage(&msg,this->WindowId,WM_MOUSEFIRST,WM_MOUSELAST,PM_NOREMOVE)) { if (msg.message == WM_MOUSEMOVE) { PeekMessage(&msg,this->WindowId,WM_MOUSEFIRST,WM_MOUSELAST,PM_REMOVE); } if ((msg.message == WM_LBUTTONDOWN) || (msg.message == WM_RBUTTONDOWN) || (msg.message == WM_MBUTTONDOWN)) { return 1; } } return 0; } // ---------------------------------------------------------------------------- bool vtkWin32OpenGLRenderWindow::InitializeFromCurrentContext() { HGLRC currentContext = wglGetCurrentContext(); if (currentContext != NULL) { this->SetWindowId(WindowFromDC(wglGetCurrentDC())); this->SetDeviceContext(wglGetCurrentDC()); this->SetContextId(currentContext); this->OpenGLInit(); this->OwnContext = 0; return true; } return false; } // ---------------------------------------------------------------------------- void vtkWin32OpenGLRenderWindow::MakeCurrent() { // Try to avoid doing anything (for performance). 
HGLRC current = wglGetCurrentContext(); if (this->ContextId != current) { if(this->IsPicking && current) { vtkErrorMacro("Attempting to call MakeCurrent for a different window" " than the one doing the picking, this can causes crashes" " and/or bad pick results"); } else { if (wglMakeCurrent(this->DeviceContext, this->ContextId) != TRUE) { LPVOID lpMsgBuf; ::FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language (LPTSTR) &lpMsgBuf, 0, NULL ); if(lpMsgBuf) { #ifdef UNICODE wchar_t *wmsg = new wchar_t [mbstowcs(NULL, (const char*)lpMsgBuf, 32000)+1]; wchar_t *wtemp = new wchar_t [mbstowcs(NULL, "wglMakeCurrent failed in MakeCurrent(), error: ", 32000)+1]; mbstowcs(wmsg, (const char*)lpMsgBuf, 32000); mbstowcs(wtemp, "wglMakeCurrent failed in MakeCurrent(), error: ", 32000); vtkErrorMacro(<< wcscat(wtemp, wmsg)); delete [] wmsg; delete [] wtemp; #else vtkErrorMacro("wglMakeCurrent failed in MakeCurrent(), error: " << (LPCTSTR)lpMsgBuf); #endif ::LocalFree( lpMsgBuf ); } } } } } // ---------------------------------------------------------------------------- // Description: // Tells if this window is the current OpenGL context for the calling thread. 
bool vtkWin32OpenGLRenderWindow::IsCurrent()
{
  // Current iff we own a context and it is the one bound on this thread.
  return this->ContextId!=0 && this->ContextId==wglGetCurrentContext();
}

// ----------------------------------------------------------------------------
// Computes the outer window rectangle (including decorations when `borders`
// is set) that yields a client area of width x height positioned at (x, y).
void AdjustWindowRectForBorders(const int borders, const int x, const int y,
                                const int width, const int height, RECT &r)
{
  DWORD style = WS_CLIPCHILDREN /*| WS_CLIPSIBLINGS*/;
  if (borders)
  {
    style |= WS_OVERLAPPEDWINDOW;
  }
  else
  {
    style |= WS_POPUP;
  }
  r.left = x;
  r.top = y;
  r.right = r.left + width;
  r.bottom = r.top + height;
  BOOL result = AdjustWindowRect(&r, style, FALSE);
  if (!result)
  {
    vtkGenericWarningMacro("AdjustWindowRect failed, error: "
                           << GetLastError());
  }
}

// ----------------------------------------------------------------------------
// Resizes the render window. The `resizing` static acts as a re-entrancy
// guard: SetWindowPos triggers WM_SIZE, which would otherwise call back here.
void vtkWin32OpenGLRenderWindow::SetSize(int x, int y)
{
  static int resizing = 0;
  if ((this->Size[0] != x) || (this->Size[1] != y))
  {
    this->Modified();
    this->Size[0] = x;
    this->Size[1] = y;

    if (this->Interactor)
    {
      this->Interactor->SetSize(x, y);
    }

    if (this->OffScreenRendering)
    {
      if(!this->CreatingOffScreenWindow)
      {
        if (!resizing)
        {
          resizing = 1;
          // Offscreen buffers cannot be resized in place; recreate them.
          this->CleanUpOffScreenRendering();
          this->CreateOffScreenWindow(x,y);
          resizing = 0;
        }
      }
    }
    else if (this->Mapped)
    {
      if (!resizing)
      {
        resizing = 1;

        if (this->ParentId)
        {
          // Child window: the requested size is the client size directly.
          SetWindowExtEx(this->DeviceContext, x, y, NULL);
          SetViewportExtEx(this->DeviceContext, x, y, NULL);
          SetWindowPos(this->WindowId, HWND_TOP, 0, 0, x, y,
                       SWP_NOMOVE | SWP_NOZORDER);
        }
        else
        {
          // Top-level window: grow the outer rect so the CLIENT area is x*y.
          RECT r;
          AdjustWindowRectForBorders(this->Borders, 0, 0, x, y, r);
          SetWindowPos(this->WindowId, HWND_TOP, 0, 0,
                       r.right - r.left,
                       r.bottom - r.top,
                       SWP_NOMOVE | SWP_NOZORDER);
        }
        resizing = 0;
      }
    }
  }
}

// Moves the window; same static re-entrancy guard pattern as SetSize.
void vtkWin32OpenGLRenderWindow::SetPosition(int x, int y)
{
  static int resizing = 0;

  if ((this->Position[0] != x) || (this->Position[1] != y))
  {
    this->Modified();
    this->Position[0] = x;
    this->Position[1] = y;
    if (this->Mapped)
    {
      if (!resizing)
      {
        resizing = 1;
        SetWindowPos(this->WindowId,HWND_TOP,x,y,
                     0, 0, SWP_NOSIZE | SWP_NOZORDER);
resizing = 0; } } } } // End the rendering process and display the image. void vtkWin32OpenGLRenderWindow::Frame(void) { this->MakeCurrent(); if (!this->AbortRender && this->DoubleBuffer && this->SwapBuffers) { // If this check is not enforced, we crash in offscreen rendering if (this->DeviceContext) { // use global scope to get Win32 API SwapBuffers and not be // confused with this->SwapBuffers ::SwapBuffers(this->DeviceContext); vtkDebugMacro(<< " SwapBuffers\n"); } } else { glFlush(); } } int vtkWin32OpenGLRenderWindow::SupportsOpenGL() { MakeCurrent(); if (!this->DeviceContext) { return 0; } int pixelFormat = GetPixelFormat(this->DeviceContext); PIXELFORMATDESCRIPTOR pfd; DescribePixelFormat(this->DeviceContext, pixelFormat, sizeof(PIXELFORMATDESCRIPTOR), &pfd); return (pfd.dwFlags & PFD_SUPPORT_OPENGL) ? 1:0; } int vtkWin32OpenGLRenderWindow::IsDirect() { MakeCurrent(); if (!this->DeviceContext) { return 0; } int pixelFormat = GetPixelFormat(this->DeviceContext); PIXELFORMATDESCRIPTOR pfd; DescribePixelFormat(this->DeviceContext, pixelFormat, sizeof(PIXELFORMATDESCRIPTOR), &pfd); return (pfd.dwFlags & PFD_GENERIC_FORMAT) ? 
0:1; } const char* vtkWin32OpenGLRenderWindow::ReportCapabilities() { MakeCurrent(); if (!this->DeviceContext) { return "no device context"; } int pixelFormat = GetPixelFormat(this->DeviceContext); PIXELFORMATDESCRIPTOR pfd; DescribePixelFormat(this->DeviceContext, pixelFormat, sizeof(PIXELFORMATDESCRIPTOR), &pfd); const char *glVendor = (const char *) glGetString(GL_VENDOR); const char *glRenderer = (const char *) glGetString(GL_RENDERER); const char *glVersion = (const char *) glGetString(GL_VERSION); std::ostringstream strm; if(glVendor) strm << "OpenGL vendor string: " << glVendor << endl; if(glRenderer) strm << "OpenGL renderer string: " << glRenderer << endl; if(glVersion) strm << "OpenGL version string: " << glVersion << endl; strm << "OpenGL extensions: " << endl; GLint n, i; glGetIntegerv(GL_NUM_EXTENSIONS, &n); for (i = 0; i < n; i++) { const char *ext = (const char *)glGetStringi(GL_EXTENSIONS, i); strm << " " << ext << endl; } strm << "PixelFormat Descriptor:" << endl; strm << "depth: " << static_cast<int>(pfd.cDepthBits) << endl; if (pfd.cColorBits <= 8) { strm << "class: PseudoColor" << endl; } else { strm << "class: TrueColor" << endl; } strm << "buffer size: " << static_cast<int>(pfd.cColorBits) << endl; strm << "level: " << static_cast<int>(pfd.bReserved) << endl; if (pfd.iPixelType == PFD_TYPE_RGBA) { strm << "renderType: rgba" << endl; } else { strm <<"renderType: ci" << endl; } if (pfd.dwFlags & PFD_DOUBLEBUFFER) { strm << "double buffer: True" << endl; } else { strm << "double buffer: False" << endl; } if (pfd.dwFlags & PFD_STEREO) { strm << "stereo: True" << endl; } else { strm << "stereo: False" << endl; } if (pfd.dwFlags & PFD_GENERIC_FORMAT) { strm << "hardware acceleration: False" << endl; } else { strm << "hardware acceleration: True" << endl; } strm << "rgba: redSize=" << static_cast<int>(pfd.cRedBits) << " greenSize=" << static_cast<int>(pfd.cGreenBits) << "blueSize=" << static_cast<int>(pfd.cBlueBits) << "alphaSize=" << 
// --- tail of GetCapabilities() ---------------------------------------------
// The head of this function (declaring 'strm' and 'pfd') lies before this
// chunk.  It finishes formatting a human-readable report of the current pixel
// format and caches it in this->Capabilities (owned here, replaced per call).
static_cast<int>(pfd.cAlphaBits) << endl;
strm << "aux buffers: " << static_cast<int>(pfd.cAuxBuffers)<< endl;
strm << "depth size: " << static_cast<int>(pfd.cDepthBits) << endl;
strm << "stencil size: " << static_cast<int>(pfd.cStencilBits) << endl;
strm << "accum: redSize=" << static_cast<int>(pfd.cAccumRedBits) << " greenSize=" << static_cast<int>(pfd.cAccumGreenBits) << "blueSize=" << static_cast<int>(pfd.cAccumBlueBits) << "alphaSize=" << static_cast<int>(pfd.cAccumAlphaBits) << endl;

// Replace any previously cached report (delete[] of NULL is safe).
delete[] this->Capabilities;
size_t len = strm.str().length() + 1;
this->Capabilities = new char[len];
strncpy(this->Capabilities, strm.str().c_str(), len);
return this->Capabilities;
}

// Function-pointer type for the WGL_ARB_pixel_format extension entry point
// wglChoosePixelFormatARB, resolved at run time via wglGetProcAddress.
typedef bool (APIENTRY *wglChoosePixelFormatARBType)(HDC, const int*, const float*, unsigned int, int*, unsigned int*);

// Choose and set a pixel format on hDC, install a logical palette when the
// format needs one, and create the OpenGL rendering context (this->ContextId).
//   hDC     - device context to configure
//   dwFlags - PFD_* flags describing the desired surface (window/bitmap/stereo...)
//   debug   - when non-zero, warn if stereo was requested but not granted
//   bpp     - requested color depth in bits
//   zbpp    - requested depth-buffer depth in bits
void vtkWin32OpenGLRenderWindow::SetupPixelFormatPaletteAndContext(
  HDC hDC, DWORD dwFlags, int debug, int bpp, int zbpp)
{
  // Create a dummy window, needed for calling wglGetProcAddress: WGL entry
  // points can only be resolved once some context is current.
#ifdef UNICODE
  HWND tempId = CreateWindow(L"vtkOpenGL", 0, 0, 0, 0, 1, 1, 0, 0, this->ApplicationInstance, 0);
#else
  HWND tempId = CreateWindow("vtkOpenGL", 0, 0, 0, 0, 1, 1, 0, 0, this->ApplicationInstance, 0);
#endif
  HDC tempDC = GetDC(tempId);
  PIXELFORMATDESCRIPTOR tempPfd;
  memset(&tempPfd, 0, sizeof(PIXELFORMATDESCRIPTOR));
  tempPfd.nSize = sizeof(PIXELFORMATDESCRIPTOR);
  tempPfd.nVersion = 1;
  tempPfd.dwFlags = PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW;
  tempPfd.iPixelType = PFD_TYPE_RGBA;
  int tempPixelFormat = ChoosePixelFormat(tempDC, &tempPfd);
  SetPixelFormat(tempDC, tempPixelFormat, &tempPfd);
  HGLRC tempContext = wglCreateContext(tempDC);
  wglMakeCurrent(tempDC, tempContext);

  // make sure glew is initialized with fake window
  this->OpenGLInit();

  // First we try to use the newer wglChoosePixelFormatARB which enables
  // features like multisamples.
  PIXELFORMATDESCRIPTOR pfd;
  int pixelFormat = 0;
  wglChoosePixelFormatARBType wglChoosePixelFormatARB =
    reinterpret_cast<wglChoosePixelFormatARBType>(wglGetProcAddress("wglChoosePixelFormatARB"));
  if ((dwFlags & PFD_DRAW_TO_WINDOW) && wglChoosePixelFormatARB)
  {
    // Attribute list is laid out as {key, value} pairs, 0-terminated.
    // The trailing zeros reserve room for the optional attributes appended
    // below (alpha, stencil, stereo, multisample).
    int attrib[] = {
      WGL_ACCELERATION_ARB, WGL_FULL_ACCELERATION_ARB,
      WGL_SUPPORT_OPENGL_ARB, TRUE,
      WGL_DRAW_TO_WINDOW_ARB, TRUE,
      WGL_DOUBLE_BUFFER_ARB, TRUE,
      WGL_COLOR_BITS_ARB, bpp/4*3,
      WGL_DEPTH_BITS_ARB, zbpp/4*3,
      WGL_PIXEL_TYPE_ARB, WGL_TYPE_RGBA_ARB,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    unsigned int n = 14;  // index of the first free slot in attrib[]
    if (this->AlphaBitPlanes)
    {
      attrib[n] = WGL_ALPHA_BITS_ARB;
      attrib[n+1] = bpp/4;
      n += 2;
    }
    if (this->StencilCapable)
    {
      attrib[n] = WGL_STENCIL_BITS_ARB;
      attrib[n+1] = 8;
      n += 2;
    }
    if (dwFlags & PFD_STEREO)
    {
      attrib[n] = WGL_STEREO_ARB;
      attrib[n+1] = TRUE;
      n += 2;
    }
    // Remember where the sample count lives so it can be halved on failure.
    unsigned int multiSampleAttributeIndex = 0;
    if (this->MultiSamples > 1 && wglewIsSupported("WGL_ARB_multisample"))
    {
      attrib[n] = WGL_SAMPLE_BUFFERS_ARB;
      attrib[n+1] = 1;
      attrib[n+2] = WGL_SAMPLES_ARB;
      attrib[n+3] = this->MultiSamples;
      multiSampleAttributeIndex = n+3;
      n += 4;
    }
    unsigned int numFormats;
    if (!wglChoosePixelFormatARB(hDC, attrib, 0, 1, &pixelFormat, &numFormats))
    {
      // If the requested number of multisamples does not work, try
      // scaling down the number of multisamples a few times.
      if (multiSampleAttributeIndex)
      {
        attrib[multiSampleAttributeIndex] /= 2;
        if (!wglChoosePixelFormatARB(hDC, attrib, 0, 1, &pixelFormat, &numFormats))
        {
          attrib[multiSampleAttributeIndex] /= 2;
          wglChoosePixelFormatARB(hDC, attrib, 0, 1, &pixelFormat, &numFormats);
        }
      }
    }
    DescribePixelFormat(hDC, pixelFormat, sizeof(pfd), &pfd);
    if (!SetPixelFormat(hDC, pixelFormat, &pfd))
    {
      // Fall through to the legacy ChoosePixelFormat path below.
      pixelFormat = 0;
    }
    else
    {
      if (debug && (dwFlags & PFD_STEREO) && !(pfd.dwFlags & PFD_STEREO))
      {
        vtkGenericWarningMacro("No Stereo Available!");
        this->StereoCapableWindow = 0;
      }
    }
  }

  // If we got a valid pixel format in the process, we are done.
  // Otherwise, we use the old approach of using ChoosePixelFormat.
  if (pixelFormat)
  {
    this->SetupPalette(hDC);

    // create a context
#define USE_32_CONTEXT
#ifdef USE_32_CONTEXT
    // Prefer an OpenGL 3.2 context when the driver exposes
    // wglCreateContextAttribsARB (WGL_ARB_create_context).
    PFNWGLCREATECONTEXTATTRIBSARBPROC wglCreateContextAttribsARB =
      reinterpret_cast<PFNWGLCREATECONTEXTATTRIBSARBPROC>(wglGetProcAddress("wglCreateContextAttribsARB"));
    if (wglCreateContextAttribsARB)
    {
      int iContextAttribs[] =
      {
        WGL_CONTEXT_MAJOR_VERSION_ARB, 3,
        WGL_CONTEXT_MINOR_VERSION_ARB, 2,
        WGL_CONTEXT_FLAGS_ARB, 0,
        // WGL_CONTEXT_PROFILE_MASK_ARB,
        // WGL_CONTEXT_COMPATIBILITY_PROFILE_BIT_ARB,
        // WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB,
        0 // End of attributes list
      };
      this->ContextId = wglCreateContextAttribsARB(hDC, 0, iContextAttribs);
    }
    if (this->ContextId)
    {
      this->SetContextSupportsOpenGL32(true);
    }
    else
#endif
    {
      // Legacy context creation (pre-3.2 drivers).
      this->ContextId = wglCreateContext(hDC);
    }
    if (this->ContextId == NULL)
    {
      vtkErrorMacro("wglCreateContext failed in CreateAWindow(), error: " << GetLastError());
    }
  }

  // Delete the dummy window
  wglMakeCurrent(tempDC, 0);
  wglDeleteContext(tempContext);
  ReleaseDC(tempId, tempDC);
  ::DestroyWindow(tempId); // windows api

  // If we got a valid pixel format in the process, we are done.
  // Otherwise, we use the old approach of using ChoosePixelFormat.
  if (pixelFormat)
  {
    return;
  }

  // ----- legacy fallback path: ChoosePixelFormat/SetPixelFormat ------------
  BYTE bpp_byte = static_cast<BYTE>(bpp);
  BYTE zbpp_byte = static_cast<BYTE>(zbpp);
  PIXELFORMATDESCRIPTOR pfd2 = {
    sizeof(PIXELFORMATDESCRIPTOR), /* size */
    1,                             /* version */
    dwFlags,                       /* support double-buffering */
    PFD_TYPE_RGBA,                 /* color type */
    bpp_byte,                      /* preferred color depth */
    0, 0, 0, 0, 0, 0,              /* color bits (ignored) */
    static_cast<BYTE>(this->AlphaBitPlanes ? bpp/4 : 0), /* alpha bits, 0 = no alpha buffer */
    0,                             /* alpha shift (ignored) */
    0,                             /* no accumulation buffer */
    0, 0, 0, 0,                    /* accum bits (ignored) */
    zbpp_byte,                     /* depth buffer */
    static_cast<BYTE>(this->StencilCapable), /* stencil buffer */
    0,                             /* no auxiliary buffers */
    PFD_MAIN_PLANE,                /* main layer */
    0,                             /* reserved */
    0, 0, 0,                       /* no layer, visible, damage masks */
  };
  // Only try to set pixel format if we do not currently have one
  int currentPixelFormat = GetPixelFormat(hDC);
  // if there is a current pixel format, then make sure it
  // supports OpenGL
  if (currentPixelFormat != 0)
  {
    DescribePixelFormat(hDC, currentPixelFormat,sizeof(pfd2), &pfd2);
    if (!(pfd2.dwFlags & PFD_SUPPORT_OPENGL))
    {
      vtkDebugWithObjectMacro(this,"DescribePixelFormat failed; either invalid format or illegal multiple Win32 OpenGL initialize calls.");
      if (this->HasObserver(vtkCommand::ExitEvent))
      {
        this->InvokeEvent(vtkCommand::ExitEvent, NULL);
        return;
      }
      else
      {
        return;
      }
    }
  }
  else
  {
    // hDC has no current PixelFormat, so
    pixelFormat = ChoosePixelFormat(hDC, &pfd2);
    if (pixelFormat == 0)
    {
#ifdef UNICODE
      MessageBox(WindowFromDC(hDC), L"ChoosePixelFormat failed.", L"Error", MB_ICONERROR | MB_OK);
#else
      MessageBox(WindowFromDC(hDC), "ChoosePixelFormat failed.", "Error", MB_ICONERROR | MB_OK);
#endif
      // With an observer installed, defer the exit decision to it;
      // otherwise this is fatal.
      if (this->HasObserver(vtkCommand::ExitEvent))
      {
        this->InvokeEvent(vtkCommand::ExitEvent, NULL);
        return;
      }
      else
      {
        exit(1);
      }
    }
    DescribePixelFormat(hDC, pixelFormat,sizeof(pfd2), &pfd2);
    if (SetPixelFormat(hDC, pixelFormat, &pfd2) != TRUE)
    {
      // int err = GetLastError();
#ifdef UNICODE
      MessageBox(WindowFromDC(hDC), L"SetPixelFormat failed.", L"Error", MB_ICONERROR | MB_OK);
#else
      MessageBox(WindowFromDC(hDC), "SetPixelFormat failed.", "Error", MB_ICONERROR | MB_OK);
#endif
      if (this->HasObserver(vtkCommand::ExitEvent))
      {
        this->InvokeEvent(vtkCommand::ExitEvent, NULL);
        return;
      }
      else
      {
        exit(1);
      }
    }
  }
  if (debug && (dwFlags & PFD_STEREO) && !(pfd2.dwFlags & PFD_STEREO))
  {
    vtkGenericWarningMacro("No Stereo Available!");
    this->StereoCapableWindow = 0;
  }
  this->SetupPalette(hDC);

  // create a context
  this->ContextId = wglCreateContext(hDC);
  if (this->ContextId == NULL)
  {
    vtkErrorMacro("wglCreateContext failed in CreateAWindow(), error: " << GetLastError());
  }
}

// Install a logical RGB palette on hDC when the chosen pixel format requires
// one (palettized displays, PFD_NEED_PALETTE); no-op otherwise.
void vtkWin32OpenGLRenderWindow::SetupPalette(HDC hDC)
{
  int pixelFormat = GetPixelFormat(hDC);
  PIXELFORMATDESCRIPTOR pfd;
  LOGPALETTE* pPal;
  int paletteSize;

  DescribePixelFormat(hDC, pixelFormat, sizeof(PIXELFORMATDESCRIPTOR), &pfd);

  if (pfd.dwFlags & PFD_NEED_PALETTE)
  {
    paletteSize = 1 << pfd.cColorBits;
  }
  else
  {
    return;
  }

  // LOGPALETTE is a variable-length struct; allocate header + entries.
  pPal = (LOGPALETTE*) malloc(sizeof(LOGPALETTE) + paletteSize * sizeof(PALETTEENTRY));
  pPal->palVersion = 0x300;
  pPal->palNumEntries = paletteSize;

  /* build a simple RGB color palette */
  {
    int redMask = (1 << pfd.cRedBits) - 1;
    int greenMask = (1 << pfd.cGreenBits) - 1;
    int blueMask = (1 << pfd.cBlueBits) - 1;
    int i;
    for (i=0; i<paletteSize; ++i)
    {
      // Decompose the palette index into its R/G/B fields and scale each
      // field to the full 0..255 range.
      pPal->palPalEntry[i].peRed = (((i >> pfd.cRedShift) & redMask) * 255) / redMask;
      pPal->palPalEntry[i].peGreen = (((i >> pfd.cGreenShift) & greenMask) * 255) / greenMask;
      pPal->palPalEntry[i].peBlue = (((i >> pfd.cBlueShift) & blueMask) * 255) / blueMask;
      pPal->palPalEntry[i].peFlags = 0;
    }
  }

  this->Palette = CreatePalette(pPal);
  free(pPal);

  if (this->Palette)
  {
    // Keep the previous palette so it can be restored on palette messages.
    this->OldPalette = SelectPalette(hDC, this->Palette, FALSE);
    RealizePalette(hDC);
  }
}

// Win32 message handler for the render window.  Dispatches the messages VTK
// cares about and forwards everything else to DefWindowProc.
// (Continues past the end of this edit's range.)
LRESULT vtkWin32OpenGLRenderWindow::MessageProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
{
  switch (message)
  {
    case WM_CREATE:
    {
      // nothing to be done here, opengl is initialized after the call to
      // create now
      return 0;
    }
    case WM_DESTROY:
      this->Clean();
      if (this->DeviceContext)
      {
        ReleaseDC(this->WindowId, this->DeviceContext);
        this->DeviceContext = NULL;
        this->WindowId = NULL;
      }
      return 0;
    case WM_SIZE:
      /* track window size changes */
      if (this->ContextId)
      {
        this->SetSize((int) LOWORD(lParam),(int) HIWORD(lParam));
        return 0;
      }
      // NOTE(review): falls through to WM_PALETTECHANGED when ContextId is
      // null -- looks intentional-by-accident; confirm.
    case WM_PALETTECHANGED:
      /* realize palette if this is *not* the current window */
      if
(this->ContextId && this->Palette && (HWND) wParam != hWnd)
      {
        // Re-realize our palette in the background.
        SelectPalette(this->DeviceContext, this->OldPalette, FALSE);
        UnrealizeObject(this->Palette);
        this->OldPalette = SelectPalette(this->DeviceContext, this->Palette, FALSE);
        RealizePalette(this->DeviceContext);
        this->Render();
      }
      break;
    case WM_QUERYNEWPALETTE:
      /* realize palette if this is the current window */
      if (this->ContextId && this->Palette)
      {
        SelectPalette(this->DeviceContext, this->OldPalette, FALSE);
        UnrealizeObject(this->Palette);
        this->OldPalette = SelectPalette(this->DeviceContext, this->Palette, FALSE);
        RealizePalette(this->DeviceContext);
        this->Render();
        return TRUE;
      }
      break;
    case WM_PAINT:
    {
      PAINTSTRUCT ps;
      BeginPaint(hWnd, &ps);
      if (this->ContextId)
      {
        this->Render();
      }
      EndPaint(hWnd, &ps);
      return 0;
    }
    break;
    case WM_ERASEBKGND:
      // OpenGL repaints the whole client area; suppress GDI background erase.
      return TRUE;
    case WM_SETCURSOR:
      if (HTCLIENT == LOWORD(lParam))
      {
        this->SetCurrentCursor(this->GetCurrentCursor());
        return TRUE;
      }
      break;
    default:
      // Let observers see unhandled window messages.
      this->InvokeEvent(vtkCommand::RenderWindowMessageEvent, &message);
      break;
  }
  return DefWindowProc(hWnd, message, wParam, lParam);
}

// Lazily resolve the HINSTANCE used for window-class registration/creation.
void vtkWin32OpenGLRenderWindow::InitializeApplication()
{
  // get the application instance if we don't have one already
  if (!this->ApplicationInstance)
  {
    // if we have a parent window get the app instance from it
    if (this->ParentId)
    {
      this->ApplicationInstance = (HINSTANCE)vtkGetWindowLong(this->ParentId,vtkGWL_HINSTANCE);
    }
    else
    {
      this->ApplicationInstance = GetModuleHandle(NULL); /*AfxGetInstanceHandle();*/
    }
  }
}

// Create (or attach to) the Win32 window, its device context, pixel format
// and OpenGL context.  Reference-counted via WindowIdReferenceCount so
// repeated calls only bump the count.
void vtkWin32OpenGLRenderWindow::CreateAWindow()
{
  WNDCLASS wndClass;
  // has the class been registered ?
#ifdef UNICODE
  if (!GetClassInfo(this->ApplicationInstance,L"vtkOpenGL",&wndClass))
#else
  if (!GetClassInfo(this->ApplicationInstance,"vtkOpenGL",&wndClass))
#endif
  {
    wndClass.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
    wndClass.lpfnWndProc = vtkWin32OpenGLRenderWindow::WndProc;
    wndClass.cbClsExtra = 0;
    wndClass.hInstance = this->ApplicationInstance;
    wndClass.hIcon = LoadIcon(NULL, IDI_APPLICATION);
    wndClass.hCursor = LoadCursor(NULL, IDC_ARROW);
    wndClass.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH);
    wndClass.lpszMenuName = NULL;
#ifdef UNICODE
    wndClass.lpszClassName = L"vtkOpenGL";
#else
    wndClass.lpszClassName = "vtkOpenGL";
#endif
    // vtk doesn't use the first extra vtkLONG's worth of bytes,
    // but app writers may want them, so we provide them. VTK
    // does use the second vtkLONG's worth of bytes of extra space.
    wndClass.cbWndExtra = 2 * sizeof(vtkLONG);
    RegisterClass(&wndClass);
  }

  if(this->WindowIdReferenceCount == 0)
  {
    static int count = 1;   // serial number used in the default window title
    char *windowName;
    if (!this->WindowId)
    {
      this->DeviceContext = 0;
      // Size the buffer for the fixed prefix plus the decimal digits of count.
      int len = static_cast<int>(strlen("Visualization Toolkit - Win32OpenGL #")) + (int)ceil( (double) log10( (double)(count+1) ) ) + 1;
      windowName = new char [ len ];
      sprintf(windowName,"Visualization Toolkit - Win32OpenGL #%i",count++);
      this->SetWindowName(windowName);
      delete [] windowName;
#ifdef UNICODE
      // Wide copy of the window name for the Unicode CreateWindow call.
      wchar_t *wname = new wchar_t [mbstowcs(NULL, this->WindowName, 32000)+1];
      mbstowcs(wname, this->WindowName, 32000);
#endif
      int x = ((this->Position[0] >= 0) ? this->Position[0] : 5);
      int y = ((this->Position[1] >= 0) ? this->Position[1] : 5);
      int height = ((this->Size[1] > 0) ? this->Size[1] : 300);
      int width = ((this->Size[0] > 0) ? this->Size[0] : 300);

      /* create window */
      if (this->ParentId)
      {
#ifdef UNICODE
        this->WindowId = CreateWindow(L"vtkOpenGL", wname, WS_CHILD | WS_CLIPCHILDREN /*| WS_CLIPSIBLINGS*/, x, y, width, height, this->ParentId, NULL, this->ApplicationInstance, NULL);
#else
        this->WindowId = CreateWindow("vtkOpenGL", this->WindowName, WS_CHILD | WS_CLIPCHILDREN /*| WS_CLIPSIBLINGS*/, x, y, width, height, this->ParentId, NULL, this->ApplicationInstance, NULL);
#endif
      }
      else
      {
        DWORD style;
        if (this->Borders)
        {
          style = WS_OVERLAPPEDWINDOW | WS_CLIPCHILDREN /*| WS_CLIPSIBLINGS*/;
        }
        else
        {
          style = WS_POPUP | WS_CLIPCHILDREN /*| WS_CLIPSIBLINGS*/;
        }
        RECT r;
        // Grow the rect so the *client* area matches the requested size.
        AdjustWindowRectForBorders(this->Borders, x, y, width, height, r);
#ifdef UNICODE
        this->WindowId = CreateWindow(L"vtkOpenGL", wname, style, x, y, r.right-r.left, r.bottom-r.top, NULL, NULL, this->ApplicationInstance, NULL);
#else
        this->WindowId = CreateWindow("vtkOpenGL", this->WindowName, style, x, y, r.right-r.left, r.bottom-r.top, NULL, NULL, this->ApplicationInstance, NULL);
#endif
      }
#ifdef UNICODE
      delete [] wname;
#endif
      if (!this->WindowId)
      {
        vtkErrorMacro("Could not create window, error: " << GetLastError());
        return;
      }
      // extract the create info

      /* display window */
      if(!this->OffScreenRendering)
      {
        ShowWindow(this->WindowId, SW_SHOW);
      }
      //UpdateWindow(this->WindowId);
      this->OwnWindow = 1;
      // Stash 'this' in the window's extra bytes so WndProc can find us.
      vtkSetWindowLong(this->WindowId,sizeof(vtkLONG),(intptr_t)this);
    }
    if (!this->DeviceContext)
    {
      this->DeviceContext = GetDC(this->WindowId);
    }
    if (this->StereoCapableWindow)
    {
      this->SetupPixelFormatPaletteAndContext(this->DeviceContext, PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW | PFD_DOUBLEBUFFER | PFD_STEREO, this->GetDebug(), 32, 32);
    }
    else
    {
      this->SetupPixelFormatPaletteAndContext(this->DeviceContext, PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW | PFD_DOUBLEBUFFER, this->GetDebug(), 32, 32);
    }
    this->MakeCurrent();

    // wipe out any existing display lists
    vtkRenderer* ren;
    vtkCollectionSimpleIterator rsit;
    for (this->Renderers->InitTraversal(rsit); (ren = this->Renderers->GetNextRenderer(rsit));)
    {
      // Detach/re-attach forces renderers to drop cached GL resources.
      ren->SetRenderWindow(0);
      ren->SetRenderWindow(this);
    }
    this->OpenGLInit();
    this->Mapped = 1;
    this->WindowIdReferenceCount = 1;
  }
  else
  {
    ++this->WindowIdReferenceCount;
  }
}

// Initialize the window for rendering.
void vtkWin32OpenGLRenderWindow::WindowInitialize()
{
  // create our own window if not already set
  this->OwnWindow = 0;
  if (!this->MFChandledWindow)
  {
    this->InitializeApplication();
    this->CreateAWindow();
  }
  else
  {
    // Window is managed by MFC; just bind our GL state to it.
    this->MakeCurrent(); // hsr
    this->OpenGLInit();
  }
}

// Initialize the rendering window (on-screen or off-screen as configured).
void vtkWin32OpenGLRenderWindow::Initialize (void)
{
  // make sure we havent already been initialized
  if (!this->OffScreenRendering && !this->ContextId)
  {
    this->WindowInitialize();
  }
  else
  {
    if(this->OffScreenRendering && !(this->ContextId || this->OffScreenUseFrameBuffer))
    {
      this->InitializeApplication();
      int width = ((this->Size[0] > 0) ? this->Size[0] : 300);
      int height = ((this->Size[1] > 0) ? this->Size[1] : 300);
      this->CreateOffScreenWindow(width,height);
    }
  }
}

// Tear down cursor/off-screen state and release the window.
void vtkWin32OpenGLRenderWindow::Finalize (void)
{
  if (this->CursorHidden)
  {
    this->ShowCursor();
  }

  if (this->OffScreenRendering)
  {
    this->CleanUpOffScreenRendering();
  }
  this->DestroyWindow();
}

// Drop one reference to the window; on the last reference, release the GL
// state, the DC and (when we own it) the window itself.
void vtkWin32OpenGLRenderWindow::DestroyWindow()
{
  if(this->WindowIdReferenceCount > 0)
  {
    --this->WindowIdReferenceCount;
    if(this->WindowIdReferenceCount == 0)
    {
      this->Clean();
      if (this->WindowId)
      {
        ReleaseDC(this->WindowId, this->DeviceContext);
        // can't set WindowId=NULL, needed for DestroyWindow
        this->DeviceContext = NULL;

        // clear the extra data before calling destroy
        vtkSetWindowLong(this->WindowId,sizeof(vtkLONG),(vtkLONG)0);
        if(this->OwnWindow)
        {
          ::DestroyWindow(this->WindowId); // windows api
          this->WindowId=0;
        }
      }
    }
  }
}

// Get the current size of the window.
int *vtkWin32OpenGLRenderWindow::GetSize(void) { // if we aren't mapped then just return the ivar if (this->Mapped) { RECT rect; // Find the current window size if (GetClientRect(this->WindowId, &rect)) { this->Size[0] = rect.right; this->Size[1] = rect.bottom; } else { this->Size[0] = 0; this->Size[1] = 0; } } return this->vtkOpenGLRenderWindow::GetSize(); } // Get the size of the whole screen. int *vtkWin32OpenGLRenderWindow::GetScreenSize(void) { HDC hDC = ::GetDC(NULL); if (hDC) { // This technique yields the screen size of the primary monitor // only in a multi-monitor configuration... this->Size[0] = ::GetDeviceCaps(hDC, HORZRES); this->Size[1] = ::GetDeviceCaps(hDC, VERTRES); ::ReleaseDC(NULL, hDC); } else { // This technique gets the "work area" (the whole screen except // for the bit covered by the Windows task bar) -- use it as a // fallback if there's an error calling GetDC. RECT rect; SystemParametersInfo(SPI_GETWORKAREA, 0, &rect, 0); this->Size[0] = rect.right - rect.left; this->Size[1] = rect.bottom - rect.top; } return this->Size; } // Get the position in screen coordinates of the window. int *vtkWin32OpenGLRenderWindow::GetPosition(void) { // if we aren't mapped then just return the ivar if (!this->Mapped) { return this->Position; } // Find the current window position // x,y,&this->Position[0],&this->Position[1],&child); return this->Position; } // Change the window to fill the entire screen. 
// Toggle full-screen mode.  Saves/restores position, size and border state
// in OldScreen[] and remaps the window.
void vtkWin32OpenGLRenderWindow::SetFullScreen(int arg)
{
  int *temp;

  if (this->FullScreen == arg)
  {
    return;
  }

  if (!this->Mapped)
  {
    // Window not realized yet: just record the full-screen preference.
    this->PrefFullScreen();
    return;
  }

  // set the mode
  this->FullScreen = arg;
  if (this->FullScreen <= 0)
  {
    // Leaving full screen: restore the saved geometry.
    // NOTE(review): OldScreen[2]/[3] (size) are never written in the
    // enter-fullscreen branch below -- confirm they are set elsewhere.
    this->Position[0] = this->OldScreen[0];
    this->Position[1] = this->OldScreen[1];
    this->Size[0] = this->OldScreen[2];
    this->Size[1] = this->OldScreen[3];
    this->Borders = this->OldScreen[4];
  }
  else
  {
    // if window already up get its values
    if (this->WindowId)
    {
      temp = this->GetPosition();
      this->OldScreen[0] = temp[0];
      this->OldScreen[1] = temp[1];
      this->OldScreen[4] = this->Borders;
      this->PrefFullScreen();
    }
  }

  // remap the window
  this->WindowRemap();
  this->Modified();
}

//
// Set the variable that indicates that we want a stereo capable window
// be created. This method can only be called before a window is realized.
//
void vtkWin32OpenGLRenderWindow::SetStereoCapableWindow(int capable)
{
  if (this->ContextId == 0)
  {
    vtkRenderWindow::SetStereoCapableWindow(capable);
  }
  else
  {
    vtkWarningMacro(<< "Requesting a StereoCapableWindow must be performed " << "before the window is realized, i.e. before a render.");
  }
}

// Set the preferred window size to full screen.
void vtkWin32OpenGLRenderWindow::PrefFullScreen()
{
  int *size = this->GetScreenSize();

  // don't show borders
  this->Borders = 0;

  RECT r;
  AdjustWindowRectForBorders(this->Borders, 0, 0, size[0], size[1], r);

  // use full screen
  this->Position[0] = 0;
  this->Position[1] = 0;
  this->Size[0] = r.right - r.left;
  this->Size[1] = r.bottom - r.top;
}

// Remap the window: tear everything down and rebuild on NextWindowId.
void vtkWin32OpenGLRenderWindow::WindowRemap()
{
  // close everything down
  this->Finalize();

  // set the default windowid
  this->WindowId = this->NextWindowId;
  this->NextWindowId = 0;

  // and set it up!
  this->Initialize();
}

void vtkWin32OpenGLRenderWindow::PrintSelf(ostream& os, vtkIndent indent)
{
  this->Superclass::PrintSelf(os,indent);

  os << indent << "ContextId: " << this->ContextId << "\n";
  os << indent << "Next Window Id: " << this->NextWindowId << "\n";
  os << indent << "Window Id: " << this->WindowId << "\n";
}

// Get the window id.
HWND vtkWin32OpenGLRenderWindow::GetWindowId()
{
  vtkDebugMacro(<< "Returning WindowId of " << this->WindowId << "\n");
  return this->WindowId;
}

// Set the window id to a pre-existing window; invalidates the old GL context.
void vtkWin32OpenGLRenderWindow::SetWindowId(HWND arg)
{
  vtkDebugMacro(<< "Setting WindowId to " << arg << "\n");
  if (arg != this->WindowId)
  {
    this->WindowId = arg;
    if (this->ContextId)
    {
      wglDeleteContext(this->ContextId);
    }
    this->ContextId = 0;
    this->DeviceContext = 0;
  }
}

// Set this RenderWindow's window id from a string-encoded handle.
// NOTE(review): the handle is parsed into an int, which is narrower than a
// pointer on Win64 -- confirm callers only pass 32-bit-safe handles.
void vtkWin32OpenGLRenderWindow::SetWindowInfo(char *info)
{
  int tmp;
  sscanf(info,"%i",&tmp);
  this->WindowId = (HWND)tmp;
  vtkDebugMacro(<< "Setting WindowId to " << this->WindowId << "\n");
}

// Same string-encoded-handle convention as SetWindowInfo, for NextWindowId.
void vtkWin32OpenGLRenderWindow::SetNextWindowInfo(char *info)
{
  int tmp;
  sscanf(info,"%i",&tmp);
  this->SetNextWindowId((HWND)tmp);
}

// The "display id" on Win32 is the device context.
void vtkWin32OpenGLRenderWindow::SetDisplayId(void * arg)
{
  this->DeviceContext = (HDC) arg;
}

void vtkWin32OpenGLRenderWindow::SetContextId(HGLRC arg)
{
  this->ContextId = arg;
}

// Adopt an externally managed DC (marks the window as MFC-handled).
void vtkWin32OpenGLRenderWindow::SetDeviceContext(HDC arg)
{
  this->DeviceContext = arg;
  this->MFChandledWindow = TRUE;
}

// Sets the HWND id of the window that WILL BE created.
void vtkWin32OpenGLRenderWindow::SetParentInfo(char *info)
{
  int tmp;
  sscanf(info,"%i",&tmp);
  this->ParentId = (HWND)tmp;
  vtkDebugMacro(<< "Setting ParentId to " << this->ParentId << "\n");
}

// Set the window id to a pre-existing window.
void vtkWin32OpenGLRenderWindow::SetParentId(HWND arg)
{
  vtkDebugMacro(<< "Setting ParentId to " << arg << "\n");
  this->ParentId = arg;
}

// Set the window id of the new window once a WindowRemap is done.
void vtkWin32OpenGLRenderWindow::SetNextWindowId(HWND arg)
{
  vtkDebugMacro(<< "Setting NextWindowId to " << arg << "\n");
  this->NextWindowId = arg;
}

void vtkWin32OpenGLRenderWindow::SetNextWindowId(void *arg)
{
  this->SetNextWindowId((HWND)arg);
}

// Begin the rendering process.
void vtkWin32OpenGLRenderWindow::Start(void)
{
  // if the renderer has not been initialized, do so now
  if (!this->ContextId)
  {
    this->Initialize();
  }

  // set the current window
  this->MakeCurrent();
}

// Switch between on-screen and off-screen rendering, saving/restoring the
// on-screen GL state around the off-screen session.
void vtkWin32OpenGLRenderWindow::SetOffScreenRendering(int offscreen)
{
  if (offscreen == this->OffScreenRendering)
  {
    return;
  }
  this->vtkRenderWindow::SetOffScreenRendering(offscreen);
  if (offscreen)
  {
    int size[2];
    size[0] = (this->Size[0] > 0) ? this->Size[0] : 300;
    size[1] = (this->Size[1] > 0) ? this->Size[1] : 300;
    this->SaveScreenRendering();
    this->CreateOffScreenWindow(size[0],size[1]);
  }
  else
  {
    this->CleanUpOffScreenRendering();
    if (!this->WindowId)
    {
      // No on-screen window existed before; create one from scratch.
      this->WindowInitialize();
      this->OpenGLInit();
      if (this->Interactor)
      {
        this->Interactor->ReInitialize();
      }
      this->DoubleBuffer = 1;
    }
    else
    {
      this->ResumeScreenRendering();
    }
  }
}

// Snapshot the on-screen rendering state so it can be restored by
// ResumeScreenRendering().
void vtkWin32OpenGLRenderWindow::SaveScreenRendering()
{
  this->ScreenMapped = this->Mapped;
  this->ScreenWindowSize[0] = this->Size[0];
  this->ScreenWindowSize[1] = this->Size[1];
  this->ScreenDeviceContext = this->DeviceContext;
  this->ScreenDoubleBuffer = this->DoubleBuffer;
  this->ScreenContextId = this->ContextId;
}

// Create an off-screen rendering target: hardware FBO when available,
// otherwise a GDI DIB section on a DISPLAY-compatible DC.
void vtkWin32OpenGLRenderWindow::CreateOffScreenWindow(int width, int height)
{
  // Guard against re-entrancy while the off-screen window is being built.
  int status = this->CreatingOffScreenWindow;
  this->CreatingOffScreenWindow = 1;
  if(!this->CreateHardwareOffScreenWindow(width,height))
  {
#ifdef UNICODE
    HDC dc = CreateDC(L"DISPLAY", 0, 0, 0);
#else
    HDC dc = CreateDC("DISPLAY", 0, 0, 0);
#endif
    this->CreateOffScreenDC(width,height,dc);
    DeleteDC(dc);
  }
  this->CreatingOffScreenWindow = status;
}

// Build a 24-bit bottom-up DIB section of the requested size and hand it to
// the HBITMAP overload below.
void vtkWin32OpenGLRenderWindow::CreateOffScreenDC(int xsize, int ysize, HDC aHdc)
{
  // DIB scanlines are padded to 4-byte boundaries (3 bytes/pixel).
  int dataWidth = ((xsize*3+3)/4)*4;

  this->MemoryDataHeader.bmiHeader.biSize = 40;
  this->MemoryDataHeader.bmiHeader.biWidth = xsize;
  this->MemoryDataHeader.bmiHeader.biHeight = ysize;
  this->MemoryDataHeader.bmiHeader.biPlanes = 1;
  this->MemoryDataHeader.bmiHeader.biBitCount = 24;
  this->MemoryDataHeader.bmiHeader.biCompression = BI_RGB;
  this->MemoryDataHeader.bmiHeader.biClrUsed = 0;
  this->MemoryDataHeader.bmiHeader.biClrImportant = 0;
  this->MemoryDataHeader.bmiHeader.biSizeImage = dataWidth*ysize;
  this->MemoryDataHeader.bmiHeader.biXPelsPerMeter = 10000;
  this->MemoryDataHeader.bmiHeader.biYPelsPerMeter = 10000;

  // MemoryData ends up pointing at the DIB's pixel storage.
  HBITMAP dib = CreateDIBSection(aHdc, &this->MemoryDataHeader, DIB_RGB_COLORS, (void **)(&(this->MemoryData)), NULL, 0);
  SIZE oldSize;
  SetBitmapDimensionEx(dib, xsize, ysize, &oldSize);

  // try using a DIBsection
  this->CreateOffScreenDC(dib, aHdc);
}

// Route all rendering into the given bitmap via a compatible memory DC.
void vtkWin32OpenGLRenderWindow::CreateOffScreenDC(HBITMAP hbmp, HDC aHdc)
{
  BITMAP bm;
  GetObject(hbmp, sizeof(BITMAP), &bm);

  this->MemoryBuffer = hbmp;

  // Create a compatible device context
  this->MemoryHdc = (HDC)CreateCompatibleDC(aHdc);

  // Put the bitmap into the device context
  SelectObject(this->MemoryHdc, this->MemoryBuffer);

  // Renderers will need to redraw anything cached in display lists
  vtkRenderer *ren;
  vtkCollectionSimpleIterator rsit;
  for (this->Renderers->InitTraversal(rsit); (ren = this->Renderers->GetNextRenderer(rsit));)
  {
    ren->SetRenderWindow(NULL);
    ren->SetRenderWindow(this);
  }

  // adjust settings for renderwindow
  this->Mapped =0;
  this->Size[0] = bm.bmWidth;
  this->Size[1] = bm.bmHeight;

  this->DeviceContext = this->MemoryHdc;
  this->DoubleBuffer = 0;
  this->SetupPixelFormatPaletteAndContext(this->DeviceContext, PFD_SUPPORT_OPENGL | PFD_SUPPORT_GDI | PFD_DRAW_TO_BITMAP, this->GetDebug(), 24, 32);
  this->MakeCurrent();
  this->OpenGLInit();
}

// Save the on-screen state, then redirect rendering to a new off-screen DIB.
void vtkWin32OpenGLRenderWindow::SetupMemoryRendering(int xsize, int ysize, HDC aHdc)
{
  // save the current state
  this->ScreenMapped = this->Mapped;
  this->ScreenWindowSize[0] = this->Size[0];
  this->ScreenWindowSize[1] = this->Size[1];
  this->ScreenDeviceContext = this->DeviceContext;
  this->ScreenDoubleBuffer = this->DoubleBuffer;
  this->ScreenContextId = this->ContextId;

  this->CreateOffScreenDC(xsize, ysize, aHdc);
}

// Save the on-screen state, then redirect rendering into a caller-supplied
// bitmap.
void vtkWin32OpenGLRenderWindow::SetupMemoryRendering(HBITMAP hbmp)
{
#ifdef UNICODE
  HDC dc = CreateDC(L"DISPLAY", 0, 0, 0);
#else
  HDC dc = CreateDC("DISPLAY", 0, 0, 0);
#endif

  // save the current state
  this->ScreenMapped = this->Mapped;
  this->ScreenWindowSize[0] = this->Size[0];
  this->ScreenWindowSize[1] = this->Size[1];
  this->ScreenDeviceContext = this->DeviceContext;
  this->ScreenDoubleBuffer = this->DoubleBuffer;
  this->ScreenContextId = this->ContextId;

  this->CreateOffScreenDC(hbmp, dc);
  DeleteDC(dc);
}

HDC vtkWin32OpenGLRenderWindow::GetMemoryDC()
{
  return this->MemoryHdc;
}

// Release all off-screen resources (FBO or memory DC/bitmap/context).
void vtkWin32OpenGLRenderWindow::CleanUpOffScreenRendering(void)
{
  if(this->OffScreenUseFrameBuffer)
  {
    this->DestroyHardwareOffScreenWindow();
  }
  else
  {
    if (!this->MemoryHdc)
    {
      return;
    }

    // Make sure pending GDI drawing hits the bitmap before teardown.
    GdiFlush();

    // we need to release resources
    this->CleanUpRenderers();
    DeleteDC(this->MemoryHdc);
    this->MemoryHdc = (HDC)0;
    DeleteObject(this->MemoryBuffer);
    if (wglDeleteContext(this->ContextId) != TRUE)
    {
      vtkErrorMacro("wglDeleteContext failed in CleanUpOffScreenRendering(), error: " << GetLastError());
    }
    this->ContextId=0;
  }
}

// Restore the rendering state saved by SaveScreenRendering()/Setup*().
void vtkWin32OpenGLRenderWindow::ResumeScreenRendering(void)
{
  // release OpenGL graphics resources before switch back to on-screen.
  if(this->ContextId!=0)
  {
    this->MakeCurrent();

    // Renderers will need to redraw anything cached in display lists
    vtkRenderer *ren;
    vtkCollectionSimpleIterator rsit;
    for (this->Renderers->InitTraversal(rsit); (ren = this->Renderers->GetNextRenderer(rsit));)
    {
      ren->SetRenderWindow(NULL);
      ren->SetRenderWindow(this);
    }
  }

  this->Mapped = this->ScreenMapped;
  this->Size[0] = this->ScreenWindowSize[0];
  this->Size[1] = this->ScreenWindowSize[1];
  this->DeviceContext = this->ScreenDeviceContext;
  this->DoubleBuffer = this->ScreenDoubleBuffer;
  this->ContextId = this->ScreenContextId;
  this->MakeCurrent();
}

//----------------------------------------------------------------------------
void vtkWin32OpenGLRenderWindow::HideCursor()
{
  if (this->CursorHidden)
  {
    return;
  }
  this->CursorHidden = 1;

  ::ShowCursor(!this->CursorHidden);
}

//----------------------------------------------------------------------------
void vtkWin32OpenGLRenderWindow::ShowCursor()
{
  if (!this->CursorHidden)
  {
    return;
  }
  this->CursorHidden = 0;

  ::ShowCursor(!this->CursorHidden);
}

//----------------------------------------------------------------------------
// Move the mouse cursor to (x,y) given in VTK's bottom-left-origin
// coordinates; converted to Win32's top-left-origin client space.
void vtkWin32OpenGLRenderWindow::SetCursorPosition(int x, int y)
{
  int *size = this->GetSize();

  POINT point;
  point.x = x;
  point.y = size[1] - y - 1;   // flip y: VTK origin is bottom-left

  if (ClientToScreen(this->WindowId, &point))
  {
    SetCursorPos(point.x, point.y);
  }
}

//----------------------------------------------------------------------------
// Map VTK cursor shapes to the corresponding Win32 system cursors.
void vtkWin32OpenGLRenderWindow::SetCurrentCursor(int shape)
{
  // Observers may take over cursor handling entirely.
  if ( this->InvokeEvent(vtkCommand::CursorChangedEvent,&shape) )
  {
    return;
  }
  this->Superclass::SetCurrentCursor(shape);
  LPCTSTR cursorName = 0;
  switch (shape)
  {
    case VTK_CURSOR_DEFAULT:
    case VTK_CURSOR_ARROW:
      cursorName = IDC_ARROW;
      break;
    case VTK_CURSOR_SIZENE:
    case VTK_CURSOR_SIZESW:
      cursorName = IDC_SIZENESW;
      break;
    case VTK_CURSOR_SIZENW:
    case VTK_CURSOR_SIZESE:
      cursorName = IDC_SIZENWSE;
      break;
    case VTK_CURSOR_SIZENS:
      cursorName = IDC_SIZENS;
      break;
    case VTK_CURSOR_SIZEWE:
      cursorName = IDC_SIZEWE;
      break;
    case VTK_CURSOR_SIZEALL:
      cursorName = IDC_SIZEALL;
      break;
    case VTK_CURSOR_HAND:
#if(WINVER >= 0x0500)
      cursorName = IDC_HAND;
#else
      // IDC_HAND is not available before Windows 2000.
      cursorName = IDC_ARROW;
#endif
      break;
    case VTK_CURSOR_CROSSHAIR:
      cursorName = IDC_CROSS;
      break;
  }
  if (cursorName)
  {
    // LR_SHARED: system-owned handle, must not be destroyed by us.
    HANDLE cursor = LoadImage(0,cursorName,IMAGE_CURSOR,0,0,LR_SHARED | LR_DEFAULTSIZE);
    SetCursor((HCURSOR)cursor);
  }
}

//----------------------------------------------------------------------------
// Read the vertical DPI of the current device context into this->DPI.
bool vtkWin32OpenGLRenderWindow::DetectDPI()
{
  this->SetDPI(GetDeviceCaps(this->DeviceContext, LOGPIXELSY));
  return true;
}
/*
 <samplecode>
 <abstract>
 Utility class to manage DSP parameters which can change value smoothly (be ramped) while rendering, without introducing clicks or other distortion into the signal.
 </abstract>
 </samplecode>
*/

#ifndef ParameterRamper_h
#define ParameterRamper_h

// N.B. This is C++.

#import <AudioToolbox/AudioToolbox.h>
#import <libkern/OSAtomic.h>

// Single-writer (UI thread) / single-reader (render thread) parameter
// smoother.  The UI thread publishes new values via setUIValue(); the render
// thread picks them up in dezipperCheck() and ramps linearly toward them.
class ParameterRamper {
  // NOTE(review): clampLow/clampHigh are declared but never used in this
  // header -- confirm whether clamping was meant to be applied in get().
  float clampLow, clampHigh;
  float _uiValue;                 // last value published by the UI thread
  float _goal;                    // value the ramp converges to
  float inverseSlope;             // per-sample decrement expressed as slope of the line below
  AUAudioFrameCount samplesRemaining;  // frames left until the ramp reaches _goal
  volatile int32_t changeCounter = 0;  // bumped atomically by the UI on every setUIValue()
  int32_t updateCounter = 0;           // render-thread snapshot of changeCounter

  void setImmediate(float value)
  {
    // only to be called from the render thread or when resources are not allocated.
    _goal = _uiValue = value;
    inverseSlope = 0.0;
    samplesRemaining = 0;
  }

public:
  ParameterRamper(float value)
  {
    setImmediate(value);
  }

  void init()
  {
    /* Call this from the kernel init. Updates the internal value from the UI value. */
    setImmediate(_uiValue);
  }

  void reset()
  {
    changeCounter = updateCounter = 0;
  }

  // UI-thread setter; the atomic barrier increment publishes the change to
  // the render thread.
  void setUIValue(float value)
  {
    _uiValue = value;
    OSAtomicIncrement32Barrier(&changeCounter);
  }

  float getUIValue() const { return _uiValue; }

  void dezipperCheck(AUAudioFrameCount rampDuration)
  {
    // check to see if the UI has changed and if so, start a ramp to dezipper it.
    int32_t changeCounterSnapshot = changeCounter;
    if (updateCounter != changeCounterSnapshot) {
      updateCounter = changeCounterSnapshot;
      startRamp(_uiValue, rampDuration);
    }
  }

  void startRamp(float newGoal, AUAudioFrameCount duration)
  {
    if (duration == 0) {
      setImmediate(newGoal);
    }
    else {
      /*
       Set a new ramp.
       Assigning to inverseSlope must come before assigning to goal:
       get() reads both, and the old slope paired with the new goal
       would produce a glitch.
      */
      inverseSlope = (get() - newGoal) / float(duration);
      samplesRemaining = duration;
      _goal = _uiValue = newGoal;
    }
  }

  float get() const
  {
    /*
     For long ramps, integrating a sum loses precision and does not
     reach the goal at the right time. So instead, a line equation
     is used. y = m * x + b.  Here x counts down to 0, so the value
     lands exactly on _goal when samplesRemaining hits 0.
    */
    return inverseSlope * float(samplesRemaining) + _goal;
  }

  void step()
  {
    // Do this in each inner loop iteration after getting the value.
    if (samplesRemaining != 0) {
      --samplesRemaining;
    }
  }

  float getAndStep()
  {
    // Combines get and step. Saves a multiply-add when not ramping.
    if (samplesRemaining != 0) {
      float value = get();
      --samplesRemaining;
      return value;
    }
    else {
      return _goal;
    }
  }

  void stepBy(AUAudioFrameCount n)
  {
    /*
     When a parameter does not participate in the current inner loop, you
     will want to advance it after the end of the loop.
    */
    if (n >= samplesRemaining) {
      samplesRemaining = 0;
    }
    else {
      samplesRemaining -= n;
    }
  }
};

#endif /* ParameterRamper_h */
//----------------------------------------------------------------------------- // Copyright (c) 2013 GarageGames, LLC // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
//----------------------------------------------------------------------------- #include "undo.h" #include "console/console.h" #include "console/consoleTypes.h" //----------------------------------------------------------------------------- // UndoAction //----------------------------------------------------------------------------- IMPLEMENT_CONOBJECT(UndoAction); IMPLEMENT_CONOBJECT(UndoScriptAction); UndoAction::UndoAction( const UTF8* actionName) { mActionName = StringTable->insert(actionName); mUndoManager = NULL; } // Modified to clean up quiet sub actions [KNM | 08/10/11 | ITGB-152] UndoAction::~UndoAction() { clearAllNotifications(); for (U32 i = 0; i < (U32)mQuietSubActions.size(); i++) delete mQuietSubActions[i]; mQuietSubActions.clear(); } //----------------------------------------------------------------------------- void UndoAction::initPersistFields() { Parent::initPersistFields(); addField("actionName", TypeString, Offset(mActionName, UndoAction), "A brief description of the action, for UI representation of this undo/redo action."); } //----------------------------------------------------------------------------- // Implemented to trickle down into quiet sub actions [KNM | 08/10/11 | ITGB-152] void UndoAction::undo(void) { for (U32 i = 0; i < (U32)mQuietSubActions.size(); i++) mQuietSubActions[i]->undo(); } //----------------------------------------------------------------------------- // Implemented to trickle down into quiet sub actions [KNM | 08/10/11 | ITGB-152] void UndoAction::redo(void) { for (U32 i = 0; i < (U32)mQuietSubActions.size(); i++) mQuietSubActions[i]->redo(); } //----------------------------------------------------------------------------- // Adds a "quiet (hidden from user)" sub action [KNM | 08/10/11 | ITGB-152] void UndoAction::addQuietSubAction(UndoAction * quietSubAction) { if (!quietSubAction) return; mQuietSubActions.push_back(quietSubAction); } //----------------------------------------------------------------------------- 
void UndoAction::addToManager(UndoManager* theMan) { if(theMan) { mUndoManager = theMan; (*theMan).addAction(this); } else { mUndoManager = &UndoManager::getDefaultManager(); mUndoManager->addAction(this); } } //----------------------------------------------------------------------------- // UndoManager //----------------------------------------------------------------------------- IMPLEMENT_CONOBJECT(UndoManager); UndoManager::UndoManager(U32 levels) { mNumLevels = levels; // levels can be arbitrarily high, so we don't really want to reserve(levels). mUndoStack.reserve(10); mRedoStack.reserve(10); } //----------------------------------------------------------------------------- UndoManager::~UndoManager() { clearStack(mUndoStack); clearStack(mRedoStack); } //----------------------------------------------------------------------------- void UndoManager::initPersistFields() { addField("numLevels", TypeS32, Offset(mNumLevels, UndoManager), "Number of undo & redo levels."); // arrange for the default undo manager to exist. // UndoManager &def = getDefaultManager(); // Con::printf("def = %s undo manager created", def.getName()); } //----------------------------------------------------------------------------- UndoManager& UndoManager::getDefaultManager() { // the default manager is created the first time it is asked for. static UndoManager *defaultMan = NULL; if(!defaultMan) { defaultMan = new UndoManager(); defaultMan->assignName("DefaultUndoManager"); defaultMan->registerObject(); } return *defaultMan; } ConsoleMethod(UndoManager, clearAll, void, 2, 2, "Clears the undo manager." 
"@return No Return Value") { object->clearAll(); } void UndoManager::clearAll() { clearStack(mUndoStack); clearStack(mRedoStack); Con::executef(this, 1, "onClear"); } //----------------------------------------------------------------------------- void UndoManager::clearStack(Vector<UndoAction*> &stack) { Vector<UndoAction*>::iterator itr = stack.begin(); while (itr != stack.end()) { UndoAction* undo = stack.first(); stack.pop_front(); // Don't delete script created undos. if (dynamic_cast<UndoScriptAction*>(undo)) undo->deleteObject(); else delete undo; } stack.clear(); } //----------------------------------------------------------------------------- void UndoManager::clampStack(Vector<UndoAction*> &stack) { while((U32)stack.size() > mNumLevels) { UndoAction *act = stack.front(); stack.pop_front(); UndoScriptAction* scriptAction = dynamic_cast<UndoScriptAction*>(act); if (scriptAction) scriptAction->deleteObject(); else delete act; } } void UndoManager::removeAction(UndoAction *action) { Vector<UndoAction*>::iterator itr = mUndoStack.begin(); while (itr != mUndoStack.end()) { if ((*itr) == action) { UndoAction* deleteAction = *itr; mUndoStack.erase(itr); if (!dynamic_cast<UndoScriptAction*>(deleteAction)) delete deleteAction; Con::executef(this, 1, "onRemoveUndo"); return; } itr++; } itr = mRedoStack.begin(); while (itr != mRedoStack.end()) { if ((*itr) == action) { UndoAction* deleteAction = *itr; mRedoStack.erase(itr); if (!dynamic_cast<UndoScriptAction*>(deleteAction)) delete deleteAction; Con::executef(this, 1, "onRemoveUndo"); return; } itr++; } } //----------------------------------------------------------------------------- void UndoManager::undo() { // make sure we have an action available if(mUndoStack.size() < 1) return; // pop the action off the undo stack UndoAction *act = mUndoStack.last(); mUndoStack.pop_back(); // add it to the redo stack mRedoStack.push_back(act); if((U32)mRedoStack.size() > mNumLevels) mRedoStack.pop_front(); Con::executef(this, 1, 
"onUndo"); // perform the undo, whatever it may be. (*act).undo(); } //----------------------------------------------------------------------------- void UndoManager::redo() { // make sure we have an action available if(mRedoStack.size() < 1) return; // pop the action off the redo stack UndoAction *react = mRedoStack.last(); mRedoStack.pop_back(); // add it to the undo stack mUndoStack.push_back(react); if((U32)mUndoStack.size() > mNumLevels) mUndoStack.pop_front(); Con::executef(this, 1, "onRedo"); // perform the redo, whatever it may be. (*react).redo(); } ConsoleMethod(UndoManager, getUndoCount, S32, 2, 2, "() \n @return Returns the number of UndoActions stored as an integer") { return object->getUndoCount(); } S32 UndoManager::getUndoCount() { return mUndoStack.size(); } ConsoleMethod(UndoManager, getUndoName, const char*, 3, 3, "( S32 index ) Gets the name of the UndoAction at given index.\n " "@param index An integer index value for the desired undo\n" "@return The name as a string") { return object->getUndoName(dAtoi(argv[2])); } StringTableEntry UndoManager::getUndoName(S32 index) { if ((index < getUndoCount()) && (index >= 0)) return mUndoStack[index]->mActionName; return NULL; } ConsoleMethod(UndoManager, getRedoCount, S32, 2, 2, "() \n @return Returns the number of redo Actions stored as an integer") { return object->getRedoCount(); } S32 UndoManager::getRedoCount() { return mRedoStack.size(); } ConsoleMethod(UndoManager, getRedoName, const char*, 3, 3, "( S32 index ) Gets the name of the Action at given index.\n " "@param index An integer index value for the desired redo\n" "@return The name as a string") { return object->getRedoName(dAtoi(argv[2])); } StringTableEntry UndoManager::getRedoName(S32 index) { if ((index < getRedoCount()) && (index >= 0)) return mRedoStack[getRedoCount() - index - 1]->mActionName; return NULL; } //----------------------------------------------------------------------------- StringTableEntry UndoManager::getNextUndoName() { 
if(mUndoStack.size() < 1) return NULL; UndoAction *act = mUndoStack.last(); return (*act).mActionName; } //----------------------------------------------------------------------------- StringTableEntry UndoManager::getNextRedoName() { if(mRedoStack.size() < 1) return NULL; UndoAction *act = mRedoStack.last(); return (*act).mActionName; } //----------------------------------------------------------------------------- void UndoManager::addAction(UndoAction* action) { // push the incoming action onto the stack, move old data off the end if necessary. mUndoStack.push_back(action); if((U32)mUndoStack.size() > mNumLevels) mUndoStack.pop_front(); Con::executef(this, 1, "onAddUndo"); // clear the redo stack clearStack(mRedoStack); } //----------------------------------------------------------------------------- ConsoleMethod(UndoAction, addToManager, void, 2, 3, "action.addToManager([undoManager]) Adds an UndoAction to the manager" "@param undoManager The manager to add the object to (default NULL)\n" "@return No Return Value") { UndoManager *theMan = NULL; if(argc == 3) { SimObject *obj = Sim::findObject(argv[2]); if(obj) theMan = dynamic_cast<UndoManager*> (obj); } object->addToManager(theMan); } //----------------------------------------------------------------------------- ConsoleMethod(UndoManager, undo, void, 2, 2, "UndoManager.undo(); Pops the top undo action off the stack, resolves it, " "and then pushes it onto the redo stack") { object->undo(); } //----------------------------------------------------------------------------- ConsoleMethod(UndoManager, redo, void, 2, 2, "UndoManager.redo(); Pops the top redo action off the stack, resolves it, " "and then pushes it onto the undo stack") { object->redo(); } //----------------------------------------------------------------------------- ConsoleMethod(UndoManager, getNextUndoName, const char *, 2, 2, "UndoManager.getNextUndoName(); Gets the name of the action at the top of the undo stack\n" "@return The name of the 
top action on the undo stack") { StringTableEntry name = object->getNextUndoName(); if(!name) return NULL; char *ret = Con::getReturnBuffer(dStrlen(name) + 1); dStrcpy(ret, name); return ret; } //----------------------------------------------------------------------------- ConsoleMethod(UndoManager, getNextRedoName, const char *, 2, 2, "UndoManager.getNextRedoName(); Gets the name of the action at the top of the undo stack\n" "@return The name of the top action on the redo stack") { StringTableEntry name = object->getNextRedoName(); if(!name) return NULL; char *ret = Con::getReturnBuffer(dStrlen(name) + 1); dStrcpy(ret, name); return ret; }
/*++

Copyright (c) 1998  Microsoft Corporation

Module Name:

    database.cpp

Abstract:

    SIS Groveler Jet-Blue database front-end

Authors:

    Cedric Krumbein, 1998

Environment:

    User Mode

Revision History:

--*/

#include "all.hxx"

/*****************************************************************************/
/*************** SGDatabase class static value initializations ***************/
/*****************************************************************************/

// All SGDatabase objects share one Jet engine instance; numInstances tracks
// how many SGDatabase objects exist so the engine can be torn down with the
// last one.
DWORD SGDatabase::numInstances = 0;

JET_INSTANCE SGDatabase::instance = 0;

BOOL SGDatabase::jetInitialized = FALSE;

// Directory that receives the Jet log files; set once by set_log_drive().
TCHAR * SGDatabase::logDir = NULL;

/*****************************************************************************/
/****************** SGDatabase class private static methods ******************/
/*****************************************************************************/

// Builds logDir from the given drive name plus the common-store directory
// path (CS_DIR_PATH).  Must be called exactly once, before the engine is
// initialized.  Always returns TRUE; failures trip ASSERTs instead.
// NOTE(review): uses wcslen() on a _TCHAR* buffer, so this compiles/behaves
// correctly only in UNICODE builds — confirm against the project settings.
BOOL SGDatabase::set_log_drive(const _TCHAR *drive_name)
{
    int drive_name_len = wcslen(drive_name);
    int logDirLen = drive_name_len + wcslen(CS_DIR_PATH) + 1;
    HRESULT r;

    //
    // Allocate a buffer
    //

    ASSERT(drive_name_len > 0);
    ASSERT(NULL == logDir);
    logDir = new TCHAR[logDirLen];
    ASSERT(NULL != logDir);

    //
    // copy the log drive name, remove trailing slash if there is one
    //

    r = StringCchCopy(logDir, logDirLen, drive_name);
    ASSERT(r == S_OK);

    //
    // Insert common store directory path
    //

    TrimTrailingChar(logDir,L'\\');
    r = StringCchCat(logDir, logDirLen, CS_DIR_PATH);
    ASSERT(r == S_OK);

    return TRUE;
}

// One-time Jet engine initialization: cd to the log directory, configure
// circular logging, size the page cache to min(4% of physical RAM,
// MAX_DATABASE_CACHE_SIZE), raise the version-page budget to at least
// MIN_VER_PAGES, then call JetInit.  Returns TRUE and sets jetInitialized on
// success.  On a log-set mismatch all database files are deleted so the next
// attempt can start fresh.
BOOL SGDatabase::InitializeEngine()
{
    DWORD_PTR maxVerPages;
    DWORD_PTR minCacheSize;
    DWORD_PTR newCacheSize;
    DWORDLONG cacheSize;
    DWORD circularLog;
    MEMORYSTATUSEX memStatus;
    SYSTEM_INFO sysInfo;
    JET_ERR jetErr;

    ASSERT(!jetInitialized);
    ASSERT(logDir);

    // Jet creates its log files in the current directory.
    if (!SetCurrentDirectory(logDir)) {
        DPRINTF((_T("SGDatabase::InitializeEngine: can't cd to \"%s\", %ld\n"),
            logDir, GetLastError()));
        return FALSE;
    }

    circularLog = 1;
    jetErr = JetSetSystemParameter(&instance, 0, JET_paramCircularLog, circularLog, NULL);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("(2) JetSetSystemParameter: jetErr=%ld\n"), jetErr));
        return FALSE;
    }

    //
    // Set the maximum cache size used by the database engine to min(4% phys mem, 6M).
    //

    jetErr = JetGetSystemParameter(instance, 0, JET_paramCacheSizeMin, &minCacheSize, NULL, 0);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetGetSystemParameter: jetErr=%ld\n"), jetErr));
        // NOTE(review): TerminateEngine() asserts jetInitialized, which is
        // still FALSE here — confirm this path is intended (it may be meant
        // to release partial Jet state via JetTerm).
        TerminateEngine();
        return FALSE;
    }

    memStatus.dwLength = sizeof memStatus;
    GlobalMemoryStatusEx(&memStatus); // get total physical memory
    GetSystemInfo(&sysInfo); // get page size

    cacheSize = memStatus.ullTotalPhys / 25; // 4%
    newCacheSize = (DWORD) min(cacheSize, MAX_DATABASE_CACHE_SIZE);
    // Jet expresses the cache size in pages, not bytes.
    newCacheSize = newCacheSize / sysInfo.dwPageSize;
    if (newCacheSize < minCacheSize)
        newCacheSize = minCacheSize;

    jetErr = JetSetSystemParameter(&instance, 0, JET_paramCacheSizeMax, newCacheSize, NULL);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("(3) JetSetSystemParameter: jetErr=%ld\n"), jetErr));
        return FALSE;
    }

    //
    // Set Version Cache size
    //

    jetErr = JetGetSystemParameter(instance, 0, JET_paramMaxVerPages, &maxVerPages, NULL, 0);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("(2) JetGetSystemParameter: jetErr=%ld\n"), jetErr));
        TerminateEngine();
        return FALSE;
    }
    if (maxVerPages >= MIN_VER_PAGES) {
        // Existing setting is already large enough; leave it alone.
        DPRINTF((_T("JetGetSystemParameter(instance=%lu): MaxVerPages=%lu\n"),
            instance, maxVerPages));
    } else {
        maxVerPages = MIN_VER_PAGES;
        jetErr = JetSetSystemParameter(&instance, 0, JET_paramMaxVerPages, maxVerPages, NULL);
        if (jetErr != JET_errSuccess) {
            DPRINTF((_T("(4) JetSetSystemParameter: jetErr=%ld\n"), jetErr));
            TerminateEngine();
            return FALSE;
        }
        DPRINTF((_T("JetSetSystemParameter(instance=%lu, MaxVerPages)=%lu\n"),
            instance, maxVerPages));
    }

    //
    // Initialize Jet
    //

    jetErr = JetInit(&instance);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetInit: jetErr=%ld\n"), jetErr));

        //
        // If we have a log mismatch, delete the log files
        //

        if (jetErr == JET_errDatabaseLogSetMismatch) {
            DeleteAllDatabaseFiles();
        }

        return FALSE;
    }

    jetInitialized = TRUE;
    DPRINTF((_T("JetInit: instance=%lu\n"), instance));
    return TRUE;
}

/*****************************************************************************/

// Shuts the Jet engine down.  On a clean JetTerm the residual engine files
// are deleted.  jetInitialized is cleared even when JetTerm fails.
BOOL SGDatabase::TerminateEngine()
{
    JET_ERR jetErr;
    BOOL rc;

    ASSERT(jetInitialized);

    jetErr = JetTerm(instance);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetTerm: jetErr=%ld\n"), jetErr));
        rc = FALSE;
    } else {
        rc = TRUE;
        CleanupEngineFiles();
    }

    jetInitialized = FALSE;
    DPRINTF((_T("JetTerm\n")));
    return rc;
}

/*****************************************************************************/

void SGDatabase::CleanupEngineFiles()

// Delete no longer needed jet files.
// Matches DATABASE_DELETE_RES_FILE_NAME under logDir and deletes every
// non-directory hit; failures are only logged.

{
    WIN32_FIND_DATA findData;
    HANDLE fHandle;
    BOOL success;
    TFileName fName, delName;

    if (logDir) {
        delName.assign(logDir);
        delName.append(_T("\\"));
        delName.append(DATABASE_DELETE_RES_FILE_NAME);
        fHandle = FindFirstFile(delName.name, &findData);
        if (fHandle != INVALID_HANDLE_VALUE) {
            do {
                if ((findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) == 0) {
                    // FindFirstFile returns bare names; rebuild the full path
                    // from the pattern's parent directory.
                    success = GetParentName(delName.name, &fName);
                    ASSERT(success); // internal error if failed
                    fName.append(_T("\\"));
                    fName.append(findData.cFileName);
                    if (!DeleteFile(fName.name)) {
                        DPRINTF((_T("SGDatabase: can't delete \"%s\", %d\n"),
                            delName.name, GetLastError()));
                    } else {
                        DPRINTF((_T("Deleted \"%s\"\n"), fName.name));
                    }
                }
            } while (FindNextFile(fHandle, &findData));
            success = FindClose(fHandle);
            ASSERT(success);
        }
    }
}

// Delete all jet database files
// Same scan as CleanupEngineFiles() but against the broader
// DATABASE_DELETE_LOG_FILE_NAME pattern; used to recover from
// JET_errDatabaseLogSetMismatch.
void SGDatabase::DeleteAllDatabaseFiles()
{
    WIN32_FIND_DATA findData;
    HANDLE fHandle;
    BOOL success;
    TFileName fName, delName;

    if (logDir) {
        delName.assign(logDir);
        delName.append(_T("\\"));
        delName.append(DATABASE_DELETE_LOG_FILE_NAME);
        fHandle = FindFirstFile(delName.name, &findData);
        if (fHandle != INVALID_HANDLE_VALUE) {
            do {
                if ((findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) == 0) {
                    success = GetParentName(delName.name, &fName);
                    ASSERT(success); // internal error if failed
                    fName.append(_T("\\"));
                    fName.append(findData.cFileName);
                    if (!DeleteFile(fName.name)) {
                        DPRINTF((_T("SGDatabase: can't delete \"%s\", %d\n"),
                            delName.name, GetLastError()));
                    } else {
                        DPRINTF((_T("Deleted \"%s\"\n"), fName.name));
                    }
                }
            } while (FindNextFile(fHandle, &findData));
            success = FindClose(fHandle);
            ASSERT(success);
        }
    }
}

/*****************************************************************************/
/********************** SGDatabase class private methods *********************/
/*****************************************************************************/

// Creates table 'tblName' with the given columns and returns the new table
// id plus the Jet column id of each column.  Requires an open session and
// database.  Returns FALSE on any Jet error (table may be partially built).
BOOL SGDatabase::CreateTable(
    const CHAR *tblName,
    DWORD numColumns,
    ColumnSpec **columnSpecs,
    JET_COLUMNID *columnIDs,
    JET_TABLEID *tblID)
{
    JET_COLUMNDEF columnDef;
    JET_COLUMNID colIDcount;
    JET_ERR jetErr;
    ColumnSpec *columnSpec;
    DWORD i, j;

    ASSERT(sesID != ~0);
    ASSERT(dbID != ~0);
    ASSERT(numColumns <= MAX_COLUMNS);

    jetErr = JetCreateTable(sesID, dbID, tblName, TABLE_PAGES, TABLE_DENSITY, tblID);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetCreateTable: jetErr=%ld\n"), jetErr));
        return FALSE;
    }
    DPRINTF((_T("JetCreateTable: tblID=%lu colIDs={"), *tblID));

    // Shared column definition; only the per-column fields change in the loop.
    columnDef.cbStruct = sizeof(JET_COLUMNDEF);
    columnDef.wCountry = COUNTRY_CODE;
    columnDef.langid = LANG_ID;
    columnDef.cp = CODE_PAGE;
    columnDef.wCollate = COLLATE;
    colIDcount = 1;
    for (i = 0; i < numColumns; i++) {
        columnSpec = columnSpecs[i];
        columnDef.columnid = colIDcount;
        columnDef.coltyp = columnSpec->coltyp;
        columnDef.cbMax = columnSpec->size;
        columnDef.grbit = columnSpec->grbit;
        jetErr = JetAddColumn(sesID, *tblID, columnSpec->name, &columnDef,
            NULL, 0, &columnIDs[i]);
        if (jetErr != JET_errSuccess) {
            DPRINTF((_T("\nJetAddColumn: jetErr=%ld\n"), jetErr));
            return FALSE;
        }
        DPRINTF((_T(" %lu"), columnIDs[i]));
        // Advance the candidate column id past every id Jet has already
        // assigned, so the next request doesn't collide with a prior one.
        if (i+1 < numColumns && colIDcount == columnIDs[i]) {
ColIDCollision:
            colIDcount++;
            for (j = 0; j < i; j++)
                if (colIDcount == columnIDs[j])
                    goto ColIDCollision;
        }
    }
    DPRINTF((_T(" }\n")));
    return TRUE;
}
/*****************************************************************************/

// Builds a "+col1\0+col2\0...\0" index key description from the key column
// specs and creates an ascending index named 'keyName' on 'tblID'.
// Returns FALSE on any Jet error.
BOOL SGDatabase::CreateIndex(
    JET_TABLEID tblID,
    const CHAR *keyName,
    DWORD numKeys,
    ColumnSpec **keyColumnSpecs)
{
    JET_ERR jetErr;
    HRESULT r;
    CHAR indexStr[MAX_PATH];
    ColumnSpec *keyColumnSpec;
    DWORD indexStrLen, i;

    ASSERT(sesID != ~0);
    ASSERT(numKeys <= MAX_KEYS);

    indexStrLen = 0;
    for (i = 0; i < numKeys; i++) {
        keyColumnSpec = keyColumnSpecs[i];
        indexStr[indexStrLen++] = '+'; // '+' = ascending key segment
        r = StringCbCopyA( indexStr + indexStrLen, sizeof(indexStr), keyColumnSpec->name);
        ASSERT(r == S_OK);
        indexStrLen += strlen(keyColumnSpec->name) + 1;
    }
    indexStr[indexStrLen++] = '\0'; // double-NUL terminates the key list

    jetErr = JetCreateIndex(sesID, tblID, keyName, 0, indexStr, indexStrLen,
        TABLE_DENSITY);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetCreateIndex: jetErr=%ld\n"), jetErr));
        return FALSE;
    }
    return TRUE;
}

/*****************************************************************************/

// Opens existing table 'tblName' and looks up the Jet column id for each
// expected column.  Returns FALSE on any Jet error.
BOOL SGDatabase::OpenTable(
    const CHAR *tblName,
    DWORD numColumns,
    ColumnSpec **columnSpecs,
    JET_COLUMNID *columnIDs,
    JET_TABLEID *tblID)
{
    JET_COLUMNDEF columnDef;
    JET_ERR jetErr;
    ColumnSpec *columnSpec;
    DWORD i;

    ASSERT(sesID != ~0);
    ASSERT(dbID != ~0);
    ASSERT(numColumns <= MAX_COLUMNS);

    jetErr = JetOpenTable(sesID, dbID, tblName, NULL, 0, 0, tblID);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetOpenTable: jetErr=%ld\n"), jetErr));
        return FALSE;
    }
    DPRINTF((_T("JetOpenTable: tblID=%lu colIDs={"), *tblID));

    for (i = 0; i < numColumns; i++) {
        columnSpec = columnSpecs[i];
        jetErr = JetGetTableColumnInfo(sesID, *tblID, columnSpec->name,
            &columnDef, sizeof(JET_COLUMNDEF), JET_ColInfo);
        if (jetErr != JET_errSuccess) {
            DPRINTF((_T("\nJetGetTableColumnInfo: jetErr=%ld\n"), jetErr));
            return FALSE;
        }
        columnIDs[i] = columnDef.columnid;
        DPRINTF((_T(" %lu"), columnIDs[i]));
    }
    DPRINTF((_T(" }\n")));
    return TRUE;
}

/*****************************************************************************/

// Closes a table cursor previously returned by CreateTable/OpenTable.
BOOL SGDatabase::CloseTable(JET_TABLEID tblID)
{
    JET_ERR jetErr;

    ASSERT(sesID != ~0);
    ASSERT(tblID != ~0);

    jetErr = JetCloseTable(sesID, tblID);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetCloseTable: jetErr=%ld\n"), jetErr));
        return FALSE;
    }
    return TRUE;
}

/*****************************************************************************/

// Seeks the cursor to the first record matching the key fields of 'entry' on
// index 'keyName' and sets an inclusive upper index range on the same key, so
// subsequent PositionCursorNext() calls stay within the matching records.
// Returns 1 on match, 0 when no record matches, -1 on Jet error.
LONG SGDatabase::PositionCursor(
    JET_TABLEID tblID,
    const CHAR *keyName,
    const VOID *entry,
    DWORD numKeys,
    ColumnSpec **keyColumnSpecs) const
{
    JET_COLTYP coltyp;
    JET_ERR jetErr;
    ColumnSpec *keyColumnSpec;
    const BYTE *dataPtr[MAX_KEYS];
    DWORD cbData[MAX_KEYS], i;

    ASSERT(sesID != ~0);
    ASSERT(numKeys <= MAX_KEYS);

    jetErr = JetSetCurrentIndex(sesID, tblID, keyName);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetSetCurrentIndex: jetErr=%ld\n"), jetErr));
        return -1;
    }

    // Build the search key, one segment per key column.
    for (i = 0; i < numKeys; i++) {
        keyColumnSpec = keyColumnSpecs[i];
        coltyp = keyColumnSpec->coltyp;
        dataPtr[i] = (const BYTE *)entry + keyColumnSpec->offset;
        if (coltyp == JET_coltypBinary) {
            // Binary columns hold a pointer to a NUL-terminated TCHAR string;
            // dereference it and measure the string (including terminator).
            dataPtr[i] = *(BYTE **)dataPtr[i];
            ASSERT(dataPtr[i] != NULL);
            cbData[i] = (_tcslen((const TCHAR *)dataPtr[i]) + 1) * sizeof(TCHAR);
        } else
            cbData[i] = keyColumnSpec->size;
        jetErr = JetMakeKey(sesID, tblID, dataPtr[i], cbData[i],
            i == 0 ? JET_bitNewKey : 0);
        if (jetErr != JET_errSuccess) {
            DPRINTF((_T("JetMakeKey: jetErr=%ld\n"), jetErr));
            return -1;
        }
    }

    jetErr = JetSeek(sesID, tblID, JET_bitSeekEQ);
    if (jetErr != JET_errSuccess) {
        if (jetErr == JET_errRecordNotFound)
            return 0;
        DPRINTF((_T("JetSeek: jetErr=%ld\n"), jetErr));
        return -1;
    }

    // JetSeek consumed the key; rebuild it to establish the index range.
    for (i = 0; i < numKeys; i++) {
        jetErr = JetMakeKey(sesID, tblID, dataPtr[i], cbData[i],
            i == 0 ? JET_bitNewKey : 0);
        if (jetErr != JET_errSuccess) {
            DPRINTF((_T("JetMakeKey: jetErr=%ld\n"), jetErr));
            return -1;
        }
    }

    jetErr = JetSetIndexRange(sesID, tblID,
        JET_bitRangeUpperLimit | JET_bitRangeInclusive);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetSetIndexRange: jetErr=%ld\n"), jetErr));
        return -1;
    }
    return 1;
}

/*****************************************************************************/

// Moves the cursor to the first record on index 'keyName'.
// Returns 1 on success, 0 when the table is empty, -1 on Jet error.
LONG SGDatabase::PositionCursorFirst(
    JET_TABLEID tblID,
    const CHAR *keyName) const
{
    JET_ERR jetErr;

    ASSERT(sesID != ~0);

    jetErr = JetSetCurrentIndex(sesID, tblID, keyName);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetSetCurrentIndex: jetErr=%ld\n"), jetErr));
        return -1;
    }

    jetErr = JetMove(sesID, tblID, JET_MoveFirst, 0);
    if (jetErr != JET_errSuccess) {
        if (jetErr == JET_errNoCurrentRecord)
            return 0;
        DPRINTF((_T("JetMove: jetErr=%ld\n"), jetErr));
        return -1;
    }
    return 1;
}

/*****************************************************************************/

// Advances the cursor one record on the current index (honoring any index
// range set by PositionCursor).  Returns 1 on success, 0 when past the last
// record, -1 on Jet error.
LONG SGDatabase::PositionCursorNext(JET_TABLEID tblID) const
{
    JET_ERR jetErr;

    ASSERT(sesID != ~0);

    jetErr = JetMove(sesID, tblID, JET_MoveNext, 0);
    if (jetErr != JET_errSuccess) {
        if (jetErr == JET_errNoCurrentRecord)
            return 0;
        DPRINTF((_T("JetMove: jetErr=%ld\n"), jetErr));
        return -1;
    }
    return 1;
}

/*****************************************************************************/

// Moves the cursor to the last record on index 'keyName'.
// Returns 1 on success, 0 when the table is empty, -1 on Jet error.
LONG SGDatabase::PositionCursorLast(
    JET_TABLEID tblID,
    const CHAR *keyName) const
{
    JET_ERR jetErr;

    ASSERT(sesID != ~0);

    jetErr = JetSetCurrentIndex(sesID, tblID, keyName);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetSetCurrentIndex: jetErr=%ld\n"), jetErr));
        return -1;
    }

    jetErr = JetMove(sesID, tblID, JET_MoveLast, 0);
    if (jetErr != JET_errSuccess) {
        if (jetErr == JET_errNoCurrentRecord)
            return 0;
        DPRINTF((_T("JetMove: jetErr=%ld\n"), jetErr));
        return -1;
    }
    return 1;
}

/*****************************************************************************/

// Inserts a new record built from 'entry' into 'tblID'.  Autoincrement
// columns are skipped (Jet fills them in).  Binary/long-binary columns hold a
// pointer to a NUL-terminated TCHAR string; a NULL pointer is stored as a
// zero-length value.  Returns FALSE on any Jet error.
BOOL SGDatabase::PutData(
    JET_TABLEID tblID,
    const VOID *entry,
    DWORD numColumns,
    ColumnSpec **columnSpecs,
    const JET_COLUMNID *columnIDs)
{
    JET_COLTYP coltyp;
    JET_ERR jetErr;
    ColumnSpec *columnSpec;
    const BYTE *dataPtr;
    DWORD cbData, i;

    ASSERT(sesID != ~0);
    ASSERT(numColumns <= MAX_COLUMNS);

    jetErr = JetPrepareUpdate(sesID, tblID, JET_prepInsert);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetPrepareUpdate: jetErr=%ld\n"), jetErr));
        return FALSE;
    }

    for (i = 0; i < numColumns; i++) {
        columnSpec = columnSpecs[i];
        coltyp = columnSpec->coltyp;
        if (columnSpec->grbit != JET_bitColumnAutoincrement) {
            dataPtr = (const BYTE *)entry + columnSpec->offset;
            if (coltyp == JET_coltypBinary || coltyp == JET_coltypLongBinary) {
                dataPtr = *(BYTE **)dataPtr;
                cbData = dataPtr != NULL ?
                    (_tcslen((const TCHAR *)dataPtr) + 1) * sizeof(TCHAR) : 0;
            } else
                cbData = columnSpec->size;

            // May want to convert to JetSetColumns
            jetErr = JetSetColumn(sesID, tblID, columnIDs[i], dataPtr, cbData, 0, NULL);
            if (jetErr != JET_errSuccess) {
                DPRINTF((_T("JetSetColumn: jetErr=%ld\n"), jetErr));
                return FALSE;
            }
        }
    }

    jetErr = JetUpdate(sesID, tblID, NULL, 0, NULL);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetUpdate: jetErr=%ld\n"), jetErr));
        return FALSE;
    }
    return TRUE;
}

/*****************************************************************************/

// Copies the current record's columns into 'entry'.  Only columns whose bit
// is set in 'includeMask' (bit i = column i) are fetched; variable-length
// columns with a NULL destination pointer are skipped, and a Jet NULL value
// is materialized as an empty string.  Returns FALSE on Jet error or when
// the retrieved size disagrees with the expected size.
BOOL SGDatabase::RetrieveData(
    JET_TABLEID tblID,
    VOID *entry,
    DWORD numColumns,
    ColumnSpec **columnSpecs,
    const JET_COLUMNID *columnIDs,
    DWORD includeMask) const
{
    JET_COLTYP coltyp;
    JET_ERR jetErr;
    ColumnSpec *columnSpec;
    BYTE *dataPtr;
    DWORD cbData, cbActual, i;
    BOOL varCol;

    ASSERT(sesID != ~0);
    ASSERT(numColumns <= MAX_COLUMNS);

    // May want to convert to JetRetrieveColumns
    for (i = 0; i < numColumns; i++)
        if ((includeMask & (1U << i)) != 0) {
            columnSpec = columnSpecs[i];
            coltyp = columnSpec->coltyp;
            varCol = coltyp == JET_coltypBinary || coltyp == JET_coltypLongBinary;
            dataPtr = (BYTE *)entry + columnSpec->offset;
            if (varCol)
                dataPtr = *(BYTE **)dataPtr;
            if (dataPtr != NULL) {
                jetErr = JetRetrieveColumn(sesID, tblID, columnIDs[i], dataPtr,
                    columnSpec->size, &cbActual, 0, NULL);
                if (jetErr == JET_errSuccess)
                    cbData = varCol ?
                        (_tcslen((TCHAR *)dataPtr) + 1) * sizeof(TCHAR) :
                        columnSpec->size;
                else if (varCol && jetErr == JET_wrnColumnNull) {
                    *(TCHAR *)dataPtr = _T('\0');
                    cbData = 0;
                } else {
                    DPRINTF((_T("JetRetrieveColumn: jetErr=%ld\n"), jetErr));
                    return FALSE;
                }
                // Sanity check: size Jet reported vs. size we expect.
                if (cbActual != cbData) {
                    DPRINTF((_T("JetRetrieveColumn: cbActual=%lu!=%lu\n"),
                        cbActual, cbData));
                    return FALSE;
                }
            }
        }
    return TRUE;
}

/*****************************************************************************/

// Deletes the current record and every following record reachable via
// PositionCursorNext() (i.e. the rest of the current index range).
// Returns the number of records deleted, or a negative value on error.
LONG SGDatabase::Delete(JET_TABLEID tblID)
{
    JET_ERR jetErr;
    LONG count, status;

    count = 0;
    ASSERT(sesID != ~0);
    while (TRUE) {
        jetErr = JetDelete(sesID, tblID);
        if (jetErr != JET_errSuccess) {
            DPRINTF((_T("JetDelete: jetErr=%ld\n"), jetErr));
            return -1;
        }
        count++;
        status = PositionCursorNext(tblID);
        if (status < 0)
            return status;
        if (status == 0)
            return count;
    }
}

/*****************************************************************************/

// Counts the records on index 'keyName'.  Returns 0 for an empty table and a
// negative value on error.
LONG SGDatabase::Count(
    JET_TABLEID tblID,
    const CHAR *keyName) const
{
    JET_ERR jetErr;
    LONG count, status;

    count = 0;
    status = PositionCursorFirst(tblID, keyName);
    if (status < 0)
        return status;
    if (status == 0)
        return 0;
    ASSERT(sesID != ~0);
    jetErr = JetIndexRecordCount(sesID, tblID, (ULONG *) &count, MAXLONG);
    if (jetErr != JET_errSuccess) {
        if (jetErr == JET_errNoCurrentRecord)
            return 0;
        DPRINTF((_T("JetIndexRecordCount: jetErr=%ld\n"), jetErr));
        return -1;
    }
    return count;
}

/*****************************************************************************/
/********************** SGDatabase class public methods **********************/
/*****************************************************************************/

// Resets all per-object state to "closed" (~0U ids, zero counts) and makes
// sure the shared Jet engine is up.  Instance accounting pairs with the
// destructor's TerminateEngine() call.
SGDatabase::SGDatabase()
{
    fileName = NULL;
    sesID = tableID = queueID = stackID = listID = ~0U;
    dbID = ~0U;
    numTableEntries = numQueueEntries = numStackEntries = numListEntries = 0;
    numUncommittedTableEntries = numUncommittedQueueEntries =
        numUncommittedStackEntries = numUncommittedListEntries = 0;
    inTransaction = FALSE;
    if (!jetInitialized)
        InitializeEngine();
    numInstances++;
}

/*****************************************************************************/

// Closes the database (if still open) and, when this was the last live
// SGDatabase, shuts the shared Jet engine down.
SGDatabase::~SGDatabase()
{
    Close();

    ASSERT(fileName == NULL);
    ASSERT(sesID == ~0U);
    ASSERT(dbID == ~0U);
    ASSERT(tableID == ~0U);
    ASSERT(queueID == ~0U);
    ASSERT(stackID == ~0U);
    ASSERT(listID == ~0U);
    ASSERT(numTableEntries == 0);
    ASSERT(numQueueEntries == 0);
    ASSERT(numStackEntries == 0);
    ASSERT(numListEntries == 0);
    ASSERT(numUncommittedTableEntries == 0);
    ASSERT(numUncommittedQueueEntries == 0);
    ASSERT(numUncommittedStackEntries == 0);
    ASSERT(numUncommittedListEntries == 0);
    ASSERT(!inTransaction);

    if (--numInstances == 0 && jetInitialized) {
        TerminateEngine();
    }
}

/*****************************************************************************/

// Creates a fresh database file 'dbName' with the table/queue/stack/list
// tables and their indices.  An existing file with the same name is deleted
// and recreated.  On any failure the partial state is torn down via Close()
// and FALSE is returned.
BOOL SGDatabase::Create(
    const TCHAR *dbName)
{
    CHAR szConnect[MAX_PATH];
    DWORD strSize1;
    JET_ERR jetErr;

    ASSERT(fileName == NULL);
    ASSERT(sesID == ~0U);
    ASSERT(dbID == ~0U);
    ASSERT(tableID == ~0U);
    ASSERT(queueID == ~0U);
    ASSERT(stackID == ~0U);
    ASSERT(listID == ~0U);
    ASSERT(numTableEntries == 0);
    ASSERT(numQueueEntries == 0);
    ASSERT(numStackEntries == 0);
    ASSERT(numListEntries == 0);
    ASSERT(numUncommittedTableEntries == 0);
    ASSERT(numUncommittedQueueEntries == 0);
    ASSERT(numUncommittedStackEntries == 0);
    ASSERT(numUncommittedListEntries == 0);
    ASSERT(!inTransaction);

    if (!jetInitialized && !InitializeEngine())
        return FALSE;
    ASSERT(jetInitialized);

    jetErr = JetBeginSession(instance, &sesID, USERNAME, PASSWORD);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetBeginSession: jetErr=%ld\n"), jetErr));
        Close();
        return FALSE;
    }
    DPRINTF((_T("JetBeginSession: sesID=%lu\n"), sesID));

    // Jet wants an ANSI file name; "%S" narrows the TCHAR string.
    ASSERT(fileName == NULL);
    strSize1 = _tcslen(dbName) + 1;
    fileName = new CHAR[strSize1];
    ASSERT(fileName != NULL);
    (void)StringCchPrintfA(fileName, strSize1, "%S", dbName);

    (void)StringCbPrintfA(szConnect, sizeof(szConnect),
        ";COUNTRY=%u;LANGID=0x%04x;CP=%u", COUNTRY_CODE, LANG_ID, CODE_PAGE);

    //
    // Create the database
    //

    jetErr = JetCreateDatabase(sesID, fileName, szConnect, &dbID, 0);
    if (jetErr == JET_errSuccess) {
        DPRINTF((_T("JetCreateDatabase(\"%s\"): dbID=%lu\n"),dbName, dbID));
    } else {
        if (jetErr != JET_errDatabaseDuplicate) {
            DPRINTF((_T("JetCreateDatabase(\"%s\"): jetErr=%ld\n"), dbName, jetErr));
            Close();
            return FALSE;
        }
        // A stale database already exists — delete it and retry once.
        if (!DeleteFile(dbName)) {
            DPRINTF((_T("JetCreateDatabase: \"%s\" already exists and can't be deleted: %lu\n"),
                dbName, GetLastError()));
            Close();
            return FALSE;
        }
        jetErr = JetCreateDatabase(sesID, fileName, szConnect, &dbID, 0);
        if (jetErr != JET_errSuccess) {
            DPRINTF((_T("JetCreateDatabase: deleted old \"%s\"; jetErr=%ld\n"),
                dbName, jetErr));
            Close();
            return FALSE;
        }
        DPRINTF((_T("JetCreateDatabase: deleted old \"%s\"; new dbID=%lu\n"),
            dbName, dbID));
    }

    // Build the four tables and their secondary indices.
    if (!CreateTable(TABLE_NAME, TABLE_NCOLS, tableColumnSpecs,
        tableColumnIDs, &tableID)) {
        Close();
        return FALSE;
    }
    if (!CreateIndex(tableID, TABLE_KEY_NAME_FILE_ID, TABLE_KEY_NCOLS_FILE_ID, tableKeyFileID)
     || !CreateIndex(tableID, TABLE_KEY_NAME_ATTR, TABLE_KEY_NCOLS_ATTR, tableKeyAttr)
     || !CreateIndex(tableID, TABLE_KEY_NAME_CSID, TABLE_KEY_NCOLS_CSID, tableKeyCSID)) {
        Close();
        return FALSE;
    }

    if (!CreateTable(QUEUE_NAME, QUEUE_NCOLS, queueColumnSpecs,
        queueColumnIDs, &queueID)) {
        Close();
        return FALSE;
    }
    if (!CreateIndex(queueID, QUEUE_KEY_NAME_READY_TIME, QUEUE_KEY_NCOLS_READY_TIME, queueKeyReadyTime)
     || !CreateIndex(queueID, QUEUE_KEY_NAME_FILE_ID, QUEUE_KEY_NCOLS_FILE_ID, queueKeyFileID)
     || !CreateIndex(queueID, QUEUE_KEY_NAME_ORDER, QUEUE_KEY_NCOLS_ORDER, queueKeyOrder)) {
        Close();
        return FALSE;
    }

    if (!CreateTable(STACK_NAME, STACK_NCOLS, stackColumnSpecs,
        stackColumnIDs, &stackID)) {
        Close();
        return FALSE;
    }
    if (!CreateIndex(stackID, STACK_KEY_NAME_FILE_ID, STACK_KEY_NCOLS_FILE_ID, stackKeyFileID)
     || !CreateIndex(stackID, STACK_KEY_NAME_ORDER, STACK_KEY_NCOLS_ORDER, stackKeyOrder)) {
        Close();
        return FALSE;
    }

    if (!CreateTable(LIST_NAME, LIST_NCOLS, listColumnSpecs,
        listColumnIDs, &listID)) {
        Close();
        return FALSE;
    }
    if (!CreateIndex(listID, LIST_KEY_NAME_NAME, LIST_KEY_NCOLS_NAME, listKeyName)) {
        Close();
        return FALSE;
    }

    return TRUE;
}

/*****************************************************************************/

// Attaches and opens existing database 'dbName', opens its four tables, and
// primes the cached record counts.  On any failure the partial state is torn
// down via Close() and FALSE is returned.
// NOTE(review): 'stackEntry' is declared but not used in this visible body —
// confirm whether it is leftover.
BOOL SGDatabase::Open(
    const TCHAR *driveLetterName,
    const TCHAR *dbName,
    BOOL is_log_drive)
{
    SGNativeStackEntry stackEntry;
    JET_ERR jetErr;
    DWORD strSize1;
    LONG status;

    ASSERT(sesID == ~0U);
    ASSERT(dbID == ~0U);
    ASSERT(tableID == ~0U);
    ASSERT(queueID == ~0U);
    ASSERT(stackID == ~0U);
    ASSERT(listID == ~0U);
    ASSERT(numTableEntries == 0);
    ASSERT(numQueueEntries == 0);
    ASSERT(numStackEntries == 0);
    ASSERT(numListEntries == 0);
    ASSERT(numUncommittedTableEntries == 0);
    ASSERT(numUncommittedQueueEntries == 0);
    ASSERT(numUncommittedStackEntries == 0);
    ASSERT(numUncommittedListEntries == 0);
    ASSERT(!inTransaction);

    // If this isn't the log drive, delete any log files that may exist
    // from a previous run.  This is an abnormal condition that can arise
    // when the log drive is changing because of problems detected during
    // a previous startup.

    if (!is_log_drive) {
        WIN32_FIND_DATA findData;
        HANDLE fHandle;
        BOOL success;
        TFileName fName, delName;

        delName.assign(logDir);
        delName.append(_T("\\"));
        delName.append(DATABASE_DELETE_LOG_FILE_NAME);
        fHandle = FindFirstFile(delName.name, &findData);
        if (fHandle != INVALID_HANDLE_VALUE) {
            do {
                if ((findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) == 0) {
                    success = GetParentName(delName.name, &fName);
                    ASSERT(success); // internal error if failed
                    fName.append(_T("\\"));
                    fName.append(findData.cFileName);
                    if (!DeleteFile(fName.name)) {
                        DPRINTF((_T("SGDatabase::Open: can't delete \"%s\", %d\n"),
                            delName.name, GetLastError()));
                    }
                }
            } while (FindNextFile(fHandle, &findData));
            success = FindClose(fHandle);
            ASSERT(success);
            fHandle = NULL;
        }
    }

    if (!jetInitialized && !InitializeEngine()) {
        Close();
        return FALSE;
    }
    ASSERT(jetInitialized);

    jetErr = JetBeginSession(instance, &sesID, USERNAME, PASSWORD);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetBeginSession: jetErr=%ld\n"), jetErr));
        Close();
        return FALSE;
    }
    DPRINTF((_T("%s: JetBeginSession: sesID=%lu\n"), driveLetterName, sesID));

    // Jet wants an ANSI file name; "%S" narrows the TCHAR string.
    ASSERT(fileName == NULL);
    strSize1 = _tcslen(dbName) + 1;
    fileName = new CHAR[strSize1];
    ASSERT(fileName != NULL);
    (void)StringCchPrintfA(fileName, strSize1, "%S", dbName);

    //
    // Open the database
    //

    jetErr = JetAttachDatabase(sesID, fileName, 0);
    if (jetErr != JET_errSuccess && jetErr != JET_wrnDatabaseAttached) {
        if (jetErr == JET_errFileNotFound) {
            DPRINTF((_T("JetAttachDatabase: \"%s\" not found\n"), dbName));
        } else {
            DPRINTF((_T("JetAttachDatabase(\"%s\"): jetErr=%ld\n"), dbName, jetErr));
        }
        Close();
        return FALSE;
    }

    jetErr = JetOpenDatabase(sesID, fileName, NULL, &dbID, 0);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetOpenDatabase(\"%s\"): jetErr=%ld\n"), dbName, jetErr));
        Close();
        return FALSE;
    }
    DPRINTF((_T("%s: JetOpenDatabase(\"%s\"): dbID=%lu\n"),
        driveLetterName, dbName, dbID));

    if (!OpenTable(TABLE_NAME, TABLE_NCOLS, tableColumnSpecs,
        tableColumnIDs, &tableID)) {
        Close();
        return FALSE;
    }

    if (!OpenTable(QUEUE_NAME, QUEUE_NCOLS, queueColumnSpecs,
        queueColumnIDs, &queueID)) {
        Close();
        return FALSE;
    }

    if (!OpenTable(STACK_NAME, STACK_NCOLS, stackColumnSpecs,
        stackColumnIDs, &stackID)) {
        Close();
        return FALSE;
    }

    if (!OpenTable(LIST_NAME, LIST_NCOLS, listColumnSpecs,
        listColumnIDs, &listID)) {
        Close();
        return FALSE;
    }

    // Prime the cached per-table record counts.
    if ((numTableEntries = Count(tableID, TABLE_KEY_NAME_FILE_ID)) < 0
     || (numQueueEntries = Count(queueID, QUEUE_KEY_NAME_READY_TIME)) < 0
     || (numStackEntries = Count(stackID, STACK_KEY_NAME_FILE_ID)) < 0
     || (numListEntries = Count(listID, LIST_KEY_NAME_NAME)) < 0) {
        Close();
        return FALSE;
    }

    return TRUE;
}

/*****************************************************************************/

// Commits any open transaction, closes all table cursors, closes/detaches
// the database, ends the session, and releases the file name buffer.  Safe
// to call on a partially opened object; returns FALSE if any step failed.
// NOTE(review): local 'strLen' is never used — confirm it is leftover.
BOOL SGDatabase::Close()
{
    JET_ERR jetErr;
    int strLen;
    BOOL success = TRUE;

    if (inTransaction) {
        success = CommitTransaction();
        inTransaction = FALSE;
    }

    ASSERT(numUncommittedTableEntries == 0);
    ASSERT(numUncommittedQueueEntries == 0);
    ASSERT(numUncommittedStackEntries == 0);
    ASSERT(numUncommittedListEntries == 0);

    if (tableID != ~0U) {
        if (!CloseTable(tableID))
            success = FALSE;
        tableID = ~0U;
    }
    if (queueID != ~0U) {
        if (!CloseTable(queueID))
            success = FALSE;
        queueID = ~0U;
    }
    if (stackID != ~0U) {
        if (!CloseTable(stackID))
            success = FALSE;
        stackID = ~0U;
    }
    if (listID != ~0U) {
        if (!CloseTable(listID))
            success = FALSE;
        listID = ~0U;
    }

    if (dbID != ~0U) {
        ASSERT(fileName != NULL);
        ASSERT(sesID != ~0U);
        jetErr = JetCloseDatabase(sesID, dbID, 0);
        if (jetErr != JET_errSuccess) {
            DPRINTF((_T("JetCloseDatabase: jetErr=%ld\n"), jetErr));
            success = FALSE;
        }
        jetErr = JetDetachDatabase(sesID, fileName);
        if (jetErr != JET_errSuccess) {
            DPRINTF((_T("JetDetachDatabase: jetErr=%ld\n"), jetErr));
            success = FALSE;
        }
        dbID = ~0U;
    }

    if (sesID != ~0U) {
        jetErr = JetEndSession(sesID, 0);
        if (jetErr != JET_errSuccess) {
            DPRINTF((_T("JetEndSession: jetErr=%ld\n"), jetErr));
            success = FALSE;
        }
        sesID = ~0U;
    }

    if (fileName != NULL) {
        delete[] fileName;
        fileName = NULL;
    }

    numTableEntries = numQueueEntries = numStackEntries = numListEntries = 0;

    return success;
}

/*****************************************************************************/

// Starts a Jet transaction.  The numUncommitted* counters accumulate changes
// until CommitTransaction()/AbortTransaction() resolves them.
// NOTE(review): returns -1 (which is truthy as a BOOL) when there is no open
// session — this looks like it should be FALSE; confirm caller expectations.
BOOL SGDatabase::BeginTransaction()
{
    JET_ERR jetErr;

    ASSERT(!inTransaction);
    ASSERT(numUncommittedTableEntries == 0);
    ASSERT(numUncommittedQueueEntries == 0);
    ASSERT(numUncommittedStackEntries == 0);
    ASSERT(numUncommittedListEntries == 0);

    if (sesID == ~0U)
        return -1;

    jetErr = JetBeginTransaction(sesID);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetBeginTransaction: jetErr=%ld\n"), jetErr));
        return FALSE;
    }
    inTransaction = TRUE;
    return TRUE;
}

/*****************************************************************************/

// Commits the open transaction and folds the uncommitted entry counters into
// the committed totals.
// NOTE(review): same -1-as-BOOL return as BeginTransaction() — confirm.
BOOL SGDatabase::CommitTransaction()
{
    JET_ERR jetErr;

    ASSERT(inTransaction);

    if (sesID == ~0U)
        return -1;

    jetErr = JetCommitTransaction(sesID, 0);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetCommitTransaction: jetErr=%ld\n"), jetErr));
        return FALSE;
    }

    numTableEntries += numUncommittedTableEntries;
    numQueueEntries += numUncommittedQueueEntries;
    numStackEntries += numUncommittedStackEntries;
    numListEntries += numUncommittedListEntries;

    numUncommittedTableEntries = 0;
    numUncommittedQueueEntries = 0;
    numUncommittedStackEntries = 0;
    numUncommittedListEntries = 0;

    inTransaction = FALSE;
    return TRUE;
}

/*****************************************************************************/

// Rolls the open transaction back and discards the uncommitted entry
// counters.  inTransaction is cleared even if the rollback fails.
// NOTE(review): same -1-as-BOOL return as BeginTransaction() — confirm.
BOOL SGDatabase::AbortTransaction()
{
    JET_ERR jetErr;

    ASSERT(inTransaction);

    inTransaction = FALSE;

    if (sesID == ~0U)
        return -1;

    jetErr = JetRollback(sesID, 0);
    if (jetErr != JET_errSuccess) {
        DPRINTF((_T("JetRollback: jetErr=%ld\n"), jetErr));
        return FALSE;
    }

    numUncommittedTableEntries = 0;
    numUncommittedQueueEntries = 0;
    numUncommittedStackEntries = 0;
    numUncommittedListEntries = 0;

    return TRUE;
}

/******************************* Table methods *******************************/

LONG SGDatabase::TablePut(const SGNativeTableEntry *entry)
{
    BOOL alreadyInTransaction = inTransaction;

    ASSERT(entry !=
NULL);
    if (sesID == ~0U || dbID == ~0U || tableID == ~0U)
        return -1;
    // Wrap the insert in a transaction unless the caller already has one.
    if (!inTransaction && !BeginTransaction())
        return -1;
    ASSERT(inTransaction);
    if (!PutData(tableID, entry, TABLE_NCOLS, tableColumnSpecs, tableColumnIDs)) {
        if (!alreadyInTransaction)
            AbortTransaction();
        return -1;
    }
    numUncommittedTableEntries++;
    if (!alreadyInTransaction && !CommitTransaction())
        return -1;
    return 1;
}

/*****************************************************************************/

// Position on the first row whose fileID matches entry->fileID and read the
// remaining columns into *entry.  Returns 1 = found, 0 = no match, -1 = error.
LONG SGDatabase::TableGetFirstByFileID(SGNativeTableEntry *entry) const
{
    LONG status;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || tableID == ~0U)
        return -1;
    status = PositionCursor(tableID, TABLE_KEY_NAME_FILE_ID, entry,
                            TABLE_KEY_NCOLS_FILE_ID, tableKeyFileID);
    if (status <= 0)
        return status;
    // The key column itself is already in *entry, so exclude it from retrieval.
    return RetrieveData(tableID, entry, TABLE_NCOLS, tableColumnSpecs,
                        tableColumnIDs, TABLE_EXCLUDE_FILE_ID_MASK ) ? 1 : -1;
}

/*****************************************************************************/

// Same as above, keyed by the attribute index.  1 = found, 0 = none, -1 = error.
LONG SGDatabase::TableGetFirstByAttr(SGNativeTableEntry *entry) const
{
    LONG status;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || tableID == ~0U)
        return -1;
    status = PositionCursor(tableID, TABLE_KEY_NAME_ATTR, entry,
                            TABLE_KEY_NCOLS_ATTR, tableKeyAttr);
    if (status <= 0)
        return status;
    return RetrieveData(tableID, entry, TABLE_NCOLS, tableColumnSpecs,
                        tableColumnIDs, TABLE_EXCLUDE_ATTR_MASK) ? 1 : -1;
}

/*****************************************************************************/

// Same as above, keyed by the CS index.  1 = found, 0 = none, -1 = error.
LONG SGDatabase::TableGetFirstByCSIndex(SGNativeTableEntry *entry) const
{
    LONG status;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || tableID == ~0U)
        return -1;
    status = PositionCursor(tableID, TABLE_KEY_NAME_CSID, entry,
                            TABLE_KEY_NCOLS_CSID, tableKeyCSID);
    if (status <= 0)
        return status;
    return RetrieveData(tableID, entry, TABLE_NCOLS, tableColumnSpecs,
                        tableColumnIDs, TABLE_EXCLUDE_CS_INDEX_MASK) ?
        1 : -1;
}

/*****************************************************************************/

// Advance the cursor positioned by a TableGetFirst* call and read the next
// matching row.  Returns 1 = row read, 0 = no more rows, -1 = error.
LONG SGDatabase::TableGetNext(SGNativeTableEntry *entry) const
{
    LONG status;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || tableID == ~0U)
        return -1;
    status = PositionCursorNext(tableID);
    if (status <= 0)
        return status;
    return RetrieveData(tableID, entry, TABLE_NCOLS, tableColumnSpecs,
                        tableColumnIDs, GET_ALL_MASK) ? 1 : -1;
}

/*****************************************************************************/

// Delete all rows keyed by fileID, transactionally.  Returns the number of
// rows deleted (>= 0) or -1 on error.
LONG SGDatabase::TableDeleteByFileID(DWORDLONG fileID)
{
    SGNativeTableEntry entry;
    LONG status;
    BOOL alreadyInTransaction = inTransaction;

    if (sesID == ~0U || dbID == ~0U || tableID == ~0U)
        return -1;
    entry.fileID = fileID;
    status = PositionCursor(tableID, TABLE_KEY_NAME_FILE_ID, &entry,
                            TABLE_KEY_NCOLS_FILE_ID, tableKeyFileID);
    if (status <= 0)
        return status;      // nothing to delete, or positioning error
    if (!inTransaction && !BeginTransaction())
        return -1;
    ASSERT(inTransaction);
    status = Delete(tableID);
    if (status < 0) {
        if (!alreadyInTransaction)
            AbortTransaction();
        return -1;
    }
    numUncommittedTableEntries -= status;
    if (!alreadyInTransaction && !CommitTransaction())
        return -1;
    return status;
}

/*****************************************************************************/

// Delete all rows keyed by CS index, transactionally.  Returns the number of
// rows deleted (>= 0) or -1 on error.
LONG SGDatabase::TableDeleteByCSIndex(const CSID *csIndex)
{
    SGNativeTableEntry entry;
    LONG status;
    BOOL alreadyInTransaction = inTransaction;

    ASSERT(csIndex != NULL);
    if (sesID == ~0U || dbID == ~0U || tableID == ~0U)
        return -1;
    entry.csIndex = *csIndex;
    status = PositionCursor(tableID, TABLE_KEY_NAME_CSID, &entry,
                            TABLE_KEY_NCOLS_CSID, tableKeyCSID);
    if (status <= 0)
        return status;
    if (!inTransaction && !BeginTransaction())
        return -1;
    ASSERT(inTransaction);
    status = Delete(tableID);
    if (status < 0) {
        if (!alreadyInTransaction)
            AbortTransaction();
        return -1;
    }
    numUncommittedTableEntries -= status;
    if (!alreadyInTransaction && !CommitTransaction())
        return -1;
    return status;
}
/*****************************************************************************/

// Return the logical row count of the "table" table (committed plus
// uncommitted deltas), or -1 if the database isn't open.
LONG SGDatabase::TableCount() const
{
    LONG numEntries;

    if (sesID == ~0U || dbID == ~0U || tableID == ~0U)
        return -1;
    numEntries = numTableEntries + numUncommittedTableEntries;
    ASSERT(numEntries >= 0);
    // Debug cross-check against the actual on-disk count.
    ASSERT(Count(tableID, TABLE_KEY_NAME_FILE_ID) == numEntries);
    return numEntries;
}

/******************************* Queue methods *******************************/

// Insert one row into the queue table, transactionally.
// Returns 1 on success, -1 on error.
LONG SGDatabase::QueuePut(SGNativeQueueEntry *entry)
{
    BOOL alreadyInTransaction = inTransaction;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || queueID == ~0U)
        return -1;
    if (!inTransaction && !BeginTransaction())
        return -1;
    ASSERT(inTransaction);
    if (!PutData(queueID, entry, QUEUE_NCOLS, queueColumnSpecs, queueColumnIDs)) {
        if (!alreadyInTransaction)
            AbortTransaction();
        return -1;
    }
    numUncommittedQueueEntries++;
    if (!alreadyInTransaction && !CommitTransaction())
        return -1;
    return 1;
}

/*****************************************************************************/

// Read the queue entry with the smallest ready-time (front of the queue).
// Returns 1 = read, 0 = queue empty, -1 = error.
LONG SGDatabase::QueueGetFirst(SGNativeQueueEntry *entry) const
{
    LONG status;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || queueID == ~0U)
        return -1;
    status = PositionCursorFirst(queueID, QUEUE_KEY_NAME_READY_TIME);
    if (status <= 0)
        return status;
    return RetrieveData(queueID, entry, QUEUE_NCOLS, queueColumnSpecs,
                        queueColumnIDs, GET_ALL_MASK) ? 1 : -1;
}

/*****************************************************************************/

// Read the first queue entry matching entry->fileID.
// Returns 1 = found, 0 = no match, -1 = error.
LONG SGDatabase::QueueGetFirstByFileID(SGNativeQueueEntry *entry) const
{
    LONG status;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || queueID == ~0U)
        return -1;
    status = PositionCursor(queueID, QUEUE_KEY_NAME_FILE_ID, entry,
                            QUEUE_KEY_NCOLS_FILE_ID, queueKeyFileID);
    if (status <= 0)
        return status;
    return RetrieveData(queueID, entry, QUEUE_NCOLS, queueColumnSpecs,
                        queueColumnIDs, QUEUE_EXCLUDE_FILE_ID_MASK) ?
        1 : -1;
}

/*****************************************************************************/

// Advance the cursor set by a QueueGet* call and read the next row.
// Returns 1 = row read, 0 = no more rows, -1 = error.
LONG SGDatabase::QueueGetNext(SGNativeQueueEntry *entry) const
{
    LONG status;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || queueID == ~0U)
        return -1;
    status = PositionCursorNext(queueID);
    if (status <= 0)
        return status;
    return RetrieveData(queueID, entry, QUEUE_NCOLS, queueColumnSpecs,
                        queueColumnIDs, GET_ALL_MASK) ? 1 : -1;
}

/*****************************************************************************/

// Delete the queue row with the given order value, transactionally.
// Returns rows deleted (0 or 1) or -1 on error.
LONG SGDatabase::QueueDelete(DWORD order)
{
    SGNativeQueueEntry entry;
    LONG status;
    BOOL alreadyInTransaction = inTransaction;

    ASSERT(sesID != ~0U);
    ASSERT(dbID != ~0U);
    ASSERT(queueID != ~0U);
    entry.order = order;
    status = PositionCursor(queueID, QUEUE_KEY_NAME_ORDER, &entry,
                            QUEUE_KEY_NCOLS_ORDER, queueKeyOrder);
    if (status <= 0)
        return status;
    if (!inTransaction && !BeginTransaction())
        return -1;
    ASSERT(inTransaction);
    status = Delete(queueID);
    ASSERT(status <= 1);        // "order" is expected to be unique
    if (status < 0) {
        if (!alreadyInTransaction)
            AbortTransaction();
        return -1;
    }
    numUncommittedQueueEntries -= status;
    if (!alreadyInTransaction && !CommitTransaction())
        return -1;
    return status;
}

/*****************************************************************************/

// Delete all queue rows for fileID, transactionally.
// Returns rows deleted (>= 0) or -1 on error.
LONG SGDatabase::QueueDeleteByFileID(DWORDLONG fileID)
{
    SGNativeQueueEntry entry;
    LONG status;
    BOOL alreadyInTransaction = inTransaction;

    if (sesID == ~0U || dbID == ~0U || queueID == ~0U)
        return -1;
    entry.fileID = fileID;
    status = PositionCursor(queueID, QUEUE_KEY_NAME_FILE_ID, &entry,
                            QUEUE_KEY_NCOLS_FILE_ID, queueKeyFileID);
    if (status <= 0)
        return status;
    if (!inTransaction && !BeginTransaction())
        return -1;
    ASSERT(inTransaction);
    status = Delete(queueID);
    if (status < 0) {
        if (!alreadyInTransaction)
            AbortTransaction();
        return -1;
    }
    numUncommittedQueueEntries -= status;
    if (!alreadyInTransaction && !CommitTransaction())
        return -1;
    return status;
}

/*****************************************************************************/
// Return the logical row count of the queue (committed plus uncommitted
// deltas), or -1 if the database isn't open.
LONG SGDatabase::QueueCount() const
{
    LONG numEntries;

    if (sesID == ~0U || dbID == ~0U || queueID == ~0U)
        return -1;
    numEntries = numQueueEntries + numUncommittedQueueEntries;
    ASSERT(numEntries >= 0);
    //
    // This appears to be a bogus assert.  I don't believe there is proper
    // syncronziation on this test because if I do a GO the system resyncs
    // the count properly and continues on.
    // Nealch (4/9/02)
    //
    //ASSERT(Count(queueID, QUEUE_KEY_NAME_READY_TIME) == numEntries);
    return numEntries;
}

/******************************* Stack methods *******************************/

// Push fileID onto the stack, transactionally.  When "done" is set the entry
// gets order 0 (a sentinel — see StackGetTop); otherwise it gets
// (current top order + 1), or 1 for an empty stack.
// Returns 1 on success, -1 on error.
LONG SGDatabase::StackPut(DWORDLONG fileID, BOOL done)
{
    SGNativeStackEntry entry;
    LONG status;
    BOOL alreadyInTransaction = inTransaction;

    if (sesID == ~0U || dbID == ~0U || stackID == ~0U)
        return -1;
    if (done)
        entry.order = 0;
    else {
        // Find the current top to compute the next order value.
        status = PositionCursorLast(stackID, STACK_KEY_NAME_ORDER);
        if (status < 0)
            return -1;
        if (status == 0)
            entry.order = 1;
        else {
            if (!RetrieveData(stackID, &entry, STACK_NCOLS, stackColumnSpecs,
                              stackColumnIDs, STACK_GET_ORDER_ONLY_MASK))
                return -1;
            entry.order++;
        }
    }
    entry.fileID = fileID;
    if (!inTransaction && !BeginTransaction())
        return -1;
    ASSERT(inTransaction);
    if (!PutData(stackID, &entry, STACK_NCOLS, stackColumnSpecs, stackColumnIDs)) {
        if (!alreadyInTransaction)
            AbortTransaction();
        return -1;
    }
    numUncommittedStackEntries++;
    if (!alreadyInTransaction && !CommitTransaction())
        return -1;
    return 1;
}

/*****************************************************************************/

// Read the entry with the highest order value (top of stack).
// Returns 1 = top read, 0 = stack empty or top is the order-0 "done"
// sentinel, -1 = error.
LONG SGDatabase::StackGetTop(SGNativeStackEntry *entry) const
{
    LONG status;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || stackID == ~0U)
        return -1;
    status = PositionCursorLast(stackID, STACK_KEY_NAME_ORDER);
    if (status <= 0)
        return status;
    status = RetrieveData(stackID, entry, STACK_NCOLS, stackColumnSpecs,
                          stackColumnIDs, GET_ALL_MASK);
    if (status < 0)
        return status;
    ASSERT(status == 1);
    return entry->order == 0 ?
        0 : 1;
}

/*****************************************************************************/

// Read the first stack entry matching entry->fileID.
// Returns 1 = found, 0 = no match, -1 = error.
LONG SGDatabase::StackGetFirstByFileID(SGNativeStackEntry *entry) const
{
    LONG status;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || stackID == ~0U)
        return -1;
    status = PositionCursor(stackID, STACK_KEY_NAME_FILE_ID, entry,
                            STACK_KEY_NCOLS_FILE_ID, stackKeyFileID);
    if (status <= 0)
        return status;
    return RetrieveData(stackID, entry, STACK_NCOLS, stackColumnSpecs,
                        stackColumnIDs, STACK_EXCLUDE_FILE_ID_MASK) ? 1 : -1;
}

/*****************************************************************************/

// Advance the cursor set by a StackGet* call and read the next row.
// Returns 1 = row read, 0 = no more rows, -1 = error.
LONG SGDatabase::StackGetNext(SGNativeStackEntry *entry) const
{
    LONG status;

    ASSERT(entry != NULL);
    if (sesID == ~0U || dbID == ~0U || stackID == ~0U)
        return -1;
    status = PositionCursorNext(stackID);
    if (status <= 0)
        return status;
    return RetrieveData(stackID, entry, STACK_NCOLS, stackColumnSpecs,
                        stackColumnIDs, GET_ALL_MASK) ? 1 : -1;
}

/*****************************************************************************/

// Delete stack rows with the given order value, transactionally.  order 0 is
// the "done" sentinel and may match more than one row (see the ASSERT).
// Returns rows deleted (>= 0) or -1 on error.
LONG SGDatabase::StackDelete(DWORD order)
{
    SGNativeStackEntry entry;
    LONG status;
    BOOL alreadyInTransaction = inTransaction;

    if (sesID == ~0U || dbID == ~0U || stackID == ~0U)
        return -1;
    entry.order = order;
    status = PositionCursor(stackID, STACK_KEY_NAME_ORDER, &entry,
                            STACK_KEY_NCOLS_ORDER, stackKeyOrder);
    if (status <= 0)
        return status;
    if (!inTransaction && !BeginTransaction())
        return -1;
    ASSERT(inTransaction);
    status = Delete(stackID);
    ASSERT(order == 0 || status <= 1);
    if (status < 0) {
        if (!alreadyInTransaction)
            AbortTransaction();
        return -1;
    }
    numUncommittedStackEntries -= status;
    if (!alreadyInTransaction && !CommitTransaction())
        return -1;
    return status;
}

/*****************************************************************************/

// Delete all stack rows for fileID, transactionally.
// Returns rows deleted (>= 0) or -1 on error.
LONG SGDatabase::StackDeleteByFileID(DWORDLONG fileID)
{
    SGNativeStackEntry entry;
    LONG status;
    BOOL alreadyInTransaction = inTransaction;

    if (sesID == ~0U || dbID == ~0U || stackID == ~0U)
        return -1;
    entry.fileID =
fileID; status = PositionCursor(stackID, STACK_KEY_NAME_FILE_ID, &entry, STACK_KEY_NCOLS_FILE_ID, stackKeyFileID); if (status <= 0) return status; if (!inTransaction && !BeginTransaction()) return -1; ASSERT(inTransaction); status = Delete(stackID); if (status < 0) { if (!alreadyInTransaction) AbortTransaction(); return -1; } numUncommittedStackEntries -= status; if (!alreadyInTransaction && !CommitTransaction()) return -1; return status; } /*****************************************************************************/ LONG SGDatabase::StackCount() const { LONG numEntries; if (sesID == ~0U || dbID == ~0U || stackID == ~0U) return -1; numEntries = numStackEntries + numUncommittedStackEntries; ASSERT(numEntries >= 0); ASSERT(Count(stackID, STACK_KEY_NAME_ORDER) == numEntries); return numEntries; } /******************************* List methods ********************************/ LONG SGDatabase::ListWrite(const SGNativeListEntry *entry) { LONG status; BOOL alreadyInTransaction = inTransaction; ASSERT(entry != NULL); ASSERT(entry->name != NULL); if (sesID == ~0U || dbID == ~0U || listID == ~0U) return -1; // May want to overwrite the entry directly instead of deleting and inserting if (!inTransaction && !BeginTransaction()) return -1; ASSERT(inTransaction); status = ListDelete(entry->name); ASSERT(status <= 1); if (status < 0 || !PutData(listID, entry, LIST_NCOLS, listColumnSpecs, listColumnIDs)) { if (!alreadyInTransaction) AbortTransaction(); return -1; } if (status == 0) numUncommittedListEntries++; if (!alreadyInTransaction && !CommitTransaction()) return -1; return 1; } /*****************************************************************************/ LONG SGDatabase::ListRead(SGNativeListEntry *entry) const { LONG status; ASSERT(entry != NULL); ASSERT(entry->name != NULL); if (sesID == ~0U || dbID == ~0U || listID == ~0U) return -1; status = PositionCursor(listID, LIST_KEY_NAME_NAME, entry, LIST_KEY_NCOLS_NAME, listKeyName); if (status <= 0) return status; return 
RetrieveData(listID, entry, LIST_NCOLS, listColumnSpecs, listColumnIDs, LIST_EXCLUDE_NAME_MASK) ? 1 : -1; } /*****************************************************************************/ LONG SGDatabase::ListDelete(const TCHAR *name) { SGNativeListEntry entry; LONG status; BOOL alreadyInTransaction = inTransaction; ASSERT(name != NULL); if (sesID == ~0U || dbID == ~0U || listID == ~0U) return -1; entry.name = name; entry.value = NULL; status = PositionCursor(listID, LIST_KEY_NAME_NAME, &entry, LIST_KEY_NCOLS_NAME, listKeyName); if (status <= 0) return status; if (!inTransaction && !BeginTransaction()) return -1; ASSERT(inTransaction); status = Delete(listID); if (status < 0) { if (!alreadyInTransaction) AbortTransaction(); return -1; } return status; } /*****************************************************************************/ LONG SGDatabase::ListCount() const { LONG numEntries; if (sesID == ~0U || dbID == ~0U || listID == ~0U) return -1; numEntries = numListEntries + numUncommittedListEntries; ASSERT(numEntries >= 0); ASSERT(Count(listID, LIST_KEY_NAME_NAME) == numEntries); return numEntries; } 
#include "mainwindow.h"
#include "ui_mainwindow.h"

// Main window of a small diary application backed by a local SQLite file.
// The "data" column holds a date string built from the three combo boxes;
// "record" holds the note text from the line edit.
MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);
    dataBase = QSqlDatabase::addDatabase("QSQLITE");
    dataBase.setHostName("127.0.0.1");   // no-op for the SQLite driver; kept for compatibility
    dataBase.setUserName("root");        // no-op for the SQLite driver; kept for compatibility
    dataBase.setDatabaseName("DatabasE.db");
    if (!dataBase.open())
        QMessageBox::critical(this, "БД не открылась", dataBase.lastError().text());
}

MainWindow::~MainWindow()
{
    delete ui;
}

// "Add note": insert a (date, text) row.
void MainWindow::on_pushButton_clicked()
{
    QString cb = ui->comboBox->currentText() + ' ' + ui->comboBox_2->currentText() + ' ' + ui->comboBox_3->currentText();
    QString l = ui->lineEdit->text();
    sqlQuery = QSqlQuery(dataBase);
    if(dataBase.open())
    {
        // SECURITY FIX: bind user input instead of concatenating it into the
        // SQL text — the old query was injectable and broke on quotes.
        sqlQuery.prepare("INSERT INTO Diary(data, record) VALUES (:data, :record)");
        sqlQuery.bindValue(":data", cb);
        sqlQuery.bindValue(":record", l);
        if(!sqlQuery.exec())
            QMessageBox::critical(this, "Запрос не выполнился", "В принципе, не удивительно\n" + dataBase.lastError().databaseText() + "\n" + dataBase.lastError().driverText());
        else
            // BUGFIX: the success box used to be shown even when exec() failed.
            QMessageBox::information(this, "Состояние:", "Заметка записана!");
    }else QMessageBox::critical(this, "БД не открылась", "В принципе, не удивительно\n" + dataBase.lastError().text());
}

// "Show notes for date": scan the table and collect rows matching the date.
void MainWindow::on_pushButton_2_clicked()
{
    QString cb = ui->comboBox->currentText() + ' ' + ui->comboBox_2->currentText() + ' ' + ui->comboBox_3->currentText();
    sqlQuery = QSqlQuery(dataBase);
    if(dataBase.open())
    {
        sqlQuery.prepare("SELECT * FROM Diary");
        if(!sqlQuery.exec())
        {
            QMessageBox::critical(this, "Запрос не выполнился", "В принципе, не удивительно\n" + dataBase.lastError().databaseText() + "\n" + dataBase.lastError().driverText());
            return; // BUGFIX: previously fell through and reported "no notes" after an error
        }
        QString temp = "";
        while(sqlQuery.next())
        {
            // column 1 = data (date), column 2 = record (note text)
            if(sqlQuery.value(1).toString() == cb)
                temp += sqlQuery.value(2).toString() + '\n';
        }
        if(temp != "")
            QMessageBox::information(this, "Заметки:", temp);
        else
            QMessageBox::warning(this, "Заметки:", "На этот день заметок нет!");
    }else QMessageBox::critical(this, "БД не открылась", "В принципе, не удивительно\n" + dataBase.lastError().text());
}

void
MainWindow::on_pushButton_3_clicked() { QString cb = ui->comboBox->currentText() + ' ' + ui->comboBox_2->currentText() + ' ' + ui->comboBox_3->currentText(); QString l = ui->lineEdit->text(); QString temp; sqlQuery = QSqlQuery(dataBase); if(dataBase.open()) { if(l == "") { sqlQuery.prepare("DELETE FROM Diary WHERE data='"+cb+"'"); if(!sqlQuery.exec()) QMessageBox::critical(this, "Запрос не выполнился", "В принципе, не удивительно\n" + dataBase.lastError().databaseText() + "\n" + dataBase.lastError().driverText()); }else { sqlQuery.prepare("DELETE FROM Diary WHERE data='"+cb+"' AND record='"+l+"'"); if(!sqlQuery.exec()) QMessageBox::critical(this, "Запрос не выполнился", "В принципе, не удивительно\n" + dataBase.lastError().databaseText() + "\n" + dataBase.lastError().driverText()); } QMessageBox::information(this, "Состояние:", "Заметка удалена!"); }else QMessageBox::critical(this, "БД не открылась", "В принципе, не удивительно\n" + dataBase.lastError().text()); } void MainWindow::on_pushButton_4_clicked() { QString cb = ui->comboBox->currentText() + ' ' + ui->comboBox_2->currentText() + ' ' + ui->comboBox_3->currentText(); QString l = ui->lineEdit->text(); sqlQuery = QSqlQuery(dataBase); if(dataBase.open()) { sqlQuery.prepare("UPDATE Diary set record='"+l+"' WHERE data='"+cb+"'"); if(!sqlQuery.exec()) QMessageBox::critical(this, "Запрос не выполнился", "В принципе, не удивительно\n" + dataBase.lastError().databaseText() + "\n" + dataBase.lastError().driverText()); QMessageBox::information(this, "Состояние:", "Заметка изменена!"); }else QMessageBox::critical(this, "БД не открылась", "В принципе, не удивительно\n" + dataBase.lastError().text()); } void MainWindow::on_pushButton_5_clicked() { sqlQuery = QSqlQuery(dataBase); if(dataBase.open()) { sqlQuery.prepare("SELECT * FROM Diary"); if(!sqlQuery.exec()) QMessageBox::critical(this, "Запрос не выполнился", "В принципе, не удивительно\n" + dataBase.lastError().databaseText() + "\n" + 
dataBase.lastError().driverText()); QString temp = ""; while(sqlQuery.next()) temp += sqlQuery.value(1).toString() + '\t' + sqlQuery.value(2).toString() + '\n'; if(temp != "") QMessageBox::information(this, "Заметки:", temp); else QMessageBox::warning(this, "Заметки:", "Заметок нет!"); }else QMessageBox::critical(this, "БД не открылась", "В принципе, не удивительно\n" + dataBase.lastError().text()); } void MainWindow::on_pushButton_6_clicked() { sqlQuery = QSqlQuery(dataBase); if(dataBase.open()) { sqlQuery.prepare("DELETE FROM Diary"); if(!sqlQuery.exec()) QMessageBox::critical(this, "Запрос не выполнился", "В принципе, не удивительно\n" + dataBase.lastError().databaseText() + "\n" + dataBase.lastError().driverText()); QMessageBox::information(this, "Состояние:", "Все заметки удалены!"); }else QMessageBox::critical(this, "БД не открылась", "В принципе, не удивительно\n" + dataBase.lastError().text()); } void MainWindow::on_actionKamishiro_Iyamoto_triggered() { QMessageBox::information(this, "Разработчик и его ВК:", "Собственно, этот человек учится в коледже и сидит дома\nЕго ВК: https://vk.com/kamishiro_iyamoto"); }
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE762_Mismatched_Memory_Management_Routines__delete_array_struct_malloc_64b.cpp
Label Definition File: CWE762_Mismatched_Memory_Management_Routines__delete_array.label.xml
Template File: sources-sinks-64b.tmpl.cpp
*/
/*
 * @description
 * CWE: 762 Mismatched Memory Management Routines
 * BadSource: malloc Allocate data using malloc()
 * GoodSource: Allocate data using new []
 * Sinks:
 *    GoodSink: Deallocate data using free()
 *    BadSink : Deallocate data using delete []
 * Flow Variant: 64 Data flow: void pointer to data passed from one function to another in different source files
 *
 * */
/* NOTE(review): this is a Juliet/SARD benchmark file — the mismatched
 * allocate/free pairs below are INTENTIONAL test payloads for static
 * analyzers.  Do not "fix" the bad sinks. */

#include "std_testcase.h"

namespace CWE762_Mismatched_Memory_Management_Routines__delete_array_struct_malloc_64
{

#ifndef OMITBAD

/* Bad sink: pairs with the malloc() source from the companion 64a file. */
void badSink(void * dataVoidPtr)
{
    /* cast void pointer to a pointer of the appropriate type */
    twoIntsStruct * * dataPtr = (twoIntsStruct * *)dataVoidPtr;
    /* dereference dataPtr into data */
    twoIntsStruct * data = (*dataPtr);
    /* POTENTIAL FLAW: Deallocate memory using delete [] - the source memory allocation function may
     * require a call to free() to deallocate the memory */
    delete [] data;
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
/* Here the companion source allocates with new [], so delete [] matches. */
void goodG2BSink(void * dataVoidPtr)
{
    /* cast void pointer to a pointer of the appropriate type */
    twoIntsStruct * * dataPtr = (twoIntsStruct * *)dataVoidPtr;
    /* dereference dataPtr into data */
    twoIntsStruct * data = (*dataPtr);
    /* POTENTIAL FLAW: Deallocate memory using delete [] - the source memory allocation function may
     * require a call to free() to deallocate the memory */
    delete [] data;
}

/* goodB2G uses the BadSource with the GoodSink */
/* Here the companion source allocates with malloc(), so free() matches. */
void goodB2GSink(void * dataVoidPtr)
{
    /* cast void pointer to a pointer of the appropriate type */
    twoIntsStruct * * dataPtr = (twoIntsStruct * *)dataVoidPtr;
    /* dereference dataPtr into data */
    twoIntsStruct * data = (*dataPtr);
    /* FIX: Free memory using free() */
    free(data);
}

#endif /* OMITGOOD */

} /* close namespace */
/* Copyright (c) 2012-2013, The Tor Project, Inc. */
/* See LICENSE for licensing information */

#include "orconfig.h"
#include "crypto.h"
#define ONION_NTOR_PRIVATE
#include "onion_ntor.h"
#include "torlog.h"
#include "util.h"

/** Free storage held in an ntor handshake state. */
void
ntor_handshake_state_free(ntor_handshake_state_t *state)
{
  if (!state)
    return;
  /* Wipe before freeing: the state holds a private key (seckey_x). */
  memwipe(state, 0, sizeof(*state));
  tor_free(state);
}

/** Convenience function to represent HMAC_SHA256 as our instantiation of
 * ntor's "tweaked hash'.  Hash the <b>inp_len</b> bytes at <b>inp</b> into
 * a DIGEST256_LEN-byte digest at <b>out</b>, with the hash changing
 * depending on the value of <b>tweak</b>. */
static void
h_tweak(uint8_t *out,
        const uint8_t *inp, size_t inp_len,
        const char *tweak)
{
  /* The tweak string is used as the HMAC key. */
  size_t tweak_len = strlen(tweak);
  crypto_hmac_sha256((char*)out, tweak, tweak_len, (const char*)inp, inp_len);
}

/** Wrapper around a set of tweak-values for use with the ntor handshake. */
typedef struct tweakset_t {
  const char *t_mac;
  const char *t_key;
  const char *t_verify;
  const char *m_expand;
} tweakset_t;

/** The tweaks to be used with our handshake. */
const tweakset_t proto1_tweaks = {
#define PROTOID "ntor-curve25519-sha256-1"
#define PROTOID_LEN 24
  PROTOID ":mac",
  PROTOID ":key_extract",
  PROTOID ":verify",
  PROTOID ":key_expand"
};

/** Convenience macro: copy <b>len</b> bytes from <b>inp</b> to <b>ptr</b>,
 * and advance <b>ptr</b> by the number of bytes copied. */
#define APPEND(ptr, inp, len)                    \
  STMT_BEGIN {                                   \
    memcpy(ptr, (inp), (len));                   \
    ptr += len;                                  \
  } STMT_END

/**
 * Compute the first client-side step of the ntor handshake for communicating
 * with a server whose DIGEST_LEN-byte server identity is <b>router_id</b>,
 * and whose onion key is <b>router_key</b>. Store the NTOR_ONIONSKIN_LEN-byte
 * message in <b>onion_skin_out</b>, and store the handshake state in
 * *<b>handshake_state_out</b>.  Return 0 on success, -1 on failure.
 */
int
onion_skin_ntor_create(const uint8_t *router_id,
                       const curve25519_public_key_t *router_key,
                       ntor_handshake_state_t **handshake_state_out,
                       uint8_t *onion_skin_out)
{
  ntor_handshake_state_t *state;
  uint8_t *op;

  state = (ntor_handshake_state_t *)tor_malloc_zero(sizeof(ntor_handshake_state_t));

  /* Remember who we're talking to, for use in the final key derivation. */
  memcpy(state->router_id, router_id, DIGEST_LEN);
  memcpy(&state->pubkey_B, router_key, sizeof(curve25519_public_key_t));
  /* Generate the ephemeral keypair (x, X). */
  if (curve25519_secret_key_generate(&state->seckey_x, 0) < 0) {
    /* Key generation failed; nothing sensitive written into state yet
     * beyond public material, so a plain free is used here. */
    tor_free(state);
    return -1;
  }
  curve25519_public_key_generate(&state->pubkey_X, &state->seckey_x);

  /* Onion skin layout: NODEID | KEYID(B) | X */
  op = onion_skin_out;
  APPEND(op, router_id, DIGEST_LEN);
  APPEND(op, router_key->public_key, CURVE25519_PUBKEY_LEN);
  APPEND(op, state->pubkey_X.public_key, CURVE25519_PUBKEY_LEN);
  tor_assert(op == onion_skin_out + NTOR_ONIONSKIN_LEN);

  *handshake_state_out = state;

  return 0;
}

#define SERVER_STR "Server"
#define SERVER_STR_LEN 6

/* Sizes of the concatenated "secret_input" and "auth_input" buffers; the
 * APPEND sequences below are asserted to fill them exactly. */
#define SECRET_INPUT_LEN (CURVE25519_PUBKEY_LEN * 3 +   \
                          CURVE25519_OUTPUT_LEN * 2 +   \
                          DIGEST_LEN + PROTOID_LEN)
#define AUTH_INPUT_LEN (DIGEST256_LEN + DIGEST_LEN +    \
                        CURVE25519_PUBKEY_LEN*3 +       \
                        PROTOID_LEN + SERVER_STR_LEN)

/**
 * Perform the server side of an ntor handshake. Given an
 * NTOR_ONIONSKIN_LEN-byte message in <b>onion_skin</b>, our own identity
 * fingerprint as <b>my_node_id</b>, and an associative array mapping public
 * onion keys to curve25519_keypair_t in <b>private_keys</b>, attempt to
 * perform the handshake.  Use <b>junk_keys</b> if present if the handshake
 * indicates an unrecognized public key.  Write an NTOR_REPLY_LEN-byte
 * message to send back to the client into <b>handshake_reply_out</b>, and
 * generate <b>key_out_len</b> bytes of key material in <b>key_out</b>. Return
 * 0 on success, -1 on failure.
 */
int
onion_skin_ntor_server_handshake(const uint8_t *onion_skin,
                                 const di_digest256_map_t *private_keys,
                                 const curve25519_keypair_t *junk_keys,
                                 const uint8_t *my_node_id,
                                 uint8_t *handshake_reply_out,
                                 uint8_t *key_out,
                                 size_t key_out_len)
{
  const tweakset_t *T = &proto1_tweaks;
  /* Sensitive stack-allocated material. Kept in an anonymous struct to make
   * it easy to wipe. */
  struct {
    uint8_t secret_input[SECRET_INPUT_LEN];
    uint8_t auth_input[AUTH_INPUT_LEN];
    curve25519_public_key_t pubkey_X;
    curve25519_secret_key_t seckey_y;
    curve25519_public_key_t pubkey_Y;
    uint8_t verify[DIGEST256_LEN];
  } s;
  uint8_t *si = s.secret_input, *ai = s.auth_input;
  const curve25519_keypair_t *keypair_bB;
  int bad;

  /* Decode the onion skin */
  /* XXXX Does this possible early-return business threaten our security? */
  if (tor_memneq(onion_skin, my_node_id, DIGEST_LEN))
    return -1;
  /* Note that on key-not-found, we go through with this operation anyway,
   * using "junk_keys".  This will result in failed authentication, but won't
   * leak whether we recognized the key. */
  keypair_bB = (const curve25519_keypair_t*)dimap_search(private_keys,
                                                         onion_skin + DIGEST_LEN,
                                                         (void*)junk_keys);
  if (!keypair_bB)
    return -1;

  /* X starts after NODEID and KEYID (DIGEST256_LEN here equals
   * CURVE25519_PUBKEY_LEN, the keyid width). */
  memcpy(s.pubkey_X.public_key, onion_skin+DIGEST_LEN+DIGEST256_LEN,
         CURVE25519_PUBKEY_LEN);

  /* Make y, Y */
  curve25519_secret_key_generate(&s.seckey_y, 0);
  curve25519_public_key_generate(&s.pubkey_Y, &s.seckey_y);

  /* NOTE: If we ever use a group other than curve25519, or a different
   * representation for its points, we may need to perform different or
   * additional checks on X here and on Y in the client handshake, or lose our
   * security properties. What checks we need would depend on the properties
   * of the group and its representation.
   *
   * In short: if you use anything other than curve25519, this aspect of the
   * code will need to be reconsidered carefully. */

  /* build secret_input */
  /* secret_input = EXP(X,y) | EXP(X,b) | NODEID | B | X | Y | PROTOID.
   * "bad" accumulates the all-zero-output checks (point at infinity etc.)
   * without branching, so the failure path stays constant-time. */
  curve25519_handshake(si, &s.seckey_y, &s.pubkey_X);
  bad = safe_mem_is_zero(si, CURVE25519_OUTPUT_LEN);
  si += CURVE25519_OUTPUT_LEN;
  curve25519_handshake(si, &keypair_bB->seckey, &s.pubkey_X);
  bad |= safe_mem_is_zero(si, CURVE25519_OUTPUT_LEN);
  si += CURVE25519_OUTPUT_LEN;
  APPEND(si, my_node_id, DIGEST_LEN);
  APPEND(si, keypair_bB->pubkey.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(si, s.pubkey_X.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(si, s.pubkey_Y.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(si, PROTOID, PROTOID_LEN);
  tor_assert(si == s.secret_input + sizeof(s.secret_input));

  /* Compute hashes of secret_input */
  h_tweak(s.verify, s.secret_input, sizeof(s.secret_input), T->t_verify);

  /* Compute auth_input */
  APPEND(ai, s.verify, DIGEST256_LEN);
  APPEND(ai, my_node_id, DIGEST_LEN);
  APPEND(ai, keypair_bB->pubkey.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(ai, s.pubkey_Y.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(ai, s.pubkey_X.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(ai, PROTOID, PROTOID_LEN);
  APPEND(ai, SERVER_STR, SERVER_STR_LEN);
  tor_assert(ai == s.auth_input + sizeof(s.auth_input));

  /* Build the reply: Y | AUTH, where AUTH = H(auth_input, t_mac). */
  memcpy(handshake_reply_out, s.pubkey_Y.public_key, CURVE25519_PUBKEY_LEN);
  h_tweak(handshake_reply_out+CURVE25519_PUBKEY_LEN,
          s.auth_input, sizeof(s.auth_input),
          T->t_mac);

  /* Generate the key material */
  crypto_expand_key_material_rfc5869_sha256(
                           s.secret_input, sizeof(s.secret_input),
                           (const uint8_t*)T->t_key, strlen(T->t_key),
                           (const uint8_t*)T->m_expand, strlen(T->m_expand),
                           key_out, key_out_len);

  /* Wipe all of our local state */
  memwipe(&s, 0, sizeof(s));

  return bad ? -1 : 0;
}

/**
 * Perform the final client side of the ntor handshake, using the state in
 * <b>handshake_state</b> and the server's NTOR_REPLY_LEN-byte reply in
 * <b>handshake_reply</b>.  Generate <b>key_out_len</b> bytes of key material
 * in <b>key_out</b>. Return 0 on success, -1 on failure.
 */
int onion_skin_ntor_client_handshake(
                             const ntor_handshake_state_t *handshake_state,
                             const uint8_t *handshake_reply,
                             uint8_t *key_out,
                             size_t key_out_len)
{
  const tweakset_t *T = &proto1_tweaks;
  /* Sensitive stack-allocated material. Kept in an anonymous struct to make
   * it easy to wipe. */
  struct {
    curve25519_public_key_t pubkey_Y;
    uint8_t secret_input[SECRET_INPUT_LEN];
    uint8_t verify[DIGEST256_LEN];
    uint8_t auth_input[AUTH_INPUT_LEN];
    uint8_t auth[DIGEST256_LEN];
  } s;
  /* ai/si are write cursors that walk across the two input buffers. */
  uint8_t *ai = s.auth_input, *si = s.secret_input;
  const uint8_t *auth_candidate;
  int bad;

  /* Decode input: the reply is Y (CURVE25519_PUBKEY_LEN bytes) followed by
   * the server's MAC, which we must verify below. */
  memcpy(s.pubkey_Y.public_key, handshake_reply, CURVE25519_PUBKEY_LEN);
  auth_candidate = handshake_reply + CURVE25519_PUBKEY_LEN;

  /* See note in server_handshake above about checking points.  The
   * circumstances under which we'd need to check Y for membership are
   * different than those under which we'd be checking X. */

  /* Compute secret_input */
  curve25519_handshake(si, &handshake_state->seckey_x, &s.pubkey_Y);
  /* Each failed check gets its own bit in "bad" so the log message below
   * can distinguish which one tripped. */
  bad = safe_mem_is_zero(si, CURVE25519_OUTPUT_LEN);
  si += CURVE25519_OUTPUT_LEN;
  curve25519_handshake(si, &handshake_state->seckey_x,
                       &handshake_state->pubkey_B);
  bad |= (safe_mem_is_zero(si, CURVE25519_OUTPUT_LEN) << 1);
  si += CURVE25519_OUTPUT_LEN;
  APPEND(si, handshake_state->router_id, DIGEST_LEN);
  APPEND(si, handshake_state->pubkey_B.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(si, handshake_state->pubkey_X.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(si, s.pubkey_Y.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(si, PROTOID, PROTOID_LEN);
  tor_assert(si == s.secret_input + sizeof(s.secret_input));

  /* Compute verify from secret_input */
  h_tweak(s.verify, s.secret_input, sizeof(s.secret_input), T->t_verify);

  /* Compute auth_input */
  APPEND(ai, s.verify, DIGEST256_LEN);
  APPEND(ai, handshake_state->router_id, DIGEST_LEN);
  APPEND(ai, handshake_state->pubkey_B.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(ai, s.pubkey_Y.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(ai, handshake_state->pubkey_X.public_key, CURVE25519_PUBKEY_LEN);
  APPEND(ai, PROTOID, PROTOID_LEN);
  APPEND(ai, SERVER_STR, SERVER_STR_LEN);
  tor_assert(ai == s.auth_input + sizeof(s.auth_input));

  /* Compute auth */
  h_tweak(s.auth, s.auth_input, sizeof(s.auth_input), T->t_mac);

  /* Compare our MAC against the server's candidate.  NOTE(review):
   * tor_memneq is presumably a timing-safe comparison -- confirm. */
  bad |= (tor_memneq(s.auth, auth_candidate, DIGEST256_LEN) << 2);

  /* Key material is derived even when "bad" is set; only the return value
   * reports failure. */
  crypto_expand_key_material_rfc5869_sha256(
                           s.secret_input, sizeof(s.secret_input),
                           (const uint8_t*)T->t_key, strlen(T->t_key),
                           (const uint8_t*)T->m_expand, strlen(T->m_expand),
                           key_out, key_out_len);

  /* Wipe all sensitive intermediates before returning. */
  memwipe(&s, 0, sizeof(s));

  if (bad) {
    log_warn(LD_PROTOCOL, "Invalid result from curve25519 handshake: %d", bad);
  }

  return bad ? -1 : 0;
}
/*===================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.

This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.

See LICENSE.txt or http://www.mitk.org for details.

===================================================================*/

#include "mitkVectorImageMapper2D.h"

// vtk related includes
// NOTE(review): several headers below are included twice; harmless (include
// guards), but could be cleaned up.
#include <vtkCellArray.h>
#include <vtkCellData.h>
#include <vtkCutter.h>
#include <vtkDataArray.h>
#include <vtkDataObject.h>
#include <vtkDataSetWriter.h>
#include <vtkFloatArray.h>
#include <vtkGlyph2D.h>
#include <vtkGlyphSource2D.h>
#include <vtkImageData.h>
#include <vtkImageReslice.h>
#include <vtkIndent.h>
#include <vtkLinearTransform.h>
#include <vtkLookupTable.h>
#include <vtkLookupTable.h>
#include <vtkMaskedGlyph2D.h>
#include <vtkMaskedGlyph3D.h>
#include <vtkMath.h>
#include <vtkMatrix4x4.h>
#include <vtkMatrixToLinearTransform.h>
#include <vtkPlane.h>
#include <vtkPlane.h>
#include <vtkPointData.h>
#include <vtkPoints.h>
#include <vtkPolyData.h>
#include <vtkPolyData.h>
#include <vtkPolyDataMapper.h>
#include <vtkScalarsToColors.h>
#include <vtkScalarsToColors.h>
#include <vtkTransform.h>

#include <fstream>

// mitk related includes
#include "mitkAbstractTransformGeometry.h"
#include "mitkBaseRenderer.h"
#include "mitkColorProperty.h"
#include "mitkGL.h"
#include "mitkProperties.h"
#include <mitkLookupTableProperty.h>

// Returns the mapper's input image: the explicitly set m_Image if present,
// otherwise the data attached to the associated DataNode (NULL if that data
// is not a mitk::Image).
const mitk::Image *mitk::VectorImageMapper2D::GetInput(void)
{
  if (m_Image.IsNotNull())
    return m_Image;
  else
    return dynamic_cast<const mitk::Image *>(GetDataNode()->GetData());
}

// Renders the vector image for the renderer's current 2D plane: cuts the
// volume with the plane (or builds the slice by hand for 2D volumes),
// projects the vectors into the plane, generates dash glyphs, and draws them
// via PaintCells().
void mitk::VectorImageMapper2D::Paint(mitk::BaseRenderer *renderer)
{
  // std::cout << "2d vector mapping..." << std::endl;
  bool visible = true;
  GetDataNode()->GetVisibility(visible, renderer, "visible");
  if (!visible)
    return;

  mitk::Image::Pointer input = const_cast<mitk::Image *>(this->GetInput());
  if (input.IsNull())
    return;

  vtkImageData *vtkImage = input->GetVtkImageData(this->GetCurrentTimeStep(input, renderer));

  //
  // set up the cutter orientation according to the current geometry of
  // the renderers plane
  //
  Point3D point;
  Vector3D normal;
  PlaneGeometry::ConstPointer worldPlaneGeometry = renderer->GetCurrentWorldPlaneGeometry();

  if (worldPlaneGeometry.IsNotNull())
  {
    // set up vtkPlane according to worldGeometry
    point = worldPlaneGeometry->GetOrigin();
    normal = worldPlaneGeometry->GetNormal();
    normal.Normalize();
    m_Plane->SetTransform((vtkAbstractTransform *)NULL);
  }
  else
  {
    itkWarningMacro(<< "worldPlaneGeometry is NULL!");
    return;
  }

  double vp[3], vp_slice[3], vnormal[3];
  vnl2vtk(point.GetVnlVector(), vp);
  vnl2vtk(normal.GetVnlVector(), vnormal);
  // std::cout << "Origin: " << vp[0] <<" "<< vp[1] <<" "<< vp[2] << std::endl;
  // std::cout << "Normal: " << vnormal[0] <<" "<< vnormal[1] <<" "<< vnormal[2] << std::endl;

  // normally, we would need to transform the surface and cut the transformed surface with the cutter.
  // This might be quite slow. Thus, the idea is, to perform an inverse transform of the plane instead.
  //@todo It probably does not work for scaling operations yet:scaling operations have to be
  // dealed with after the cut is performed by scaling the contour.
  vtkLinearTransform *vtktransform = GetDataNode()->GetVtkTransform();
  vtkTransform *world2vtk = vtkTransform::New();
  world2vtk->Identity();
  world2vtk->Concatenate(vtktransform->GetLinearInverse());
  // Undo the scale component of the inverse transform so only rotation and
  // translation are applied to the plane.
  double myscale[3];
  world2vtk->GetScale(myscale);
  world2vtk->PostMultiply();
  world2vtk->Scale(1 / myscale[0], 1 / myscale[1], 1 / myscale[2]);
  world2vtk->TransformPoint(vp, vp);
  world2vtk->TransformNormalAtPoint(vp, vnormal, vnormal);
  world2vtk->Delete();

  // vtk works in axis align coords
  // thus the normal also must be axis align, since
  // we do not allow arbitrary cutting through volume
  //
  // vnormal should already be axis align, but in order
  // to get rid of precision effects, we set the two smaller
  // components to zero here
  int dims[3];
  vtkImage->GetDimensions(dims);
  double spac[3];
  vtkImage->GetSpacing(spac);
  vp_slice[0] = vp[0];
  vp_slice[1] = vp[1];
  vp_slice[2] = vp[2];
  // For the dominant normal axis: clamp the slice position 0.4 voxels away
  // from the volume borders and zero out the two minor normal components.
  if (fabs(vnormal[0]) > fabs(vnormal[1]) && fabs(vnormal[0]) > fabs(vnormal[2]))
  {
    if (fabs(vp_slice[0] / spac[0]) < 0.4)
      vp_slice[0] = 0.4 * spac[0];
    if (fabs(vp_slice[0] / spac[0]) > (dims[0] - 1) - 0.4)
      vp_slice[0] = ((dims[0] - 1) - 0.4) * spac[0];
    vnormal[1] = 0;
    vnormal[2] = 0;
  }

  if (fabs(vnormal[1]) > fabs(vnormal[0]) && fabs(vnormal[1]) > fabs(vnormal[2]))
  {
    if (fabs(vp_slice[1] / spac[1]) < 0.4)
      vp_slice[1] = 0.4 * spac[1];
    if (fabs(vp_slice[1] / spac[1]) > (dims[1] - 1) - 0.4)
      vp_slice[1] = ((dims[1] - 1) - 0.4) * spac[1];
    vnormal[0] = 0;
    vnormal[2] = 0;
  }

  if (fabs(vnormal[2]) > fabs(vnormal[1]) && fabs(vnormal[2]) > fabs(vnormal[0]))
  {
    if (fabs(vp_slice[2] / spac[2]) < 0.4)
      vp_slice[2] = 0.4 * spac[2];
    if (fabs(vp_slice[2] / spac[2]) > (dims[2] - 1) - 0.4)
      vp_slice[2] = ((dims[2] - 1) - 0.4) * spac[2];
    vnormal[0] = 0;
    vnormal[1] = 0;
  }

  m_Plane->SetOrigin(vp_slice);
  m_Plane->SetNormal(vnormal);

  vtkPolyData *cuttedPlane;
  // The cutter only works when the cut direction has more than one slice;
  // otherwise the slice is assembled manually below.
  if (!((dims[0] == 1 && vnormal[0] != 0) || (dims[1] == 1 && vnormal[1] != 0) ||
        (dims[2] == 1 && vnormal[2] != 0)))
  {
    m_Cutter->SetCutFunction(m_Plane);
    m_Cutter->SetInputData(vtkImage);
    m_Cutter->GenerateCutScalarsOff(); //!
    m_Cutter->Update();
    cuttedPlane = m_Cutter->GetOutput();
  }
  else
  {
    // cutting of a 2D-Volume does not work,
    // so we have to build up our own polydata object
    // NOTE(review): cuttedPlane, points and pointdata created in this branch
    // are never Delete()d -- looks like a reference-count leak; confirm.
    cuttedPlane = vtkPolyData::New();
    vtkPoints *points = vtkPoints::New();
    points->SetNumberOfPoints(vtkImage->GetNumberOfPoints());
    for (int i = 0; i < vtkImage->GetNumberOfPoints(); i++)
      points->SetPoint(i, vtkImage->GetPoint(i));
    cuttedPlane->SetPoints(points);
    vtkFloatArray *pointdata = vtkFloatArray::New();
    int comps = vtkImage->GetPointData()->GetScalars()->GetNumberOfComponents();
    pointdata->SetNumberOfComponents(comps);
    int tuples = vtkImage->GetPointData()->GetScalars()->GetNumberOfTuples();
    pointdata->SetNumberOfTuples(tuples);
    for (int i = 0; i < tuples; i++)
      pointdata->SetTuple(i, vtkImage->GetPointData()->GetScalars()->GetTuple(i));
    pointdata->SetName("vector");
    cuttedPlane->GetPointData()->AddArray(pointdata);
  }

  if (cuttedPlane->GetNumberOfPoints() != 0)
  {
    //
    // make sure, that we have point data with more than 1 component (as vectors)
    //
    vtkPointData *pointData = cuttedPlane->GetPointData();
    if (pointData == NULL)
    {
      itkWarningMacro(<< "no point data associated with cutters result!");
      return;
    }
    if (pointData->GetNumberOfArrays() == 0)
    {
      itkWarningMacro(<< "point data returned by cutter doesn't have any arrays associated!");
      return;
    }
    else if (pointData->GetArray(0)->GetNumberOfComponents() <= 1)
    {
      itkWarningMacro(<< "number of components <= 1!");
      return;
    }
    else if (pointData->GetArrayName(0) == NULL)
    {
      pointData->GetArray(0)->SetName("vector");
      // std::cout << "array name = vectors now" << std::endl;
    }
    // std::cout << " projecting..."<< std::endl;

    //
    // constrain the vectors to lie on the plane, which means to remove the vector component,
    // which is orthogonal to the plane.
    //
    vtkIdType numPoints, pointId;
    numPoints = cuttedPlane->GetNumberOfPoints();
    vtkDataArray *inVectors = cuttedPlane->GetPointData()->GetVectors("vector");
    assert(inVectors != NULL);
    vtkFloatArray *vectorMagnitudes = vtkFloatArray::New();
    vectorMagnitudes->SetName("vectorMagnitudes");
    vectorMagnitudes->SetNumberOfComponents(1);
    vectorMagnitudes->SetNumberOfValues(numPoints);
    vectorMagnitudes->SetNumberOfTuples(numPoints);
    double inVector[3], outVector[3], wnormal[3]; //, tmpVector[ 3 ], outVector[ 3 ];
    double k = 0.0;
    vnl2vtk(normal.GetVnlVector(), wnormal);
    vtkMath::Normalize(wnormal);
    // NOTE(review): normalizeVecs is read uninitialized when the DataNode has
    // no "NormalizeVecs" property (GetBoolProperty then leaves it untouched)
    // -- should be initialized to false; confirm against GetBoolProperty's
    // contract.
    bool normalizeVecs;
    m_DataNode->GetBoolProperty("NormalizeVecs", normalizeVecs);
    for (pointId = 0; pointId < numPoints; ++pointId)
    {
      inVectors->GetTuple(pointId, inVector);
      if (normalizeVecs)
      {
        vnl_vector<double> tmp(3);
        vtk2vnl(inVector, tmp);
        tmp.normalize();
        vnl2vtk(tmp, inVector);
      }
      k = vtkMath::Dot(wnormal, inVector);
      // Remove non orthogonal component.
      outVector[0] = inVector[0] - (wnormal[0] * k);
      outVector[1] = inVector[1] - (wnormal[1] * k);
      outVector[2] = inVector[2] - (wnormal[2] * k);
      inVectors->SetTuple(pointId, outVector);
      // ?? this was set to norm(inVector) before, but outVector made more sense to me
      vectorMagnitudes->SetValue(pointId, vtkMath::Norm(outVector));
      // std::cout << "method old: " << inVector[0] <<", " << inVector[1] << ", "<<inVector[2] << ", method new: " <<
      // outVector[0] << ", "<< outVector[1] << ", "<< outVector[2] << std::endl;
    }
    pointData->AddArray(vectorMagnitudes);
    pointData->CopyAllOn();
    // pointData->PrintSelf(std::cout, vtkIndent(4));
    // std::cout << " ...done!"<< std::endl;
    // std::cout << " glyphing..."<< std::endl;

    // call glyph2D to generate 2D glyphs for each of the
    // vectors
    vtkGlyphSource2D *glyphSource = vtkGlyphSource2D::New();
    // glyphSource->SetGlyphTypeToDash();
    glyphSource->DashOn();
    // glyphSource->SetScale( 0.1 );
    // glyphSource->SetScale2( .5 );
    // glyphSource->SetCenter( 0.5, 0.5, 0.5 );
    glyphSource->CrossOff();
    // glyphSource->FilledOff();
    // glyphSource->Update();

    // Scale glyphs relative to the smallest voxel spacing.
    double spacing[3];
    vtkImage->GetSpacing(spacing);
    double min = spacing[0];
    min = min > spacing[1] ? spacing[1] : min;
    min = min > spacing[2] ? spacing[2] : min;

    float scale = 1;
    mitk::FloatProperty::Pointer mitkScaleProp =
      dynamic_cast<mitk::FloatProperty *>(GetDataNode()->GetProperty("Scale"));
    if (mitkScaleProp.IsNotNull())
    {
      scale = mitkScaleProp->GetValue();
    }

    // Glyph at most 128*128 randomly masked points, oriented along "vector".
    vtkMaskedGlyph3D *glyphGenerator = vtkMaskedGlyph3D::New();
    glyphGenerator->SetSourceData(glyphSource->GetOutput());
    glyphGenerator->SetInput(cuttedPlane);
    glyphGenerator->SetInputArrayToProcess(1, 0, 0, vtkDataObject::FIELD_ASSOCIATION_POINTS, "vector");
    glyphGenerator->SetVectorModeToUseVector();
    glyphGenerator->OrientOn();
    glyphGenerator->SetScaleFactor(min * scale);
    glyphGenerator->SetUseMaskPoints(true);
    glyphGenerator->SetRandomMode(true);
    glyphGenerator->SetMaximumNumberOfPoints(128 * 128);
    glyphGenerator->Update();

    /*
    vtkLookupTable* vtkLut = NULL;
    mitk::LookupTableProperty::Pointer mitkLutProp =
    dynamic_cast<mitk::LookupTableProperty*>(GetDataNode()->GetProperty("LookupTable"));
    if (mitkLutProp.IsNotNull())
    {
    vtkLut = mitkLutProp->GetLookupTable()->GetVtkLookupTable();
    }
    */

    // Glyph color from the node's "color" property; defaults to blue.
    mitk::Color color;
    mitk::ColorProperty::Pointer mitkColorProp =
      dynamic_cast<mitk::ColorProperty *>(GetDataNode()->GetProperty("color"));
    if (mitkColorProp.IsNotNull())
    {
      color = mitkColorProp->GetColor();
    }
    else
    {
      color.SetRed(0);
      color.SetBlue(1);
      color.SetGreen(0);
    }

    float lwidth = 1;
    mitk::FloatProperty::Pointer mitkLWidthProp =
      dynamic_cast<mitk::FloatProperty *>(GetDataNode()->GetProperty("LineWidth"));
    if (mitkLWidthProp.IsNotNull())
    {
      lwidth = mitkLWidthProp->GetValue();
    }

    // Forward transform for drawing, again with the scale factored out.
    vtkTransform *trafo = vtkTransform::New();
    trafo->Identity();
    trafo->Concatenate(vtktransform);
    trafo->PreMultiply();
    double myscale[3];
    trafo->GetScale(myscale);
    trafo->Scale(1 / myscale[0], 1 / myscale[1], 1 / myscale[2]);

    this->PaintCells(glyphGenerator->GetOutput(),
                     renderer->GetCurrentWorldPlaneGeometry(),
                     trafo,
                     renderer,
                     NULL /*vtkLut*/,
                     color,
                     lwidth,
                     spacing);

    vectorMagnitudes->Delete();
    glyphSource->Delete();
    glyphGenerator->Delete();
    trafo->Delete();
  }
  else
  {
    std::cout << " no points cutted!" << std::endl;
  }
  // std::cout << "...done!" << std::endl;
}

// Draws the generated glyph polydata with immediate-mode OpenGL.  Single-point
// cells update a raster-alignment offset; multi-point cells are rendered as
// GL_LINE_LOOPs in display coordinates.
void mitk::VectorImageMapper2D::PaintCells(vtkPolyData *glyphs,
                                           const PlaneGeometry * /*worldGeometry*/,
                                           vtkLinearTransform *vtktransform,
                                           mitk::BaseRenderer *renderer,
                                           vtkScalarsToColors *lut,
                                           mitk::Color color,
                                           float lwidth,
                                           double *spacing)
{
  vtkPoints *points = glyphs->GetPoints();
  vtkPointData *vpointdata = glyphs->GetPointData();
  vtkDataArray *vpointscalars = vpointdata->GetArray("vectorMagnitudes");
  // vtkDataArray* vpointpositions = vpointdata->GetArray("pointPositions");
  assert(vpointscalars != NULL);
  // std::cout << "  Scalars range 2d:" << vpointscalars->GetRange()[0] << " " << vpointscalars->GetRange()[0] <<
  // std::endl;

  Point3D p;
  Point2D p2d;
  vtkIdList *idList;
  vtkCell *cell;

  // Offset carried over from the most recent single-point cell.
  double offset[3];
  for (auto &elem : offset)
  {
    elem = 0;
  }

  vtkIdType numCells = glyphs->GetNumberOfCells();
  for (vtkIdType cellId = 0; cellId < numCells; ++cellId)
  {
    double vp[3];
    cell = glyphs->GetCell(cellId);
    idList = cell->GetPointIds();
    int numPoints = idList->GetNumberOfIds();

    if (numPoints == 1)
    {
      // take transformation via vtktransform into account
      double pos[3], vp_raster[3];
      points->GetPoint(idList->GetId(0), vp);
      // Snap the point to the voxel raster before transforming.
      vp_raster[0] = vtkMath::Round(vp[0] / spacing[0]) * spacing[0];
      vp_raster[1] = vtkMath::Round(vp[1] / spacing[1]) * spacing[1];
      vp_raster[2] = vtkMath::Round(vp[2] / spacing[2]) * spacing[2];
      vtktransform->TransformPoint(vp_raster, pos);
      offset[0] = pos[0] - vp[0];
      offset[1] = pos[1] - vp[1];
      offset[2] = pos[2] - vp[2];
    }
    else
    {
      glLineWidth(lwidth);
      glBegin(GL_LINE_LOOP);
      for (int pointNr = 0; pointNr < numPoints; ++pointNr)
      {
        points->GetPoint(idList->GetId(pointNr), vp);
        vp[0] = vp[0] + offset[0];
        vp[1] = vp[1] + offset[1];
        vp[2] = vp[2] + offset[2];
        // NOTE(review): the transformed result "tmp" is never used -- the
        // untransformed vp is converted below.  Possibly intentional because
        // the offset already carries the transform; confirm.
        double tmp[3];
        vtktransform->TransformPoint(vp, tmp);
        vtk2itk(vp, p);

        // convert 3D point (in mm) to display coordinates (units )
        renderer->WorldToDisplay(p, p2d);

        if (lut != NULL)
        {
          // color each point according to point data
          // NOTE(review): this local "color" shadows the mitk::Color
          // parameter of the same name.
          double *color;
          if (vpointscalars != NULL)
          {
            vpointscalars->GetComponent(pointNr, 0);
            color = lut->GetColor(vpointscalars->GetComponent(idList->GetId(pointNr), 0));
            glColor3f(color[0], color[1], color[2]);
          }
        }
        else
        {
          glColor3f(color.GetRed(), color.GetGreen(), color.GetBlue());
        }

        // std::cout << idList->GetId( pointNr )<< ": " << p2d[0]<< " "<< p2d[1] << std::endl;
        // draw the line
        glVertex2f(p2d[0], p2d[1]);
      }
      glEnd();
    }
  }
}

// Sets up the shared cut plane and cutter used by Paint().
mitk::VectorImageMapper2D::VectorImageMapper2D()
{
  m_LUT = NULL;
  m_Plane = vtkPlane::New();
  m_Cutter = vtkCutter::New();

  m_Cutter->SetCutFunction(m_Plane);
  m_Cutter->GenerateValues(1, 0, 1);
}

// Releases the owned VTK objects.
mitk::VectorImageMapper2D::~VectorImageMapper2D()
{
  if (m_LUT != NULL)
    m_LUT->Delete();
  if (m_Plane != NULL)
    m_Plane->Delete();
  if (m_Cutter != NULL)
    m_Cutter->Delete();
}

// Maps the renderer's current world time onto a valid time step of the data
// object; falls back to 0 (with a warning) when the geometry is missing or
// the computed step is out of range.
int mitk::VectorImageMapper2D::GetCurrentTimeStep(mitk::BaseData *data, mitk::BaseRenderer *renderer)
{
  //
  // get the TimeGeometry of the input object
  //
  const TimeGeometry *dataTimeGeometry = data->GetUpdatedTimeGeometry();
  if ((dataTimeGeometry == NULL) || (dataTimeGeometry->CountTimeSteps() == 0))
  {
    itkWarningMacro(<< "The given object is missing a mitk::TimeGeometry, or the number of time steps is 0!");
    return 0;
  }

  //
  // get the world time
  //
  ScalarType time = renderer->GetTime();

  //
  // convert the world time to time steps of the input object
  //
  int timestep = 0;
  if (time > itk::NumericTraits<mitk::ScalarType>::NonpositiveMin())
    timestep = dataTimeGeometry->TimePointToTimeStep(time);
  if (dataTimeGeometry->IsValidTimeStep(timestep) == false)
  {
    itkWarningMacro(<< timestep << " is not a valid time of the given data object!");
    return 0;
  }
  return timestep;
}
class Solution
{
public:
    // Returns true iff there is a bijection between the characters of
    // `pattern` and the whitespace-separated words of `str` such that
    // pattern[i] always maps to the i-th word and vice versa.
    // E.g. ("abba", "dog cat cat dog") -> true,
    //      ("abba", "dog dog dog dog") -> false (mapping must be one-to-one).
    bool wordPattern(std::string pattern, std::string str)
    {
        // Tokenize on whitespace with a stream instead of a hand-rolled
        // character scanner; operator>> skips runs of spaces, matching the
        // original behavior.
        std::vector<std::string> words;
        std::istringstream stream(str);
        std::string word;
        while (stream >> word)
            words.push_back(word);

        // A bijection requires the same number of symbols on both sides.
        if (pattern.size() != words.size())
            return false;

        // Check both directions in a single pass: each char maps to exactly
        // one word, and each word maps back to exactly one char.
        std::map<char, std::string> charToWord;
        std::map<std::string, char> wordToChar;
        for (std::size_t i = 0; i < pattern.size(); ++i)
        {
            const char c = pattern[i];
            const std::string &w = words[i];

            auto cw = charToWord.find(c);
            if (cw == charToWord.end())
                charToWord[c] = w;
            else if (cw->second != w)
                return false;

            auto wc = wordToChar.find(w);
            if (wc == wordToChar.end())
                wordToChar[w] = c;
            else if (wc->second != c)
                return false;
        }
        return true;
    }
};
/* Plutonium library @file render_SDL2.hpp @brief Wrapper code to simplify SDL2 usage @author XorTroll @copyright Plutonium project - an easy-to-use UI framework for Nintendo Switch homebrew */ #pragma once #include <string> #include <pu/cross.h> #include <pu/pu_String.hpp> #include <pu/ui/ui_Types.hpp> #include <SDL2/SDL.h> #include <SDL2/SDL_image.h> #include <pu/ui/render/SDL_ttf.h> #include <SDL2/SDL2_gfxPrimitives.h> #include <SDL2/SDL_mixer.h> namespace pu::ui::render { typedef SDL_Window *NativeWindow; typedef SDL_Renderer *NativeRenderer; typedef SDL_Surface *NativeSurface; typedef SDL_Texture *NativeTexture; typedef TTF_Font *NativeFont; enum class SharedFont { Standard, ChineseSimplified, ExtendedChineseSimplified, ChineseTraditional, Korean, NintendoExtended, }; NativeTexture ConvertToTexture(NativeSurface Surface); NativeTexture RenderText(NativeFont Font, String Text, Color Color, u32 WrapLength = 1280, u32 LineSpacing = 0); NativeTexture LoadImage(std::string Path); NativeFont LoadSharedFont(SharedFont Type, s32 Size); NativeFont LoadFont(std::string Path, s32 Size); void SetDefaultFont(std::string Path); void SetDefaultFontFromShared(SharedFont Type); NativeFont LoadDefaultFont(s32 Size); s32 GetTextureWidth(NativeTexture Texture); s32 GetTextureHeight(NativeTexture Texture); void SetAlphaValue(NativeTexture Texture, u8 Alpha); void DeleteFont(NativeFont Font); void DeleteTexture(NativeTexture Texture); }
// // Copyright Jason Rice 2016 // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // #include <mpdef/list.hpp> #include <mpdef/tree_node.hpp> #include <boost/hana.hpp> namespace hana = boost::hana; int main() { { constexpr auto xs = mpdef::make_list( hana::make_map( mpdef::make_tree_node(hana::int_c<1>, hana::int_c<-1>), mpdef::make_tree_node(hana::int_c<2>, hana::int_c<-2>) ), hana::make_map( mpdef::make_tree_node(hana::int_c<2>, hana::int_c<-2>), mpdef::make_tree_node(hana::int_c<1>, hana::int_c<-1>) ) ); (void) xs; BOOST_HANA_CONSTANT_ASSERT(xs == hana::reverse(xs)); } { constexpr auto xs = mpdef::make_list( hana::int_c<0>, hana::int_c<1>, hana::int_c<2>, hana::int_c<3>, hana::int_c<4>, hana::int_c<5> ); (void)xs; BOOST_HANA_CONSTANT_ASSERT(hana::at(xs, hana::int_c<3>) == hana::int_c<3>); } { constexpr auto xs = mpdef::make_list( hana::int_c<0>, hana::int_c<1>, hana::int_c<2>, hana::int_c<3>, hana::int_c<4>, hana::int_c<5> ); (void)xs; BOOST_HANA_CONSTANT_ASSERT(hana::unpack(xs, hana::make_tuple) == hana::make_tuple( hana::int_c<0>, hana::int_c<1>, hana::int_c<2>, hana::int_c<3>, hana::int_c<4>, hana::int_c<5> ) ); } { constexpr auto xs1 = mpdef::make_list( hana::int_c<0>, hana::int_c<1>, hana::int_c<2>, hana::int_c<3>, hana::int_c<4>, hana::int_c<5> ); constexpr auto xs = mpdef::make_list(xs1, xs1, xs1); (void)xs; BOOST_HANA_CONSTANT_ASSERT(hana::flatten(xs) == mpdef::make_list( hana::int_c<0>, hana::int_c<1>, hana::int_c<2>, hana::int_c<3>, hana::int_c<4>, hana::int_c<5>, hana::int_c<0>, hana::int_c<1>, hana::int_c<2>, hana::int_c<3>, hana::int_c<4>, hana::int_c<5>, hana::int_c<0>, hana::int_c<1>, hana::int_c<2>, hana::int_c<3>, hana::int_c<4>, hana::int_c<5> ) ); } }
// -----------------------------------------------------------------------------------------------------
// Copyright (c) 2006-2021, Knut Reinert & Freie Universität Berlin
// Copyright (c) 2016-2021, Knut Reinert & MPI für molekulare Genetik
// This file may be used, modified and/or redistributed under the terms of the 3-clause BSD-License
// shipped with this file and also available at: https://github.com/seqan/seqan3/blob/master/LICENSE.md
// -----------------------------------------------------------------------------------------------------

/*!\file
 * \brief Provides seqan3::detail::affine_cell_proxy.
 * \author Rene Rahn <rene.rahn AT fu-berlin.de>
 */

#pragma once

#include <seqan3/std/concepts>
#include <tuple>
#include <type_traits>

#include <seqan3/alignment/matrix/detail/trace_directions.hpp>
#include <seqan3/core/detail/empty_type.hpp>
#include <seqan3/utility/detail/exposition_only_concept.hpp>
#include <seqan3/utility/simd/concept.hpp>
#include <seqan3/utility/tuple/concept.hpp>

namespace seqan3::detail
{

/*!\interface seqan3::detail::arithmetic_or_simd <>
 * \brief The concept for a type that models either seqan3::arithmetic or seqan3::simd::simd_concept.
 * \ingroup alignment_matrix
 */
//!\cond
template <typename t>
SEQAN3_CONCEPT arithmetic_or_simd = arithmetic<t> || simd_concept<t>;
//!\endcond

/*!\interface seqan3::detail::tracedirections_or_simd <>
 * \brief The concept for a type that either is the same type as seqan3::detail::trace_directions or
 *        models the seqan3::simd::simd_concept.
 * \ingroup alignment_matrix
 */
//!\cond
// Note: comparison is against the cv-ref-stripped type, so references and
// const-qualified trace_directions also satisfy the concept.
template <typename t>
SEQAN3_CONCEPT tracedirections_or_simd = std::same_as<std::remove_cvref_t<t>, trace_directions> || simd_concept<t>;
//!\endcond

/*!\interface seqan3::detail::affine_score_cell <>
 * \extends seqan3::tuple_like
 * \brief The concept for a type that models an affine cell of the score matrix.
 * \ingroup alignment_matrix
 *
 * \details
 *
 * This concept describes the requirements an alignment matrix cell must fulfil to represent an affine score
 * matrix entry.
 */
//!\cond
// A score cell is a 3-tuple whose elements are all scores (arithmetic or simd).
template <typename t>
SEQAN3_CONCEPT affine_score_cell = tuple_like<t> &&
                                   std::tuple_size_v<t> == 3 &&
                                   arithmetic_or_simd<std::remove_reference_t<std::tuple_element_t<0, t>>> &&
                                   arithmetic_or_simd<std::remove_reference_t<std::tuple_element_t<1, t>>> &&
                                   arithmetic_or_simd<std::remove_reference_t<std::tuple_element_t<2, t>>>;
//!\endcond

/*!\interface seqan3::detail::affine_trace_cell <>
 * \extends seqan3::tuple_like
 * \brief The concept for a type that models an affine cell of the trace matrix.
 * \ingroup alignment_matrix
 *
 * \details
 *
 * This concept describes the requirements an alignment matrix cell must fulfil to represent an affine trace
 * matrix entry.
 */
//!\cond
// A trace cell is a 3-tuple whose elements are all trace directions (or simd).
template <typename t>
SEQAN3_CONCEPT affine_trace_cell = tuple_like<t> &&
                                   std::tuple_size_v<t> == 3 &&
                                   tracedirections_or_simd<std::remove_reference_t<std::tuple_element_t<0, t>>> &&
                                   tracedirections_or_simd<std::remove_reference_t<std::tuple_element_t<1, t>>> &&
                                   tracedirections_or_simd<std::remove_reference_t<std::tuple_element_t<2, t>>>;
//!\endcond

/*!\interface seqan3::detail::affine_score_and_trace_cell <>
 * \extends seqan3::tuple_like
 * \brief The concept for a type that models an affine cell of the combined score and trace matrix.
 * \ingroup alignment_matrix
 *
 * \details
 *
 * This concept describes the requirements an alignment matrix cell must fulfil to represent an affine score
 * matrix entry with the score and trace information.
 */
//!\cond
// A combined cell is a pair of (score cell, trace cell).
template <typename t>
SEQAN3_CONCEPT affine_score_and_trace_cell = tuple_like<t> &&
                                             std::tuple_size_v<t> == 2 &&
                                             affine_score_cell<std::tuple_element_t<0, t>> &&
                                             affine_trace_cell<std::tuple_element_t<1, t>>;
//!\endcond

/*!\brief A proxy for an affine score matrix cell.
* \implements seqan3::tuple_like * \ingroup alignment_matrix * * \tparam tuple_t The underlying cell type of the affine alignment matrix; must model * seqan3::detail::affine_score_cell or seqan3::detail::affine_score_and_trace_cell. * * \details * * This wrapper provides a uniform access to the different elements of the cell within an affine score matrix. This * includes the best score, the horizontal gap score and the vertical gap score. In case of a combined alignment * matrix including the trace matrix, the interface is extended to also access the best, horizontal, and vertical trace * value. */ template <typename tuple_t> //!\cond requires (affine_score_cell<tuple_t> || affine_score_and_trace_cell<tuple_t>) //!\endcond class affine_cell_proxy : public tuple_t { private: //!\brief The type of the score cell. using score_cell_type = std::conditional_t<affine_score_cell<tuple_t>, tuple_t, std::tuple_element_t<0, tuple_t>>; //!\brief The type of the trace cell (might be seqan3::detail::empty_type if not defined). using trace_cell_type = std::conditional_t<affine_score_and_trace_cell<tuple_t>, std::tuple_element_t<1, tuple_t>, empty_type>; public: /*!\name Constructors, destructor and assignment * \{ */ affine_cell_proxy() = default; //!< Defaulted. affine_cell_proxy(affine_cell_proxy const &) = default; //!< Defaulted. affine_cell_proxy(affine_cell_proxy &&) = default; //!< Defaulted. affine_cell_proxy & operator=(affine_cell_proxy const &) = default; //!< Defaulted. affine_cell_proxy & operator=(affine_cell_proxy &&) = default; //!< Defaulted. ~affine_cell_proxy() = default; //!< Defaulted. // Inherit the base class's constructor to enable element-wise initialisation (direct and converting constructor). using tuple_t::tuple_t; //!\brief Converting constructor. Initialises from another tuple type. 
template <typename other_tuple_t> //!\cond requires std::constructible_from<tuple_t, other_tuple_t &&> //!\endcond explicit affine_cell_proxy(other_tuple_t && other) : tuple_t{std::forward<other_tuple_t>(other)} {} //!\brief Converting copy-constructor. template <typename other_tuple_t> //!\cond requires std::constructible_from<tuple_t, other_tuple_t const &> //!\endcond explicit affine_cell_proxy(affine_cell_proxy<other_tuple_t> const & other) : tuple_t{static_cast<other_tuple_t const &>(other)} {} //!\brief Converting move-constructor. template <typename other_tuple_t> //!\cond requires std::constructible_from<tuple_t, other_tuple_t> //!\endcond explicit affine_cell_proxy(affine_cell_proxy<other_tuple_t> && other) : tuple_t{static_cast<other_tuple_t &&>(std::move(other))} {} //!\brief Converting assignment. Initialises from another tuple type. template <typename other_tuple_t> //!\cond requires std::assignable_from<tuple_t &, other_tuple_t &&> //!\endcond affine_cell_proxy & operator=(other_tuple_t && other) { as_base() = std::forward<other_tuple_t>(other); return *this; } //!\brief Converting copy-assignment. template <typename other_tuple_t> //!\cond requires std::assignable_from<tuple_t &, other_tuple_t const &> //!\endcond affine_cell_proxy & operator=(affine_cell_proxy<other_tuple_t> const & other) { as_base() = static_cast<other_tuple_t const &>(other); return *this; } //!\brief Converting move-assignment. template <typename other_tuple_t> //!\cond requires std::assignable_from<tuple_t &, other_tuple_t> //!\endcond affine_cell_proxy & operator=(affine_cell_proxy<other_tuple_t> && other) { as_base() = static_cast<other_tuple_t &&>(std::move(other)); return *this; } //!\} /*!\name Score value accessor * \brief Specific accessor function to get the respective score value from an affine matrix cell. * \{ */ //!\brief Access the best score of the wrapped score matrix cell. 
decltype(auto) best_score() & noexcept { return get_score_impl<0>(*this); } //!\overload decltype(auto) best_score() const & noexcept { return get_score_impl<0>(*this); } //!\overload decltype(auto) best_score() && noexcept { return get_score_impl<0>(std::move(*this)); } //!\overload decltype(auto) best_score() const && noexcept { #if SEQAN3_WORKAROUND_GCC_94967 // A simple std::move(...) does not work, because it would mess up tuple_element types like `int const &` using return_t = std::tuple_element_t<0, score_cell_type>; return static_cast<return_t const &&>(get_score_impl<0>(std::move(*this))); #else // ^^^ workaround / no workaround vvv return get_score_impl<0>(std::move(*this)); #endif // SEQAN3_WORKAROUND_GCC_94967 } //!\brief Access the horizontal score of the wrapped score matrix cell. decltype(auto) horizontal_score() & noexcept { return get_score_impl<1>(*this); } //!\overload decltype(auto) horizontal_score() const & noexcept { return get_score_impl<1>(*this); } //!\overload decltype(auto) horizontal_score() && noexcept { return get_score_impl<1>(std::move(*this)); } //!\overload decltype(auto) horizontal_score() const && noexcept { #if SEQAN3_WORKAROUND_GCC_94967 // A simple std::move(...) does not work, because it would mess up tuple_element types like `int const &` using return_t = std::tuple_element_t<1, score_cell_type>; return static_cast<return_t const &&>(get_score_impl<1>(std::move(*this))); #else // ^^^ workaround / no workaround vvv return get_score_impl<1>(std::move(*this)); #endif // SEQAN3_WORKAROUND_GCC_94967 } //!\brief Access the vertical score of the wrapped score matrix cell. 
decltype(auto) vertical_score() & noexcept { return get_score_impl<2>(*this); } //!\overload decltype(auto) vertical_score() const & noexcept { return get_score_impl<2>(*this); } //!\overload decltype(auto) vertical_score() && noexcept { return get_score_impl<2>(std::move(*this)); } //!\overload decltype(auto) vertical_score() const && noexcept { #if SEQAN3_WORKAROUND_GCC_94967 // A simple std::move(...) does not work, because it would mess up tuple_element types like `int const &` using return_t = std::tuple_element_t<2, score_cell_type>; return static_cast<return_t const &&>(get_score_impl<2>(std::move(*this))); #else // ^^^ workaround / no workaround vvv return get_score_impl<2>(std::move(*this)); #endif // SEQAN3_WORKAROUND_GCC_94967 } //!\} /*!\name Trace value accessor * \brief Specific accessor function to get the respective trace value from an affine matrix cell. * \{ */ //!\brief Access the optimal score of the wrapped score matrix cell. decltype(auto) best_trace() & noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { return get_trace_impl<0>(*this); } //!\overload decltype(auto) best_trace() const & noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { return get_trace_impl<0>(*this); } //!\overload decltype(auto) best_trace() && noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { return get_trace_impl<0>(std::move(*this)); } //!\overload decltype(auto) best_trace() const && noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { #if SEQAN3_WORKAROUND_GCC_94967 // A simple std::move(...) 
does not work, because it would mess up tuple_element types like `int const &` using return_t = std::tuple_element_t<0, trace_cell_type>; return static_cast<return_t const &&>(get_trace_impl<0>(std::move(*this))); #else // ^^^ workaround / no workaround vvv return get_trace_impl<0>(std::move(*this)); #endif // SEQAN3_WORKAROUND_GCC_94967 } //!\brief Access the horizontal score of the wrapped score matrix cell. decltype(auto) horizontal_trace() & noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { return get_trace_impl<1>(*this); } //!\overload decltype(auto) horizontal_trace() const & noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { return get_trace_impl<1>(*this); } //!\overload decltype(auto) horizontal_trace() && noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { return get_trace_impl<1>(std::move(*this)); } //!\overload decltype(auto) horizontal_trace() const && noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { #if SEQAN3_WORKAROUND_GCC_94967 // A simple std::move(...) does not work, because it would mess up tuple_element types like `int const &` using return_t = std::tuple_element_t<1, trace_cell_type>; return static_cast<return_t const &&>(get_trace_impl<1>(std::move(*this))); #else // ^^^ workaround / no workaround vvv return get_trace_impl<1>(std::move(*this)); #endif // SEQAN3_WORKAROUND_GCC_94967 } //!\brief Access the vertical score of the wrapped score matrix cell. 
decltype(auto) vertical_trace() & noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { return get_trace_impl<2>(*this); } //!\overload decltype(auto) vertical_trace() const & noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { return get_trace_impl<2>(*this); } //!\overload decltype(auto) vertical_trace() && noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { return get_trace_impl<2>(std::move(*this)); } //!\overload decltype(auto) vertical_trace() const && noexcept //!\cond requires affine_score_and_trace_cell<tuple_t> //!\endcond { #if SEQAN3_WORKAROUND_GCC_94967 // A simple std::move(...) does not work, because it would mess up tuple_element types like `int const &` using return_t = std::tuple_element_t<2, trace_cell_type>; return static_cast<return_t const &&>(get_trace_impl<2>(std::move(*this))); #else // ^^^ workaround / no workaround vvv return get_trace_impl<2>(std::move(*this)); #endif // SEQAN3_WORKAROUND_GCC_94967 } //!\} private: /*!\brief Implements the get interface for the various calls to receive the score value. * \tparam index The index of the tuple element to get; must be smaller than 3. * \tparam this_t The perfectly forwarded type of `*this`. * * \param[in] me The instance of `*this`. * * \returns The score value from the given tuple index. */ template <size_t index, typename this_t> //!\cond requires (index < 3) //!\endcond static constexpr decltype(auto) get_score_impl(this_t && me) noexcept { using std::get; if constexpr (affine_score_cell<tuple_t>) return get<index>(std::forward<this_t>(me)); else return get<index>(get<0>(std::forward<this_t>(me))); } /*!\brief Implements the get interface for the various calls to receive the trace value. * \tparam index The index of the tuple element to get; must be smaller than 3. * \tparam this_t The perfectly forwarded type of `*this`. * * \param[in] me The instance of `*this`. 
* * \returns The trace value from the given tuple index. */ template <size_t index, typename this_t> //!\cond requires (index < 3 && affine_score_and_trace_cell<tuple_t>) //!\endcond static constexpr decltype(auto) get_trace_impl(this_t && me) noexcept { using std::get; return get<index>(get<1>(std::forward<this_t>(me))); } //!\brief Casts `this` to the base class type. tuple_t & as_base() & noexcept { return static_cast<tuple_t &>(*this); } }; } // namespace seqan3::detail namespace std { //!\cond template <typename tuple_t> //!\cond requires (seqan3::detail::affine_score_cell<tuple_t> || seqan3::detail::affine_score_and_trace_cell<tuple_t>) //!\endcond struct tuple_size<seqan3::detail::affine_cell_proxy<tuple_t>> : public tuple_size<tuple_t> {}; template <size_t index, typename tuple_t> //!\cond requires (seqan3::detail::affine_score_cell<tuple_t> || seqan3::detail::affine_score_and_trace_cell<tuple_t>) //!\endcond struct tuple_element<index, seqan3::detail::affine_cell_proxy<tuple_t>> : public tuple_element<index, tuple_t> {}; //!\endcond } // namespace std
/********************************************************************* * Software License Agreement (BSD License) * * Copyright (c) 2018, Mohamad Ayman. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * The name of Mohamad Ayman may not be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 *********************************************************************/

/* Author: Mohamad Ayman */

// SA
#include "simulation_widget.h"

// Qt
#include <QVBoxLayout>
#include <QMessageBox>
#include <QPushButton>
#include <QColor>

#include <moveit/robot_state/conversions.h>
#include <moveit_msgs/msg/display_robot_state.hpp>

#include <regex>

namespace moveit_setup_assistant
{
// ******************************************************************************************
// Constructor: builds the "Simulate With Gazebo" screen — header text, a read-only command
// hint, the "Generate URDF" button, the URDF preview pane, and the copy-to-clipboard link.
// ******************************************************************************************
SimulationWidget::SimulationWidget(QWidget* parent, const MoveItConfigDataPtr& config_data)
  : SetupScreenWidget(parent), config_data_(config_data)
{
  // Basic widget container
  QVBoxLayout* layout = new QVBoxLayout();
  layout->setAlignment(Qt::AlignTop);

  // Top Header Area ------------------------------------------------
  HeaderWidget* header = new HeaderWidget("Simulate With Gazebo",
                                          "The following tool will auto-generate the URDF changes needed "
                                          "for Gazebo compatibility with ROSControl and MoveIt. The "
                                          "needed changes are shown in green.",
                                          this);
  layout->addWidget(header);

  // Spacing
  QSpacerItem* blank_space = new QSpacerItem(1, 8);
  layout->addSpacerItem(blank_space);

  QLabel* instructions = new QLabel(this);
  instructions->setText("You can run the following command to quickly find the necessary URDF file to edit:");
  layout->addWidget(instructions);

  // Read-only one-liner showing the roscd command for the configured URDF package.
  QTextEdit* instructions_command = new QTextEdit(this);
  instructions_command->setText(std::string("roscd " + config_data->urdf_pkg_name_).c_str());
  instructions_command->setReadOnly(true);
  instructions_command->setMaximumHeight(30);
  layout->addWidget(instructions_command);

  // Spacing
  blank_space = new QSpacerItem(1, 6);
  layout->addSpacerItem(blank_space);

  // Used to make the new URDF visible
  QPushButton* btn_generate = new QPushButton("&Generate URDF", this);
  btn_generate->setMinimumWidth(180);
  btn_generate->setMinimumHeight(40);
  layout->addWidget(btn_generate);
  layout->setAlignment(btn_generate, Qt::AlignLeft);
  connect(btn_generate, SIGNAL(clicked()), this, SLOT(generateURDFClick()));

  // Shown instead of the preview when there was no change to be made.
  no_changes_label_ = new QLabel(this);
  no_changes_label_->setText("No Changes To Be Made");
  layout->addWidget(no_changes_label_);
  no_changes_label_->setVisible(false);

  // URDF text
  simulation_text_ = new QTextEdit(this);
  simulation_text_->setLineWrapMode(QTextEdit::NoWrap);
  layout->addWidget(simulation_text_);

  // Copy URDF link, hidden initially (made visible once a URDF has been generated).
  copy_urdf_ = new QLabel(this);
  copy_urdf_->setText("<a href='contract'>Copy to Clipboard</a>");
  connect(copy_urdf_, SIGNAL(linkActivated(const QString)), this, SLOT(copyURDF(const QString)));
  copy_urdf_->setVisible(false);
  layout->addWidget(copy_urdf_);

  // Finish Layout --------------------------------------------------
  this->setLayout(layout);
}

// ******************************************************************************************
// Called when generate URDF button is clicked
//
// ******************************************************************************************
void SimulationWidget::generateURDFClick()
{
  simulation_text_->setVisible(true);
  std::string gazebo_compatible_urdf_string = config_data_->getGazeboCompatibleURDF();
  std::size_t urdf_length = gazebo_compatible_urdf_string.length();

  // Check if the urdf do need new elements to be added
  if (urdf_length > 0)
  {
    // Split the added elements from the original urdf to view them in different colors
    std::smatch start_match;
    std::smatch end_match;
    std::regex start_reg_ex("<inertial");
    std::regex end_reg_ex("</inertial");

    // Search for inertial elemnts using regex
    std::regex_search(gazebo_compatible_urdf_string, start_match, start_reg_ex);
    std::regex_search(gazebo_compatible_urdf_string, end_match, end_reg_ex);

    // Used to cache the positions of the opening and closing of the inertial elements
    std::vector<int> inertial_opening_matches;
    std::vector<int> inertial_closing_matches;

    // Seed with 0 so the first "unmodified" slice starts at the beginning of the document.
    inertial_closing_matches.push_back(0);

    // Cache the positions of the openings of the inertial elements
    for (auto it = std::sregex_iterator(gazebo_compatible_urdf_string.begin(), gazebo_compatible_urdf_string.end(),
                                        start_reg_ex);
         it != std::sregex_iterator(); ++it)
    {
      inertial_opening_matches.push_back(it->position());
    }

    // Sentinel so the loop below also emits the text after the last inertial element.
    inertial_opening_matches.push_back(urdf_length);

    // Cache the positions of the closings of the inertial elements
    for (auto it = std::sregex_iterator(gazebo_compatible_urdf_string.begin(), gazebo_compatible_urdf_string.end(),
                                        end_reg_ex);
         it != std::sregex_iterator(); ++it)
    {
      inertial_closing_matches.push_back(it->position());
    }

    // Alternate: [closing_i, opening_i) is untouched text (black), [opening_i, closing_{i+1})
    // is a generated inertial element (green).
    for (std::size_t match_number = 0; match_number < inertial_opening_matches.size() - 1; match_number++)
    {
      // Show the unmodified elements in black
      simulation_text_->setTextColor(QColor("black"));
      simulation_text_->append(
          QString(gazebo_compatible_urdf_string
                      .substr(inertial_closing_matches[match_number],
                              inertial_opening_matches[match_number] - inertial_closing_matches[match_number])
                      .c_str()));

      // Show the added elements in green.
      // The regex matched at '<' of "</inertial"; +11 extends the slice past the full
      // closing tag "</inertial>" (11 characters).
      simulation_text_->setTextColor(QColor("green"));
      simulation_text_->append(
          QString(gazebo_compatible_urdf_string
                      .substr(inertial_opening_matches[match_number],
                              inertial_closing_matches[match_number + 1] - inertial_opening_matches[match_number] + 11)
                      .c_str()));
      // Advance the cached closing position past "</inertial>" for the next black slice.
      inertial_closing_matches[match_number + 1] += 11;
    }

    // Position of the first transmission element in the urdf
    std::size_t first_transmission = gazebo_compatible_urdf_string.find("<transmission");

    // Position of the last inertial element in the urdf
    std::size_t last_inertial = inertial_closing_matches[inertial_opening_matches.size() - 1];

    if (first_transmission != std::string::npos)
    {
      simulation_text_->setTextColor(QColor("black"));
      simulation_text_->append(
          QString(gazebo_compatible_urdf_string.substr(last_inertial, first_transmission - last_inertial).c_str()));

      // Write from the first transmission element until the closing robot element in green.
      // NOTE(review): the -10 appears to trim the trailing "</robot>" tag (8 chars) plus
      // surrounding whitespace, which is re-emitted in black below — TODO confirm against
      // the exact output of getGazeboCompatibleURDF().
      simulation_text_->setTextColor(QColor("green"));
      simulation_text_->append(QString(
          gazebo_compatible_urdf_string.substr(first_transmission, urdf_length - first_transmission - 10).c_str()));

      // Write the closing robot element in black
      simulation_text_->setTextColor(QColor("black"));
      simulation_text_->append(QString("</robot>"));
    }

    // Copy link appears after the text is ready
    copy_urdf_->setVisible(true);
  }
  else
  {
    // Nothing to change: show the (unmodified) URDF and the "No Changes" label.
    simulation_text_->append(QString(gazebo_compatible_urdf_string.c_str()));
    no_changes_label_->setVisible(true);
  }
}

// ******************************************************************************************
// Called when the copy to clipboard link is clicked
// ******************************************************************************************
void SimulationWidget::copyURDF(const QString& /*link*/)
{
  simulation_text_->selectAll();
  simulation_text_->copy();
}
}  // namespace moveit_setup_assistant
// // Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2022 // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #pragma once #include "td/telegram/PollId.h" #include "td/telegram/PollManager.h" #include "td/telegram/PollManager.hpp" #include "td/telegram/Td.h" namespace td { template <class StorerT> void store(const PollId &poll_id, StorerT &storer) { storer.context()->td().get_actor_unsafe()->poll_manager_->store_poll(poll_id, storer); } template <class ParserT> void parse(PollId &poll_id, ParserT &parser) { poll_id = parser.context()->td().get_actor_unsafe()->poll_manager_->parse_poll(parser); } } // namespace td
/*=============================================================================
    Copyright (c) 2001-2010 Joel de Guzman

    Distributed under the Boost Software License, Version 1.0. (See accompanying
    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/

///////////////////////////////////////////////////////////////////////////////
//
//  A Calculator example demonstrating generation of AST
//
//  [ JDG April 28, 2008 ]
//
///////////////////////////////////////////////////////////////////////////////

#include <boost/config/warning_disable.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/variant/recursive_variant.hpp>
#include <boost/variant/apply_visitor.hpp>
#include <boost/spirit/include/phoenix_operator.hpp>
#include <boost/spirit/include/phoenix_function.hpp>

#include <iostream>
#include <vector>
#include <string>

namespace client
{
    namespace qi = boost::spirit::qi;
    namespace ascii = boost::spirit::ascii;

    ///////////////////////////////////////////////////////////////////////////
    //  Our AST
    ///////////////////////////////////////////////////////////////////////////
    struct binary_op;
    struct unary_op;
    struct nil {};

    // A node of the expression tree: either empty (nil), a literal, or a
    // recursively wrapped sub-expression / operator node.
    struct expression_ast
    {
        typedef
            boost::variant<
                nil // can't happen!
              , unsigned int
              , boost::recursive_wrapper<expression_ast>
              , boost::recursive_wrapper<binary_op>
              , boost::recursive_wrapper<unary_op>
            >
        type;

        // Default-constructed nodes hold the nil alternative.
        expression_ast()
          : expr(nil()) {}

        // Implicit conversion from any alternative; used by the grammar's
        // semantic actions (e.g. _val = _1 from an unsigned int).
        template <typename Expr>
        expression_ast(Expr const& expr)
          : expr(expr) {}

        // Compound-assignment operators build binary_op nodes in place; they are
        // what the phoenix actions (_val += _1 etc.) invoke.
        expression_ast& operator+=(expression_ast const& rhs);
        expression_ast& operator-=(expression_ast const& rhs);
        expression_ast& operator*=(expression_ast const& rhs);
        expression_ast& operator/=(expression_ast const& rhs);

        type expr;
    };

    // Binary operator node: operator character plus left/right sub-trees.
    struct binary_op
    {
        binary_op(
            char op
          , expression_ast const& left
          , expression_ast const& right)
          : op(op), left(left), right(right) {}

        char op;
        expression_ast left;
        expression_ast right;
    };

    // Unary operator node: operator character plus one sub-tree.
    struct unary_op
    {
        unary_op(
            char op
          , expression_ast const& subject)
          : op(op), subject(subject) {}

        char op;
        expression_ast subject;
    };

    expression_ast& expression_ast::operator+=(expression_ast const& rhs)
    {
        expr = binary_op('+', expr, rhs);
        return *this;
    }

    expression_ast& expression_ast::operator-=(expression_ast const& rhs)
    {
        expr = binary_op('-', expr, rhs);
        return *this;
    }

    expression_ast& expression_ast::operator*=(expression_ast const& rhs)
    {
        expr = binary_op('*', expr, rhs);
        return *this;
    }

    expression_ast& expression_ast::operator/=(expression_ast const& rhs)
    {
        expr = binary_op('/', expr, rhs);
        return *this;
    }

    // We should be using expression_ast::operator-. There's a bug
    // in phoenix type deduction mechanism that prevents us from
    // doing so. Phoenix will be switching to BOOST_TYPEOF. In the
    // meantime, we will use a phoenix::function below:
    struct negate_expr
    {
        // Result-of protocol required by boost::phoenix::function.
        template <typename T>
        struct result { typedef T type; };

        expression_ast operator()(expression_ast const& expr) const
        {
            return expression_ast(unary_op('-', expr));
        }
    };

    boost::phoenix::function<negate_expr> neg;

    ///////////////////////////////////////////////////////////////////////////
    //  Walk the tree
    ///////////////////////////////////////////////////////////////////////////
    // Static visitor that prints the AST in prefix form, e.g. "op:+(1, 2)".
    struct ast_print
    {
        typedef void result_type;

        // NOTE(review): the variant's empty alternative is client::nil; this overload
        // accepts qi::info::nil instead — confirm the two are the same type in the
        // targeted Spirit version, otherwise apply_visitor has no match for nil.
        void operator()(qi::info::nil) const {}
        // Handles the `unsigned int` alternative via implicit conversion.
        void operator()(int n) const { std::cout << n; }

        void operator()(expression_ast const& ast) const
        {
            boost::apply_visitor(*this, ast.expr);
        }

        void operator()(binary_op const& expr) const
        {
            std::cout << "op:" << expr.op << "(";
            boost::apply_visitor(*this, expr.left.expr);
            std::cout << ", ";
            boost::apply_visitor(*this, expr.right.expr);
            std::cout << ')';
        }

        void operator()(unary_op const& expr) const
        {
            std::cout << "op:" << expr.op << "(";
            boost::apply_visitor(*this, expr.subject.expr);
            std::cout << ')';
        }
    };

    ///////////////////////////////////////////////////////////////////////////
    //  Our calculator grammar
    ///////////////////////////////////////////////////////////////////////////
    // Classic expression grammar with the usual precedence:
    //   expression := term (('+'|'-') term)*
    //   term       := factor (('*'|'/') factor)*
    //   factor     := uint | '(' expression ')' | ('-'|'+') factor
    template <typename Iterator>
    struct calculator : qi::grammar<Iterator, expression_ast(), ascii::space_type>
    {
        calculator() : calculator::base_type(expression)
        {
            using qi::_val;
            using qi::_1;
            using qi::uint_;

            expression =
                term                            [_val = _1]
                >> *(   ('+' >> term            [_val += _1])
                    |   ('-' >> term            [_val -= _1])
                    )
                ;

            term =
                factor                          [_val = _1]
                >> *(   ('*' >> factor          [_val *= _1])
                    |   ('/' >> factor          [_val /= _1])
                    )
                ;

            factor =
                uint_                           [_val = _1]
                |   '(' >> expression           [_val = _1] >> ')'
                |   ('-' >> factor              [_val = neg(_1)])
                |   ('+' >> factor              [_val = _1])
                ;
        }

        qi::rule<Iterator, expression_ast(), ascii::space_type> expression, term, factor;
    };
}

///////////////////////////////////////////////////////////////////////////////
// Main program
/////////////////////////////////////////////////////////////////////////////// int main() { std::cout << "/////////////////////////////////////////////////////////\n\n"; std::cout << "Expression parser...\n\n"; std::cout << "/////////////////////////////////////////////////////////\n\n"; std::cout << "Type an expression...or [q or Q] to quit\n\n"; using boost::spirit::ascii::space; using client::expression_ast; using client::ast_print; typedef std::string::const_iterator iterator_type; typedef client::calculator<iterator_type> calculator; calculator calc; // Our grammar std::string str; while (std::getline(std::cin, str)) { if (str.empty() || str[0] == 'q' || str[0] == 'Q') break; std::string::const_iterator iter = str.begin(); std::string::const_iterator end = str.end(); expression_ast ast; ast_print printer; bool r = phrase_parse(iter, end, calc, space, ast); if (r && iter == end) { std::cout << "-------------------------\n"; std::cout << "Parsing succeeded\n"; printer(ast); std::cout << "\n-------------------------\n"; } else { std::string rest(iter, end); std::cout << "-------------------------\n"; std::cout << "Parsing failed\n"; std::cout << "stopped at: \": " << rest << "\"\n"; std::cout << "-------------------------\n"; } } std::cout << "Bye... :-) \n\n"; return 0; }
// Copyright 2019-2021 Lawrence Livermore National Security, LLC and other YGM
// Project Developers. See the top-level COPYRIGHT file for details.
//
// SPDX-License-Identifier: MIT

#pragma once

#include <vector>
#include <memory>
#include <deque>
#include <thread>
#include <atomic>
#include <mutex>

#include <ygm/detail/mpi.hpp>
#include <ygm/detail/ygm_cereal_archive.hpp>
#include <ygm/meta/functional.hpp>

namespace ygm {

// Implementation of ygm::comm: buffered asynchronous messaging on top of MPI.
// A dedicated listener thread receives messages on a private duplicate of the
// communicator while the owning thread sends, flushes, and processes the
// receive queue.
class comm::impl {
 public:
  // Duplicates the given communicator three times (async traffic, barrier
  // reductions, collective utilities), pre-allocates one send buffer per rank,
  // and starts the listener thread.
  impl(MPI_Comm c, int buffer_capacity = 16 * 1024) {
    ASSERT_MPI(MPI_Comm_dup(c, &m_comm_async));
    ASSERT_MPI(MPI_Comm_dup(c, &m_comm_barrier));
    ASSERT_MPI(MPI_Comm_dup(c, &m_comm_other));
    ASSERT_MPI(MPI_Comm_size(m_comm_async, &m_comm_size));
    ASSERT_MPI(MPI_Comm_rank(m_comm_async, &m_comm_rank));
    m_buffer_capacity = buffer_capacity;

    // Allocate send buffers
    for (int i = 0; i < m_comm_size; ++i) {
      m_vec_send_buffers.push_back(allocate_buffer());
    }

    // launch listener thread
    m_listener = std::thread(&impl::listen, this);
  }

  ~impl() {
    barrier();
    // send kill signal to self (listener thread); the listener recognises a
    // message from its own rank as the shutdown sentinel.
    MPI_Send(NULL, 0, MPI_BYTE, m_comm_rank, 0, m_comm_async);

    // Join listener thread.
    m_listener.join();

    // Free cloned communicator.
    ASSERT_RELEASE(MPI_Barrier(m_comm_async) == MPI_SUCCESS);
    MPI_Comm_free(&m_comm_async);
    MPI_Comm_free(&m_comm_barrier);
    MPI_Comm_free(&m_comm_other);
  }

  int size() const { return m_comm_size; }
  int rank() const { return m_comm_rank; }

  // Queues a lambda + arguments for execution on rank `dest`. Self-sends are
  // executed immediately; remote sends are packed and appended to the per-rank
  // send buffer (flushed when full), with oversized payloads sent via the
  // large-message protocol.
  template <typename... SendArgs>
  void async(int dest, const SendArgs &... args) {
    ASSERT_DEBUG(dest < m_comm_size);
    if (dest == m_comm_rank) {
      local_receive(std::forward<const SendArgs>(args)...);
    } else {
      m_send_count++;
      std::vector<char> data = pack_lambda(std::forward<const SendArgs>(args)...);
      if (data.size() < m_buffer_capacity) {
        // check if buffer doesn't have enough space
        if (data.size() + m_vec_send_buffers[dest]->size() > m_buffer_capacity) {
          async_flush(dest);
        }
        // add data to the to dest buffer
        m_vec_send_buffers[dest]->insert(m_vec_send_buffers[dest]->end(), data.begin(), data.end());
      } else {
        // Large message
        send_large_message(data, dest);
      }
    }
    // check if listener has queued receives to process
    if (receive_queue_peek_size() > 0) {
      receive_queue_process();
    }
  }

  //
  //
  //
  // Blocking barrier
  // void barrier() {
  //   int64_t all_count = -1;
  //   while (all_count != 0) {
  //     receive_queue_process();
  //     do {
  //       async_flush_all();
  //       std::this_thread::yield();
  //     } while (receive_queue_process());
  //     int64_t local_count = m_send_count - m_recv_count;
  //     ASSERT_MPI(MPI_Allreduce(&local_count, &all_count, 1, MPI_INT64_T,
  //                              MPI_SUM, m_comm_barrier));
  //     std::this_thread::yield();
  //     // std::cout << "MPI_Allreduce() " << std::endl;
  //   }
  // }

  // Drains local work: processes queued receives and flushes all send buffers
  // until a pass produces no new messages.
  void wait_local_idle() {
    receive_queue_process();
    do {
      async_flush_all();
      std::this_thread::yield();
    } while (receive_queue_process());
  }

  // Global quiescence barrier: all ranks agree that sends == receives.
  // A first non-blocking allreduce of (sent - received) is verified by a second
  // blocking allreduce to rule out messages that were in flight during the
  // first; on any mismatch the whole procedure restarts.
  void barrier() {
    while (true) {
      wait_local_idle();

      MPI_Request req = MPI_REQUEST_NULL;
      int64_t first_all_count{-1};
      int64_t first_local_count = m_send_count - m_recv_count;
      ASSERT_MPI(MPI_Iallreduce(&first_local_count, &first_all_count, 1, MPI_INT64_T, MPI_SUM, m_comm_barrier, &req));
      while (true) {
        int test_flag{-1};
        ASSERT_MPI(MPI_Test(&req, &test_flag, MPI_STATUS_IGNORE));
        if (test_flag) {
          if (first_all_count == 0) {
            // double check
            int64_t second_all_count{-1};
            int64_t second_local_count = m_send_count - m_recv_count;
            ASSERT_MPI(MPI_Allreduce(&second_local_count, &second_all_count, 1, MPI_INT64_T, MPI_SUM, m_comm_barrier));
            if (second_all_count == 0) {
              ASSERT_RELEASE(first_local_count == second_local_count);
              return;
            }
          }
          break;  // failed, start over
        } else {
          wait_local_idle();
        }
      }
    }
  }

  //
  // SOMETHING WRONG :(
  // Non-blocking barrier loop
  // void barrier() {
  //   std::pair<int64_t, int64_t> last{-1, -2}, current{-3, -4}, local{-5, -6};
  //   MPI_Request req = MPI_REQUEST_NULL;
  //   do {
  //     receive_queue_process();
  //     do { async_flush_all(); } while (receive_queue_process());
  //     int64_t local_count = m_send_count - m_recv_count;
  //     if (req == MPI_REQUEST_NULL) {
  //       last = current;
  //       current = {-3, -4};
  //       local = std::make_pair(m_send_count, m_recv_count);
  //       ASSERT_MPI(MPI_Iallreduce(&local, &current, 2, MPI_INT64_T, MPI_SUM,
  //                                 m_comm_barrier, &req));
  //     } else {
  //       int flag{-1};
  //       ASSERT_MPI(MPI_Test(&req, &flag, MPI_STATUS_IGNORE));
  //       if (flag) {
  //         req = MPI_REQUEST_NULL;
  //       } else {
  //         std::this_thread::yield();
  //       }
  //     }
  //   } while (req != MPI_REQUEST_NULL || current.first != current.second ||
  //            last != current);
  //   ASSERT_MPI(MPI_Barrier(m_comm_barrier));
  // }

  // Sends the pending buffer for `dest` (if any) and swaps in a fresh one.
  void async_flush(int dest) {
    if (dest != m_comm_rank) {  // Skip dest == m_comm_rank; Only kill messages go to self.
      if (m_vec_send_buffers[dest]->size() == 0) return;
      auto buffer = allocate_buffer();
      std::swap(buffer, m_vec_send_buffers[dest]);
      ASSERT_MPI(MPI_Send(buffer->data(), buffer->size(), MPI_BYTE, dest, 0, m_comm_async));
      free_buffer(buffer);
    }
  }

  // Flushes every rank's buffer, starting at this rank's offset to avoid all
  // ranks hammering rank 0 first.
  void async_flush_all() {
    for (int i = 0; i < size(); ++i) {
      int dest = (rank() + i) % size();
      async_flush(dest);
    }
    // TODO async_flush_bcast(); goes here
  }

  template <typename T>
  T all_reduce_sum(const T &t) const {
    T to_return;
    ASSERT_MPI(MPI_Allreduce(&t, &to_return, 1, detail::mpi_typeof(T()), MPI_SUM, m_comm_other));
    return to_return;
  }

  template <typename T>
  T all_reduce_min(const T &t) const {
    T to_return;
    ASSERT_MPI(MPI_Allreduce(&t, &to_return, 1, detail::mpi_typeof(T()), MPI_MIN, m_comm_other));
    return to_return;
  }

  template <typename T>
  T all_reduce_max(const T &t) const {
    T to_return;
    ASSERT_MPI(MPI_Allreduce(&t, &to_return, 1, detail::mpi_typeof(T()), MPI_MAX, m_comm_other));
    return to_return;
  }

  // Point-to-point send of an arbitrary cereal-serialisable value: sends the
  // packed size first, then the payload.
  template <typename T>
  void mpi_send(const T &data, int dest, int tag, MPI_Comm comm) const {
    std::vector<char> packed;
    cereal::YGMOutputArchive oarchive(packed);
    oarchive(data);
    size_t packed_size = packed.size();
    ASSERT_RELEASE(packed_size < 1024 * 1024 * 1024);
    ASSERT_MPI(MPI_Send(&packed_size, 1, detail::mpi_typeof(packed_size), dest, tag, comm));
    ASSERT_MPI(MPI_Send(packed.data(), packed_size, MPI_BYTE, dest, tag, comm));
  }

  // Counterpart of mpi_send: receives size, then payload, then deserialises.
  template <typename T>
  T mpi_recv(int source, int tag, MPI_Comm comm) const {
    std::vector<char> packed;
    size_t packed_size{0};
    ASSERT_MPI(MPI_Recv(&packed_size, 1, detail::mpi_typeof(packed_size), source, tag, comm, MPI_STATUS_IGNORE));
    packed.resize(packed_size);
    ASSERT_MPI(MPI_Recv(packed.data(), packed_size, MPI_BYTE, source, tag, comm, MPI_STATUS_IGNORE));
    T to_return;
    cereal::YGMInputArchive iarchive(packed.data(), packed.size());
    iarchive(to_return);
    return to_return;
  }

  // Broadcast of a cereal-serialisable value: root packs, everyone learns the
  // size, then the payload is bcast and deserialised on every rank.
  template <typename T>
  T mpi_bcast(const T &to_bcast, int root, MPI_Comm comm) const {
    std::vector<char> packed;
    cereal::YGMOutputArchive oarchive(packed);
    if (rank() == root) {
      oarchive(to_bcast);
    }
    size_t packed_size = packed.size();
    ASSERT_RELEASE(packed_size < 1024 * 1024 * 1024);
    ASSERT_MPI(MPI_Bcast(&packed_size, 1, detail::mpi_typeof(packed_size), root, comm));
    if (rank() != root) {
      packed.resize(packed_size);
    }
    ASSERT_MPI(MPI_Bcast(packed.data(), packed_size, MPI_BYTE, root, comm));
    cereal::YGMInputArchive iarchive(packed.data(), packed.size());
    T to_return;
    iarchive(to_return);
    return to_return;
  }

  /**
   * @brief Tree based reduction, could be optimized significantly
   *
   * Reduces up an implicit binary tree rooted at rank 0 (children of rank r
   * are 2r+1 and 2r+2), then broadcasts the result back to all ranks.
   *
   * @tparam T
   * @tparam MergeFunction
   * @param in
   * @param merge
   * @return T
   */
  template <typename T, typename MergeFunction>
  T all_reduce(const T &in, MergeFunction merge) const {
    int first_child  = 2 * rank() + 1;
    int second_child = 2 * (rank() + 1);
    int parent       = (rank() - 1) / 2;

    // Step 1: Receive from children, merge into tmp
    T tmp = in;
    if (first_child < size()) {
      T fc = mpi_recv<T>(first_child, 0, m_comm_other);
      tmp  = merge(tmp, fc);
    }
    if (second_child < size()) {
      T sc = mpi_recv<T>(second_child, 0, m_comm_other);
      tmp  = merge(tmp, sc);
    }

    // Step 2: Send merged to parent
    if (rank() != 0) {
      mpi_send(tmp, parent, 0, m_comm_other);
    }

    // Step 3: Rank 0 bcasts
    T to_return = mpi_bcast(tmp, 0, m_comm_other);
    return to_return;
  }

 private:
  /**
   * @brief Listener thread
   *
   * Loops on MPI_Recv: handles large-message announcements, queues regular
   * buffers for the owning thread, and exits when it receives a message from
   * its own rank (the kill signal sent by the destructor).
   */
  void listen() {
    while (true) {
      auto recv_buffer = allocate_buffer();
      recv_buffer->resize(m_buffer_capacity);  // TODO: does this clear?
      MPI_Status status;
      ASSERT_MPI(MPI_Recv(recv_buffer->data(), m_buffer_capacity, MPI_BYTE, MPI_ANY_SOURCE, MPI_ANY_TAG, m_comm_async,
                          &status));

      int tag = status.MPI_TAG;
      if (tag == large_message_announce_tag) {
        // Determine size and source of message
        size_t size = *(reinterpret_cast<size_t *>(recv_buffer->data()));
        int src = status.MPI_SOURCE;

        // Allocate large buffer
        auto large_recv_buff = std::make_shared<std::vector<char>>(size);

        // Receive large message
        receive_large_message(large_recv_buff, src, size);

        // Add buffer to receive queue
        receive_queue_push_back(large_recv_buff, src);
      } else {
        int count;
        // NOTE(review): ASSERT_MPI is used here without a trailing semicolon —
        // relies on the macro expansion supplying one; confirm against mpi.hpp.
        ASSERT_MPI(MPI_Get_count(&status, MPI_BYTE, &count))
        // std::cout << "RANK: " << rank() << " received count: " << count
        //           << std::endl;
        // Resize buffer to count MPI actually received
        recv_buffer->resize(count);
        // Check for kill signal
        if (status.MPI_SOURCE == m_comm_rank) break;
        // Add buffer to receive queue
        receive_queue_push_back(recv_buffer, status.MPI_SOURCE);
      }
    }
  }

  /*
   * @brief Send a large message
   *
   * @param dest Destination for message
   * @param msg Packed message to send
   */
  void send_large_message(const std::vector<char> &msg, const int dest) {
    // Announce the large message and its size.
    // NOTE(review): the count of 8 bytes assumes sizeof(size_t) == 8 on both
    // ends — TODO confirm / replace with sizeof(size).
    size_t size = msg.size();
    ASSERT_MPI(MPI_Send(&size, 8, MPI_BYTE, dest, large_message_announce_tag, m_comm_async));

    // Send message
    ASSERT_MPI(MPI_Send(msg.data(), size, MPI_BYTE, dest, large_message_tag, m_comm_async));
  }

  /*
   * @brief Receive a large message that has been announced
   *
   * @param src Source of message
   * @param msg Buffer to hold message
   */
  void receive_large_message(std::shared_ptr<std::vector<char>> msg, const int src, const size_t size) {
    ASSERT_MPI(MPI_Recv(msg->data(), size, MPI_BYTE, src, large_message_tag, m_comm_async, MPI_STATUS_IGNORE));
  }

  /**
   * @brief Allocates buffer; checks free pool first.
   *
   * @return std::shared_ptr<std::vector<char>>
   */
  std::shared_ptr<std::vector<char>> allocate_buffer() {
    std::scoped_lock lock(m_vec_free_buffers_mutex);
    if (m_vec_free_buffers.empty()) {
      auto to_return = std::make_shared<std::vector<char>>();
      to_return->reserve(m_buffer_capacity);
      return to_return;
    } else {
      auto to_return = m_vec_free_buffers.back();
      m_vec_free_buffers.pop_back();
      return to_return;
    }
  }

  /**
   * @brief Frees a previously allocated buffer.  Adds buffer to free pool.
   *
   * @param b buffer to free
   */
  void free_buffer(std::shared_ptr<std::vector<char>> b) {
    b->clear();
    std::scoped_lock lock(m_vec_free_buffers_mutex);
    m_vec_free_buffers.push_back(b);
  }

  // NOTE(review): reads the deque size without holding m_receive_queue_mutex;
  // used only as a cheap "is there work?" hint by the owning thread.
  size_t receive_queue_peek_size() const { return m_receive_queue.size(); }

  // Pops the oldest (buffer, source-rank) pair, or (nullptr, -1) if empty.
  std::pair<std::shared_ptr<std::vector<char>>, int> receive_queue_try_pop() {
    std::scoped_lock lock(m_receive_queue_mutex);
    if (m_receive_queue.empty()) {
      return std::make_pair(std::shared_ptr<std::vector<char>>(), int(-1));
    } else {
      auto to_return = m_receive_queue.front();
      m_receive_queue.pop_front();
      return to_return;
    }
  }

  // Enqueues a received buffer; if the queue is backed up (>16 entries) the
  // listener sleeps proportionally to apply back-pressure on senders.
  void receive_queue_push_back(std::shared_ptr<std::vector<char>> b, int from) {
    size_t current_size = 0;
    {
      std::scoped_lock lock(m_receive_queue_mutex);
      m_receive_queue.push_back(std::make_pair(b, from));
      current_size = m_receive_queue.size();
    }
    if (current_size > 16) {
      std::this_thread::sleep_for(std::chrono::microseconds(current_size - 16));
    }
  }

  // Used if dest = m_comm_rank: executes the (stateless) lambda immediately.
  template <typename Lambda, typename... Args>
  int32_t local_receive(Lambda l, const Args &... args) {
    ASSERT_DEBUG(sizeof(Lambda) == 1);  // lambdas must be capture-free to be sent between ranks
    // Question: should this be std::forward(...)
    // \pp was: (l)(this, m_comm_rank, args...);
    ygm::meta::apply_optional(l, std::make_tuple(this, m_comm_rank), std::make_tuple(args...));
    return 1;
  }

  template <typename Lambda, typename... PackArgs>
  std::vector<char> pack_lambda(Lambda l, const PackArgs &...
args) { std::vector<char> to_return; const std::tuple<PackArgs...> tuple_args( std::forward<const PackArgs>(args)...); ASSERT_DEBUG(sizeof(Lambda) == 1); void (*fun_ptr)(impl *, int, cereal::YGMInputArchive &) = [](impl *t, int from, cereal::YGMInputArchive &bia) { std::tuple<PackArgs...> ta; bia(ta); Lambda *pl; auto t1 = std::make_tuple((impl *)t, from); // \pp was: std::apply(*pl, std::tuple_cat(t1, ta)); ygm::meta::apply_optional(*pl, std::move(t1), std::move(ta)); }; cereal::YGMOutputArchive oarchive(to_return); // Create an output archive // // oarchive(fun_ptr); int64_t iptr = (int64_t)fun_ptr - (int64_t)&reference; oarchive(iptr, tuple_args); return to_return; } // this is used to fix address space randomization static void reference() {} bool receive_queue_process() { bool received = false; while (true) { auto buffer_source = receive_queue_try_pop(); auto buffer = buffer_source.first; if (buffer == nullptr) break; int from = buffer_source.second; received = true; cereal::YGMInputArchive iarchive(buffer->data(), buffer->size()); while (!iarchive.empty()) { int64_t iptr; iarchive(iptr); iptr += (int64_t)&reference; void (*fun_ptr)(impl *, int, cereal::YGMInputArchive &); memcpy(&fun_ptr, &iptr, sizeof(uint64_t)); fun_ptr(this, from, iarchive); m_recv_count++; } // Only keep buffers of size m_buffer_capacity in pool of buffers if (buffer->size() == m_buffer_capacity) free_buffer(buffer); } return received; } MPI_Comm m_comm_async; MPI_Comm m_comm_barrier; MPI_Comm m_comm_other; int m_comm_size; int m_comm_rank; size_t m_buffer_capacity; std::vector<std::shared_ptr<std::vector<char>>> m_vec_send_buffers; std::mutex m_vec_free_buffers_mutex; std::vector<std::shared_ptr<std::vector<char>>> m_vec_free_buffers; std::deque<std::pair<std::shared_ptr<std::vector<char>>, int>> m_receive_queue; std::mutex m_receive_queue_mutex; std::thread m_listener; int64_t m_recv_count = 0; int64_t m_send_count = 0; int large_message_announce_tag = 32766; int large_message_tag = 
32767; }; inline comm::comm(int *argc, char ***argv, int buffer_capacity = 1048576) { pimpl_if = std::make_shared<detail::mpi_init_finalize>(argc, argv); pimpl = std::make_shared<comm::impl>(MPI_COMM_WORLD, buffer_capacity); } inline comm::comm(MPI_Comm mcomm, int buffer_capacity = 1048576) { pimpl_if.reset(); int flag(0); ASSERT_MPI(MPI_Initialized(&flag)); if (!flag) { throw std::runtime_error("ERROR: MPI not initialized"); } int provided(0); ASSERT_MPI(MPI_Query_thread(&provided)); if (provided != MPI_THREAD_MULTIPLE) { throw std::runtime_error("ERROR: MPI_THREAD_MULTIPLE not provided"); } pimpl = std::make_shared<comm::impl>(mcomm, buffer_capacity); } inline comm::~comm() { ASSERT_RELEASE(MPI_Barrier(MPI_COMM_WORLD) == MPI_SUCCESS); pimpl.reset(); ASSERT_RELEASE(MPI_Barrier(MPI_COMM_WORLD) == MPI_SUCCESS); pimpl_if.reset(); } template <typename AsyncFunction, typename... SendArgs> inline void comm::async(int dest, AsyncFunction fn, const SendArgs &... args) { static_assert(std::is_empty<AsyncFunction>::value, "Only stateless lambdas are supported"); pimpl->async(dest, fn, std::forward<const SendArgs>(args)...); } inline int comm::size() const { return pimpl->size(); } inline int comm::rank() const { return pimpl->rank(); } inline void comm::barrier() { pimpl->barrier(); } inline void comm::async_flush(int rank) { pimpl->async_flush(rank); } inline void comm::async_flush_all() { pimpl->async_flush_all(); } template <typename T> inline T comm::all_reduce_sum(const T &t) const { return pimpl->all_reduce_sum(t); } template <typename T> inline T comm::all_reduce_min(const T &t) const { return pimpl->all_reduce_min(t); } template <typename T> inline T comm::all_reduce_max(const T &t) const { return pimpl->all_reduce_max(t); } template <typename T, typename MergeFunction> inline T comm::all_reduce(const T &t, MergeFunction merge) { return pimpl->all_reduce(t, merge); } } // namespace ygm
#include <algorithm>
#include <cstdlib>
#include <unordered_map>
#include <utility>
#include <vector>

// "Array of Doubled Pairs": decide whether arr can be reordered as
// [a0, 2*a0, a1, 2*a1, ...], i.e. every element pairs with its double.
//
// Greedy: process distinct values in increasing |value| order; each
// occurrence of x must be matched by an occurrence of 2*x (negatives pair
// toward larger magnitude, which is why we sort by absolute value).
class Solution1 {
public:
    bool canReorderDoubled(std::vector<int> &arr) {
        std::unordered_map<int, int> numCount;
        for (const int x : arr) numCount[x]++;

        // Zeros can only pair with other zeros (2*0 == 0), so an odd count
        // of zeros is unsatisfiable. The greedy loop below cannot detect
        // this (numCount[0] > numCount[0] is never true).
        if (numCount[0] % 2 != 0) return false;

        std::vector<int> keys;
        keys.reserve(numCount.size());
        // const auto& avoids copying each map entry.
        for (const auto &item : numCount) keys.push_back(item.first);
        std::sort(keys.begin(), keys.end(),
                  [](int x, int y) { return std::abs(x) < std::abs(y); });

        for (const int x : keys) {
            // Every x needs a partner 2*x; operator[] default-inserts 0
            // when 2*x was never seen, which is the desired semantics.
            if (numCount[x] > numCount[x * 2]) return false;
            numCount[x * 2] -= numCount[x];
        }
        return true;
    }
};
/* Copyright 2020 Aeva Palecek Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <iostream> #include "lodepng.h" #include "common.h" #include "images.h" ImageData ReadPng(const char* Path) { ImageData Image; std::vector<unsigned char> Data; unsigned Error = lodepng::decode(Data, Image.Width, Image.Height, Path); if (Error) { std::cout \ << "Failed to read " << Path << "!\n" << " - Reason: PNG decode error:\n" << " - [" << Error << "] " << lodepng_error_text(Error) << "\n"; HaltAndCatchFire(); } Image.Data.resize(Data.size()); int Dst = 0; const int RowSize = Image.Width * 4; for (int y = Image.Height - 1; y >= 0; --y) { int Src = RowSize * y; for (int x = 0; x < RowSize; ++x) { Image.Data[Dst] = Data[Src]; ++Dst; ++Src; } } return Image; }
/* Copyright (C) 2006 - 2013 ScriptDev2 <http://www.scriptdev2.com/> * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* ScriptData SDName: Boss_Silver_Hand_Bosses SD%Complete: 80 SDComment: Timers; Not sure if we need to respawn dead npcs on evade; May need additional adjustments / research SDCategory: Stratholme EndScriptData */ #include "precompiled.h" #include "stratholme.h" /*##### # Additional: # Although this is a working solution, the correct would be in addition to check if Aurius is dead. # Once player extinguish the eternal flame (cast spell 31497->start event 11206) Aurius should become hostile. # Once Aurius is defeated, he should be the one summoning the ghosts. 
#####*/

enum
{
    // Gregor
    SPELL_HAMMER_JUSTICE        = 13005,
    SPELL_HAMMER_WRATH          = 32772,
    SPELL_HOLY_SHOCK            = 32771,

    // Cathela
    SPELL_HOLY_SHIELD           = 32777,
    SPELL_REDOUBT               = 32776,

    // Aelmar
    SPELL_JUDGEMENT             = 32778,

    // Vicar
    SPELL_BLESSING              = 32770,
    SPELL_HOLY_LIGHT            = 32769,

    // Target selection modes used by the ability table below
    TARGET_TYPE_RANDOM          = 0,
    TARGET_TYPE_VICTIM          = 1,
    TARGET_TYPE_SELF            = 2,
    TARGET_TYPE_FRIENDLY        = 3,
};

// One data-driven ability: which boss entry uses it, which spell, how the
// target is picked, the first-cast delay, and the re-cast cooldown (ms).
struct SilverHandAbilityStruct
{
    uint32 m_uiCreatureEntry, m_uiSpellId;
    uint8 m_uiTargetType;
    uint32 m_uiInitialTimer, m_uiCooldown;
};

// Shared ability table for all four Silver Hand bosses; each AI instance
// only activates the rows matching its own creature entry.
static SilverHandAbilityStruct m_aSilverHandAbility[8] =
{
    {NPC_GREGOR_THE_JUSTICIAR,  SPELL_HAMMER_JUSTICE, TARGET_TYPE_RANDOM,   2000,  15000},
    {NPC_GREGOR_THE_JUSTICIAR,  SPELL_HAMMER_WRATH,   TARGET_TYPE_RANDOM,   10000, 15000},
    {NPC_GREGOR_THE_JUSTICIAR,  SPELL_HOLY_SHOCK,     TARGET_TYPE_RANDOM,   4000,  7000},
    {NPC_CATHELA_THE_SEEKER,    SPELL_HOLY_SHIELD,    TARGET_TYPE_SELF,     1000,  60000},
    {NPC_CATHELA_THE_SEEKER,    SPELL_REDOUBT,        TARGET_TYPE_SELF,     5000,  15000},
    {NPC_AELMAR_THE_VANQUISHER, SPELL_JUDGEMENT,      TARGET_TYPE_VICTIM,   4000,  9000},
    {NPC_VICAR_HYERONIMUS,      SPELL_BLESSING,       TARGET_TYPE_FRIENDLY, 2000,  13000},
    {NPC_VICAR_HYERONIMUS,      SPELL_HOLY_LIGHT,     TARGET_TYPE_FRIENDLY, 5000,  9000},
};

// Common AI for the four Order of the Silver Hand bosses in Stratholme;
// behavior is driven entirely by m_aSilverHandAbility.
struct boss_silver_hand_bossesAI : public ScriptedAI
{
    boss_silver_hand_bossesAI(Creature* pCreature) : ScriptedAI(pCreature)
    {
        m_pInstance = (instance_stratholme*)pCreature->GetInstanceData();
        // Register only the table rows that belong to this creature entry;
        // the map key is the row index into m_aSilverHandAbility.
        for (uint8 i = 0; i < countof(m_aSilverHandAbility); ++i)
        {
            if (m_aSilverHandAbility[i].m_uiCreatureEntry == m_creature->GetEntry())
                m_mSpellTimers[i] = m_aSilverHandAbility[i].m_uiInitialTimer;
        }
        Reset();
    }

    instance_stratholme* m_pInstance;

    // ability-table row index -> remaining ms until next cast attempt
    UNORDERED_MAP<uint8, uint32> m_mSpellTimers;

    void Reset() override
    {
        // Restore every registered ability to its initial delay.
        for (UNORDERED_MAP<uint8, uint32>::iterator itr = m_mSpellTimers.begin(); itr != m_mSpellTimers.end(); ++itr)
            itr->second = m_aSilverHandAbility[itr->first].m_uiInitialTimer;
    }

    void JustDied(Unit* pKiller) override
    {
        if (m_pInstance)
        {
            // Set data to special when each paladin dies
            m_pInstance->SetData(TYPE_TRUE_MASTERS, SPECIAL);

            // For the last one which dies, give the quest credit
            if (m_pInstance->GetData(TYPE_TRUE_MASTERS) == DONE)
            {
                if (pKiller->GetTypeId() == TYPEID_PLAYER)
                {
                    if (Creature* pCredit = m_pInstance->GetSingleCreatureFromStorage(NPC_PALADIN_QUEST_CREDIT))
                        ((Player*)pKiller)->KilledMonsterCredit(NPC_PALADIN_QUEST_CREDIT, pCredit->GetObjectGuid());
                }
            }
        }
    }

    // Picks a target for ability row uiIndex according to its target type
    // and attempts the cast; returns true only if the cast succeeded.
    bool CanUseSpecialAbility(uint32 uiIndex)
    {
        Unit* pTarget = NULL;

        switch (m_aSilverHandAbility[uiIndex].m_uiTargetType)
        {
            case TARGET_TYPE_SELF:
                pTarget = m_creature;
                break;
            case TARGET_TYPE_VICTIM:
                pTarget = m_creature->getVictim();
                break;
            case TARGET_TYPE_RANDOM:
                pTarget = m_creature->SelectAttackingTarget(ATTACKING_TARGET_RANDOM, 0, m_aSilverHandAbility[uiIndex].m_uiSpellId, SELECT_FLAG_IN_LOS);
                break;
            case TARGET_TYPE_FRIENDLY:
                pTarget = DoSelectLowestHpFriendly(10.0f);
                break;
        }

        if (pTarget)
        {
            if (DoCastSpellIfCan(pTarget, m_aSilverHandAbility[uiIndex].m_uiSpellId) == CAST_OK)
                return true;
        }

        return false;
    }

    void UpdateAI(const uint32 uiDiff) override
    {
        // Return since we have no target
        if (!m_creature->SelectHostileTarget() || !m_creature->getVictim())
            return;

        // At most one successful cast per update tick (note the break).
        for (UNORDERED_MAP<uint8, uint32>::iterator itr = m_mSpellTimers.begin(); itr != m_mSpellTimers.end(); ++itr)
        {
            if (itr->second < uiDiff)
            {
                if (CanUseSpecialAbility(itr->first))
                {
                    itr->second = m_aSilverHandAbility[itr->first].m_uiCooldown;
                    break;
                }
            }
            else
                itr->second -= uiDiff;
        }

        DoMeleeAttackIfReady();
    }
};

CreatureAI* GetAI_boss_silver_hand_bossesAI(Creature* pCreature)
{
    return new boss_silver_hand_bossesAI(pCreature);
}

// Registers the script with the ScriptDev2 script registry.
void AddSC_boss_order_of_silver_hand()
{
    Script* pNewScript;

    pNewScript = new Script;
    pNewScript->Name = "boss_silver_hand_bosses";
    pNewScript->GetAI = &GetAI_boss_silver_hand_bossesAI;
    pNewScript->RegisterSelf();
}
// NOTE(review): "__PPU_H__" is a reserved identifier (leading double
// underscore); consider renaming the guard, e.g. PPU_H. Also, fixed-width
// integer types below rely on a transitive <cstdint> include — confirm.
#ifndef __PPU_H__
#define __PPU_H__

#pragma once

#include "BgFifo.hpp"
#include "BgMapAttributes.hpp"
#include "Color.hpp"
#include "Enums.hpp"
#include "OAMSprite.hpp"
#include "SpriteFifo.hpp"
#include "Tile.hpp"

#include <cstdio>

// Hardware constants (T-cycle = 1 machine cycle at 4 MiHz)
#define PPU_NUM_SPRITES 40
#define PPU_MAX_SPRITES_ON_LINE 10
#define PPU_OAM_SEARCH_T_CYCLES 80
#define PPU_LINE_T_CYCLES 456
#define PPU_VBLANK_T_CYCLES 4560
#define PPU_OAM_DMA_T_CYCLES 640
#define PPU_DEFAULT_DRAW_T_CYCLES 172
#define PPU_DEFAULT_HBLANK_T_CYCLES 204
#define PPU_VRAM_DMA_BLOCK_TRANSFER_DOUBLE_SPEED_T_CYCLES 64
#define PPU_SCREEN_WIDTH 160
#define PPU_SCREEN_HEIGHT 144

class Memory;
class SM83;
class GameBoy;
class Config;

// Values match the mode bits of the STAT register (0xFF41, bits 1-0).
enum LcdMode { H_BLANK = 0, V_BLANK = 1, OAM_SEARCH = 2, DRAW = 3 };

// Game Boy / Game Boy Color pixel processing unit: steps through the
// OAM-search / draw / H-blank / V-blank modes, runs the pixel FIFOs, and
// produces frames into `display`.
class PPU {
  public:
    Memory *memory;
    SM83 *cpu;
    Config *config;

    uint64_t renderedFrames = 0;

    EmulatorMode emulatorMode;
    bool doubleSpeedMode;
    bool readyToDraw;

    // Completed frame, row-major [y][x].
    Color display[PPU_SCREEN_HEIGHT][PPU_SCREEN_WIDTH];

    bool lcdWasTurnedOn = false;

    uint8_t drawModeLength;   // should be set to 172 when entering mode 3
    uint8_t hBlankModeLength; // this should be set to 204 when entering mode 3 and modified based
                              // on what the bg and pixel fifo are doing

    // Indices (into OAM) of sprites found for the current scanline.
    int8_t spritesOnCurrentLine[PPU_MAX_SPRITES_ON_LINE];
    uint8_t numSpritesOnCurrentLine;

    // Window-fetch state for the current frame/line.
    uint8_t windowYCounter;
    uint8_t windowXCounter;
    bool windowYTrigger;
    bool windowXTrigger;

    BgFifo bgFifo;
    SpriteFifo spriteFifo;

    // OAM DMA (0xFF46) progress tracking.
    bool oamDmaActive;
    uint16_t oamDmaCurrentCycles;

    // VRAM DMA (CGB HDMA/GDMA) progress tracking.
    bool vramGeneralDmaActive;
    bool vramHblankDmaActive;
    uint32_t vramDmaLength;
    uint32_t vramDmaTransferredBytes;
    uint32_t vramDmaCurrentCycles;

    uint32_t tCycles;
    uint32_t currentModeTCycles;
    uint16_t currentOamDmaTCycles;

    uint8_t xPos; // X position on the current line

    PPU();

    /**
     * LCD CONTROL REGISTER
     *
     * The LCD Control Register is located at 0xFF40
     *
     * Bit 7 - LCD Display Enable: Specifies if the LCD is on and the PPU active
     * Bit 6 - Window Tile Map Display Select: Controls which background map is used by the window
     *                                         0 = 0x9800; 1 = 0x9C00
     * Bit 5 - Window Display Enable: Controls whether the window is displayed or not
     * Bit 4 - BG & Window Tile Data Select: Controls which addressing mode the BG and Window use
     *                                       to pick tiles: 0 = 0x8800-0x97FF; 1 = 0x8000-0x8FFF
     * Bit 3 - BG Tile Map Display Select: Similar to bit 6; 0 = 0x9800; 1 = 0x9C00
     * Bit 2 - OBJ (Sprite) Size: 0 = 8x8; 1 = 8x16
     * Bit 1 - OBJ (Sprite) Display Enable: Toggles if sprites are displayed
     * Bit 0 - BG/Window Display/Priority:
     *         - DMG Mode: BG Display: If 0 then both background and window become blank, only
     *                     sprites may be displayed
     *         - CGB Mode: BG & Window Master Priority: If 0, the background and window lose
     *                     priority; the sprites will be displayed on
     *                     top independently of the priority flags
     */
    uint8_t getLcdDisplayEnable();
    uint8_t getWindowTileMapDisplaySelect();
    uint8_t getWindowDisplayEnable();
    uint8_t getBgAndWindowTileDataSelect();
    uint8_t getBgTileMapDisplaySelect();
    uint8_t getObjSize();
    uint8_t getObjDisplayEnable();
    uint8_t getBgWindowDisplayPriority();

    void setLcdControlRegister(uint8_t val);
    void setLcdDisplayEnable(uint8_t val);
    void setWindowTileMapDisplaySelect(uint8_t val);
    void setWindowDisplayEnable(uint8_t val);
    void setBgAndWindowTileDataSelect(uint8_t val);
    void setBgTileMapDisplaySelect(uint8_t val);
    void setObjSize(uint8_t val);
    void setObjDisplayEnable(uint8_t val);
    void setBgWindowDisplayPriority(uint8_t val);

    /**
     * LCD STATUS REGISTER
     *
     * LCD STAT is located at 0xFF41
     *
     * Bit 6 - LYC=LY Coincidence Interrupt: Triggers LCD_STAT interrupt when LYC = LY
     * Bit 5 - Mode 2 OAM Interrupt
     * Bit 4 - Mode 1 VBlank Interrupt
     * Bit 3 - Mode 0 HBlank Interrupt
     * Bit 2 - Coincidence Flag: 0 = LYC != LY; 1 = LYC == LY
     * Bit 1-0 - Mode Flag: 0) During HBlank
     *                      1) During VBlank
     *                      2) During Searching OAM
     *                      3) During Transferring Data to LCD Driver (Pixel Transfer)
     */
    uint8_t getLycLyCoincidence();
    uint8_t getMode2OamInterrupt();
    uint8_t getMode1VBlankInterrupt();
    uint8_t getMode0HBlankInterrupt();
    uint8_t getCoincidenceFlag();
    uint8_t getModeFlag();
    LcdMode getLcdMode();

    void setLcdStatRegister(uint8_t val);
    void setLycLyCoincidence(uint8_t val);
    void setMode2OamInterrupt(uint8_t val);
    void setMode1VBlankInterrupt(uint8_t val);
    void setMode0HBlankInterrupt(uint8_t val);
    void setCoincidenceFlag(uint8_t val);
    void setModeFlag(uint8_t val);

    /**
     * LCD POSITION AND SCROLLING
     *
     * SCY: Scroll Y (0xFF42) / SCX: Scroll X (0xFF43) - Position to be displayed in the top-left
     *                                                   corner of the screen; Wrapping occurs
     *                                                   when drawing exceeds the lower right
     *                                                   border of the map area
     * LY: LCDC Y-Coordinate (0xFF44) - Current vertical line
     * LYC: LY Compare (0xFF45) - The value to which LY is compared
     * WY: Window Y Pos (0xFF4A) / WX: Window X Pos (0xFF4B) - Top-left position of the window
     *                                                         area; WX is specified as WX - 7;
     *                                                         WY=0, WX=7 is the top left corner
     */
    uint8_t getScrollY();
    uint8_t getScrollX();
    uint8_t getLy();
    uint8_t getLyc();
    uint8_t getWy();
    uint8_t getWx();

    void setScrollY(uint8_t val);
    void setScrollX(uint8_t val);
    void setLy(uint8_t val);
    void setLyc(uint8_t val);
    void setWy(uint8_t val);
    void setWx(uint8_t val);

    /**
     * LCD MONOCHROME PALETTES (NON CGB ONLY)
     *
     * BGP: BG Palette Data (0xFF47) - Gray shades assigned to the color numbers of the BG & Window
     *                                 tiles;
     *                                 Bit 7-6: Color 3
     *                                 Bit 5-4: Color 2
     *                                 Bit 3-2: Color 1
     *                                 Bit 1-0: Color 0
     * OBP0: Object Palette 0 Data (0xFF48) - It works similar to BGP, but the lower 2 bits aren't
     *                                        used; they represent transparency
     * OBP1: Object Palette 1 Data (0xFF49) - Same as OBP0
     */
    uint8_t getBgPaletteData();
    uint8_t getObjPalette0Data();
    uint8_t getObjPalette1Data();

    void setBgPaletteData(uint8_t val);
    void setObjPalette0Data(uint8_t val);
    void setObjPalette1Data(uint8_t val);

    /**
     * LCD COLOR PALETTES (CGB ONLY)
     *
     * BGPI: BG Color Palette Index (0xFF68) - Used to address a byte in the CGB Palette Memory
     *                                         Bit 7: Auto Increment after write
     *                                         Bit 5-0: Index (0x00-0x3F)
     *
     * BGPD: BG Color Palette Data (0xFF69) - Used to access byte selected by BGPI
     * OBPI: Object Color Palette Index (0xFF6A) - Similar to BGPI
     * OBPD: Object Color Palette Data (0xFF6B) - Similar to BGPD
     */
    uint8_t getBgColorPaletteIndex();
    uint8_t getBgColorPaletteData();
    uint8_t getObjColorPaletteIndex();
    uint8_t getObjColorPaletteData();

    void setBgColorPaletteIndex(uint8_t val);
    void setBgColorPaletteData(uint8_t val);
    void setObjColorPaletteIndex(uint8_t val);
    void setObjColorPaletteData(uint8_t val);

    /**
     * OAM DMA
     *
     * DMA: DMA Transfer and Start Address (0xFF46) - Launch OAM DMA and specify source address
     *                                                Src Address format: XX00 (XX is a value
     *                                                between 0x00 and 0xF1)
     *                                                Copies bytes from XX00-XX9F to 0xFE00-0xFE9F
     */
    uint8_t getOamDma();
    void setOamDma(uint8_t val);

    /**
     * VRAM DMA (CGB Only)
     * HDMA1: DMA Source High Byte (0xFF51)
     * HDMA2: DMA Source Low Byte (0xFF52)
     *        These 2 registers are combined to create the DMA source address. The 4 lower bits are
     *        ignored
     * HDMA3: DMA Destination High Byte (0xFF53)
     * HDMA4: DMA Destination Low Byte (0xFF54)
     *        These 2 registers are combined to create the DMA destination address. Only bits 12-4
     *        are used
     * HDMA5: DMA Length, Mode, Start (0xFF55) - Writing to this register starts the DMA transfer
     *        Bit 7: Mode: General=0 or HBlank=1
     *        Bit 6-0: Transfer length divided by 0x10 minus 1
     *        In General mode all data is transferred at once
     *        In HBlank mode data is transferred during HBlank mode
     */
    uint8_t getHdma1();
    uint8_t getHdma2();
    uint16_t getHdmaSrcAddress();
    uint8_t getHdma3();
    uint8_t getHdma4();
    uint16_t getHdmaDestAddress();
    uint8_t getHdma5();
    uint8_t getHdmaMode();
    uint8_t getHdmaLength();

    void setHdma1(uint8_t val);
    void setHdma2(uint8_t val);
    void setHdma3(uint8_t val);
    void setHdma4(uint8_t val);
    void setHdma5(uint8_t val);

    Tile getTileByIndex(int index);
    OAMSprite getSpriteByIndex(int index);
    BgMapAttributes getBgMapByIndex(int index, int tilemap); // tilemap 0 = 0x9800; 1 = 0x9C00

    // Returns a sprite tile (0x8000-0x8FFF) based on its index. If LCDC.2 is set (8x16 mode) then
    // the tileNo argument is used to decide what tile of the sprite should be returned, if LCDC.2
    // is 0 then tileNo is ignored.
    Tile getSpriteTile(int index, int tileNo = 0, int vramBank = 0);

    // Searches the sprites that will be displayed on the current line and puts their index in
    // spritesOnCurrentLine. Also sets numSpritesOnCurrentLine
    void searchSpritesOnLine();

    // Returns a Color object based on the FifoPixel
    Color getColorFromFifoPixel(FifoPixel *fifoPixel, bool normalizeCgbColor = true);

    // Advances the PPU by one step; DMA engines have their own cycle hooks.
    void cycle();
    void oamDmaCycle();
    void vramDmaCycle();

    // Combines a background pixel and a sprite pixel according to priority.
    Color *mixPixels(FifoPixel *bgPixel, FifoPixel *spritePixel);
};

#endif // __PPU_H__
//===- DemoteRegToStack.cpp - Move a virtual register to the stack --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

/// DemoteRegToStack - This function takes a virtual register computed by an
/// Instruction and replaces it with a slot in the stack frame, allocated via
/// alloca.  This allows the CFG to be changed around without fear of
/// invalidating the SSA information for the value.  It returns the pointer to
/// the alloca inserted to create a stack slot for I.
///
/// \param I             the instruction whose value is demoted; erased if
///                      it has no uses.
/// \param VolatileLoads if true, reloads from the slot are volatile.
/// \param AllocaPoint   optional insertion point for the alloca; defaults
///                      to the front of the function's entry block.
AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
                                   Instruction *AllocaPoint) {
  // Dead value: nothing to demote, just delete it.
  if (I.use_empty()) {
    I.eraseFromParent();
    return nullptr;
  }

  Function *F = I.getParent()->getParent();
  const DataLayout &DL = F->getParent()->getDataLayout();

  // Create a stack slot to hold the value.
  AllocaInst *Slot;
  if (AllocaPoint) {
    Slot = new AllocaInst(I.getType(), DL.getAllocaAddrSpace(), nullptr,
                          I.getName()+".reg2mem", AllocaPoint);
  } else {
    Slot = new AllocaInst(I.getType(), DL.getAllocaAddrSpace(), nullptr,
                          I.getName() + ".reg2mem",
                          &F->getEntryBlock().front());
  }

  // We cannot demote invoke instructions to the stack if their normal edge
  // is critical. Therefore, split the critical edge and create a basic block
  // into which the store can be inserted.
  if (InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
    if (!II->getNormalDest()->getSinglePredecessor()) {
      unsigned SuccNum =
          GetSuccessorNumber(II->getParent(), II->getNormalDest());
      assert(isCriticalEdge(II, SuccNum) && "Expected a critical edge!");
      BasicBlock *BB = SplitCriticalEdge(II, SuccNum);
      assert(BB && "Unable to split critical edge.");
      (void)BB;
    }
  }

  // Change all of the users of the instruction to read from the stack slot.
  while (!I.use_empty()) {
    Instruction *U = cast<Instruction>(I.user_back());
    if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // If this is a PHI node, we can't insert a load of the value before the
      // use.  Instead insert the load in the predecessor block corresponding
      // to the incoming value.
      //
      // Note that if there are multiple edges from a basic block to this PHI
      // node that we cannot have multiple loads. The problem is that the
      // resulting PHI node will have multiple values (from each load) coming
      // in from the same block, which is illegal SSA form. For this reason,
      // we keep track of and reuse loads we insert.
      DenseMap<BasicBlock*, Value*> Loads;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (PN->getIncomingValue(i) == &I) {
          Value *&V = Loads[PN->getIncomingBlock(i)];
          if (!V) {
            // Insert the load into the predecessor block
            V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
                             PN->getIncomingBlock(i)->getTerminator());
          }
          PN->setIncomingValue(i, V);
        }
    } else {
      // If this is a normal instruction, just insert a load.
      Value *V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads, U);
      U->replaceUsesOfWith(&I, V);
    }
  }

  // Insert stores of the computed value into the stack slot. We have to be
  // careful if I is an invoke instruction, because we can't insert the store
  // AFTER the terminator instruction.
  BasicBlock::iterator InsertPt;
  if (!isa<TerminatorInst>(I)) {
    InsertPt = ++I.getIterator();
    for (; isa<PHINode>(InsertPt) || InsertPt->isEHPad(); ++InsertPt)
      /* empty */;   // Don't insert before PHI nodes or landingpad instrs.
  } else {
    // For an invoke, the value is only available on the normal edge, so the
    // store goes at the first insertion point of the normal destination
    // (whose edge was de-criticalized above if necessary).
    InvokeInst &II = cast<InvokeInst>(I);
    InsertPt = II.getNormalDest()->getFirstInsertionPt();
  }

  new StoreInst(&I, Slot, &*InsertPt);
  return Slot;
}

/// DemotePHIToStack - This function takes a virtual register computed by a PHI
/// node and replaces it with a slot in the stack frame allocated via alloca.
/// The PHI node is deleted. It returns the pointer to the alloca inserted.
///
/// Each incoming value is stored to the slot at the end of its predecessor
/// block; a single reload replaces the PHI itself.
AllocaInst *llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
  // Dead PHI: just delete it.
  if (P->use_empty()) {
    P->eraseFromParent();
    return nullptr;
  }

  const DataLayout &DL = P->getModule()->getDataLayout();

  // Create a stack slot to hold the value.
  AllocaInst *Slot;
  if (AllocaPoint) {
    Slot = new AllocaInst(P->getType(), DL.getAllocaAddrSpace(), nullptr,
                          P->getName()+".reg2mem", AllocaPoint);
  } else {
    Function *F = P->getParent()->getParent();
    Slot = new AllocaInst(P->getType(), DL.getAllocaAddrSpace(), nullptr,
                          P->getName() + ".reg2mem",
                          &F->getEntryBlock().front());
  }

  // Iterate over each operand inserting a store in each predecessor.
  for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(P->getIncomingValue(i))) {
      // An invoke's value is not available at the end of the block that
      // produced it, so a same-block incoming edge cannot be handled here.
      assert(II->getParent() != P->getIncomingBlock(i) &&
             "Invoke edge not supported yet");
      (void)II;
    }
    new StoreInst(P->getIncomingValue(i), Slot,
                  P->getIncomingBlock(i)->getTerminator());
  }

  // Insert a load in place of the PHI and replace all uses.
  BasicBlock::iterator InsertPt = P->getIterator();

  for (; isa<PHINode>(InsertPt) || InsertPt->isEHPad(); ++InsertPt)
    /* empty */;   // Don't insert before PHI nodes or landingpad instrs.

  Value *V = new LoadInst(Slot, P->getName() + ".reload", &*InsertPt);
  P->replaceAllUsesWith(V);

  // Delete PHI.
  P->eraseFromParent();
  return Slot;
}
//===--- SemaStmt.cpp - Semantic Analysis for Statements ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file implements semantic analysis for statements.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/Ownership.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"

using namespace clang;
using namespace sema;

/// Finishes a full-expression and wraps it as an expression statement.
/// Returns StmtError() if the expression (or full-expression cleanup
/// processing) is invalid.
StmtResult Sema::ActOnExprStmt(ExprResult FE, bool DiscardedValue) {
  if (FE.isInvalid())
    return StmtError();

  FE = ActOnFinishFullExpr(FE.get(), FE.get()->getExprLoc(), DiscardedValue);
  if (FE.isInvalid())
    return StmtError();

  // C99 6.8.3p2: The expression in an expression statement is evaluated as a
  // void expression for its side effects.  Conversion to void allows any
  // operand, even incomplete types.

  // Same thing in for stmt first clause (when expr) and third clause.
  return StmtResult(FE.getAs<Stmt>());
}

/// Error path for an expression statement: drop any cleanups queued while
/// the (now abandoned) expression was being analyzed.
StmtResult Sema::ActOnExprStmtError() {
  DiscardCleanupsInEvaluationContext();
  return StmtError();
}

StmtResult Sema::ActOnNullStmt(SourceLocation SemiLoc,
                               bool HasLeadingEmptyMacro) {
  return new (Context) NullStmt(SemiLoc, HasLeadingEmptyMacro);
}

StmtResult Sema::ActOnDeclStmt(DeclGroupPtrTy dg, SourceLocation StartLoc,
                               SourceLocation EndLoc) {
  DeclGroupRef DG = dg.get();

  // If we have an invalid decl, just return an error.
  if (DG.isNull()) return StmtError();

  return new (Context) DeclStmt(DG, StartLoc, EndLoc);
}

/// Checks the element declaration of an Objective-C fast-enumeration loop
/// (for (decl in collection)): it must be a single variable declaration,
/// its parser-provided initializer is discarded, and under ARC a strong
/// inferred-lifetime element is made const/pseudo-strong.
void Sema::ActOnForEachDeclStmt(DeclGroupPtrTy dg) {
  DeclGroupRef DG = dg.get();

  // If we don't have a declaration, or we have an invalid declaration,
  // just return.
  if (DG.isNull() || !DG.isSingleDecl())
    return;

  Decl *decl = DG.getSingleDecl();
  if (!decl || decl->isInvalidDecl())
    return;

  // Only variable declarations are permitted.
  VarDecl *var = dyn_cast<VarDecl>(decl);
  if (!var) {
    Diag(decl->getLocation(), diag::err_non_variable_decl_in_for);
    decl->setInvalidDecl();
    return;
  }

  // foreach variables are never actually initialized in the way that
  // the parser came up with.
  var->setInit(nullptr);

  // In ARC, we don't need to retain the iteration variable of a fast
  // enumeration loop.  Rather than actually trying to catch that
  // during declaration processing, we remove the consequences here.
  if (getLangOpts().ObjCAutoRefCount) {
    QualType type = var->getType();

    // Only do this if we inferred the lifetime.  Inferred lifetime
    // will show up as a local qualifier because explicit lifetime
    // should have shown up as an AttributedType instead.
    if (type.getLocalQualifiers().getObjCLifetime() ==
        Qualifiers::OCL_Strong) {
      // Add 'const' and mark the variable as pseudo-strong.
      var->setType(type.withConst());
      var->setARCPseudoStrong(true);
    }
  }
}

/// Diagnose unused comparisons, both builtin and overloaded operators.
/// For '==' and '!=', suggest fixits for '=' or '|='.
///
/// Adding a cast to void (or other expression wrappers) will prevent the
/// warning from firing.
static bool DiagnoseUnusedComparison(Sema &S, const Expr *E) {
  SourceLocation Loc;
  bool CanAssign;
  enum { Equality, Inequality, Relational, ThreeWay } Kind;

  if (const BinaryOperator *Op = dyn_cast<BinaryOperator>(E)) {
    if (!Op->isComparisonOp())
      return false;

    if (Op->getOpcode() == BO_EQ)
      Kind = Equality;
    else if (Op->getOpcode() == BO_NE)
      Kind = Inequality;
    else if (Op->getOpcode() == BO_Cmp)
      Kind = ThreeWay;
    else {
      assert(Op->isRelationalOp());
      Kind = Relational;
    }
    Loc = Op->getOperatorLoc();
    CanAssign = Op->getLHS()->IgnoreParenImpCasts()->isLValue();
  } else if (const CXXOperatorCallExpr *Op = dyn_cast<CXXOperatorCallExpr>(E)) {
    switch (Op->getOperator()) {
    case OO_EqualEqual:
      Kind = Equality;
      break;
    case OO_ExclaimEqual:
      Kind = Inequality;
      break;
    case OO_Less:
    case OO_Greater:
    case OO_GreaterEqual:
    case OO_LessEqual:
      Kind = Relational;
      break;
    case OO_Spaceship:
      Kind = ThreeWay;
      break;
    default:
      return false;
    }

    Loc = Op->getOperatorLoc();
    CanAssign = Op->getArg(0)->IgnoreParenImpCasts()->isLValue();
  } else {
    // Not a typo-prone comparison.
    return false;
  }

  // Suppress warnings when the operator, suspicious as it may be, comes from
  // a macro expansion.
  if (S.SourceMgr.isMacroBodyExpansion(Loc))
    return false;

  S.Diag(Loc, diag::warn_unused_comparison)
    << (unsigned)Kind << E->getSourceRange();

  // If the LHS is a plausible entity to assign to, provide a fixit hint to
  // correct common typos.
  if (CanAssign) {
    if (Kind == Inequality)
      S.Diag(Loc, diag::note_inequality_comparison_to_or_assign)
        << FixItHint::CreateReplacement(Loc, "|=");
    else if (Kind == Equality)
      S.Diag(Loc, diag::note_equality_comparison_to_assign)
        << FixItHint::CreateReplacement(Loc, "=");
  }

  return true;
}

/// Emit the [[nodiscard]] / warn_unused_result diagnostic for an ignored
/// value, selecting the message-carrying variant when the attribute supplies
/// one and the constructor-specific variant when \p IsCtor is set.
/// Returns false (no diagnostic) when \p A is null.
static bool DiagnoseNoDiscard(Sema &S, const WarnUnusedResultAttr *A,
                              SourceLocation Loc, SourceRange R1,
                              SourceRange R2, bool IsCtor) {
  if (!A)
    return false;
  StringRef Msg = A->getMessage();

  if (Msg.empty()) {
    if (IsCtor)
      return S.Diag(Loc, diag::warn_unused_constructor) << A << R1 << R2;
    return S.Diag(Loc, diag::warn_unused_result) << A << R1 << R2;
  }

  if (IsCtor)
    return S.Diag(Loc, diag::warn_unused_constructor_msg) << A << Msg << R1
                                                          << R2;
  return S.Diag(Loc, diag::warn_unused_result_msg) << A << Msg << R1 << R2;
}

/// Warn about statements whose result is computed but ignored
/// (-Wunused-value, -Wunused-result and friends); recurses through labels.
void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
  if (const LabelStmt *Label = dyn_cast_or_null<LabelStmt>(S))
    return DiagnoseUnusedExprResult(Label->getSubStmt());

  const Expr *E = dyn_cast_or_null<Expr>(S);
  if (!E)
    return;

  // If we are in an unevaluated expression context, then there can be no unused
  // results because the results aren't expected to be used in the first place.
  if (isUnevaluatedContext())
    return;

  SourceLocation ExprLoc = E->IgnoreParenImpCasts()->getExprLoc();
  // In most cases, we don't want to warn if the expression is written in a
  // macro body, or if the macro comes from a system header. If the offending
  // expression is a call to a function with the warn_unused_result attribute,
  // we warn no matter the location. Because of the order in which the various
  // checks need to happen, we factor out the macro-related test here.
  // (Continuation of Sema::DiagnoseUnusedExprResult.)
  bool ShouldSuppress =
      SourceMgr.isMacroBodyExpansion(ExprLoc) ||
      SourceMgr.isInSystemMacro(ExprLoc);

  const Expr *WarnExpr;
  SourceLocation Loc;
  SourceRange R1, R2;
  if (!E->isUnusedResultAWarning(WarnExpr, Loc, R1, R2, Context))
    return;

  // If this is a GNU statement expression expanded from a macro, it is probably
  // unused because it is a function-like macro that can be used as either an
  // expression or statement. Don't warn, because it is almost certainly a
  // false positive.
  if (isa<StmtExpr>(E) && Loc.isMacroID())
    return;

  // Check if this is the UNREFERENCED_PARAMETER from the Microsoft headers.
  // That macro is frequently used to suppress "unused parameter" warnings,
  // but its implementation makes clang's -Wunused-value fire. Prevent this.
  if (isa<ParenExpr>(E->IgnoreImpCasts()) && Loc.isMacroID()) {
    SourceLocation SpellLoc = Loc;
    if (findMacroSpelling(SpellLoc, "UNREFERENCED_PARAMETER"))
      return;
  }

  // Okay, we have an unused result.  Depending on what the base expression is,
  // we might want to make a more specific diagnostic.  Check for one of these
  // cases now.
  unsigned DiagID = diag::warn_unused_expr;
  if (const FullExpr *Temps = dyn_cast<FullExpr>(E))
    E = Temps->getSubExpr();
  if (const CXXBindTemporaryExpr *TempExpr = dyn_cast<CXXBindTemporaryExpr>(E))
    E = TempExpr->getSubExpr();

  if (DiagnoseUnusedComparison(*this, E))
    return;

  E = WarnExpr;
  // Look through no-op and constructor-conversion casts to classify the
  // underlying expression.
  if (const auto *Cast = dyn_cast<CastExpr>(E))
    if (Cast->getCastKind() == CK_NoOp ||
        Cast->getCastKind() == CK_ConstructorConversion)
      E = Cast->getSubExpr()->IgnoreImpCasts();

  if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
    if (E->getType()->isVoidType())
      return;

    if (DiagnoseNoDiscard(*this, cast_or_null<WarnUnusedResultAttr>(
                                     CE->getUnusedResultAttr(Context)),
                          Loc, R1, R2, /*isCtor=*/false))
      return;

    // If the callee has attribute pure, const, or warn_unused_result, warn
    // with a more specific message to make it clear what is happening. If the
    // call is written in a macro body, only warn if it has the
    // warn_unused_result attribute.
    if (const Decl *FD = CE->getCalleeDecl()) {
      if (ShouldSuppress)
        return;
      if (FD->hasAttr<PureAttr>()) {
        Diag(Loc, diag::warn_unused_call) << R1 << R2 << "pure";
        return;
      }
      if (FD->hasAttr<ConstAttr>()) {
        Diag(Loc, diag::warn_unused_call) << R1 << R2 << "const";
        return;
      }
    }
  } else if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
    // The attribute may sit on the constructor itself or on the class.
    if (const CXXConstructorDecl *Ctor = CE->getConstructor()) {
      const auto *A = Ctor->getAttr<WarnUnusedResultAttr>();
      A = A ? A : Ctor->getParent()->getAttr<WarnUnusedResultAttr>();
      if (DiagnoseNoDiscard(*this, A, Loc, R1, R2, /*isCtor=*/true))
        return;
    }
  } else if (const auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (const TagDecl *TD = ILE->getType()->getAsTagDecl()) {

      if (DiagnoseNoDiscard(*this, TD->getAttr<WarnUnusedResultAttr>(), Loc, R1,
                            R2, /*isCtor=*/false))
        return;
    }
  } else if (ShouldSuppress)
    return;

  E = WarnExpr;
  if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
    if (getLangOpts().ObjCAutoRefCount && ME->isDelegateInitCall()) {
      Diag(Loc, diag::err_arc_unused_init_message) << R1;
      return;
    }
    const ObjCMethodDecl *MD = ME->getMethodDecl();
    if (MD) {
      if (DiagnoseNoDiscard(*this, MD->getAttr<WarnUnusedResultAttr>(), Loc, R1,
                            R2, /*isCtor=*/false))
        return;
    }
  } else if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    const Expr *Source = POE->getSyntacticForm();
    if (isa<ObjCSubscriptRefExpr>(Source))
      DiagID = diag::warn_unused_container_subscript_expr;
    else
      DiagID = diag::warn_unused_property_expr;
  } else if (const CXXFunctionalCastExpr *FC
                                       = dyn_cast<CXXFunctionalCastExpr>(E)) {
    const Expr *E = FC->getSubExpr();
    if (const CXXBindTemporaryExpr *TE = dyn_cast<CXXBindTemporaryExpr>(E))
      E = TE->getSubExpr();
    if (isa<CXXTemporaryObjectExpr>(E))
      return;
    if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(E))
      if (const CXXRecordDecl *RD = CE->getType()->getAsCXXRecordDecl())
        if (!RD->getAttr<WarnUnusedAttr>())
          return;
  }
  // Diagnose "(void*) blah" as a typo for "(void) blah".
  else if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(E)) {
    TypeSourceInfo *TI = CE->getTypeInfoAsWritten();
    QualType T = TI->getType();

    // We really do want to use the non-canonical type here.
    if (T == Context.VoidPtrTy) {
      PointerTypeLoc TL = TI->getTypeLoc().castAs<PointerTypeLoc>();

      Diag(Loc, diag::warn_unused_voidptr)
        << FixItHint::CreateRemoval(TL.getStarLoc());
      return;
    }
  }

  // Reading a volatile glvalue is a side effect; an unused one gets its own
  // diagnostic.
  if (E->isGLValue() && E->getType().isVolatileQualified()) {
    Diag(Loc, diag::warn_unused_volatile) << R1 << R2;
    return;
  }

  DiagRuntimeBehavior(Loc, nullptr, PDiag(DiagID) << R1 << R2);
}

/// Open a new compound-statement scope (tracks statement-expression-ness).
void Sema::ActOnStartOfCompoundStmt(bool IsStmtExpr) {
  PushCompoundScope(IsStmtExpr);
}

/// Close the innermost compound-statement scope.
void Sema::ActOnFinishOfCompoundStmt() {
  PopCompoundScope();
}

/// Return the innermost compound-statement scope of the current function.
sema::CompoundScopeInfo &Sema::getCurCompoundScope() const {
  return getCurFunction()->CompoundScopes.back();
}

/// Build a CompoundStmt from the given sub-statements, emitting C89
/// decls-after-statements and empty-loop-body diagnostics along the way.
StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                                   ArrayRef<Stmt *> Elts, bool isStmtExpr) {
  const unsigned NumElts = Elts.size();

  // If we're in C89 mode, check that we don't have any decls after stmts.  If
  // so, emit an extension diagnostic.
  if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
    // Note that __extension__ can be around a decl.
    unsigned i = 0;
    // Skip over all declarations.
    for (; i != NumElts && isa<DeclStmt>(Elts[i]); ++i)
      /*empty*/;

    // We found the end of the list or a statement.  Scan for another declstmt.
    for (; i != NumElts && !isa<DeclStmt>(Elts[i]); ++i)
      /*empty*/;

    if (i != NumElts) {
      Decl *D = *cast<DeclStmt>(Elts[i])->decl_begin();
      Diag(D->getLocation(), diag::ext_mixed_decls_code);
    }
  }

  // Check for suspicious empty body (null statement) in `for' and `while'
  // statements.  Don't do anything for template instantiations, this just adds
  // noise.
  // (Continuation of Sema::ActOnCompoundStmt: empty-loop-body check.)
  if (NumElts != 0 && !CurrentInstantiationScope &&
      getCurCompoundScope().HasEmptyLoopBodies) {
    for (unsigned i = 0; i != NumElts - 1; ++i)
      DiagnoseEmptyLoopBody(Elts[i], Elts[i + 1]);
  }

  return CompoundStmt::Create(Context, Elts, L, R);
}

/// Check and convert a 'case' expression against the type of the innermost
/// switch condition (C++11 converted constant expression, or C ICE rules),
/// correcting delayed typos along the way.
ExprResult
Sema::ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val) {
  if (!Val.get())
    return Val;

  if (DiagnoseUnexpandedParameterPack(Val.get()))
    return ExprError();

  // If we're not inside a switch, let the 'case' statement handling diagnose
  // this. Just clean up after the expression as best we can.
  if (getCurFunction()->SwitchStack.empty())
    return ActOnFinishFullExpr(Val.get(), Val.get()->getExprLoc(), false,
                               getLangOpts().CPlusPlus11);

  Expr *CondExpr =
      getCurFunction()->SwitchStack.back().getPointer()->getCond();
  if (!CondExpr)
    return ExprError();
  QualType CondType = CondExpr->getType();

  auto CheckAndFinish = [&](Expr *E) {
    if (CondType->isDependentType() || E->isTypeDependent())
      return ExprResult(E);

    if (getLangOpts().CPlusPlus11) {
      // C++11 [stmt.switch]p2: the constant-expression shall be a converted
      // constant expression of the promoted type of the switch condition.
      llvm::APSInt TempVal;
      return CheckConvertedConstantExpression(E, CondType, TempVal,
                                              CCEK_CaseValue);
    }

    ExprResult ER = E;
    if (!E->isValueDependent())
      ER = VerifyIntegerConstantExpression(E);
    if (!ER.isInvalid())
      ER = DefaultLvalueConversion(ER.get());
    if (!ER.isInvalid())
      ER = ImpCastExprToType(ER.get(), CondType, CK_IntegralCast);
    if (!ER.isInvalid())
      ER = ActOnFinishFullExpr(ER.get(), ER.get()->getExprLoc(), false);
    return ER;
  };

  ExprResult Converted = CorrectDelayedTyposInExpr(Val, CheckAndFinish);
  if (Converted.get() == Val.get())
    Converted = CheckAndFinish(Val.get());
  return Converted;
}

/// Build a CaseStmt (including GNU case ranges, 'case lo ... hi') and attach
/// it to the innermost switch; invalid values mark the case list incomplete.
StmtResult
Sema::ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHSVal,
                    SourceLocation DotDotDotLoc, ExprResult RHSVal,
                    SourceLocation ColonLoc) {
  assert((LHSVal.isInvalid() || LHSVal.get()) && "missing LHS value");
  assert((DotDotDotLoc.isInvalid() ?
          RHSVal.isUnset() : RHSVal.isInvalid() || RHSVal.get()) &&
         "missing RHS value");

  if (getCurFunction()->SwitchStack.empty()) {
    Diag(CaseLoc, diag::err_case_not_in_switch);
    return StmtError();
  }

  if (LHSVal.isInvalid() || RHSVal.isInvalid()) {
    // Remember (via the stack entry's int bit) that the case list is broken so
    // later whole-switch checks are suppressed.
    getCurFunction()->SwitchStack.back().setInt(true);
    return StmtError();
  }

  auto *CS = CaseStmt::Create(Context, LHSVal.get(), RHSVal.get(),
                              CaseLoc, DotDotDotLoc, ColonLoc);
  getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(CS);
  return CS;
}

/// ActOnCaseStmtBody - This installs a statement as the body of a case.
void Sema::ActOnCaseStmtBody(Stmt *S, Stmt *SubStmt) {
  cast<CaseStmt>(S)->setSubStmt(SubStmt);
}

/// Build a 'default:' label and attach it to the innermost switch.
StmtResult
Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
                       Stmt *SubStmt, Scope *CurScope) {
  if (getCurFunction()->SwitchStack.empty()) {
    Diag(DefaultLoc, diag::err_default_not_in_switch);
    return SubStmt;
  }

  DefaultStmt *DS = new (Context) DefaultStmt(DefaultLoc, ColonLoc, SubStmt);
  getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(DS);
  return DS;
}

/// Build a labeled statement, rejecting redefinitions of the same label.
StmtResult
Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                     SourceLocation ColonLoc, Stmt *SubStmt) {
  // If the label was multiply defined, reject it now.
  if (TheDecl->getStmt()) {
    Diag(IdentLoc, diag::err_redefinition_of_label) << TheDecl->getDeclName();
    Diag(TheDecl->getLocation(), diag::note_previous_definition);
    return SubStmt;
  }

  // Otherwise, things are good.  Fill in the declaration and return it.
  LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt);
  TheDecl->setStmt(LS);
  if (!TheDecl->isGnuLocal()) {
    TheDecl->setLocStart(IdentLoc);
    if (!TheDecl->isMSAsmLabel()) {
      // Don't update the location of MS ASM labels.  These will result in
      // a diagnostic, and changing the location here will mess that up.
      TheDecl->setLocation(IdentLoc);
    }
  }
  return LS;
}

/// Wrap a statement with the given statement attributes.
StmtResult Sema::ActOnAttributedStmt(SourceLocation AttrLoc,
                                     ArrayRef<const Attr*> Attrs,
                                     Stmt *SubStmt) {
  // Fill in the declaration and return it.
  // (Continuation of Sema::ActOnAttributedStmt.)
  AttributedStmt *LS = AttributedStmt::Create(Context, AttrLoc, Attrs, SubStmt);
  return LS;
}

namespace {
/// Walks an (evaluated) expression and reports every top-level comma
/// operator via Sema::DiagnoseCommaOperator (-Wcomma).
class CommaVisitor : public EvaluatedExprVisitor<CommaVisitor> {
  typedef EvaluatedExprVisitor<CommaVisitor> Inherited;
  Sema &SemaRef;
public:
  CommaVisitor(Sema &SemaRef) : Inherited(SemaRef.Context), SemaRef(SemaRef) {}
  void VisitBinaryOperator(BinaryOperator *E) {
    if (E->getOpcode() == BO_Comma)
      SemaRef.DiagnoseCommaOperator(E->getLHS(), E->getExprLoc());
    EvaluatedExprVisitor<CommaVisitor>::VisitBinaryOperator(E);
  }
};
}

/// Perform per-'if' diagnostics (comma-in-condition, empty then-body) and
/// build the IfStmt; an invalid condition is replaced by an opaque bool so
/// parsing can continue.
StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                             Stmt *InitStmt, ConditionResult Cond,
                             Stmt *thenStmt, SourceLocation ElseLoc,
                             Stmt *elseStmt) {
  if (Cond.isInvalid())
    Cond = ConditionResult(
        *this, nullptr,
        MakeFullExpr(new (Context) OpaqueValueExpr(SourceLocation(),
                                                   Context.BoolTy, VK_RValue),
                     IfLoc),
        false);

  Expr *CondExpr = Cond.get().second;
  // Only call the CommaVisitor when not C89 due to differences in scope flags.
  if ((getLangOpts().C99 || getLangOpts().CPlusPlus) &&
      !Diags.isIgnored(diag::warn_comma_operator, CondExpr->getExprLoc()))
    CommaVisitor(*this).Visit(CondExpr);

  if (!elseStmt)
    DiagnoseEmptyStmtBody(CondExpr->getEndLoc(), thenStmt,
                          diag::warn_empty_if_body);

  return BuildIfStmt(IfLoc, IsConstexpr, InitStmt, Cond, thenStmt, ElseLoc,
                     elseStmt);
}

/// Create the IfStmt node; 'if constexpr' and availability checks force a
/// branch-protected function scope.
StmtResult Sema::BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                             Stmt *InitStmt, ConditionResult Cond,
                             Stmt *thenStmt, SourceLocation ElseLoc,
                             Stmt *elseStmt) {
  if (Cond.isInvalid())
    return StmtError();

  if (IsConstexpr || isa<ObjCAvailabilityCheckExpr>(Cond.get().second))
    setFunctionHasBranchProtectedScope();

  return IfStmt::Create(Context, IfLoc, IsConstexpr, InitStmt, Cond.get().first,
                        Cond.get().second, thenStmt, ElseLoc, elseStmt);
}

namespace {
/// Heterogeneous comparator used with lower_bound/upper_bound over the
/// sorted (value, CaseStmt*) list; compares by the APSInt case value.
struct CaseCompareFunctor {
  bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
                  const llvm::APSInt &RHS) {
    return LHS.first < RHS;
  }
  bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
                  const std::pair<llvm::APSInt, CaseStmt*> &RHS) {
    return LHS.first < RHS.first;
  }
  bool operator()(const llvm::APSInt &LHS,
                  const std::pair<llvm::APSInt, CaseStmt*> &RHS) {
    return LHS < RHS.first;
  }
};
}

/// CmpCaseVals - Comparison predicate for sorting case values.
/// Ties on the value are broken by source location so duplicates are
/// reported in source order.
static bool CmpCaseVals(const std::pair<llvm::APSInt, CaseStmt*>& lhs,
                        const std::pair<llvm::APSInt, CaseStmt*>& rhs) {
  if (lhs.first < rhs.first)
    return true;

  if (lhs.first == rhs.first &&
      lhs.second->getCaseLoc().getRawEncoding()
       < rhs.second->getCaseLoc().getRawEncoding())
    return true;
  return false;
}

/// CmpEnumVals - Comparison predicate for sorting enumeration values.
///
static bool CmpEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
                        const std::pair<llvm::APSInt, EnumConstantDecl*>& rhs) {
  return lhs.first < rhs.first;
}

/// EqEnumVals - Comparison predicate for uniquing enumeration values.
///
static bool EqEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
                       const std::pair<llvm::APSInt, EnumConstantDecl*>& rhs) {
  return lhs.first == rhs.first;
}

/// GetTypeBeforeIntegralPromotion - Returns the pre-promotion type of
/// potentially integral-promoted expression @p expr.
/// Strips the full-expression wrapper and any integral-promotion implicit
/// casts, updating \p E to point at the inner expression, and returns that
/// inner expression's type.
static QualType GetTypeBeforeIntegralPromotion(const Expr *&E) {
  if (const auto *FE = dyn_cast<FullExpr>(E))
    E = FE->getSubExpr();
  while (const auto *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
    if (ImpCast->getCastKind() != CK_IntegralCast) break;
    E = ImpCast->getSubExpr();
  }
  return E->getType();
}

/// Convert a switch condition to an integral or enumeration type (via
/// contextual implicit conversion in C++), then apply the C99 6.8.4.2p5
/// integer promotions.  The local diagnoser maps each conversion failure
/// mode to a switch-specific diagnostic.
ExprResult Sema::CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond) {
  class SwitchConvertDiagnoser : public ICEConvertDiagnoser {
    Expr *Cond;

  public:
    SwitchConvertDiagnoser(Expr *Cond)
        : ICEConvertDiagnoser(/*AllowScopedEnumerations*/true, false, true),
          Cond(Cond) {}

    SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                         QualType T) override {
      return S.Diag(Loc, diag::err_typecheck_statement_requires_integer) << T;
    }

    SemaDiagnosticBuilder diagnoseIncomplete(
        Sema &S, SourceLocation Loc, QualType T) override {
      return S.Diag(Loc, diag::err_switch_incomplete_class_type)
               << T << Cond->getSourceRange();
    }

    SemaDiagnosticBuilder diagnoseExplicitConv(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
      return S.Diag(Loc, diag::err_switch_explicit_conversion) << T << ConvTy;
    }

    SemaDiagnosticBuilder noteExplicitConv(
        Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
      return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
        << ConvTy->isEnumeralType() << ConvTy;
    }

    SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                            QualType T) override {
      return S.Diag(Loc, diag::err_switch_multiple_conversions) << T;
    }

    SemaDiagnosticBuilder noteAmbiguous(
        Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
      return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
      << ConvTy->isEnumeralType() << ConvTy;
    }

    SemaDiagnosticBuilder diagnoseConversion(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
      llvm_unreachable("conversion functions are permitted");
    }
  } SwitchDiagnoser(Cond);

  ExprResult CondResult =
      PerformContextualImplicitConversion(SwitchLoc, Cond, SwitchDiagnoser);
  if (CondResult.isInvalid())
    return ExprError();

  // FIXME: PerformContextualImplicitConversion doesn't always tell us if it
  // failed and produced a diagnostic.
  Cond = CondResult.get();
  if (!Cond->isTypeDependent() &&
      !Cond->getType()->isIntegralOrEnumerationType())
    return ExprError();

  // C99 6.8.4.2p5 - Integer promotions are performed on the controlling expr.
  return UsualUnaryConversions(Cond);
}

/// Begin a switch statement: warn on boolean conditions, create the
/// SwitchStmt, and push it onto the function's switch stack (the stack
/// entry's int bit tracks whether the case list later turns out broken).
StmtResult Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                        Stmt *InitStmt, ConditionResult Cond) {
  Expr *CondExpr = Cond.get().second;
  assert((Cond.isInvalid() || CondExpr) && "switch with no condition");

  if (CondExpr && !CondExpr->isTypeDependent()) {
    // We have already converted the expression to an integral or enumeration
    // type, when we parsed the switch condition. If we don't have an
    // appropriate type now, enter the switch scope but remember that it's
    // invalid.
    assert(CondExpr->getType()->isIntegralOrEnumerationType() &&
           "invalid condition type");
    if (CondExpr->isKnownToHaveBooleanValue()) {
      // switch(bool_expr) {...} is often a programmer error, e.g.
      //   switch(n && mask) { ... }  // Doh - should be "n & mask".
      // One can always use an if statement instead of switch(bool_expr).
      Diag(SwitchLoc, diag::warn_bool_switch_condition)
          << CondExpr->getSourceRange();
    }
  }

  setFunctionHasBranchIntoScope();

  auto *SS = SwitchStmt::Create(Context, InitStmt, Cond.get().first, CondExpr);
  getCurFunction()->SwitchStack.push_back(
      FunctionScopeInfo::SwitchInfo(SS, false));
  return SS;
}

/// Resize \p Val to \p BitWidth bits and force its signedness, so case
/// values can be compared against the promoted condition type.
static void AdjustAPSInt(llvm::APSInt &Val, unsigned BitWidth, bool IsSigned) {
  Val = Val.extOrTrunc(BitWidth);
  Val.setIsSigned(IsSigned);
}

/// Check the specified case value is in range for the given unpromoted switch
/// type.
static void checkCaseValue(Sema &S, SourceLocation Loc, const llvm::APSInt &Val,
                           unsigned UnpromotedWidth, bool UnpromotedSign) {
  // In C++11 onwards, this is checked by the language rules.
  if (S.getLangOpts().CPlusPlus11)
    return;

  // If the case value was signed and negative and the switch expression is
  // unsigned, don't bother to warn: this is implementation-defined behavior.
  // FIXME: Introduce a second, default-ignored warning for this case?
  if (UnpromotedWidth < Val.getBitWidth()) {
    // Round-trip through the unpromoted width; if the value changes it did
    // not fit in the original condition type.
    llvm::APSInt ConvVal(Val);
    AdjustAPSInt(ConvVal, UnpromotedWidth, UnpromotedSign);
    AdjustAPSInt(ConvVal, Val.getBitWidth(), Val.isSigned());
    // FIXME: Use different diagnostics for overflow  in conversion to promoted
    // type versus "switch expression cannot have this value". Use proper
    // IntRange checking rather than just looking at the unpromoted type here.
    if (ConvVal != Val)
      S.Diag(Loc, diag::warn_case_value_overflow) << Val.toString(10)
                                                  << ConvVal.toString(10);
  }
}

// Sorted (value, enumerator) pairs used to compare case values against the
// enumerators of the switched-on enum type.
typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl*>, 64> EnumValsTy;

/// Returns true if we should emit a diagnostic about this case expression not
/// being a part of the enum used in the switch controlling expression.
/// \param EI/EIEnd iterate the value-sorted enumerator list; EI is advanced
///        past entries below \p Val, so callers must pass cases in sorted
///        order and reuse the iterator across calls.
static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S,
                                              const EnumDecl *ED,
                                              const Expr *CaseExpr,
                                              EnumValsTy::iterator &EI,
                                              EnumValsTy::iterator &EIEnd,
                                              const llvm::APSInt &Val) {
  // Open enums may legitimately hold values outside the enumerator list.
  if (!ED->isClosed())
    return false;

  if (const DeclRefExpr *DRE =
          dyn_cast<DeclRefExpr>(CaseExpr->IgnoreParenImpCasts())) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      QualType VarType = VD->getType();
      QualType EnumType = S.Context.getTypeDeclType(ED);
      // A global const variable of the enum type is treated as a named
      // enumerator-like constant; don't warn about it.
      if (VD->hasGlobalStorage() && VarType.isConstQualified() &&
          S.Context.hasSameUnqualifiedType(EnumType, VarType))
        return false;
    }
  }

  if (ED->hasAttr<FlagEnumAttr>())
    return !S.IsValueInFlagEnum(ED, Val, false);

  while (EI != EIEnd && EI->first < Val)
    EI++;

  if (EI != EIEnd && EI->first == Val)
    return false;

  return true;
}

/// Warn (-Wenum-compare-switch) when a case label's enum type differs from
/// the enum type of the switch condition.
static void checkEnumTypesInSwitchStmt(Sema &S, const Expr *Cond,
                                       const Expr *Case) {
  QualType CondType = Cond->getType();
  QualType CaseType = Case->getType();

  const EnumType *CondEnumType = CondType->getAs<EnumType>();
  const EnumType *CaseEnumType = CaseType->getAs<EnumType>();
  if (!CondEnumType || !CaseEnumType)
    return;

  // Ignore anonymous enums.
  if (!CondEnumType->getDecl()->getIdentifier() &&
      !CondEnumType->getDecl()->getTypedefNameForAnonDecl())
    return;
  if (!CaseEnumType->getDecl()->getIdentifier() &&
      !CaseEnumType->getDecl()->getTypedefNameForAnonDecl())
    return;

  if (S.Context.hasSameUnqualifiedType(CondType, CaseType))
    return;

  S.Diag(Case->getExprLoc(), diag::warn_comparison_of_mixed_enum_types_switch)
      << CondType << CaseType << Cond->getSourceRange()
      << Case->getSourceRange();
}

/// Attach the body to a completed switch and run whole-switch checks:
/// duplicate cases/ranges, out-of-range values, enum coverage, and
/// constant-condition matching.
StmtResult
Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
                            Stmt *BodyStmt) {
  SwitchStmt *SS = cast<SwitchStmt>(Switch);
  bool CaseListIsIncomplete = getCurFunction()->SwitchStack.back().getInt();
  assert(SS == getCurFunction()->SwitchStack.back().getPointer() &&
         "switch stack missing push/pop!");
  getCurFunction()->SwitchStack.pop_back();

  if (!BodyStmt) return StmtError();
  SS->setBody(BodyStmt, SwitchLoc);

  Expr *CondExpr = SS->getCond();
  if (!CondExpr) return StmtError();

  QualType CondType = CondExpr->getType();

  // C++ 6.4.2.p2:
  // Integral promotions are performed (on the switch condition).
  //
  // A case value unrepresentable by the original switch condition
  // type (before the promotion) doesn't make sense, even when it can
  // be represented by the promoted type.  Therefore we need to find
  // the pre-promotion type of the switch condition.
  const Expr *CondExprBeforePromotion = CondExpr;
  QualType CondTypeBeforePromotion =
      GetTypeBeforeIntegralPromotion(CondExprBeforePromotion);

  // Get the bitwidth of the switched-on value after promotions. We must
  // convert the integer case values to this width before comparison.
  bool HasDependentValue
    = CondExpr->isTypeDependent() || CondExpr->isValueDependent();
  unsigned CondWidth = HasDependentValue ? 0 : Context.getIntWidth(CondType);
  bool CondIsSigned = CondType->isSignedIntegerOrEnumerationType();

  // Get the width and signedness that the condition might actually have, for
  // warning purposes.
// FIXME: Grab an IntRange for the condition rather than using the unpromoted // type. unsigned CondWidthBeforePromotion = HasDependentValue ? 0 : Context.getIntWidth(CondTypeBeforePromotion); bool CondIsSignedBeforePromotion = CondTypeBeforePromotion->isSignedIntegerOrEnumerationType(); // Accumulate all of the case values in a vector so that we can sort them // and detect duplicates. This vector contains the APInt for the case after // it has been converted to the condition type. typedef SmallVector<std::pair<llvm::APSInt, CaseStmt*>, 64> CaseValsTy; CaseValsTy CaseVals; // Keep track of any GNU case ranges we see. The APSInt is the low value. typedef std::vector<std::pair<llvm::APSInt, CaseStmt*> > CaseRangesTy; CaseRangesTy CaseRanges; DefaultStmt *TheDefaultStmt = nullptr; bool CaseListIsErroneous = false; for (SwitchCase *SC = SS->getSwitchCaseList(); SC && !HasDependentValue; SC = SC->getNextSwitchCase()) { if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SC)) { if (TheDefaultStmt) { Diag(DS->getDefaultLoc(), diag::err_multiple_default_labels_defined); Diag(TheDefaultStmt->getDefaultLoc(), diag::note_duplicate_case_prev); // FIXME: Remove the default statement from the switch block so that // we'll return a valid AST. This requires recursing down the AST and // finding it, not something we are set up to do right now. For now, // just lop the entire switch stmt out of the AST. CaseListIsErroneous = true; } TheDefaultStmt = DS; } else { CaseStmt *CS = cast<CaseStmt>(SC); Expr *Lo = CS->getLHS(); if (Lo->isValueDependent()) { HasDependentValue = true; break; } // We already verified that the expression has a constant value; // get that value (prior to conversions). const Expr *LoBeforePromotion = Lo; GetTypeBeforeIntegralPromotion(LoBeforePromotion); llvm::APSInt LoVal = LoBeforePromotion->EvaluateKnownConstInt(Context); // Check the unconverted value is within the range of possible values of // the switch expression. 
checkCaseValue(*this, Lo->getBeginLoc(), LoVal, CondWidthBeforePromotion, CondIsSignedBeforePromotion); // FIXME: This duplicates the check performed for warn_not_in_enum below. checkEnumTypesInSwitchStmt(*this, CondExprBeforePromotion, LoBeforePromotion); // Convert the value to the same width/sign as the condition. AdjustAPSInt(LoVal, CondWidth, CondIsSigned); // If this is a case range, remember it in CaseRanges, otherwise CaseVals. if (CS->getRHS()) { if (CS->getRHS()->isValueDependent()) { HasDependentValue = true; break; } CaseRanges.push_back(std::make_pair(LoVal, CS)); } else CaseVals.push_back(std::make_pair(LoVal, CS)); } } if (!HasDependentValue) { // If we don't have a default statement, check whether the // condition is constant. llvm::APSInt ConstantCondValue; bool HasConstantCond = false; if (!TheDefaultStmt) { Expr::EvalResult Result; HasConstantCond = CondExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects); if (Result.Val.isInt()) ConstantCondValue = Result.Val.getInt(); assert(!HasConstantCond || (ConstantCondValue.getBitWidth() == CondWidth && ConstantCondValue.isSigned() == CondIsSigned)); } bool ShouldCheckConstantCond = HasConstantCond; // Sort all the scalar case values so we can easily detect duplicates. llvm::stable_sort(CaseVals, CmpCaseVals); if (!CaseVals.empty()) { for (unsigned i = 0, e = CaseVals.size(); i != e; ++i) { if (ShouldCheckConstantCond && CaseVals[i].first == ConstantCondValue) ShouldCheckConstantCond = false; if (i != 0 && CaseVals[i].first == CaseVals[i-1].first) { // If we have a duplicate, report it. 
// First, determine if either case value has a name StringRef PrevString, CurrString; Expr *PrevCase = CaseVals[i-1].second->getLHS()->IgnoreParenCasts(); Expr *CurrCase = CaseVals[i].second->getLHS()->IgnoreParenCasts(); if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(PrevCase)) { PrevString = DeclRef->getDecl()->getName(); } if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(CurrCase)) { CurrString = DeclRef->getDecl()->getName(); } SmallString<16> CaseValStr; CaseVals[i-1].first.toString(CaseValStr); if (PrevString == CurrString) Diag(CaseVals[i].second->getLHS()->getBeginLoc(), diag::err_duplicate_case) << (PrevString.empty() ? StringRef(CaseValStr) : PrevString); else Diag(CaseVals[i].second->getLHS()->getBeginLoc(), diag::err_duplicate_case_differing_expr) << (PrevString.empty() ? StringRef(CaseValStr) : PrevString) << (CurrString.empty() ? StringRef(CaseValStr) : CurrString) << CaseValStr; Diag(CaseVals[i - 1].second->getLHS()->getBeginLoc(), diag::note_duplicate_case_prev); // FIXME: We really want to remove the bogus case stmt from the // substmt, but we have no way to do this right now. CaseListIsErroneous = true; } } } // Detect duplicate case ranges, which usually don't exist at all in // the first place. if (!CaseRanges.empty()) { // Sort all the case ranges by their low value so we can easily detect // overlaps between ranges. llvm::stable_sort(CaseRanges); // Scan the ranges, computing the high values and removing empty ranges. std::vector<llvm::APSInt> HiVals; for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) { llvm::APSInt &LoVal = CaseRanges[i].first; CaseStmt *CR = CaseRanges[i].second; Expr *Hi = CR->getRHS(); const Expr *HiBeforePromotion = Hi; GetTypeBeforeIntegralPromotion(HiBeforePromotion); llvm::APSInt HiVal = HiBeforePromotion->EvaluateKnownConstInt(Context); // Check the unconverted value is within the range of possible values of // the switch expression. 
checkCaseValue(*this, Hi->getBeginLoc(), HiVal, CondWidthBeforePromotion, CondIsSignedBeforePromotion); // Convert the value to the same width/sign as the condition. AdjustAPSInt(HiVal, CondWidth, CondIsSigned); // If the low value is bigger than the high value, the case is empty. if (LoVal > HiVal) { Diag(CR->getLHS()->getBeginLoc(), diag::warn_case_empty_range) << SourceRange(CR->getLHS()->getBeginLoc(), Hi->getEndLoc()); CaseRanges.erase(CaseRanges.begin()+i); --i; --e; continue; } if (ShouldCheckConstantCond && LoVal <= ConstantCondValue && ConstantCondValue <= HiVal) ShouldCheckConstantCond = false; HiVals.push_back(HiVal); } // Rescan the ranges, looking for overlap with singleton values and other // ranges. Since the range list is sorted, we only need to compare case // ranges with their neighbors. for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) { llvm::APSInt &CRLo = CaseRanges[i].first; llvm::APSInt &CRHi = HiVals[i]; CaseStmt *CR = CaseRanges[i].second; // Check to see whether the case range overlaps with any // singleton cases. CaseStmt *OverlapStmt = nullptr; llvm::APSInt OverlapVal(32); // Find the smallest value >= the lower bound. If I is in the // case range, then we have overlap. CaseValsTy::iterator I = llvm::lower_bound(CaseVals, CRLo, CaseCompareFunctor()); if (I != CaseVals.end() && I->first < CRHi) { OverlapVal = I->first; // Found overlap with scalar. OverlapStmt = I->second; } // Find the smallest value bigger than the upper bound. I = std::upper_bound(I, CaseVals.end(), CRHi, CaseCompareFunctor()); if (I != CaseVals.begin() && (I-1)->first >= CRLo) { OverlapVal = (I-1)->first; // Found overlap with scalar. OverlapStmt = (I-1)->second; } // Check to see if this case stmt overlaps with the subsequent // case range. if (i && CRLo <= HiVals[i-1]) { OverlapVal = HiVals[i-1]; // Found overlap with range. OverlapStmt = CaseRanges[i-1].second; } if (OverlapStmt) { // If we have a duplicate, report it. 
Diag(CR->getLHS()->getBeginLoc(), diag::err_duplicate_case) << OverlapVal.toString(10); Diag(OverlapStmt->getLHS()->getBeginLoc(), diag::note_duplicate_case_prev); // FIXME: We really want to remove the bogus case stmt from the // substmt, but we have no way to do this right now. CaseListIsErroneous = true; } } } // Complain if we have a constant condition and we didn't find a match. if (!CaseListIsErroneous && !CaseListIsIncomplete && ShouldCheckConstantCond) { // TODO: it would be nice if we printed enums as enums, chars as // chars, etc. Diag(CondExpr->getExprLoc(), diag::warn_missing_case_for_condition) << ConstantCondValue.toString(10) << CondExpr->getSourceRange(); } // Check to see if switch is over an Enum and handles all of its // values. We only issue a warning if there is not 'default:', but // we still do the analysis to preserve this information in the AST // (which can be used by flow-based analyes). // const EnumType *ET = CondTypeBeforePromotion->getAs<EnumType>(); // If switch has default case, then ignore it. if (!CaseListIsErroneous && !CaseListIsIncomplete && !HasConstantCond && ET && ET->getDecl()->isCompleteDefinition()) { const EnumDecl *ED = ET->getDecl(); EnumValsTy EnumVals; // Gather all enum values, set their type and sort them, // allowing easier comparison with CaseVals. for (auto *EDI : ED->enumerators()) { llvm::APSInt Val = EDI->getInitVal(); AdjustAPSInt(Val, CondWidth, CondIsSigned); EnumVals.push_back(std::make_pair(Val, EDI)); } llvm::stable_sort(EnumVals, CmpEnumVals); auto EI = EnumVals.begin(), EIEnd = std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals); // See which case values aren't in enum. 
      // Warn on singleton case values that name no enumerator.
      for (CaseValsTy::const_iterator CI = CaseVals.begin();
           CI != CaseVals.end(); CI++) {
        Expr *CaseExpr = CI->second->getLHS();
        if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
                                              CI->first))
          Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
              << CondTypeBeforePromotion;
      }

      // See which of case ranges aren't in enum
      EI = EnumVals.begin();
      for (CaseRangesTy::const_iterator RI = CaseRanges.begin();
           RI != CaseRanges.end(); RI++) {
        // Both endpoints of the range are checked independently.
        Expr *CaseExpr = RI->second->getLHS();
        if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
                                              RI->first))
          Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
              << CondTypeBeforePromotion;

        llvm::APSInt Hi =
            RI->second->getRHS()->EvaluateKnownConstInt(Context);
        AdjustAPSInt(Hi, CondWidth, CondIsSigned);

        CaseExpr = RI->second->getRHS();
        if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
                                              Hi))
          Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
              << CondTypeBeforePromotion;
      }

      // Check which enum vals aren't in switch
      auto CI = CaseVals.begin();
      auto RI = CaseRanges.begin();
      bool hasCasesNotInSwitch = false;

      SmallVector<DeclarationName,8> UnhandledNames;

      // Walk the sorted enumerators and the sorted case values/ranges in
      // lock-step, collecting the names of enumerators with no matching case.
      for (EI = EnumVals.begin(); EI != EIEnd; EI++) {
        // Don't warn about omitted unavailable EnumConstantDecls.
        switch (EI->second->getAvailability()) {
        case AR_Deprecated:
          // Omitting a deprecated constant is ok; it should never materialize.
        case AR_Unavailable:
          continue;

        case AR_NotYetIntroduced:
          // Partially available enum constants should be present. Note that we
          // suppress -Wunguarded-availability diagnostics for such uses.
        case AR_Available:
          break;
        }

        if (EI->second->hasAttr<UnusedAttr>())
          continue;

        // Drop unneeded case values
        while (CI != CaseVals.end() && CI->first < EI->first)
          CI++;

        if (CI != CaseVals.end() && CI->first == EI->first)
          continue;

        // Drop unneeded case ranges
        for (; RI != CaseRanges.end(); RI++) {
          llvm::APSInt Hi =
              RI->second->getRHS()->EvaluateKnownConstInt(Context);
          AdjustAPSInt(Hi, CondWidth, CondIsSigned);
          if (EI->first <= Hi)
            break;
        }

        if (RI == CaseRanges.end() || EI->first < RI->first) {
          hasCasesNotInSwitch = true;
          UnhandledNames.push_back(EI->second->getDeclName());
        }
      }

      // Every enumerator of a closed non-flag enum is covered, so a 'default:'
      // can never be reached.
      if (TheDefaultStmt && UnhandledNames.empty() && ED->isClosedNonFlag())
        Diag(TheDefaultStmt->getDefaultLoc(), diag::warn_unreachable_default);

      // Produce a nice diagnostic if multiple values aren't handled.
      if (!UnhandledNames.empty()) {
        DiagnosticBuilder DB = Diag(CondExpr->getExprLoc(),
                                    TheDefaultStmt ? diag::warn_def_missing_case
                                                   : diag::warn_missing_case)
                               << (int)UnhandledNames.size();

        // Name at most three missing enumerators in the diagnostic text.
        for (size_t I = 0, E = std::min(UnhandledNames.size(), (size_t)3);
             I != E; ++I)
          DB << UnhandledNames[I];
      }

      if (!hasCasesNotInSwitch)
        SS->setAllEnumCasesCovered();
    }
  }

  if (BodyStmt)
    DiagnoseEmptyStmtBody(CondExpr->getEndLoc(), BodyStmt,
                          diag::warn_empty_switch_body);

  // FIXME: If the case list was broken in some way, we don't have a good system
  // to patch it up.  Instead, just return the whole substmt as broken.
  if (CaseListIsErroneous)
    return StmtError();

  return SS;
}

/// Warn (-Wassign-enum) when an out-of-range integer constant is assigned to
/// a variable of enumeration type.
void Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                                  Expr *SrcExpr) {
  if (Diags.isIgnored(diag::warn_not_in_enum_assignment, SrcExpr->getExprLoc()))
    return;

  if (const EnumType *ET = DstType->getAs<EnumType>())
    if (!Context.hasSameUnqualifiedType(SrcType, DstType) &&
        SrcType->isIntegerType()) {
      // Only integer constant expressions can be checked against the
      // enumerator list; runtime values are left alone.
      if (!SrcExpr->isTypeDependent() && !SrcExpr->isValueDependent() &&
          SrcExpr->isIntegerConstantExpr(Context)) {
        // Get the bitwidth of the enum value before promotions.
        unsigned DstWidth = Context.getIntWidth(DstType);
        bool DstIsSigned = DstType->isSignedIntegerOrEnumerationType();

        // Normalize the assigned constant to the enum's width/signedness so it
        // can be compared against the enumerator values.
        llvm::APSInt RhsVal = SrcExpr->EvaluateKnownConstInt(Context);
        AdjustAPSInt(RhsVal, DstWidth, DstIsSigned);
        const EnumDecl *ED = ET->getDecl();

        // Open enums may legitimately hold values outside the enumerator list.
        if (!ED->isClosed())
          return;

        if (ED->hasAttr<FlagEnumAttr>()) {
          // Flag enums: any bitwise combination of enumerator bits is valid.
          if (!IsValueInFlagEnum(ED, RhsVal, true))
            Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment)
                << DstType.getUnqualifiedType();
        } else {
          typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl *>, 64>
              EnumValsTy;
          EnumValsTy EnumVals;

          // Gather all enum values, set their type and sort them,
          // allowing easier comparison with rhs constant.
          for (auto *EDI : ED->enumerators()) {
            llvm::APSInt Val = EDI->getInitVal();
            AdjustAPSInt(Val, DstWidth, DstIsSigned);
            EnumVals.push_back(std::make_pair(Val, EDI));
          }
          if (EnumVals.empty())
            return;
          llvm::stable_sort(EnumVals, CmpEnumVals);
          EnumValsTy::iterator EIend =
              std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);

          // See which values aren't in the enum.
          EnumValsTy::const_iterator EI = EnumVals.begin();
          while (EI != EIend && EI->first < RhsVal)
            EI++;
          if (EI == EIend || EI->first != RhsVal) {
            Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment)
                << DstType.getUnqualifiedType();
          }
        }
      }
    }
}

/// Build a WhileStmt AST node from a checked condition and body.
StmtResult Sema::ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                                Stmt *Body) {
  if (Cond.isInvalid())
    return StmtError();

  auto CondVal = Cond.get();
  CheckBreakContinueBinding(CondVal.second);

  // Warn about a top-level comma operator in the condition (-Wcomma).
  if (CondVal.second &&
      !Diags.isIgnored(diag::warn_comma_operator, CondVal.second->getExprLoc()))
    CommaVisitor(*this).Visit(CondVal.second);

  if (isa<NullStmt>(Body))
    getCurCompoundScope().setHasEmptyLoopBodies();

  return WhileStmt::Create(Context, CondVal.first, CondVal.second, Body,
                           WhileLoc);
}

/// Build a DoStmt AST node, checking and finishing the condition expression.
StmtResult
Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                  SourceLocation WhileLoc, SourceLocation CondLParen,
                  Expr *Cond, SourceLocation CondRParen) {
  assert(Cond && "ActOnDoStmt(): missing expression");

  CheckBreakContinueBinding(Cond);
  ExprResult CondResult = CheckBooleanCondition(DoLoc, Cond);
  if (CondResult.isInvalid())
    return StmtError();
  Cond = CondResult.get();

  CondResult = ActOnFinishFullExpr(Cond, DoLoc, /*DiscardedValue*/ false);
  if (CondResult.isInvalid())
    return StmtError();
  Cond = CondResult.get();

  // Only call the CommaVisitor for C89 due to differences in scope flags.
  if (Cond && !getLangOpts().C99 && !getLangOpts().CPlusPlus &&
      !Diags.isIgnored(diag::warn_comma_operator, Cond->getExprLoc()))
    CommaVisitor(*this).Visit(Cond);

  return new (Context) DoStmt(Body, Cond, DoLoc, WhileLoc, CondRParen);
}

namespace {
// Use SetVector since the diagnostic cares about the ordering of the Decl's.
using DeclSetVector =
    llvm::SetVector<VarDecl *, llvm::SmallVector<VarDecl *, 8>,
                    llvm::SmallPtrSet<VarDecl *, 8>>;

// This visitor will traverse a conditional statement and store all
// the evaluated decls into a vector.  Simple is set to true if none
// of the excluded constructs are used.
class DeclExtractor : public EvaluatedExprVisitor<DeclExtractor> { DeclSetVector &Decls; SmallVectorImpl<SourceRange> &Ranges; bool Simple; public: typedef EvaluatedExprVisitor<DeclExtractor> Inherited; DeclExtractor(Sema &S, DeclSetVector &Decls, SmallVectorImpl<SourceRange> &Ranges) : Inherited(S.Context), Decls(Decls), Ranges(Ranges), Simple(true) {} bool isSimple() { return Simple; } // Replaces the method in EvaluatedExprVisitor. void VisitMemberExpr(MemberExpr* E) { Simple = false; } // Any Stmt not whitelisted will cause the condition to be marked complex. void VisitStmt(Stmt *S) { Simple = false; } void VisitBinaryOperator(BinaryOperator *E) { Visit(E->getLHS()); Visit(E->getRHS()); } void VisitCastExpr(CastExpr *E) { Visit(E->getSubExpr()); } void VisitUnaryOperator(UnaryOperator *E) { // Skip checking conditionals with derefernces. if (E->getOpcode() == UO_Deref) Simple = false; else Visit(E->getSubExpr()); } void VisitConditionalOperator(ConditionalOperator *E) { Visit(E->getCond()); Visit(E->getTrueExpr()); Visit(E->getFalseExpr()); } void VisitParenExpr(ParenExpr *E) { Visit(E->getSubExpr()); } void VisitBinaryConditionalOperator(BinaryConditionalOperator *E) { Visit(E->getOpaqueValue()->getSourceExpr()); Visit(E->getFalseExpr()); } void VisitIntegerLiteral(IntegerLiteral *E) { } void VisitFloatingLiteral(FloatingLiteral *E) { } void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { } void VisitCharacterLiteral(CharacterLiteral *E) { } void VisitGNUNullExpr(GNUNullExpr *E) { } void VisitImaginaryLiteral(ImaginaryLiteral *E) { } void VisitDeclRefExpr(DeclRefExpr *E) { VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()); if (!VD) { // Don't allow unhandled Decl types. Simple = false; return; } Ranges.push_back(E->getSourceRange()); Decls.insert(VD); } }; // end class DeclExtractor // DeclMatcher checks to see if the decls are used in a non-evaluated // context. 
class DeclMatcher : public EvaluatedExprVisitor<DeclMatcher> {
  DeclSetVector &Decls;  // The variables we are searching for.
  bool FoundDecl;        // True once any tracked decl (or escape construct) is seen.

public:
  typedef EvaluatedExprVisitor<DeclMatcher> Inherited;

  // Visiting happens eagerly in the constructor; query with FoundDeclInUse().
  DeclMatcher(Sema &S, DeclSetVector &Decls, Stmt *Statement) :
      Inherited(S.Context), Decls(Decls), FoundDecl(false) {
    if (!Statement) return;

    Visit(Statement);
  }

  // Control-flow escapes conservatively count as "decl used".
  void VisitReturnStmt(ReturnStmt *S) {
    FoundDecl = true;
  }

  void VisitBreakStmt(BreakStmt *S) {
    FoundDecl = true;
  }

  void VisitGotoStmt(GotoStmt *S) {
    FoundDecl = true;
  }

  void VisitCastExpr(CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      CheckLValueToRValueCast(E->getSubExpr());
    else
      Visit(E->getSubExpr());
  }

  // A plain read (lvalue-to-rvalue) of a tracked variable does not count as a
  // "use" for this analysis; only the non-read sub-expressions are visited.
  void CheckLValueToRValueCast(Expr *E) {
    E = E->IgnoreParenImpCasts();

    if (isa<DeclRefExpr>(E)) {
      return;
    }

    if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
      Visit(CO->getCond());
      CheckLValueToRValueCast(CO->getTrueExpr());
      CheckLValueToRValueCast(CO->getFalseExpr());
      return;
    }

    if (BinaryConditionalOperator *BCO =
            dyn_cast<BinaryConditionalOperator>(E)) {
      CheckLValueToRValueCast(BCO->getOpaqueValue()->getSourceExpr());
      CheckLValueToRValueCast(BCO->getFalseExpr());
      return;
    }

    Visit(E);
  }

  void VisitDeclRefExpr(DeclRefExpr *E) {
    if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
      if (Decls.count(VD))
        FoundDecl = true;
  }

  void VisitPseudoObjectExpr(PseudoObjectExpr *POE) {
    // Only need to visit the semantics for POE.
    // SyntacticForm doesn't really use the Decl.
    for (auto *S : POE->semantics()) {
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(S))
        // Look past the OVE into the expression it binds.
        Visit(OVE->getSourceExpr());
      else
        Visit(S);
    }
  }

  bool FoundDeclInUse() { return FoundDecl; }

}; // end class DeclMatcher

// Emit -Wfor-loop-analysis when none of the variables mentioned in a for
// loop's condition are modified in the loop's body or increment.
void CheckForLoopConditionalStatement(Sema &S, Expr *Second,
                                      Expr *Third, Stmt *Body) {
  // Condition is empty
  if (!Second) return;

  if (S.Diags.isIgnored(diag::warn_variables_not_in_loop_body,
                        Second->getBeginLoc()))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_variables_not_in_loop_body);
  DeclSetVector Decls;
  SmallVector<SourceRange, 10> Ranges;
  DeclExtractor DE(S, Decls, Ranges);
  DE.Visit(Second);

  // Don't analyze complex conditionals.
  if (!DE.isSimple()) return;

  // No decls found.
  if (Decls.size() == 0) return;

  // Don't warn on volatile, static, or global variables.
  for (auto *VD : Decls)
    if (VD->getType().isVolatileQualified() || VD->hasGlobalStorage())
      return;

  // If any condition variable is (potentially) modified in the condition
  // itself, the increment, or the body, the loop is not suspicious.
  if (DeclMatcher(S, Decls, Second).FoundDeclInUse() ||
      DeclMatcher(S, Decls, Third).FoundDeclInUse() ||
      DeclMatcher(S, Decls, Body).FoundDeclInUse())
    return;

  // Load decl names into diagnostic.
  if (Decls.size() > 4) {
    // More than four: diagnostic selects the unnamed plural form.
    PDiag << 0;
  } else {
    PDiag << (unsigned)Decls.size();
    for (auto *VD : Decls)
      PDiag << VD->getDeclName();
  }

  for (auto Range : Ranges)
    PDiag << Range;

  S.Diag(Ranges.begin()->getBegin(), PDiag);
}

// If Statement is an increment or decrement, return true and sets the
// variables Increment and DRE.
bool ProcessIterationStmt(Sema &S, Stmt* Statement, bool &Increment,
                          DeclRefExpr *&DRE) {
  // Look through a cleanup wrapper, but only when it has no side effects.
  if (auto Cleanups = dyn_cast<ExprWithCleanups>(Statement))
    if (!Cleanups->cleanupsHaveSideEffects())
      Statement = Cleanups->getSubExpr();

  // Built-in ++/-- (pre or post) on a variable reference.
  if (UnaryOperator *UO = dyn_cast<UnaryOperator>(Statement)) {
    switch (UO->getOpcode()) {
      default: return false;
      case UO_PostInc:
      case UO_PreInc:
        Increment = true;
        break;
      case UO_PostDec:
      case UO_PreDec:
        Increment = false;
        break;
    }
    DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr());
    return DRE;  // True only when the operand is a plain DeclRefExpr.
  }

  // Overloaded operator++/operator-- called on a variable reference.
  if (CXXOperatorCallExpr *Call = dyn_cast<CXXOperatorCallExpr>(Statement)) {
    FunctionDecl *FD = Call->getDirectCallee();
    if (!FD || !FD->isOverloadedOperator()) return false;

    switch (FD->getOverloadedOperator()) {
      default: return false;
      case OO_PlusPlus:
        Increment = true;
        break;
      case OO_MinusMinus:
        Increment = false;
        break;
    }
    DRE = dyn_cast<DeclRefExpr>(Call->getArg(0));
    return DRE;
  }

  return false;
}

// A visitor to determine if a continue or break statement is a
// subexpression.
class BreakContinueFinder : public ConstEvaluatedExprVisitor<BreakContinueFinder> {
  SourceLocation BreakLoc;     // Invalid until a relevant 'break' is found.
  SourceLocation ContinueLoc;  // Invalid until a 'continue' is found.
  bool InSwitch = false;       // Suppresses breaks that bind to a switch body.

public:
  // Visiting happens eagerly in the constructor.
  BreakContinueFinder(Sema &S, const Stmt* Body) :
      Inherited(S.Context) {
    Visit(Body);
  }

  typedef ConstEvaluatedExprVisitor<BreakContinueFinder> Inherited;

  void VisitContinueStmt(const ContinueStmt* E) {
    ContinueLoc = E->getContinueLoc();
  }

  void VisitBreakStmt(const BreakStmt* E) {
    if (!InSwitch)
      BreakLoc = E->getBreakLoc();
  }

  void VisitSwitchStmt(const SwitchStmt* S) {
    if (const Stmt *Init = S->getInit())
      Visit(Init);
    if (const Stmt *CondVar = S->getConditionVariableDeclStmt())
      Visit(CondVar);
    if (const Stmt *Cond = S->getCond())
      Visit(Cond);

    // Don't return break statements from the body of a switch.
    InSwitch = true;
    if (const Stmt *Body = S->getBody())
      Visit(Body);
    InSwitch = false;
  }

  void VisitForStmt(const ForStmt *S) {
    // Only visit the init statement of a for loop; the body
    // has a different break/continue scope.
    if (const Stmt *Init = S->getInit())
      Visit(Init);
  }

  void VisitWhileStmt(const WhileStmt *) {
    // Do nothing; the children of a while loop have a different
    // break/continue scope.
  }

  void VisitDoStmt(const DoStmt *) {
    // Do nothing; the children of a while loop have a different
    // break/continue scope.
  }

  void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
    // Only visit the initialization of a for loop; the body
    // has a different break/continue scope.
    if (const Stmt *Init = S->getInit())
      Visit(Init);
    if (const Stmt *Range = S->getRangeStmt())
      Visit(Range);
    if (const Stmt *Begin = S->getBeginStmt())
      Visit(Begin);
    if (const Stmt *End = S->getEndStmt())
      Visit(End);
  }

  void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
    // Only visit the initialization of a for loop; the body
    // has a different break/continue scope.
    if (const Stmt *Element = S->getElement())
      Visit(Element);
    if (const Stmt *Collection = S->getCollection())
      Visit(Collection);
  }

  bool ContinueFound() { return ContinueLoc.isValid(); }
  bool BreakFound() { return BreakLoc.isValid(); }
  SourceLocation GetContinueLoc() { return ContinueLoc; }
  SourceLocation GetBreakLoc() { return BreakLoc; }

}; // end class BreakContinueFinder

// Emit a warning when a loop increment/decrement appears twice per loop
// iteration.  The conditions which trigger this warning are:
// 1) The last statement in the loop body and the third expression in the
//    for loop are both increment or both decrement of the same variable
// 2) No continue statements in the loop body.
void CheckForRedundantIteration(Sema &S, Expr *Third, Stmt *Body) {
  // Return when there is nothing to check.
  if (!Body || !Third) return;

  if (S.Diags.isIgnored(diag::warn_redundant_loop_iteration,
                        Third->getBeginLoc()))
    return;

  // Get the last statement from the loop body.
  CompoundStmt *CS = dyn_cast<CompoundStmt>(Body);
  if (!CS || CS->body_empty()) return;
  Stmt *LastStmt = CS->body_back();
  if (!LastStmt) return;

  bool LoopIncrement, LastIncrement;
  DeclRefExpr *LoopDRE, *LastDRE;

  // Both the for-increment and the trailing body statement must be a
  // recognizable ++/-- of a plain variable reference.
  if (!ProcessIterationStmt(S, Third, LoopIncrement, LoopDRE)) return;
  if (!ProcessIterationStmt(S, LastStmt, LastIncrement, LastDRE)) return;

  // Check that the two statements are both increments or both decrements
  // on the same variable.
  if (LoopIncrement != LastIncrement ||
      LoopDRE->getDecl() != LastDRE->getDecl()) return;

  // A 'continue' would skip the trailing statement, so the pair may be
  // intentional; stay quiet.
  if (BreakContinueFinder(S, Body).ContinueFound()) return;

  S.Diag(LastDRE->getLocation(), diag::warn_redundant_loop_iteration)
      << LastDRE->getDecl() << LastIncrement;
  S.Diag(LoopDRE->getLocation(), diag::note_loop_iteration_here)
      << LoopIncrement;
}

} // end namespace

/// In C, warn when a break/continue inside a statement-expression-like
/// condition binds to an enclosing switch or loop rather than the one being
/// built (-Wswitch-bool style diagnostics for loop control).
void Sema::CheckBreakContinueBinding(Expr *E) {
  if (!E || getLangOpts().CPlusPlus)
    return;
  BreakContinueFinder BCFinder(*this, E);
  Scope *BreakParent = CurScope->getBreakParent();
  if (BCFinder.BreakFound() && BreakParent) {
    if (BreakParent->getFlags() & Scope::SwitchScope) {
      Diag(BCFinder.GetBreakLoc(), diag::warn_break_binds_to_switch);
    } else {
      Diag(BCFinder.GetBreakLoc(), diag::warn_loop_ctrl_binds_to_inner)
          << "break";
    }
  } else if (BCFinder.ContinueFound() && CurScope->getContinueParent()) {
    Diag(BCFinder.GetContinueLoc(), diag::warn_loop_ctrl_binds_to_inner)
        << "continue";
  }
}

/// Build a ForStmt AST node, running the loop-related diagnostics above.
StmtResult Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
                              Stmt *First, ConditionResult Second,
                              FullExprArg third, SourceLocation RParenLoc,
                              Stmt *Body) {
  if (Second.isInvalid())
    return StmtError();

  if (!getLangOpts().CPlusPlus) {
    if (DeclStmt *DS = dyn_cast_or_null<DeclStmt>(First)) {
      // C99 6.8.5p3: The declaration part of a 'for' statement shall only
      // declare identifiers for objects having storage class 'auto' or
      // 'register'.
      for (auto *DI : DS->decls()) {
        VarDecl *VD = dyn_cast<VarDecl>(DI);
        if (VD && VD->isLocalVarDecl() && !VD->hasLocalStorage())
          VD = nullptr;
        if (!VD) {
          Diag(DI->getLocation(), diag::err_non_local_variable_decl_in_for);
          DI->setInvalidDecl();
        }
      }
    }
  }

  CheckBreakContinueBinding(Second.get().second);
  CheckBreakContinueBinding(third.get());

  // Only analyze the condition when there is no condition variable decl.
  if (!Second.get().first)
    CheckForLoopConditionalStatement(*this, Second.get().second, third.get(),
                                     Body);
  CheckForRedundantIteration(*this, third.get(), Body);

  if (Second.get().second &&
      !Diags.isIgnored(diag::warn_comma_operator,
                       Second.get().second->getExprLoc()))
    CommaVisitor(*this).Visit(Second.get().second);

  Expr *Third = third.release().getAs<Expr>();

  if (isa<NullStmt>(Body))
    getCurCompoundScope().setHasEmptyLoopBodies();

  return new (Context)
      ForStmt(Context, First, Second.get().second, Second.get().first, Third,
              Body, ForLoc, LParenLoc, RParenLoc);
}

/// In an Objective C collection iteration statement:
///   for (x in y)
/// x can be an arbitrary l-value expression.  Bind it up as a
/// full-expression.
StmtResult Sema::ActOnForEachLValueExpr(Expr *E) {
  // Reduce placeholder expressions here.  Note that this rejects the
  // use of pseudo-object l-values in this position.
  ExprResult result = CheckPlaceholderExpr(E);
  if (result.isInvalid()) return StmtError();
  E = result.get();

  ExprResult FullExpr = ActOnFinishFullExpr(E, /*DiscardedValue*/ false);
  if (FullExpr.isInvalid()) return StmtError();
  return StmtResult(static_cast<Stmt*>(FullExpr.get()));
}

/// Type-check the collection operand of an ObjC for-in statement and warn
/// when it cannot respond to countByEnumeratingWithState:objects:count:.
ExprResult
Sema::CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
  if (!collection)
    return ExprError();

  ExprResult result = CorrectDelayedTyposInExpr(collection);
  if (!result.isUsable())
    return ExprError();
  collection = result.get();

  // Bail out early if we've got a type-dependent expression.
  if (collection->isTypeDependent()) return collection;

  // Perform normal l-value conversion.
  result = DefaultFunctionArrayLvalueConversion(collection);
  if (result.isInvalid())
    return ExprError();
  collection = result.get();

  // The operand needs to have object-pointer type.
  // TODO: should we do a contextual conversion?
  const ObjCObjectPointerType *pointerType =
      collection->getType()->getAs<ObjCObjectPointerType>();
  if (!pointerType)
    return Diag(forLoc, diag::err_collection_expr_type)
               << collection->getType() << collection->getSourceRange();

  // Check that the operand provides
  //   - countByEnumeratingWithState:objects:count:
  const ObjCObjectType *objectType = pointerType->getObjectType();
  ObjCInterfaceDecl *iface = objectType->getInterface();

  // If we have a forward-declared type, we can't do this check.
  // Under ARC, it is an error not to have a forward-declared class.
  if (iface &&
      (getLangOpts().ObjCAutoRefCount
           ? RequireCompleteType(forLoc, QualType(objectType, 0),
                                 diag::err_arc_collection_forward, collection)
           : !isCompleteType(forLoc, QualType(objectType, 0)))) {
    // Otherwise, if we have any useful type information, check that
    // the type declares the appropriate method.
  } else if (iface || !objectType->qual_empty()) {
    IdentifierInfo *selectorIdents[] = {
      &Context.Idents.get("countByEnumeratingWithState"),
      &Context.Idents.get("objects"),
      &Context.Idents.get("count")
    };
    Selector selector = Context.Selectors.getSelector(3, &selectorIdents[0]);

    ObjCMethodDecl *method = nullptr;

    // If there's an interface, look in both the public and private APIs.
    if (iface) {
      method = iface->lookupInstanceMethod(selector);
      if (!method) method = iface->lookupPrivateMethod(selector);
    }

    // Also check protocol qualifiers.
    if (!method)
      method = LookupMethodInQualifiedType(selector, pointerType,
                                           /*instance*/ true);

    // If we didn't find it anywhere, give up.
    if (!method) {
      Diag(forLoc, diag::warn_collection_expr_type)
          << collection->getType() << selector << collection->getSourceRange();
    }

    // TODO: check for an incompatible signature?
  }

  // Wrap up any cleanups in the expression.
  return collection;
}

/// Build an ObjCForCollectionStmt, checking the element (First) and the
/// collection operand; the loop body is attached later.
StmtResult
Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
                                 Stmt *First, Expr *collection,
                                 SourceLocation RParenLoc) {
  setFunctionHasBranchProtectedScope();

  ExprResult CollectionExprResult =
      CheckObjCForCollectionOperand(ForLoc, collection);

  if (First) {
    QualType FirstType;
    if (DeclStmt *DS = dyn_cast<DeclStmt>(First)) {
      // The element may be declared inline: exactly one local variable.
      if (!DS->isSingleDecl())
        return StmtError(Diag((*DS->decl_begin())->getLocation(),
                              diag::err_toomany_element_decls));

      VarDecl *D = dyn_cast<VarDecl>(DS->getSingleDecl());
      if (!D || D->isInvalidDecl())
        return StmtError();

      FirstType = D->getType();
      // C99 6.8.5p3: The declaration part of a 'for' statement shall only
      // declare identifiers for objects having storage class 'auto' or
      // 'register'.
      if (!D->hasLocalStorage())
        return StmtError(Diag(D->getLocation(),
                              diag::err_non_local_variable_decl_in_for));

      // If the type contained 'auto', deduce the 'auto' to 'id'.
      if (FirstType->getContainedAutoType()) {
        OpaqueValueExpr OpaqueId(D->getLocation(), Context.getObjCIdType(),
                                 VK_RValue);
        Expr *DeducedInit = &OpaqueId;
        if (DeduceAutoType(D->getTypeSourceInfo(), DeducedInit, FirstType) ==
                DAR_Failed)
          DiagnoseAutoDeductionFailure(D, DeducedInit);
        if (FirstType.isNull()) {
          D->setInvalidDecl();
          return StmtError();
        }

        D->setType(FirstType);

        if (!inTemplateInstantiation()) {
          SourceLocation Loc =
              D->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
          Diag(Loc, diag::warn_auto_var_is_id)
              << D->getDeclName();
        }
      }

    } else {
      // Otherwise the element is an arbitrary expression; it must be a
      // non-const l-value of object-pointer (or block-pointer) type.
      Expr *FirstE = cast<Expr>(First);
      if (!FirstE->isTypeDependent() && !FirstE->isLValue())
        return StmtError(
            Diag(First->getBeginLoc(), diag::err_selector_element_not_lvalue)
            << First->getSourceRange());

      FirstType = static_cast<Expr*>(First)->getType();
      if (FirstType.isConstQualified())
        Diag(ForLoc, diag::err_selector_element_const_type)
            << FirstType << First->getSourceRange();
    }
    if (!FirstType->isDependentType() &&
        !FirstType->isObjCObjectPointerType() &&
        !FirstType->isBlockPointerType())
      return StmtError(Diag(ForLoc, diag::err_selector_element_type)
                           << FirstType << First->getSourceRange());
  }

  if (CollectionExprResult.isInvalid())
    return StmtError();

  CollectionExprResult =
      ActOnFinishFullExpr(CollectionExprResult.get(), /*DiscardedValue*/ false);
  if (CollectionExprResult.isInvalid())
    return StmtError();

  return new (Context) ObjCForCollectionStmt(First, CollectionExprResult.get(),
                                             nullptr, ForLoc, RParenLoc);
}

/// Finish building a variable declaration for a for-range statement.
/// \return true if an error occurs.
static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
                                  SourceLocation Loc, int DiagID) {
  if (Decl->getType()->isUndeducedType()) {
    ExprResult Res = SemaRef.CorrectDelayedTyposInExpr(Init);
    if (!Res.isUsable()) {
      Decl->setInvalidDecl();
      return true;
    }
    Init = Res.get();
  }

  // Deduce the type for the iterator variable now rather than leaving it to
  // AddInitializerToDecl, so we can produce a more suitable diagnostic.
  QualType InitType;
  if ((!isa<InitListExpr>(Init) && Init->getType()->isVoidType()) ||
      SemaRef.DeduceAutoType(Decl->getTypeSourceInfo(), Init, InitType) ==
          Sema::DAR_Failed)
    SemaRef.Diag(Loc, DiagID) << Init->getType();
  if (InitType.isNull()) {
    Decl->setInvalidDecl();
    return true;
  }
  Decl->setType(InitType);

  // In ARC, infer lifetime.
  // FIXME: ARC may want to turn this into 'const __unsafe_unretained' if
  // we're doing the equivalent of fast iteration.
  if (SemaRef.getLangOpts().ObjCAutoRefCount &&
      SemaRef.inferObjCARCLifetime(Decl))
    Decl->setInvalidDecl();

  SemaRef.AddInitializerToDecl(Decl, Init, /*DirectInit=*/false);
  SemaRef.FinalizeDeclaration(Decl);
  SemaRef.CurContext->addHiddenDecl(Decl);
  return false;
}

namespace {
// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
  BEF_begin,
  BEF_end
};

/// Produce a note indicating which begin/end function was implicitly called
This is often not obvious from the code, /// nor from the diagnostics produced when analysing the implicit expressions /// required in a for-range statement. void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E, BeginEndFunction BEF) { CallExpr *CE = dyn_cast<CallExpr>(E); if (!CE) return; FunctionDecl *D = dyn_cast<FunctionDecl>(CE->getCalleeDecl()); if (!D) return; SourceLocation Loc = D->getLocation(); std::string Description; bool IsTemplate = false; if (FunctionTemplateDecl *FunTmpl = D->getPrimaryTemplate()) { Description = SemaRef.getTemplateArgumentBindingsText( FunTmpl->getTemplateParameters(), *D->getTemplateSpecializationArgs()); IsTemplate = true; } SemaRef.Diag(Loc, diag::note_for_range_begin_end) << BEF << IsTemplate << Description << E->getType(); } /// Build a variable declaration for a for-range statement. VarDecl *BuildForRangeVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type, StringRef Name) { DeclContext *DC = SemaRef.CurContext; IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name); TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc); VarDecl *Decl = VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None); Decl->setImplicit(); return Decl; } } static bool ObjCEnumerationCollection(Expr *Collection) { return !Collection->isTypeDependent() && Collection->getType()->getAs<ObjCObjectPointerType>() != nullptr; } /// ActOnCXXForRangeStmt - Check and build a C++11 for-range statement. /// /// C++11 [stmt.ranged]: /// A range-based for statement is equivalent to /// /// { /// auto && __range = range-init; /// for ( auto __begin = begin-expr, /// __end = end-expr; /// __begin != __end; /// ++__begin ) { /// for-range-declaration = *__begin; /// statement /// } /// } /// /// The body of the loop is not available yet, since it cannot be analysed until /// we have determined the type of the for-range-declaration. 
StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
                                      SourceLocation CoawaitLoc, Stmt *InitStmt,
                                      Stmt *First, SourceLocation ColonLoc,
                                      Expr *Range, SourceLocation RParenLoc,
                                      BuildForRangeKind Kind) {
  if (!First)
    return StmtError();

  // An ObjC collection as the range is really a for-in statement.
  if (Range && ObjCEnumerationCollection(Range)) {
    // FIXME: Support init-statements in Objective-C++20 ranged for statement.
    if (InitStmt)
      return Diag(InitStmt->getBeginLoc(), diag::err_objc_for_range_init_stmt)
                 << InitStmt->getSourceRange();
    return ActOnObjCForCollectionStmt(ForLoc, First, Range, RParenLoc);
  }

  DeclStmt *DS = dyn_cast<DeclStmt>(First);
  assert(DS && "first part of for range not a decl stmt");

  if (!DS->isSingleDecl()) {
    Diag(DS->getBeginLoc(), diag::err_type_defined_in_for_range);
    return StmtError();
  }

  Decl *LoopVar = DS->getSingleDecl();
  if (LoopVar->isInvalidDecl() || !Range ||
      DiagnoseUnexpandedParameterPack(Range, UPPC_Expression)) {
    LoopVar->setInvalidDecl();
    return StmtError();
  }

  // Build the coroutine state immediately and not later during template
  // instantiation
  if (!CoawaitLoc.isInvalid()) {
    if (!ActOnCoroutineBodyStart(S, CoawaitLoc, "co_await"))
      return StmtError();
  }

  // Build  auto && __range = range-init
  // Divide by 2, since the variables are in the inner scope (loop body).
  const auto DepthStr = std::to_string(S->getDepth() / 2);
  SourceLocation RangeLoc = Range->getBeginLoc();
  VarDecl *RangeVar = BuildForRangeVarDecl(*this, RangeLoc,
                                           Context.getAutoRRefDeductType(),
                                           std::string("__range") + DepthStr);
  if (FinishForRangeVarDecl(*this, RangeVar, Range, RangeLoc,
                            diag::err_for_range_deduction_failure)) {
    LoopVar->setInvalidDecl();
    return StmtError();
  }

  // Claim the type doesn't contain auto: we've already done the checking.
  DeclGroupPtrTy RangeGroup =
      BuildDeclaratorGroup(MutableArrayRef<Decl *>((Decl **)&RangeVar, 1));
  StmtResult RangeDecl = ActOnDeclStmt(RangeGroup, RangeLoc, RangeLoc);
  if (RangeDecl.isInvalid()) {
    LoopVar->setInvalidDecl();
    return StmtError();
  }

  // Begin/end/cond/inc are synthesized later by BuildCXXForRangeStmt.
  return BuildCXXForRangeStmt(
      ForLoc, CoawaitLoc, InitStmt, ColonLoc, RangeDecl.get(),
      /*BeginStmt=*/nullptr, /*EndStmt=*/nullptr,
      /*Cond=*/nullptr, /*Inc=*/nullptr, DS, RParenLoc, Kind);
}

/// Create the initialization, compare, and increment steps for
/// the range-based for loop expression.
/// This function does not handle array-based for loops,
/// which are created in Sema::BuildCXXForRangeStmt.
///
/// \returns a ForRangeStatus indicating success or what kind of error occurred.
/// BeginExpr and EndExpr are set and FRS_Success is returned on success;
/// CandidateSet and BEF are set and some non-success value is returned on
/// failure.
static Sema::ForRangeStatus
BuildNonArrayForRange(Sema &SemaRef, Expr *BeginRange, Expr *EndRange,
                      QualType RangeType, VarDecl *BeginVar, VarDecl *EndVar,
                      SourceLocation ColonLoc, SourceLocation CoawaitLoc,
                      OverloadCandidateSet *CandidateSet, ExprResult *BeginExpr,
                      ExprResult *EndExpr, BeginEndFunction *BEF) {
  DeclarationNameInfo BeginNameInfo(
      &SemaRef.PP.getIdentifierTable().get("begin"), ColonLoc);
  DeclarationNameInfo EndNameInfo(&SemaRef.PP.getIdentifierTable().get("end"),
                                  ColonLoc);

  LookupResult BeginMemberLookup(SemaRef, BeginNameInfo,
                                 Sema::LookupMemberName);
  LookupResult EndMemberLookup(SemaRef, EndNameInfo, Sema::LookupMemberName);

  // Builds __begin = begin-expr and finishes the __begin variable.
  auto BuildBegin = [&] {
    *BEF = BEF_begin;
    Sema::ForRangeStatus RangeStatus =
        SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, BeginNameInfo,
                                          BeginMemberLookup, CandidateSet,
                                          BeginRange, BeginExpr);
    if (RangeStatus != Sema::FRS_Success) {
      if (RangeStatus == Sema::FRS_DiagnosticIssued)
        SemaRef.Diag(BeginRange->getBeginLoc(), diag::note_in_for_range)
            << ColonLoc << BEF_begin << BeginRange->getType();
      return RangeStatus;
    }
    if (!CoawaitLoc.isInvalid()) {
      // FIXME: getCurScope() should not be used during template instantiation.
      // We should pick up the set of unqualified lookup results for operator
      // co_await during the initial parse.
      *BeginExpr = SemaRef.ActOnCoawaitExpr(SemaRef.getCurScope(), ColonLoc,
                                            BeginExpr->get());
      if (BeginExpr->isInvalid())
        return Sema::FRS_DiagnosticIssued;
    }
    if (FinishForRangeVarDecl(SemaRef, BeginVar, BeginExpr->get(), ColonLoc,
                              diag::err_for_range_iter_deduction_failure)) {
      NoteForRangeBeginEndFunction(SemaRef, BeginExpr->get(), *BEF);
      return Sema::FRS_DiagnosticIssued;
    }
    return Sema::FRS_Success;
  };

  // Builds __end = end-expr and finishes the __end variable.
  auto BuildEnd = [&] {
    *BEF = BEF_end;
    Sema::ForRangeStatus RangeStatus =
        SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, EndNameInfo,
                                          EndMemberLookup, CandidateSet,
                                          EndRange, EndExpr);
    if (RangeStatus != Sema::FRS_Success) {
      if (RangeStatus == Sema::FRS_DiagnosticIssued)
        SemaRef.Diag(EndRange->getBeginLoc(), diag::note_in_for_range)
            << ColonLoc << BEF_end << EndRange->getType();
      return RangeStatus;
    }
    if (FinishForRangeVarDecl(SemaRef, EndVar, EndExpr->get(), ColonLoc,
                              diag::err_for_range_iter_deduction_failure)) {
      NoteForRangeBeginEndFunction(SemaRef, EndExpr->get(), *BEF);
      return Sema::FRS_DiagnosticIssued;
    }
    return Sema::FRS_Success;
  };

  if (CXXRecordDecl *D = RangeType->getAsCXXRecordDecl()) {
    // - if _RangeT is a class type, the unqualified-ids begin and end are
    //   looked up in the scope of class _RangeT as if by class member access
    //   lookup (3.4.5), and if either (or both) finds at least one
    //   declaration, begin-expr and end-expr are __range.begin() and
    //   __range.end(), respectively;
    SemaRef.LookupQualifiedName(BeginMemberLookup, D);
    if (BeginMemberLookup.isAmbiguous())
      return Sema::FRS_DiagnosticIssued;

    SemaRef.LookupQualifiedName(EndMemberLookup, D);
    if (EndMemberLookup.isAmbiguous())
      return Sema::FRS_DiagnosticIssued;

    if (BeginMemberLookup.empty() != EndMemberLookup.empty()) {
      // Look up the non-member form of the member we didn't find, first.
      // This way we prefer a "no viable 'end'" diagnostic over a "I found
      // a 'begin' but ignored it because there was no member 'end'"
      // diagnostic.
      auto BuildNonmember = [&](
          BeginEndFunction BEFFound, LookupResult &Found,
          llvm::function_ref<Sema::ForRangeStatus()> BuildFound,
          llvm::function_ref<Sema::ForRangeStatus()> BuildNotFound) {
        LookupResult OldFound = std::move(Found);
        Found.clear();

        if (Sema::ForRangeStatus Result = BuildNotFound())
          return Result;

        switch (BuildFound()) {
        case Sema::FRS_Success:
          return Sema::FRS_Success;

        case Sema::FRS_NoViableFunction:
          CandidateSet->NoteCandidates(
              PartialDiagnosticAt(BeginRange->getBeginLoc(),
                                  SemaRef.PDiag(diag::err_for_range_invalid)
                                      << BeginRange->getType() << BEFFound),
              SemaRef, OCD_AllCandidates, BeginRange);
          LLVM_FALLTHROUGH;

        case Sema::FRS_DiagnosticIssued:
          // Point at the member begin/end we found but could not use.
          for (NamedDecl *D : OldFound) {
            SemaRef.Diag(D->getLocation(),
                         diag::note_for_range_member_begin_end_ignored)
                << BeginRange->getType() << BEFFound;
          }
          return Sema::FRS_DiagnosticIssued;
        }
        llvm_unreachable("unexpected ForRangeStatus");
      };
      if (BeginMemberLookup.empty())
        return BuildNonmember(BEF_end, EndMemberLookup, BuildEnd, BuildBegin);
      return BuildNonmember(BEF_begin, BeginMemberLookup, BuildBegin, BuildEnd);
    }
  } else {
    // - otherwise, begin-expr and end-expr are begin(__range) and
    //   end(__range), respectively, where begin and end are looked up with
    //   argument-dependent lookup (3.4.2). For the purposes of this name
    //   lookup, namespace std is an associated namespace.
  }

  if (Sema::ForRangeStatus Result = BuildBegin())
    return Result;
  return BuildEnd();
}

/// Speculatively attempt to dereference an invalid range expression.
/// If the attempt fails, this function will return a valid, null StmtResult
/// and emit no diagnostics.
static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
                                                 SourceLocation ForLoc,
                                                 SourceLocation CoawaitLoc,
                                                 Stmt *InitStmt,
                                                 Stmt *LoopVarDecl,
                                                 SourceLocation ColonLoc,
                                                 Expr *Range,
                                                 SourceLocation RangeLoc,
                                                 SourceLocation RParenLoc) {
  // Determine whether we can rebuild the for-range statement with a
  // dereferenced range expression.
  ExprResult AdjustedRange;
  {
    // The SFINAE trap suppresses diagnostics during this speculative check.
    Sema::SFINAETrap Trap(SemaRef);

    AdjustedRange = SemaRef.BuildUnaryOp(S, RangeLoc, UO_Deref, Range);
    if (AdjustedRange.isInvalid())
      return StmtResult();

    StmtResult SR = SemaRef.ActOnCXXForRangeStmt(
        S, ForLoc, CoawaitLoc, InitStmt, LoopVarDecl, ColonLoc,
        AdjustedRange.get(), RParenLoc, Sema::BFRK_Check);
    if (SR.isInvalid())
      return StmtResult();
  }

  // The attempt to dereference worked well enough that it could produce a valid
  // loop. Produce a fixit, and rebuild the loop with diagnostics enabled, in
  // case there are any other (non-fatal) problems with it.
  SemaRef.Diag(RangeLoc, diag::err_for_range_dereference)
      << Range->getType() << FixItHint::CreateInsertion(RangeLoc, "*");
  return SemaRef.ActOnCXXForRangeStmt(
      S, ForLoc, CoawaitLoc, InitStmt, LoopVarDecl, ColonLoc,
      AdjustedRange.get(), RParenLoc, Sema::BFRK_Rebuild);
}

namespace {
/// RAII object to automatically invalidate a declaration if an error occurs.
struct InvalidateOnErrorScope {
  InvalidateOnErrorScope(Sema &SemaRef, Decl *D, bool Enabled)
      : Trap(SemaRef.Diags), D(D), Enabled(Enabled) {}
  ~InvalidateOnErrorScope() {
    // Only invalidate when enabled and an error was actually emitted while
    // this scope was alive.
    if (Enabled && Trap.hasErrorOccurred())
      D->setInvalidDecl();
  }

  DiagnosticErrorTrap Trap; // Tracks errors emitted during this scope.
  Decl *D;                  // Declaration to invalidate on error.
  bool Enabled;             // Whether invalidation is armed at all.
};
}

/// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement.
StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
                                      SourceLocation CoawaitLoc,
                                      Stmt *InitStmt, SourceLocation ColonLoc,
                                      Stmt *RangeDecl, Stmt *Begin, Stmt *End,
                                      Expr *Cond, Expr *Inc,
                                      Stmt *LoopVarDecl,
                                      SourceLocation RParenLoc,
                                      BuildForRangeKind Kind) {
  // FIXME: This should not be used during template instantiation. We should
  // pick up the set of unqualified lookup results for the != and + operators
  // in the initial parse.
  //
  // Testcase (accepts-invalid):
  //   template<typename T> void f() { for (auto x : T()) {} }
  //   namespace N { struct X { X begin(); X end(); int operator*(); }; }
  //   bool operator!=(N::X, N::X); void operator++(N::X);
  //   void g() { f<N::X>(); }
  Scope *S = getCurScope();

  DeclStmt *RangeDS = cast<DeclStmt>(RangeDecl);
  VarDecl *RangeVar = cast<VarDecl>(RangeDS->getSingleDecl());
  QualType RangeVarType = RangeVar->getType();

  DeclStmt *LoopVarDS = cast<DeclStmt>(LoopVarDecl);
  VarDecl *LoopVar = cast<VarDecl>(LoopVarDS->getSingleDecl());

  // If we hit any errors, mark the loop variable as invalid if its type
  // contains 'auto'.
  InvalidateOnErrorScope Invalidate(*this, LoopVar,
                                    LoopVar->getType()->isUndeducedType());

  StmtResult BeginDeclStmt = Begin;
  StmtResult EndDeclStmt = End;
  ExprResult NotEqExpr = Cond, IncrExpr = Inc;

  if (RangeVarType->isDependentType()) {
    // The range is implicitly used as a placeholder when it is dependent.
    RangeVar->markUsed(Context);

    // Deduce any 'auto's in the loop variable as 'DependentTy'. We'll fill
    // them in properly when we instantiate the loop.
    // (Continuation of Sema::BuildCXXForRangeStmt: dependent-range path, then
    // the non-dependent path that builds __begin/__end.)
    if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check) {
      if (auto *DD = dyn_cast<DecompositionDecl>(LoopVar))
        for (auto *Binding : DD->bindings())
          Binding->setType(Context.DependentTy);
      LoopVar->setType(SubstAutoType(LoopVar->getType(), Context.DependentTy));
    }
  } else if (!BeginDeclStmt.get()) {
    SourceLocation RangeLoc = RangeVar->getLocation();

    const QualType RangeVarNonRefType = RangeVarType.getNonReferenceType();

    ExprResult BeginRangeRef = BuildDeclRefExpr(RangeVar, RangeVarNonRefType,
                                                VK_LValue, ColonLoc);
    if (BeginRangeRef.isInvalid())
      return StmtError();

    ExprResult EndRangeRef = BuildDeclRefExpr(RangeVar, RangeVarNonRefType,
                                              VK_LValue, ColonLoc);
    if (EndRangeRef.isInvalid())
      return StmtError();

    QualType AutoType = Context.getAutoDeductType();
    Expr *Range = RangeVar->getInit();
    if (!Range)
      return StmtError();
    QualType RangeType = Range->getType();

    if (RequireCompleteType(RangeLoc, RangeType,
                            diag::err_for_range_incomplete_type))
      return StmtError();

    // Build auto __begin = begin-expr, __end = end-expr.
    // Divide by 2, since the variables are in the inner scope (loop body).
    // NOTE(review): the depth suffix distinguishes the hidden variables of
    // nested range-for loops — confirm against BuildForRangeVarDecl.
    const auto DepthStr = std::to_string(S->getDepth() / 2);
    VarDecl *BeginVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
                                             std::string("__begin") + DepthStr);
    VarDecl *EndVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
                                           std::string("__end") + DepthStr);

    // Build begin-expr and end-expr and attach to __begin and __end variables.
    ExprResult BeginExpr, EndExpr;
    if (const ArrayType *UnqAT = RangeType->getAsArrayTypeUnsafe()) {
      // - if _RangeT is an array type, begin-expr and end-expr are __range and
      //   __range + __bound, respectively, where __bound is the array bound. If
      //   _RangeT is an array of unknown size or an array of incomplete type,
      //   the program is ill-formed;

      // begin-expr is __range.
      BeginExpr = BeginRangeRef;
      if (!CoawaitLoc.isInvalid()) {
        BeginExpr = ActOnCoawaitExpr(S, ColonLoc, BeginExpr.get());
        if (BeginExpr.isInvalid())
          return StmtError();
      }
      if (FinishForRangeVarDecl(*this, BeginVar, BeginRangeRef.get(), ColonLoc,
                                diag::err_for_range_iter_deduction_failure)) {
        NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
        return StmtError();
      }

      // Find the array bound.
      ExprResult BoundExpr;
      if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(UnqAT))
        BoundExpr = IntegerLiteral::Create(
            Context, CAT->getSize(), Context.getPointerDiffType(), RangeLoc);
      else if (const VariableArrayType *VAT =
                   dyn_cast<VariableArrayType>(UnqAT)) {
        // For a variably modified type we can't just use the expression within
        // the array bounds, since we don't want that to be re-evaluated here.
        // Rather, we need to determine what it was when the array was first
        // created - so we resort to using sizeof(vla)/sizeof(element).
        // For e.g.
        //  void f(int b) {
        //    int vla[b];
        //    b = -1;   <-- This should not affect the num of iterations below
        //    for (int &c : vla) { .. }
        //  }

        // FIXME: This results in codegen generating IR that recalculates the
        // run-time number of elements (as opposed to just using the IR Value
        // that corresponds to the run-time value of each bound that was
        // generated when the array was created.) If this proves too
        // embarrassing even for unoptimized IR, consider passing a
        // magic-value/cookie to codegen that then knows to simply use that
        // initial llvm::Value (that corresponds to the bound at time of array
        // creation) within getelementptr. But be prepared to pay the price of
        // increasing a customized form of coupling between the two components
        // - which could be hard to maintain as the codebase evolves.

        // sizeof(vla): the size of the whole array, in bytes.
        ExprResult SizeOfVLAExprR = ActOnUnaryExprOrTypeTraitExpr(
            EndVar->getLocation(), UETT_SizeOf,
            /*IsType=*/true,
            CreateParsedType(VAT->desugar(), Context.getTrivialTypeSourceInfo(
                                                 VAT->desugar(), RangeLoc))
                .getAsOpaquePtr(),
            EndVar->getSourceRange());
        if (SizeOfVLAExprR.isInvalid())
          return StmtError();

        // sizeof(element): the size of one element, in bytes.
        ExprResult SizeOfEachElementExprR = ActOnUnaryExprOrTypeTraitExpr(
            EndVar->getLocation(), UETT_SizeOf,
            /*IsType=*/true,
            CreateParsedType(VAT->desugar(),
                             Context.getTrivialTypeSourceInfo(
                                 VAT->getElementType(), RangeLoc))
                .getAsOpaquePtr(),
            EndVar->getSourceRange());
        if (SizeOfEachElementExprR.isInvalid())
          return StmtError();

        // __bound = sizeof(vla) / sizeof(element).
        BoundExpr =
            ActOnBinOp(S, EndVar->getLocation(), tok::slash,
                       SizeOfVLAExprR.get(), SizeOfEachElementExprR.get());
        if (BoundExpr.isInvalid())
          return StmtError();

      } else {
        // Can't be a DependentSizedArrayType or an IncompleteArrayType since
        // UnqAT is not incomplete and Range is not type-dependent.
        llvm_unreachable("Unexpected array type in for-range");
      }

      // end-expr is __range + __bound.
      EndExpr = ActOnBinOp(S, ColonLoc, tok::plus, EndRangeRef.get(),
                           BoundExpr.get());
      if (EndExpr.isInvalid())
        return StmtError();
      if (FinishForRangeVarDecl(*this, EndVar, EndExpr.get(), ColonLoc,
                                diag::err_for_range_iter_deduction_failure)) {
        NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
        return StmtError();
      }
    } else {
      // Non-array range: build __begin/__end via member or ADL begin()/end().
      OverloadCandidateSet CandidateSet(RangeLoc,
                                        OverloadCandidateSet::CSK_Normal);
      BeginEndFunction BEFFailure;
      ForRangeStatus RangeStatus = BuildNonArrayForRange(
          *this, BeginRangeRef.get(), EndRangeRef.get(), RangeType, BeginVar,
          EndVar, ColonLoc, CoawaitLoc, &CandidateSet, &BeginExpr, &EndExpr,
          &BEFFailure);

      if (Kind == BFRK_Build && RangeStatus == FRS_NoViableFunction &&
          BEFFailure == BEF_begin) {
        // If the range is being built from an array parameter, emit a
        // diagnostic that it is being treated as a pointer.
        // (Continuation of Sema::BuildCXXForRangeStmt: failure recovery, then
        // construction of the !=, ++ and * expressions for the loop.)
        if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Range)) {
          if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
            QualType ArrayTy = PVD->getOriginalType();
            QualType PointerTy = PVD->getType();
            if (PointerTy->isPointerType() && ArrayTy->isArrayType()) {
              Diag(Range->getBeginLoc(), diag::err_range_on_array_parameter)
                  << RangeLoc << PVD << ArrayTy << PointerTy;
              Diag(PVD->getLocation(), diag::note_declared_at);
              return StmtError();
            }
          }
        }

        // If building the range failed, try dereferencing the range expression
        // unless a diagnostic was issued or the end function is problematic.
        StmtResult SR = RebuildForRangeWithDereference(*this, S, ForLoc,
                                                       CoawaitLoc, InitStmt,
                                                       LoopVarDecl, ColonLoc,
                                                       Range, RangeLoc,
                                                       RParenLoc);
        if (SR.isInvalid() || SR.isUsable())
          return SR;
      }

      // Otherwise, emit diagnostics if we haven't already.
      if (RangeStatus == FRS_NoViableFunction) {
        // BEFFailure tells us whether 'begin' or 'end' failed to resolve.
        Expr *Range = BEFFailure ? EndRangeRef.get() : BeginRangeRef.get();
        CandidateSet.NoteCandidates(
            PartialDiagnosticAt(Range->getBeginLoc(),
                                PDiag(diag::err_for_range_invalid)
                                    << RangeLoc << Range->getType()
                                    << BEFFailure),
            *this, OCD_AllCandidates, Range);
      }
      // Return an error if no fix was discovered.
      if (RangeStatus != FRS_Success)
        return StmtError();
    }

    assert(!BeginExpr.isInvalid() && !EndExpr.isInvalid() &&
           "invalid range expression in for loop");

    // C++11 [dcl.spec.auto]p7: BeginType and EndType must be the same.
    // C++1z removes this restriction.
    QualType BeginType = BeginVar->getType(), EndType = EndVar->getType();
    if (!Context.hasSameType(BeginType, EndType)) {
      Diag(RangeLoc,
           getLangOpts().CPlusPlus17
               ? diag::warn_for_range_begin_end_types_differ
               : diag::ext_for_range_begin_end_types_differ)
          << BeginType << EndType;
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
    }

    BeginDeclStmt =
        ActOnDeclStmt(ConvertDeclToDeclGroup(BeginVar), ColonLoc, ColonLoc);
    EndDeclStmt =
        ActOnDeclStmt(ConvertDeclToDeclGroup(EndVar), ColonLoc, ColonLoc);

    const QualType BeginRefNonRefType = BeginType.getNonReferenceType();
    ExprResult BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
                                           VK_LValue, ColonLoc);
    if (BeginRef.isInvalid())
      return StmtError();

    ExprResult EndRef = BuildDeclRefExpr(EndVar, EndType.getNonReferenceType(),
                                         VK_LValue, ColonLoc);
    if (EndRef.isInvalid())
      return StmtError();

    // Build and check __begin != __end expression.
    NotEqExpr = ActOnBinOp(S, ColonLoc, tok::exclaimequal,
                           BeginRef.get(), EndRef.get());
    if (!NotEqExpr.isInvalid())
      NotEqExpr = CheckBooleanCondition(ColonLoc, NotEqExpr.get());
    if (!NotEqExpr.isInvalid())
      NotEqExpr =
          ActOnFinishFullExpr(NotEqExpr.get(), /*DiscardedValue*/ false);
    if (NotEqExpr.isInvalid()) {
      Diag(RangeLoc, diag::note_for_range_invalid_iterator)
          << RangeLoc << 0 << BeginRangeRef.get()->getType();
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      if (!Context.hasSameType(BeginType, EndType))
        NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
      return StmtError();
    }

    // Build and check ++__begin expression.
    BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType, VK_LValue,
                                ColonLoc);
    if (BeginRef.isInvalid())
      return StmtError();

    IncrExpr = ActOnUnaryOp(S, ColonLoc, tok::plusplus, BeginRef.get());
    if (!IncrExpr.isInvalid() && CoawaitLoc.isValid())
      // FIXME: getCurScope() should not be used during template instantiation.
      // We should pick up the set of unqualified lookup results for operator
      // co_await during the initial parse.
      IncrExpr = ActOnCoawaitExpr(S, CoawaitLoc, IncrExpr.get());
    if (!IncrExpr.isInvalid())
      IncrExpr = ActOnFinishFullExpr(IncrExpr.get(), /*DiscardedValue*/ false);
    if (IncrExpr.isInvalid()) {
      Diag(RangeLoc, diag::note_for_range_invalid_iterator)
          << RangeLoc << 2 << BeginRangeRef.get()->getType();
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      return StmtError();
    }

    // Build and check *__begin expression.
    BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType, VK_LValue,
                                ColonLoc);
    if (BeginRef.isInvalid())
      return StmtError();

    ExprResult DerefExpr = ActOnUnaryOp(S, ColonLoc, tok::star, BeginRef.get());
    if (DerefExpr.isInvalid()) {
      Diag(RangeLoc, diag::note_for_range_invalid_iterator)
          << RangeLoc << 1 << BeginRangeRef.get()->getType();
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      return StmtError();
    }

    // Attach *__begin as initializer for VD. Don't touch it if we're just
    // trying to determine whether this would be a valid range.
    if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check) {
      AddInitializerToDecl(LoopVar, DerefExpr.get(), /*DirectInit=*/false);
      if (LoopVar->isInvalidDecl())
        NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
    }
  }

  // Don't bother to actually allocate the result if we're just trying to
  // determine whether it would be valid.
  if (Kind == BFRK_Check)
    return StmtResult();

  // In OpenMP loop region loop control variable must be private. Perform
  // analysis of first part (if any).
  if (getLangOpts().OpenMP >= 50 && BeginDeclStmt.isUsable())
    ActOnOpenMPLoopInitialization(ForLoc, BeginDeclStmt.get());

  return new (Context) CXXForRangeStmt(
      InitStmt, RangeDS, cast_or_null<DeclStmt>(BeginDeclStmt.get()),
      cast_or_null<DeclStmt>(EndDeclStmt.get()), NotEqExpr.get(),
      IncrExpr.get(), LoopVarDS, /*Body=*/nullptr, ForLoc, CoawaitLoc,
      ColonLoc, RParenLoc);
}

/// FinishObjCForCollectionStmt - Attach the body to an Objective-C foreach
/// statement.
StmtResult Sema::FinishObjCForCollectionStmt(Stmt *S, Stmt *B) {
  if (!S || !B)
    return StmtError();
  ObjCForCollectionStmt * ForStmt = cast<ObjCForCollectionStmt>(S);

  ForStmt->setBody(B);
  return S;
}

// Warn when the loop variable is a const reference that creates a copy.
// Suggest using the non-reference type for copies.  If a copy can be prevented
// suggest the const reference type that would do so.
// For instance, given "for (const &Foo : Range)", suggest
// "for (const Foo : Range)" to denote a copy is made for the loop.  If
// possible, also suggest "for (const &Bar : Range)" if this type prevents
// the copy altogether.
static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
                                                    const VarDecl *VD,
                                                    QualType RangeInitType) {
  const Expr *InitExpr = VD->getInit();
  if (!InitExpr)
    return;

  QualType VariableType = VD->getType();

  // Look through a cleanup wrapper, as long as stripping it loses nothing.
  if (auto Cleanups = dyn_cast<ExprWithCleanups>(InitExpr))
    if (!Cleanups->cleanupsHaveSideEffects())
      InitExpr = Cleanups->getSubExpr();

  const MaterializeTemporaryExpr *MTE =
      dyn_cast<MaterializeTemporaryExpr>(InitExpr);

  // No copy made.
  if (!MTE)
    return;

  const Expr *E = MTE->getSubExpr()->IgnoreImpCasts();

  // Searching for either UnaryOperator for dereference of a pointer or
  // CXXOperatorCallExpr for handling iterators.
  while (!isa<CXXOperatorCallExpr>(E) && !isa<UnaryOperator>(E)) {
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(E)) {
      E = CCE->getArg(0);
    } else if (const CXXMemberCallExpr *Call = dyn_cast<CXXMemberCallExpr>(E)) {
      const MemberExpr *ME = cast<MemberExpr>(Call->getCallee());
      E = ME->getBase();
    } else {
      const MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E);
      E = MTE->getSubExpr();
    }
    E = E->IgnoreImpCasts();
  }

  // Did the operator we found return a reference?
  bool ReturnsReference = false;
  if (isa<UnaryOperator>(E)) {
    // Built-in pointer dereference always yields a reference-like lvalue.
    ReturnsReference = true;
  } else {
    const CXXOperatorCallExpr *Call = cast<CXXOperatorCallExpr>(E);
    const FunctionDecl *FD = Call->getDirectCallee();
    QualType ReturnType = FD->getReturnType();
    ReturnsReference = ReturnType->isReferenceType();
  }

  if (ReturnsReference) {
    // Loop variable creates a temporary.  Suggest either to go with
    // non-reference loop variable to indicate a copy is made, or
    // the correct time to bind a const reference.
    SemaRef.Diag(VD->getLocation(), diag::warn_for_range_const_reference_copy)
        << VD << VariableType << E->getType();
    QualType NonReferenceType = VariableType.getNonReferenceType();
    NonReferenceType.removeLocalConst();
    QualType NewReferenceType =
        SemaRef.Context.getLValueReferenceType(E->getType().withConst());
    SemaRef.Diag(VD->getBeginLoc(), diag::note_use_type_or_non_reference)
        << NonReferenceType << NewReferenceType << VD->getSourceRange()
        << FixItHint::CreateRemoval(VD->getTypeSpecEndLoc());
  } else if (!VariableType->isRValueReferenceType()) {
    // The range always returns a copy, so a temporary is always created.
    // Suggest removing the reference from the loop variable.
    // If the type is a rvalue reference do not warn since that changes the
    // semantic of the code.
    SemaRef.Diag(VD->getLocation(), diag::warn_for_range_variable_always_copy)
        << VD << RangeInitType;
    QualType NonReferenceType = VariableType.getNonReferenceType();
    NonReferenceType.removeLocalConst();
    SemaRef.Diag(VD->getBeginLoc(), diag::note_use_non_reference_type)
        << NonReferenceType << VD->getSourceRange()
        << FixItHint::CreateRemoval(VD->getTypeSpecEndLoc());
  }
}

/// Determines whether the @p VariableType's declaration is a record with the
/// clang::trivial_abi attribute.
static bool hasTrivialABIAttr(QualType VariableType) {
  if (CXXRecordDecl *RD = VariableType->getAsCXXRecordDecl())
    return RD->hasAttr<TrivialABIAttr>();

  return false;
}

// Warns when the loop variable can be changed to a reference type to
// prevent a copy.  For instance, if given "for (const Foo x : Range)" suggest
// "for (const Foo &x : Range)" if this form does not make a copy.
static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
                                                const VarDecl *VD) {
  const Expr *InitExpr = VD->getInit();
  if (!InitExpr)
    return;

  QualType VariableType = VD->getType();

  // Only warn when the initializer really is a copy: either a copy
  // constructor call or an lvalue-to-rvalue load.
  if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(InitExpr)) {
    if (!CE->getConstructor()->isCopyConstructor())
      return;
  } else if (const CastExpr *CE = dyn_cast<CastExpr>(InitExpr)) {
    if (CE->getCastKind() != CK_LValueToRValue)
      return;
  } else {
    return;
  }

  // Small trivially copyable types are cheap to copy. Do not emit the
  // diagnostic for these instances. 64 bytes is a common size of a cache line.
  // (The function `getTypeSize` returns the size in bits.)
  ASTContext &Ctx = SemaRef.Context;
  if (Ctx.getTypeSize(VariableType) <= 64 * 8 &&
      (VariableType.isTriviallyCopyableType(Ctx) ||
       hasTrivialABIAttr(VariableType)))
    return;

  // Suggest changing from a const variable to a const reference variable
  // if doing so will prevent a copy.
  // (Tail of DiagnoseForRangeConstVariableCopies: emit the warning plus a
  // fix-it inserting '&' to make the loop variable a const reference.)
  SemaRef.Diag(VD->getLocation(), diag::warn_for_range_copy)
      << VD << VariableType << InitExpr->getType();
  SemaRef.Diag(VD->getBeginLoc(), diag::note_use_reference_type)
      << SemaRef.Context.getLValueReferenceType(VariableType)
      << VD->getSourceRange()
      << FixItHint::CreateInsertion(VD->getLocation(), "&");
}

/// DiagnoseForRangeVariableCopies - Diagnose three cases and fixes for them.
/// 1) for (const foo &x : foos) where foos only returns a copy.  Suggest
///    using "const foo x" to show that a copy is made
/// 2) for (const bar &x : foos) where bar is a temporary initialized by bar.
///    Suggest either "const bar x" to keep the copying or "const foo& x" to
///    prevent the copy.
/// 3) for (const foo x : foos) where x is constructed from a reference foo.
///    Suggest "const foo &x" to prevent the copy.
static void DiagnoseForRangeVariableCopies(Sema &SemaRef,
                                           const CXXForRangeStmt *ForStmt) {
  if (SemaRef.inTemplateInstantiation())
    return;

  // Bail out early if all three related warnings are disabled here.
  if (SemaRef.Diags.isIgnored(diag::warn_for_range_const_reference_copy,
                              ForStmt->getBeginLoc()) &&
      SemaRef.Diags.isIgnored(diag::warn_for_range_variable_always_copy,
                              ForStmt->getBeginLoc()) &&
      SemaRef.Diags.isIgnored(diag::warn_for_range_copy,
                              ForStmt->getBeginLoc())) {
    return;
  }

  const VarDecl *VD = ForStmt->getLoopVariable();
  if (!VD)
    return;

  QualType VariableType = VD->getType();

  if (VariableType->isIncompleteType())
    return;

  const Expr *InitExpr = VD->getInit();
  if (!InitExpr)
    return;

  // Don't second-guess code expanded from a macro.
  if (InitExpr->getExprLoc().isMacroID())
    return;

  if (VariableType->isReferenceType()) {
    DiagnoseForRangeReferenceVariableCopies(SemaRef, VD,
                                            ForStmt->getRangeInit()->getType());
  } else if (VariableType.isConstQualified()) {
    DiagnoseForRangeConstVariableCopies(SemaRef, VD);
  }
}

/// FinishCXXForRangeStmt - Attach the body to a C++0x for-range statement.
/// This is a separate step from ActOnCXXForRangeStmt because analysis of the
/// body cannot be performed until after the type of the range variable is
/// determined.
StmtResult Sema::FinishCXXForRangeStmt(Stmt *S, Stmt *B) {
  if (!S || !B)
    return StmtError();

  if (isa<ObjCForCollectionStmt>(S))
    return FinishObjCForCollectionStmt(S, B);

  CXXForRangeStmt *ForStmt = cast<CXXForRangeStmt>(S);
  ForStmt->setBody(B);

  DiagnoseEmptyStmtBody(ForStmt->getRParenLoc(), B,
                        diag::warn_empty_range_based_for_body);

  DiagnoseForRangeVariableCopies(*this, ForStmt);

  return S;
}

StmtResult Sema::ActOnGotoStmt(SourceLocation GotoLoc,
                               SourceLocation LabelLoc,
                               LabelDecl *TheDecl) {
  setFunctionHasBranchIntoScope();
  TheDecl->markUsed(Context);
  return new (Context) GotoStmt(TheDecl, GotoLoc, LabelLoc);
}

StmtResult
Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
                            Expr *E) {
  // Convert operand to void*
  if (!E->isTypeDependent()) {
    QualType ETy = E->getType();
    QualType DestTy = Context.getPointerType(Context.VoidTy.withConst());
    ExprResult ExprRes = E;
    AssignConvertType ConvTy =
      CheckSingleAssignmentConstraints(DestTy, ExprRes);
    if (ExprRes.isInvalid())
      return StmtError();
    E = ExprRes.get();
    if (DiagnoseAssignmentResult(ConvTy, StarLoc, DestTy, ETy, E, AA_Passing))
      return StmtError();
  }

  ExprResult ExprRes = ActOnFinishFullExpr(E, /*DiscardedValue*/ false);
  if (ExprRes.isInvalid())
    return StmtError();
  E = ExprRes.get();

  setFunctionHasIndirectGoto();

  return new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E);
}

/// Warn if a jump (break/continue) would leave the body of an active
/// SEH __finally block.
static void CheckJumpOutOfSEHFinally(Sema &S, SourceLocation Loc,
                                     const Scope &DestScope) {
  if (!S.CurrentSEHFinally.empty() &&
      DestScope.Contains(*S.CurrentSEHFinally.back())) {
    S.Diag(Loc, diag::warn_jump_out_of_seh_finally);
  }
}

StmtResult
Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) {
  Scope *S = CurScope->getContinueParent();
  if (!S) {
    // C99 6.8.6.2p1: A continue statement shall appear only in or as a loop
    // body.
    return StmtError(Diag(ContinueLoc, diag::err_continue_not_in_loop));
  }
  CheckJumpOutOfSEHFinally(*this, ContinueLoc, *S);

  return new (Context) ContinueStmt(ContinueLoc);
}

StmtResult
Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
  Scope *S = CurScope->getBreakParent();
  if (!S) {
    // C99 6.8.6.3p1: A break shall appear only in or as a switch/loop body.
    return StmtError(Diag(BreakLoc, diag::err_break_not_in_loop_or_switch));
  }
  if (S->isOpenMPLoopScope())
    return StmtError(Diag(BreakLoc, diag::err_omp_loop_cannot_use_stmt)
                     << "break");
  CheckJumpOutOfSEHFinally(*this, BreakLoc, *S);

  return new (Context) BreakStmt(BreakLoc);
}

/// Determine whether the given expression is a candidate for
/// copy elision in either a return statement or a throw expression.
///
/// \param ReturnType If we're determining the copy elision candidate for
/// a return statement, this is the return type of the function. If we're
/// determining the copy elision candidate for a throw expression, this will
/// be a NULL type.
///
/// \param E The expression being returned from the function or block, or
/// being thrown.
///
/// \param CESK Whether we allow function parameters or
/// id-expressions that could be moved out of the function to be considered NRVO
/// candidates. C++ prohibits these for NRVO itself, but we re-use this logic to
/// determine whether we should try to move as part of a return or throw (which
/// does allow function parameters).
///
/// \returns The NRVO candidate variable, if the return statement may use the
/// NRVO, or NULL if there is no such candidate.
VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType, Expr *E,
                                       CopyElisionSemanticsKind CESK) {
  // - in a return statement in a function [where] ...
  // ... the expression is the name of a non-volatile automatic object ...
  // (Tail of Sema::getCopyElisionCandidate: the expression must be a plain
  // DeclRefExpr naming a local VarDecl, not a captured/enclosing variable.)
  DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
  if (!DR || DR->refersToEnclosingVariableOrCapture())
    return nullptr;
  VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
  if (!VD)
    return nullptr;

  if (isCopyElisionCandidate(ReturnType, VD, CESK))
    return VD;
  return nullptr;
}

bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
                                  CopyElisionSemanticsKind CESK) {
  QualType VDType = VD->getType();
  // - in a return statement in a function with ...
  // ... a class return type ...
  if (!ReturnType.isNull() && !ReturnType->isDependentType()) {
    if (!ReturnType->isRecordType())
      return false;
    // ... the same cv-unqualified type as the function return type ...
    // When considering moving this expression out, allow dissimilar types.
    if (!(CESK & CES_AllowDifferentTypes) && !VDType->isDependentType() &&
        !Context.hasSameUnqualifiedType(ReturnType, VDType))
      return false;
  }

  // ...object (other than a function or catch-clause parameter)...
  if (VD->getKind() != Decl::Var &&
      !((CESK & CES_AllowParameters) && VD->getKind() == Decl::ParmVar))
    return false;
  if (!(CESK & CES_AllowExceptionVariables) && VD->isExceptionVariable())
    return false;

  // ...automatic...
  if (!VD->hasLocalStorage())
    return false;

  // Return false if VD is a __block variable. We don't want to implicitly move
  // out of a __block variable during a return because we cannot assume the
  // variable will no longer be used.
  if (VD->hasAttr<BlocksAttr>())
    return false;

  if (CESK & CES_AllowDifferentTypes)
    return true;

  // ...non-volatile...
  if (VD->getType().isVolatileQualified())
    return false;

  // Variables with higher required alignment than their type's ABI
  // alignment cannot use NRVO.
  if (!VD->getType()->isDependentType() && VD->hasAttr<AlignedAttr>() &&
      Context.getDeclAlign(VD) > Context.getTypeAlignInChars(VD->getType()))
    return false;

  return true;
}

/// Try to perform the initialization of a potentially-movable value,
/// which is the operand to a return or throw statement.
///
/// This routine implements C++14 [class.copy]p32, which attempts to treat
/// returned lvalues as rvalues in certain cases (to prefer move construction),
/// then falls back to treating them as lvalues if that failed.
///
/// \param ConvertingConstructorsOnly If true, follow [class.copy]p32 and reject
/// resolutions that find non-constructors, such as derived-to-base conversions
/// or `operator T()&&` member functions. If false, do consider such
/// conversion sequences.
///
/// \param Res We will fill this in if move-initialization was possible.
/// If move-initialization is not possible, such that we must fall back to
/// treating the operand as an lvalue, we will leave Res in its original
/// invalid state.
static void TryMoveInitialization(Sema& S, const InitializedEntity &Entity,
                                  const VarDecl *NRVOCandidate,
                                  QualType ResultType, Expr *&Value,
                                  bool ConvertingConstructorsOnly,
                                  ExprResult &Res) {
  // Cast the operand to an xvalue on the stack first; it is only promoted to
  // the heap below, once we know move-initialization actually applies.
  ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack, Value->getType(),
                            CK_NoOp, Value, VK_XValue);

  Expr *InitExpr = &AsRvalue;

  InitializationKind Kind = InitializationKind::CreateCopy(
      Value->getBeginLoc(), Value->getBeginLoc());

  InitializationSequence Seq(S, Entity, Kind, InitExpr);

  if (!Seq)
    return;

  for (const InitializationSequence::Step &Step : Seq.steps()) {
    // Only constructor calls and user conversions are of interest here.
    if (Step.Kind != InitializationSequence::SK_ConstructorInitialization &&
        Step.Kind != InitializationSequence::SK_UserConversion)
      continue;

    FunctionDecl *FD = Step.Function.Function;
    if (ConvertingConstructorsOnly) {
      if (isa<CXXConstructorDecl>(FD)) {
        // C++14 [class.copy]p32:
        // [...] If the first overload resolution fails or was not performed,
        // or if the type of the first parameter of the selected constructor
        // is not an rvalue reference to the object's type (possibly
        // cv-qualified), overload resolution is performed again, considering
        // the object as an lvalue.
        const RValueReferenceType *RRefType =
            FD->getParamDecl(0)->getType()->getAs<RValueReferenceType>();
        if (!RRefType)
          break;
        if (!S.Context.hasSameUnqualifiedType(RRefType->getPointeeType(),
                                              NRVOCandidate->getType()))
          break;
      } else {
        continue;
      }
    } else {
      if (isa<CXXConstructorDecl>(FD)) {
        // Check that overload resolution selected a constructor taking an
        // rvalue reference. If it selected an lvalue reference, then we
        // didn't need to cast this thing to an rvalue in the first place.
        if (!isa<RValueReferenceType>(FD->getParamDecl(0)->getType()))
          break;
      } else if (isa<CXXMethodDecl>(FD)) {
        // Check that overload resolution selected a conversion operator
        // taking an rvalue reference.
        if (cast<CXXMethodDecl>(FD)->getRefQualifier() != RQ_RValue)
          break;
      } else {
        continue;
      }
    }

    // Promote "AsRvalue" to the heap, since we now need this
    // expression node to persist.
    Value = ImplicitCastExpr::Create(S.Context, Value->getType(), CK_NoOp,
                                     Value, nullptr, VK_XValue);

    // Complete type-checking the initialization of the return type
    // using the constructor we found.
    Res = Seq.Perform(S, Entity, Kind, Value);
  }
}

/// Perform the initialization of a potentially-movable value, which
/// is the result of return value.
///
/// This routine implements C++14 [class.copy]p32, which attempts to treat
/// returned lvalues as rvalues in certain cases (to prefer move construction),
/// then falls back to treating them as lvalues if that failed.
ExprResult
Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                      const VarDecl *NRVOCandidate,
                                      QualType ResultType, Expr *Value,
                                      bool AllowNRVO) {
  // C++14 [class.copy]p32:
  // When the criteria for elision of a copy/move operation are met, but not for
  // an exception-declaration, and the object to be copied is designated by an
  // lvalue, or when the expression in a return statement is a (possibly
  // parenthesized) id-expression that names an object with automatic storage
  // duration declared in the body or parameter-declaration-clause of the
  // innermost enclosing function or lambda-expression, overload resolution to
  // select the constructor for the copy is first performed as if the object
  // were designated by an rvalue.
  ExprResult Res = ExprError();

  if (AllowNRVO) {
    bool AffectedByCWG1579 = false;

    if (!NRVOCandidate) {
      NRVOCandidate = getCopyElisionCandidate(ResultType, Value, CES_Default);
      if (NRVOCandidate &&
          !getDiagnostics().isIgnored(diag::warn_return_std_move_in_cxx11,
                                      Value->getExprLoc())) {
        // Also compute the candidate under the stricter pre-CWG1579 (C++11)
        // rules so we can warn when the implicit move only happens post-1579.
        const VarDecl *NRVOCandidateInCXX11 =
            getCopyElisionCandidate(ResultType, Value, CES_FormerDefault);
        AffectedByCWG1579 = (!NRVOCandidateInCXX11);
      }
    }

    if (NRVOCandidate) {
      TryMoveInitialization(*this, Entity, NRVOCandidate, ResultType, Value,
                            true, Res);
    }

    if (!Res.isInvalid() && AffectedByCWG1579) {
      QualType QT = NRVOCandidate->getType();
      if (QT.getNonReferenceType()
              .getUnqualifiedType()
              .isTriviallyCopyableType(Context)) {
        // Adding 'std::move' around a trivially copyable variable is probably
        // pointless. Don't suggest it.
      } else {
        // Common cases for this are returning unique_ptr<Derived> from a
        // function of return type unique_ptr<Base>, or returning T from a
        // function of return type Expected<T>. This is totally fine in a
        // post-CWG1579 world, but was not fine before.
        assert(!ResultType.isNull());
        SmallString<32> Str;
        Str += "std::move(";
        Str += NRVOCandidate->getDeclName().getAsString();
        Str += ")";
        Diag(Value->getExprLoc(), diag::warn_return_std_move_in_cxx11)
            << Value->getSourceRange()
            << NRVOCandidate->getDeclName() << ResultType << QT;
        Diag(Value->getExprLoc(), diag::note_add_std_move_in_cxx11)
            << FixItHint::CreateReplacement(Value->getSourceRange(), Str);
      }
    } else if (Res.isInvalid() &&
               !getDiagnostics().isIgnored(diag::warn_return_std_move,
                                           Value->getExprLoc())) {
      // The implicit move failed; see whether an explicit std::move would
      // have succeeded (allowing non-constructor conversions), and if so
      // suggest it via -Wreturn-std-move.
      const VarDecl *FakeNRVOCandidate =
          getCopyElisionCandidate(QualType(), Value, CES_AsIfByStdMove);
      if (FakeNRVOCandidate) {
        QualType QT = FakeNRVOCandidate->getType();
        if (QT->isLValueReferenceType()) {
          // Adding 'std::move' around an lvalue reference variable's name is
          // dangerous. Don't suggest it.
        } else if (QT.getNonReferenceType()
                       .getUnqualifiedType()
                       .isTriviallyCopyableType(Context)) {
          // Adding 'std::move' around a trivially copyable variable is
          // probably pointless. Don't suggest it.
        } else {
          ExprResult FakeRes = ExprError();
          Expr *FakeValue = Value;
          TryMoveInitialization(*this, Entity, FakeNRVOCandidate, ResultType,
                                FakeValue, false, FakeRes);
          if (!FakeRes.isInvalid()) {
            bool IsThrow =
                (Entity.getKind() == InitializedEntity::EK_Exception);
            SmallString<32> Str;
            Str += "std::move(";
            Str += FakeNRVOCandidate->getDeclName().getAsString();
            Str += ")";
            Diag(Value->getExprLoc(), diag::warn_return_std_move)
                << Value->getSourceRange()
                << FakeNRVOCandidate->getDeclName() << IsThrow;
            Diag(Value->getExprLoc(), diag::note_add_std_move)
                << FixItHint::CreateReplacement(Value->getSourceRange(), Str);
          }
        }
      }
    }
  }

  // Either we didn't meet the criteria for treating an lvalue as an rvalue,
  // above, or overload resolution failed. Either way, we need to try
  // (again) now with the return value expression as written.
  if (Res.isInvalid())
    Res = PerformCopyInitialization(Entity, SourceLocation(), Value);

  return Res;
}

/// Determine whether the declared return type of the specified function
/// contains 'auto'.
static bool hasDeducedReturnType(FunctionDecl *FD) {
  const FunctionProtoType *FPT =
      FD->getTypeSourceInfo()->getType()->castAs<FunctionProtoType>();
  return FPT->getReturnType()->isUndeducedType();
}

/// ActOnCapScopeReturnStmt - Utility routine to type-check return statements
/// for capturing scopes.
///
StmtResult
Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
  // If this is the first return we've seen, infer the return type.
  // [expr.prim.lambda]p4 in C++11; block literals follow the same rules.
  CapturingScopeInfo *CurCap = cast<CapturingScopeInfo>(getCurFunction());
  QualType FnRetType = CurCap->ReturnType;
  LambdaScopeInfo *CurLambda = dyn_cast<LambdaScopeInfo>(CurCap);
  bool HasDeducedReturnType =
      CurLambda && hasDeducedReturnType(CurLambda->CallOperator);

  // A return inside a discarded statement does not take part in return type
  // deduction; just finish the operand (if any) and build the statement.
  if (ExprEvalContexts.back().Context ==
          ExpressionEvaluationContext::DiscardedStatement &&
      (HasDeducedReturnType || CurCap->HasImplicitReturnType)) {
    if (RetValExp) {
      ExprResult ER =
          ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false);
      if (ER.isInvalid())
        return StmtError();
      RetValExp = ER.get();
    }
    return ReturnStmt::Create(Context, ReturnLoc, RetValExp,
                              /* NRVOCandidate=*/nullptr);
  }

  if (HasDeducedReturnType) {
    // In C++1y, the return type may involve 'auto'.
    // FIXME: Blocks might have a return type of 'auto' explicitly specified.
    FunctionDecl *FD = CurLambda->CallOperator;
    if (CurCap->ReturnType.isNull())
      CurCap->ReturnType = FD->getReturnType();

    AutoType *AT = CurCap->ReturnType->getContainedAutoType();
    assert(AT && "lost auto type from lambda return type");
    if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
      FD->setInvalidDecl();
      return StmtError();
    }
    CurCap->ReturnType = FnRetType = FD->getReturnType();
  } else if (CurCap->HasImplicitReturnType) {
    // For blocks/lambdas with implicit return types, we check each return
    // statement individually, and deduce the common return type when the block
    // or lambda is completed.
    // FIXME: Fold this into the 'auto' codepath above.
    if (RetValExp && !isa<InitListExpr>(RetValExp)) {
      ExprResult Result = DefaultFunctionArrayLvalueConversion(RetValExp);
      if (Result.isInvalid())
        return StmtError();
      RetValExp = Result.get();

      // DR1048: even prior to C++14, we should use the 'auto' deduction rules
      // when deducing a return type for a lambda-expression (or by extension
      // for a block). These rules differ from the stated C++11 rules only in
      // that they remove top-level cv-qualifiers.
      if (!CurContext->isDependentContext())
        FnRetType = RetValExp->getType().getUnqualifiedType();
      else
        FnRetType = CurCap->ReturnType = Context.DependentTy;
    } else {
      if (RetValExp) {
        // C++11 [expr.lambda.prim]p4 bans inferring the result from an
        // initializer list, because it is not an expression (even
        // though we represent it as one). We still deduce 'void'.
        Diag(ReturnLoc, diag::err_lambda_return_init_list)
            << RetValExp->getSourceRange();
      }

      FnRetType = Context.VoidTy;
    }

    // Although we'll properly infer the type of the block once it's completed,
    // make sure we provide a return type now for better error recovery.
    if (CurCap->ReturnType.isNull())
      CurCap->ReturnType = FnRetType;
  }
  assert(!FnRetType.isNull());

  // Reject returns that are outright invalid for this kind of capturing
  // scope (noreturn blocks/lambdas, captured statement regions).
  if (auto *CurBlock = dyn_cast<BlockScopeInfo>(CurCap)) {
    if (CurBlock->FunctionType->castAs<FunctionType>()->getNoReturnAttr()) {
      Diag(ReturnLoc, diag::err_noreturn_block_has_return_expr);
      return StmtError();
    }
  } else if (auto *CurRegion = dyn_cast<CapturedRegionScopeInfo>(CurCap)) {
    Diag(ReturnLoc, diag::err_return_in_captured_stmt)
        << CurRegion->getRegionName();
    return StmtError();
  } else {
    assert(CurLambda && "unknown kind of captured scope");
    if (CurLambda->CallOperator->getType()
            ->castAs<FunctionType>()
            ->getNoReturnAttr()) {
      Diag(ReturnLoc, diag::err_noreturn_lambda_has_return_expr);
      return StmtError();
    }
  }

  // Otherwise, verify that this result type matches the previous one. We are
  // pickier with blocks than for normal functions because we don't have GCC
  // compatibility to worry about here.
  const VarDecl *NRVOCandidate = nullptr;
  if (FnRetType->isDependentType()) {
    // Delay processing for now. TODO: there are lots of dependent
    // types we can conclusively prove aren't void.
  } else if (FnRetType->isVoidType()) {
    if (RetValExp && !isa<InitListExpr>(RetValExp) &&
        !(getLangOpts().CPlusPlus &&
          (RetValExp->isTypeDependent() ||
           RetValExp->getType()->isVoidType()))) {
      if (!getLangOpts().CPlusPlus &&
          RetValExp->getType()->isVoidType())
        Diag(ReturnLoc, diag::ext_return_has_void_expr) << "literal" << 2;
      else {
        Diag(ReturnLoc, diag::err_return_block_has_expr);
        RetValExp = nullptr;
      }
    }
  } else if (!RetValExp) {
    return StmtError(Diag(ReturnLoc, diag::err_block_return_missing_expr));
  } else if (!RetValExp->isTypeDependent()) {
    // we have a non-void block with an expression, continue checking

    // C99 6.8.6.4p3(136): The return statement is not an assignment. The
    // overlap restriction of subclause 6.5.16.1 does not apply to the case of
    // function return.

    // In C++ the return statement is handled via a copy initialization.
    // the C version of which boils down to CheckSingleAssignmentConstraints.
    NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, CES_Strict);
    InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
                                                                   FnRetType,
                                                      NRVOCandidate != nullptr);
    ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
                                                     FnRetType, RetValExp);
    if (Res.isInvalid()) {
      // FIXME: Cleanup temporaries here, anyway?
      return StmtError();
    }
    RetValExp = Res.get();
    CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc);
  } else {
    NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, CES_Strict);
  }

  if (RetValExp) {
    ExprResult ER =
        ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false);
    if (ER.isInvalid())
      return StmtError();
    RetValExp = ER.get();
  }
  auto *Result =
      ReturnStmt::Create(Context, ReturnLoc, RetValExp, NRVOCandidate);

  // If we need to check for the named return value optimization,
  // or if we need to infer the return type,
  // save the return statement in our scope for later processing.
  if (CurCap->HasImplicitReturnType || NRVOCandidate)
    FunctionScopes.back()->Returns.push_back(Result);

  if (FunctionScopes.back()->FirstReturnLoc.isInvalid())
    FunctionScopes.back()->FirstReturnLoc = ReturnLoc;

  return Result;
}

namespace {
/// Marks all typedefs in all local classes in a type referenced.
///
/// In a function like
/// auto f() {
///   struct S { typedef int a; };
///   return S();
/// }
///
/// the local type escapes and could be referenced in some TUs but not in
/// others. Pretend that all local typedefs are always referenced, to not warn
/// on this. This isn't necessary if f has internal linkage, or the typedef
/// is private.
class LocalTypedefNameReferencer
    : public RecursiveASTVisitor<LocalTypedefNameReferencer> {
public:
  LocalTypedefNameReferencer(Sema &S) : S(S) {}
  bool VisitRecordType(const RecordType *RT);
private:
  Sema &S;
};
bool LocalTypedefNameReferencer::VisitRecordType(const RecordType *RT) {
  auto *R = dyn_cast<CXXRecordDecl>(RT->getDecl());
  // Only externally-visible local classes matter: any other local class
  // cannot leak its typedefs to another translation unit.
  if (!R || !R->isLocalClass() || !R->isLocalClass()->isExternallyVisible() ||
      R->isDependentType())
    return true;
  for (auto *TmpD : R->decls())
    if (auto *T = dyn_cast<TypedefNameDecl>(TmpD))
      if (T->getAccess() != AS_private || R->hasFriends())
        S.MarkAnyDeclReferenced(T->getLocation(), T, /*OdrUse=*/false);
  return true;
}
}

TypeLoc Sema::getReturnTypeLoc(FunctionDecl *FD) const {
  return FD->getTypeSourceInfo()
      ->getTypeLoc()
      .getAsAdjusted<FunctionProtoTypeLoc>()
      .getReturnLoc();
}

/// Deduce the return type for a function from a returned expression, per
/// C++1y [dcl.spec.auto]p6.
bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                            SourceLocation ReturnLoc,
                                            Expr *&RetExpr, AutoType *AT) {
  // If this is the conversion function for a lambda, we choose to deduce it
  // type from the corresponding call operator, not from the synthesized return
  // statement within it. See Sema::DeduceReturnType.
  if (isLambdaConversionOperator(FD))
    return false;

  TypeLoc OrigResultType = getReturnTypeLoc(FD);
  QualType Deduced;

  if (RetExpr && isa<InitListExpr>(RetExpr)) {
    // If the deduction is for a return statement and the initializer is
    // a braced-init-list, the program is ill-formed.
    Diag(RetExpr->getExprLoc(),
         getCurLambda() ? diag::err_lambda_return_init_list
                        : diag::err_auto_fn_return_init_list)
        << RetExpr->getSourceRange();
    return true;
  }

  if (FD->isDependentContext()) {
    // C++1y [dcl.spec.auto]p12:
    //   Return type deduction [...] occurs when the definition is
    //   instantiated even if the function body contains a return
    //   statement with a non-type-dependent operand.
    assert(AT->isDeduced() && "should have deduced to dependent type");
    return false;
  }

  if (RetExpr) {
    // Otherwise, [...] deduce a value for U using the rules of template
    // argument deduction.
    DeduceAutoResult DAR = DeduceAutoType(OrigResultType, RetExpr, Deduced);

    if (DAR == DAR_Failed && !FD->isInvalidDecl())
      Diag(RetExpr->getExprLoc(), diag::err_auto_fn_deduction_failure)
          << OrigResultType.getType() << RetExpr->getType();

    if (DAR != DAR_Succeeded)
      return true;

    // If a local type is part of the returned type, mark its fields as
    // referenced.
    LocalTypedefNameReferencer Referencer(*this);
    Referencer.TraverseType(RetExpr->getType());
  } else {
    // In the case of a return with no operand, the initializer is considered
    // to be void().
    //
    // Deduction here can only succeed if the return type is exactly 'cv auto'
    // or 'decltype(auto)', so just check for that case directly.
    if (!OrigResultType.getType()->getAs<AutoType>()) {
      Diag(ReturnLoc, diag::err_auto_fn_return_void_but_not_auto)
          << OrigResultType.getType();
      return true;
    }
    // We always deduce U = void in this case.
    Deduced = SubstAutoType(OrigResultType.getType(), Context.VoidTy);
    if (Deduced.isNull())
      return true;
  }

  // CUDA: Kernel function must have 'void' return type.
  if (getLangOpts().CUDA)
    if (FD->hasAttr<CUDAGlobalAttr>() && !Deduced->isVoidType()) {
      Diag(FD->getLocation(), diag::err_kern_type_not_void_return)
          << FD->getType() << FD->getSourceRange();
      return true;
    }

  // If a function with a declared return type that contains a placeholder type
  // has multiple return statements, the return type is deduced for each return
  // statement. [...] if the type deduced is not the same in each deduction,
  // the program is ill-formed.
  QualType DeducedT = AT->getDeducedType();
  if (!DeducedT.isNull() && !FD->isInvalidDecl()) {
    AutoType *NewAT = Deduced->getContainedAutoType();
    // It is possible that NewAT->getDeducedType() is null. When that happens,
    // we should not crash, instead we ignore this deduction.
    if (NewAT->getDeducedType().isNull())
      return false;

    CanQualType OldDeducedType = Context.getCanonicalFunctionResultType(
                                   DeducedT);
    CanQualType NewDeducedType = Context.getCanonicalFunctionResultType(
                                   NewAT->getDeducedType());
    if (!FD->isDependentContext() && OldDeducedType != NewDeducedType) {
      const LambdaScopeInfo *LambdaSI = getCurLambda();
      if (LambdaSI && LambdaSI->HasImplicitReturnType) {
        Diag(ReturnLoc, diag::err_typecheck_missing_return_type_incompatible)
            << NewAT->getDeducedType() << DeducedT
            << true /*IsLambda*/;
      } else {
        Diag(ReturnLoc, diag::err_auto_fn_different_deductions)
            << (AT->isDecltypeAuto() ? 1 : 0)
            << NewAT->getDeducedType() << DeducedT;
      }
      return true;
    }
  } else if (!FD->isInvalidDecl()) {
    // Update all declarations of the function to have the deduced return type.
    Context.adjustDeducedFunctionResultType(FD, Deduced);
  }

  return false;
}

StmtResult
Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                      Scope *CurScope) {
  // Correct typos, in case the containing function returns 'auto' and
  // RetValExp should determine the deduced type.
  ExprResult RetVal = CorrectDelayedTyposInExpr(RetValExp);
  if (RetVal.isInvalid())
    return StmtError();
  StmtResult R = BuildReturnStmt(ReturnLoc, RetVal.get());
  if (R.isInvalid() || ExprEvalContexts.back().Context ==
                           ExpressionEvaluationContext::DiscardedStatement)
    return R;

  // Record the NRVO candidate (or its absence) on the enclosing scope so
  // later analysis can decide whether the optimization actually applies.
  if (VarDecl *VD =
      const_cast<VarDecl*>(cast<ReturnStmt>(R.get())->getNRVOCandidate())) {
    CurScope->addNRVOCandidate(VD);
  } else {
    CurScope->setNoNRVO();
  }

  CheckJumpOutOfSEHFinally(*this, ReturnLoc, *CurScope->getFnParent());

  return R;
}

StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
  // Check for unexpanded parameter packs.
  if (RetValExp && DiagnoseUnexpandedParameterPack(RetValExp))
    return StmtError();

  // Returns inside a lambda, block, or captured statement are checked by the
  // capturing-scope-specific routine.
  if (isa<CapturingScopeInfo>(getCurFunction()))
    return ActOnCapScopeReturnStmt(ReturnLoc, RetValExp);

  QualType FnRetType;
  QualType RelatedRetType;
  const AttrVec *Attrs = nullptr;
  bool isObjCMethod = false;

  if (const FunctionDecl *FD = getCurFunctionDecl()) {
    FnRetType = FD->getReturnType();
    if (FD->hasAttrs())
      Attrs = &FD->getAttrs();
    if (FD->isNoReturn())
      Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr)
          << FD->getDeclName();
    if (FD->isMain() && RetValExp)
      if (isa<CXXBoolLiteralExpr>(RetValExp))
        Diag(ReturnLoc, diag::warn_main_returns_bool_literal)
            << RetValExp->getSourceRange();
  } else if (ObjCMethodDecl *MD = getCurMethodDecl()) {
    FnRetType = MD->getReturnType();
    isObjCMethod = true;
    if (MD->hasAttrs())
      Attrs = &MD->getAttrs();
    if (MD->hasRelatedResultType() && MD->getClassInterface()) {
      // In the implementation of a method with a related return type, the
      // type used to type-check the validity of return statements within the
      // method body is a pointer to the type of the class being implemented.
      RelatedRetType = Context.getObjCInterfaceType(MD->getClassInterface());
      RelatedRetType = Context.getObjCObjectPointerType(RelatedRetType);
    }
  } else // If we don't have a function/method context, bail.
    return StmtError();

  // C++1z: discarded return statements are not considered when deducing a
  // return type.
  if (ExprEvalContexts.back().Context ==
          ExpressionEvaluationContext::DiscardedStatement &&
      FnRetType->getContainedAutoType()) {
    if (RetValExp) {
      ExprResult ER =
          ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false);
      if (ER.isInvalid())
        return StmtError();
      RetValExp = ER.get();
    }
    return ReturnStmt::Create(Context, ReturnLoc, RetValExp,
                              /* NRVOCandidate=*/nullptr);
  }

  // FIXME: Add a flag to the ScopeInfo to indicate whether we're performing
  // deduction.
  if (getLangOpts().CPlusPlus14) {
    if (AutoType *AT = FnRetType->getContainedAutoType()) {
      FunctionDecl *FD = cast<FunctionDecl>(CurContext);
      if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
        FD->setInvalidDecl();
        return StmtError();
      } else {
        FnRetType = FD->getReturnType();
      }
    }
  }

  bool HasDependentReturnType = FnRetType->isDependentType();

  ReturnStmt *Result = nullptr;
  if (FnRetType->isVoidType()) {
    if (RetValExp) {
      if (isa<InitListExpr>(RetValExp)) {
        // We simply never allow init lists as the return value of void
        // functions. This is compatible because this was never allowed before,
        // so there's no legacy code to deal with.
        NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
        int FunctionKind = 0;
        if (isa<ObjCMethodDecl>(CurDecl))
          FunctionKind = 1;
        else if (isa<CXXConstructorDecl>(CurDecl))
          FunctionKind = 2;
        else if (isa<CXXDestructorDecl>(CurDecl))
          FunctionKind = 3;

        Diag(ReturnLoc, diag::err_return_init_list)
            << CurDecl->getDeclName() << FunctionKind
            << RetValExp->getSourceRange();

        // Drop the expression.
        RetValExp = nullptr;
      } else if (!RetValExp->isTypeDependent()) {
        // C99 6.8.6.4p1 (ext_ since GCC warns)
        unsigned D = diag::ext_return_has_expr;
        if (RetValExp->getType()->isVoidType()) {
          NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
          if (isa<CXXConstructorDecl>(CurDecl) ||
              isa<CXXDestructorDecl>(CurDecl))
            D = diag::err_ctor_dtor_returns_void;
          else
            D = diag::ext_return_has_void_expr;
        } else {
          ExprResult Result = RetValExp;
          Result = IgnoredValueConversions(Result.get());
          if (Result.isInvalid())
            return StmtError();
          RetValExp = Result.get();
          RetValExp = ImpCastExprToType(RetValExp,
                                        Context.VoidTy, CK_ToVoid).get();
        }
        // return of void in constructor/destructor is illegal in C++.
        if (D == diag::err_ctor_dtor_returns_void) {
          NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
          Diag(ReturnLoc, D) << CurDecl->getDeclName()
                             << isa<CXXDestructorDecl>(CurDecl)
                             << RetValExp->getSourceRange();
        }
        // return (some void expression); is legal in C++.
        else if (D != diag::ext_return_has_void_expr ||
                 !getLangOpts().CPlusPlus) {
          NamedDecl *CurDecl = getCurFunctionOrMethodDecl();

          int FunctionKind = 0;
          if (isa<ObjCMethodDecl>(CurDecl))
            FunctionKind = 1;
          else if (isa<CXXConstructorDecl>(CurDecl))
            FunctionKind = 2;
          else if (isa<CXXDestructorDecl>(CurDecl))
            FunctionKind = 3;

          Diag(ReturnLoc, D)
              << CurDecl->getDeclName() << FunctionKind
              << RetValExp->getSourceRange();
        }
      }

      if (RetValExp) {
        ExprResult ER =
            ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false);
        if (ER.isInvalid())
          return StmtError();
        RetValExp = ER.get();
      }
    }

    Result = ReturnStmt::Create(Context, ReturnLoc, RetValExp,
                                /* NRVOCandidate=*/nullptr);
  } else if (!RetValExp && !HasDependentReturnType) {
    // Non-void function with a bare 'return;'.
    FunctionDecl *FD = getCurFunctionDecl();

    unsigned DiagID;
    if (getLangOpts().CPlusPlus11 && FD && FD->isConstexpr()) {
      // C++11 [stmt.return]p2
      DiagID = diag::err_constexpr_return_missing_expr;
      FD->setInvalidDecl();
    } else if (getLangOpts().C99) {
      // C99 6.8.6.4p1 (ext_ since GCC warns)
      DiagID = diag::ext_return_missing_expr;
    } else {
      // C90 6.6.6.4p4
      DiagID = diag::warn_return_missing_expr;
    }

    if (FD)
      Diag(ReturnLoc, DiagID)
          << FD->getIdentifier() << 0 /*fn*/ << FD->isConsteval();
    else
      Diag(ReturnLoc, DiagID) << getCurMethodDecl()->getDeclName() << 1/*meth*/;

    Result = ReturnStmt::Create(Context, ReturnLoc, /* RetExpr=*/nullptr,
                                /* NRVOCandidate=*/nullptr);
  } else {
    assert(RetValExp || HasDependentReturnType);
    const VarDecl *NRVOCandidate = nullptr;

    QualType RetType = RelatedRetType.isNull() ? FnRetType : RelatedRetType;

    // C99 6.8.6.4p3(136): The return statement is not an assignment. The
    // overlap restriction of subclause 6.5.16.1 does not apply to the case of
    // function return.

    // In C++ the return statement is handled via a copy initialization,
    // the C version of which boils down to CheckSingleAssignmentConstraints.
    if (RetValExp)
      NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, CES_Strict);
    if (!HasDependentReturnType && !RetValExp->isTypeDependent()) {
      // we have a non-void function with an expression, continue checking
      InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
                                                                     RetType,
                                                      NRVOCandidate != nullptr);
      ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
                                                       RetType, RetValExp);
      if (Res.isInvalid()) {
        // FIXME: Clean up temporaries here anyway?
        return StmtError();
      }
      RetValExp = Res.getAs<Expr>();

      // If we have a related result type, we need to implicitly
      // convert back to the formal result type.  We can't pretend to
      // initialize the result again --- we might end double-retaining
      // --- so instead we initialize a notional temporary.
      if (!RelatedRetType.isNull()) {
        Entity = InitializedEntity::InitializeRelatedResult(getCurMethodDecl(),
                                                            FnRetType);
        Res = PerformCopyInitialization(Entity, ReturnLoc, RetValExp);
        if (Res.isInvalid()) {
          // FIXME: Clean up temporaries here anyway?
          return StmtError();
        }
        RetValExp = Res.getAs<Expr>();
      }

      CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc, isObjCMethod, Attrs,
                         getCurFunctionDecl());
    }

    if (RetValExp) {
      ExprResult ER =
          ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false);
      if (ER.isInvalid())
        return StmtError();
      RetValExp = ER.get();
    }
    Result = ReturnStmt::Create(Context, ReturnLoc, RetValExp, NRVOCandidate);
  }

  // If we need to check for the named return value optimization, save the
  // return statement in our scope for later processing.
  if (Result->getNRVOCandidate())
    FunctionScopes.back()->Returns.push_back(Result);

  if (FunctionScopes.back()->FirstReturnLoc.isInvalid())
    FunctionScopes.back()->FirstReturnLoc = ReturnLoc;

  return Result;
}

StmtResult Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc,
                                      SourceLocation RParen, Decl *Parm,
                                      Stmt *Body) {
  VarDecl *Var = cast_or_null<VarDecl>(Parm);
  if (Var && Var->isInvalidDecl())
    return StmtError();

  return new (Context) ObjCAtCatchStmt(AtLoc, RParen, Var, Body);
}

StmtResult Sema::ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body) {
  return new (Context) ObjCAtFinallyStmt(AtLoc, Body);
}

StmtResult Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
                                    MultiStmtArg CatchStmts, Stmt *Finally) {
  if (!getLangOpts().ObjCExceptions)
    Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try";

  // @try introduces a scope that jumps may not cross unprotected.
  setFunctionHasBranchProtectedScope();
  unsigned NumCatchStmts = CatchStmts.size();

  return ObjCAtTryStmt::Create(Context, AtLoc, Try, CatchStmts.data(),
                               NumCatchStmts, Finally);
}

StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw) {
  if (Throw) {
    ExprResult Result = DefaultLvalueConversion(Throw);
    if (Result.isInvalid())
      return StmtError();

    Result = ActOnFinishFullExpr(Result.get(), /*DiscardedValue*/ false);
    if (Result.isInvalid())
      return StmtError();
    Throw = Result.get();

    QualType ThrowType = Throw->getType();
    // Make sure the expression type is an ObjC pointer or "void *".
    if (!ThrowType->isDependentType() &&
        !ThrowType->isObjCObjectPointerType()) {
      const PointerType *PT = ThrowType->getAs<PointerType>();
      if (!PT || !PT->getPointeeType()->isVoidType())
        return StmtError(Diag(AtLoc, diag::err_objc_throw_expects_object)
                         << Throw->getType() << Throw->getSourceRange());
    }
  }
  return new (Context) ObjCAtThrowStmt(AtLoc, Throw);
}

StmtResult Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
                                      Scope *CurScope) {
  if (!getLangOpts().ObjCExceptions)
    Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@throw";

  if (!Throw) {
    // @throw without an expression designates a rethrow (which must occur
    // in the context of an @catch clause).
    Scope *AtCatchParent = CurScope;
    while (AtCatchParent && !AtCatchParent->isAtCatchScope())
      AtCatchParent = AtCatchParent->getParent();
    if (!AtCatchParent)
      return StmtError(Diag(AtLoc, diag::err_rethrow_used_outside_catch));
  }
  return BuildObjCAtThrowStmt(AtLoc, Throw);
}

ExprResult Sema::ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
                                                Expr *operand) {
  ExprResult result = DefaultLvalueConversion(operand);
  if (result.isInvalid())
    return ExprError();
  operand = result.get();

  // Make sure the expression type is an ObjC pointer or "void *".
  QualType type = operand->getType();
  if (!type->isDependentType() &&
      !type->isObjCObjectPointerType()) {
    const PointerType *pointerType = type->getAs<PointerType>();
    if (!pointerType || !pointerType->getPointeeType()->isVoidType()) {
      if (getLangOpts().CPlusPlus) {
        // In ObjC++, try a contextual conversion to an ObjC pointer before
        // rejecting the operand; this requires a complete type.
        if (RequireCompleteType(atLoc, type,
                                diag::err_incomplete_receiver_type))
          return Diag(atLoc, diag::err_objc_synchronized_expects_object)
                   << type << operand->getSourceRange();

        ExprResult result = PerformContextuallyConvertToObjCPointer(operand);
        if (result.isInvalid())
          return ExprError();
        if (!result.isUsable())
          return Diag(atLoc, diag::err_objc_synchronized_expects_object)
                   << type << operand->getSourceRange();

        operand = result.get();
      } else {
          return Diag(atLoc, diag::err_objc_synchronized_expects_object)
                   << type << operand->getSourceRange();
      }
    }
  }

  // The operand to @synchronized is a full-expression.
  return ActOnFinishFullExpr(operand, /*DiscardedValue*/ false);
}

StmtResult Sema::ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
                                             Expr *SyncExpr, Stmt *SyncBody) {
  // We can't jump into or indirect-jump out of a @synchronized block.
  setFunctionHasBranchProtectedScope();
  return new (Context) ObjCAtSynchronizedStmt(AtLoc, SyncExpr, SyncBody);
}

/// ActOnCXXCatchBlock - Takes an exception declaration and a handler block
/// and creates a proper catch handler from them.
StmtResult Sema::ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
                                    Stmt *HandlerBlock) {
  // There's nothing to test that ActOnExceptionDecl didn't already test.
  return new (Context)
      CXXCatchStmt(CatchLoc, cast_or_null<VarDecl>(ExDecl), HandlerBlock);
}

StmtResult Sema::ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc,
                                              Stmt *Body) {
  setFunctionHasBranchProtectedScope();
  return new (Context) ObjCAutoreleasePoolStmt(AtLoc, Body);
}

namespace {
// Wrapper over a caught type used to detect duplicate / shadowed C++ catch
// handlers; strips one level of pointer/reference and top-level cv-quals so
// that equivalent handler types compare equal.
class CatchHandlerType {
  QualType QT;
  unsigned IsPointer : 1;

  // This is a special constructor to be used only with DenseMapInfo's
  // getEmptyKey() and getTombstoneKey() functions.
  friend struct llvm::DenseMapInfo<CatchHandlerType>;
  enum Unique { ForDenseMap };
  CatchHandlerType(QualType QT, Unique) : QT(QT), IsPointer(false) {}

public:
  /// Used when creating a CatchHandlerType from a handler type; will determine
  /// whether the type is a pointer or reference and will strip off the top
  /// level pointer and cv-qualifiers.
  CatchHandlerType(QualType Q) : QT(Q), IsPointer(false) {
    if (QT->isPointerType())
      IsPointer = true;

    if (IsPointer || QT->isReferenceType())
      QT = QT->getPointeeType();
    QT = QT.getUnqualifiedType();
  }

  /// Used when creating a CatchHandlerType from a base class type; pretends the
  /// type passed in had the pointer qualifier, does not need to get an
  /// unqualified type.
  CatchHandlerType(QualType QT, bool IsPointer)
      : QT(QT), IsPointer(IsPointer) {}

  QualType underlying() const { return QT; }
  bool isPointer() const { return IsPointer; }

  friend bool operator==(const CatchHandlerType &LHS,
                         const CatchHandlerType &RHS) {
    // If the pointer qualification does not match, we can return early.
    if (LHS.IsPointer != RHS.IsPointer)
      return false;
    // Otherwise, check the underlying type without cv-qualifiers.
    return LHS.QT == RHS.QT;
  }
};
} // namespace

// Hashing and equality so CatchHandlerType can key an llvm::DenseMap.
namespace llvm {
template <> struct DenseMapInfo<CatchHandlerType> {
  static CatchHandlerType getEmptyKey() {
    return CatchHandlerType(DenseMapInfo<QualType>::getEmptyKey(),
                            CatchHandlerType::ForDenseMap);
  }

  static CatchHandlerType getTombstoneKey() {
    return CatchHandlerType(DenseMapInfo<QualType>::getTombstoneKey(),
                            CatchHandlerType::ForDenseMap);
  }

  static unsigned getHashValue(const CatchHandlerType &Base) {
    return DenseMapInfo<QualType>::getHashValue(Base.underlying());
  }

  static bool isEqual(const CatchHandlerType &LHS,
                      const CatchHandlerType &RHS) {
    return LHS == RHS;
  }
};
}

namespace {
// Base-path visitor used with CXXRecordDecl::lookupInBases to find a public
// base class of a caught type that an earlier handler already catches.
class CatchTypePublicBases {
  ASTContext &Ctx;
  const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &TypesToCheck;
  const bool CheckAgainstPointer;

  CXXCatchStmt *FoundHandler;
  CanQualType FoundHandlerType;

public:
  CatchTypePublicBases(
      ASTContext &Ctx,
      const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &T, bool C)
      : Ctx(Ctx), TypesToCheck(T), CheckAgainstPointer(C),
        FoundHandler(nullptr) {}

  CXXCatchStmt *getFoundHandler() const { return FoundHandler; }
  CanQualType getFoundHandlerType() const { return FoundHandlerType; }

  bool operator()(const CXXBaseSpecifier *S, CXXBasePath &) {
    if (S->getAccessSpecifier() == AccessSpecifier::AS_public) {
      CatchHandlerType Check(S->getType(), CheckAgainstPointer);
      const auto &M = TypesToCheck;
      auto I = M.find(Check);
      if (I != M.end()) {
        FoundHandler = I->second;
        FoundHandlerType = Ctx.getCanonicalType(S->getType());
        return true;
      }
    }
    return false;
  }
};
}

/// ActOnCXXTryBlock - Takes a try compound-statement and a number of
/// handlers and creates a try statement from them.
StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
                                  ArrayRef<Stmt *> Handlers) {
  // Don't report an error if 'try' is used in system headers.
  if (!getLangOpts().CXXExceptions &&
      !getSourceManager().isInSystemHeader(TryLoc) && !getLangOpts().CUDA) {
    // Delay error emission for the OpenMP device code.
targetDiag(TryLoc, diag::err_exceptions_disabled) << "try"; } // Exceptions aren't allowed in CUDA device code. if (getLangOpts().CUDA) CUDADiagIfDeviceCode(TryLoc, diag::err_cuda_device_exceptions) << "try" << CurrentCUDATarget(); if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope()) Diag(TryLoc, diag::err_omp_simd_region_cannot_use_stmt) << "try"; sema::FunctionScopeInfo *FSI = getCurFunction(); // C++ try is incompatible with SEH __try. if (!getLangOpts().Borland && FSI->FirstSEHTryLoc.isValid()) { Diag(TryLoc, diag::err_mixing_cxx_try_seh_try); Diag(FSI->FirstSEHTryLoc, diag::note_conflicting_try_here) << "'__try'"; } const unsigned NumHandlers = Handlers.size(); assert(!Handlers.empty() && "The parser shouldn't call this if there are no handlers."); llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> HandledTypes; for (unsigned i = 0; i < NumHandlers; ++i) { CXXCatchStmt *H = cast<CXXCatchStmt>(Handlers[i]); // Diagnose when the handler is a catch-all handler, but it isn't the last // handler for the try block. [except.handle]p5. Also, skip exception // declarations that are invalid, since we can't usefully report on them. if (!H->getExceptionDecl()) { if (i < NumHandlers - 1) return StmtError(Diag(H->getBeginLoc(), diag::err_early_catch_all)); continue; } else if (H->getExceptionDecl()->isInvalidDecl()) continue; // Walk the type hierarchy to diagnose when this type has already been // handled (duplication), or cannot be handled (derivation inversion). We // ignore top-level cv-qualifiers, per [except.handle]p3 CatchHandlerType HandlerCHT = (QualType)Context.getCanonicalType(H->getCaughtType()); // We can ignore whether the type is a reference or a pointer; we need the // underlying declaration type in order to get at the underlying record // decl, if there is one. 
QualType Underlying = HandlerCHT.underlying(); if (auto *RD = Underlying->getAsCXXRecordDecl()) { if (!RD->hasDefinition()) continue; // Check that none of the public, unambiguous base classes are in the // map ([except.handle]p1). Give the base classes the same pointer // qualification as the original type we are basing off of. This allows // comparison against the handler type using the same top-level pointer // as the original type. CXXBasePaths Paths; Paths.setOrigin(RD); CatchTypePublicBases CTPB(Context, HandledTypes, HandlerCHT.isPointer()); if (RD->lookupInBases(CTPB, Paths)) { const CXXCatchStmt *Problem = CTPB.getFoundHandler(); if (!Paths.isAmbiguous(CTPB.getFoundHandlerType())) { Diag(H->getExceptionDecl()->getTypeSpecStartLoc(), diag::warn_exception_caught_by_earlier_handler) << H->getCaughtType(); Diag(Problem->getExceptionDecl()->getTypeSpecStartLoc(), diag::note_previous_exception_handler) << Problem->getCaughtType(); } } } // Add the type the list of ones we have handled; diagnose if we've already // handled it. auto R = HandledTypes.insert(std::make_pair(H->getCaughtType(), H)); if (!R.second) { const CXXCatchStmt *Problem = R.first->second; Diag(H->getExceptionDecl()->getTypeSpecStartLoc(), diag::warn_exception_caught_by_earlier_handler) << H->getCaughtType(); Diag(Problem->getExceptionDecl()->getTypeSpecStartLoc(), diag::note_previous_exception_handler) << Problem->getCaughtType(); } } FSI->setHasCXXTry(TryLoc); return CXXTryStmt::Create(Context, TryLoc, TryBlock, Handlers); } StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler) { assert(TryBlock && Handler); sema::FunctionScopeInfo *FSI = getCurFunction(); // SEH __try is incompatible with C++ try. Borland appears to support this, // however. 
if (!getLangOpts().Borland) { if (FSI->FirstCXXTryLoc.isValid()) { Diag(TryLoc, diag::err_mixing_cxx_try_seh_try); Diag(FSI->FirstCXXTryLoc, diag::note_conflicting_try_here) << "'try'"; } } FSI->setHasSEHTry(TryLoc); // Reject __try in Obj-C methods, blocks, and captured decls, since we don't // track if they use SEH. DeclContext *DC = CurContext; while (DC && !DC->isFunctionOrMethod()) DC = DC->getParent(); FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(DC); if (FD) FD->setUsesSEHTry(true); else Diag(TryLoc, diag::err_seh_try_outside_functions); // Reject __try on unsupported targets. if (!Context.getTargetInfo().isSEHTrySupported()) Diag(TryLoc, diag::err_seh_try_unsupported); return SEHTryStmt::Create(Context, IsCXXTry, TryLoc, TryBlock, Handler); } StmtResult Sema::ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block) { assert(FilterExpr && Block); QualType FTy = FilterExpr->getType(); if (!FTy->isIntegerType() && !FTy->isDependentType()) { return StmtError( Diag(FilterExpr->getExprLoc(), diag::err_filter_expression_integral) << FTy); } return SEHExceptStmt::Create(Context, Loc, FilterExpr, Block); } void Sema::ActOnStartSEHFinallyBlock() { CurrentSEHFinally.push_back(CurScope); } void Sema::ActOnAbortSEHFinallyBlock() { CurrentSEHFinally.pop_back(); } StmtResult Sema::ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block) { assert(Block); CurrentSEHFinally.pop_back(); return SEHFinallyStmt::Create(Context, Loc, Block); } StmtResult Sema::ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope) { Scope *SEHTryParent = CurScope; while (SEHTryParent && !SEHTryParent->isSEHTryScope()) SEHTryParent = SEHTryParent->getParent(); if (!SEHTryParent) return StmtError(Diag(Loc, diag::err_ms___leave_not_in___try)); CheckJumpOutOfSEHFinally(*this, Loc, *SEHTryParent); return new (Context) SEHLeaveStmt(Loc); } StmtResult Sema::BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, 
DeclarationNameInfo NameInfo, Stmt *Nested) { return new (Context) MSDependentExistsStmt(KeywordLoc, IsIfExists, QualifierLoc, NameInfo, cast<CompoundStmt>(Nested)); } StmtResult Sema::ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested) { return BuildMSDependentExistsStmt(KeywordLoc, IsIfExists, SS.getWithLocInContext(Context), GetNameFromUnqualifiedId(Name), Nested); } RecordDecl* Sema::CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams) { DeclContext *DC = CurContext; while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext())) DC = DC->getParent(); RecordDecl *RD = nullptr; if (getLangOpts().CPlusPlus) RD = CXXRecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc, /*Id=*/nullptr); else RD = RecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc, /*Id=*/nullptr); RD->setCapturedRecord(); DC->addDecl(RD); RD->setImplicit(); RD->startDefinition(); assert(NumParams > 0 && "CapturedStmt requires context parameter"); CD = CapturedDecl::Create(Context, CurContext, NumParams); DC->addDecl(CD); return RD; } static bool buildCapturedStmtCaptureList(Sema &S, CapturedRegionScopeInfo *RSI, SmallVectorImpl<CapturedStmt::Capture> &Captures, SmallVectorImpl<Expr *> &CaptureInits) { for (const sema::Capture &Cap : RSI->Captures) { if (Cap.isInvalid()) continue; // Form the initializer for the capture. ExprResult Init = S.BuildCaptureInit(Cap, Cap.getLocation(), RSI->CapRegionKind == CR_OpenMP); // FIXME: Bail out now if the capture is not used and the initializer has // no side-effects. // Create a field for this capture. FieldDecl *Field = S.BuildCaptureField(RSI->TheRecordDecl, Cap); // Add the capture to our list of captures. 
if (Cap.isThisCapture()) { Captures.push_back(CapturedStmt::Capture(Cap.getLocation(), CapturedStmt::VCK_This)); } else if (Cap.isVLATypeCapture()) { Captures.push_back( CapturedStmt::Capture(Cap.getLocation(), CapturedStmt::VCK_VLAType)); } else { assert(Cap.isVariableCapture() && "unknown kind of capture"); if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP) S.setOpenMPCaptureKind(Field, Cap.getVariable(), RSI->OpenMPLevel); Captures.push_back(CapturedStmt::Capture(Cap.getLocation(), Cap.isReferenceCapture() ? CapturedStmt::VCK_ByRef : CapturedStmt::VCK_ByCopy, Cap.getVariable())); } CaptureInits.push_back(Init.get()); } return false; } void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams) { CapturedDecl *CD = nullptr; RecordDecl *RD = CreateCapturedStmtRecordDecl(CD, Loc, NumParams); // Build the context parameter DeclContext *DC = CapturedDecl::castToDeclContext(CD); IdentifierInfo *ParamName = &Context.Idents.get("__context"); QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD)); auto *Param = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType, ImplicitParamDecl::CapturedContext); DC->addDecl(Param); CD->setContextParam(0, Param); // Enter the capturing scope for this captured region. 
PushCapturedRegionScope(CurScope, CD, RD, Kind); if (CurScope) PushDeclContext(CurScope, CD); else CurContext = CD; PushExpressionEvaluationContext( ExpressionEvaluationContext::PotentiallyEvaluated); } void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel) { CapturedDecl *CD = nullptr; RecordDecl *RD = CreateCapturedStmtRecordDecl(CD, Loc, Params.size()); // Build the context parameter DeclContext *DC = CapturedDecl::castToDeclContext(CD); bool ContextIsFound = false; unsigned ParamNum = 0; for (ArrayRef<CapturedParamNameType>::iterator I = Params.begin(), E = Params.end(); I != E; ++I, ++ParamNum) { if (I->second.isNull()) { assert(!ContextIsFound && "null type has been found already for '__context' parameter"); IdentifierInfo *ParamName = &Context.Idents.get("__context"); QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD)) .withConst() .withRestrict(); auto *Param = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType, ImplicitParamDecl::CapturedContext); DC->addDecl(Param); CD->setContextParam(ParamNum, Param); ContextIsFound = true; } else { IdentifierInfo *ParamName = &Context.Idents.get(I->first); auto *Param = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, I->second, ImplicitParamDecl::CapturedContext); DC->addDecl(Param); CD->setParam(ParamNum, Param); } } assert(ContextIsFound && "no null type for '__context' parameter"); if (!ContextIsFound) { // Add __context implicitly if it is not specified. IdentifierInfo *ParamName = &Context.Idents.get("__context"); QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD)); auto *Param = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType, ImplicitParamDecl::CapturedContext); DC->addDecl(Param); CD->setContextParam(ParamNum, Param); } // Enter the capturing scope for this captured region. 
PushCapturedRegionScope(CurScope, CD, RD, Kind, OpenMPCaptureLevel); if (CurScope) PushDeclContext(CurScope, CD); else CurContext = CD; PushExpressionEvaluationContext( ExpressionEvaluationContext::PotentiallyEvaluated); } void Sema::ActOnCapturedRegionError() { DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); PopDeclContext(); PoppedFunctionScopePtr ScopeRAII = PopFunctionScopeInfo(); CapturedRegionScopeInfo *RSI = cast<CapturedRegionScopeInfo>(ScopeRAII.get()); RecordDecl *Record = RSI->TheRecordDecl; Record->setInvalidDecl(); SmallVector<Decl*, 4> Fields(Record->fields()); ActOnFields(/*Scope=*/nullptr, Record->getLocation(), Record, Fields, SourceLocation(), SourceLocation(), ParsedAttributesView()); } StmtResult Sema::ActOnCapturedRegionEnd(Stmt *S) { // Leave the captured scope before we start creating captures in the // enclosing scope. DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); PopDeclContext(); PoppedFunctionScopePtr ScopeRAII = PopFunctionScopeInfo(); CapturedRegionScopeInfo *RSI = cast<CapturedRegionScopeInfo>(ScopeRAII.get()); SmallVector<CapturedStmt::Capture, 4> Captures; SmallVector<Expr *, 4> CaptureInits; if (buildCapturedStmtCaptureList(*this, RSI, Captures, CaptureInits)) return StmtError(); CapturedDecl *CD = RSI->TheCapturedDecl; RecordDecl *RD = RSI->TheRecordDecl; CapturedStmt *Res = CapturedStmt::Create( getASTContext(), S, static_cast<CapturedRegionKind>(RSI->CapRegionKind), Captures, CaptureInits, CD, RD); CD->setBody(Res->getCapturedStmt()); RD->completeDefinition(); return Res; }
#include <iostream>

// Small demonstration class: tracks a single integer and reports
// construction/destruction on standard output.
class Sample
{
  int i;

 public:
  Sample();           // default-constructs with i == 0
  Sample(int val);    // constructs with i == val
  void Display();     // prints the current value as "i=<value>"
  ~Sample();
};

// Default constructor: announce itself, then zero the value.
Sample::Sample() : i(0)
{
  std::cout << "Constructor1" << std::endl;
}

// Value constructor: announce itself, then store the given value.
Sample::Sample(int value) : i(value)
{
  std::cout << "Constructor2" << std::endl;
}

// Print the stored value in the fixed "i=<value>" format.
void Sample::Display()
{
  std::cout << "i=" << i << std::endl;
}

// Destructor: announce destruction.
Sample::~Sample()
{
  std::cout << "Destructor" << std::endl;
}

// Exercise both constructors, display both objects, then let the
// destructors run (in reverse construction order) at scope exit.
int main()
{
  Sample a;
  Sample b(10);
  a.Display();
  b.Display();
  return 0;
}
#pragma once #include <cstdlib> #include <iterator> #include <tuple> #include "L1_Peripheral/i2c.hpp" #include "L3_Application/commandline.hpp" #include "third_party/etl/vector.h" #include "utility/log.hpp" namespace sjsu { /// The I2cCommand [will] allows the user to discover, read, and write to /// devices on the I2C bus. class I2cCommand : public Command { public: enum Args { kName = 0, kOperation = 1, kDeviceAddress = 2, // kRegisterAddress && kWriteStartByte have the same argument position kRegisterAddress = 3, kWriteStartByte = 3, kLength = 4 }; struct AddressString_t { // String large enough to hold the string 0x00 char str[sizeof("0x00")]; }; struct Arguments_t { char * operation = nullptr; uint8_t device_address = 0x00; uint8_t register_address = 0x00; uint8_t length = 0; bool invalid = false; }; static constexpr char kDescription[] = R"(Read and write to the i2c bus. i2c write <device address> <register address> <data0> ... i2c read <device address> <register address> <length> i2c discover )"; explicit I2cCommand(const I2c & i2c) : Command("i2c", kDescription), i2c_(i2c) { } void Initialize() { i2c_.Initialize(); } std::tuple<uint8_t, bool> ParseByte(const char * const kArgument, uint8_t radix) { char * string_end; uint8_t byte = static_cast<uint8_t>(std::strtol(kArgument, &string_end, radix)); return std::make_tuple(byte, (string_end != kArgument)); } char * GetI2cOperation(const char * const kOperationArgument) { for (const char * operation : kI2cOperations) { if (std::strstr(operation, kOperationArgument) == operation) { return const_cast<char *>(operation); } } return nullptr; } Arguments_t ParseArguments(int, const char * const argv[]) { Arguments_t args; args.operation = GetI2cOperation(argv[Args::kOperation]); if (args.operation == nullptr) { LOG_ERROR("Invalid operation %s", argv[Args::kOperation]); args.invalid = true; } auto [device_address, s0] = ParseByte(argv[Args::kDeviceAddress], 16); if (!s0) { LOG_ERROR( "Invalid device address %u, must 
be a hex number of format 0xAA", device_address); args.invalid = true; } auto [register_address, s1] = ParseByte(argv[Args::kRegisterAddress], 16); if (!s1) { LOG_ERROR( "Invalid register address %u, must be a hex number of format 0xAA", register_address); args.invalid = true; } auto byte_status_tuple = ParseByte(argv[Args::kLength], 10); args.device_address = device_address; args.register_address = register_address; args.length = std::get<0>(byte_status_tuple); return args; } void I2cDiscover() { constexpr uint8_t kFirstI2cAddress = 0x08; constexpr uint8_t kLastI2cAddress = 0x78; devices_found_.clear(); for (uint8_t address = kFirstI2cAddress; address < kLastI2cAddress; address++) { if (Status::kSuccess == i2c_.Write(address, nullptr, 0, 50)) { AddressString_t address_string; snprintf(address_string.str, sizeof(address_string.str), "0x%02X", address); devices_found_.push_back(address_string); } } } int PerformReadOperation(int argc, const char * const argv[]) { if (argc - 1 < kLength) { LOG_ERROR( "Invalid number of arguments for read operation, required %d, " "supplied %d", kRegisterAddress, argc); return 1; } Arguments_t args = ParseArguments(argc, argv); uint8_t contents[128]; if (args.length < sizeof(contents)) { i2c_.WriteThenRead(args.device_address, &args.register_address, 1, contents, args.length); debug::Hexdump(contents, args.length); } else { LOG_ERROR("Length cannot be more then 128 bytes."); return 1; } return 0; } int PerformWriteOperation(int argc, const char * const argv[]) { if (argc - 1 < kRegisterAddress) { LOG_ERROR( "Invalid number of arguments for write opeation, required %d, " "supplied %d", kRegisterAddress, argc); return 1; } Arguments_t args = ParseArguments(argc, argv); uint8_t payload[64]; size_t position; for (position = 0; position < std::size(payload) && argv[Args::kWriteStartByte + position] != nullptr; position++) { payload[position] = std::get<0>(ParseByte(argv[Args::kWriteStartByte + position], 16)); } // subtract 1 since 
position will overshoot by 1 i2c_.Write(args.device_address, payload, position - 1); debug::Hexdump(payload, position - 1); return 0; } int PerformDiscoveryOperation() { I2cDiscover(); for (size_t i = 0; i < devices_found_.size(); i++) { if (i % 16 == 0) { printf("\n"); } printf("%s ", devices_found_[i].str); } printf("\n"); return 0; } int AutoComplete(int argc, const char * const argv[], const char * completion[], const size_t) override final { size_t position = 0; completion[0] = nullptr; switch (argc - 1) { // If nothing has been typed for the function, provide the i2c functions case Args::kOperation: for (const char * operation : kI2cOperations) { const char * const kArgument = argv[Args::kOperation]; if (std::strstr(operation, kArgument) == operation) { completion[position++] = const_cast<char *>(operation); } } break; case Args::kDeviceAddress: // Do not perform a tab complete when the operation is "discover" if (strcmp(argv[Args::kOperation], kI2cOperations[2]) == 0) { break; } I2cDiscover(); for (auto & address : devices_found_) { const char * const kArgument = argv[Args::kDeviceAddress]; if (std::strstr(address.str, kArgument) == address.str) { completion[position++] = address.str; } } } return position; } int Program(int argc, const char * const argv[]) override final { if (argc - 1 < kOperation) { LOG_ERROR("Invalid number of arguments, required %d, supplied %d", kOperation, argc); return 1; } const char * operation = GetI2cOperation(argv[Args::kOperation]); if (operation == kI2cOperations[0]) // read { return PerformReadOperation(argc, argv); } else if (operation == kI2cOperations[1]) // write { return PerformWriteOperation(argc, argv); } else if (operation == kI2cOperations[2]) // discover { return PerformDiscoveryOperation(); } return 0; } private: static inline const char * const kI2cOperations[] = { "read", "write", "discover", nullptr }; etl::vector<AddressString_t, command::kAutoCompleteOptions> devices_found_; const I2c & i2c_; }; } // namespace 
sjsu
/**********
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)

This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
**********/
// "liveMedia"
// Copyright (c) 1996-2017 Live Networks, Inc. All rights reserved.
// A filter that breaks up a H.264 or H.265 Video Elementary Stream into NAL units.
// C++ header

#ifndef _H264_OR_5_VIDEO_STREAM_FRAMER_HH
#define _H264_OR_5_VIDEO_STREAM_FRAMER_HH

#ifndef _MPEG_VIDEO_STREAM_FRAMER_HH
#include "MPEGVideoStreamFramer.hh"
#endif

// Common base for H264VideoStreamFramer and H265VideoStreamFramer; the
// "fHNumber" member (264 or 265) selects between the two codecs' NAL syntax.
class H264or5VideoStreamFramer: public MPEGVideoStreamFramer {
public:
  void getVPSandSPSandPPS(u_int8_t*& vps, unsigned& vpsSize,
			  u_int8_t*& sps, unsigned& spsSize,
			  u_int8_t*& pps, unsigned& ppsSize) const {
    // Returns pointers to copies of the most recently seen VPS (video parameter set)
    // SPS (sequence parameter set) and PPS (picture parameter set) NAL units.
    // (NULL pointers are returned if the NAL units have not yet been seen.)
    vps = fLastSeenVPS; vpsSize = fLastSeenVPSSize;
    sps = fLastSeenSPS; spsSize = fLastSeenSPSSize;
    pps = fLastSeenPPS; ppsSize = fLastSeenPPSSize;
  }

  void setVPSandSPSandPPS(u_int8_t* vps, unsigned vpsSize,
			  u_int8_t* sps, unsigned spsSize,
			  u_int8_t* pps, unsigned ppsSize) {
    // Assigns copies of the VPS, SPS and PPS NAL units. If this function is not called,
    // then these NAL units are assigned only if/when they appear in the input stream.
    saveCopyOfVPS(vps, vpsSize);
    saveCopyOfSPS(sps, spsSize);
    saveCopyOfPPS(pps, ppsSize);
  }

protected:
  H264or5VideoStreamFramer(int hNumber, // 264 or 265
			   UsageEnvironment& env, FramedSource* inputSource,
			   Boolean createParser, Boolean includeStartCodeInOutput);
      // We're an abstract base class.
  virtual ~H264or5VideoStreamFramer();

  // Store copies of the given parameter-set NAL units into the
  // fLastSeen* members (implementations in the .cpp file).
  void saveCopyOfVPS(u_int8_t* from, unsigned size);
  void saveCopyOfSPS(u_int8_t* from, unsigned size);
  void saveCopyOfPPS(u_int8_t* from, unsigned size);

  // Adopt the precomputed next presentation time as the current one.
  void setPresentationTime() { fPresentationTime = fNextPresentationTime; }

  // NAL unit type classification; the mapping of type codes differs between
  // H.264 and H.265, hence these are member functions rather than free ones.
  Boolean isVPS(u_int8_t nal_unit_type);
  Boolean isSPS(u_int8_t nal_unit_type);
  Boolean isPPS(u_int8_t nal_unit_type);
  Boolean isVCL(u_int8_t nal_unit_type);

protected:
  int fHNumber; // 264 or 265
  u_int8_t* fLastSeenVPS;
  unsigned fLastSeenVPSSize;
  u_int8_t* fLastSeenSPS;
  unsigned fLastSeenSPSSize;
  u_int8_t* fLastSeenPPS;
  unsigned fLastSeenPPSSize;
  struct timeval fNextPresentationTime; // the presentation time to be used for the next NAL unit to be parsed/delivered after this
  friend class H264or5VideoStreamParser; // hack
};

// A general routine for making a copy of a (H.264 or H.265) NAL unit,
// removing 'emulation' bytes from the copy:
unsigned removeH264or5EmulationBytes(u_int8_t* to, unsigned toMaxSize,
				     u_int8_t const* from, unsigned fromSize);
    // returns the size of the copy; it will be <= min(toMaxSize,fromSize)

#endif
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin developers
// Copyright (c) 2016-2018 The KONG Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "txmempool.h"

#include "clientversion.h"
#include "main.h"
#include "streams.h"
#include "util.h"
#include "utilmoneystr.h"
#include "version.h"

#include <boost/circular_buffer.hpp>

using namespace std;

// Default entry: zero fee/size/priority; MEMPOOL_HEIGHT marks "not mined yet".
CTxMemPoolEntry::CTxMemPoolEntry() : nFee(0), nTxSize(0), nModSize(0), nTime(0), dPriority(0.0)
{
    nHeight = MEMPOOL_HEIGHT;
}

CTxMemPoolEntry::CTxMemPoolEntry(const CTransaction& _tx, const CAmount& _nFee, int64_t _nTime, double _dPriority, unsigned int _nHeight) : tx(_tx), nFee(_nFee), nTime(_nTime), dPriority(_dPriority), nHeight(_nHeight)
{
    // Cache the serialized size and the "modified size" used by priority math.
    nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
    nModSize = tx.CalculateModifiedSize(nTxSize);
}

CTxMemPoolEntry::CTxMemPoolEntry(const CTxMemPoolEntry& other)
{
    *this = other;
}

// Priority at |currentHeight|: entry priority plus value-weighted coin age
// accumulated since the entry's height, normalized by modified size.
double CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const
{
    CAmount nValueIn = tx.GetValueOut() + nFee;
    double deltaPriority = ((double)(currentHeight - nHeight) * nValueIn) / nModSize;
    double dResult = dPriority + deltaPriority;
    return dResult;
}

/**
 * Keep track of fee/priority for transactions confirmed within N blocks
 */
class CBlockAverage
{
private:
    // Ring buffers: only the 100 most recent samples of each kind are kept.
    boost::circular_buffer<CFeeRate> feeSamples;
    boost::circular_buffer<double> prioritySamples;

    template <typename T>
    std::vector<T> buf2vec(boost::circular_buffer<T> buf) const
    {
        std::vector<T> vec(buf.begin(), buf.end());
        return vec;
    }

public:
    CBlockAverage() : feeSamples(100), prioritySamples(100) {}

    void RecordFee(const CFeeRate& feeRate)
    {
        feeSamples.push_back(feeRate);
    }

    void RecordPriority(double priority)
    {
        prioritySamples.push_back(priority);
    }

    size_t FeeSamples() const { return feeSamples.size(); }
    // Appends all fee samples to |insertInto|; returns the sample count.
    size_t GetFeeSamples(std::vector<CFeeRate>& insertInto) const
    {
        BOOST_FOREACH (const CFeeRate& f, feeSamples)
            insertInto.push_back(f);
        return feeSamples.size();
    }
    size_t PrioritySamples() const { return prioritySamples.size(); }
    // Appends all priority samples to |insertInto|; returns the sample count.
    size_t GetPrioritySamples(std::vector<double>& insertInto) const
    {
        BOOST_FOREACH (double d, prioritySamples)
            insertInto.push_back(d);
        return prioritySamples.size();
    }

    /**
     * Used as belt-and-suspenders check when reading to detect
     * file corruption
     */
    static bool AreSane(const CFeeRate fee, const CFeeRate& minRelayFee)
    {
        if (fee < CFeeRate(0))
            return false;
        if (fee.GetFeePerK() > minRelayFee.GetFeePerK() * 10000)
            return false;
        return true;
    }
    static bool AreSane(const std::vector<CFeeRate>& vecFee, const CFeeRate& minRelayFee)
    {
        BOOST_FOREACH (CFeeRate fee, vecFee) {
            if (!AreSane(fee, minRelayFee))
                return false;
        }
        return true;
    }
    static bool AreSane(const double priority)
    {
        return priority >= 0;
    }
    static bool AreSane(const std::vector<double> vecPriority)
    {
        BOOST_FOREACH (double priority, vecPriority) {
            if (!AreSane(priority))
                return false;
        }
        return true;
    }

    // Serializes both sample buffers (as vectors) to |fileout|.
    void Write(CAutoFile& fileout) const
    {
        std::vector<CFeeRate> vecFee = buf2vec(feeSamples);
        fileout << vecFee;
        std::vector<double> vecPriority = buf2vec(prioritySamples);
        fileout << vecPriority;
    }

    // Deserializes both sample buffers; throws on insane (corrupt) values.
    void Read(CAutoFile& filein, const CFeeRate& minRelayFee)
    {
        std::vector<CFeeRate> vecFee;
        filein >> vecFee;
        if (AreSane(vecFee, minRelayFee))
            feeSamples.insert(feeSamples.end(), vecFee.begin(), vecFee.end());
        else
            throw runtime_error("Corrupt fee value in estimates file.");
        std::vector<double> vecPriority;
        filein >> vecPriority;
        if (AreSane(vecPriority))
            prioritySamples.insert(prioritySamples.end(), vecPriority.begin(), vecPriority.end());
        else
            throw runtime_error("Corrupt priority value in estimates file.");
        if (feeSamples.size() + prioritySamples.size() > 0)
            LogPrint("estimatefee", "Read %d fee samples and %d priority samples\n",
                feeSamples.size(), prioritySamples.size());
    }
};

class CMinerPolicyEstimator
{
private:
    /**
     * Records observed averages transactions that confirmed within one block, two blocks,
     * three blocks etc.
     */
    std::vector<CBlockAverage> history;
    // Lazily (re)built, descending-sorted flattenings of all history samples.
    std::vector<CFeeRate> sortedFeeSamples;
    std::vector<double> sortedPrioritySamples;

    int nBestSeenHeight;

    /**
     * nBlocksAgo is 0 based, i.e. transactions that confirmed in the highest seen block are
     * nBlocksAgo == 0, transactions in the block before that are nBlocksAgo == 1 etc.
     */
    void seenTxConfirm(const CFeeRate& feeRate, const CFeeRate& minRelayFee, double dPriority, int nBlocksAgo)
    {
        // Last entry records "everything else".
        int nBlocksTruncated = min(nBlocksAgo, (int)history.size() - 1);
        assert(nBlocksTruncated >= 0);

        // We need to guess why the transaction was included in a block-- either
        // because it is high-priority or because it has sufficient fees.
        bool sufficientFee = (feeRate > minRelayFee);
        bool sufficientPriority = AllowFree(dPriority);
        const char* assignedTo = "unassigned";
        if (sufficientFee && !sufficientPriority && CBlockAverage::AreSane(feeRate, minRelayFee)) {
            history[nBlocksTruncated].RecordFee(feeRate);
            assignedTo = "fee";
        } else if (sufficientPriority && !sufficientFee && CBlockAverage::AreSane(dPriority)) {
            history[nBlocksTruncated].RecordPriority(dPriority);
            assignedTo = "priority";
        } else {
            // Neither or both fee and priority sufficient to get confirmed:
            // don't know why they got confirmed.
        }
        LogPrint("estimatefee", "Seen TX confirm: %s : %s fee/%g priority, took %d blocks\n",
            assignedTo, feeRate.ToString(), dPriority, nBlocksAgo);
    }

public:
    CMinerPolicyEstimator(int nEntries) : nBestSeenHeight(0)
    {
        history.resize(nEntries);
    }

    // Record the confirmation delay of every mempool entry mined into the
    // block at |nBlockHeight|; feeds the per-delay history buckets.
    void seenBlock(const std::vector<CTxMemPoolEntry>& entries, int nBlockHeight, const CFeeRate minRelayFee)
    {
        if (nBlockHeight <= nBestSeenHeight) {
            // Ignore side chains and re-orgs; assuming they are random
            // they don't affect the estimate.
            // And if an attacker can re-org the chain at will, then
            // you've got much bigger problems than "attacker can influence
            // transaction fees."
            return;
        }
        nBestSeenHeight = nBlockHeight;

        // Fill up the history buckets based on how long transactions took
        // to confirm.
        std::vector<std::vector<const CTxMemPoolEntry*> > entriesByConfirmations;
        entriesByConfirmations.resize(history.size());
        BOOST_FOREACH (const CTxMemPoolEntry& entry, entries) {
            // How many blocks did it take for miners to include this transaction?
            int delta = nBlockHeight - entry.GetHeight();
            if (delta <= 0) {
                // Re-org made us lose height, this should only happen if we happen
                // to re-org on a difficulty transition point: very rare!
                continue;
            }
            if ((delta - 1) >= (int)history.size())
                delta = history.size(); // Last bucket is catch-all
            entriesByConfirmations.at(delta - 1).push_back(&entry);
        }
        for (size_t i = 0; i < entriesByConfirmations.size(); i++) {
            std::vector<const CTxMemPoolEntry*>& e = entriesByConfirmations.at(i);
            // Insert at most 10 random entries per bucket, otherwise a single block
            // can dominate an estimate:
            if (e.size() > 10) {
                std::random_shuffle(e.begin(), e.end());
                e.resize(10);
            }
            BOOST_FOREACH (const CTxMemPoolEntry* entry, e) {
                // Fees are stored and reported as BTC-per-kb:
                CFeeRate feeRate(entry->GetFee(), entry->GetTxSize());
                // Want priority when it went IN
                double dPriority = entry->GetPriority(entry->GetHeight());
                seenTxConfirm(feeRate, minRelayFee, dPriority, i);
            }
        }

        //After new samples are added, we have to clear the sorted lists,
        //so they'll be resorted the next time someone asks for an estimate
        sortedFeeSamples.clear();
        sortedPrioritySamples.clear();

        for (size_t i = 0; i < history.size(); i++) {
            if (history[i].FeeSamples() + history[i].PrioritySamples() > 0)
                LogPrint("estimatefee", "estimates: for confirming within %d blocks based on %d/%d samples, fee=%s, prio=%g\n",
                    i, history[i].FeeSamples(), history[i].PrioritySamples(),
                    estimateFee(i + 1).ToString(), estimatePriority(i + 1));
        }
    }

    /**
     * Can return CFeeRate(0) if we don't have any data for that many blocks back. nBlocksToConfirm is 1 based.
     */
    CFeeRate estimateFee(int nBlocksToConfirm)
    {
        nBlocksToConfirm--;
        if (nBlocksToConfirm < 0 || nBlocksToConfirm >= (int)history.size())
            return CFeeRate(0);

        if (sortedFeeSamples.size() == 0) {
            for (size_t i = 0; i < history.size(); i++)
                history.at(i).GetFeeSamples(sortedFeeSamples);
            std::sort(sortedFeeSamples.begin(), sortedFeeSamples.end(),
                std::greater<CFeeRate>());
        }
        if (sortedFeeSamples.size() < 11) {
            // Eleven is Gavin's Favorite Number
            // ... but we also take a maximum of 10 samples per block so eleven means
            // we're getting samples from at least two different blocks
            return CFeeRate(0);
        }

        int nBucketSize = history.at(nBlocksToConfirm).FeeSamples();

        // Estimates should not increase as number of confirmations goes up,
        // but the estimates are noisy because confirmations happen discretely
        // in blocks. To smooth out the estimates, use all samples in the history
        // and use the nth highest where n is (number of samples in previous bucket +
        // half the samples in nBlocksToConfirm bucket):
        size_t nPrevSize = 0;
        for (int i = 0; i < nBlocksToConfirm; i++)
            nPrevSize += history.at(i).FeeSamples();
        size_t index = min(nPrevSize + nBucketSize / 2, sortedFeeSamples.size() - 1);
        return sortedFeeSamples[index];
    }
    // Same smoothing scheme as estimateFee, over priority samples.
    // Returns -1 when there is not enough data. nBlocksToConfirm is 1 based.
    double estimatePriority(int nBlocksToConfirm)
    {
        nBlocksToConfirm--;
        if (nBlocksToConfirm < 0 || nBlocksToConfirm >= (int)history.size())
            return -1;

        if (sortedPrioritySamples.size() == 0) {
            for (size_t i = 0; i < history.size(); i++)
                history.at(i).GetPrioritySamples(sortedPrioritySamples);
            std::sort(sortedPrioritySamples.begin(), sortedPrioritySamples.end(),
                std::greater<double>());
        }
        if (sortedPrioritySamples.size() < 11)
            return -1.0;

        int nBucketSize = history.at(nBlocksToConfirm).PrioritySamples();

        // Estimates should not increase as number of confirmations needed goes up,
        // but the estimates are noisy because confirmations happen discretely
        // in blocks. To smooth out the estimates, use all samples in the history
        // and use the nth highest where n is (number of samples in previous buckets +
        // half the samples in nBlocksToConfirm bucket).
        size_t nPrevSize = 0;
        for (int i = 0; i < nBlocksToConfirm; i++)
            nPrevSize += history.at(i).PrioritySamples();
        size_t index = min(nPrevSize + nBucketSize / 2, sortedPrioritySamples.size() - 1);
        return sortedPrioritySamples[index];
    }

    // Serializes best-seen height and every history bucket.
    void Write(CAutoFile& fileout) const
    {
        fileout << nBestSeenHeight;
        fileout << history.size();
        BOOST_FOREACH (const CBlockAverage& entry, history) {
            entry.Write(fileout);
        }
    }

    // Deserializes estimator state; all-or-nothing (state is only replaced
    // after the whole file has been read without error).
    void Read(CAutoFile& filein, const CFeeRate& minRelayFee)
    {
        int nFileBestSeenHeight;
        filein >> nFileBestSeenHeight;
        size_t numEntries;
        filein >> numEntries;
        if (numEntries <= 0 || numEntries > 10000)
            throw runtime_error("Corrupt estimates file. Must have between 1 and 10k entries.");

        std::vector<CBlockAverage> fileHistory;

        for (size_t i = 0; i < numEntries; i++) {
            CBlockAverage entry;
            entry.Read(filein, minRelayFee);
            fileHistory.push_back(entry);
        }

        // Now that we've processed the entire fee estimate data file and not
        // thrown any errors, we can copy it to our history
        nBestSeenHeight = nFileBestSeenHeight;
        history = fileHistory;
        assert(history.size() > 0);
    }
};

CTxMemPool::CTxMemPool(const CFeeRate& _minRelayFee) : nTransactionsUpdated(0),
                                                       minRelayFee(_minRelayFee)
{
    // Sanity checks off by default for performance, because otherwise
    // accepting transactions becomes O(N^2) where N is the number
    // of transactions in the pool
    fSanityCheck = false;

    // 25 blocks is a compromise between using a lot of disk/memory and
    // trying to give accurate estimates to people who might be willing
    // to wait a day or two to save a fraction of a penny in fees.
    // Confirmation times for very-low-fee transactions that take more
    // than an hour or three to confirm are highly variable.
    minerPolicyEstimator = new CMinerPolicyEstimator(25);
}

CTxMemPool::~CTxMemPool()
{
    delete minerPolicyEstimator;
}

// Marks every output of |hashTx| that is spent by a mempool transaction as
// spent in |coins|.
void CTxMemPool::pruneSpent(const uint256& hashTx, CCoins& coins)
{
    LOCK(cs);

    std::map<COutPoint, CInPoint>::iterator it = mapNextTx.lower_bound(COutPoint(hashTx, 0));

    // iterate over all COutPoints in mapNextTx whose hash equals the provided hashTx
    while (it != mapNextTx.end() && it->first.hash == hashTx) {
        coins.Spend(it->first.n); // and remove those outputs from coins
        it++;
    }
}

unsigned int CTxMemPool::GetTransactionsUpdated() const
{
    LOCK(cs);
    return nTransactionsUpdated;
}

void CTxMemPool::AddTransactionsUpdated(unsigned int n)
{
    LOCK(cs);
    nTransactionsUpdated += n;
}

bool CTxMemPool::addUnchecked(const uint256& hash, const CTxMemPoolEntry& entry)
{
    // Add to memory pool without checking anything.
    // Used by main.cpp AcceptToMemoryPool(), which DOES do
    // all the appropriate checks.
    LOCK(cs);
    {
        mapTx[hash] = entry;
        const CTransaction& tx = mapTx[hash].GetTx();
        // Zerocoin spends have no regular prevouts to index.
        if (!tx.IsZerocoinSpend()) {
            for (unsigned int i = 0; i < tx.vin.size(); i++)
                mapNextTx[tx.vin[i].prevout] = CInPoint(&tx, i);
        }
        nTransactionsUpdated++;
        totalTxSize += entry.GetTxSize();
    }
    return true;
}

void CTxMemPool::remove(const CTransaction& origTx, std::list<CTransaction>& removed, bool fRecursive)
{
    // Remove transaction from memory pool
    {
        LOCK(cs);
        // Worklist of transaction hashes still to be removed.
        std::deque<uint256> txToRemove;
        txToRemove.push_back(origTx.GetHash());
        if (fRecursive && !mapTx.count(origTx.GetHash())) {
            // If recursively removing but origTx isn't in the mempool
            // be sure to remove any children that are in the pool. This can
            // happen during chain re-orgs if origTx isn't re-accepted into
            // the mempool for any reason.
            for (unsigned int i = 0; i < origTx.vout.size(); i++) {
                std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(COutPoint(origTx.GetHash(), i));
                if (it == mapNextTx.end())
                    continue;
                txToRemove.push_back(it->second.ptx->GetHash());
            }
        }
        while (!txToRemove.empty()) {
            uint256 hash = txToRemove.front();
            txToRemove.pop_front();
            if (!mapTx.count(hash))
                continue;
            const CTransaction& tx = mapTx[hash].GetTx();
            if (fRecursive) {
                // Queue all in-pool descendants for removal as well.
                for (unsigned int i = 0; i < tx.vout.size(); i++) {
                    std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(COutPoint(hash, i));
                    if (it == mapNextTx.end())
                        continue;
                    txToRemove.push_back(it->second.ptx->GetHash());
                }
            }
            BOOST_FOREACH (const CTxIn& txin, tx.vin)
                mapNextTx.erase(txin.prevout);

            removed.push_back(tx);
            totalTxSize -= mapTx[hash].GetTxSize();
            mapTx.erase(hash);
            nTransactionsUpdated++;
        }
    }
}

void CTxMemPool::removeCoinbaseSpends(const CCoinsViewCache* pcoins, unsigned int nMemPoolHeight)
{
    // Remove transactions spending a coinbase which are now immature
    LOCK(cs);
    list<CTransaction> transactionsToRemove;
    for (std::map<uint256, CTxMemPoolEntry>::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) {
        const CTransaction& tx = it->second.GetTx();
        BOOST_FOREACH (const CTxIn& txin, tx.vin) {
            // Inputs spending another mempool tx are not coinbase spends.
            std::map<uint256, CTxMemPoolEntry>::const_iterator it2 = mapTx.find(txin.prevout.hash);
            if (it2 != mapTx.end())
                continue;
            const CCoins* coins = pcoins->AccessCoins(txin.prevout.hash);
            if (fSanityCheck) assert(coins);
            if (!coins || ((coins->IsCoinBase() || coins->IsCoinStake()) && nMemPoolHeight - coins->nHeight < (unsigned)Params().COINBASE_MATURITY())) {
                transactionsToRemove.push_back(tx);
                break;
            }
        }
    }
    BOOST_FOREACH (const CTransaction& tx, transactionsToRemove) {
        list<CTransaction> removed;
        remove(tx, removed, true);
    }
}

void CTxMemPool::removeConflicts(const CTransaction& tx, std::list<CTransaction>& removed)
{
    // Remove transactions which depend on inputs of tx, recursively
    list<CTransaction> result;
    LOCK(cs);
    BOOST_FOREACH (const CTxIn&
txin, tx.vin) { std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(txin.prevout); if (it != mapNextTx.end()) { const CTransaction& txConflict = *it->second.ptx; if (txConflict != tx) { remove(txConflict, removed, true); } } } } /** * Called when a block is connected. Removes from mempool and updates the miner fee estimator. */ void CTxMemPool::removeForBlock(const std::vector<CTransaction>& vtx, unsigned int nBlockHeight, std::list<CTransaction>& conflicts) { LOCK(cs); std::vector<CTxMemPoolEntry> entries; BOOST_FOREACH (const CTransaction& tx, vtx) { uint256 hash = tx.GetHash(); if (mapTx.count(hash)) entries.push_back(mapTx[hash]); } minerPolicyEstimator->seenBlock(entries, nBlockHeight, minRelayFee); BOOST_FOREACH (const CTransaction& tx, vtx) { std::list<CTransaction> dummy; remove(tx, dummy, false); removeConflicts(tx, conflicts); ClearPrioritisation(tx.GetHash()); } } void CTxMemPool::clear() { LOCK(cs); mapTx.clear(); mapNextTx.clear(); totalTxSize = 0; ++nTransactionsUpdated; } void CTxMemPool::check(const CCoinsViewCache* pcoins) const { if (!fSanityCheck) return; LogPrint("mempool", "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size()); uint64_t checkTotal = 0; CCoinsViewCache mempoolDuplicate(const_cast<CCoinsViewCache*>(pcoins)); LOCK(cs); list<const CTxMemPoolEntry*> waitingOnDependants; for (std::map<uint256, CTxMemPoolEntry>::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { unsigned int i = 0; checkTotal += it->second.GetTxSize(); const CTransaction& tx = it->second.GetTx(); bool fDependsWait = false; BOOST_FOREACH (const CTxIn& txin, tx.vin) { // Check that every mempool transaction's inputs refer to available coins, or other mempool tx's. 
std::map<uint256, CTxMemPoolEntry>::const_iterator it2 = mapTx.find(txin.prevout.hash); if (it2 != mapTx.end()) { const CTransaction& tx2 = it2->second.GetTx(); assert(tx2.vout.size() > txin.prevout.n && !tx2.vout[txin.prevout.n].IsNull()); fDependsWait = true; } else { const CCoins* coins = pcoins->AccessCoins(txin.prevout.hash); assert(coins && coins->IsAvailable(txin.prevout.n)); } // Check whether its inputs are marked in mapNextTx. std::map<COutPoint, CInPoint>::const_iterator it3 = mapNextTx.find(txin.prevout); assert(it3 != mapNextTx.end()); assert(it3->second.ptx == &tx); assert(it3->second.n == i); i++; } if (fDependsWait) waitingOnDependants.push_back(&it->second); else { CValidationState state; CTxUndo undo; assert(CheckInputs(tx, state, mempoolDuplicate, false, 0, false, NULL)); UpdateCoins(tx, state, mempoolDuplicate, undo, 1000000); } } unsigned int stepsSinceLastRemove = 0; while (!waitingOnDependants.empty()) { const CTxMemPoolEntry* entry = waitingOnDependants.front(); waitingOnDependants.pop_front(); CValidationState state; if (!mempoolDuplicate.HaveInputs(entry->GetTx())) { waitingOnDependants.push_back(entry); stepsSinceLastRemove++; assert(stepsSinceLastRemove < waitingOnDependants.size()); } else { assert(CheckInputs(entry->GetTx(), state, mempoolDuplicate, false, 0, false, NULL)); CTxUndo undo; UpdateCoins(entry->GetTx(), state, mempoolDuplicate, undo, 1000000); stepsSinceLastRemove = 0; } } for (std::map<COutPoint, CInPoint>::const_iterator it = mapNextTx.begin(); it != mapNextTx.end(); it++) { uint256 hash = it->second.ptx->GetHash(); map<uint256, CTxMemPoolEntry>::const_iterator it2 = mapTx.find(hash); const CTransaction& tx = it2->second.GetTx(); assert(it2 != mapTx.end()); assert(&tx == it->second.ptx); assert(tx.vin.size() > it->second.n); assert(it->first == it->second.ptx->vin[it->second.n].prevout); } assert(totalTxSize == checkTotal); } void CTxMemPool::queryHashes(vector<uint256>& vtxid) { vtxid.clear(); LOCK(cs); 
vtxid.reserve(mapTx.size()); for (map<uint256, CTxMemPoolEntry>::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi) vtxid.push_back((*mi).first); } void CTxMemPool::getTransactions(std::set<uint256>& setTxid) { setTxid.clear(); LOCK(cs); for (map<uint256, CTxMemPoolEntry>::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi) setTxid.insert((*mi).first); } bool CTxMemPool::lookup(uint256 hash, CTransaction& result) const { LOCK(cs); map<uint256, CTxMemPoolEntry>::const_iterator i = mapTx.find(hash); if (i == mapTx.end()) return false; result = i->second.GetTx(); return true; } CFeeRate CTxMemPool::estimateFee(int nBlocks) const { LOCK(cs); return minerPolicyEstimator->estimateFee(nBlocks); } double CTxMemPool::estimatePriority(int nBlocks) const { LOCK(cs); return minerPolicyEstimator->estimatePriority(nBlocks); } bool CTxMemPool::WriteFeeEstimates(CAutoFile& fileout) const { try { LOCK(cs); fileout << 120000; // version required to read: 0.12.00 or later fileout << CLIENT_VERSION; // version that wrote the file minerPolicyEstimator->Write(fileout); } catch (const std::exception&) { LogPrintf("CTxMemPool::WriteFeeEstimates() : unable to write policy estimator data (non-fatal)"); return false; } return true; } bool CTxMemPool::ReadFeeEstimates(CAutoFile& filein) { try { int nVersionRequired, nVersionThatWrote; filein >> nVersionRequired >> nVersionThatWrote; if (nVersionRequired > CLIENT_VERSION) return error("CTxMemPool::ReadFeeEstimates() : up-version (%d) fee estimate file", nVersionRequired); LOCK(cs); minerPolicyEstimator->Read(filein, minRelayFee); } catch (const std::exception&) { LogPrintf("CTxMemPool::ReadFeeEstimates() : unable to read policy estimator data (non-fatal)"); return false; } return true; } void CTxMemPool::PrioritiseTransaction(const uint256 hash, const string strHash, double dPriorityDelta, const CAmount& nFeeDelta) { { LOCK(cs); std::pair<double, CAmount>& deltas = mapDeltas[hash]; deltas.first += dPriorityDelta; deltas.second += 
nFeeDelta; } LogPrintf("PrioritiseTransaction: %s priority += %f, fee += %d\n", strHash, dPriorityDelta, FormatMoney(nFeeDelta)); } void CTxMemPool::ApplyDeltas(const uint256 hash, double& dPriorityDelta, CAmount& nFeeDelta) { LOCK(cs); std::map<uint256, std::pair<double, CAmount> >::iterator pos = mapDeltas.find(hash); if (pos == mapDeltas.end()) return; const std::pair<double, CAmount>& deltas = pos->second; dPriorityDelta += deltas.first; nFeeDelta += deltas.second; } void CTxMemPool::ClearPrioritisation(const uint256 hash) { LOCK(cs); mapDeltas.erase(hash); } CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView* baseIn, CTxMemPool& mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) {} bool CCoinsViewMemPool::GetCoins(const uint256& txid, CCoins& coins) const { // If an entry in the mempool exists, always return that one, as it's guaranteed to never // conflict with the underlying cache, and it cannot have pruned entries (as it contains full) // transactions. First checking the underlying cache risks returning a pruned entry instead. CTransaction tx; if (mempool.lookup(txid, tx)) { coins = CCoins(tx, MEMPOOL_HEIGHT); return true; } return (base->GetCoins(txid, coins) && !coins.IsPruned()); } bool CCoinsViewMemPool::HaveCoins(const uint256& txid) const { return mempool.exists(txid) || base->HaveCoins(txid); }
/**
 * Tencent is pleased to support the open source community by making Tars available.
 *
 * Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
 *
 * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * https://opensource.org/licenses/BSD-3-Clause
 *
 * Unless required by applicable law or agreed to in writing, software distributed
 * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */

#include "util/tc_bitmap.h"
#include "util/tc_common.h"
#include <cassert>
#include <string.h>
#include <iostream>

namespace tars
{
// Masks selecting bit 0..7 within a byte (MSB first).
const int TC_BitMap::BitMap::_magic_bits[8] = {0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1};

// Bytes needed to store iElementCount bits plus the header.
size_t TC_BitMap::BitMap::calcMemSize(size_t iElementCount)
{
    assert(iElementCount > 0);
    iElementCount--;
    size_t iMemSize = iElementCount / 8 + 1;
    iMemSize += sizeof(tagBitMapHead);
    return iMemSize;
}

// Initialize a fresh bitmap over caller-provided memory (zeroes it all).
void TC_BitMap::BitMap::create(void *pAddr, size_t iSize)
{
    memset((char*)pAddr, 0, iSize);

    _pHead = static_cast<tagBitMapHead*>(pAddr);
    _pHead->_cVersion = BM_VERSION;
    _pHead->_iMemSize = iSize;

    _pData = (unsigned char*)pAddr + sizeof(tagBitMapHead);
}

// Attach to already-initialized memory; -1 on version mismatch, -2 on size mismatch.
int TC_BitMap::BitMap::connect(void *pAddr, size_t iSize)
{
    _pHead = static_cast<tagBitMapHead*>(pAddr);

    if (_pHead->_cVersion != BM_VERSION)
    {
        return -1;
    }

    if (iSize != _pHead->_iMemSize)
    {
        return -2;
    }

    _pData = (unsigned char*)pAddr + sizeof(tagBitMapHead);

    return 0;
}

// Read bit i: 1/0, or -1 when i is out of range.
int TC_BitMap::BitMap::get(size_t i)
{
    if (i / 8 >= (_pHead->_iMemSize - sizeof(tagBitMapHead)))
    {
        return -1;
    }

    unsigned char* p = _pData + i / 8;

    return _get_bit(*p, i % 8) > 0 ? 1 : 0;
}

// Set bit i; returns whether the containing byte is non-zero afterwards,
// or -1 when i is out of range.
int TC_BitMap::BitMap::set(size_t i)
{
    if (i / 8 >= (_pHead->_iMemSize - sizeof(tagBitMapHead)))
    {
        return -1;
    }

    unsigned char* p = (unsigned char*)_pData + i / 8;

    *p = _set_bit(*p, i % 8);

    return (int)(*p) > 0 ? 1 : 0;
}

// Clear bit i; returns whether the containing byte is non-zero afterwards,
// or -1 when i is out of range.
int TC_BitMap::BitMap::clear(size_t i)
{
    if (i / 8 >= (_pHead->_iMemSize - sizeof(tagBitMapHead)))
    {
        return -1;
    }

    unsigned char* p = (unsigned char*)_pData + i / 8;

    *p = _clear_bit(*p, i % 8);

    return (int)(*p) > 0 ? 1 : 0;
}

// Zero the entire data area (header untouched).
int TC_BitMap::BitMap::clear4all()
{
    memset(_pData, 0, _pHead->_iMemSize - sizeof(tagBitMapHead));

    return 0;
}

// Dump header + data to a file; 0 on success, -1 on failure.
int TC_BitMap::BitMap::dump2file(const string &sFile)
{
    FILE *fp = fopen(sFile.c_str(), "wb");
    if (fp == NULL)
    {
        return -1;
    }

    size_t ret = fwrite((void*)_pHead, 1, _pHead->_iMemSize, fp);

    fclose(fp);

    if (ret == _pHead->_iMemSize)
    {
        return 0;
    }
    return -1;
}

// Load a previously dumped file over this bitmap.
// Returns 0 on success, -1 open failure, -2 size mismatch, -3 version mismatch.
int TC_BitMap::BitMap::load5file(const string &sFile)
{
    FILE *fp = fopen(sFile.c_str(), "rb");
    if (fp == NULL)
    {
        return -1;
    }
    fseek(fp, 0L, SEEK_END);
    size_t fs = ftell(fp);
    if (fs != _pHead->_iMemSize)
    {
        fclose(fp);
        return -2;
    }

    fseek(fp, 0L, SEEK_SET);

    size_t iSize = 1024 * 1024 * 10;
    size_t iLen = 0;
    char *pBuffer = new char[iSize];
    while (true)
    {
        int ret = fread(pBuffer, 1, iSize, fp);
        if (ret == 0)
        {
            break;
        }

        // check version in the first chunk before overwriting anything
        if (iLen == 0)
        {
            tagBitMapHead *tmp = (tagBitMapHead*)pBuffer;
            if (tmp->_cVersion != BM_VERSION)
            {
                fclose(fp);
                delete[] pBuffer;
                return -3;
            }
            if (tmp->_iMemSize != _pHead->_iMemSize)
            {
                fclose(fp);
                delete[] pBuffer;
                return -2;
            }
        }

        memcpy((char*)_pHead + iLen, pBuffer, ret);
        iLen += ret;
    }

    fclose(fp);
    delete[] pBuffer;

    if (iLen != _pHead->_iMemSize)
    {
        return -2;
    }
    return 0;
}

////////////////////////////////////////////////////////////////////////////

// Total memory for iBitCount parallel bitmaps of iElementCount bits each.
size_t TC_BitMap::calcMemSize(size_t iElementCount, unsigned iBitCount)
{
    size_t n = BitMap::calcMemSize(iElementCount);
    if (n * iBitCount < n)
    {
        // FIX: typo "to much" -> "too much" (overflow in size computation)
        throw TC_BitMap_Exception("[TC_BitMap::calcMemSize] memory too much error");
    }
    return n * iBitCount;
}

// Carve pAddr into iBitCount equal contiguous slices, one BitMap each.
void TC_BitMap::create(void *pAddr, size_t iSize, unsigned iBitCount)
{
    assert(iBitCount != 0);
    assert(iSize % iBitCount == 0);

    BitMap bitmap;

    size_t n = iSize / iBitCount;
    for (unsigned i = 0; i < iBitCount; i++)
    {
        bitmap.create((char*)pAddr + i * n, n);
        _bitmaps.push_back(bitmap);
    }
}

// Attach to memory previously laid out by create(); propagates BitMap::connect errors.
int TC_BitMap::connect(void *pAddr, size_t iSize, unsigned iBitCount)
{
    assert(iBitCount != 0);
    assert(iSize % iBitCount == 0);

    BitMap bitmap;

    size_t n = iSize / iBitCount;
    for (unsigned i = 0; i < iBitCount; i++)
    {
        int ret = bitmap.connect((char*)pAddr + i * n, n);
        if (ret != 0)
        {
            return ret;
        }
        _bitmaps.push_back(bitmap);
    }
    return 0;
}

// Read bit i of plane iBit (1-based plane index).
int TC_BitMap::get(size_t i, unsigned iBit)
{
    assert(iBit != 0);
    if (iBit > _bitmaps.size())
    {
        throw TC_BitMap_Exception("[TC_BitMap::get] bit beyond range:" + TC_Common::tostr(iBit) + ">" + TC_Common::tostr(_bitmaps.size()));
    }
    return _bitmaps[iBit - 1].get(i);
}

// Set bit i of plane iBit (1-based plane index).
int TC_BitMap::set(size_t i, unsigned iBit)
{
    assert(iBit != 0);
    if (iBit > _bitmaps.size())
    {
        // FIX: exception message said [TC_BitMap::get] (copy-paste error)
        throw TC_BitMap_Exception("[TC_BitMap::set] bit beyond range:" + TC_Common::tostr(iBit) + ">" + TC_Common::tostr(_bitmaps.size()));
    }
    return _bitmaps[iBit - 1].set(i);
}

// Clear bit i of plane iBit (1-based plane index).
int TC_BitMap::clear(size_t i, unsigned iBit)
{
    assert(iBit != 0);
    if (iBit > _bitmaps.size())
    {
        // FIX: exception message said [TC_BitMap::get] (copy-paste error)
        throw TC_BitMap_Exception("[TC_BitMap::clear] bit beyond range:" + TC_Common::tostr(iBit) + ">" + TC_Common::tostr(_bitmaps.size()));
    }
    return _bitmaps[iBit - 1].clear(i);
}

// Zero one plane, or every plane when iBit == (unsigned)-1.
int TC_BitMap::clear4all(unsigned iBit)
{
    assert(iBit != 0);
    if (iBit != (unsigned)(-1) && iBit > _bitmaps.size())
    {
        // FIX: exception message said [TC_BitMap::get] (copy-paste error)
        throw TC_BitMap_Exception("[TC_BitMap::clear4all] bit beyond range:" + TC_Common::tostr(iBit) + ">" + TC_Common::tostr(_bitmaps.size()));
    }

    for (vector<BitMap>::size_type i = 0; i < _bitmaps.size(); i++)
    {
        if (iBit == (unsigned)(-1) || iBit == i + 1)
        {
            _bitmaps[i].clear4all();
        }
    }

    return 0;
}

// Dump every plane, in order, to one file; 0 on success, -1 on failure.
int TC_BitMap::dump2file(const string &sFile)
{
    FILE *fp = fopen(sFile.c_str(), "wb");
    if (fp == NULL)
    {
        return -1;
    }

    for (unsigned i = 0; i < _bitmaps.size(); i++)
    {
        size_t ret = fwrite((void*)_bitmaps[i].getAddr(), 1, _bitmaps[i].getMemSize(), fp);
        if (ret != _bitmaps[i].getMemSize())
        {
            fclose(fp);
            return -1;
        }
    }

    fclose(fp);
    return 0;
}

// Load a multi-plane dump back into place. Relies on the planes being laid
// out contiguously starting at _bitmaps[0].getAddr() (as create() arranges).
// Returns 0 on success, -1 open failure, -2 size mismatch, -3 version mismatch.
int TC_BitMap::load5file(const string &sFile)
{
    FILE *fp = fopen(sFile.c_str(), "rb");
    if (fp == NULL)
    {
        return -1;
    }

    // total memory size across all planes
    size_t iAllSize = 0;
    for (unsigned i = 0; i < _bitmaps.size(); i++)
    {
        iAllSize += _bitmaps[i].getMemSize();
    }

    fseek(fp, 0L, SEEK_END);
    size_t fs = ftell(fp);
    if (fs != iAllSize)
    {
        fclose(fp);
        return -2;
    }

    fseek(fp, 0L, SEEK_SET);

    size_t iSize = 1024 * 1024 * 10;
    size_t iLen = 0;
    char *pBuffer = new char[iSize];
    while (true)
    {
        int ret = fread(pBuffer, 1, iSize, fp);
        if (ret == 0)
        {
            break;
        }

        // check version in the first chunk before overwriting anything
        if (iLen == 0)
        {
            BitMap::tagBitMapHead *tmp = (BitMap::tagBitMapHead*)pBuffer;
            if (tmp->_cVersion != BM_VERSION)
            {
                fclose(fp);
                delete[] pBuffer;
                return -3;
            }
            if (tmp->_iMemSize != _bitmaps[0].getMemSize())
            {
                fclose(fp);
                delete[] pBuffer;
                return -2;
            }
        }

        memcpy((char*)_bitmaps[0].getAddr() + iLen, pBuffer, ret);
        iLen += ret;
    }

    fclose(fp);
    delete[] pBuffer;

    if (iLen != iAllSize)
    {
        return -2;
    }
    return 0;
}

}
// Competitive-programming solution (reads "visitfj.in", writes "visitfj.out").
// Grid shortest-path: Dijkstra over cells reachable in exactly 3 steps,
// followed by a depth-limited BFS from the goal corner.
#include <bits/stdc++.h>
#define f first
#define s second
using namespace std;
typedef long long ll;
typedef vector<int> vi;
typedef pair<int, int> pii;
// Debug-printing helpers for common containers (unused in the solve path).
template <typename T1, typename T2> ostream &operator <<(ostream &os, pair<T1, T2> p){os << p.first << " " << p.second; return os;}
template <typename T> ostream &operator <<(ostream &os, vector<T> &v){for(T i : v)os << i << ", "; return os;}
template <typename T> ostream &operator <<(ostream &os, set<T> s){for(T i : s) os << i << ", "; return os;}
template <typename T1, typename T2> ostream &operator <<(ostream &os, map<T1, T2> m){for(pair<T1, T2> i : m) os << i << endl; return os;}
// Offsets for every cell reachable by exactly three orthogonal unit moves
// (Manhattan distance 3 or 1 after folding back-and-forth moves).
int dx[] = {1, -1, 0, 0, 3, -3, 0, 0, 2, 2, -2, -2, 1, 1, -1, -1};
int dy[] = {0, 0, -1, 1, 0, 0, -3, 3, 1, -1, 1, -1, 2, -2, 2, -2};
int N, T;          // grid size and per-step time cost
int a[100][100];   // per-cell cost paid on arrival
// All in-bounds cells reachable from (x, y) using the 16 offsets above.
vector<pii> get_moves(int x, int y){
    vector<pii> ans;
    for(int i = 0; i < 16; i++){
        int nx = x + dx[i];
        int ny = y + dy[i];
        if(nx >= 0 && ny >= 0 && nx < N && ny < N){
            ans.push_back(pii(nx, ny));
        }
    }
    return ans;
}
// Dijkstra from src; edge weight = 3*T (three unit moves) plus the
// destination cell's cost. Fills the caller's 100x100 dist array.
void dij(pii src, int (&dist)[100][100]){
    priority_queue<pair<int, pii>, vector<pair<int, pii>>, greater<pair<int, pii>>> pq;
    // NOTE(review): bool vis[N][N] is a variable-length array — a GCC/Clang
    // extension, not standard C++; fine for contest judges, not portable.
    bool vis[N][N];
    // NOTE(review): fill(dist[0], dist[N-1]+N, ...) covers rows 0..N-2 fully
    // (100 entries each) plus the first N of row N-1 — every cell the
    // algorithm reads is initialized, but the tail of the array is not.
    fill(dist[0], dist[N-1]+N, INT_MAX);
    fill(vis[0], vis[N-1]+N, false);
    dist[src.f][src.s] = 0;
    pq.push(make_pair(dist[src.f][src.s], src));
    while(!pq.empty()){
        pii lst = pq.top().s;
        pq.pop();
        // Skip stale queue entries (lazy-deletion Dijkstra).
        if(vis[lst.f][lst.s]){
            continue;
        }
        vis[lst.f][lst.s] = true;
        vector<pii> go_nxt = get_moves(lst.f, lst.s);
        for(pii i : go_nxt){
            if(dist[i.f][i.s] > dist[lst.f][lst.s] + 3 * T + a[i.f][i.s]){
                dist[i.f][i.s] = dist[lst.f][lst.s] + 3 * T + a[i.f][i.s];
                pq.push(make_pair(dist[i.f][i.s], pii(i.f, i.s)));
            }
        }
    }
}
int main(){
    freopen("visitfj.in", "r", stdin);
    freopen("visitfj.out", "w", stdout);
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    cin >> N >> T;
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++){
            cin >> a[i][j];
        }
    }
    int dist[100][100];
    int ans = INT_MAX;
    dij(pii(0, 0), dist);
    // The goal may be reached mid-triple: BFS outward from (N-1, N-1) for up
    // to 2 extra unit steps (d < 3), adding T per leftover step. Duplicate
    // queue entries are harmless at this tiny depth.
    queue<pair<pii, int>> q;
    q.push(make_pair(pii(N-1, N-1), 0));
    while(!q.empty()){
        pii loc = q.front().f;
        int x = loc.f, y = loc.s;
        int d = q.front().s;
        q.pop();
        // Queue is ordered by depth, so the first d >= 3 ends the search.
        if(d >= 3){
            break;
        }
        ans = min(ans, dist[x][y] + T * d);
        // Only the first 4 offsets (unit moves) apply here.
        for(int i = 0; i < 4; i++){
            if(x+dx[i] < N && y + dy[i] < N && x + dx[i] >= 0 && y + dy[i] >= 0){
                q.push(make_pair(pii(x+dx[i], y+dy[i]), d+1));
            }
        }
    }
    cout << ans << endl;
    return 0;
}
#include <iostream> #include "parser.h" using namespace std; #define MAX_LINE 50000 int parse_tick(string market, string line, struct Tick* p) { size_t num = 0; unsigned pre = 0; int ret = -1; struct Tick tick; sprintf_s(tick.ExchangeID, "%s", market.c_str()); while ((ret = line.find(',', pre)) > 0) { string element = line.substr(pre, ret - pre); int line_len = element.size(); if (num == 0) { string t0 = element.substr(0, 8); string t1 = element.substr(8, line_len - 3 - 8); string t2 = element.substr(line_len - 3, 3); tick.ActionDay = atoi(t0.c_str()); tick.TradingDay = tick.ActionDay; tick.UpdateTime = atoi(t1.c_str()); tick.UpdateMillisec = atoi(t2.c_str()); } else if (num == 1) { sprintf_s(tick.Symbol, "%s", element.c_str()); sprintf_s(tick.InstrumentID, "%s", element.c_str()); } else if (num == 2) { tick.OpenPrice = atof(element.c_str()); } else if (num == 3) { tick.ClosePrice = atof(element.c_str()); } else if (num == 4) { tick.HighestPrice = atof(element.c_str()); } else if (num == 5) { tick.LowestPrice = atof(element.c_str()); } else if (num == 6) { tick.Turnover = atof(element.c_str()); } else if (num == 7) { tick.Volume = atoi(element.c_str()); } else if (num == 8) { tick.LastPrice = atof(element.c_str()); } else if (num == 9) { tick.AveragePrice = atof(element.c_str()); } else if (num == 10) { tick.PreClosePrice = atof(element.c_str()); } else if (num == 11) { tick.UpperLimitPrice = atof(element.c_str()); } else if (num == 12) { tick.LowerLimitPrice = atof(element.c_str()); } else if (num == 13) { tick.OpenInterest = atoll(element.c_str()); } else if (num == 14) { tick.PreOpenInterest = atoll(element.c_str()); } else if (num == 15) { tick.PreSettlementPrice = atof(element.c_str()); } else if (num == 16) { tick.SettlementPrice = atof(element.c_str()); } else if (num == 17) { tick.AskPrice1 = atof(element.c_str()); } else if (num == 18) { tick.AskPrice2 = atof(element.c_str()); } else if (num == 19) { tick.AskPrice3 = atof(element.c_str()); } else if (num 
== 20) { tick.AskPrice4 = atof(element.c_str()); } else if (num == 21) { tick.AskPrice5 = atof(element.c_str()); } else if (num == 22) { tick.AskVolume1 = atoi(element.c_str()); } else if (num == 23) { tick.AskVolume2 = atoi(element.c_str()); } else if (num == 24) { tick.AskVolume3 = atoi(element.c_str()); } else if (num == 25) { tick.AskVolume4 = atoi(element.c_str()); } else if (num == 26) { tick.AskVolume5 = atoi(element.c_str()); } else if (num == 27) { tick.BidPrice1 = atof(element.c_str()); } else if (num == 28) { tick.BidPrice2 = atof(element.c_str()); } else if (num == 29) { tick.BidPrice3 = atof(element.c_str()); } else if (num == 30) { tick.BidPrice4 = atof(element.c_str()); } else if (num == 31) { tick.BidPrice5 = atof(element.c_str()); } else if (num == 32) { tick.BidVolume1 = atoi(element.c_str()); } else if (num == 33) { tick.BidVolume2 = atoi(element.c_str()); } else if (num == 34) { tick.BidVolume3 = atoi(element.c_str()); } else if (num == 35) { tick.BidVolume4 = atoi(element.c_str()); } pre = ret + 1; num++; } string element = line.substr(pre, line.size() - pre); tick.BidVolume5 = atoi(element.c_str()); memcpy(p, &tick, sizeof(struct Tick)); return 0; } size_t append_csv2_buffer(string market, string csv, struct Tick* ptr, size_t ava_num) { size_t num = 0; unsigned pre = 0; int ret = -1; while ((ret = csv.find('\n', pre)) > 0) { string line = csv.substr(pre, ret + 1 - pre); if (num < ava_num) { if (0 == parse_tick(market, line, ptr + num)) { num++; } } else { break; } pre = ret + 1; } return num; } int parse_string(const char* c_market,const char* content, struct Tick** result, unsigned* len) { string market(c_market); string tts((const char*)content); *result = (struct Tick*)malloc(sizeof(struct Tick) * MAX_LINE); int ava_num = MAX_LINE; *len = append_csv2_buffer(market, tts, *result,ava_num); return 0; } void m_FreeMem(void* ptr) { free(ptr); }
// // Created by yidafu on 2017/10/15. // #include <iostream> #include "language.h" #include "Stack.h" #include "Queue.h" #include "Deque.h" void print (ElemType e){ std::cout << e << "|---|"; } void stack_test(){ Stack *s = new Stack; char ch = 'e'; // 入栈测试 s->push(ch); ch ++; s->push(ch); ch ++; s->push(ch); ch ++; s->push(ch); s->push(ch); ch ++; s->push(ch); ch ++; s->push(ch); ch ++; s->push(ch); // 遍历测试 s->traverse(print); std::cout << std::endl; // 循环出栈测试 while ( ! s->isEmpty() ) { s->pop(ch); printf("%c",ch) ; } // 越界删除测试 bool test_bool = s->pop(ch); delete s; } void queue_test() { Queue *queue = new Queue; // // 空队列清空测试 // queue->clear(); char ch = 66; // 入队测试 queue->enQueue(ch); ch ++; queue->enQueue(ch); ch ++; // // 中途析构测试 // delete queue; queue->enQueue(ch); ch ++; queue->enQueue(ch); ch ++; int queue_length = queue->length(); queue->getHead(ch); while ( ! queue->isEmpty() ) { queue->delQueue(ch); std::cout << ch << "\t"; } // // 清除测试 // queue->clear(); // // 出队测试 // queue->delQueue(ch); // std::cout << ch << "---"; // queue->delQueue(ch); // std::cout << ch << "---"; // queue->delQueue(ch); // std::cout << ch << "---"; // queue->delQueue(ch); // std::cout << ch << "---"; // // // 删除越界测试 // bool test_bool = queue->delQueue(ch); // std::cout << ch << "---"; // 析构测试 delete queue; } void deque_test() { Deque deque; char ch = 66; deque.enQueue(ch); ch ++; deque.enQueue(ch); ch ++; deque.enQueue(ch); ch ++; deque.enQueue(ch); ch ++; deque.enFront(ch); delete &deque; }
////////////////////////////////////////////////////////////////////////////// // // Detours Test Program (tracebld.cpp of tracebld.exe) // // Microsoft Research Detours Package // // Copyright (c) Microsoft Corporation. All rights reserved. // #include <windows.h> #include <stdio.h> #include <stdlib.h> #include <stddef.h> #ifdef _MSC_VER #pragma warning(push) #endif #if _MSC_VER > 1400 #pragma warning(disable:6102 6103) // /analyze warnings #endif #include <strsafe.h> #ifdef _MSC_VER #pragma warning(pop) #endif #include <detours.h> #include "tracebld.h" #if (_MSC_VER < 1299) typedef ULONG * PULONG_PTR; typedef ULONG ULONG_PTR; typedef LONG * PLONG_PTR; typedef LONG LONG_PTR; #endif ////////////////////////////////////////////////////////////////////////////// #ifdef _MSC_VER #pragma warning(disable:4127) // Many of our asserts are constants. #endif #define ASSERT_ALWAYS(x) \ do { \ if (!(x)) { \ AssertMessage(#x, __FILE__, __LINE__); \ DebugBreak(); \ } \ } while (0) #ifndef NDEBUG #define ASSERT(x) ASSERT_ALWAYS(x) #else #define ASSERT(x) #endif #define UNUSED(c) (c) = (c) ////////////////////////////////////////////////////////////////////////////// enum { CLIENT_AWAITING_PIPE_ACCEPT = 0x21, CLIENT_AWAITING_PIPE_DATA = 0x22, }; typedef struct _CLIENT : OVERLAPPED { HANDLE hPipe; LONG nClient; HANDLE hFile; BOOL fAwaitingAccept; PVOID Zero; TBLOG_MESSAGE Message; BOOL LogMessage(PTBLOG_MESSAGE pMessage, DWORD nBytes); BOOL LogMessageV(const CHAR *pszMsg, ...); } CLIENT, *PCLIENT; ////////////////////////////////////////////////////////////////////////////// // CHAR s_szLogFile[MAX_PATH]; CHAR s_szPipe[MAX_PATH]; LONG s_nActiveClients = 0; LONG s_nTotalClients = 0; LONGLONG s_llStartTime; BOOL s_fVerbose = FALSE; TBLOG_PAYLOAD s_Payload; ////////////////////////////////////////////////////////////////////////////// // VOID MyErrExit(PCSTR pszMsg) { DWORD error = GetLastError(); fprintf(stderr, "TRACEBLD: Error %d in %s.\n", (int)error, pszMsg); fflush(stderr); 
exit(1); } ////////////////////////////////////////////////////////////////////////////// // BOOL CLIENT::LogMessageV(const CHAR *pszMsg, ...) { DWORD cbWritten = 0; CHAR szBuf[1024]; PCHAR pcchEnd = szBuf + ARRAYSIZE(szBuf) - 2; PCHAR pcchCur = szBuf; HRESULT hr; va_list args; va_start(args, pszMsg); hr = StringCchVPrintfExA(pcchCur, pcchEnd - pcchCur, &pcchCur, NULL, STRSAFE_NULL_ON_FAILURE, pszMsg, args); va_end(args); if (FAILED(hr)) { goto cleanup; } hr = StringCchPrintfExA(pcchCur, szBuf + (ARRAYSIZE(szBuf)) - pcchCur, &pcchCur, NULL, STRSAFE_NULL_ON_FAILURE, "\n"); cleanup: WriteFile(hFile, szBuf, (DWORD)(pcchCur - szBuf), &cbWritten, NULL); return TRUE; } BOOL CLIENT::LogMessage(PTBLOG_MESSAGE pMessage, DWORD nBytes) { // Sanity check the size of the message. // if (nBytes > pMessage->nBytes) { nBytes = pMessage->nBytes; } if (nBytes >= sizeof(*pMessage)) { nBytes = sizeof(*pMessage) - 1; } // Don't log message if there isn't and message text. // DWORD cbWrite = nBytes - offsetof(TBLOG_MESSAGE, szMessage); if (cbWrite <= 0 ) { return TRUE; } if (s_fVerbose) { printf("[%s]", pMessage->szMessage); } DWORD cbWritten = 0; WriteFile(hFile, pMessage->szMessage, cbWrite, &cbWritten, NULL); return TRUE; } BOOL CloseConnection(PCLIENT pClient) { InterlockedDecrement(&s_nActiveClients); if (pClient != NULL) { if (pClient->hPipe != INVALID_HANDLE_VALUE) { //FlushFileBuffers(pClient->hPipe); if (!DisconnectNamedPipe(pClient->hPipe)) { DWORD error = GetLastError(); pClient->LogMessageV("<!-- Error %d in DisconnectNamedPipe. -->\n", (int)error); } CloseHandle(pClient->hPipe); pClient->hPipe = INVALID_HANDLE_VALUE; } if (pClient->hFile != INVALID_HANDLE_VALUE) { CloseHandle(pClient->hFile); pClient->hFile = INVALID_HANDLE_VALUE; } GlobalFree(pClient); pClient = NULL; } return TRUE; } // Creates a pipe instance and initiate an accept request. 
// PCLIENT CreatePipeConnection(HANDLE hCompletionPort, LONG nClient) { HANDLE hPipe = CreateNamedPipeA(s_szPipe, // pipe name PIPE_ACCESS_INBOUND | // read-only access FILE_FLAG_OVERLAPPED, // overlapped mode PIPE_TYPE_MESSAGE | // message-type pipe PIPE_READMODE_MESSAGE | // message read mode PIPE_WAIT, // blocking mode PIPE_UNLIMITED_INSTANCES, // unlimited instances 0, // output buffer size 0, // input buffer size 20000, // client time-out NULL); // no security attributes if (hPipe == INVALID_HANDLE_VALUE) { MyErrExit("CreateNamedPipe"); } // Allocate the client data structure. // PCLIENT pClient = (PCLIENT) GlobalAlloc(GPTR, sizeof(CLIENT)); if (pClient == NULL) { MyErrExit("GlobalAlloc pClient"); } CHAR szLogFile[MAX_PATH]; StringCchPrintfA(szLogFile, ARRAYSIZE(szLogFile), "%s.%08d.xml", s_szLogFile, nClient); ZeroMemory(pClient, sizeof(*pClient)); pClient->hPipe = hPipe; pClient->nClient = nClient; pClient->fAwaitingAccept = TRUE; pClient->hFile = CreateFileA(szLogFile, GENERIC_WRITE, FILE_SHARE_READ, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN, NULL); if (pClient->hFile == INVALID_HANDLE_VALUE) { fprintf(stderr, "TRACEBLD: Error opening output file: %s: %d\n\n", szLogFile, (int)GetLastError()); fflush(stderr); MyErrExit("CreateFile"); } // Associate file with our complietion port. // if (!CreateIoCompletionPort(pClient->hPipe, hCompletionPort, (ULONG_PTR)pClient, 0)) { MyErrExit("CreateIoComplietionPort pClient"); } if (!ConnectNamedPipe(hPipe, pClient)) { DWORD error = GetLastError(); if (error == ERROR_IO_PENDING) { return NULL; } if (error == ERROR_PIPE_CONNECTED) { #if 0 pClient->LogMessageV("<!-- ConnectNamedPipe client already connected. 
-->"); #endif pClient->fAwaitingAccept = FALSE; } else if (error != ERROR_IO_PENDING && error != ERROR_PIPE_LISTENING) { MyErrExit("ConnectNamedPipe"); } } else { fprintf(stderr, "*** ConnectNamedPipe accepted immediately.\n"); #if 0 pClient->LogMessageV("<!-- ConnectNamedPipe accepted immediately. -->"); #endif pClient->fAwaitingAccept = FALSE; } return pClient; } BOOL DoRead(PCLIENT pClient) { SetLastError(NO_ERROR); DWORD nBytes = 0; BOOL b = ReadFile(pClient->hPipe, &pClient->Message, sizeof(pClient->Message), &nBytes, pClient); DWORD error = GetLastError(); if (b && error == NO_ERROR) { return TRUE; } if (error == ERROR_BROKEN_PIPE) { pClient->LogMessageV("<!-- **** ReadFile 002 *** ERROR_BROKEN_PIPE [%d] -->\n", nBytes); CloseConnection(pClient); return TRUE; } else if (error == ERROR_INVALID_HANDLE) { // ? pClient->LogMessageV("<!-- **** ReadFile 002 *** ERROR_INVALID_HANDLE -->\n"); // I have no idea why this happens. Our remedy is to drop the connection. return TRUE; } else if (error != ERROR_IO_PENDING) { if (b) { pClient->LogMessageV("<!-- **** ReadFile 002 succeeded: %d -->\n", (int)error); } else { pClient->LogMessageV("<!-- **** ReadFile 002 failed: %d -->\n", (int)error); } CloseConnection(pClient); } return TRUE; } DWORD WINAPI WorkerThread(LPVOID pvVoid) { PCLIENT pClient; BOOL b; LPOVERLAPPED lpo; DWORD nBytes; HANDLE hCompletionPort = (HANDLE)pvVoid; for (BOOL fKeepLooping = TRUE; fKeepLooping;) { pClient = NULL; lpo = NULL; nBytes = 0; b = GetQueuedCompletionStatus(hCompletionPort, &nBytes, (PULONG_PTR)&pClient, &lpo, INFINITE); if (!b) { if (pClient) { if (GetLastError() == ERROR_BROKEN_PIPE) { pClient->LogMessageV("<!-- Client closed pipe. 
-->"); } else { pClient->LogMessageV("<!-- *** GetQueuedCompletionStatus failed %d -->", GetLastError()); } CloseConnection(pClient); } continue; } if (pClient->fAwaitingAccept) { BOOL fAgain = TRUE; while (fAgain) { LONG nClient = InterlockedIncrement(&s_nTotalClients); InterlockedIncrement(&s_nActiveClients); pClient->fAwaitingAccept = FALSE; PCLIENT pNew = CreatePipeConnection(hCompletionPort, nClient); fAgain = FALSE; if (pNew != NULL) { fAgain = !pNew->fAwaitingAccept; DoRead(pNew); } } } else { if (nBytes <= offsetof(TBLOG_MESSAGE, szMessage)) { pClient->LogMessageV("</t:Process>\n"); CloseConnection(pClient); continue; } pClient->LogMessage(&pClient->Message, nBytes); } DoRead(pClient); } return 0; } BOOL CreateWorkers(HANDLE hCompletionPort) { DWORD dwThread; HANDLE hThread; DWORD i; SYSTEM_INFO SystemInfo; GetSystemInfo(&SystemInfo); for (i = 0; i < 1; i++) { hThread = CreateThread(NULL, 0, WorkerThread, hCompletionPort, 0, &dwThread); if (!hThread) { MyErrExit("CreateThread WorkerThread"); // Unreachable: return FALSE; } CloseHandle(hThread); } return TRUE; } DWORD CopyEnvironment(PWCHAR pwzzOut, PCWSTR pwzzIn) { PCWSTR pwzzBeg = pwzzOut; while (*pwzzIn) { while (*pwzzIn) { *pwzzOut++ = *pwzzIn++; } *pwzzOut++ = *pwzzIn++; // Copy zero. } *pwzzOut++ = '\0'; // Add last zero. 
return (DWORD)(pwzzOut - pwzzBeg); } ////////////////////////////////////////////////////////////////////////////// // int main(int argc, char **argv) { HANDLE hCompletionPort; BOOL fNeedHelp = FALSE; WCHAR wzzDrop[1024] = L"build\0nmake\0"; GetSystemTimeAsFileTime((FILETIME *)&s_llStartTime); StringCchPrintfA(s_szPipe, ARRAYSIZE(s_szPipe), "%s.%d", TBLOG_PIPE_NAME, GetCurrentProcessId()); int arg = 1; for (; arg < argc && (argv[arg][0] == '-' || argv[arg][0] == '/'); arg++) { CHAR *argn = argv[arg] + 1; CHAR *argp = argn; while (*argp && *argp != ':' && *argp != '=') { argp++; } if (*argp == ':' || *argp == '=') { *argp++ = '\0'; } switch (argn[0]) { case 'd': // Drop Processes case 'D': if (*argp) { PWCHAR pwz = wzzDrop; while (*argp) { if (*argp == ';') { *pwz++ = '\0'; } else { *pwz++ = *argp++; } } *pwz++ = '\0'; *pwz = '\0'; } case 'o': // Output file. case 'O': StringCchCopyA(s_szLogFile, ARRAYSIZE(s_szLogFile), argp); break; case 'v': // Verbose case 'V': s_fVerbose = TRUE; break; case '?': // Help. fNeedHelp = TRUE; break; default: fNeedHelp = TRUE; printf("TRACEBLD: Bad argument: %s:%s\n", argn, argp); break; } } if (arg >= argc) { fNeedHelp = TRUE; } if (fNeedHelp) { printf("Usage:\n" " tracebld [options] command {command arguments}\n" "Options:\n" " /o:file Log all events to the output files.\n" " /? Display this help message.\n" "Summary:\n" " Runs the build commands and figures out which files have dependencies..\n" "\n"); exit(9001); } // Create the completion port. hCompletionPort = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); if (hCompletionPort == NULL) { MyErrExit("CreateIoCompletionPort"); } // Create completion port worker threads. // CreateWorkers(hCompletionPort); CreatePipeConnection(hCompletionPort, 0); printf("TRACEBLD: Ready for clients. Press Ctrl-C to stop.\n"); /////////////////////////////////////////////////////////// Validate DLLs. 
// CHAR szTmpPath[MAX_PATH]; CHAR szExePath[MAX_PATH]; CHAR szDllPath[MAX_PATH]; PCHAR pszFilePart = NULL; if (!GetModuleFileNameA(NULL, szTmpPath, ARRAYSIZE(szTmpPath))) { printf("TRACEBLD: Couldn't retreive exe name.\n"); return 9002; } if (!GetFullPathNameA(szTmpPath, ARRAYSIZE(szExePath), szExePath, &pszFilePart) || pszFilePart == NULL) { printf("TRACEBLD: Error: %s is not a valid path name..\n", szTmpPath); return 9002; } StringCchCopyA(pszFilePart, szExePath + ARRAYSIZE(szExePath) - pszFilePart, "trcbld" DETOURS_STRINGIFY(DETOURS_BITS) ".dll"); StringCchCopyA(szDllPath, ARRAYSIZE(szDllPath), szExePath); ////////////////////////////////////////////////////////////////////////// STARTUPINFOA si; PROCESS_INFORMATION pi; CHAR szCommand[2048]; CHAR szExe[MAX_PATH]; CHAR szFullExe[MAX_PATH] = "\0"; PCHAR pszFileExe = NULL; ZeroMemory(&si, sizeof(si)); ZeroMemory(&pi, sizeof(pi)); si.cb = sizeof(si); szCommand[0] = L'\0'; StringCchCopyA(szExe, sizeof(szExe), argv[arg]); for (; arg < argc; arg++) { if (strchr(argv[arg], ' ') != NULL || strchr(argv[arg], '\t') != NULL) { StringCchCatA(szCommand, sizeof(szCommand), "\""); StringCchCatA(szCommand, sizeof(szCommand), argv[arg]); StringCchCatA(szCommand, sizeof(szCommand), "\""); } else { StringCchCatA(szCommand, sizeof(szCommand), argv[arg]); } if (arg + 1 < argc) { StringCchCatA(szCommand, sizeof(szCommand), " "); } } printf("TRACEBLD: Starting: `%s'\n", szCommand); printf("TRACEBLD: with `%s'\n", szDllPath); fflush(stdout); DWORD dwFlags = CREATE_DEFAULT_ERROR_MODE | CREATE_SUSPENDED; SetLastError(0); SearchPathA(NULL, szExe, ".exe", ARRAYSIZE(szFullExe), szFullExe, &pszFileExe); if (!DetourCreateProcessWithDllExA(szFullExe[0] ? 
szFullExe : NULL, szCommand, NULL, NULL, TRUE, dwFlags, NULL, NULL, &si, &pi, szDllPath, NULL)) { printf("TRACEBLD: DetourCreateProcessWithDllEx failed: %d\n", (int)GetLastError()); ExitProcess(9007); } ZeroMemory(&s_Payload, sizeof(s_Payload)); s_Payload.nParentProcessId = GetCurrentProcessId(); s_Payload.nTraceProcessId = GetCurrentProcessId(); s_Payload.nGeneology = 1; s_Payload.rGeneology[0] = 0; StringCchCopyW(s_Payload.wzStdin, ARRAYSIZE(s_Payload.wzStdin), L"\\\\.\\CONIN$"); StringCchCopyW(s_Payload.wzStdout, ARRAYSIZE(s_Payload.wzStdout), L"\\\\.\\CONOUT$"); StringCchCopyW(s_Payload.wzStderr, ARRAYSIZE(s_Payload.wzStderr), L"\\\\.\\CONOUT$"); StringCchCopyW(s_Payload.wzParents, ARRAYSIZE(s_Payload.wzParents), L""); CopyEnvironment(s_Payload.wzzDrop, wzzDrop); LPWCH pwStrings = GetEnvironmentStringsW(); CopyEnvironment(s_Payload.wzzEnvironment, pwStrings); FreeEnvironmentStringsW(pwStrings); if (!DetourCopyPayloadToProcess(pi.hProcess, s_guidTrace, &s_Payload, sizeof(s_Payload))) { printf("TRACEBLD: DetourCopyPayloadToProcess failed: %d\n", (int)GetLastError()); ExitProcess(9008); } ResumeThread(pi.hThread); WaitForSingleObject(pi.hProcess, INFINITE); DWORD dwResult = 0; if (!GetExitCodeProcess(pi.hProcess, &dwResult)) { printf("TRACEBLD: GetExitCodeProcess failed: %d\n", (int)GetLastError()); return 9008; } printf("TRACEBLD: %d processes.\n", (int)s_nTotalClients); return dwResult; } // //////////////////////////////////////////////////////////////////////////////
#include "coreir.h"
#include "coreirsim.h"

using namespace CoreIR;
using namespace std;

// Exercise the split-inouts pass on a module with a single tristate pad and
// simulate both directions of data flow through the resulting split ports.
void tribuf_test() {
  Context* ctx = newContext();
  Namespace* global = ctx->getGlobal();

  uint width = 1;

  // One inout pad plus an enable and one signal for each data direction.
  Type* padType = ctx->Record({{"io", ctx->BitInOut()},
                               {"en", ctx->BitIn()},
                               {"from_io", ctx->Bit()},
                               {"to_io", ctx->BitIn()}});

  Module* padMod = global->newModuleDecl("io", padType);
  ModuleDef* body = padMod->newModuleDef();

  body->addInstance("tristate_buf", "coreir.tribuf",
                    {{"width", Const::make(ctx, width)}});
  body->addInstance("tristate_out", "coreir.ibuf",
                    {{"width", Const::make(ctx, width)}});

  // Drive the pad from to_io when enabled; always sample the pad into from_io.
  body->connect("tristate_buf.en", "self.en");
  body->connect("tristate_buf.in.0", "self.to_io");
  body->connect("tristate_buf.out.0", "self.io");
  body->connect("tristate_out.in.0", "self.io");
  body->connect("tristate_out.out.0", "self.from_io");
  padMod->setDef(body);

  cout << "Before splitting" << endl;
  padMod->print();

  ctx->runPasses({"split-inouts"});

  cout << "After splitting" << endl;
  padMod->print();

  // The pass collapses the buffer pair; a single instance should remain.
  assert(body->getInstances().size() == 1);

  SimulatorState sim(padMod);

  // en = 1: data flows into the pad; en = 0: the pad drives the outputs.
  sim.setValue("self.en", BitVector(1, 0));
  sim.setValue("self.io_input", BitVector(1, 1));
  sim.setValue("self.to_io", BitVector(1, 0));
  sim.execute();

  assert(sim.getBitVec("self.from_io") == BitVector(1, 1));

  sim.setValue("self.io_input", BitVector(1, 0));
  sim.execute();

  assert(sim.getBitVec("self.from_io") == BitVector(1, 0));

  // Now push data out through the pad instead.
  sim.setValue("self.en", BitVector(1, 1));
  sim.setValue("self.to_io", BitVector(1, 0));
  sim.execute();

  assert(sim.getBitVec("self.from_io") == BitVector(1, 0));
  assert(sim.getBitVec("self.io_output") == BitVector(1, 0));

  deleteContext(ctx);
}

// Same tristate structure, but the pad is wired through an inout port of an
// enclosing module; split-inouts + flatten must rename the split ports.
void io_to_io_test() {
  Context* ctx = newContext();
  Namespace* global = ctx->getGlobal();

  uint width = 1;

  {
    Type* innerType = ctx->Record({{"io", ctx->BitInOut()},
                                   {"en", ctx->BitIn()},
                                   {"from_io", ctx->Bit()},
                                   {"to_io", ctx->BitIn()}});

    Module* inner = global->newModuleDecl("inner", innerType);
    ModuleDef* body = inner->newModuleDef();

    body->addInstance("tristate_buf", "coreir.tribuf",
                      {{"width", Const::make(ctx, width)}});
    body->addInstance("tristate_out", "coreir.ibuf",
                      {{"width", Const::make(ctx, width)}});

    body->connect("tristate_buf.en", "self.en");
    body->connect("tristate_buf.in.0", "self.to_io");
    body->connect("tristate_buf.out.0", "self.io");
    body->connect("tristate_out.in.0", "self.io");
    body->connect("tristate_out.out.0", "self.from_io");
    inner->setDef(body);
  }

  {
    Type* outerType = ctx->Record({{"io_port", ctx->BitInOut()},
                                   {"in", ctx->BitIn()},
                                   {"out", ctx->Bit()},
                                   {"en", ctx->BitIn()}});

    Module* outerMod = global->newModuleDecl("outer", outerType);
    ModuleDef* body = outerMod->newModuleDef();

    body->addInstance("inner_mod", "global.inner");

    // Forward every outer port straight through to the inner pad module.
    body->connect("self.io_port", "inner_mod.io");
    body->connect("self.en", "inner_mod.en");
    body->connect("self.out", "inner_mod.from_io");
    body->connect("self.in", "inner_mod.to_io");

    outerMod->setDef(body);

    cout << "PRINT OUTER" << endl;
    outerMod->print();
  }

  ctx->runPasses({"rungenerators"});

  cout << "AFTER RUNGENERATORS" << endl;
  auto outer = global->getModule("outer");
  outer->print();

  ctx->runPasses({"split-inouts", "flatten"});

  cout << "After splitting" << endl;
  outer->print();

  // Note input and output need to be renamed
  SimulatorState sim(outer);

  // en = 0: the IO input feeds the module output.
  sim.setValue("self.en", BitVector(1, 0));
  sim.setValue("self.in", BitVector(1, 1));
  sim.setValue("self.io_port_input", BitVector(1, 0));
  sim.execute();

  assert(sim.getBitVec("self.out") == BitVector(1, 0));

  sim.setValue("self.io_port_input", BitVector(1, 1));
  sim.execute();

  assert(sim.getBitVec("self.out") == BitVector(1, 1));

  // en = 1: the module input feeds the IO output.
  sim.setValue("self.en", BitVector(1, 1));
  sim.setValue("self.in", BitVector(1, 1));
  sim.execute();

  assert(sim.getBitVec("self.io_port_output") == BitVector(1, 1));

  sim.setValue("self.in", BitVector(1, 0));
  sim.execute();

  assert(sim.getBitVec("self.io_port_output") == BitVector(1, 0));

  deleteContext(ctx);
}

int main() {
  tribuf_test();
  io_to_io_test();
}
#include <iostream>
#include <cstdio>
#include <cstring>
#include <cassert>
#include <cctype>
using namespace std;
typedef long long lint;

const int N = 100010;        // capacity for the candidate string (1-indexed)
const int MOD = 1e9 + 7;     // modulus applied to the final answer only

int s[N];                    // current candidate string, letters in [1, k]
int z[N];                    // Z-array: z[i] = lcp(s, s[i..n]) for i >= 2
int n, k;                    // string length and alphabet size
lint ans;                    // running (unreduced) sum over all strings

// Read one integer: skip leading non-digits, accept an optional '-'.
inline int read_int() {
    char c;
    while (!isdigit(c = getchar()) && c != '-')
        ;
    bool negative = (c == '-');
    if (negative) {
        c = getchar();
    }
    int value = 0;
    do {
        value = value * 10 - '0' + c;
    } while (isdigit(c = getchar()));
    return negative ? -value : value;
}

// Z-algorithm over s[1..n]; returns the largest z-value, i.e. the length of
// the longest prefix of s that reappears starting at some position i >= 2.
inline int calc() {
    z[1] = 0;
    int best = 0;
    for (int i = 2, r = 1; i <= n; i++) {
        // Seed from the rightmost known match window [r, r + z[r]].
        z[i] = max(min(r + z[r] - i, z[i - r + 1]), 0);
        while (i + z[i] <= n && s[1 + z[i]] == s[i + z[i]]) {
            z[i]++;
        }
        if (i + z[i] > r + z[r]) {
            r = i;
        }
        if (z[i] > best) {
            best = z[i];
        }
    }
    return best;
}

// Enumerate every length-n string over {1..k} and accumulate calc() into ans.
void dfs(int pos) {
    if (pos > n) {
        ans += calc();
        return;
    }
    for (int letter = 1; letter <= k; letter++) {
        s[pos] = letter;
        dfs(pos + 1);
    }
}

int main() {
#ifndef ONLINE_JUDGE
    freopen("a.in", "r", stdin);
    freopen("a.out", "w", stdout);
#endif
    n = read_int();
    k = read_int();
    ans = 0;
    dfs(1);
    printf("%lld\n", ans % MOD);
    return 0;
}
// Distributed under the MIT License.
// See LICENSE.txt for details.

#pragma once

#include <cstddef>

#include "DataStructures/Tensor/TypeAliases.hpp"
#include "Utilities/Gsl.hpp"

namespace Ccz4 {
/// @{
/*!
 * \brief Computes the gradient of the spatial part of the Z4 constraint
 *
 * \details Computes the gradient as:
 *
 * \f{align}
 *     \nabla_i Z_j &=
 *         D_{ijl} \left(\hat{\Gamma}^l - \tilde{\Gamma}^l\right) +
 *         \frac{1}{2} \tilde{\gamma}_{jl} \left(
 *             \partial_i \hat{\Gamma}^l - \partial_i \tilde{\Gamma}^l\right) -
 *         \Gamma^l_{ij} Z_l
 * \f}
 *
 * where \f$Z_i\f$ is the spatial Z4 constraint defined by
 * `Ccz4::Tags::SpatialZ4Constraint`, \f$\tilde{\gamma}_{ij}\f$ is the conformal
 * spatial metric defined by `Ccz4::Tags::ConformalMetric`, \f$\Gamma^k_{ij}\f$
 * is the spatial Christoffel symbols of the second kind defined by
 * `Ccz4::Tags::ChristoffelSecondKind`, \f$D_{ijk}\f$ is the CCZ4 auxiliary
 * variable defined by `Ccz4::Tags::FieldD`,
 * \f$\left(\hat{\Gamma}^i - \tilde{\Gamma}^i\right)\f$ is the CCZ4 temporary
 * expression defined by
 * `Ccz4::Tags::GammaHatMinusContractedConformalChristoffel`, and
 * \f$\left(\partial_i \hat{\Gamma}^j - \partial_i \tilde{\Gamma}^j\right)\f$ is
 * its spatial derivative.
 */
// Overload writing the gradient into a preallocated `result` tensor.
template <size_t Dim, typename Frame, typename DataType>
void grad_spatial_z4_constraint(
    const gsl::not_null<tnsr::ij<DataType, Dim, Frame>*> result,
    const tnsr::i<DataType, Dim, Frame>& spatial_z4_constraint,
    const tnsr::ii<DataType, Dim, Frame>& conformal_spatial_metric,
    const tnsr::Ijj<DataType, Dim, Frame>& christoffel_second_kind,
    const tnsr::ijj<DataType, Dim, Frame>& field_d,
    const tnsr::I<DataType, Dim, Frame>&
        gamma_hat_minus_contracted_conformal_christoffel,
    const tnsr::iJ<DataType, Dim, Frame>&
        d_gamma_hat_minus_contracted_conformal_christoffel);

// Convenience overload that allocates and returns the gradient by value.
template <size_t Dim, typename Frame, typename DataType>
tnsr::ij<DataType, Dim, Frame> grad_spatial_z4_constraint(
    const tnsr::i<DataType, Dim, Frame>& spatial_z4_constraint,
    const tnsr::ii<DataType, Dim, Frame>& conformal_spatial_metric,
    const tnsr::Ijj<DataType, Dim, Frame>& christoffel_second_kind,
    const tnsr::ijj<DataType, Dim, Frame>& field_d,
    const tnsr::I<DataType, Dim, Frame>&
        gamma_hat_minus_contracted_conformal_christoffel,
    const tnsr::iJ<DataType, Dim, Frame>&
        d_gamma_hat_minus_contracted_conformal_christoffel);
/// @}
}  // namespace Ccz4
// Copyright (c) 2020 by Robert Bosch GmbH. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "iceoryx_posh/iceoryx_posh_types.hpp" #include "iceoryx_posh/internal/mepoo/memory_manager.hpp" #include "iceoryx_posh/internal/popo/building_blocks/chunk_distributor.hpp" #include "iceoryx_posh/internal/popo/building_blocks/chunk_distributor_data.hpp" #include "iceoryx_posh/internal/popo/building_blocks/chunk_queue_data.hpp" #include "iceoryx_posh/internal/popo/building_blocks/chunk_queue_popper.hpp" #include "iceoryx_posh/internal/popo/building_blocks/chunk_queue_pusher.hpp" #include "iceoryx_posh/internal/popo/building_blocks/chunk_sender.hpp" #include "iceoryx_posh/internal/popo/building_blocks/chunk_sender_data.hpp" #include "iceoryx_posh/internal/popo/building_blocks/locking_policy.hpp" #include "iceoryx_posh/internal/popo/ports/base_port.hpp" #include "iceoryx_posh/mepoo/mepoo_config.hpp" #include "iceoryx_utils/cxx/generic_raii.hpp" #include "iceoryx_utils/error_handling/error_handling.hpp" #include "iceoryx_utils/internal/posix_wrapper/shared_memory_object/allocator.hpp" #include "test.hpp" #include <memory> using namespace ::testing; struct DummySample { uint64_t dummy{42}; }; class ChunkSender_test : public Test { protected: ChunkSender_test() { m_mempoolconf.addMemPool({SMALL_CHUNK, NUM_CHUNKS_IN_POOL}); m_mempoolconf.addMemPool({BIG_CHUNK, NUM_CHUNKS_IN_POOL}); m_memoryManager.configureMemoryManager(m_mempoolconf, &m_memoryAllocator, 
&m_memoryAllocator); } ~ChunkSender_test() { } void SetUp() { } void TearDown() { } static constexpr size_t MEMORY_SIZE = 1024 * 1024; uint8_t m_memory[MEMORY_SIZE]; static constexpr uint32_t NUM_CHUNKS_IN_POOL = 20; static constexpr uint32_t SMALL_CHUNK = 128; static constexpr uint32_t BIG_CHUNK = 256; static constexpr uint64_t HISTORY_CAPACITY = 4; static constexpr uint32_t MAX_NUMBER_QUEUES = 128; iox::cxx::GenericRAII m_uniqueRouDiId{[] { iox::popo::internal::setUniqueRouDiId(0); }, [] { iox::popo::internal::unsetUniqueRouDiId(); }}; iox::posix::Allocator m_memoryAllocator{m_memory, MEMORY_SIZE}; iox::mepoo::MePooConfig m_mempoolconf; iox::mepoo::MemoryManager m_memoryManager; struct ChunkDistributorConfig { static constexpr uint32_t MAX_QUEUES = MAX_NUMBER_QUEUES; static constexpr uint64_t MAX_HISTORY_CAPACITY = iox::MAX_HISTORY_CAPACITY_OF_CHUNK_DISTRIBUTOR; }; struct ChunkQueueConfig { static constexpr uint64_t MAX_QUEUE_CAPACITY = NUM_CHUNKS_IN_POOL; }; using ChunkQueueData_t = iox::popo::ChunkQueueData<ChunkQueueConfig, iox::popo::ThreadSafePolicy>; using ChunkDistributorData_t = iox::popo::ChunkDistributorData<ChunkDistributorConfig, iox::popo::ThreadSafePolicy, iox::popo::ChunkQueuePusher<ChunkQueueData_t>>; using ChunkDistributor_t = iox::popo::ChunkDistributor<ChunkDistributorData_t>; ChunkQueueData_t m_chunkQueueData{iox::cxx::VariantQueueTypes::SoFi_SingleProducerSingleConsumer}; iox::popo::ChunkSenderData<iox::MAX_CHUNKS_ALLOCATE_PER_SENDER, ChunkDistributorData_t> m_chunkSenderData{ &m_memoryManager, 0}; // must be 0 for test iox::popo::ChunkSenderData<iox::MAX_CHUNKS_ALLOCATE_PER_SENDER, ChunkDistributorData_t> m_chunkSenderDataWithHistory{&m_memoryManager, HISTORY_CAPACITY}; iox::popo::ChunkSender<ChunkDistributor_t> m_chunkSender{&m_chunkSenderData}; iox::popo::ChunkSender<ChunkDistributor_t> m_chunkSenderWithHistory{&m_chunkSenderDataWithHistory}; }; TEST_F(ChunkSender_test, allocate_OneChunk) { auto maybeChunkHeader = 
m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); ASSERT_FALSE(maybeChunkHeader.has_error()); EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u)); } TEST_F(ChunkSender_test, allocate_ChunkHasOriginIdSet) { iox::UniquePortId uniqueId; auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), uniqueId); ASSERT_FALSE(maybeChunkHeader.has_error()); EXPECT_THAT((*maybeChunkHeader)->m_originId, Eq(uniqueId)); } TEST_F(ChunkSender_test, allocate_MultipleChunks) { auto chunk1 = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); auto chunk2 = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); ASSERT_FALSE(chunk1.has_error()); ASSERT_FALSE(chunk2.has_error()); // must be different chunks EXPECT_THAT(*chunk1, Ne(*chunk2)); EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(2u)); } TEST_F(ChunkSender_test, allocate_Overflow) { std::vector<iox::mepoo::ChunkHeader*> chunks; // allocate chunks until MAX_CHUNKS_ALLOCATE_PER_SENDER level for (size_t i = 0; i < iox::MAX_CHUNKS_ALLOCATE_PER_SENDER; i++) { auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); if (!maybeChunkHeader.has_error()) { chunks.push_back(*maybeChunkHeader); } } for (size_t i = 0; i < iox::MAX_CHUNKS_ALLOCATE_PER_SENDER; i++) { EXPECT_THAT(chunks[i], Ne(nullptr)); } EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(iox::MAX_CHUNKS_ALLOCATE_PER_SENDER)); // Allocate one more sample for overflow auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); EXPECT_TRUE(maybeChunkHeader.has_error()); EXPECT_THAT(maybeChunkHeader.get_error(), Eq(iox::popo::AllocationError::TOO_MANY_CHUNKS_ALLOCATED_IN_PARALLEL)); EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(iox::MAX_CHUNKS_ALLOCATE_PER_SENDER)); } TEST_F(ChunkSender_test, freeChunk) { std::vector<iox::mepoo::ChunkHeader*> chunks; // allocate chunks until MAX_CHUNKS_ALLOCATE_PER_SENDER level 
for (size_t i = 0; i < iox::MAX_CHUNKS_ALLOCATE_PER_SENDER; i++) { auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); if (!maybeChunkHeader.has_error()) { chunks.push_back(*maybeChunkHeader); } } EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(iox::MAX_CHUNKS_ALLOCATE_PER_SENDER)); // release them all for (size_t i = 0; i < iox::MAX_CHUNKS_ALLOCATE_PER_SENDER; i++) { m_chunkSender.release(chunks[i]); } EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(0u)); } TEST_F(ChunkSender_test, freeInvalidChunk) { auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); EXPECT_FALSE(maybeChunkHeader.has_error()); EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u)); auto errorHandlerCalled{false}; auto errorHandlerGuard = iox::ErrorHandler::SetTemporaryErrorHandler( [&errorHandlerCalled](const iox::Error, const std::function<void()>, const iox::ErrorLevel) { errorHandlerCalled = true; }); auto myCrazyChunk = std::make_shared<iox::mepoo::ChunkHeader>(); m_chunkSender.release(myCrazyChunk.get()); EXPECT_TRUE(errorHandlerCalled); EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u)); } TEST_F(ChunkSender_test, sendWithoutReceiver) { auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); EXPECT_FALSE(maybeChunkHeader.has_error()); EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u)); if (!maybeChunkHeader.has_error()) { auto sample = *maybeChunkHeader; m_chunkSender.send(sample); // chunk is still used because last chunk is stored EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u)); } } TEST_F(ChunkSender_test, sendMultipleWithoutReceiverAndAlwaysLast) { for (size_t i = 0; i < 100; i++) { auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); EXPECT_FALSE(maybeChunkHeader.has_error()); auto maybeLastChunk = m_chunkSender.getLast(); if (i > 0) { 
EXPECT_TRUE(maybeLastChunk.has_value()); // We get the last chunk again EXPECT_TRUE(*maybeChunkHeader == *maybeLastChunk); EXPECT_TRUE((*maybeChunkHeader)->payload() == (*maybeLastChunk)->payload()); } else { EXPECT_FALSE(maybeLastChunk.has_value()); } auto sample = (*maybeChunkHeader)->payload(); new (sample) DummySample(); m_chunkSender.send(*maybeChunkHeader); } // Exactly one chunk is used because last chunk is stored EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u)); } TEST_F(ChunkSender_test, sendMultipleWithoutReceiverWithHistoryNoLastReuse) { for (size_t i = 0; i < 10 * HISTORY_CAPACITY; i++) { auto maybeChunkHeader = m_chunkSenderWithHistory.allocate(sizeof(DummySample), iox::UniquePortId()); EXPECT_FALSE(maybeChunkHeader.has_error()); auto maybeLastChunk = m_chunkSenderWithHistory.getLast(); if (i > 0) { EXPECT_TRUE(maybeLastChunk.has_value()); // We don't get the last chunk again EXPECT_FALSE(*maybeChunkHeader == *maybeLastChunk); EXPECT_FALSE((*maybeChunkHeader)->payload() == (*maybeLastChunk)->payload()); } else { EXPECT_FALSE(maybeLastChunk.has_value()); } auto sample = (*maybeChunkHeader)->payload(); new (sample) DummySample(); m_chunkSenderWithHistory.send(*maybeChunkHeader); } // Used chunks == history size EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(HISTORY_CAPACITY)); } TEST_F(ChunkSender_test, sendOneWithReceiver) { m_chunkSender.addQueue(&m_chunkQueueData); auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); EXPECT_FALSE(maybeChunkHeader.has_error()); EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u)); if (!maybeChunkHeader.has_error()) { auto sample = (*maybeChunkHeader)->payload(); new (sample) DummySample(); m_chunkSender.send(*maybeChunkHeader); // consume the sample { iox::popo::ChunkQueuePopper<ChunkQueueData_t> myQueue(&m_chunkQueueData); EXPECT_FALSE(myQueue.empty()); auto popRet = myQueue.pop(); EXPECT_TRUE(popRet.has_value()); auto dummySample 
= *reinterpret_cast<DummySample*>(popRet->getPayload()); EXPECT_THAT(dummySample.dummy, Eq(42)); } } } TEST_F(ChunkSender_test, sendMultipleWithReceiver) { m_chunkSender.addQueue(&m_chunkQueueData); iox::popo::ChunkQueuePopper<ChunkQueueData_t> checkQueue(&m_chunkQueueData); EXPECT_TRUE(NUM_CHUNKS_IN_POOL <= checkQueue.getCurrentCapacity()); for (size_t i = 0; i < NUM_CHUNKS_IN_POOL; i++) { auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); EXPECT_FALSE(maybeChunkHeader.has_error()); if (!maybeChunkHeader.has_error()) { auto sample = (*maybeChunkHeader)->payload(); new (sample) DummySample(); static_cast<DummySample*>(sample)->dummy = i; m_chunkSender.send(*maybeChunkHeader); } } for (size_t i = 0; i < NUM_CHUNKS_IN_POOL; i++) { iox::popo::ChunkQueuePopper<ChunkQueueData_t> myQueue(&m_chunkQueueData); EXPECT_FALSE(myQueue.empty()); auto popRet = myQueue.pop(); EXPECT_TRUE(popRet.has_value()); auto dummySample = *reinterpret_cast<DummySample*>(popRet->getPayload()); EXPECT_THAT(dummySample.dummy, Eq(i)); EXPECT_THAT(popRet->getChunkHeader()->m_info.m_sequenceNumber, Eq(i)); } } TEST_F(ChunkSender_test, sendMultipleWithReceiverExternalSequenceNumber) { m_chunkSender.addQueue(&m_chunkQueueData); iox::popo::ChunkQueuePopper<ChunkQueueData_t> checkQueue(&m_chunkQueueData); EXPECT_TRUE(NUM_CHUNKS_IN_POOL <= checkQueue.getCurrentCapacity()); for (size_t i = 0; i < NUM_CHUNKS_IN_POOL; i++) { auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId()); EXPECT_FALSE(maybeChunkHeader.has_error()); if (!maybeChunkHeader.has_error()) { (*maybeChunkHeader)->m_info.m_externalSequenceNumber_bl = true; (*maybeChunkHeader)->m_info.m_sequenceNumber = i; m_chunkSender.send(*maybeChunkHeader); } } for (size_t i = 0; i < NUM_CHUNKS_IN_POOL; i++) { iox::popo::ChunkQueuePopper<ChunkQueueData_t> myQueue(&m_chunkQueueData); EXPECT_FALSE(myQueue.empty()); auto popRet = myQueue.pop(); EXPECT_TRUE(popRet.has_value()); 
EXPECT_THAT(popRet->getChunkHeader()->m_info.m_sequenceNumber, Eq(i));
    }
}

// Allocating after every pool chunk has been allocated-and-sent must fail with
// RUNNING_OUT_OF_CHUNKS and raise the error handler path (checked via the
// temporary handler below).
TEST_F(ChunkSender_test, sendTillRunningOutOfChunks)
{
    m_chunkSender.addQueue(&m_chunkQueueData);
    iox::popo::ChunkQueuePopper<ChunkQueueData_t> checkQueue(&m_chunkQueueData);
    EXPECT_TRUE(NUM_CHUNKS_IN_POOL <= checkQueue.getCurrentCapacity());

    // Drain the whole mempool: allocate, stamp a sequence number, send.
    for (size_t i = 0; i < NUM_CHUNKS_IN_POOL; i++)
    {
        auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId());
        EXPECT_FALSE(maybeChunkHeader.has_error());
        if (!maybeChunkHeader.has_error())
        {
            (*maybeChunkHeader)->m_info.m_externalSequenceNumber_bl = true;
            (*maybeChunkHeader)->m_info.m_sequenceNumber = i;
            auto sample = (*maybeChunkHeader)->payload();
            new (sample) DummySample();
            static_cast<DummySample*>(sample)->dummy = i;
            m_chunkSender.send(*maybeChunkHeader);
        }
    }

    // Install a temporary error handler so the expected failure does not abort.
    auto errorHandlerCalled{false};
    auto errorHandlerGuard = iox::ErrorHandler::SetTemporaryErrorHandler(
        [&errorHandlerCalled](const iox::Error, const std::function<void()>, const iox::ErrorLevel) {
            errorHandlerCalled = true;
        });

    // One allocation beyond capacity must report RUNNING_OUT_OF_CHUNKS.
    auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId());
    EXPECT_TRUE(maybeChunkHeader.has_error());
    EXPECT_THAT(maybeChunkHeader.get_error(), Eq(iox::popo::AllocationError::RUNNING_OUT_OF_CHUNKS));
}

// Sending a chunk header that was never allocated from the sender's pool must
// trigger the error handler and leave the pool usage untouched.
TEST_F(ChunkSender_test, sendInvalidChunk)
{
    auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId());
    EXPECT_FALSE(maybeChunkHeader.has_error());
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u));

    auto errorHandlerCalled{false};
    auto errorHandlerGuard = iox::ErrorHandler::SetTemporaryErrorHandler(
        [&errorHandlerCalled](const iox::Error, const std::function<void()>, const iox::ErrorLevel) {
            errorHandlerCalled = true;
        });

    // A foreign, heap-allocated header is not a valid pool chunk.
    auto myCrazyChunk = std::make_shared<iox::mepoo::ChunkHeader>();
    m_chunkSender.send(myCrazyChunk.get());

    EXPECT_TRUE(errorHandlerCalled);
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u));
}

// The history must cap the number of retained chunks at HISTORY_CAPACITY even
// when many more chunks are pushed.
TEST_F(ChunkSender_test, pushToHistory)
{
    for (size_t i = 0; i < 10 * HISTORY_CAPACITY; i++)
    {
        auto maybeChunkHeader = m_chunkSenderWithHistory.allocate(sizeof(DummySample), iox::UniquePortId());
        EXPECT_FALSE(maybeChunkHeader.has_error());
        m_chunkSenderWithHistory.pushToHistory(*maybeChunkHeader);
    }

    // Used chunks == history size
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(HISTORY_CAPACITY));
}

// Pushing a foreign chunk header to the history must trigger the error handler
// and leave pool usage untouched.
TEST_F(ChunkSender_test, pushInvalidChunkToHistory)
{
    auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId());
    EXPECT_FALSE(maybeChunkHeader.has_error());
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u));

    auto errorHandlerCalled{false};
    auto errorHandlerGuard = iox::ErrorHandler::SetTemporaryErrorHandler(
        [&errorHandlerCalled](const iox::Error, const std::function<void()>, const iox::ErrorLevel) {
            errorHandlerCalled = true;
        });

    auto myCrazyChunk = std::make_shared<iox::mepoo::ChunkHeader>();
    m_chunkSender.pushToHistory(myCrazyChunk.get());

    EXPECT_TRUE(errorHandlerCalled);
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u));
}

// With a receiver attached and nothing consumed, the previously sent chunk is
// still in flight, so getLast() must never hand back a reusable chunk.
TEST_F(ChunkSender_test, sendMultipleWithReceiverNoLastReuse)
{
    m_chunkSender.addQueue(&m_chunkQueueData);

    for (size_t i = 0; i < NUM_CHUNKS_IN_POOL; i++)
    {
        auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId());
        EXPECT_FALSE(maybeChunkHeader.has_error());
        auto maybeLastChunk = m_chunkSender.getLast();
        if (i > 0)
        {
            EXPECT_TRUE(maybeLastChunk.has_value());
            // No last chunk for us :-(
            EXPECT_FALSE(*maybeChunkHeader == *maybeLastChunk);
            EXPECT_FALSE((*maybeChunkHeader)->payload() == (*maybeLastChunk)->payload());
        }
        else
        {
            EXPECT_FALSE(maybeLastChunk.has_value());
        }
        auto sample = (*maybeChunkHeader)->payload();
        new (sample) DummySample();
        m_chunkSender.send(*maybeChunkHeader);
    }

    // All Chunks used now
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, NUM_CHUNKS_IN_POOL);
}

// When the receiver pops every chunk right after the send, the last chunk has
// been consumed and allocate() can recycle it: each iteration must hand back
// the same chunk again.
TEST_F(ChunkSender_test, sendMultipleWithReceiverLastReuseBecauseAlreadyConsumed)
{
    m_chunkSender.addQueue(&m_chunkQueueData);

    for (size_t i = 0; i < NUM_CHUNKS_IN_POOL; i++)
    {
        auto maybeChunkHeader = m_chunkSender.allocate(sizeof(DummySample), iox::UniquePortId());
        EXPECT_FALSE(maybeChunkHeader.has_error());
        auto maybeLastChunk = m_chunkSender.getLast();
        if (i > 0)
        {
            EXPECT_TRUE(maybeLastChunk.has_value());
            // We get the last chunk again
            EXPECT_TRUE(*maybeChunkHeader == *maybeLastChunk);
            EXPECT_TRUE((*maybeChunkHeader)->payload() == (*maybeLastChunk)->payload());
        }
        else
        {
            EXPECT_FALSE(maybeLastChunk.has_value());
        }
        auto sample = (*maybeChunkHeader)->payload();
        new (sample) DummySample();
        m_chunkSender.send(*maybeChunkHeader);

        // Consume immediately so the sent chunk becomes reusable next round.
        iox::popo::ChunkQueuePopper<ChunkQueueData_t> myQueue(&m_chunkQueueData);
        EXPECT_FALSE(myQueue.empty());
        auto popRet = myQueue.pop();
        EXPECT_TRUE(popRet.has_value());
    }

    // All consumed but the lastChunk
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, 1);
}

// A smaller allocation after sending a big chunk must recycle the big chunk
// (pool 1) instead of drawing from the small-chunk pool (pool 0).
TEST_F(ChunkSender_test, ReuseLastIfSmaller)
{
    auto maybeChunkHeader = m_chunkSender.allocate(BIG_CHUNK, iox::UniquePortId());
    EXPECT_FALSE(maybeChunkHeader.has_error());
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(1).m_usedChunks, Eq(1u));

    auto chunkHeader = *maybeChunkHeader;
    m_chunkSender.send(chunkHeader);

    auto chunkSmaller = m_chunkSender.allocate(SMALL_CHUNK, iox::UniquePortId());
    EXPECT_FALSE(chunkSmaller.has_error());

    // no small chunk used as big one is recycled
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(0u));
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(1).m_usedChunks, Eq(1u));

    auto maybeLastChunk = m_chunkSender.getLast();
    EXPECT_TRUE(maybeLastChunk.has_value());
    // We get the last chunk again
    EXPECT_TRUE(*chunkSmaller == *maybeLastChunk);
    EXPECT_TRUE((*chunkSmaller)->payload() == (*maybeLastChunk)->payload());
}

// A bigger allocation that does NOT fit in the last chunk must come from the
// big-chunk pool; the last chunk stays what it was.
TEST_F(ChunkSender_test, NoReuseOfLastIfBigger)
{
    auto maybeChunkHeader = m_chunkSender.allocate(SMALL_CHUNK, iox::UniquePortId());
    EXPECT_FALSE(maybeChunkHeader.has_error());
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u));

    auto chunkHeader = *maybeChunkHeader;
    m_chunkSender.send(chunkHeader);

    auto chunkBigger = m_chunkSender.allocate(BIG_CHUNK, iox::UniquePortId());
    EXPECT_FALSE(chunkBigger.has_error());

    // no reuse, we have a small and a big chunk in use
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u));
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(1).m_usedChunks, Eq(1u));

    auto maybeLastChunk = m_chunkSender.getLast();
    EXPECT_TRUE(maybeLastChunk.has_value());
    // not the last chunk
    EXPECT_FALSE(*chunkBigger == *maybeLastChunk);
    EXPECT_FALSE((*chunkBigger)->payload() == (*maybeLastChunk)->payload());
}

// A nominally bigger request that still fits into the last chunk's real size
// must reuse the last chunk instead of allocating from another pool.
TEST_F(ChunkSender_test, ReuseOfLastIfBiggerButFitsInChunk)
{
    auto maybeChunkHeader = m_chunkSender.allocate(SMALL_CHUNK - 10, iox::UniquePortId());
    EXPECT_FALSE(maybeChunkHeader.has_error());
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u));

    auto chunkHeader = *maybeChunkHeader;
    m_chunkSender.send(chunkHeader);

    auto chunkBigger = m_chunkSender.allocate(SMALL_CHUNK, iox::UniquePortId());
    EXPECT_FALSE(chunkBigger.has_error());

    // reuse as it still fits in the small chunk
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(1u));
    EXPECT_THAT(m_memoryManager.getMemPoolInfo(1).m_usedChunks, Eq(0u));

    auto maybeLastChunk = m_chunkSender.getLast();
    EXPECT_TRUE(maybeLastChunk.has_value());
    // it IS the last chunk again (reused)
    EXPECT_TRUE(*chunkBigger == *maybeLastChunk);
    EXPECT_TRUE((*chunkBigger)->payload() == (*maybeLastChunk)->payload());
}

// releaseAll() must return every chunk the sender holds (history plus the
// still-allocated ones) back to the mempool.
TEST_F(ChunkSender_test, Cleanup)
{
    EXPECT_TRUE((HISTORY_CAPACITY + iox::MAX_CHUNKS_ALLOCATE_PER_SENDER) <= NUM_CHUNKS_IN_POOL);

    // Fill the history...
    for (size_t i = 0; i < HISTORY_CAPACITY; i++)
    {
        auto maybeChunkHeader = m_chunkSenderWithHistory.allocate(SMALL_CHUNK, iox::UniquePortId());
        EXPECT_FALSE(maybeChunkHeader.has_error());
        m_chunkSenderWithHistory.send(*maybeChunkHeader);
    }

    // ...and additionally hold the maximum number of unsent allocations.
    for (size_t i = 0; i < iox::MAX_CHUNKS_ALLOCATE_PER_SENDER; i++)
    {
        auto maybeChunkHeader = m_chunkSenderWithHistory.allocate(SMALL_CHUNK, iox::UniquePortId());
        EXPECT_FALSE(maybeChunkHeader.has_error());
    }

    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks,
                Eq(HISTORY_CAPACITY + iox::MAX_CHUNKS_ALLOCATE_PER_SENDER));

    m_chunkSenderWithHistory.releaseAll();

    EXPECT_THAT(m_memoryManager.getMemPoolInfo(0).m_usedChunks, Eq(0u));
}
#include <iostream> #include <cstdlib> template <typename ... Ts> auto echo(Ts ... ts) { (std::cout << ... << ts); } int main(int argc, char* argv[]) { echo("Hello", ",", " ", "World", "!", "\n"); return EXIT_SUCCESS; }
// Copyright (c) 2012-2013 The Bitcoin Core developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

// Unit tests for command-line argument parsing (ParseParameters / GetArg /
// GetBoolArg), including the -nofoo negation and --foo double-dash forms.

#include "util.h"

#include <string>
#include <vector>

#include <boost/algorithm/string.hpp>
#include <boost/foreach.hpp>
#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(getarg_tests)

// Re-parses the global argument state from a space-separated string, as if the
// program had been started with those arguments.
static void ResetArgs(const std::string& strArg)
{
    std::vector<std::string> vecArg;
    if (strArg.size())
        boost::split(vecArg, strArg, boost::is_space(), boost::token_compress_on);

    // Insert dummy executable name:
    vecArg.insert(vecArg.begin(), "testbitcoin");

    // Convert to char*:
    // NOTE: vecChar borrows c_str() pointers; vecArg stays alive for the
    // duration of the ParseParameters call below, so this is safe.
    std::vector<const char*> vecChar;
    BOOST_FOREACH(std::string& s, vecArg)
        vecChar.push_back(s.c_str());

    ParseParameters(vecChar.size(), &vecChar[0]);
}

// Boolean flags: bare flag, =0/=1 forms, -nofoo negation, and precedence
// when both -foo and -nofoo appear.
BOOST_AUTO_TEST_CASE(boolarg)
{
    ResetArgs("-foo");
    BOOST_CHECK(GetBoolArg("-foo", false));
    BOOST_CHECK(GetBoolArg("-foo", true));

    // Prefix / superstring names must not match: the default wins.
    BOOST_CHECK(!GetBoolArg("-fo", false));
    BOOST_CHECK(GetBoolArg("-fo", true));

    BOOST_CHECK(!GetBoolArg("-fooo", false));
    BOOST_CHECK(GetBoolArg("-fooo", true));

    ResetArgs("-foo=0");
    BOOST_CHECK(!GetBoolArg("-foo", false));
    BOOST_CHECK(!GetBoolArg("-foo", true));

    ResetArgs("-foo=1");
    BOOST_CHECK(GetBoolArg("-foo", false));
    BOOST_CHECK(GetBoolArg("-foo", true));

    // New 0.6 feature: auto-map -nosomething to !-something:
    ResetArgs("-nofoo");
    BOOST_CHECK(!GetBoolArg("-foo", false));
    BOOST_CHECK(!GetBoolArg("-foo", true));

    ResetArgs("-nofoo=1");
    BOOST_CHECK(!GetBoolArg("-foo", false));
    BOOST_CHECK(!GetBoolArg("-foo", true));

    ResetArgs("-foo -nofoo");  // -foo should win
    BOOST_CHECK(GetBoolArg("-foo", false));
    BOOST_CHECK(GetBoolArg("-foo", true));

    ResetArgs("-foo=1 -nofoo=1");  // -foo should win
    BOOST_CHECK(GetBoolArg("-foo", false));
    BOOST_CHECK(GetBoolArg("-foo", true));

    ResetArgs("-foo=0 -nofoo=0");  // -foo should win
    BOOST_CHECK(!GetBoolArg("-foo", false));
    BOOST_CHECK(!GetBoolArg("-foo", true));

    // New 0.6 feature: treat -- same as -:
    ResetArgs("--foo=1");
    BOOST_CHECK(GetBoolArg("-foo", false));
    BOOST_CHECK(GetBoolArg("-foo", true));

    ResetArgs("--nofoo=1");
    BOOST_CHECK(!GetBoolArg("-foo", false));
    BOOST_CHECK(!GetBoolArg("-foo", true));
}

// String-valued arguments: missing, bare (empty value), empty (=), and set.
BOOST_AUTO_TEST_CASE(stringarg)
{
    ResetArgs("");
    BOOST_CHECK_EQUAL(GetArg("-foo", ""), "");
    BOOST_CHECK_EQUAL(GetArg("-foo", "eleven"), "eleven");

    ResetArgs("-foo -bar");
    BOOST_CHECK_EQUAL(GetArg("-foo", ""), "");
    BOOST_CHECK_EQUAL(GetArg("-foo", "eleven"), "");

    ResetArgs("-foo=");
    BOOST_CHECK_EQUAL(GetArg("-foo", ""), "");
    BOOST_CHECK_EQUAL(GetArg("-foo", "eleven"), "");

    ResetArgs("-foo=11");
    BOOST_CHECK_EQUAL(GetArg("-foo", ""), "11");
    BOOST_CHECK_EQUAL(GetArg("-foo", "eleven"), "11");

    ResetArgs("-foo=eleven");
    BOOST_CHECK_EQUAL(GetArg("-foo", ""), "eleven");
    BOOST_CHECK_EQUAL(GetArg("-foo", "eleven"), "eleven");
}

// Integer-valued arguments: a flag present without a value (or with a
// non-numeric value) parses as 0 and overrides the default.
BOOST_AUTO_TEST_CASE(intarg)
{
    ResetArgs("");
    BOOST_CHECK_EQUAL(GetArg("-foo", 11), 11);
    BOOST_CHECK_EQUAL(GetArg("-foo", 0), 0);

    ResetArgs("-foo -bar");
    BOOST_CHECK_EQUAL(GetArg("-foo", 11), 0);
    BOOST_CHECK_EQUAL(GetArg("-bar", 11), 0);

    ResetArgs("-foo=11 -bar=12");
    BOOST_CHECK_EQUAL(GetArg("-foo", 0), 11);
    BOOST_CHECK_EQUAL(GetArg("-bar", 11), 12);

    ResetArgs("-foo=NaN -bar=NotANumber");
    BOOST_CHECK_EQUAL(GetArg("-foo", 1), 0);
    BOOST_CHECK_EQUAL(GetArg("-bar", 11), 0);
}

// Double-dash arguments behave exactly like single-dash ones.
BOOST_AUTO_TEST_CASE(doubledowin)
{
    ResetArgs("--foo");
    BOOST_CHECK_EQUAL(GetBoolArg("-foo", false), true);

    ResetArgs("--foo=verbose --bar=1");
    BOOST_CHECK_EQUAL(GetArg("-foo", ""), "verbose");
    BOOST_CHECK_EQUAL(GetArg("-bar", 0), 1);
}

// Negation forms in detail, including -nofoo=0 meaning "foo is on".
BOOST_AUTO_TEST_CASE(boolargno)
{
    ResetArgs("-nofoo");
    BOOST_CHECK(!GetBoolArg("-foo", true));
    BOOST_CHECK(!GetBoolArg("-foo", false));

    ResetArgs("-nofoo=1");
    BOOST_CHECK(!GetBoolArg("-foo", true));
    BOOST_CHECK(!GetBoolArg("-foo", false));

    ResetArgs("-nofoo=0");
    BOOST_CHECK(GetBoolArg("-foo", true));
    BOOST_CHECK(GetBoolArg("-foo", false));

    ResetArgs("-foo --nofoo");
    BOOST_CHECK(GetBoolArg("-foo", true));
    BOOST_CHECK(GetBoolArg("-foo", false));

    ResetArgs("-nofoo -foo");  // foo always wins:
    BOOST_CHECK(GetBoolArg("-foo", true));
    BOOST_CHECK(GetBoolArg("-foo", false));
}

BOOST_AUTO_TEST_SUITE_END()
#include <cstdint> template <class T> auto BinPow(T base, int64_t exponent) -> T { if (exponent == 1) return base; if (exponent & 1) return base * BinPow(base * base, exponent >> 1); return BinPow(base * base, exponent >> 1); }
/** @file Decoration.cxx
 ** Visual elements added over text.
 **/
// Copyright 1998-2007 by Neil Hodgson <neilh@scintilla.org>
// The License.txt file describes the conditions under which this software may be distributed.

#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <cstdio>
#include <cstdarg>

#include <stdexcept>
#include <string_view>
#include <vector>
#include <algorithm>
#include <memory>

#include "Platform.h"

#include "Scintilla.h"
#include "Position.h"
#include "SplitVector.h"
#include "Partitioning.h"
#include "RunStyles.h"
#include "Decoration.h"

using namespace Scintilla;

namespace {

// One indicator's values over the whole document, stored as run-length runs.
// POS is the position type (int for normal documents, Sci::Position for
// large documents) so the same code serves both instantiations.
template <typename POS>
class Decoration : public IDecoration {
    int indicator;  // indicator number this decoration belongs to
public:
    RunStyles<POS, int> rs;  // per-position values, run-length encoded

    explicit Decoration(int indicator_) : indicator(indicator_) {
    }
    ~Decoration() override {
    }

    // Empty == a single run whose value is 0 everywhere.
    bool Empty() const noexcept override {
        return (rs.Runs() == 1) && (rs.AllSameAs(0));
    }
    int Indicator() const noexcept override {
        return indicator;
    }
    Sci::Position Length() const noexcept override {
        return rs.Length();
    }
    int ValueAt(Sci::Position position) const noexcept override {
        return rs.ValueAt(static_cast<POS>(position));
    }
    Sci::Position StartRun(Sci::Position position) const noexcept override {
        return rs.StartRun(static_cast<POS>(position));
    }
    Sci::Position EndRun(Sci::Position position) const noexcept override {
        return rs.EndRun(static_cast<POS>(position));
    }
    void SetValueAt(Sci::Position position, int value) override {
        rs.SetValueAt(static_cast<POS>(position), value);
    }
    void InsertSpace(Sci::Position position, Sci::Position insertLength) override {
        rs.InsertSpace(static_cast<POS>(position), static_cast<POS>(insertLength));
    }
    Sci::Position Runs() const noexcept override {
        return rs.Runs();
    }
};

// Collection of all decorations on a document, kept sorted by indicator
// number, with a cached "current" decoration for the FillRange fast path.
template <typename POS>
class DecorationList : public IDecorationList {
    int currentIndicator;  // indicator targeted by the next FillRange
    int currentValue;      // value written by the next FillRange (never 0)
    Decoration<POS> *current;   // Cached so FillRange doesn't have to search for each call.
    Sci::Position lengthDocument;
    // Ordered by indicator
    std::vector<std::unique_ptr<Decoration<POS>>> decorationList;
    std::vector<const IDecoration*> decorationView;    // Read-only view of decorationList
    bool clickNotified;

    Decoration<POS> *DecorationFromIndicator(int indicator) noexcept;
    Decoration<POS> *Create(int indicator, Sci::Position length);
    void Delete(int indicator);
    void DeleteAnyEmpty();
    void SetView();
public:

    DecorationList();
    ~DecorationList() override;

    const std::vector<const IDecoration*> &View() const noexcept override {
        return decorationView;
    }

    void SetCurrentIndicator(int indicator) override;
    int GetCurrentIndicator() const noexcept override { return currentIndicator; }

    void SetCurrentValue(int value) override;
    int GetCurrentValue() const noexcept override { return currentValue; }

    // Returns changed=true if some values may have changed
    FillResult<Sci::Position> FillRange(Sci::Position position, int value, Sci::Position fillLength) override;
    void InsertSpace(Sci::Position position, Sci::Position insertLength) override;
    void DeleteRange(Sci::Position position, Sci::Position deleteLength) override;
    void DeleteLexerDecorations() override;

    int AllOnFor(Sci::Position position) const noexcept override;
    int ValueAt(int indicator, Sci::Position position) noexcept override;
    Sci::Position Start(int indicator, Sci::Position position) noexcept override;
    Sci::Position End(int indicator, Sci::Position position) noexcept override;

    bool ClickNotified() const noexcept override {
        return clickNotified;
    }
    void SetClickNotified(bool notified) noexcept override {
        clickNotified = notified;
    }
};

template <typename POS>
DecorationList<POS>::DecorationList() : currentIndicator(0), currentValue(1), current(nullptr),
    lengthDocument(0), clickNotified(false) {
}

template <typename POS>
DecorationList<POS>::~DecorationList() {
    // Drop the cached pointer; the owned decorations are released by the
    // unique_ptr vector.
    current = nullptr;
}

// Linear search by indicator number; returns nullptr when absent.
template <typename POS>
Decoration<POS> *DecorationList<POS>::DecorationFromIndicator(int indicator) noexcept {
    for (const std::unique_ptr<Decoration<POS>> &deco : decorationList) {
        if (deco->Indicator() == indicator) {
            return deco.get();
        }
    }
    return nullptr;
}

// Creates a decoration for the indicator sized to the document and inserts it
// in indicator order, then refreshes the read-only view.
template <typename POS>
Decoration<POS> *DecorationList<POS>::Create(int indicator, Sci::Position length) {
    currentIndicator = indicator;
    std::unique_ptr<Decoration<POS>> decoNew = std::make_unique<Decoration<POS>>(indicator);
    decoNew->rs.InsertSpace(0, static_cast<POS>(length));

    typename std::vector<std::unique_ptr<Decoration<POS>>>::iterator it = std::lower_bound(
        decorationList.begin(), decorationList.end(), decoNew,
        [](const std::unique_ptr<Decoration<POS>> &a, const std::unique_ptr<Decoration<POS>> &b) noexcept {
            return a->Indicator() < b->Indicator();
        });
    typename std::vector<std::unique_ptr<Decoration<POS>>>::iterator itAdded =
        decorationList.insert(it, std::move(decoNew));

    SetView();
    return itAdded->get();
}

// Removes the decoration for the indicator (if present) and invalidates the
// cached current pointer, which may have referred to it.
template <typename POS>
void DecorationList<POS>::Delete(int indicator) {
    decorationList.erase(std::remove_if(decorationList.begin(), decorationList.end(),
        [indicator](const std::unique_ptr<Decoration<POS>> &deco) noexcept {
            return deco->Indicator() == indicator;
        }), decorationList.end());
    current = nullptr;
    SetView();
}

template <typename POS>
void DecorationList<POS>::SetCurrentIndicator(int indicator) {
    currentIndicator = indicator;
    current = DecorationFromIndicator(indicator);
    currentValue = 1;  // reset to the default "on" value
}

template <typename POS>
void DecorationList<POS>::SetCurrentValue(int value) {
    // 0 would mean "off"; substitute 1 so the fill always marks something.
    currentValue = value ? value : 1;
}

// Fills [position, position+fillLength) of the current indicator with value,
// lazily creating the decoration and discarding it again if the fill left it
// empty.
template <typename POS>
FillResult<Sci::Position> DecorationList<POS>::FillRange(Sci::Position position, int value, Sci::Position fillLength) {
    if (!current) {
        current = DecorationFromIndicator(currentIndicator);
        if (!current) {
            current = Create(currentIndicator, lengthDocument);
        }
    }
    // Converting result from POS to Sci::Position as callers not polymorphic.
    const FillResult<POS> frInPOS = current->rs.FillRange(static_cast<POS>(position), value, static_cast<POS>(fillLength));
    const FillResult<Sci::Position> fr { frInPOS.changed, frInPOS.position, frInPOS.fillLength };
    if (current->Empty()) {
        Delete(currentIndicator);
    }
    return fr;
}

// Grows every decoration when text is inserted. An insertion exactly at the
// end of the document must not inherit the previous run's value, so it is
// explicitly zero-filled.
template <typename POS>
void DecorationList<POS>::InsertSpace(Sci::Position position, Sci::Position insertLength) {
    const bool atEnd = position == lengthDocument;
    lengthDocument += insertLength;
    for (const std::unique_ptr<Decoration<POS>> &deco : decorationList) {
        deco->rs.InsertSpace(static_cast<POS>(position), static_cast<POS>(insertLength));
        if (atEnd) {
            deco->rs.FillRange(static_cast<POS>(position), 0, static_cast<POS>(insertLength));
        }
    }
}

// Shrinks every decoration when text is deleted and drops any decoration that
// became empty as a result.
template <typename POS>
void DecorationList<POS>::DeleteRange(Sci::Position position, Sci::Position deleteLength) {
    lengthDocument -= deleteLength;
    for (const std::unique_ptr<Decoration<POS>> &deco : decorationList) {
        deco->rs.DeleteRange(static_cast<POS>(position), static_cast<POS>(deleteLength));
    }
    DeleteAnyEmpty();
    if (decorationList.size() != decorationView.size()) {
        // One or more empty decorations deleted so update view.
        current = nullptr;
        SetView();
    }
}

// Removes all decorations owned by the lexer (indicator below
// INDICATOR_CONTAINER); container-owned decorations are kept.
template <typename POS>
void DecorationList<POS>::DeleteLexerDecorations() {
    decorationList.erase(std::remove_if(decorationList.begin(), decorationList.end(),
        [](const std::unique_ptr<Decoration<POS>> &deco) noexcept {
            return deco->Indicator() < INDICATOR_CONTAINER ;
        }), decorationList.end());
    current = nullptr;
    SetView();
}

template <typename POS>
void DecorationList<POS>::DeleteAnyEmpty() {
    if (lengthDocument == 0) {
        decorationList.clear();
    } else {
        decorationList.erase(std::remove_if(decorationList.begin(), decorationList.end(),
            [](const std::unique_ptr<Decoration<POS>> &deco) noexcept {
                return deco->Empty();
            }), decorationList.end());
    }
}

// Rebuilds the non-owning, read-only view over decorationList.
template <typename POS>
void DecorationList<POS>::SetView() {
    decorationView.clear();
    for (const std::unique_ptr<Decoration<POS>> &deco : decorationList) {
        decorationView.push_back(deco.get());
    }
}

// Bit mask of indicators (below INDICATOR_IME) that are set at position.
template <typename POS>
int DecorationList<POS>::AllOnFor(Sci::Position position) const noexcept {
    int mask = 0;
    for (const std::unique_ptr<Decoration<POS>> &deco : decorationList) {
        if (deco->rs.ValueAt(static_cast<POS>(position))) {
            if (deco->Indicator() < INDICATOR_IME) {
                mask |= 1 << deco->Indicator();
            }
        }
    }
    return mask;
}

template <typename POS>
int DecorationList<POS>::ValueAt(int indicator, Sci::Position position) noexcept {
    const Decoration<POS> *deco = DecorationFromIndicator(indicator);
    if (deco) {
        return deco->rs.ValueAt(static_cast<POS>(position));
    }
    return 0;
}

template <typename POS>
Sci::Position DecorationList<POS>::Start(int indicator, Sci::Position position) noexcept {
    const Decoration<POS> *deco = DecorationFromIndicator(indicator);
    if (deco) {
        return deco->rs.StartRun(static_cast<POS>(position));
    }
    return 0;
}

template <typename POS>
Sci::Position DecorationList<POS>::End(int indicator, Sci::Position position) noexcept {
    const Decoration<POS> *deco = DecorationFromIndicator(indicator);
    if (deco) {
        return deco->rs.EndRun(static_cast<POS>(position));
    }
    return 0;
}

}

namespace Scintilla {

// Factory: picks the position-type instantiation matching the document size.
std::unique_ptr<IDecoration> DecorationCreate(bool largeDocument, int indicator) {
    if (largeDocument)
        return std::make_unique<Decoration<Sci::Position>>(indicator);
    else
        return std::make_unique<Decoration<int>>(indicator);
}

std::unique_ptr<IDecorationList> DecorationListCreate(bool largeDocument) {
    if (largeDocument)
        return std::make_unique<DecorationList<Sci::Position>>();
    else
        return std::make_unique<DecorationList<int>>();
}

}
#include "table_header_item.h" TableHeaderItem::TableHeaderItem(const string& id, const string& text, const string& classes) : Component(id, ".table-header-item " + classes) { this->text = text; setup(); } void TableHeaderItem::setup() { style("../styles/std_components/table/style.css"); setText(this->text); }
// Copyright 2022, University of Freiburg, // Chair of Algorithms and Data Structures. // Author: Robin Textor-Falconi (textorr@informatik.uni-freiburg.de) #include <gtest/gtest.h> #include "../src/util/HttpServer/ContentEncodingHelper.h" using namespace ad_utility::content_encoding; namespace http = boost::beast::http; using ad_utility::content_encoding::CompressionMethod; class ContentEncodingHelperFixture : public ::testing::TestWithParam< std::pair<CompressionMethod, std::string_view>> {}; TEST_P(ContentEncodingHelperFixture, HeaderIsNotSetCorrectly) { const auto& [compressionMethod, headerValue] = GetParam(); http::header<false, http::fields> header; setContentEncodingHeaderForCompressionMethod(compressionMethod, header); ASSERT_EQ(header[http::field::content_encoding], headerValue); } TEST(ContentEncodingHelper, TestHeadersAreInsertedCorrectly) { http::header<false, http::fields> header; setContentEncodingHeaderForCompressionMethod(CompressionMethod::GZIP, header); setContentEncodingHeaderForCompressionMethod(CompressionMethod::DEFLATE, header); setContentEncodingHeaderForCompressionMethod(CompressionMethod::DEFLATE, header); auto [current, end] = header.equal_range(http::field::content_encoding); ASSERT_NE(current, end); ASSERT_EQ(current->value(), "gzip"); current++; ASSERT_NE(current, end); ASSERT_EQ(current->value(), "deflate"); current++; ASSERT_NE(current, end); ASSERT_EQ(current->value(), "deflate"); current++; ASSERT_EQ(current, end); } auto getValuePairsForHeaderTest() { return ::testing::Values( // empty string_view means no such header is present std::pair{CompressionMethod::NONE, std::string_view{}}, std::pair{CompressionMethod::DEFLATE, "deflate"}, std::pair{CompressionMethod::GZIP, "gzip"}); } INSTANTIATE_TEST_SUITE_P(CompressionMethodParameters, ContentEncodingHelperFixture, getValuePairsForHeaderTest()); TEST(ContentEncodingHelper, NoneHeaderIsIndentifiedCorrectly) { http::request<http::string_body> request; auto result = 
getCompressionMethodForRequest(request); ASSERT_EQ(result, CompressionMethod::NONE); } TEST(ContentEncodingHelper, GzipHeaderIsIndentifiedCorrectly) { http::request<http::string_body> request; request.set(http::field::accept_encoding, "gzip"); auto result = getCompressionMethodForRequest(request); ASSERT_EQ(result, CompressionMethod::GZIP); } TEST(ContentEncodingHelper, DeflateHeaderIsIndentifiedCorrectly) { http::request<http::string_body> request; request.set(http::field::accept_encoding, "deflate"); auto result = getCompressionMethodForRequest(request); ASSERT_EQ(result, CompressionMethod::DEFLATE); } TEST(ContentEncodingHelper, DeflateHeaderIsPreferredOverGzip) { http::request<http::string_body> request; request.set(http::field::accept_encoding, "gzip, deflate"); auto result = getCompressionMethodForRequest(request); ASSERT_EQ(result, CompressionMethod::DEFLATE); } TEST(ContentEncodingHelper, DeflateHeaderIsPreferredOverGzipOnMultipleHeaders) { http::request<http::string_body> request; request.insert(http::field::accept_encoding, "gzip"); request.insert(http::field::accept_encoding, "deflate"); auto result = getCompressionMethodForRequest(request); ASSERT_EQ(result, CompressionMethod::DEFLATE); }
// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "garnet/bin/mdns/service/prober.h"

#include <zircon/syscalls.h>

#include "src/lib/fxl/logging.h"
#include "src/lib/fxl/time/time_delta.h"
#include "src/lib/fxl/time/time_point.h"

namespace mdns {

// Prober repeatedly sends a probe question for its resource and watches the
// incoming resources for a conflicting record of the same type. The completion
// callback is invoked exactly once: with false if a conflict is seen, with
// true after kMaxProbeAttemptCount probes pass without conflict.

// static
constexpr fxl::TimeDelta Prober::kMaxProbeInterval = fxl::TimeDelta::FromMilliseconds(250);

Prober::Prober(MdnsAgent::Host* host, DnsType type, CompletionCallback callback)
    : MdnsAgent(host), type_(type), callback_(std::move(callback)) {
  FXL_DCHECK(callback_);
}

Prober::~Prober() {}

// Builds the unicast-response kAny probe question and schedules the first
// probe after a random initial delay.
void Prober::Start(const std::string& host_full_name, inet::IpPort mdns_port) {
  FXL_DCHECK(!host_full_name.empty());

  MdnsAgent::Start(host_full_name, mdns_port);

  host_full_name_ = host_full_name;

  question_ = std::make_shared<DnsQuestion>(ResourceName(), DnsType::kAny);
  question_->unicast_response_ = true;

  Probe(InitialDelay());
}

// Detects conflicts: any received resource with our name and our type (an
// AAAA record also conflicts with an A probe) means someone else owns the
// name, so report failure.
void Prober::ReceiveResource(const DnsResource& resource, MdnsResourceSection section) {
  if (resource.name_.dotted_string_ != ResourceName()) {
    return;
  }

  if (resource.type_ == type_ ||
      (resource.type_ == DnsType::kAaaa && type_ == DnsType::kA)) {
    // Conflict detected. We defer the call to |RemoveSelf| and the callback
    // so we aren't calling |RemoveSelf| from |ReceiveResource|.
    PostTaskForTime(
        [this]() {
          CompletionCallback callback = std::move(callback_);
          RemoveSelf();
          // This |Prober| has probably been deleted at this point, so we avoid
          // referencing any members.
          callback(false);
        },
        fxl::TimePoint::Now());
  }
}

// Random delay in [0, kMaxProbeInterval) drawn from the kernel CPRNG; the
// shift drops the sign bit so the modulo operand is non-negative.
fxl::TimeDelta Prober::InitialDelay() {
  uint64_t random = 0;
  zx_cprng_draw(&random, sizeof(random));
  int64_t random_nonnegative_int64 = static_cast<int64_t>(random >> 1);
  FXL_DCHECK(random_nonnegative_int64 >= 0);
  return fxl::TimeDelta::FromNanoseconds(random_nonnegative_int64 %
                                         kMaxProbeInterval.ToNanoseconds());
}

// Schedules one probe after |delay|. Each invocation either finishes with
// success (attempt budget exhausted, no conflict seen) or sends the probe
// question plus proposed resources and re-arms itself.
void Prober::Probe(fxl::TimeDelta delay) {
  PostTaskForTime(
      [this]() {
        if (++probe_attempt_count_ > kMaxProbeAttemptCount) {
          // No conflict detected.
          CompletionCallback callback = std::move(callback_);
          RemoveSelf();
          // This |Prober| has probably been deleted at this point, so
          // we avoid referencing any members.
          callback(true);
        } else {
          SendQuestion(question_);
          SendProposedResources(MdnsResourceSection::kAuthority);
          Probe(kMaxProbeInterval);
        }
      },
      fxl::TimePoint::Now() + delay);
}

}  // namespace mdns
#include "compat.h"
#include "txdb-leveldb.h"
#include "blockparams.h"
#include "spork.h"
#include "instantx.h"
#include "cblock.h"
#include "mining.h"
#include "ctransactionlock.h"
#include "main_extern.h"
#include "ctxmempool.h"
#include "main.h"
#include "ctxout.h"
#include "ctxin.h"
#include "thread.h"
#include "serialize.h"
#include "ctxindex.h"
#include "util.h"
#include "cblockindex.h"
#include "cdatastream.h"

#include "cmerkletx.h"

// CMerkleTx: a transaction together with the merkle branch that links it to
// the block containing it.

CMerkleTx::CMerkleTx()
{
    Init();
}

CMerkleTx::CMerkleTx(const CTransaction& txIn) : CTransaction(txIn)
{
    Init();
}

// Size in bytes of the serialized form: base transaction followed by
// hashBlock, vMerkleBranch and nIndex (mirrors Serialize/Unserialize below).
unsigned int CMerkleTx::GetSerializeSize(int nType, int nVersion) const
{
    CSerActionGetSerializeSize ser_action;
    const bool fGetSize = true;
    const bool fWrite = false;
    const bool fRead = false;
    unsigned int nSerSize = 0;
    ser_streamplaceholder s;
    assert(fGetSize||fWrite||fRead); /* suppress warning */
    s.nType = nType;
    s.nVersion = nVersion;

    nSerSize += SerReadWrite(s, *(CTransaction*)this, nType, nVersion, ser_action);
    // READWRITE below expects the stream version to match this object.
    nVersion = this->nVersion;
    READWRITE(hashBlock);
    READWRITE(vMerkleBranch);
    READWRITE(nIndex);

    return nSerSize;
}

// Writes base transaction, then the merkle-proof fields.
template<typename Stream>
void CMerkleTx::Serialize(Stream& s, int nType, int nVersion) const
{
    CSerActionSerialize ser_action;
    const bool fGetSize = false;
    const bool fWrite = true;
    const bool fRead = false;
    unsigned int nSerSize = 0;
    assert(fGetSize||fWrite||fRead); /* suppress warning */

    nSerSize += SerReadWrite(s, *(CTransaction*)this, nType, nVersion, ser_action);
    nVersion = this->nVersion;
    READWRITE(hashBlock);
    READWRITE(vMerkleBranch);
    READWRITE(nIndex);
}

// Reads base transaction, then the merkle-proof fields.
template<typename Stream>
void CMerkleTx::Unserialize(Stream& s, int nType, int nVersion)
{
    CSerActionUnserialize ser_action;
    const bool fGetSize = false;
    const bool fWrite = false;
    const bool fRead = true;
    unsigned int nSerSize = 0;
    assert(fGetSize||fWrite||fRead); /* suppress warning */

    nSerSize += SerReadWrite(s, *(CTransaction*)this, nType, nVersion, ser_action);
    nVersion = this->nVersion;
    READWRITE(hashBlock);
    READWRITE(vMerkleBranch);
    READWRITE(nIndex);
}

// Explicit instantiations for the stream type actually used.
template void CMerkleTx::Serialize<CDataStream>(CDataStream& s, int nType, int nVersion) const;
template void CMerkleTx::Unserialize<CDataStream>(CDataStream& s, int nType, int nVersion);

void CMerkleTx::Init()
{
    hashBlock = 0;
    nIndex = -1;
    fMerkleVerified = false;
}

// Fills hashBlock, nIndex and vMerkleBranch from the given block (or from
// disk via the tx index when pblock is NULL).
// Returns the transaction's depth in the main chain, or 0 on any failure.
int CMerkleTx::SetMerkleBranch(const CBlock* pblock)
{
    AssertLockHeld(cs_main);

    CBlock blockTmp;
    if (pblock == NULL)
    {
        // Load the block this tx is in
        CTxIndex txindex;
        if (!CTxDB("r").ReadTxIndex(GetHash(), txindex))
        {
            return 0;
        }

        if (!blockTmp.ReadFromDisk(txindex.pos.nFile, txindex.pos.nBlockPos))
        {
            return 0;
        }

        // BUGFIX: in the original code this assignment sat AFTER the early
        // return inside the failure branch above, making it dead code: after a
        // successful disk read pblock stayed NULL and the merkle branch below
        // was never built. The assignment belongs on the success path.
        pblock = &blockTmp;
    }

    if (pblock)
    {
        // Update the tx's hashBlock
        hashBlock = pblock->GetHash();

        // Locate the transaction
        for (nIndex = 0; nIndex < (int)pblock->vtx.size(); nIndex++)
        {
            if (pblock->vtx[nIndex] == *(CTransaction*)this)
            {
                break;
            }
        }

        if (nIndex == (int)pblock->vtx.size())
        {
            vMerkleBranch.clear();
            nIndex = -1;
            LogPrintf("ERROR: SetMerkleBranch() : couldn't find tx in block\n");
            return 0;
        }

        // Fill in merkle branch
        vMerkleBranch = pblock->GetMerkleBranch(nIndex);
    }

    // Is the tx in a block that's in the main chain
    std::map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
    if (mi == mapBlockIndex.end())
    {
        return 0;
    }

    CBlockIndex* pindex = (*mi).second;
    if (!pindex || !pindex->IsInMainChain())
    {
        return 0;
    }

    return pindexBest->nHeight - pindex->nHeight + 1;
}

// Return depth of transaction in blockchain:
// -1  : not in blockchain, and not in memory pool (conflicted transaction)
//  0  : in memory pool, waiting to be included in a block
// >=1 : this many blocks deep in the main chain
// With enableIX, a transaction locked by enough InstantX signatures is
// credited with nInstantXDepth extra confirmations.
int CMerkleTx::GetDepthInMainChain(CBlockIndex* &pindexRet, bool enableIX) const
{
    AssertLockHeld(cs_main);

    int nResult = GetDepthInMainChainINTERNAL(pindexRet);
    if (nResult == 0 && !mempool.exists(GetHash()))
    {
        return -1; // Not in chain, not in mempool
    }

    if(enableIX)
    {
        if (nResult < 10)
        {
            int signatures = GetTransactionLockSignatures();
            if(signatures >= INSTANTX_SIGNATURES_REQUIRED)
            {
                return nInstantXDepth+nResult;
            }
        }
    }

    return nResult;
}

int CMerkleTx::GetDepthInMainChain(bool enableIX) const
{
    CBlockIndex *pindexRet;
    return GetDepthInMainChain(pindexRet, enableIX);
}

// Depth ignoring the mempool: 0 unless this tx is in a main-chain block whose
// merkle branch verifies; verification result is cached in fMerkleVerified.
int CMerkleTx::GetDepthInMainChainINTERNAL(CBlockIndex* &pindexRet) const
{
    if (hashBlock == 0 || nIndex == -1)
    {
        return 0;
    }

    AssertLockHeld(cs_main);

    // Find the block it claims to be in
    std::map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
    if (mi == mapBlockIndex.end())
    {
        return 0;
    }

    CBlockIndex* pindex = (*mi).second;
    if (!pindex || !pindex->IsInMainChain())
    {
        return 0;
    }

    // Make sure the merkle branch connects to this block
    if (!fMerkleVerified)
    {
        if (CBlock::CheckMerkleBranch(GetHash(), vMerkleBranch, nIndex) != pindex->hashMerkleRoot)
        {
            return 0;
        }

        fMerkleVerified = true;
    }

    pindexRet = pindex;

    return pindexBest->nHeight - pindex->nHeight + 1;
}

bool CMerkleTx::IsInMainChain() const
{
    CBlockIndex *pindexRet;
    return GetDepthInMainChainINTERNAL(pindexRet) > 0;
}

// Blocks remaining before a coinbase/coinstake output may be spent;
// 0 for ordinary transactions.
int CMerkleTx::GetBlocksToMaturity() const
{
    if (!(IsCoinBase() || IsCoinStake()))
    {
        return 0;
    }

    return std::max(0, nCoinbaseMaturity+75 - GetDepthInMainChain());
}

bool CMerkleTx::AcceptToMemoryPool(bool fLimitFree, bool fRejectInsaneFee, bool ignoreFees)
{
    return ::AcceptToMemoryPool(mempool, *this, fLimitFree, NULL, fRejectInsaneFee, ignoreFees);
}

// Number of InstantX lock signatures for this tx.
// Returns -3 when the InstantX spork is off, -1 when InstantX is disabled
// locally or no lock exists.
int CMerkleTx::GetTransactionLockSignatures() const
{
    if(!IsSporkActive(SPORK_2_INSTANTX))
    {
        return -3;
    }

    if(!fEnableInstantX)
    {
        return -1;
    }

    //compile consessus vote
    std::map<uint256, CTransactionLock>::iterator i = mapTxLocks.find(GetHash());
    if (i != mapTxLocks.end())
    {
        return (*i).second.CountSignatures();
    }

    return -1;
}

// True once the lock's timeout has passed; false when no lock exists.
bool CMerkleTx::IsTransactionLockTimedOut() const
{
    if(!fEnableInstantX)
    {
        // NOTE(review): returning -1 from a bool function yields true ("timed
        // out") when InstantX is disabled — likely copy-pasted from
        // GetTransactionLockSignatures(). Behavior preserved here; confirm
        // whether callers expect false instead.
        return -1;
    }

    //compile consessus vote
    std::map<uint256, CTransactionLock>::iterator i = mapTxLocks.find(GetHash());
    if (i != mapTxLocks.end())
    {
        return GetTime() > (*i).second.nTimeout;
    }

    return false;
}
#ifndef PX_CG_ITEM_BALL_HPP
#define PX_CG_ITEM_BALL_HPP

#include "item.hpp"

namespace px
{ namespace item
{
// Forward declaration; the full definition follows below.
class Ball;
} }

// A renderable ball item. All instances share one static shader; each
// instance owns its own vertex-array/buffer handles and material colors.
class px::item::Ball : public Item
{
private:
    static ItemInfo ITEM_INFO;          // shared item metadata used by regItem()/create()

public:
    static Shader *shader;              // shader shared by every Ball instance
    static void initShader();           // create the shared shader (call before rendering)
    static void destroyShader();        // release the shared shader

public:
    // Material color components (ambient/diffuse/specular/shininess —
    // presumably Phong-style lighting; confirm against the shader source).
    struct
    {
        glm::vec3 ambient;
        glm::vec3 diffuse;
        glm::vec3 specular;
        float shininess;
    } color;

    static std::shared_ptr<Item> create();   // factory for the item registry
    static std::size_t regItem();            // register this item type; returns its id

    // Rendered in the post-render pass (always true for Ball).
    bool postRender() const override { return true; }
    void init() override;                // set up GPU buffers (vao/vbo) and _n_indices
    void render() override;              // draw the ball

    Ball(glm::vec3 const &pos = glm::vec3(0.f), float radius = 1.f);
    ~Ball() override;

protected:
    // Vertex array / vertex+index buffer handles (names suggest OpenGL —
    // TODO confirm against item.hpp).
    unsigned int vao[1], vbo[2];

private:
    int _n_indices;                      // number of indices submitted when drawing
};

#endif
/* * Copyright (c) 2012, Klaus Pototzky * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1) Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2) Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3) Neither the name of the FLENS development group nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PLAYGROUND_CXXBLAS_LEVEL1_SUM_TCC #define PLAYGROUND_CXXBLAS_LEVEL1_SUM_TCC 1 #include <cxxstd/cmath.h> #include <cxxblas/cxxblas.h> namespace cxxblas { template <typename IndexType, typename X, typename T> void sum_generic(IndexType n, const X *x, IndexType incX, T &sum) { CXXBLAS_DEBUG_OUT("sum_generic (extension)"); sum = T(0); for (IndexType i=0; i<n; ++i, x+=incX) { sum += (*x); } } template <typename IndexType, typename X, typename T> void sum(IndexType n, const X *x, IndexType incX, T &sum) { if (incX<0) { x -= incX*(n-1); } sum_generic(n, x, incX, sum); } } // namespace cxxblas #endif // PLAYGROUND_CXXBLAS_LEVEL1_SUM_TCC
/**************************************************************************************************************************************************** * Copyright (c) 2016 Freescale Semiconductor, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of the Freescale Semiconductor, Inc. nor the names of * its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 * ****************************************************************************************************************************************************/

#include "GridRenderVBLineStrip2.hpp"
#include <FslBase/Math/MathHelper.hpp>
#include <FslBase/Math/VectorHelper.hpp>
#include <FslBase/Exceptions.hpp>
#include <FslUtil/OpenGLES3/GLCheck.hpp>
#include <FslUtil/OpenGLES3/DynamicNativeTexture2D.hpp>
#include <cassert>

namespace Fsl
{
  using namespace GLES3;

  //! The CatmullRom spline smoothing is now done in 3D in a initial line generation pass
  //! This method produces a better looking grid and has the benefit of being slightly faster to render multiple times as the line generation code
  //! isn't run on each draw call.
  //! The line generation pass outputs one vertex line list, which is then transfered to a vertex buffer and the rest is send off to the GPU to
  //! render. This is a lot faster than rendering via native batch and we save the entire CPU 3d->2d pass.

  //! Builds all static state: final grid dimensions (2n-1 per axis), the CPU-side
  //! vertex scratch buffer, the streaming vertex buffer, the line shader and a
  //! world-view-projection matrix that maps z=0 one-to-one onto the screen.
  //! Requires an even grid size (the generation passes process rows/columns in pairs).
  GridRenderVBLineStrip2::GridRenderVBLineStrip2(const Point2& gridSize, const Point2& screenSize, const std::shared_ptr<IContentManager>& contentManager)
    : m_gridSize(gridSize)
    , m_gridFinalSize((2 * gridSize.X) - 1, (2 * gridSize.Y) - 1)
    , m_coordinates(((m_gridFinalSize.X + 2) * m_gridSize.Y) + ((m_gridFinalSize.Y + 2) * m_gridSize.X))
    , m_vertexBuffer(nullptr, m_coordinates.size(), VertexPositionColor::GetVertexDeclaration(), GL_STREAM_DRAW)
    , m_program(contentManager->ReadAllText("Shaders/LineShaderColor.vert"), contentManager->ReadAllText("Shaders/LineShaderColor.frag"))
    , m_locWorldViewProjection(GLValues::INVALID_LOCATION)
  {
    // Odd sizes would break the paired (forward/backward) line generation below.
    if ((gridSize.X & 1) != 0 || (gridSize.Y & 1) != 0)
    {
      throw NotSupportedException("Even grid size required");
    }

    m_locWorldViewProjection = glGetUniformLocation(m_program.Get(), "WorldViewProjection");
    // if (m_locWorldViewProjection < 0 || m_locAmbientColor < 0)
    //   throw GraphicsException("Shader did not contain the expected uniforms");

    const auto screenWidth = static_cast<float>(screenSize.X);
    const auto screenHeight = static_cast<float>(screenSize.Y);
    const float aspectRatio = screenWidth / screenHeight;
    const float fov = 60.0f;
    // Calc distance that z=0 has to be away from the camera for the width and height to match the resolution
    float yMax = std::tan(fov * MathHelper::PI / 360.0f);
    float depth = screenHeight / 2.0f / yMax;

    Matrix world = Matrix::CreateRotationX(MathHelper::TO_RADS * 180);
    Matrix view = Matrix::CreateTranslation(-screenWidth * 0.5f, screenHeight * 0.5f, -depth);
    Matrix projection = Matrix::CreatePerspectiveFieldOfView(MathHelper::ToRadians(fov), aspectRatio, 1.0f, 2000.0f);
    m_worldViewProjection = world * view * projection;

    {
      // Pre-fill every vertex with the grid color, then zero out the alpha/color
      // of the "link" vertices at the start/end of each strip segment so the
      // degenerate connector lines between strips are invisible.
      Vector4 color(0.12f, 0.12f, 0.55f, 0.33f);
      VertexPositionColor defaultVertex(Vector3(), color);
      for (std::size_t i = 0; i < m_coordinates.size(); ++i)
      {
        m_coordinates[i] = defaultVertex;
      }

      const std::ptrdiff_t dstStrideX = m_gridFinalSize.X + 2;
      {
        // Horizontal-line section: blank the first and last vertex of each row strip.
        VertexPositionColor* pDst = m_coordinates.data();
        const VertexPositionColor* const pDstEnd = pDst + (dstStrideX * m_gridSize.Y);
        assert(pDstEnd <= (m_coordinates.data() + m_coordinates.size()));
        while (pDst < pDstEnd)
        {
          pDst[0].Color = Vector4();
          pDst[dstStrideX - 1].Color = Vector4();
          pDst += dstStrideX;
        }
      }
      {
        // Vertical-line section (stored after the horizontal section): blank the
        // first and last vertex of each column strip.
        const std::ptrdiff_t dstStrideY = m_gridFinalSize.Y + 2;
        VertexPositionColor* pDst = m_coordinates.data() + (dstStrideX * m_gridSize.Y);
        const VertexPositionColor* const pDstEnd = m_coordinates.data() + m_coordinates.size();
        while (pDst < pDstEnd)
        {
          pDst[0].Color = Vector4();
          pDst[dstStrideY - 1].Color = Vector4();
          pDst += dstStrideY;
        }
      }
    }
  }


  //! Human readable identifier used by the demo/benchmark UI.
  const char* GridRenderVBLineStrip2::GetName() const
  {
    return "VB+line strips 2, Catmull-Rom spline";
  }


  //! Regenerate the smoothed 3D line coordinates from the current point masses.
  void GridRenderVBLineStrip2::Update(const DemoTime& /*demoTime*/, const Vector2& /*areaSize*/, const std::vector<PointMass>& points)
  {
    Calc3DCoordinates(m_coordinates, points);
  }


  //! Render the pre-generated vertex buffer as one additive-blended line strip.
  void GridRenderVBLineStrip2::Draw(const GridRenderDrawContext& /*drawContext*/, const std::vector<PointMass>& /*points*/)
  {
    // glLineWidth(4);
    // glLineWidth(3);
    glDisable(GL_DEPTH_TEST);
    glEnable(GL_BLEND);
    glBlendFunc(GL_ONE, GL_ONE);

    glUseProgram(m_program.Get());
    if (m_locWorldViewProjection >= 0)
    {
      glUniformMatrix4fv(m_locWorldViewProjection, 1, 0u, m_worldViewProjection.DirectAccess());
    }

    auto& vb = m_vertexBuffer;
    glBindBuffer(vb.GetTarget(), vb.Get());
    vb.EnableAttribArrays();
    glDrawArrays(GL_LINE_STRIP, 0, vb.GetCapacity());
    vb.DisableAttribArrays();
  }


  //! Run both line generation passes and upload the result to the vertex buffer.
  //! The horizontal pass writes first; its return value is the vertical pass' offset.
  void GridRenderVBLineStrip2::Calc3DCoordinates(std::vector<VertexPositionColor>& rDst, const std::vector<PointMass>& points)
  {
    int32_t pointsWritten = 0;
    pointsWritten += CreateLinesHorizontal(rDst, points, pointsWritten);
    pointsWritten += CreateLinesVertical(rDst, points, pointsWritten);
    m_vertexBuffer.SetData(0, m_coordinates.data(), m_coordinates.size());
  }


  //! Generate the horizontal grid lines as one serpentine line strip
  //! (alternating left-to-right / right-to-left rows), inserting
  //! Catmull-Rom-interpolated midpoints between source points.
  //! Returns the number of vertices this pass occupies in rDst.
  //! NOTE: the border and interior writes interleave into the same rows via
  //! pointer strides; the offsets are assert-guarded rather than obvious.
  int32_t GridRenderVBLineStrip2::CreateLinesHorizontal(std::vector<VertexPositionColor>& rDst, const std::vector<PointMass>& points, const std::ptrdiff_t dstOffset)
  {
    const int srcGridMaxX = m_gridSize.X;
    const int srcGridMaxY = m_gridSize.Y;
    const int dstGridMaxX = m_gridFinalSize.X;
    // const int dstGridMaxY = m_gridFinalSize.Y;
    const std::ptrdiff_t srcStride = srcGridMaxX;
    const std::ptrdiff_t dstStride = dstGridMaxX + 2;

#ifndef NDEBUG
    const auto* const pDstEnd = rDst.data() + rDst.size();
#endif

    // Create the horizontal border points as they are a special case
    {
      const PointMass* pSrcLeft = points.data();
      const PointMass* pSrcRight = pSrcLeft + srcGridMaxX - 3;
      const PointMass* const pSrcEnd = pSrcLeft + (srcStride * srcGridMaxY);
      VertexPositionColor* pDstLeft = rDst.data() + dstOffset;
      VertexPositionColor* pDstRight = pDstLeft + dstGridMaxX - 3 + 1;
      // Processes two source rows per iteration (forward row + flipped row),
      // which is why an even grid height is required.
      while (pSrcLeft < pSrcEnd)
      {
        // lines from left to right
        assert(pSrcLeft < pSrcEnd);
        assert(pSrcRight < pSrcEnd);
        assert((pSrcLeft + 2) < pSrcEnd);
        assert((pSrcRight + 2) < pSrcEnd);
        assert(pDstLeft < pDstEnd);
        assert(pDstRight < pDstEnd);
        assert((pDstLeft + 3) < pDstEnd);
        assert((pDstRight + 3) < pDstEnd);
        pDstLeft[0].Position = pSrcLeft[0].m_position;
        pDstLeft[1].Position = pSrcLeft[0].m_position;
        pDstLeft[2].Position = VectorHelper::CatmullRom(pSrcLeft[0].m_position, pSrcLeft[0].m_position, pSrcLeft[1].m_position, pSrcLeft[2].m_position, 0.5f);
        pDstLeft[3].Position = pSrcLeft[1].m_position;

        pDstRight[1].Position = VectorHelper::CatmullRom(pSrcRight[0].m_position, pSrcRight[1].m_position, pSrcRight[2].m_position, pSrcRight[2].m_position, 0.5f);
        pDstRight[2].Position = pSrcRight[2].m_position;
        pDstRight[3].Position = pSrcRight[2].m_position;

        pSrcLeft += srcStride;
        pSrcRight += srcStride;
        pDstLeft += dstStride;
        pDstRight += dstStride;

        // flip the output so we do the lines from right to left
        assert(pSrcLeft < pSrcEnd);
        assert(pSrcRight < pSrcEnd);
        assert((pSrcLeft + 2) < pSrcEnd);
        assert((pSrcRight + 2) < pSrcEnd);
        assert(pDstLeft < pDstEnd);
        assert(pDstRight < pDstEnd);
        assert((pDstLeft + 2) < pDstEnd);
        assert((pDstRight + 3) < pDstEnd);

        pDstRight[0].Position = pSrcLeft[1].m_position;
        pDstRight[1].Position = VectorHelper::CatmullRom(pSrcLeft[0].m_position, pSrcLeft[0].m_position, pSrcLeft[1].m_position, pSrcLeft[2].m_position, 0.5f);
        pDstRight[2].Position = pSrcLeft[0].m_position;
        pDstRight[3].Position = pSrcLeft[0].m_position;

        pDstLeft[0].Position = pSrcRight[2].m_position;
        pDstLeft[1].Position = pSrcRight[2].m_position;
        pDstLeft[2].Position = VectorHelper::CatmullRom(pSrcRight[0].m_position, pSrcRight[1].m_position, pSrcRight[2].m_position, pSrcRight[2].m_position, 0.5f);

        pSrcLeft += srcStride;
        pSrcRight += srcStride;
        pDstLeft += dstStride;
        pDstRight += dstStride;
      }
    }

    {
      // horizontal pass - transfer all existing coordinates and spawn the new horizontal coordinates
      const PointMass* pSrc = points.data();
      const PointMass* const pSrcEnd = pSrc + (srcStride * srcGridMaxY);
      // +3 to skip the three points written in the border handling code
      VertexPositionColor* pDstLeft = rDst.data() + dstOffset + 3 + 1;
      VertexPositionColor* pDstRight = rDst.data() + dstOffset + dstGridMaxX - 4 + 1 + dstStride;
      const int constrainedGridMaxX = srcGridMaxX - 3;
      const auto dstStride2 = dstStride * 2;
      while (pSrc < pSrcEnd)
      {
        assert(pDstLeft < pDstEnd);
        // lines from left to right
        for (int x = 0; x < constrainedGridMaxX; ++x)
        {
          assert((pDstLeft + (x * 2)) < pDstEnd);
          assert((pDstLeft + (x * 2) + 1) < pDstEnd);
          // Interleave an interpolated midpoint and the next source point.
          pDstLeft[x * 2].Position = VectorHelper::CatmullRom(pSrc[x].m_position, pSrc[x + 1].m_position, pSrc[x + 2].m_position, pSrc[x + 3].m_position, 0.5f);
          pDstLeft[(x * 2) + 1].Position = pSrc[x + 2].m_position;
        }
        pSrc += srcStride;
        assert(pSrc < pSrcEnd);
        // lines from right to left
        for (int x = 0; x < constrainedGridMaxX; ++x)
        {
          assert((pDstRight - (x * 2)) >= rDst.data());
          assert((pDstRight - ((x * 2) + 1)) >= rDst.data());
          assert((pDstRight - (x * 2)) < pDstEnd);
          assert((pDstRight - ((x * 2) + 1)) < pDstEnd);
          pDstRight[-(x * 2)].Position = VectorHelper::CatmullRom(pSrc[x].m_position, pSrc[x + 1].m_position, pSrc[x + 2].m_position, pSrc[x + 3].m_position, 0.5f);
          pDstRight[-((x * 2) + 1)].Position = pSrc[x + 2].m_position;
        }
        pSrc += srcStride;
        pDstLeft += dstStride2;
        pDstRight += dstStride2;
      }
    }
    return ((m_gridFinalSize.X + 2) * m_gridSize.Y);
  }


  //! Generate the vertical grid lines; mirror image of CreateLinesHorizontal
  //! operating on columns (alternating bottom-to-top / top-to-bottom).
  //! Returns the number of vertices this pass occupies in rDst.
  int32_t GridRenderVBLineStrip2::CreateLinesVertical(std::vector<VertexPositionColor>& rDst, const std::vector<PointMass>& points, const std::ptrdiff_t dstOffset)
  {
    const int srcGridMaxX = m_gridSize.X;
    const int srcGridMaxY = m_gridSize.Y;
    // const int dstGridMaxX = m_gridFinalSize.X;
    const int dstGridMaxY = m_gridFinalSize.Y;
    const std::ptrdiff_t srcStride = srcGridMaxX;
    const std::ptrdiff_t dstStride = dstGridMaxY + 2;

#ifndef NDEBUG
    const auto* const pDstEnd = rDst.data() + rDst.size();
#endif

    // Create the vertical border points as they are a special case
    {
      const PointMass* pSrcTop = points.data();
      const PointMass* pSrcBottom = pSrcTop + (srcStride * (srcGridMaxY - 3));
      VertexPositionColor* pDstTop = rDst.data() + dstOffset;
      VertexPositionColor* pDstBottom = pDstTop + (dstGridMaxY - 3 + 1);
      // Handles two columns per iteration (note the extra ++x inside the body).
      for (int x = 0; x < srcGridMaxX; ++x)
      {
        assert(pDstTop < pDstEnd);
        assert((pDstTop + 1) < pDstEnd);
        assert((pDstTop + dstStride) <= pDstEnd);
        assert(pDstBottom < (pDstTop + dstStride));
        assert((pDstBottom + 3) < (pDstTop + dstStride));

        // Bottom to top
        pDstBottom[3].Position = pSrcTop[x].m_position;
        pDstBottom[2].Position = pSrcTop[x].m_position;
        pDstBottom[1].Position = VectorHelper::CatmullRom(pSrcTop[x].m_position, pSrcTop[x].m_position, pSrcTop[x + srcStride].m_position, pSrcTop[x + (2 * srcStride)].m_position, 0.5f);
        pDstBottom[0].Position = pSrcTop[x + srcStride].m_position;

        pDstTop[2].Position = VectorHelper::CatmullRom(pSrcBottom[x].m_position, pSrcBottom[x + srcStride].m_position, pSrcBottom[x + (srcStride * 2)].m_position, pSrcBottom[x + (srcStride * 2)].m_position, 0.5f);
        pDstTop[1].Position = pSrcBottom[x + (srcStride * 2)].m_position;
        pDstTop[0].Position = pSrcBottom[x + (srcStride * 2)].m_position;

        pDstTop += dstStride;
        pDstBottom += dstStride;
        ++x;

        // Top to bottom
        assert(pDstTop < pDstEnd);
        assert((pDstTop + 2) < pDstEnd);
        assert((pDstTop + dstStride) <= pDstEnd);
        assert(pDstBottom < (pDstTop + dstStride));
        assert((pDstBottom + 3) < (pDstTop + dstStride));

        pDstTop[0].Position = pSrcTop[x].m_position;
        pDstTop[1].Position = pSrcTop[x].m_position;
        pDstTop[2].Position = VectorHelper::CatmullRom(pSrcTop[x].m_position, pSrcTop[x].m_position, pSrcTop[x + srcStride].m_position, pSrcTop[x + (2 * srcStride)].m_position, 0.5f);
        pDstTop[3].Position = pSrcTop[x + srcStride].m_position;

        pDstBottom[1].Position = VectorHelper::CatmullRom(pSrcBottom[x].m_position, pSrcBottom[x + srcStride].m_position, pSrcBottom[x + (srcStride * 2)].m_position, pSrcBottom[x + (srcStride * 2)].m_position, 0.5f);
        pDstBottom[2].Position = pSrcBottom[x + (srcStride * 2)].m_position;
        pDstBottom[3].Position = pSrcBottom[x + (srcStride * 2)].m_position;

        pDstTop += dstStride;
        pDstBottom += dstStride;
      }
    }

    {
      // vertical pass - transfer all existing coordinates and spawn the new vertical coordinates
      // +3 to skip the three points written in the border handling code
      const PointMass* const pSrcStart = points.data();
      const PointMass* const pSrcConstrainedEnd = pSrcStart + (srcStride * (srcGridMaxY - 3));
      VertexPositionColor* pDstLeftStart = rDst.data() + dstOffset + 3 + 1;
      VertexPositionColor* pDstRightStart = rDst.data() + dstOffset + dstGridMaxY - 5 + 1;

      for (int x = 0; x < srcGridMaxX; ++x)
      {
        // Write lines from bottom to top
        const PointMass* pSrc = pSrcStart;
        VertexPositionColor* pDst = pDstRightStart;
        while (pSrc < pSrcConstrainedEnd)
        {
          assert(pDst < pDstEnd);
          assert((pDst + 1) < pDstEnd);
          pDst[1].Position = VectorHelper::CatmullRom(pSrc[x].m_position, pSrc[x + srcStride].m_position, pSrc[x + (2 * srcStride)].m_position, pSrc[x + (3 * srcStride)].m_position, 0.5f);
          pDst[0].Position = pSrc[x + (2 * srcStride)].m_position;
          pSrc += srcStride;
          pDst -= 2;
        }
        ++x;
        pDstLeftStart += dstStride;
        pDstRightStart += dstStride;

        // Write lines from top to bottom
        pSrc = pSrcStart;
        pDst = pDstLeftStart;
        while (pSrc < pSrcConstrainedEnd)
        {
          assert(pDst < pDstEnd);
          assert((pDst + 1) < pDstEnd);
          pDst[0].Position = VectorHelper::CatmullRom(pSrc[x].m_position, pSrc[x + srcStride].m_position, pSrc[x + (2 * srcStride)].m_position, pSrc[x + (3 * srcStride)].m_position, 0.5f);
          pDst[1].Position = pSrc[x + (2 * srcStride)].m_position;
          pSrc += srcStride;
          pDst += 2;
        }
        pDstLeftStart += dstStride;
        pDstRightStart += dstStride;
      }
    }
    return ((m_gridFinalSize.Y + 2) * m_gridSize.X);
  }
}
/* * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include "It_vfs_fat.h"

// Verifies that ftruncate/ftruncate64 fail with EBADF when called on a file
// descriptor that has already been closed.
static UINT32 TestCase(VOID)
{
    INT32 fd = 0;
    INT32 ret;
    CHAR pathname1[FAT_STANDARD_NAME_LENGTH] = FAT_PATH_NAME;

    // Create the file exclusively, then immediately close it so fd is stale.
    fd = open(pathname1, O_NONBLOCK | O_CREAT | O_RDWR | O_EXCL, S_IRWXU | S_IRWXG | S_IRWXO);
    ICUNIT_GOTO_NOT_EQUAL(fd, FAT_IS_ERROR, fd, EXIT1);

    ret = close(fd);
    ICUNIT_GOTO_EQUAL(ret, FAT_NO_ERROR, ret, EXIT1);

    // Both truncate variants must reject the closed descriptor with EBADF.
    ret = ftruncate(fd, 0x400);
    ICUNIT_GOTO_EQUAL(ret, FAT_IS_ERROR, ret, EXIT1);
    ICUNIT_GOTO_EQUAL(errno, EBADF, errno, EXIT1);

    ret = ftruncate64(fd, 0x400);
    ICUNIT_GOTO_EQUAL(ret, FAT_IS_ERROR, ret, EXIT1);
    ICUNIT_GOTO_EQUAL(errno, EBADF, errno, EXIT1);

    ret = remove(pathname1);
    ICUNIT_GOTO_EQUAL(ret, FAT_NO_ERROR, ret, EXIT);

    return FAT_NO_ERROR;
EXIT1:
    // NOTE(review): on this path fd may already have been closed above; the
    // extra close is best-effort cleanup.
    close(fd);
EXIT:
    remove(pathname1);
    return FAT_NO_ERROR;
}

/*
* -@test IT_FS_VFAT_694
* -@tspec The API test for truncate
* -@ttitle The API test for truncate with the fd has been closed for the first parameter
* -@tprecon The filesystem module is open
* -@tbrief
1. use the open to create one file;
2. use the close to close the file;
3. use the truncate to recover the space;
4. remove the file.
* -@texpect
1. Return succeeded
2. Return succeeded
3. Return failed
4. Successful operation
* -@tprior 1
* -@tauto TRUE
* -@tremark
*/

VOID ItFsFat694(VOID)
{
    TEST_ADD_CASE("IT_FS_FAT_694", TestCase, TEST_VFS, TEST_VFAT, TEST_LEVEL2, TEST_FUNCTION);
}
/*
    This file is part of Magnum.

    Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016
              Vladimír Vondruš <mosra@centrum.cz>

    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included
    in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
*/

#include "MagnumPlugins/JpegImporter/JpegImporter.h"

// Registers the JpegImporter plugin under the AbstractImporter 0.3 plugin
// interface so Corrade's plugin manager can load it dynamically.
CORRADE_PLUGIN_REGISTER(JpegImporter, Magnum::Trade::JpegImporter,
    "cz.mosra.magnum.Trade.AbstractImporter/0.3")
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE78_OS_Command_Injection__char_connect_socket_execl_73b.cpp
Label Definition File: CWE78_OS_Command_Injection.strings.label.xml
Template File: sources-sink-73b.tmpl.cpp
*/
/*
 * @description
 * CWE: 78 OS Command Injection
 * BadSource: connect_socket Read data using a connect socket (client side)
 * GoodSource: Fixed string
 * Sinks: execl
 *    BadSink : execute command with execl
 * Flow Variant: 73 Data flow: data passed in a list from one function to another in different source files
 *
 * */

#include "std_testcase.h"

#include <list>
#include <wchar.h>

#ifdef _WIN32
#define COMMAND_INT_PATH "%WINDIR%\\system32\\cmd.exe"
#define COMMAND_INT "cmd.exe"
#define COMMAND_ARG1 "/c"
#define COMMAND_ARG2 "dir "
#define COMMAND_ARG3 data
#else /* NOT _WIN32 */
#include <unistd.h>
#define COMMAND_INT_PATH "/bin/sh"
#define COMMAND_INT "sh"
#define COMMAND_ARG1 "-c"
#define COMMAND_ARG2 "ls "
#define COMMAND_ARG3 data
#endif

#ifdef _WIN32
#include <process.h>
#define EXECL _execl
#else /* NOT _WIN32 */
#define EXECL execl
#endif

using namespace std;

namespace CWE78_OS_Command_Injection__char_connect_socket_execl_73
{

#ifndef OMITBAD

// Sink half of the flow variant: receives the (tainted) command string via
// the list and passes it straight to execl. The source half lives in the
// matching "73a" file.
void badSink(list<char *> dataList)
{
    /* copy data out of dataList */
    char * data = dataList.back();
    /* execl - specify the path where the command is located */
    /* POTENTIAL FLAW: Execute command without validating input possibly leading to command injection */
    EXECL(COMMAND_INT_PATH, COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG3, NULL);
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
// Intentionally identical to badSink: in variant 73 the good/bad distinction
// is in the data SOURCE (file "a"); the sink is shared.
void goodG2BSink(list<char *> dataList)
{
    char * data = dataList.back();
    /* execl - specify the path where the command is located */
    /* POTENTIAL FLAW: Execute command without validating input possibly leading to command injection */
    EXECL(COMMAND_INT_PATH, COMMAND_INT_PATH, COMMAND_ARG1, COMMAND_ARG3, NULL);
}

#endif /* OMITGOOD */

} /* close namespace */
/* * This file is part of bogus, a C++ sparse block matrix library. * * Copyright 2013 Gilles Daviet <gdaviet@gmail.com> * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef BOGUS_BLOCK_STREAMS_HPP #define BOGUS_BLOCK_STREAMS_HPP #include <iostream> #include "SparseBlockMatrix.hpp" #include "SparseBlockIndexComputer.hpp" template < typename Derived > std::ostream& operator<<( std::ostream &out, const bogus::SparseBlockMatrixBase< Derived > &sbm ) { typedef bogus::SparseBlockIndexComputer< Derived, false, false > IndexComputerType ; IndexComputerType indexComputer( sbm ) ; typedef typename IndexComputerType::ReturnType SourceIndexType ; const SourceIndexType &sourceIndex = indexComputer.get() ; out << " Total rows: " << sbm.rows() << " / cols: " << sbm.cols() << std::endl ; for ( unsigned i = 0 ; i < (unsigned) sourceIndex.outerSize() ; ++ i ) { out << "Row " << i << ": " ; for( typename SourceIndexType::InnerIterator it( sourceIndex, i ) ; it ; ++ it ) { out << " " << it.inner() << "@" << it.ptr() << "; " ; } out << std::endl ; } out << " Blocks (" << sbm.nBlocks() << ")" << std::endl ; for ( unsigned i = 0 ; i < sbm.nBlocks() ; ++ i ) { out << sbm.block(i) << std::endl ; out << "^-- " << i << std::endl ; } return out ; } #endif
/* * Copyright (c) 2015 PLUMgrid, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <fcntl.h> #include <ftw.h> #include <map> #include <stdio.h> #include <string> #include <sys/stat.h> #include <sys/utsname.h> #include <unistd.h> #include <vector> #include <linux/bpf.h> #include <llvm/ADT/STLExtras.h> #include <llvm/ExecutionEngine/MCJIT.h> #include <llvm/ExecutionEngine/SectionMemoryManager.h> #include <llvm/IRReader/IRReader.h> #include <llvm/IR/IRBuilder.h> #include <llvm/IR/IRPrintingPasses.h> #include <llvm/IR/LegacyPassManager.h> #include <llvm/IR/LLVMContext.h> #include <llvm/IR/Module.h> #include <llvm/IR/Verifier.h> #include <llvm/Object/ObjectFile.h> #include <llvm/Support/FormattedStream.h> #include <llvm/Support/Host.h> #include <llvm/Support/SourceMgr.h> #include <llvm/Support/TargetSelect.h> #include <llvm/Transforms/IPO.h> #include <llvm/Transforms/IPO/PassManagerBuilder.h> #include <llvm-c/Transforms/IPO.h> #include "bcc_exception.h" #include "frontends/b/loader.h" #include "frontends/clang/loader.h" #include "frontends/clang/b_frontend_action.h" #include "bpf_module.h" #include "exported_files.h" #include "kbuild_helper.h" #include "shared_table.h" #include "libbpf.h" namespace ebpf { using std::get; using std::make_tuple; using std::map; using std::move; using std::string; using std::tuple; using std::unique_ptr; using std::vector; using namespace llvm; const string BPFModule::FN_PREFIX = BPF_FN_PREFIX; // Snooping class to 
remember the sections as the JIT creates them class MyMemoryManager : public SectionMemoryManager { public: explicit MyMemoryManager(map<string, tuple<uint8_t *, uintptr_t>> *sections) : sections_(sections) { } virtual ~MyMemoryManager() {} uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName) override { uint8_t *Addr = SectionMemoryManager::allocateCodeSection(Size, Alignment, SectionID, SectionName); //printf("allocateCodeSection: %s Addr %p Size %ld Alignment %d SectionID %d\n", // SectionName.str().c_str(), (void *)Addr, Size, Alignment, SectionID); (*sections_)[SectionName.str()] = make_tuple(Addr, Size); return Addr; } uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName, bool isReadOnly) override { uint8_t *Addr = SectionMemoryManager::allocateDataSection(Size, Alignment, SectionID, SectionName, isReadOnly); //printf("allocateDataSection: %s Addr %p Size %ld Alignment %d SectionID %d RO %d\n", // SectionName.str().c_str(), (void *)Addr, Size, Alignment, SectionID, isReadOnly); (*sections_)[SectionName.str()] = make_tuple(Addr, Size); return Addr; } map<string, tuple<uint8_t *, uintptr_t>> *sections_; }; BPFModule::BPFModule(unsigned flags, TableStorage *ts) : flags_(flags), ctx_(new LLVMContext), id_(std::to_string((uintptr_t)this)), ts_(ts) { InitializeNativeTarget(); InitializeNativeTargetAsmPrinter(); LLVMInitializeBPFTarget(); LLVMInitializeBPFTargetMC(); LLVMInitializeBPFTargetInfo(); LLVMInitializeBPFAsmPrinter(); LLVMLinkInMCJIT(); /* call empty function to force linking of MCJIT */ if (!ts_) { local_ts_ = createSharedTableStorage(); ts_ = &*local_ts_; } } static StatusTuple unimplemented_sscanf(const char *, void *) { return StatusTuple(-1, "sscanf unimplemented"); } static StatusTuple unimplemented_snprintf(char *, size_t, const void *) { return StatusTuple(-1, "snprintf unimplemented"); } BPFModule::~BPFModule() { for (auto &v : tables_) { 
// NOTE(review): the statements below are the tail of a member function that
// begins before this chunk; it resets each table's read/write callbacks to
// the "unimplemented" stubs and then tears down the engines and the table
// namespace for this module id. Transcribed unchanged.
v->key_sscanf = unimplemented_sscanf;
v->leaf_sscanf = unimplemented_sscanf;
v->key_snprintf = unimplemented_snprintf;
v->leaf_snprintf = unimplemented_snprintf;
}
engine_.reset();
rw_engine_.reset();
ctx_.reset();
ts_->DeletePrefix(Path({id_}));
}

// Emit a host-side call to fprintf(stderr, fmt, args...) into the function
// currently being built by `B`. Declares fprintf in `mod` on first use.
// Only reachable from call sites guarded by `if (0)` — debugging aid.
static void debug_printf(Module *mod, IRBuilder<> &B, const string &fmt,
                         vector<Value *> args) {
  GlobalVariable *fmt_gvar = B.CreateGlobalString(fmt, "fmt");
  // Prepend the format string (decayed to i8*) and the host's stderr handle
  // so the final argument list matches fprintf(FILE*, const char*, ...).
  args.insert(args.begin(), B.CreateInBoundsGEP(fmt_gvar, vector<Value *>({B.getInt64(0), B.getInt64(0)})));
  args.insert(args.begin(), B.getInt64((uintptr_t)stderr));
  Function *fprintf_fn = mod->getFunction("fprintf");
  if (!fprintf_fn) {
    vector<Type *> fprintf_fn_args({B.getInt64Ty(), B.getInt8PtrTy()});
    FunctionType *fprintf_fn_type = FunctionType::get(B.getInt32Ty(), fprintf_fn_args, /*isVarArg=*/true);
    fprintf_fn = Function::Create(fprintf_fn_type, GlobalValue::ExternalLinkage, "fprintf", mod);
    fprintf_fn->setCallingConv(CallingConv::C);
    fprintf_fn->addFnAttr(Attribute::NoUnwind);
  }
  B.CreateCall(fprintf_fn, args);
}

// Emit one sscanf "leg" into the reader function under construction:
//   fmt += "%n";
//   int nread = 0;
//   int n = sscanf(s, fmt, args..., &nread);
//   if (n < 0) return -1;      // (or n != expected, when exact_args)
//   s = &s[nread];
// Afterwards *args is reset to its two placeholder slots and *fmt is cleared
// so the caller can start accumulating the next leg.
static void finish_sscanf(IRBuilder<> &B, vector<Value *> *args, string *fmt,
                          const map<string, Value *> &locals, bool exact_args) {
  Value *sptr = locals.at("sptr");
  Value *nread = locals.at("nread");
  Function *cur_fn = B.GetInsertBlock()->getParent();
  Function *sscanf_fn = B.GetInsertBlock()->getModule()->getFunction("sscanf");
  *fmt += "%n";
  B.CreateStore(B.getInt32(0), nread);
  GlobalVariable *fmt_gvar = B.CreateGlobalString(*fmt, "fmt");
  (*args)[1] = B.CreateInBoundsGEP(fmt_gvar, {B.getInt64(0), B.getInt64(0)});
  (*args)[0] = B.CreateLoad(sptr);
  args->push_back(nread);
  CallInst *call = B.CreateCall(sscanf_fn, *args);
  call->setTailCall(true);

  BasicBlock *label_true = BasicBlock::Create(B.getContext(), "", cur_fn);
  BasicBlock *label_false = BasicBlock::Create(B.getContext(), "", cur_fn);

  // exact_args means fail if we don't consume the exact number of "%" inputs;
  // exact_args is disabled for string parsing (empty-string case).
  // args->size() - 3 = number of conversions (minus s, fmt, trailing &nread).
  Value *cond = exact_args ? B.CreateICmpNE(call, B.getInt32(args->size() - 3))
                           : B.CreateICmpSLT(call, B.getInt32(0));
  B.CreateCondBr(cond, label_true, label_false);

  B.SetInsertPoint(label_true);
  B.CreateRet(B.getInt32(-1));

  B.SetInsertPoint(label_false);
  // s = &s[nread];
  B.CreateStore(
      B.CreateInBoundsGEP(B.CreateLoad(sptr), B.CreateLoad(nread, true)),
      sptr);
  // Keep only the two placeholder slots (s, fmt) for the next leg.
  args->resize(2);
  fmt->clear();
}

// Recursive helper that walks an llvm type and simultaneously builds up the
// scanf/printf format string (*fmt) and the matching argument list (*args).
// `out` is a pointer to the current (sub-)object; is_writer selects
// printf-style output ("0x%x") vs scanf-style input ("%i").
static void parse_type(IRBuilder<> &B, vector<Value *> *args, string *fmt,
                       Type *type, Value *out, const map<string, Value *> &locals,
                       bool is_writer) {
  if (StructType *st = dyn_cast<StructType>(type)) {
    *fmt += "{ ";
    unsigned idx = 0;
    for (auto field : st->elements()) {
      parse_type(B, args, fmt, field, B.CreateStructGEP(type, out, idx++),
                 locals, is_writer);
      *fmt += " ";
    }
    *fmt += "}";
  } else if (ArrayType *at = dyn_cast<ArrayType>(type)) {
    if (at->getElementType() == B.getInt8Ty()) {
      // treat i8[] as a char string instead of as an array of u8's
      if (is_writer) {
        *fmt += "\"%s\"";
        args->push_back(out);
      } else {
        // When reading strings, scanf doesn't support empty "", so we need to
        // break this up into multiple scanf calls. To understand it, let's
        // take an example:
        // struct Event {
        //   u32 a;
        //   struct {
        //     char x[64];
        //     int y;
        //   } b[2];
        //   u32 c;
        // };
        // The writer string would look like:
        //  "{ 0x%x [ { \"%s\" 0x%x } { \"%s\" 0x%x } ] 0x%x }"
        // But the reader string needs to restart at each \"\".
        //  reader0(const char *s, struct Event *val) {
        //    int nread, rc;
        //    nread = 0;
        //    rc = sscanf(s, "{ %i [ { \"%n", &val->a, &nread);
        //    if (rc != 1) return -1;
        //    s += nread; nread = 0;
        //    rc = sscanf(s, "%[^\"]%n", &val->b[0].x, &nread);
        //    if (rc < 0) return -1;
        //    s += nread; nread = 0;
        //    rc = sscanf(s, "\" %i } { \"%n", &val->b[0].y, &nread);
        //    if (rc != 1) return -1;
        //    s += nread; nread = 0;
        //    rc = sscanf(s, "%[^\"]%n", &val->b[1].x, &nread);
        //    if (rc < 0) return -1;
        //    s += nread; nread = 0;
        //    rc = sscanf(s, "\" %i } ] %i }%n", &val->b[1].y, &val->c, &nread);
        //    if (rc != 2) return -1;
        //    s += nread; nread = 0;
        //    return 0;
        //  }
        *fmt += "\"";
        finish_sscanf(B, args, fmt, locals, true);

        *fmt = "%[^\"]";
        args->push_back(out);
        finish_sscanf(B, args, fmt, locals, false);

        *fmt = "\"";
      }
    } else {
      *fmt += "[ ";
      for (size_t i = 0; i < at->getNumElements(); ++i) {
        parse_type(B, args, fmt, at->getElementType(),
                   B.CreateStructGEP(type, out, i), locals, is_writer);
        *fmt += " ";
      }
      *fmt += "]";
    }
  } else if (isa<PointerType>(type)) {
    // NOTE(review): this produces "0xlx" / "0xli" with no '%' conversion and
    // pushes no argument — looks like it was meant to be "0x%l"; confirm
    // against actual writer output before changing.
    *fmt += "0xl";
    if (is_writer)
      *fmt += "x";
    else
      *fmt += "i";
  } else if (IntegerType *it = dyn_cast<IntegerType>(type)) {
    if (is_writer)
      *fmt += "0x";
    // Pick the printf/scanf length modifier from the integer bit width.
    if (it->getBitWidth() <= 8)
      *fmt += "%hh";
    else if (it->getBitWidth() <= 16)
      *fmt += "%h";
    else if (it->getBitWidth() <= 32)
      *fmt += "%";
    else
      *fmt += "%l";
    if (is_writer)
      *fmt += "x";
    else
      *fmt += "i";
    // Writer passes the value itself; reader passes the destination pointer.
    args->push_back(is_writer ? B.CreateLoad(out) : out);
  }
}

// make_reader generates a dynamic function in the instruction set of the host
// (not bpf) that is able to convert c-strings in the pretty-print format of
// make_writer back into binary representations. The encoding of the string
// takes the llvm ir structure format, which closely maps the c structure but
// not exactly (no support for unions for instance).
// The general algorithm is:
//  pod types (u8..u64)          <= %i
//  array types
//   u8[]  no nested quotes :(   <= "..."
//   !u8[]                       <= [ %i %i ...
//                                  ] (continuation of the format summary)
//  struct types
//   struct { u8 a; u64 b; }     <= { %i %i }
//  nesting is supported
//   struct { struct { u8 a[]; }; }  <= { "" }
//   struct { struct { u64 a[]; }; } <= { [ %i %i .. ] }
string BPFModule::make_reader(Module *mod, Type *type) {
  // Memoized: one reader function per distinct llvm type.
  auto fn_it = readers_.find(type);
  if (fn_it != readers_.end())
    return fn_it->second;

  // Shape of the generated function:
  //  int read(const char *in, Type *out) {
  //    int n = sscanf(in, "{ %i ... }", &out->field1, ...);
  //    if (n != num_fields) return -1;
  //    return 0;
  //  }
  IRBuilder<> B(*ctx_);

  // Declare sscanf in the rw module on first use.
  FunctionType *sscanf_fn_type = FunctionType::get(
      B.getInt32Ty(), {B.getInt8PtrTy(), B.getInt8PtrTy()}, /*isVarArg=*/true);
  Function *sscanf_fn = mod->getFunction("sscanf");
  if (!sscanf_fn) {
    sscanf_fn = Function::Create(sscanf_fn_type, GlobalValue::ExternalLinkage,
                                 "sscanf", mod);
    sscanf_fn->setCallingConv(CallingConv::C);
    sscanf_fn->addFnAttr(Attribute::NoUnwind);
  }

  string name = "reader" + std::to_string(readers_.size());
  vector<Type *> fn_args({B.getInt8PtrTy(), PointerType::getUnqual(type)});
  FunctionType *fn_type = FunctionType::get(B.getInt32Ty(), fn_args, /*isVarArg=*/false);
  Function *fn = Function::Create(fn_type, GlobalValue::ExternalLinkage, name, mod);
  auto arg_it = fn->arg_begin();
  Argument *arg_in = &*arg_it;
  ++arg_it;
  arg_in->setName("in");
  Argument *arg_out = &*arg_it;
  ++arg_it;
  arg_out->setName("out");

  BasicBlock *label_entry = BasicBlock::Create(*ctx_, "entry", fn);
  B.SetInsertPoint(label_entry);
  // Stack slots shared by every sscanf leg (see finish_sscanf).
  Value *nread = B.CreateAlloca(B.getInt32Ty());
  Value *sptr = B.CreateAlloca(B.getInt8PtrTy());
  map<string, Value *> locals{{"nread", nread}, {"sptr", sptr}};
  B.CreateStore(arg_in, sptr);
  // Two placeholder slots for (s, fmt); finish_sscanf fills them per leg.
  vector<Value *> args({nullptr, nullptr});
  string fmt;
  parse_type(B, &args, &fmt, type, arg_out, locals, false);

  if (0) debug_printf(mod, B, "%p %p\n", vector<Value *>({arg_in, arg_out}));

  finish_sscanf(B, &args, &fmt, locals, true);

  B.CreateRet(B.getInt32(0));
  readers_[type] = name;
  return name;
}
// make_writer generates a dynamic function in the instruction set of the host
// (not bpf) that is able to pretty-print key/leaf entries as a c-string. The
// encoding of the string takes the llvm ir structure format, which closely
// maps the c structure but not exactly (no support for unions for instance).
// The general algorithm is:
//  pod types (u8..u64)          => 0x%x
//  array types
//   u8[]                        => "..."
//   !u8[]                       => [ 0x%x 0x%x ... ]
//  struct types
//   struct { u8 a; u64 b; }     => { 0x%x 0x%x }
//  nesting is supported
//   struct { struct { u8 a[]; }; }  => { "" }
//   struct { struct { u64 a[]; }; } => { [ 0x%x 0x%x .. ] }
string BPFModule::make_writer(Module *mod, Type *type) {
  // Memoized: one writer function per distinct llvm type.
  auto fn_it = writers_.find(type);
  if (fn_it != writers_.end())
    return fn_it->second;

  // Shape of the generated function:
  //  int write(int len, char *out, Type *in) {
  //    return snprintf(out, len, "{ %i ... }", out->field1, ...);
  //  }
  IRBuilder<> B(*ctx_);

  string name = "writer" + std::to_string(writers_.size());
  vector<Type *> fn_args({B.getInt8PtrTy(), B.getInt64Ty(), PointerType::getUnqual(type)});
  FunctionType *fn_type = FunctionType::get(B.getInt32Ty(), fn_args, /*isVarArg=*/false);
  Function *fn = Function::Create(fn_type, GlobalValue::ExternalLinkage, name, mod);
  auto arg_it = fn->arg_begin();
  Argument *arg_out = &*arg_it;
  ++arg_it;
  arg_out->setName("out");
  Argument *arg_len = &*arg_it;
  ++arg_it;
  arg_len->setName("len");
  Argument *arg_in = &*arg_it;
  ++arg_it;
  arg_in->setName("in");

  BasicBlock *label_entry = BasicBlock::Create(*ctx_, "entry", fn);
  B.SetInsertPoint(label_entry);

  map<string, Value *> locals{
      {"nread", B.CreateAlloca(B.getInt64Ty())},
  };
  // snprintf argument list: (out, len, fmt, values...); slot 2 (fmt) is
  // filled in after parse_type has accumulated the format string.
  vector<Value *> args({arg_out, B.CreateZExt(arg_len, B.getInt64Ty()), nullptr});
  string fmt;
  parse_type(B, &args, &fmt, type, arg_in, locals, true);

  GlobalVariable *fmt_gvar = B.CreateGlobalString(fmt, "fmt");
  args[2] = B.CreateInBoundsGEP(fmt_gvar, vector<Value *>({B.getInt64(0), B.getInt64(0)}));

  if (0) debug_printf(mod, B, "%d %p %p\n", vector<Value *>({arg_len, arg_out, arg_in}));

  // Declare snprintf in the rw module on first use.
  vector<Type *> snprintf_fn_args({B.getInt8PtrTy(), B.getInt64Ty(), B.getInt8PtrTy()});
  FunctionType *snprintf_fn_type = FunctionType::get(B.getInt32Ty(), snprintf_fn_args, /*isVarArg=*/true);
  Function *snprintf_fn = mod->getFunction("snprintf");
  if (!snprintf_fn)
    snprintf_fn = Function::Create(snprintf_fn_type, GlobalValue::ExternalLinkage,
                                   "snprintf", mod);
  // NOTE(review): unlike the sscanf declaration, these two setters run on
  // every call rather than only on first creation; harmless but could be
  // moved inside the if above.
  snprintf_fn->setCallingConv(CallingConv::C);
  snprintf_fn->addFnAttr(Attribute::NoUnwind);

  CallInst *call = B.CreateCall(snprintf_fn, args);
  call->setTailCall(true);

  B.CreateRet(call);
  writers_[type] = name;
  return name;
}

// Verify/optimize the read-write helper module and JIT it into its own
// ExecutionEngine. Returns a null pointer on engine-creation failure (with a
// message on stderr).
unique_ptr<ExecutionEngine> BPFModule::finalize_rw(unique_ptr<Module> m) {
  Module *mod = &*m;
  run_pass_manager(*mod);
  string err;
  EngineBuilder builder(move(m));
  builder.setErrorStr(&err);
  builder.setUseOrcMCJITReplacement(true);
  auto engine = unique_ptr<ExecutionEngine>(builder.create());
  if (!engine)
    fprintf(stderr, "Could not create ExecutionEngine: %s\n", err.c_str());
  return engine;
}

// load an entire c file as a module
int BPFModule::load_cfile(const string &file, bool in_memory, const char *cflags[], int ncflags) {
  clang_loader_ = ebpf::make_unique<ClangLoader>(&*ctx_, flags_);
  if (clang_loader_->parse(&mod_, *ts_, file, in_memory, cflags, ncflags, id_))
    return -1;
  return 0;
}

// NOTE: this is a duplicate of the above, but planning to deprecate if we
// settle on clang as the frontend
// Load in a pre-built list of functions into the initial Module object, then
// build an ExecutionEngine.
int BPFModule::load_includes(const string &text) {
  clang_loader_ = ebpf::make_unique<ClangLoader>(&*ctx_, flags_);
  // in_memory=true: `text` is the source itself, not a file path; empty id.
  if (clang_loader_->parse(&mod_, *ts_, text, true, nullptr, 0, ""))
    return -1;
  return 0;
}

// Force-inline every function not explicitly marked noinline (bpf has no
// function calls), then build per-table key/leaf reader and writer helpers in
// a separate host-side module and JIT them via finalize_rw.
int BPFModule::annotate() {
  for (auto fn = mod_->getFunctionList().begin(); fn != mod_->getFunctionList().end(); ++fn)
    if (!fn->hasFnAttribute(Attribute::NoInline))
      fn->addFnAttr(Attribute::AlwaysInline);

  // separate module to hold the reader functions
  auto m = ebpf::make_unique<Module>("sscanf", *ctx_);

  size_t id = 0;
  Path path({id_});
  // Walk every table registered under this module's id in the table storage.
  for (auto it = ts_->lower_bound(path), up = ts_->upper_bound(path); it != up; ++it) {
    TableDesc &table = it->second;
    tables_.push_back(&it->second);
    table_names_[table.name] = id++;
    GlobalValue *gvar = mod_->getNamedValue(table.name);
    if (!gvar) continue;
    // The table global's pointee struct carries {key_type, leaf_type, ...}.
    if (PointerType *pt = dyn_cast<PointerType>(gvar->getType())) {
      if (StructType *st = dyn_cast<StructType>(pt->getElementType())) {
        if (st->getNumElements() < 2) continue;
        Type *key_type = st->elements()[0];
        Type *leaf_type = st->elements()[1];

        using std::placeholders::_1;
        using std::placeholders::_2;
        using std::placeholders::_3;
        // Bind the generated helper's name into this module's sscanf/snprintf
        // dispatchers so callers can parse/print keys and leaves.
        table.key_sscanf = std::bind(&BPFModule::sscanf, this,
                                     make_reader(&*m, key_type), _1, _2);
        table.leaf_sscanf = std::bind(&BPFModule::sscanf, this,
                                      make_reader(&*m, leaf_type), _1, _2);
        table.key_snprintf = std::bind(&BPFModule::snprintf, this,
                                       make_writer(&*m, key_type), _1, _2, _3);
        table.leaf_snprintf = std::bind(&BPFModule::snprintf, this,
                                        make_writer(&*m, leaf_type), _1, _2, _3);
      }
    }
  }

  rw_engine_ = finalize_rw(move(m));
  if (!rw_engine_)
    return -1;
  return 0;
}

// Invoke the JITed reader `fn_name` to parse c-string `str` into `val`.
StatusTuple BPFModule::sscanf(string fn_name, const char *str, void *val) {
  auto fn = (int (*)(const char *, void *))rw_engine_->getFunctionAddress(fn_name);
  if (!fn)
    return StatusTuple(-1, "sscanf not available");
  int rc = fn(str, val);
  if (rc < 0)
    return StatusTuple(rc, "error in sscanf: %s", std::strerror(errno));
  return StatusTuple(rc);
}

// Invoke the JITed writer `fn_name` to pretty-print `val` into str[0..sz).
StatusTuple BPFModule::snprintf(string fn_name, char *str, size_t sz, const void *val) {
  auto fn = (int (*)(char *, size_t, const void *))rw_engine_->getFunctionAddress(fn_name);
  if (!fn)
    return StatusTuple(-1, "snprintf not available");
  int rc = fn(str, sz, val);
  if (rc < 0)
    return StatusTuple(rc, "error in snprintf: %s", std::strerror(errno));
  // A return equal to sz means the output was truncated.
  if ((size_t)rc == sz)
    return StatusTuple(-1, "buffer of size %zd too small", sz);
  return StatusTuple(0);
}

// Print the module IR to stderr.
void BPFModule::dump_ir(Module &mod) {
  legacy::PassManager PM;
  PM.add(createPrintModulePass(errs()));
  PM.run(mod);
}

// Verify the module, then run the -O3 optimization pipeline (with forced
// always-inlining). Returns non-zero on verification failure.
int BPFModule::run_pass_manager(Module &mod) {
  if (verifyModule(mod, &errs())) {
    // flags_ & 1 enables IR debug dumps.
    if (flags_ & 1)
      dump_ir(mod);
    return -1;
  }

  legacy::PassManager PM;
  PassManagerBuilder PMB;
  PMB.OptLevel = 3;
  PM.add(createFunctionInliningPass());
  /*
   * llvm < 4.0 needs
   * PM.add(createAlwaysInlinerPass());
   * llvm >= 4.0 needs
   * PM.add(createAlwaysInlinerLegacyPass());
   * use below 'stable' workaround
   */
  LLVMAddAlwaysInlinerPass(reinterpret_cast<LLVMPassManagerRef>(&PM));
  PMB.populateModulePassManager(PM);
  if (flags_ & 1)
    PM.add(createPrintModulePass(outs()));
  PM.run(mod);
  return 0;
}

// Compile the bpf module for the bpf target, run optimizations, and collect
// the emitted per-function sections (names carry FN_PREFIX).
int BPFModule::finalize() {
  Module *mod = &*mod_;

  mod->setDataLayout("e-m:e-p:64:64-i64:64-n32:64-S128");
  mod->setTargetTriple("bpf-pc-linux");

  string err;
  EngineBuilder builder(move(mod_));
  builder.setErrorStr(&err);
  builder.setMCJITMemoryManager(ebpf::make_unique<MyMemoryManager>(&sections_));
  builder.setMArch("bpf");
  builder.setUseOrcMCJITReplacement(true);
  engine_ = unique_ptr<ExecutionEngine>(builder.create());
  if (!engine_) {
    fprintf(stderr, "Could not create ExecutionEngine: %s\n", err.c_str());
    return -1;
  }

  if (int rc = run_pass_manager(*mod))
    return rc;

  engine_->finalizeObject();

  // give functions an id
  for (auto section : sections_)
    if (!strncmp(FN_PREFIX.c_str(), section.first.c_str(), FN_PREFIX.size()))
      function_names_.push_back(section.first);

  return 0;
}

size_t BPFModule::num_functions() const {
  return function_names_.size();
}

// Returns the function name with the internal section prefix stripped, or
// nullptr if `id` is out of range.
const char * BPFModule::function_name(size_t id) const {
  if (id >= function_names_.size())
    return nullptr;
  return function_names_[id].c_str() + FN_PREFIX.size();
}

uint8_t * BPFModule::function_start(size_t id) const {
  if (id >= function_names_.size())
    return nullptr;
  auto section = sections_.find(function_names_[id]);
  if (section == sections_.end())
    return nullptr;
  return get<0>(section->second);
}

uint8_t * BPFModule::function_start(const string &name) const {
  auto section = sections_.find(FN_PREFIX + name);
  if (section == sections_.end())
    return nullptr;
  return get<0>(section->second);
}

size_t BPFModule::function_size(size_t id) const {
  if (id >= function_names_.size())
    return 0;
  auto section = sections_.find(function_names_[id]);
  if (section == sections_.end())
    return 0;
  return get<1>(section->second);
}

size_t BPFModule::function_size(const string &name) const {
  auto section = sections_.find(FN_PREFIX + name);
  if (section == sections_.end())
    return 0;
  return get<1>(section->second);
}

// Pointer into the "license" section, or nullptr if absent.
char * BPFModule::license() const {
  auto section = sections_.find("license");
  if (section == sections_.end())
    return nullptr;
  return (char *)get<0>(section->second);
}

// Kernel version baked into the "version" section, or 0 if absent.
unsigned BPFModule::kern_version() const {
  auto section = sections_.find("version");
  if (section == sections_.end())
    return 0;
  return *(unsigned *)get<0>(section->second);
}

size_t BPFModule::num_tables() const {
  return tables_.size();
}

// Maps a table name to its index; ~0ull acts as the not-found sentinel.
size_t BPFModule::table_id(const string &name) const {
  auto it = table_names_.find(name);
  if (it == table_names_.end()) return ~0ull;
  return it->second;
}

int BPFModule::table_fd(const string &name) const {
  return table_fd(table_id(name));
}

int BPFModule::table_fd(size_t id) const {
  if (id >= tables_.size())
    return -1;
  return tables_[id]->fd;
}

int BPFModule::table_type(const string &name) const {
  return table_type(table_id(name));
}

int BPFModule::table_type(size_t id) const {
  if (id >= tables_.size())
    return -1;
  return tables_[id]->type;
}

size_t BPFModule::table_max_entries(const string &name) const {
  return table_max_entries(table_id(name));
}

size_t BPFModule::table_max_entries(size_t id) const {
  if (id >= tables_.size())
    return 0;
  return tables_[id]->max_entries;
}

int BPFModule::table_flags(const string &name) const {
  return table_flags(table_id(name));
}

int BPFModule::table_flags(size_t id) const {
  if (id >= tables_.size())
    return -1;
  return tables_[id]->flags;
}

const char * BPFModule::table_name(size_t id) const {
  if (id >= tables_.size())
    return nullptr;
  return tables_[id]->name.c_str();
}

// Key/leaf type descriptions are unavailable when the B frontend was used.
const char * BPFModule::table_key_desc(size_t id) const {
  if (b_loader_) return nullptr;
  if (id >= tables_.size())
    return nullptr;
  return tables_[id]->key_desc.c_str();
}

const char * BPFModule::table_key_desc(const string &name) const {
  return table_key_desc(table_id(name));
}

const char * BPFModule::table_leaf_desc(size_t id) const {
  if (b_loader_) return nullptr;
  if (id >= tables_.size())
    return nullptr;
  return tables_[id]->leaf_desc.c_str();
}

const char * BPFModule::table_leaf_desc(const string &name) const {
  return table_leaf_desc(table_id(name));
}

size_t BPFModule::table_key_size(size_t id) const {
  if (id >= tables_.size())
    return 0;
  return tables_[id]->key_size;
}

size_t BPFModule::table_key_size(const string &name) const {
  return table_key_size(table_id(name));
}

size_t BPFModule::table_leaf_size(size_t id) const {
  if (id >= tables_.size())
    return 0;
  return tables_[id]->leaf_size;
}

size_t BPFModule::table_leaf_size(const string &name) const {
  return table_leaf_size(table_id(name));
}

// Scratch buffers sized for one key and one leaf of a given table.
struct TableIterator {
  TableIterator(size_t key_size, size_t leaf_size)
      : key(new uint8_t[key_size]), leaf(new uint8_t[leaf_size]) {
  }
  unique_ptr<uint8_t[]> key;
  unique_ptr<uint8_t[]> leaf;
  uint8_t keyb[512];
};

// Pretty-print a table key into buf; logs the failure reason to stderr.
int BPFModule::table_key_printf(size_t id, char *buf, size_t buflen, const void *key) {
  if (id >= tables_.size())
    return -1;
  const TableDesc &desc = *tables_[id];
  StatusTuple rc = desc.key_snprintf(buf, buflen, key);
  if (rc.code() < 0) {
    fprintf(stderr, "%s\n", rc.msg().c_str());
    return -1;
  }
  return 0;
}

// Pretty-print a table leaf into buf; logs the failure reason to stderr.
int BPFModule::table_leaf_printf(size_t id, char *buf, size_t buflen, const void *leaf) {
  if (id >= tables_.size())
    return -1;
  const TableDesc &desc = *tables_[id];
  StatusTuple rc = desc.leaf_snprintf(buf, buflen, leaf);
  if (rc.code() < 0) {
    fprintf(stderr, "%s\n", rc.msg().c_str());
    return -1;
  }
  return 0;
}

// Parse a pretty-printed key string back into binary form.
int BPFModule::table_key_scanf(size_t id, const char *key_str, void *key) {
  if (id >= tables_.size())
    return -1;
  const TableDesc &desc = *tables_[id];
  StatusTuple rc = desc.key_sscanf(key_str, key);
  if (rc.code() < 0) {
    fprintf(stderr, "%s\n", rc.msg().c_str());
    return -1;
  }
  return 0;
}

// Parse a pretty-printed leaf string back into binary form.
int BPFModule::table_leaf_scanf(size_t id, const char *leaf_str, void *leaf) {
  if (id >= tables_.size())
    return -1;
  const TableDesc &desc = *tables_[id];
  StatusTuple rc = desc.leaf_sscanf(leaf_str, leaf);
  if (rc.code() < 0) {
    fprintf(stderr, "%s\n", rc.msg().c_str());
    return -1;
  }
  return 0;
}

// load a B file, which comes in two parts
int BPFModule::load_b(const string &filename, const string &proto_filename) {
  if (!sections_.empty()) {
    fprintf(stderr, "Program already initialized\n");
    return -1;
  }
  if (filename.empty() || proto_filename.empty()) {
    fprintf(stderr, "Invalid filenames\n");
    return -1;
  }

  // Helpers are inlined in the following file (C). Load the definitions and
  // pass the partially compiled module to the B frontend to continue with.
  auto helpers_h = ExportedFiles::headers().find("/virtual/include/bcc/helpers.h");
  if (helpers_h == ExportedFiles::headers().end()) {
    fprintf(stderr, "Internal error: missing bcc/helpers.h");
    return -1;
  }
  if (int rc = load_includes(helpers_h->second))
    return rc;

  b_loader_.reset(new BLoader(flags_));
  if (int rc = b_loader_->parse(&*mod_, filename, proto_filename, *ts_, id_))
    return rc;
  if (int rc = annotate())
    return rc;
  if (int rc = finalize())
    return rc;
  return 0;
}

// load a C file
int BPFModule::load_c(const string &filename, const char *cflags[], int ncflags) {
  if (!sections_.empty()) {
    fprintf(stderr, "Program already initialized\n");
    return -1;
  }
  if (filename.empty()) {
    fprintf(stderr, "Invalid filename\n");
    return -1;
  }
  if (int rc = load_cfile(filename, false, cflags, ncflags))
    return rc;
  if (int rc = annotate())
    return rc;
  if (int rc = finalize())
    return rc;
  return 0;
}

// load a C text string
int BPFModule::load_string(const string &text, const char *cflags[], int ncflags) {
  if (!sections_.empty()) {
    fprintf(stderr, "Program already initialized\n");
    return -1;
  }
  if (int rc = load_cfile(text, true, cflags, ncflags))
    return rc;
  if (int rc = annotate())
    return rc;
  if (int rc = finalize())
    return rc;
  return 0;
}

}  // namespace ebpf
/* * Copyright (C) 2018 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vector> #include "OperationsUtils.h" #include "Pow.h" namespace android::nn { namespace pow { Result<Version> validate(const IOperationValidationContext* context) { NN_RET_CHECK(context->getNumInputs() == 2 && context->getNumOutputs() == 1) << context->invalidInOutNumberMessage(2, 1); auto inputType = context->getInputType(0); std::vector<OperandType> inExpectedTypes; std::vector<OperandType> outExpectedTypes; if (inputType == OperandType::TENSOR_FLOAT16 || inputType == OperandType::TENSOR_FLOAT32) { inExpectedTypes = {inputType, inputType}; outExpectedTypes = {inputType}; } else { NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << context->getOperationName(); } Version version; if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { version = kVersionFeatureLevel4; } else { version = kVersionFeatureLevel3; } NN_TRY(context->validateOperationOperandTypes(inExpectedTypes, outExpectedTypes)); return version; } } // namespace pow NN_DEFINE_VALIDATION_FUNCTION(POW, pow::validate); } // namespace android::nn
// Copyright (c) 2009-2014 The Bitcoin developers // Copyright (c) 2017 The PIVX Developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "uritests.h" #include "guiutil.h" #include "walletmodel.h" #include <QUrl> void URITests::uriTests() { SendCoinsRecipient rv; QUrl uri; uri.setUrl(QString("rupeeevolution:D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?req-dontexist=")); QVERIFY(!GUIUtil::parseBitcoinURI(uri, &rv)); uri.setUrl(QString("rupeeevolution:D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?dontexist=")); QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv)); QVERIFY(rv.address == QString("D72dLgywmL73JyTwQBfuU29CADz9yCJ99v")); QVERIFY(rv.label == QString()); QVERIFY(rv.amount == 0); uri.setUrl(QString("rupeeevolution:D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?label=Some Example Address")); QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv)); QVERIFY(rv.address == QString("D72dLgywmL73JyTwQBfuU29CADz9yCJ99v")); QVERIFY(rv.label == QString("Some Example Address")); QVERIFY(rv.amount == 0); uri.setUrl(QString("rupeeevolution:D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?amount=0.001")); QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv)); QVERIFY(rv.address == QString("D72dLgywmL73JyTwQBfuU29CADz9yCJ99v")); QVERIFY(rv.label == QString()); QVERIFY(rv.amount == 100000); uri.setUrl(QString("rupeeevolution:D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?amount=1.001")); QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv)); QVERIFY(rv.address == QString("D72dLgywmL73JyTwQBfuU29CADz9yCJ99v")); QVERIFY(rv.label == QString()); QVERIFY(rv.amount == 100100000); uri.setUrl(QString("rupeeevolution:D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?amount=100&label=Some Example")); QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv)); QVERIFY(rv.address == QString("D72dLgywmL73JyTwQBfuU29CADz9yCJ99v")); QVERIFY(rv.amount == 10000000000LL); QVERIFY(rv.label == QString("Some Example")); uri.setUrl(QString("rupeeevolution:D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?message=Some Example Address")); 
QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv)); QVERIFY(rv.address == QString("D72dLgywmL73JyTwQBfuU29CADz9yCJ99v")); QVERIFY(rv.label == QString()); QVERIFY(GUIUtil::parseBitcoinURI("rupeeevolution://D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?message=Some Example Address", &rv)); QVERIFY(rv.address == QString("D72dLgywmL73JyTwQBfuU29CADz9yCJ99v")); QVERIFY(rv.label == QString()); uri.setUrl(QString("rupeeevolution:D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?req-message=Some Example Address")); QVERIFY(GUIUtil::parseBitcoinURI(uri, &rv)); uri.setUrl(QString("rupeeevolution:D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?amount=1,000&label=Some Example")); QVERIFY(!GUIUtil::parseBitcoinURI(uri, &rv)); uri.setUrl(QString("rupeeevolution:D72dLgywmL73JyTwQBfuU29CADz9yCJ99v?amount=1,000.0&label=Some Example")); QVERIFY(!GUIUtil::parseBitcoinURI(uri, &rv)); }
/* Copyright (c) 2010-2016, Arvid Norberg All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include "libtorrent/thread.hpp"
#include "libtorrent/assert.hpp"

#ifdef TORRENT_BEOS
#include <kernel/OS.h>
#endif

#ifdef BOOST_HAS_PTHREADS
#include <sys/time.h> // for gettimeofday()
#include <boost/cstdint.hpp>
#endif

#include <algorithm>

namespace libtorrent
{
	// Portable millisecond sleep, dispatched per platform at compile time.
	void sleep(int milliseconds)
	{
#if defined TORRENT_WINDOWS || defined TORRENT_CYGWIN
		Sleep(milliseconds);
#elif defined TORRENT_BEOS
		snooze_until(system_time() + boost::int64_t(milliseconds) * 1000, B_SYSTEM_TIMEBASE);
#else
		usleep(milliseconds * 1000);
#endif
	}

#ifdef BOOST_HAS_PTHREADS

	// pthreads backend: thin wrapper over pthread_cond_t. The caller must
	// hold the associated mutex::scoped_lock across wait/wait_for.
	condition_variable::condition_variable()
	{
		pthread_cond_init(&m_cond, 0);
	}

	condition_variable::~condition_variable()
	{
		pthread_cond_destroy(&m_cond);
	}

	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		// wow, this is quite a hack
		// (reinterprets the wrapper lock's mutex as the raw pthread mutex)
		pthread_cond_wait(&m_cond, reinterpret_cast<pthread_mutex_t*>(&l.mutex()));
	}

	void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
	{
		TORRENT_ASSERT(l.locked());
		// pthread_cond_timedwait takes an absolute deadline, so convert
		// now + rel_time into a timespec.
		struct timeval tv;
		struct timespec ts;
		gettimeofday(&tv, NULL);
		// NOTE(review): '%' binds tighter than '+', so only the sub-second
		// part of rel_time is added to tv_usec here; the whole seconds are
		// added to tv_sec below, with the carry from `microseconds`.
		// Appears intentional — confirm before touching.
		boost::uint64_t microseconds = tv.tv_usec + total_microseconds(rel_time) % 1000000;
		ts.tv_nsec = (microseconds % 1000000) * 1000;
		ts.tv_sec = tv.tv_sec + total_seconds(rel_time) + microseconds / 1000000;

		// wow, this is quite a hack
		pthread_cond_timedwait(&m_cond, reinterpret_cast<pthread_mutex_t*>(&l.mutex()), &ts);
	}

	void condition_variable::notify_all()
	{
		pthread_cond_broadcast(&m_cond);
	}

	void condition_variable::notify()
	{
		pthread_cond_signal(&m_cond);
	}

#elif defined TORRENT_WINDOWS || defined TORRENT_CYGWIN

	// Win32 backend: emulates a condition variable with a semaphore plus a
	// waiter count. m_num_waiters is read/written around the lock handoff.
	condition_variable::condition_variable()
		: m_num_waiters(0)
	{
#if _WIN32_WINNT <= 0x0501
		m_sem = CreateSemaphore(0, 0, INT_MAX, 0);
#else
		m_sem = CreateSemaphoreEx(0, 0, INT_MAX, 0, 0, SEMAPHORE_ALL_ACCESS);
#endif
	}

	condition_variable::~condition_variable()
	{
		CloseHandle(m_sem);
	}

	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		// Release the lock while blocked on the semaphore, reacquire after.
		l.unlock();
		WaitForSingleObjectEx(m_sem, INFINITE, FALSE);
		l.lock();
		--m_num_waiters;
	}

	void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		WaitForSingleObjectEx(m_sem, total_milliseconds(rel_time), FALSE);
		l.lock();
		--m_num_waiters;
	}

	void condition_variable::notify_all()
	{
		// Wake every current waiter by releasing that many semaphore slots.
		if (m_num_waiters > 0)
		{
			ReleaseSemaphore(m_sem, m_num_waiters, 0);
		}
	}

	void condition_variable::notify()
	{
		if (m_num_waiters > 0)
		{
			ReleaseSemaphore(m_sem, (std::min)(m_num_waiters, 1), 0);
		}
	}

#elif defined TORRENT_BEOS

	// BeOS backend: same semaphore-plus-waiter-count scheme as Win32, using
	// the native sem API.
	condition_variable::condition_variable()
		: m_num_waiters(0)
	{
		m_sem = create_sem(0, 0);
	}

	condition_variable::~condition_variable()
	{
		delete_sem(m_sem);
	}

	void condition_variable::wait(mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		acquire_sem(m_sem);
		l.lock();
		--m_num_waiters;
	}

	void condition_variable::wait_for(mutex::scoped_lock& l, time_duration rel_time)
	{
		TORRENT_ASSERT(l.locked());
		++m_num_waiters;
		l.unlock();
		acquire_sem_etc(m_sem, 1, B_RELATIVE_TIMEOUT, total_microseconds(rel_time));
		l.lock();
		--m_num_waiters;
	}

	void condition_variable::notify_all()
	{
		if (m_num_waiters > 0)
		{
			release_sem_etc(m_sem, m_num_waiters, 0);
		}
	}

	void condition_variable::notify()
	{
		if (m_num_waiters > 0)
		{
			release_sem_etc(m_sem, (std::min)(m_num_waiters, 1), 0);
		}
	}

#else
#error not implemented
#endif

}