text
stringlengths
5
1.04M
//////////////////////////////////////////////////////////////////////////////// /// DISCLAIMER /// /// Copyright 2016 by EMC Corporation, All Rights Reserved /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// http://www.apache.org/licenses/LICENSE-2.0 /// /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. /// /// Copyright holder is EMC Corporation /// /// @author Andrey Abramov /// @author Vasiliy Nabatchikov //////////////////////////////////////////////////////////////////////////////// #include "tests_shared.hpp" #include "filter_test_case_base.hpp" #include "formats/formats_10.hpp" #include "filter_test_case_base.hpp" #include "analysis/token_attributes.hpp" #include "search/phrase_filter.hpp" #include "store/memory_directory.hpp" #include "store/fs_directory.hpp" namespace ir = iresearch; namespace tests { void analyzed_json_field_factory( tests::document& doc, const std::string& name, const tests::json_doc_generator::json_value& data) { typedef templates::text_field<std::string> text_field; class string_field : public templates::string_field { public: string_field(const ir::string_ref& name, const ir::string_ref& value) : templates::string_field(name, value) { } const ir::flags& features() const { static ir::flags features{ ir::frequency::type() }; return features; } }; // string_field if (data.is_string()) { // analyzed field doc.indexed.push_back(std::make_shared<text_field>( std::string(name.c_str()) + "_anl", data.str )); // not analyzed field doc.insert(std::make_shared<string_field>( ir::string_ref(name), data.str )); } } class 
phrase_filter_test_case : public filter_test_case_base { protected: void sequential() { // add segment { tests::json_doc_generator gen( resource("phrase_sequential.json"), &tests::analyzed_json_field_factory); add_segment(gen); } // read segment auto rdr = open_reader(); // empty field { ir::by_phrase q; auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto docs = prepared->execute(*sub); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); } // empty phrase { ir::by_phrase q; q.field("phrase_anl"); auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto docs = prepared->execute(*sub); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); } // equals to term_filter "fox" { irs::bytes_ref actual_value; ir::by_phrase q; q.field("phrase_anl") .push_back("fox"); auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto column = sub->column_reader("name"); ASSERT_NE(nullptr, column); auto values = column->values(); auto docs = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs->value())); auto docs_seek = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs_seek->value())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("A", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("A", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("G", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("G", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("I", 
irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("I", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("K", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("K", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("L", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("L", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_FALSE(docs->next()); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); } // search "fox" on field without positions { ir::by_phrase q; q.field("phrase") .push_back("fox"); auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto docs = prepared->execute(*sub); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); ASSERT_FALSE(docs->next()); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); } // equals to term_filter "fox" with phrase offset // which is does not matter { irs::bytes_ref actual_value; ir::by_phrase q; q.field("phrase_anl") .push_back("fox", ir::integer_traits<size_t>::const_max); auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto column = sub->column_reader("name"); ASSERT_NE(nullptr, column); auto values = column->values(); auto docs = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs->value())); auto docs_seek = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs_seek->value())); 
ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("A", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("A", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("G", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("G", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("I", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("I", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("K", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("K", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("L", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("L", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_FALSE(docs->next()); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); } // "quick brown fox" { irs::bytes_ref actual_value; ir::by_phrase q; q.field("phrase_anl") .push_back("quick") .push_back("brown") .push_back("fox"); auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto column = sub->column_reader("name"); 
ASSERT_NE(nullptr, column); auto values = column->values(); auto docs = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs->value())); auto docs_seek = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs_seek->value())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("A", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("A", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("G", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("G", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("I", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("I", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_FALSE(docs->next()); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs_seek->seek(ir::type_limits<ir::type_t::doc_id_t>::eof()))); } // "quick brown fox" with order { irs::bytes_ref actual_value; irs::by_phrase q; q.field("phrase_anl") .push_back("quick") .push_back("brown") .push_back("fox"); size_t collect_count = 0; size_t finish_count = 0; irs::order ord; auto& sort = ord.add<tests::sort::custom_sort>(false); sort.collector_collect = [&collect_count](const irs::sub_reader&, const irs::term_reader&, const irs::attribute_view&)->void{ ++collect_count; }; sort.collector_finish = 
[&finish_count](irs::attribute_store&, const irs::index_reader&)->void{ ++finish_count; }; sort.prepare_collector = [&sort]()->irs::sort::collector::ptr { return irs::memory::make_unique<sort::custom_sort::prepared::collector>(sort); }; sort.scorer_add = [](irs::doc_id_t& dst, const irs::doc_id_t& src)->void { ASSERT_TRUE( irs::type_limits<irs::type_t::doc_id_t>::invalid() == dst || dst == src ); dst = src; }; auto pord = ord.prepare(); auto prepared = q.prepare(rdr, pord); ASSERT_EQ(3, collect_count); ASSERT_EQ(3, finish_count); // 3 sub-terms in phrase auto sub = rdr.begin(); auto column = sub->column_reader("name"); ASSERT_NE(nullptr, column); auto values = column->values(); auto docs = prepared->execute(*sub, pord); ASSERT_FALSE(iresearch::type_limits<irs::type_t::doc_id_t>::valid(docs->value())); auto docs_seek = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<irs::type_t::doc_id_t>::valid(docs_seek->value())); auto& score = docs->attributes().get<irs::score>(); ASSERT_FALSE(!score); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("A", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("A", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("G", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("G", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("I", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("I", irs::to_string<irs::string_ref>(actual_value.c_str())); 
ASSERT_FALSE(docs->next()); ASSERT_TRUE(irs::type_limits<irs::type_t::doc_id_t>::eof(docs->value())); ASSERT_TRUE(irs::type_limits<irs::type_t::doc_id_t>::eof(docs_seek->seek(irs::type_limits<ir::type_t::doc_id_t>::eof()))); } // "fox ... quick" { irs::bytes_ref actual_value; ir::by_phrase q; q.field("phrase_anl") .push_back("fox") .push_back("quick", 1); auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto column = sub->column_reader("name"); ASSERT_NE(nullptr, column); auto values = column->values(); auto docs = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs->value())); auto docs_seek = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs_seek->value())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("L", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("L", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_FALSE(docs->next()); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs_seek->seek(ir::type_limits<ir::type_t::doc_id_t>::eof()))); } // "fox ... 
quick" with phrase offset // which is does not matter { irs::bytes_ref actual_value; ir::by_phrase q; q.field("phrase_anl") .push_back("fox", ir::integer_traits<size_t>::const_max) .push_back("quick", 1); auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto column = sub->column_reader("name"); ASSERT_NE(nullptr, column); auto values = column->values(); auto docs = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid( docs->value())); auto docs_seek = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs_seek->value())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("L", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("L", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_FALSE(docs->next()); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs_seek->seek(ir::type_limits<ir::type_t::doc_id_t>::eof()))); } // "fox ... ... quick" { ir::by_phrase q; q.field("phrase_anl") .push_back("fox") .push_back("quick", 2); auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto docs = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs->value())); ASSERT_FALSE(docs->next()); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); } // "eye ... 
eye" { irs::bytes_ref actual_value; ir::by_phrase q; q.field("phrase_anl") .push_back("eye") .push_back("eye", 1); auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto column = sub->column_reader("name"); ASSERT_NE(nullptr, column); auto values = column->values(); auto docs = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs->value())); auto docs_seek = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs_seek->value())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("C", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("C", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_FALSE(docs->next()); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs_seek->seek(ir::type_limits<ir::type_t::doc_id_t>::eof()))); } // "as in the past we are looking forward" { irs::bytes_ref actual_value; ir::by_phrase q; q.field("phrase_anl") .push_back("as") .push_back("in") .push_back("the") .push_back("past") .push_back("we") .push_back("are") .push_back("looking") .push_back("forward"); auto prepared = q.prepare(rdr); auto sub = rdr.begin(); auto column = sub->column_reader("name"); ASSERT_NE(nullptr, column); auto values = column->values(); auto docs = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs->value())); auto docs_seek = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<iresearch::type_t::doc_id_t>::valid(docs_seek->value())); ASSERT_TRUE(docs->next()); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("H", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); 
ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("H", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_FALSE(docs->next()); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs->value())); ASSERT_TRUE(ir::type_limits<ir::type_t::doc_id_t>::eof(docs_seek->seek(ir::type_limits<ir::type_t::doc_id_t>::eof()))); } // "as in the past we are looking forward" with order { irs::bytes_ref actual_value; irs::by_phrase q; q.field("phrase_anl") .push_back("as") .push_back("in") .push_back("the") .push_back("past") .push_back("we") .push_back("are") .push_back("looking") .push_back("forward"); irs::order ord; auto& sort = ord.add<tests::sort::custom_sort>(false); sort.scorer_add = [](irs::doc_id_t& dst, const irs::doc_id_t& src)->void { ASSERT_TRUE( irs::type_limits<irs::type_t::doc_id_t>::invalid() == dst || dst == src ); dst = src; }; auto pord = ord.prepare(); auto prepared = q.prepare(rdr, pord); auto sub = rdr.begin(); auto column = sub->column_reader("name"); ASSERT_NE(nullptr, column); auto values = column->values(); auto docs = prepared->execute(*sub, pord); ASSERT_FALSE(iresearch::type_limits<irs::type_t::doc_id_t>::valid(docs->value())); auto docs_seek = prepared->execute(*sub); ASSERT_FALSE(iresearch::type_limits<irs::type_t::doc_id_t>::valid(docs_seek->value())); auto& score = docs->attributes().get<irs::score>(); ASSERT_FALSE(!score); ASSERT_TRUE(docs->next()); score->evaluate(); ASSERT_EQ(docs->value(),pord.get<irs::doc_id_t>(score->c_str(), 0)); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("H", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_EQ(docs->value(), docs_seek->seek(docs->value())); ASSERT_TRUE(values(docs->value(), actual_value)); ASSERT_EQ("H", irs::to_string<irs::string_ref>(actual_value.c_str())); ASSERT_FALSE(docs->next()); ASSERT_TRUE(irs::type_limits<irs::type_t::doc_id_t>::eof(docs->value())); 
ASSERT_TRUE(irs::type_limits<irs::type_t::doc_id_t>::eof(docs_seek->seek(irs::type_limits<ir::type_t::doc_id_t>::eof()))); } } }; // phrase_filter_test_case } // tests // ---------------------------------------------------------------------------- // --SECTION-- by_phrase base tests // ---------------------------------------------------------------------------- TEST(by_phrase_test, ctor) { ir::by_phrase q; ASSERT_EQ(ir::by_phrase::type(), q.type()); ASSERT_EQ("", q.field()); ASSERT_TRUE(q.empty()); ASSERT_EQ(0, q.size()); ASSERT_EQ(q.begin(), q.end()); ASSERT_EQ(ir::boost::no_boost(), q.boost()); auto& features = ir::by_phrase::required(); ASSERT_EQ(2, features.size()); ASSERT_TRUE(features.check<ir::frequency>()); ASSERT_TRUE(features.check<ir::position>()); } TEST(by_phrase_test, boost) { // no boost { // no terms { ir::by_phrase q; q.field("field"); auto prepared = q.prepare(tests::empty_index_reader::instance()); ASSERT_EQ(ir::boost::no_boost(), ir::boost::extract(prepared->attributes())); } // single term { ir::by_phrase q; q.field("field").push_back("quick"); auto prepared = q.prepare(tests::empty_index_reader::instance()); ASSERT_EQ(ir::boost::no_boost(), ir::boost::extract(prepared->attributes())); } // multiple terms { ir::by_phrase q; q.field("field").push_back("quick").push_back("brown"); auto prepared = q.prepare(tests::empty_index_reader::instance()); ASSERT_EQ(ir::boost::no_boost(), ir::boost::extract(prepared->attributes())); } } // with boost { iresearch::boost::boost_t boost = 1.5f; // no terms, return empty query { ir::by_phrase q; q.field("field"); q.boost(boost); auto prepared = q.prepare(tests::empty_index_reader::instance()); ASSERT_EQ(ir::boost::no_boost(), ir::boost::extract(prepared->attributes())); } // single term { ir::by_phrase q; q.field("field").push_back("quick"); q.boost(boost); auto prepared = q.prepare(tests::empty_index_reader::instance()); ASSERT_EQ(boost, ir::boost::extract(prepared->attributes())); } // single multiple terms { 
ir::by_phrase q; q.field("field").push_back("quick").push_back("brown"); q.boost(boost); auto prepared = q.prepare(tests::empty_index_reader::instance()); ASSERT_EQ(boost, ir::boost::extract(prepared->attributes())); } } } TEST(by_phrase_test, push_back_insert) { ir::by_phrase q; // push_back { q.push_back("quick"); q.push_back(ir::ref_cast<ir::byte_type>(ir::string_ref("brown")), 1); q.push_back(ir::bstring(ir::ref_cast<ir::byte_type>(ir::string_ref("fox")))); ASSERT_FALSE(q.empty()); ASSERT_EQ(3, q.size()); // check elements via positions { ASSERT_EQ(ir::ref_cast<ir::byte_type>(ir::string_ref("quick")), q[0]); ASSERT_EQ(ir::ref_cast<ir::byte_type>(ir::string_ref("brown")), q[2]); ASSERT_EQ(ir::ref_cast<ir::byte_type>(ir::string_ref("fox")), q[3]); } // check elements via iterators { auto it = q.begin(); ASSERT_NE(q.end(), it); ASSERT_EQ(0, it->first); ASSERT_EQ(ir::ref_cast<ir::byte_type>(ir::string_ref("quick")), it->second); ++it; ASSERT_NE(q.end(), it); ASSERT_EQ(2, it->first); ASSERT_EQ(ir::ref_cast<ir::byte_type>(ir::string_ref("brown")), it->second); ++it; ASSERT_NE(q.end(), it); ASSERT_EQ(3, it->first); ASSERT_EQ(ir::ref_cast<ir::byte_type>(ir::string_ref("fox")), it->second); ++it; ASSERT_EQ(q.end(), it); } // push term { q.push_back("squirrel", 0); ASSERT_EQ(ir::ref_cast<ir::byte_type>(ir::string_ref("squirrel")), q[4]); } ASSERT_EQ(4, q.size()); } // insert { q[3] = ir::ref_cast<ir::byte_type>(ir::string_ref("jumps")); ASSERT_EQ(ir::ref_cast<ir::byte_type>(ir::string_ref("jumps")), q[3]); ASSERT_EQ(4, q.size()); q.insert(5, "lazy"); ASSERT_EQ(ir::ref_cast<ir::byte_type>(ir::string_ref("lazy")), q[5]); ASSERT_EQ(5, q.size()); q.insert(28, ir::bstring(ir::ref_cast<ir::byte_type>(ir::string_ref("dog")))); ASSERT_EQ(ir::ref_cast<ir::byte_type>(ir::string_ref("dog")), q[28]); ASSERT_EQ(6, q.size()); } } TEST(by_phrase_test, equal) { ASSERT_EQ(ir::by_phrase(), ir::by_phrase()); { ir::by_phrase q0; q0.field("name"); q0.push_back("quick"); 
q0.push_back("brown"); ir::by_phrase q1; q1.field("name"); q1.push_back("quick"); q1.push_back("brown"); ASSERT_EQ(q0, q1); ASSERT_EQ(q0.hash(), q1.hash()); } { ir::by_phrase q0; q0.field("name"); q0.push_back("quick"); q0.push_back("squirrel"); ir::by_phrase q1; q1.field("name"); q1.push_back("quick"); q1.push_back("brown"); ASSERT_NE(q0, q1); } { ir::by_phrase q0; q0.field("name1"); q0.push_back("quick"); q0.push_back("brown"); ir::by_phrase q1; q1.field("name"); q1.push_back("quick"); q1.push_back("brown"); ASSERT_NE(q0, q1); } { ir::by_phrase q0; q0.field("name"); q0.push_back("quick"); ir::by_phrase q1; q1.field("name"); q1.push_back("quick"); q1.push_back("brown"); ASSERT_NE(q0, q1); } } // ---------------------------------------------------------------------------- // --SECTION-- memory_directory + iresearch_format_10 // ---------------------------------------------------------------------------- class memory_phrase_filter_test_case : public tests::phrase_filter_test_case { protected: virtual irs::directory* get_directory() override { return new irs::memory_directory(); } virtual irs::format::ptr get_codec() override { static irs::version10::format FORMAT; return irs::format::ptr(&FORMAT, [](irs::format*)->void{}); } }; TEST_F(memory_phrase_filter_test_case, by_phrase) { sequential(); } // ---------------------------------------------------------------------------- // --SECTION-- fs_directory + iresearch_format_10 // ---------------------------------------------------------------------------- class fs_phrase_filter_test_case : public tests::phrase_filter_test_case { protected: virtual irs::directory* get_directory() override { const fs::path dir = fs::path(test_dir()).append("index"); return new irs::fs_directory(dir.string()); } virtual irs::format::ptr get_codec() override { static irs::version10::format FORMAT; return irs::format::ptr(&FORMAT, [](irs::format*)->void{}); } }; TEST_F(fs_phrase_filter_test_case, by_phrase) { sequential(); }
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#ifndef _THRIFT_PROTOCOL_THEADERPROTOCOL_CPP_
#define _THRIFT_PROTOCOL_THEADERPROTOCOL_CPP_ 1

#include "THeaderProtocol.h"
#include "TCompactProtocol.h"
#include "TJSONProtocol.h"
#include <thrift/TApplicationException.h>

#include <limits>

#include <boost/static_assert.hpp>

namespace apache { namespace thrift { namespace protocol {

// (Re)creates the concrete protocol 'proto_' to match the protocol id
// currently advertised by the header transport. No-op when a protocol of
// the right id already exists, so repeated calls are cheap.
// Throws TApplicationException::INVALID_PROTOCOL for unknown ids.
void THeaderProtocol::resetProtocol() {
  if (proto_ && protoId_ == trans_->getProtocolId()) {
    return;
  }

  protoId_ = trans_->getProtocolId();

  switch (protoId_) {
    case T_BINARY_PROTOCOL:
      proto_ = boost::shared_ptr<TProtocol>(
          new TBinaryProtocolT<THeaderTransport>(trans_));
      break;

    case T_COMPACT_PROTOCOL:
      proto_ = boost::shared_ptr<TProtocol>(
          new TCompactProtocolT<THeaderTransport>(trans_));
      break;

    case T_JSON_PROTOCOL:
      proto_ = boost::shared_ptr<TProtocol>(new TJSONProtocol(trans_));
      break;

    default:
      throw TApplicationException(TApplicationException::INVALID_PROTOCOL,
                                  "Unknown protocol requested");
  }
}

// Re-syncs the delegate protocol (in case the peer switched protocols)
// and records the sequence number on the transport before delegating.
uint32_t THeaderProtocol::writeMessageBegin(const std::string& name,
                                            const TMessageType messageType,
                                            const int32_t seqId) {
  resetProtocol(); // Reset in case we changed protocols
  trans_->setSequenceNumber(seqId);
  return proto_->writeMessageBegin(name, messageType, seqId);
}

// All of the write* methods below simply delegate to the concrete protocol
// selected by resetProtocol().

uint32_t THeaderProtocol::writeMessageEnd() {
  return proto_->writeMessageEnd();
}

uint32_t THeaderProtocol::writeStructBegin(const char* name) {
  return proto_->writeStructBegin(name);
}

uint32_t THeaderProtocol::writeStructEnd() {
  return proto_->writeStructEnd();
}

uint32_t THeaderProtocol::writeFieldBegin(const char* name,
                                          const TType fieldType,
                                          const int16_t fieldId) {
  return proto_->writeFieldBegin(name, fieldType, fieldId);
}

uint32_t THeaderProtocol::writeFieldEnd() {
  return proto_->writeFieldEnd();
}

uint32_t THeaderProtocol::writeFieldStop() {
  return proto_->writeFieldStop();
}

uint32_t THeaderProtocol::writeMapBegin(const TType keyType,
                                        const TType valType,
                                        const uint32_t size) {
  return proto_->writeMapBegin(keyType, valType, size);
}

uint32_t THeaderProtocol::writeMapEnd() {
  return proto_->writeMapEnd();
}

uint32_t THeaderProtocol::writeListBegin(const TType elemType,
                                         const uint32_t size) {
  return proto_->writeListBegin(elemType, size);
}

uint32_t THeaderProtocol::writeListEnd() {
  return proto_->writeListEnd();
}

uint32_t THeaderProtocol::writeSetBegin(const TType elemType,
                                        const uint32_t size) {
  return proto_->writeSetBegin(elemType, size);
}

uint32_t THeaderProtocol::writeSetEnd() {
  return proto_->writeSetEnd();
}

uint32_t THeaderProtocol::writeBool(const bool value) {
  return proto_->writeBool(value);
}

uint32_t THeaderProtocol::writeByte(const int8_t byte) {
  return proto_->writeByte(byte);
}

uint32_t THeaderProtocol::writeI16(const int16_t i16) {
  return proto_->writeI16(i16);
}

uint32_t THeaderProtocol::writeI32(const int32_t i32) {
  return proto_->writeI32(i32);
}

uint32_t THeaderProtocol::writeI64(const int64_t i64) {
  return proto_->writeI64(i64);
}

uint32_t THeaderProtocol::writeDouble(const double dub) {
  return proto_->writeDouble(dub);
}

uint32_t THeaderProtocol::writeString(const std::string& str) {
  return proto_->writeString(str);
}

uint32_t THeaderProtocol::writeBinary(const std::string& str) {
  return proto_->writeBinary(str);
}

/**
 * Reading functions
 */

// Pulls the next frame from the header transport and switches the delegate
// protocol if the frame advertises a different one. If the transport
// rejects the frame, the exception is reported back to the peer as a
// T_EXCEPTION message before delegating the read (which then surfaces the
// underlying failure to the caller).
uint32_t THeaderProtocol::readMessageBegin(std::string& name,
                                           TMessageType& messageType,
                                           int32_t& seqId) {
  // Read the next frame, and change protocols if needed
  try {
    trans_->resetProtocol();
    resetProtocol();
  } catch (const TApplicationException& ex) {
    writeMessageBegin("", T_EXCEPTION, 0);
    ex.write((TProtocol*)this);
    writeMessageEnd();
    trans_->flush();
  }
  return proto_->readMessageBegin(name, messageType, seqId);
}

// All of the read* methods below simply delegate to the concrete protocol.

uint32_t THeaderProtocol::readMessageEnd() {
  return proto_->readMessageEnd();
}

uint32_t THeaderProtocol::readStructBegin(std::string& name) {
  return proto_->readStructBegin(name);
}

uint32_t THeaderProtocol::readStructEnd() {
  return proto_->readStructEnd();
}

uint32_t THeaderProtocol::readFieldBegin(std::string& name,
                                         TType& fieldType,
                                         int16_t& fieldId) {
  return proto_->readFieldBegin(name, fieldType, fieldId);
}

uint32_t THeaderProtocol::readFieldEnd() {
  return proto_->readFieldEnd();
}

uint32_t THeaderProtocol::readMapBegin(TType& keyType,
                                       TType& valType,
                                       uint32_t& size) {
  return proto_->readMapBegin(keyType, valType, size);
}

uint32_t THeaderProtocol::readMapEnd() {
  return proto_->readMapEnd();
}

uint32_t THeaderProtocol::readListBegin(TType& elemType, uint32_t& size) {
  return proto_->readListBegin(elemType, size);
}

uint32_t THeaderProtocol::readListEnd() {
  return proto_->readListEnd();
}

uint32_t THeaderProtocol::readSetBegin(TType& elemType, uint32_t& size) {
  return proto_->readSetBegin(elemType, size);
}

uint32_t THeaderProtocol::readSetEnd() {
  return proto_->readSetEnd();
}

uint32_t THeaderProtocol::readBool(bool& value) {
  return proto_->readBool(value);
}

uint32_t THeaderProtocol::readByte(int8_t& byte) {
  return proto_->readByte(byte);
}

uint32_t THeaderProtocol::readI16(int16_t& i16) {
  return proto_->readI16(i16);
}

uint32_t THeaderProtocol::readI32(int32_t& i32) {
  return proto_->readI32(i32);
}

uint32_t THeaderProtocol::readI64(int64_t& i64) {
  return proto_->readI64(i64);
}

uint32_t THeaderProtocol::readDouble(double& dub) {
  return proto_->readDouble(dub);
}

uint32_t THeaderProtocol::readString(std::string& str) {
  return proto_->readString(str);
}

uint32_t THeaderProtocol::readBinary(std::string& binary) {
  return proto_->readBinary(binary);
}

}}} // apache::thrift::protocol

#endif // #ifndef _THRIFT_PROTOCOL_THEADERPROTOCOL_CPP_
/**
 * @file
 * @brief deal with reading and writing of highscore file
 **/

/*
 * ----------- MODIFYING THE PRINTED SCORE FORMAT ---------------------
 * Do this at your leisure. Change hiscores_format_single() as much
 * as you like.
 *
 */

#include "AppHdr.h"

#include "hiscores.h"

#include <algorithm>
#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <memory>
#if defined(UNIX) || defined(TARGET_COMPILER_MINGW)
#include <unistd.h>
#endif

#include "branch.h"
#include "chardump.h"
#include "cio.h"
#include "dungeon.h"
#include "end.h"
#include "english.h"
#include "files.h"
#include "initfile.h"
#include "item-prop.h"
#include "item-status-flag-type.h"
#include "items.h"
#include "jobs.h"
#include "kills.h"
#include "libutil.h"
#include "menu.h"
#include "misc.h"
#include "mon-util.h"
#include "options.h"
#include "ouch.h"
#include "place.h"
#include "religion.h"
#include "scroller.h"
#include "skills.h"
#include "state.h"
#include "status.h"
#include "stringutil.h"
#ifdef USE_TILE
#include "tilepick.h"
#endif
#ifdef USE_TILE_LOCAL
#include "tilereg-crt.h"
#endif
#include "unwind.h"
#include "version.h"
#include "outer-menu.h"

using namespace ui;

// Version tag written into each xlog score line (field "lv").
#define SCORE_VERSION "0.1"

// In-memory copy of the score file: enough memory allocated to snarf
// in the scorefile entries. hs_list_size tracks how many slots are
// populated; hs_list_initalized records whether the cache has been
// filled at least once (note: identifier typo is load-bearing, used
// elsewhere in this file).
static unique_ptr<scorefile_entry> hs_list[SCORE_FILE_ENTRIES];
static int hs_list_size = 0;
static bool hs_list_initalized = false;

static FILE *_hs_open(const char *mode, const string &filename);
static void _hs_close(FILE *handle);
static bool _hs_read(FILE *scores, scorefile_entry &dest);
static void _hs_write(FILE *scores, scorefile_entry &entry);
static time_t _parse_time(const string &st);
static string _xlog_escape(const string &s);
static string _xlog_unescape(const string &s);
static vector<string> _xlog_split_fields(const string &s);

// Full path of the score file: SysEnv.scorefile if the user set one,
// otherwise "scores" in the shared dir, plus the game-type qualifier
// and (for sprint games) the sprint map name.
static string _score_file_name()
{
    string ret;
    if (!SysEnv.scorefile.empty())
        ret = SysEnv.scorefile;
    else
        ret = catpath(Options.shared_dir, "scores");
    ret += crawl_state.game_type_qualifier();
    if (crawl_state.game_is_sprint() && !crawl_state.map.empty())
        ret += "-" + crawl_state.map;
    return ret;
}

// Full path of the per-game-type logfile in the shared dir.
static string _log_file_name()
{
    return catpath(Options.shared_dir, "logfile"
                   + crawl_state.game_type_qualifier());
}

// Insert a new entry into the score file, keeping it sorted by score.
// Rewrites the whole file in place (truncate + rewrite under one open
// handle, to keep the exclusive lock and avoid racing other processes).
// Also refreshes the in-memory hs_list cache as a side effect.
// Returns the 0-based position of the new entry, or -1 if it did not
// make the list.
int hiscores_new_entry(const scorefile_entry &ne)
{
    unwind_bool score_update(crawl_state.updating_scores, true);

    FILE *scores;
    int i;
    bool inserted = false;
    int newest_entry = -1;

    // open highscore file (reading) -- nullptr is fatal!
    //
    // Opening as a+ instead of r+ to force an exclusive lock (see
    // hs_open) and to create the file if it's not there already.
    scores = _hs_open("a+", _score_file_name());

    if (scores == nullptr)
        end(1, true, "failed to open score file for writing");

    // we're at the end of the file, seek back to beginning.
    fseek(scores, 0, SEEK_SET);

    // read highscore file, inserting new entry at appropriate point,
    for (i = 0; i < SCORE_FILE_ENTRIES; i++)
    {
        hs_list[i].reset(new scorefile_entry);
        if (_hs_read(scores, *hs_list[i]) == false)
            break;

        // compare points.. (ties go to the new entry, so the newest
        // of equal scores sorts first)
        if (!inserted && ne.get_score() >= hs_list[i]->get_score())
        {
            newest_entry = i;           // for later printing
            inserted = true;
            // copy read entry to i+1th position
            // Fixed a nasty overflow bug here -- Sharp
            if (i+1 < SCORE_FILE_ENTRIES)
            {
                hs_list[i + 1] = move(hs_list[i]);
                hs_list[i].reset(new scorefile_entry(ne));
                i++;
            }
            else
                *hs_list[i] = ne; // copy new entry to current position
        }
    }

    // special case: lowest score, with room
    if (!inserted && i < SCORE_FILE_ENTRIES)
    {
        newest_entry = i;
        inserted = true;
        // copy new entry
        hs_list[i].reset(new scorefile_entry(ne));
        i++;
    }

    hs_list_size = i;
    hs_list_initalized = true;

    // If we've still not inserted it, it's not a highscore.
    if (!inserted)
    {
        _hs_close(scores);
        return -1;
    }

    // The old code closed and reopened the score file, leading to a
    // race condition where one Crawl process could overwrite the
    // other's highscore. Now we truncate and rewrite the file without
    // closing it.
    if (ftruncate(fileno(scores), 0))
        end(1, true, "unable to truncate scorefile");
    rewind(scores);

    // write scorefile entries.
    for (i = 0; i < hs_list_size; i++)
    {
        _hs_write(scores, *hs_list[i]);

        // Leave in memory. Does this anyway if !inserted.
        // Can write cleanup function if necessary??
        // hs_list[i].reset(nullptr);
    }

    // close scorefile.
    _hs_close(scores);

    return newest_entry;
}

// Append one entry to the milestone logfile. Unlike the score file,
// a failure to open is non-fatal here: we report the error and return.
void logfile_new_entry(const scorefile_entry &ne)
{
    unwind_bool logfile_update(crawl_state.updating_scores, true);

    FILE *logfile;
    scorefile_entry le = ne;

    // open logfile (appending) -- nullptr is *not* fatal here; we
    // just complain and skip the log write.
    logfile = _hs_open("a", _log_file_name());

    if (logfile == nullptr)
    {
        mprf(MSGCH_ERROR, "ERROR: failure writing to the logfile.");
        return;
    }

    _hs_write(logfile, le);

    // close logfile.
    _hs_close(logfile);
}

// Print one formatted score entry through pf (a printf-like callable),
// prefixed with its 1-based rank. format selects terse / long /
// verbose rendering.
template <class t_printf>
static void _hiscores_print_entry(const scorefile_entry &se,
                                  int index, int format, t_printf pf)
{
    char buf[200];
    // print position (tracked implicitly by order score file)
    snprintf(buf, sizeof buf, "%3d.", index + 1);

    pf("%s", buf);

    string entry;
    // format the entry
    if (format == SCORE_TERSE)
        entry = hiscores_format_single(se);
    else
        entry = hiscores_format_single_long(se, (format == SCORE_VERBOSE));

    entry += "\n";
    pf("%s", entry.c_str());
}

// Reads hiscores file to memory (fills hs_list / hs_list_size and
// marks the cache initialised). Silently does nothing if the score
// file cannot be opened.
void hiscores_read_to_memory()
{
    FILE *scores;
    int i;

    // open highscore file (reading)
    scores = _hs_open("r", _score_file_name());
    if (scores == nullptr)
        return;

    // read highscore file
    for (i = 0; i < SCORE_FILE_ENTRIES; i++)
    {
        hs_list[i].reset(new scorefile_entry);
        if (_hs_read(scores, *hs_list[i]) == false)
            break;
    }

    hs_list_size = i;
    hs_list_initalized = true;

    //close off
    _hs_close(scores);
}

// Writes all entries in the scorefile to stdout in human-readable form.
void hiscores_print_all(int display_count, int format) { unwind_bool scorefile_display(crawl_state.updating_scores, true); FILE *scores = _hs_open("r", _score_file_name()); if (scores == nullptr) { // will only happen from command line puts("No scores."); return; } for (int entry = 0; display_count <= 0 || entry < display_count; ++entry) { scorefile_entry se; if (!_hs_read(scores, se)) break; if (format == -1) printf("%s", se.raw_string().c_str()); else _hiscores_print_entry(se, entry, format, printf); } _hs_close(scores); } // Displays high scores using curses. For output to the console, use // hiscores_print_all. string hiscores_print_list(int display_count, int format, int newest_entry, int& start_out) { unwind_bool scorefile_display(crawl_state.updating_scores, true); string ret; // Additional check to preserve previous functionality if (!hs_list_initalized) hiscores_read_to_memory(); int i, total_entries; if (display_count <= 0) return ""; total_entries = hs_list_size; int start = newest_entry - display_count / 2; if (start + display_count > total_entries) start = total_entries - display_count; if (start < 0) start = 0; const int finish = start + display_count; for (i = start; i < finish && i < total_entries; i++) { // check for recently added entry if (i == newest_entry) ret += "<yellow>"; _hiscores_print_entry(*hs_list[i], i, format, [&ret](const char */*fmt*/, const char *s){ ret += string(s); }); // return to normal color for next entry if (i == newest_entry) ret += "<lightgrey>"; } start_out = start; return ret; } static void _show_morgue(scorefile_entry& se) { int flags = FS_PREWRAPPED_TEXT; formatted_scroller morgue_file(flags); morgue_file.set_tag("morgue"); morgue_file.set_more(); string morgue_base = morgue_name(se.get_name(), se.get_death_time()); string morgue_path = catpath(morgue_directory(), strip_filename_unsafe_chars(morgue_base) + ".txt"); FILE* morgue = lk_open("r", morgue_path); if (!morgue) // TODO: add an error message return; char 
buf[200]; string morgue_text = ""; while (fgets(buf, sizeof buf, morgue) != nullptr) { string line = string(buf); size_t newline_pos = line.find_last_of('\n'); if (newline_pos != string::npos) line.erase(newline_pos); morgue_text += "<w>" + replace_all(line, "<", "<<") + "</w>" + '\n'; } lk_close(morgue); column_composer cols(2, 40); cols.add_formatted( 0, morgue_text, true); vector<formatted_string> blines = cols.formatted_lines(); unsigned i; for (i = 0; i < blines.size(); ++i) morgue_file.add_formatted_string(blines[i], true); morgue_file.show(); } class UIHiscoresMenu : public Widget { public: UIHiscoresMenu(); virtual shared_ptr<Widget> get_child_at_offset(int, int) override { return static_pointer_cast<Widget>(m_root); } virtual void _render() override; virtual SizeReq _get_preferred_size(Direction dim, int prosp_width) override; virtual void _allocate_region() override; void on_show(); bool done = false; private: void _construct_hiscore_table(); void _add_hiscore_row(scorefile_entry& se, int id); Widget* initial_focus = nullptr; bool have_allocated {false}; shared_ptr<Box> m_root; shared_ptr<Text> m_description; shared_ptr<OuterMenu> m_score_entries; }; static int nhsr; UIHiscoresMenu::UIHiscoresMenu() { m_root = make_shared<Box>(Widget::VERT); add_internal_child(m_root); m_root->set_cross_alignment(Widget::STRETCH); auto title_hbox = make_shared<Box>(Widget::HORZ); title_hbox->set_margin_for_sdl(0, 0, 20, 0); title_hbox->set_margin_for_crt(0, 0, 1, 0); #ifdef USE_TILE auto tile = make_shared<Image>(); tile->set_tile(tile_def(TILEG_STARTUP_HIGH_SCORES)); title_hbox->add_child(move(tile)); #endif auto title = make_shared<Text>(formatted_string( "Dungeon Crawl Stone Soup: High Scores", YELLOW)); title->set_margin_for_sdl(0, 0, 0, 16); title_hbox->add_child(move(title)); title_hbox->set_main_alignment(Widget::CENTER); title_hbox->set_cross_alignment(Widget::CENTER); m_description = make_shared<Text>(string(9, '\n')); m_score_entries= 
make_shared<OuterMenu>(true, 1, 100); nhsr = 0; _construct_hiscore_table(); m_root->add_child(move(title_hbox)); if (initial_focus) { m_root->add_child(m_description); m_root->add_child(m_score_entries); } else { auto placeholder = formatted_string("No high scores yet...", DARKGRAY); m_root->add_child(make_shared<Text>(placeholder)); initial_focus = this; } on_hotkey_event([this](const KeyEvent& ev) { return done = (key_is_escape(ev.key()) || ev.key() == CK_MOUSE_CMD); }); } void UIHiscoresMenu::_construct_hiscore_table() { FILE *scores = _hs_open("r", _score_file_name()); if (scores == nullptr) return; int i; // read highscore file for (i = 0; i < SCORE_FILE_ENTRIES; i++) { hs_list[i].reset(new scorefile_entry); if (_hs_read(scores, *hs_list[i]) == false) break; } _hs_close(scores); for (int j=0; j<i; j++) _add_hiscore_row(*hs_list[j], j); } void UIHiscoresMenu::_add_hiscore_row(scorefile_entry& se, int id) { auto tmp = make_shared<Text>(); tmp->set_text(hiscores_format_single(se)); auto btn = make_shared<MenuButton>(); tmp->set_margin_for_sdl(2); btn->set_child(move(tmp)); btn->on_activate_event([id](const ActivateEvent&) { _show_morgue(*hs_list[id]); return true; }); btn->on_focusin_event([this, se](const FocusEvent&) { formatted_string desc(hiscores_format_single_long(se, true)); desc.cprintf(string(max(0, 9-count_linebreaks(desc)), '\n')); m_description->set_text(move(desc)); return false; }); if (!initial_focus) initial_focus = btn.get(); m_score_entries->add_button(move(btn), 0, nhsr++); } void UIHiscoresMenu::_render() { m_root->render(); } void UIHiscoresMenu::on_show() { ui::set_focused_widget(initial_focus); } SizeReq UIHiscoresMenu::_get_preferred_size(Direction dim, int prosp_width) { return m_root->get_preferred_size(dim, prosp_width); } void UIHiscoresMenu::_allocate_region() { if (!have_allocated) { have_allocated = true; on_show(); } m_root->allocate_region(m_region); } void show_hiscore_table() { unwind_var<string> sprintmap(crawl_state.map, 
crawl_state.sprint_map); auto hiscore_ui = make_shared<UIHiscoresMenu>(); auto popup = make_shared<ui::Popup>(hiscore_ui); ui::run_layout(move(popup), hiscore_ui->done); } // Trying to supply an appropriate verb for the attack type. -- bwr static const char *_range_type_verb(const char *const aux) { if (strncmp(aux, "Shot ", 5) == 0) // launched return "shot"; else if (aux[0] == 0 // unknown || strncmp(aux, "Hit ", 4) == 0 // thrown || strncmp(aux, "volley ", 7) == 0) // manticore spikes { return "hit from afar"; } return "blasted"; // spells, wands } string hiscores_format_single(const scorefile_entry &se) { return se.hiscore_line(scorefile_entry::DDV_ONELINE); } static bool _hiscore_same_day(time_t t1, time_t t2) { struct tm *d1 = TIME_FN(&t1); const int year = d1->tm_year; const int mon = d1->tm_mon; const int day = d1->tm_mday; struct tm *d2 = TIME_FN(&t2); return d2->tm_mday == day && d2->tm_mon == mon && d2->tm_year == year; } static string _hiscore_date_string(time_t time) { struct tm *date = TIME_FN(&time); const char *mons[12] = { "Jan", "Feb", "Mar", "Apr", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec" }; return make_stringf("%s %d, %d", mons[date->tm_mon], date->tm_mday, date->tm_year + 1900); } static string _hiscore_newline_string() { return "\n "; } string hiscores_format_single_long(const scorefile_entry &se, bool verbose) { return se.hiscore_line(verbose ? 
scorefile_entry::DDV_VERBOSE : scorefile_entry::DDV_NORMAL); } // -------------------------------------------------------------------------- // BEGIN private functions // -------------------------------------------------------------------------- static FILE *_hs_open(const char *mode, const string &scores) { // allow reading from standard input if (scores == "-") return stdin; return lk_open(mode, scores); } static void _hs_close(FILE *handle) { lk_close(handle); } static bool _hs_read(FILE *scores, scorefile_entry &dest) { char inbuf[1300]; if (!scores || feof(scores)) return false; memset(inbuf, 0, sizeof inbuf); dest.reset(); if (!fgets(inbuf, sizeof inbuf, scores)) return false; return dest.parse(inbuf); } static int _val_char(char digit) { return digit - '0'; } static time_t _parse_time(const string &st) { struct tm date; if (st.length() < 15) return static_cast<time_t>(0); date.tm_year = _val_char(st[0]) * 1000 + _val_char(st[1]) * 100 + _val_char(st[2]) * 10 + _val_char(st[3]) - 1900; date.tm_mon = _val_char(st[4]) * 10 + _val_char(st[5]); date.tm_mday = _val_char(st[6]) * 10 + _val_char(st[7]); date.tm_hour = _val_char(st[8]) * 10 + _val_char(st[9]); date.tm_min = _val_char(st[10]) * 10 + _val_char(st[11]); date.tm_sec = _val_char(st[12]) * 10 + _val_char(st[13]); date.tm_isdst = (st[14] == 'D'); return mktime(&date); } static void _hs_write(FILE *scores, scorefile_entry &se) { fprintf(scores, "%s", se.raw_string().c_str()); } static const char *kill_method_names[] = { "mon", "pois", "cloud", "beam", "lava", "water", "stupidity", "weakness", "clumsiness", "trap", "leaving", "winning", "quitting", "wizmode", "draining", "starvation", "freezing", "burning", "wild_magic", "xom", "rotting", "targeting", "spore", "tso_smiting", "petrification", "something", "falling_down_stairs", "acid", "curare", "beogh_smiting", "divine_wrath", "bounce", "reflect", "self_aimed", "falling_through_gate", "disintegration", "headbutt", "rolling", "mirror_damage", "spines", 
"frailty", "barbs", "being_thrown", "collision", "zot", }; static const char *_kill_method_name(kill_method_type kmt) { COMPILE_CHECK(NUM_KILLBY == ARRAYSZ(kill_method_names)); if (kmt == NUM_KILLBY) return ""; return kill_method_names[kmt]; } static kill_method_type _str_to_kill_method(const string &s) { COMPILE_CHECK(NUM_KILLBY == ARRAYSZ(kill_method_names)); for (int i = 0; i < NUM_KILLBY; ++i) { if (s == kill_method_names[i]) return static_cast<kill_method_type>(i); } return NUM_KILLBY; } ////////////////////////////////////////////////////////////////////////// // scorefile_entry scorefile_entry::scorefile_entry(int dam, mid_t dsource, int dtype, const char *aux, bool death_cause_only, const char *dsource_name, time_t dt) { reset(); init_death_cause(dam, dsource, dtype, aux, dsource_name); if (!death_cause_only) init(dt); } scorefile_entry::scorefile_entry() { // Completely uninitialised, caveat user. reset(); } scorefile_entry::scorefile_entry(const scorefile_entry &se) { init_from(se); } scorefile_entry &scorefile_entry::operator = (const scorefile_entry &se) { init_from(se); return *this; } void scorefile_entry::init_from(const scorefile_entry &se) { version = se.version; save_rcs_version = se.save_rcs_version; save_tag_version = se.save_tag_version; tiles = se.tiles; points = se.points; name = se.name; race = se.race; job = se.job; race_class_name = se.race_class_name; lvl = se.lvl; best_skill = se.best_skill; best_skill_lvl = se.best_skill_lvl; title = se.title; death_type = se.death_type; death_source = se.death_source; death_source_name = se.death_source_name; death_source_flags = se.death_source_flags; auxkilldata = se.auxkilldata; indirectkiller = se.indirectkiller; killerpath = se.killerpath; last_banisher = se.last_banisher; dlvl = se.dlvl; absdepth = se.absdepth; branch = se.branch; map = se.map; mapdesc = se.mapdesc; killer_map = se.killer_map; final_hp = se.final_hp; final_max_hp = se.final_max_hp; final_max_max_hp = se.final_max_max_hp; final_mp 
= se.final_mp; final_max_mp = se.final_max_mp; final_base_max_mp = se.final_base_max_mp; damage = se.damage; source_damage = se.source_damage; turn_damage = se.turn_damage; str = se.str; intel = se.intel; dex = se.dex; ac = se.ac; ev = se.ev; sh = se.sh; god = se.god; piety = se.piety; penance = se.penance; wiz_mode = se.wiz_mode; explore_mode = se.explore_mode; birth_time = se.birth_time; death_time = se.death_time; real_time = se.real_time; num_turns = se.num_turns; num_aut = se.num_aut; num_diff_runes = se.num_diff_runes; num_runes = se.num_runes; kills = se.kills; maxed_skills = se.maxed_skills; fifteen_skills = se.fifteen_skills; status_effects = se.status_effects; gold = se.gold; gold_spent = se.gold_spent; gold_found = se.gold_found; zigs = se.zigs; zigmax = se.zigmax; scrolls_used = se.scrolls_used; potions_used = se.potions_used; seed = se.seed; fixup_char_name(); // We could just reset raw_line to "" instead. raw_line = se.raw_line; } actor* scorefile_entry::killer() const { return actor_by_mid(death_source); } xlog_fields scorefile_entry::get_fields() const { if (!fields) return xlog_fields(); else return *fields; } bool scorefile_entry::parse(const string &line) { // Scorefile formats down the ages: // // 1) old-style lines which were 80 character blocks // 2) 4.0 pr1 through pr7 versions which were newline terminated // 3) 4.0 pr8 and onwards which are colon-separated fields (and // start with a colon), and may exceed 80 characters! // 4) 0.2 and onwards, which are xlogfile format - no leading // colon, fields separated by colons, each field specified as // key=value. Colons are not allowed in key names, must be escaped to // :: in values. // // 0.3 only reads and writes entries of type (4). 
// Leading colon implies 4.0 style line: if (line[0] == ':') { dprf("Corrupted xlog-line: %s", line.c_str()); return false; } raw_line = line; return parse_scoreline(line); } string scorefile_entry::raw_string() const { if (!raw_line.empty()) return raw_line; set_score_fields(); if (!fields) return ""; return fields->xlog_line() + "\n"; } bool scorefile_entry::parse_scoreline(const string &line) { fields.reset(new xlog_fields(line)); init_with_fields(); return true; } static const char* _short_branch_name(int branch) { if (branch >= 0 && branch < NUM_BRANCHES) return branches[branch].abbrevname; return ""; } enum old_job_type { OLD_JOB_THIEF = -1, OLD_JOB_DEATH_KNIGHT = -2, OLD_JOB_PALADIN = -3, OLD_JOB_REAVER = -4, OLD_JOB_STALKER = -5, OLD_JOB_JESTER = -6, OLD_JOB_PRIEST = -7, OLD_JOB_HEALER = -8, OLD_JOB_SKALD = -9, NUM_OLD_JOBS = -OLD_JOB_SKALD }; static const char* _job_name(int job) { switch (job) { case OLD_JOB_THIEF: return "Thief"; case OLD_JOB_DEATH_KNIGHT: return "Death Knight"; case OLD_JOB_PALADIN: return "Paladin"; case OLD_JOB_REAVER: return "Reaver"; case OLD_JOB_STALKER: return "Stalker"; case OLD_JOB_JESTER: return "Jester"; case OLD_JOB_PRIEST: return "Priest"; case OLD_JOB_HEALER: return "Healer"; case OLD_JOB_SKALD: return "Skald"; } return get_job_name(static_cast<job_type>(job)); } static const char* _job_abbrev(int job) { switch (job) { case OLD_JOB_THIEF: return "Th"; case OLD_JOB_DEATH_KNIGHT: return "DK"; case OLD_JOB_PALADIN: return "Pa"; case OLD_JOB_REAVER: return "Re"; case OLD_JOB_STALKER: return "St"; case OLD_JOB_JESTER: return "Jr"; case OLD_JOB_PRIEST: return "Pr"; case OLD_JOB_HEALER: return "He"; case OLD_JOB_SKALD: return "Sk"; } return get_job_abbrev(static_cast<job_type>(job)); } static int _job_by_name(const string& name) { int job = get_job_by_name(name.c_str()); if (job != JOB_UNKNOWN) return job; for (job = -1; job >= -NUM_OLD_JOBS; job--) if (name == _job_name(job)) return job; return JOB_UNKNOWN; } enum 
old_species_type { OLD_SP_ELF = -1, OLD_SP_HILL_DWARF = -2, OLD_SP_OGRE_MAGE = -3, OLD_SP_GREY_ELF = -4, OLD_SP_GNOME = -5, OLD_SP_MOUNTAIN_DWARF = -6, OLD_SP_SLUDGE_ELF = -7, OLD_SP_DJINNI = -8, OLD_SP_LAVA_ORC = -9, NUM_OLD_SPECIES = -OLD_SP_LAVA_ORC }; static string _species_name(int race) { switch (race) { case OLD_SP_ELF: return "Elf"; case OLD_SP_HILL_DWARF: return "Hill Dwarf"; case OLD_SP_OGRE_MAGE: return "Ogre-Mage"; case OLD_SP_GREY_ELF: return "Grey Elf"; case OLD_SP_GNOME: return "Gnome"; case OLD_SP_MOUNTAIN_DWARF: return "Mountain Dwarf"; case OLD_SP_SLUDGE_ELF: return "Sludge Elf"; case OLD_SP_DJINNI: return "Djinni"; case OLD_SP_LAVA_ORC: return "Lava Orc"; } // Guard against an ASSERT in get_species_def; it's really bad if the game // crashes at this point while trying to clean up a dead/quit player. // (This doesn't seem to even impact what is shown in the score list?) if (race < 0 || race >= NUM_SPECIES) return "Unknown (buggy) species!"; return species::name(static_cast<species_type>(race)); } static const char* _species_abbrev(int race) { switch (race) { case OLD_SP_ELF: return "El"; case OLD_SP_HILL_DWARF: return "HD"; case OLD_SP_OGRE_MAGE: return "OM"; case OLD_SP_GREY_ELF: return "GE"; case OLD_SP_GNOME: return "Gn"; case OLD_SP_MOUNTAIN_DWARF: return "MD"; case OLD_SP_SLUDGE_ELF: return "SE"; case OLD_SP_DJINNI: return "Dj"; case OLD_SP_LAVA_ORC: return "LO"; } // see note in _species_name: don't ASSERT in get_species_def. 
if (race < 0 || race >= NUM_SPECIES) return "??"; return species::get_abbrev(static_cast<species_type>(race)); } static int _species_by_name(const string& name) { int race = species::from_str(name); if (race != SP_UNKNOWN) return race; for (race = -1; race >= -NUM_OLD_SPECIES; race--) if (name == _species_name(race)) return race; return SP_UNKNOWN; } void scorefile_entry::init_with_fields() { version = fields->str_field("v"); save_rcs_version = fields->str_field("vsavrv"); save_tag_version = fields->str_field("vsav"); tiles = fields->int_field("tiles"); points = fields->int_field("sc"); name = fields->str_field("name"); race = _species_by_name(fields->str_field("race")); job = _job_by_name(fields->str_field("cls")); lvl = fields->int_field("xl"); race_class_name = fields->str_field("char"); best_skill = str_to_skill_safe(fields->str_field("sk")); best_skill_lvl = fields->int_field("sklev"); title = fields->str_field("title"); death_type = _str_to_kill_method(fields->str_field("ktyp")); death_source_name = fields->str_field("killer"); const vector<string> kflags = split_string(" ", fields->str_field("killer_flags")); death_source_flags = set<string>(kflags.begin(), kflags.end()); auxkilldata = fields->str_field("kaux"); indirectkiller = fields->str_field("ikiller"); if (indirectkiller.empty()) indirectkiller = death_source_name; killerpath = fields->str_field("kpath"); last_banisher = fields->str_field("banisher"); branch = branch_by_abbrevname(fields->str_field("br"), BRANCH_DUNGEON); dlvl = fields->int_field("lvl"); absdepth = fields->int_field("absdepth"); map = fields->str_field("map"); mapdesc = fields->str_field("mapdesc"); killer_map = fields->str_field("killermap"); final_hp = fields->int_field("hp"); final_max_hp = fields->int_field("mhp"); final_max_max_hp = fields->int_field("mmhp"); final_mp = fields->int_field("mp"); final_max_mp = fields->int_field("mmp"); final_base_max_mp = fields->int_field("bmmp"); damage = fields->int_field("dam"); source_damage = 
fields->int_field("sdam"); turn_damage = fields->int_field("tdam"); str = fields->int_field("str"); intel = fields->int_field("int"); dex = fields->int_field("dex"); ac = fields->int_field("ac"); ev = fields->int_field("ev"); sh = fields->int_field("sh"); god = str_to_god(fields->str_field("god")); piety = fields->int_field("piety"); penance = fields->int_field("pen"); wiz_mode = fields->int_field("wiz"); explore_mode = fields->int_field("explore"); birth_time = _parse_time(fields->str_field("start")); death_time = _parse_time(fields->str_field("end")); real_time = fields->int_field("dur"); num_turns = fields->int_field("turn"); num_aut = fields->int_field("aut"); num_diff_runes = fields->int_field("urune"); num_runes = fields->int_field("nrune"); kills = fields->int_field("kills"); maxed_skills = fields->str_field("maxskills"); fifteen_skills = fields->str_field("fifteenskills"); status_effects = fields->str_field("status"); gold = fields->int_field("gold"); gold_found = fields->int_field("goldfound"); gold_spent = fields->int_field("goldspent"); zigs = fields->int_field("zigscompleted"); zigmax = fields->int_field("zigdeepest"); scrolls_used = fields->int_field("scrollsused"); potions_used = fields->int_field("potionsused"); seed = fields->str_field("seed"); fixup_char_name(); } void scorefile_entry::set_base_xlog_fields() const { if (!fields) fields.reset(new xlog_fields); string score_version = SCORE_VERSION; if (crawl_state.game_is_sprint()) { /* XXX: hmmm, something better here? 
*/ score_version += "-sprint.1"; } fields->add_field("v", "%s", Version::Short); fields->add_field("vlong", "%s", Version::Long); fields->add_field("lv", "%s", score_version.c_str()); if (!save_rcs_version.empty()) fields->add_field("vsavrv", "%s", save_rcs_version.c_str()); if (!save_tag_version.empty()) fields->add_field("vsav", "%s", save_tag_version.c_str()); #ifdef EXPERIMENTAL_BRANCH fields->add_field("explbr", EXPERIMENTAL_BRANCH); #endif if (tiles) fields->add_field("tiles", "%d", tiles); fields->add_field("name", "%s", name.c_str()); fields->add_field("race", "%s", _species_name(race).c_str()); fields->add_field("cls", "%s", _job_name(job)); fields->add_field("char", "%s", race_class_name.c_str()); fields->add_field("xl", "%d", lvl); fields->add_field("sk", "%s", skill_name(best_skill)); fields->add_field("sklev", "%d", best_skill_lvl); fields->add_field("title", "%s", title.c_str()); fields->add_field("place", "%s", level_id(branch, dlvl).describe().c_str()); if (!last_banisher.empty()) fields->add_field("banisher", "%s", last_banisher.c_str()); // Note: "br", "lvl" (and former "ltyp") are redundant with "place" // but may still be used by DGL logs. fields->add_field("br", "%s", _short_branch_name(branch)); fields->add_field("lvl", "%d", dlvl); fields->add_field("absdepth", "%d", absdepth); fields->add_field("hp", "%d", final_hp); fields->add_field("mhp", "%d", final_max_hp); fields->add_field("mmhp", "%d", final_max_max_hp); fields->add_field("mp", "%d", final_mp); fields->add_field("mmp", "%d", final_max_mp); fields->add_field("bmmp", "%d", final_base_max_mp); fields->add_field("str", "%d", str); fields->add_field("int", "%d", intel); fields->add_field("dex", "%d", dex); fields->add_field("ac", "%d", ac); fields->add_field("ev", "%d", ev); fields->add_field("sh", "%d", sh); fields->add_field("god", "%s", god == GOD_NO_GOD ? 
"" : god_name(god).c_str()); if (wiz_mode) fields->add_field("wiz", "%d", wiz_mode); if (explore_mode) fields->add_field("explore", "%d", explore_mode); fields->add_field("start", "%s", make_date_string(birth_time).c_str()); fields->add_field("dur", "%d", (int)real_time); fields->add_field("turn", "%d", num_turns); fields->add_field("aut", "%d", num_aut); if (num_diff_runes) fields->add_field("urune", "%d", num_diff_runes); if (num_runes) fields->add_field("nrune", "%d", num_runes); fields->add_field("kills", "%d", kills); if (!maxed_skills.empty()) fields->add_field("maxskills", "%s", maxed_skills.c_str()); if (!fifteen_skills.empty()) fields->add_field("fifteenskills", "%s", fifteen_skills.c_str()); if (!status_effects.empty()) fields->add_field("status", "%s", status_effects.c_str()); fields->add_field("gold", "%d", gold); fields->add_field("goldfound", "%d", gold_found); fields->add_field("goldspent", "%d", gold_spent); if (zigs) fields->add_field("zigscompleted", "%d", zigs); if (zigmax) fields->add_field("zigdeepest", "%d", zigmax); fields->add_field("scrollsused", "%d", scrolls_used); fields->add_field("potionsused", "%d", potions_used); } void scorefile_entry::set_score_fields() const { fields.reset(new xlog_fields); if (!fields) return; set_base_xlog_fields(); fields->add_field("sc", "%d", points); fields->add_field("ktyp", "%s", _kill_method_name(kill_method_type(death_type))); fields->add_field("killer", "%s", death_source_desc().c_str()); if (!death_source_flags.empty()) { const string kflags = comma_separated_line( death_source_flags.begin(), death_source_flags.end(), " ", " "); fields->add_field("killer_flags", "%s", kflags.c_str()); } fields->add_field("dam", "%d", damage); fields->add_field("sdam", "%d", source_damage); fields->add_field("tdam", "%d", turn_damage); fields->add_field("kaux", "%s", auxkilldata.c_str()); if (indirectkiller != death_source_desc()) fields->add_field("ikiller", "%s", indirectkiller.c_str()); if (!killerpath.empty()) 
fields->add_field("kpath", "%s", killerpath.c_str()); if (piety > 0) fields->add_field("piety", "%d", piety); if (penance > 0) fields->add_field("pen", "%d", penance); fields->add_field("end", "%s", make_date_string(death_time).c_str()); if (!map.empty()) { fields->add_field("map", "%s", map.c_str()); if (!mapdesc.empty()) fields->add_field("mapdesc", "%s", mapdesc.c_str()); } if (!killer_map.empty()) fields->add_field("killermap", "%s", killer_map.c_str()); fields->add_field("seed", "%s", seed.c_str()); #ifdef DGL_EXTENDED_LOGFILES const string short_msg = short_kill_message(); fields->add_field("tmsg", "%s", short_msg.c_str()); const string long_msg = long_kill_message(); if (long_msg != short_msg) fields->add_field("vmsg", "%s", long_msg.c_str()); #endif } string scorefile_entry::make_oneline(const string &ml) const { vector<string> lines = split_string("\n", ml); for (string &s : lines) { if (starts_with(s, "...")) { s = s.substr(3); trim_string(s); } } return comma_separated_line(lines.begin(), lines.end(), " ", " "); } string scorefile_entry::long_kill_message() const { string msg = death_description(DDV_LOGVERBOSE); msg = make_oneline(msg); msg[0] = tolower_safe(msg[0]); trim_string(msg); return msg; } string scorefile_entry::short_kill_message() const { string msg = death_description(DDV_ONELINE); msg = make_oneline(msg); msg[0] = tolower_safe(msg[0]); trim_string(msg); return msg; } /** * Remove from a string everything up to and including a given infix. * * @param[in,out] str The string to modify. * @param[in] infix The infix to remove. * @post If \c infix occurred as a substring of <tt>str</tt>, \c str is updated * by removing all characters up to and including the last character * of the the first occurrence. Otherwise, \c str is unchanged. * @return \c true if \c str was modified, \c false otherwise. */ static bool _strip_to(string &str, const char *infix) { // Don't treat stripping the empty string as a change. 
if (*infix == '\0') return false; size_t pos = str.find(infix); if (pos != string::npos) { str.erase(0, pos + strlen(infix)); return true; } return false; } void scorefile_entry::init_death_cause(int dam, mid_t dsrc, int dtype, const char *aux, const char *dsrc_name) { death_source = dsrc; death_type = dtype; damage = dam; const monster *source_monster = monster_by_mid(death_source); if (source_monster) killer_map = source_monster->originating_map(); // Set the default aux data value... // If aux is passed in (ie for a trap), we'll default to that. if (aux == nullptr) auxkilldata.clear(); else auxkilldata = aux; // for death by monster if ((death_type == KILLED_BY_MONSTER || death_type == KILLED_BY_HEADBUTT || death_type == KILLED_BY_BEAM || death_type == KILLED_BY_DISINT || death_type == KILLED_BY_ACID || death_type == KILLED_BY_DRAINING || death_type == KILLED_BY_BURNING || death_type == KILLED_BY_DEATH_EXPLOSION || death_type == KILLED_BY_CLOUD || death_type == KILLED_BY_ROTTING || death_type == KILLED_BY_REFLECTION || death_type == KILLED_BY_ROLLING || death_type == KILLED_BY_SPINES || death_type == KILLED_BY_WATER || death_type == KILLED_BY_BEING_THROWN || death_type == KILLED_BY_COLLISION) && monster_by_mid(death_source)) { const monster* mons = monster_by_mid(death_source); ASSERT(mons); // Previously the weapon was only used for dancing weapons, // but now we pass it in as a string through the scorefile // entry to be appended in hiscores_format_single in long or // medium scorefile formats. if (death_type == KILLED_BY_MONSTER && mons->inv[MSLOT_WEAPON] != NON_ITEM) { // [ds] The highscore entry may be constructed while the player // is alive (for notes), so make sure we don't reveal info we // shouldn't. if (you.hp <= 0) { set_ident_flags(env.item[mons->inv[MSLOT_WEAPON]], ISFLAG_IDENT_MASK); } // Setting this is redundant for dancing weapons, however // we do care about the above indentification. 
-- bwr if (mons->type != MONS_DANCING_WEAPON) auxkilldata = env.item[mons->inv[MSLOT_WEAPON]].name(DESC_A); } const bool death = (you.hp <= 0 || death_type == KILLED_BY_DRAINING); const description_level_type desc = death_type == KILLED_BY_DEATH_EXPLOSION ? DESC_PLAIN : DESC_A; death_source_name = mons->name(desc, death); if (death || you.can_see(*mons)) death_source_name = mons->full_name(desc); // Some shadows have names if (mons_is_player_shadow(*mons) && mons->mname.empty()) death_source_name = "their own shadow"; // heh if (mons->mid == MID_YOU_FAULTLESS) death_source_name = "themself"; if (mons->has_ench(ENCH_SHAPESHIFTER)) death_source_name += " (shapeshifter)"; else if (mons->has_ench(ENCH_GLOWING_SHAPESHIFTER)) death_source_name += " (glowing shapeshifter)"; if (mons->type == MONS_PANDEMONIUM_LORD) death_source_name += " the pandemonium lord"; if (mons->has_ench(ENCH_PHANTOM_MIRROR)) death_source_name += " (illusionary)"; if (mons_is_unique(mons->type)) death_source_flags.insert("unique"); if (mons->props.exists("blame")) { const CrawlVector& blame = mons->props["blame"].get_vector(); indirectkiller = blame[blame.size() - 1].get_string(); _strip_to(indirectkiller, " by "); _strip_to(indirectkiller, "ed to "); // "attached to" and similar killerpath = ""; for (const auto &bl : blame) killerpath = killerpath + ":" + _xlog_escape(bl.get_string()); killerpath.erase(killerpath.begin()); } else { indirectkiller = death_source_name; killerpath = ""; } } else if (death_type == KILLED_BY_DISINT || death_type == KILLED_BY_CLOUD) { death_source_name = dsrc_name ? dsrc_name : dsrc == MHITYOU ? 
"you" : ""; indirectkiller = killerpath = ""; } else { if (dsrc_name) death_source_name = dsrc_name; else death_source_name.clear(); indirectkiller = killerpath = ""; } if (death_type == KILLED_BY_WEAKNESS || death_type == KILLED_BY_STUPIDITY || death_type == KILLED_BY_CLUMSINESS) { if (auxkilldata.empty()) auxkilldata = "unknown source"; } if (death_type == KILLED_BY_POISON) { death_source_name = you.props["poisoner"].get_string(); auxkilldata = you.props["poison_aux"].get_string(); } if (death_type == KILLED_BY_BURNING) { death_source_name = you.props["sticky_flame_source"].get_string(); auxkilldata = you.props["sticky_flame_aux"].get_string(); } } void scorefile_entry::reset() { // simple init raw_line.clear(); version.clear(); save_rcs_version.clear(); save_tag_version.clear(); tiles = 0; points = -1; name.clear(); race = SP_UNKNOWN; job = JOB_UNKNOWN; lvl = 0; race_class_name.clear(); best_skill = SK_NONE; best_skill_lvl = 0; title.clear(); death_type = KILLED_BY_SOMETHING; death_source = MID_NOBODY; death_source_name.clear(); auxkilldata.clear(); indirectkiller.clear(); killerpath.clear(); last_banisher.clear(); dlvl = 0; absdepth = 1; branch = BRANCH_DUNGEON; map.clear(); mapdesc.clear(); final_hp = -1; final_max_hp = -1; final_max_max_hp = -1; final_mp = -1; final_max_mp = -1; final_base_max_mp = -1; str = -1; intel = -1; dex = -1; ac = -1; ev = -1; sh = -1; damage = -1; source_damage = -1; turn_damage = -1; god = GOD_NO_GOD; piety = -1; penance = -1; wiz_mode = 0; explore_mode = 0; birth_time = 0; death_time = 0; real_time = -1; num_turns = -1; num_aut = -1; num_diff_runes = 0; num_runes = 0; kills = 0; maxed_skills.clear(); fifteen_skills.clear(); status_effects.clear(); gold = 0; gold_found = 0; gold_spent = 0; zigs = 0; zigmax = 0; scrolls_used = 0; potions_used = 0; seed.clear(); } static int _award_modified_experience() { int xp = you.experience; int result = 0; if (xp <= 250000) return xp * 7 / 10; result += 250000 * 7 / 10; xp -= 250000; if (xp <= 
750000) { result += xp * 4 / 10; return result; } result += 750000 * 4 / 10; xp -= 750000; if (xp <= 2000000) { result += xp * 2 / 10; return result; } result += 2000000 * 2 / 10; xp -= 2000000; result += xp / 10; return result; } void scorefile_entry::init(time_t dt) { // Score file entry version: // // 4.0 - original versioned entry // 4.1 - added real_time and num_turn fields // 4.2 - stats and god info version = Version::Short; #ifdef USE_TILE_LOCAL tiles = 1; #elif defined (USE_TILE_WEB) tiles = ::tiles.is_controlled_from_web(); #else tiles = 0; #endif name = you.your_name; save_rcs_version = crawl_state.save_rcs_version; if (crawl_state.minor_version > 0) { save_tag_version = make_stringf("%d.%d", TAG_MAJOR_VERSION, crawl_state.minor_version); } /* * old scoring system (0.1-0.3): * * Gold * + 0.7 * Experience * + (distinct Runes +2)^2 * 1000, winners with distinct runes >= 3 only * + value of Inventory, for winners only * * * 0.4 scoring system, as suggested by Lemuel: * * Gold * + 0.7 * Experience up to 250,000 * + 0.4 * Experience between 250,000 and 1,000,000 * + 0.2 * Experience between 1,000,000 and 3,000,000 * + 0.1 * Experience above 3,000,000 * + (distinct Runes +2)^2 * 1000, winners with distinct runes >= 3 only * + value of Inventory, for winners only * + (250,000 * d. runes) * (25,000/(turns/d. runes)), for winners only * * current scoring system (mostly the same as above): * * Experience terms as above * + runes * (runes + 12) * 1000 (for everyone) * + (250000 + 2 * (runes + 2) * 1000) (winners only) * + 250000 * 25000 * runes^2 / turns (winners only) */ // do points first. points = 0; bool base_score = true; dlua.pushglobal("dgn.persist.calc_score"); lua_pushboolean(dlua, death_type == KILLED_BY_WINNING); if (dlua.callfn(nullptr, 1, 2)) dlua.fnreturns(">db", &points, &base_score); // If calc_score didn't exist, or returned true as its second value, // use the default formula. 
if (base_score) { // sprint games could overflow a 32 bit value uint64_t pt = points + _award_modified_experience(); num_runes = runes_in_pack(); num_diff_runes = num_runes; // There's no point in rewarding lugging artefacts. Thus, no points // for the value of the inventory. -- 1KB if (death_type == KILLED_BY_WINNING) { pt += 250000; // the Orb pt += num_runes * 2000 + 4000; pt += ((uint64_t)250000) * 25000 * num_runes * num_runes / (1+you.num_turns); } pt += num_runes * 10000; pt += num_runes * (num_runes + 2) * 1000; points = pt; } else ASSERT(crawl_state.game_is_sprint()); // only sprint should use custom scores race = you.species; job = you.char_class; race_class_name.clear(); fixup_char_name(); lvl = you.experience_level; best_skill = ::best_skill(SK_FIRST_SKILL, SK_LAST_SKILL); best_skill_lvl = you.skills[ best_skill ]; title = player_title(false); // Note all skills at level 27, and also all skills at level >= 15. for (skill_type sk = SK_FIRST_SKILL; sk < NUM_SKILLS; ++sk) { if (you.skills[sk] == 27) { if (!maxed_skills.empty()) maxed_skills += ","; maxed_skills += skill_name(sk); } if (you.skills[sk] >= 15) { if (!fifteen_skills.empty()) fifteen_skills += ","; fifteen_skills += skill_name(sk); } } status_info inf; for (unsigned i = 0; i <= STATUS_LAST_STATUS; ++i) { if (fill_status_info(i, inf) && !inf.short_text.empty()) { if (!status_effects.empty()) status_effects += ","; status_effects += inf.short_text; } } kills = you.kills.total_kills(); final_hp = you.hp; final_max_hp = you.hp_max; final_max_max_hp = get_real_hp(true, false); final_mp = you.magic_points; final_max_mp = you.max_magic_points; final_base_max_mp = get_real_mp(false); source_damage = you.source_damage; turn_damage = you.turn_damage; // Use possibly negative stat values. 
str = you.stat(STAT_STR, false); intel = you.stat(STAT_INT, false); dex = you.stat(STAT_DEX, false); ac = you.armour_class(); ev = you.evasion(); sh = player_displayed_shield_class(); god = you.religion; if (!you_worship(GOD_NO_GOD)) { piety = you.piety; penance = you.penance[you.religion]; } branch = you.where_are_you; // no adjustments necessary. dlvl = you.depth; absdepth = env.absdepth0 + 1; // 1-based absolute depth. last_banisher = you.banished_by; if (const vault_placement *vp = dgn_vault_at(you.pos())) { map = vp->map_name_at(you.pos()); mapdesc = vp->map.description; } birth_time = you.birth_time; // start time of game death_time = (dt != 0 ? dt : time(nullptr)); // end time of game handle_real_time(chrono::system_clock::from_time_t(death_time)); real_time = you.real_time(); num_turns = you.num_turns; num_aut = you.elapsed_time; gold = you.gold; gold_found = you.attribute[ATTR_GOLD_FOUND]; gold_spent = you.attribute[ATTR_PURCHASES]; zigs = you.zigs_completed; zigmax = you.zig_max; scrolls_used = 0; pair<caction_type, int> p(CACT_USE, caction_compound(OBJ_SCROLLS)); const int maxlev = min<int>(you.max_level, 27); if (you.action_count.count(p)) for (int i = 0; i < maxlev; i++) scrolls_used += you.action_count[p][i]; potions_used = 0; p = make_pair(CACT_USE, caction_compound(OBJ_POTIONS)); if (you.action_count.count(p)) for (int i = 0; i < maxlev; i++) potions_used += you.action_count[p][i]; wiz_mode = (you.wizard || you.suppress_wizard ? 1 : 0); explore_mode = (you.explore ? 
1 : 0);

    // Record the game seed as a decimal string for the xlog.
    seed = make_stringf("%" PRIu64, crawl_state.seed);
}

// Build the full human-readable scorefile line at the requested
// verbosity: character description, cause of death, place of death
// and game duration, concatenated in that order.
string scorefile_entry::hiscore_line(death_desc_verbosity verbosity) const
{
    string line = character_description(verbosity);
    line += death_description(verbosity);
    line += death_place(verbosity);
    line += game_time(verbosity);

    return line;
}

// "The game lasted ... (N turns)." — emitted only for DDV_VERBOSE;
// every other verbosity gets an empty string.
string scorefile_entry::game_time(death_desc_verbosity verbosity) const
{
    string line;

    if (verbosity == DDV_VERBOSE)
    {
        line += make_stringf("The game lasted %s (%d turns).",
                             make_time_string(real_time).c_str(),
                             num_turns);
        line += _hiscore_newline_string();
    }

    return line;
}

// Pick a melee kill verb based on how far below zero final hp went.
const char *scorefile_entry::damage_verb() const
{
    // GDL: here's an example of using final_hp. Verbiage could be better.
    // bwr: changed "blasted" since this is for melee
    return (final_hp > -6)  ? "Slain"      :
           (final_hp > -14) ? "Mangled"    :
           (final_hp > -22) ? "Demolished"
                            : "Annihilated";
}

// Name of whatever killed the player, as fixed up in init_death_cause().
string scorefile_entry::death_source_desc() const
{
    return death_source_name;
}

// "(NN damage)", or just "(NN)" in terse mode.
string scorefile_entry::damage_string(bool terse) const
{
    return make_stringf("(%d%s)", damage, terse? "" : " damage");
}

// Strip a leading indefinite article ("a " / "an ") from s, if present.
string scorefile_entry::strip_article_a(const string &s) const
{
    if (starts_with(s, "a "))
        return s.substr(2);
    else if (starts_with(s, "an "))
        return s.substr(3);
    return s;
}

// Extract just the missile name from auxkilldata, which is expected to
// have one of the forms "Shot with <missile> by <monster>" or
// "Hit by <missile> thrown by <monster>".
string scorefile_entry::terse_missile_name() const
{
    const string pre_post[][2] =
    {
        { "Shot with ", " by " },
        { "Hit by ", " thrown by " }
    };
    const string &aux = auxkilldata;
    string missile;

    for (const string (&affixes)[2] : pre_post)
    {
        if (!starts_with(aux, affixes[0]))
            continue;

        // Use rfind so a missile name containing the separator text
        // doesn't truncate the match early.
        string::size_type end = aux.rfind(affixes[1]);
        if (end == string::npos)
            continue;

        int istart = affixes[0].length();
        int nchars = end - istart;
        missile = aux.substr(istart, nchars);

        // Was this prefixed by "a" or "an"?
        // (This should only ever not be the case with Robin and Ijyb.)
        missile = strip_article_a(missile);
    }
    return missile;
}

// Terse "<monster>/<missile>" cause, parsed back out of auxkilldata.
string scorefile_entry::terse_missile_cause() const
{
    const string &aux = auxkilldata;

    string monster_prefix = " by ";
    // We're looking for Shot with a%s %s by %s/ Hit by a%s %s thrown by %s
    string::size_type by = aux.rfind(monster_prefix);
    if (by == string::npos)
        return "???";

    string mcause = aux.substr(by + monster_prefix.length());
    mcause = strip_article_a(mcause);

    string missile = terse_missile_name();

    if (!missile.empty())
        mcause += "/" + missile;

    return mcause;
}

// Terse beam cause: auxkilldata with any leading "by "/"By " removed.
string scorefile_entry::terse_beam_cause() const
{
    string cause = auxkilldata;
    if (starts_with(cause, "by ") || starts_with(cause, "By "))
        cause = cause.substr(3);
    return cause;
}

// Wild-magic deaths reuse the beam-cause formatting.
string scorefile_entry::terse_wild_magic() const
{
    return terse_beam_cause();
}

// Lazily build the abbreviated species+job combo (e.g. "MiBe") if it
// wasn't already supplied by the parsed scoreline.
void scorefile_entry::fixup_char_name()
{
    if (race_class_name.empty())
    {
        race_class_name = make_stringf("%s%s",
                                       _species_abbrev(race),
                                       _job_abbrev(job));
    }
}

// One-line character summary: points, truncated name, char combo,
// level, and a trailing W/E marker for wizard/explore games.
string scorefile_entry::single_cdesc() const
{
    string scname;
    scname = chop_string(name, 10);

    return make_stringf("%8d %s %s-%02d%s", points, scname.c_str(),
                        race_class_name.c_str(), lvl,
                        (wiz_mode == 1) ? "W" : (explore_mode == 1) ? "E" : "");
}

// Append delimiter unless the sentence is empty or already ends with
// terminal punctuation ('!' or '.').
static string _append_sentence_delimiter(const string &sentence,
                                         const string &delimiter)
{
    if (sentence.empty())
        return sentence;

    const char lastch = sentence[sentence.length() - 1];
    if (lastch == '!'
|| lastch == '.') return sentence; return sentence + delimiter; } string scorefile_entry::character_description(death_desc_verbosity verbosity) const { bool single = verbosity == DDV_TERSE || verbosity == DDV_ONELINE; if (single) return single_cdesc(); bool verbose = verbosity == DDV_VERBOSE; string desc; // Please excuse the following bit of mess in the name of flavour ;) if (verbose) { desc = make_stringf("%8d %s the %s (level %d", points, name.c_str(), title.c_str(), lvl); } else { desc = make_stringf("%8d %s the %s %s (level %d", points, name.c_str(), _species_name(race).c_str(), _job_name(job), lvl); } if (final_max_max_hp > 0) // as the other two may be negative { desc += make_stringf(", %d/%d", final_hp, final_max_hp); if (final_max_hp < final_max_max_hp) desc += make_stringf(" (%d)", final_max_max_hp); desc += " HPs"; } desc += wiz_mode ? ") *WIZ*" : explore_mode ? ") *EXPLORE*" : ")"; desc += _hiscore_newline_string(); if (verbose) { string srace = _species_name(race); desc += make_stringf("Began as a%s %s %s", is_vowel(srace[0]) ? "n" : "", srace.c_str(), _job_name(job)); ASSERT(birth_time); desc += " on "; desc += _hiscore_date_string(birth_time); // TODO: show seed here? desc = _append_sentence_delimiter(desc, "."); desc += _hiscore_newline_string(); if (god != GOD_NO_GOD // XX is this check really needed? && !species::mutation_level(static_cast<species_type>(race), MUT_FORLORN)) { if (god == GOD_XOM) { desc += make_stringf("Was a %sPlaything of Xom.", (lvl >= 20) ? "Favourite " : ""); desc += _hiscore_newline_string(); } else { // Not exactly the same as the religion screen, but // good enough to fill this slot for now. desc += make_stringf("Was %s of %s%s", (piety >= piety_breakpoint(5)) ? "the Champion" : (piety >= piety_breakpoint(4)) ? "a High Priest" : (piety >= piety_breakpoint(3)) ? "an Elder" : (piety >= piety_breakpoint(2)) ? "a Priest" : (piety >= piety_breakpoint(1)) ? "a Believer" : (piety >= piety_breakpoint(0)) ? 
"a Follower" : "an Initiate", god_name(god).c_str(), (penance > 0) ? " (penitent)." : "."); desc += _hiscore_newline_string(); } } } return desc; } string scorefile_entry::death_place(death_desc_verbosity verbosity) const { bool verbose = (verbosity == DDV_VERBOSE); string place; if (death_type == KILLED_BY_LEAVING || death_type == KILLED_BY_WINNING) return ""; if (verbosity == DDV_ONELINE || verbosity == DDV_TERSE) return " (" + level_id(branch, dlvl).describe() + ")"; if (verbose && death_type != KILLED_BY_QUITTING && death_type != KILLED_BY_WIZMODE) place += "..."; // where did we die? place += " " + prep_branch_level_name(level_id(branch, dlvl)); if (!mapdesc.empty()) place += make_stringf(" (%s)", mapdesc.c_str()); if (verbose && death_time && !_hiscore_same_day(birth_time, death_time)) { place += " on "; place += _hiscore_date_string(death_time); } place = _append_sentence_delimiter(place, "."); place += _hiscore_newline_string(); return place; } /** * Describes the cause of the player's death. * * @param verbosity The verbosity of the description. * @return A description of the cause of death. 
*/ string scorefile_entry::death_description(death_desc_verbosity verbosity) const { bool needs_beam_cause_line = false; bool needs_called_by_monster_line = false; bool needs_damage = false; const bool terse = (verbosity == DDV_TERSE); const bool semiverbose = (verbosity == DDV_LOGVERBOSE); const bool verbose = (verbosity == DDV_VERBOSE || semiverbose); const bool oneline = (verbosity == DDV_ONELINE); string desc; if (oneline) desc = " "; switch (death_type) { case KILLED_BY_MONSTER: if (terse) desc += death_source_desc(); else if (oneline) desc += "slain by " + death_source_desc(); else { desc += damage_verb(); desc += " by "; desc += death_source_desc(); } // put the damage on the weapon line if there is one if (auxkilldata.empty()) needs_damage = true; break; case KILLED_BY_HEADBUTT: if (terse) desc += apostrophise(death_source_desc()) + " headbutt"; else desc += "Headbutted by " + death_source_desc(); needs_damage = true; break; case KILLED_BY_ROLLING: if (terse) desc += "squashed by " + death_source_desc(); else desc += "Rolled over by " + death_source_desc(); needs_damage = true; break; case KILLED_BY_SPINES: if (terse) desc += apostrophise(death_source_desc()) + " spines"; else desc += "Impaled on " + apostrophise(death_source_desc()) + " spines" ; needs_damage = true; break; case KILLED_BY_POISON: if (death_source_name.empty() || terse) { if (!terse) desc += "Succumbed to poison"; else if (!death_source_name.empty()) desc += "poisoned by " + death_source_name; else desc += "poison"; if (!auxkilldata.empty()) desc += " (" + auxkilldata + ")"; } else if (auxkilldata.empty() && death_source_name.find("poison") != string::npos) { desc += "Succumbed to " + death_source_name; } else { desc += "Succumbed to " + ((death_source_name == "you") ? "their own" : apostrophise(death_source_name)) + " " + (auxkilldata.empty()? 
"poison" : auxkilldata); } break; case KILLED_BY_CLOUD: ASSERT(!auxkilldata.empty()); // there are no nameless clouds if (terse) if (death_source_name.empty()) desc += "cloud of " + auxkilldata; else desc += "cloud of " +auxkilldata + " [" + death_source_name == "you" ? "self" : death_source_name + "]"; else { desc += make_stringf("Engulfed by %s%s %s", death_source_name.empty() ? "a" : death_source_name == "you" ? "their own" : apostrophise(death_source_name).c_str(), death_source_name.empty() ? " cloud of" : "", auxkilldata.c_str()); } needs_damage = true; break; case KILLED_BY_BEAM: if (oneline || semiverbose) { // keeping this short to leave room for the deep elf spellcasters: desc += make_stringf("%s by ", _range_type_verb(auxkilldata.c_str())); desc += (death_source_name == "you") ? "themself" : death_source_desc(); if (semiverbose) { string beam = terse_missile_name(); if (beam.empty()) beam = terse_beam_cause(); trim_string(beam); if (!beam.empty()) desc += make_stringf(" (%s)", beam.c_str()); } } else if (isupper(auxkilldata[0])) // already made (ie shot arrows) { // If terse we have to parse the information from the string. // Darn it to heck. desc += terse? terse_missile_cause() : auxkilldata; needs_damage = true; } else if (verbose && starts_with(auxkilldata, "by ")) { // "by" is used for priest attacks where the effect is indirect // in verbose format we have another line for the monster if (death_source_name == "you") { needs_damage = true; desc += make_stringf("Killed by their own %s", auxkilldata.substr(3).c_str()); } else { needs_called_by_monster_line = true; desc += make_stringf("Killed %s", auxkilldata.c_str()); } } else { // Note: This is also used for the "by" cases in non-verbose // mode since listing the monster is more imporatant. 
if (semiverbose) desc += "Killed by "; else if (!terse) desc += "Killed from afar by "; if (death_source_name == "you") desc += "themself"; else desc += death_source_desc(); if (!auxkilldata.empty()) needs_beam_cause_line = true; needs_damage = true; } break; case KILLED_BY_LAVA: if (terse) desc += "lava"; else { if (starts_with(species::skin_name( static_cast<species_type>(race)), "bandage")) { desc += "Turned to ash by lava"; } else desc += "Took a swim in molten lava"; } break; case KILLED_BY_WATER: if (species::is_undead(static_cast<species_type>(race))) { if (terse) desc = "fell apart"; else if (starts_with(species::skin_name( static_cast<species_type>(race)), "bandage")) { desc = "Soaked and fell apart"; } else desc = "Sank and fell apart"; } else { if (!death_source_name.empty()) { desc += terse? "drowned by " : "Drowned by "; desc += death_source_name; needs_damage = true; } else desc += terse? "drowned" : "Drowned"; } break; case KILLED_BY_STUPIDITY: if (terse) desc += "stupidity"; else if (race >= 0 && // not a removed race species::is_unbreathing(static_cast<species_type>(race))) { desc += "Forgot to exist"; } else desc += "Forgot to breathe"; break; case KILLED_BY_WEAKNESS: desc += terse? "collapsed" : "Collapsed under their own weight"; break; case KILLED_BY_CLUMSINESS: desc += terse? "clumsiness" : "Slipped on a banana peel"; break; case KILLED_BY_TRAP: if (terse) desc += auxkilldata.c_str(); else { desc += make_stringf("Killed by triggering %s", auxkilldata.c_str()); } needs_damage = true; break; case KILLED_BY_LEAVING: if (terse) desc += "left"; else { if (num_runes > 0) desc += "Got out of the dungeon"; else if (species::is_undead(static_cast<species_type>(race))) desc += "Safely got out of the dungeon"; else desc += "Got out of the dungeon alive"; } break; case KILLED_BY_WINNING: desc += terse? "escaped" : "Escaped with the Orb"; if (num_runes < 1) desc += "!"; break; case KILLED_BY_QUITTING: desc += terse? 
"quit" : "Quit the game"; break; case KILLED_BY_WIZMODE: desc += terse? "wizmode" : "Entered wizard mode"; break; case KILLED_BY_DRAINING: if (terse) desc += "drained"; else { desc += "Drained of all life"; if (!death_source_desc().empty()) { desc += " by " + death_source_desc(); if (!auxkilldata.empty()) needs_beam_cause_line = true; } else if (!auxkilldata.empty()) desc += " by " + auxkilldata; } break; case KILLED_BY_STARVATION: desc += terse? "starvation" : "Starved to death"; break; case KILLED_BY_FREEZING: // refrigeration spell desc += terse? "frozen" : "Froze to death"; needs_damage = true; break; case KILLED_BY_BURNING: // sticky flame if (terse) desc += "burnt"; else if (!death_source_desc().empty()) { desc += "Incinerated by " + death_source_desc(); if (!auxkilldata.empty()) needs_beam_cause_line = true; } else desc += "Burnt to a crisp"; needs_damage = true; break; case KILLED_BY_WILD_MAGIC: if (auxkilldata.empty()) desc += terse? "wild magic" : "Killed by wild magic"; else { if (terse) desc += terse_wild_magic(); else { // A lot of sources for this case... some have "by" already. desc += make_stringf("Killed %s%s", (auxkilldata.find("by ") != 0) ? "by " : "", auxkilldata.c_str()); } } needs_damage = true; break; case KILLED_BY_XOM: if (terse) desc += "xom"; else desc += auxkilldata.empty() ? "Killed for Xom's enjoyment" : "Killed by " + auxkilldata; needs_damage = true; break; case KILLED_BY_ROTTING: desc += terse? 
"rotting" : "Rotted away"; if (!auxkilldata.empty()) desc += " (" + auxkilldata + ")"; if (!death_source_desc().empty()) desc += " (" + death_source_desc() + ")"; break; case KILLED_BY_TARGETING: if (terse) desc += "shot self"; else { desc += "Killed themself with "; if (auxkilldata.empty()) desc += "bad targeting"; else desc += "a badly aimed " + auxkilldata; } needs_damage = true; break; case KILLED_BY_REFLECTION: needs_damage = true; if (terse) desc += "reflected bolt"; else { desc += "Killed by a reflected "; if (auxkilldata.empty()) desc += "bolt"; else desc += auxkilldata; if (!death_source_name.empty() && !oneline && !semiverbose) { desc += "\n"; desc += " "; desc += "... reflected by "; desc += death_source_name; needs_damage = false; } } break; case KILLED_BY_BOUNCE: if (terse) desc += "bounced beam"; else { desc += "Killed themself with a bounced "; if (auxkilldata.empty()) desc += "beam"; else desc += auxkilldata; } needs_damage = true; break; case KILLED_BY_SELF_AIMED: if (terse) desc += "suicidal targeting"; else { desc += "Shot themself with "; if (auxkilldata.empty()) desc += "a beam"; else desc += article_a(auxkilldata, true); } needs_damage = true; break; case KILLED_BY_DEATH_EXPLOSION: if (terse) { if (death_source_name.empty()) desc += "spore"; else desc += death_source_name; } else { desc += "Killed by an exploding "; if (death_source_name.empty()) desc += "spore"; else desc += death_source_name; } needs_damage = true; break; case KILLED_BY_TSO_SMITING: desc += terse? "smitten by Shining One" : "Smitten by the Shining One"; needs_damage = true; break; case KILLED_BY_BEOGH_SMITING: desc += terse? "smitten by Beogh" : "Smitten by Beogh"; needs_damage = true; break; case KILLED_BY_PETRIFICATION: desc += terse? "petrified" : "Turned to stone"; break; case KILLED_BY_SOMETHING: if (!auxkilldata.empty()) desc += (terse ? "" : "Killed by ") + auxkilldata; else desc += terse? 
"died" : "Died"; needs_damage = true; break; case KILLED_BY_FALLING_DOWN_STAIRS: desc += terse? "fell downstairs" : "Fell down a flight of stairs"; needs_damage = true; break; case KILLED_BY_FALLING_THROUGH_GATE: desc += terse? "fell through a gate" : "Fell down through a gate"; needs_damage = true; break; case KILLED_BY_ACID: if (terse) desc += "acid"; else if (!death_source_desc().empty()) { desc += "Splashed by " + apostrophise(death_source_desc()) + " acid"; } else desc += "Splashed with acid"; needs_damage = true; break; case KILLED_BY_CURARE: desc += terse? "asphyx" : "Asphyxiated"; break; case KILLED_BY_DIVINE_WRATH: if (terse) desc += "divine wrath"; else { desc += "Killed by "; if (auxkilldata.empty()) desc += "divine wrath"; else { // Lugonu's touch or "the <retribution> of <deity>"; // otherwise it's a beam if (!isupper(auxkilldata[0]) && !starts_with(auxkilldata, "the ")) { desc += is_vowel(auxkilldata[0]) ? "an " : "a "; } desc += auxkilldata; } } needs_damage = true; if (!death_source_name.empty()) needs_called_by_monster_line = true; break; case KILLED_BY_DISINT: if (terse) desc += "disintegration"; else { if (death_source_name == "you") desc += "Blew themself up"; else desc += "Blown up by " + death_source_desc(); needs_beam_cause_line = true; } needs_damage = true; break; case KILLED_BY_MIRROR_DAMAGE: desc += terse ? "mirror damage" : "Killed by mirror damage"; needs_damage = true; break; case KILLED_BY_FRAILTY: desc += terse ? "frailty" : "Became unviable by " + auxkilldata; break; case KILLED_BY_BARBS: desc += terse ? 
"barbs" : "Succumbed to barbed spike wounds"; break; case KILLED_BY_BEING_THROWN: if (terse) desc += apostrophise(death_source_desc()) + " throw"; else desc += "Thrown by " + death_source_desc(); needs_damage = true; break; case KILLED_BY_COLLISION: if (terse) desc += auxkilldata + " collision"; else { desc += "Collided with " + auxkilldata; needs_called_by_monster_line = true; } needs_damage = true; break; case KILLED_BY_ZOT: desc += terse ? "Zot" : "Tarried too long and was consumed by Zot"; break; default: desc += terse? "program bug" : "Nibbled to death by software bugs"; break; } // end switch switch (death_type) { case KILLED_BY_STUPIDITY: case KILLED_BY_WEAKNESS: case KILLED_BY_CLUMSINESS: if (terse || oneline) { desc += " ("; desc += auxkilldata; desc += ")"; } else { desc += "\n"; desc += " "; desc += "... caused by "; desc += auxkilldata; } break; default: break; } if (oneline && desc.length() > 2) desc[1] = tolower_safe(desc[1]); // TODO: Eventually, get rid of "..." for cases where the text fits. if (terse) { if (death_type == KILLED_BY_MONSTER && !auxkilldata.empty()) { desc += "/"; desc += strip_article_a(auxkilldata); needs_damage = true; } else if (needs_beam_cause_line) desc += "/" + terse_beam_cause(); else if (needs_called_by_monster_line) desc += death_source_name; if (!killerpath.empty()) desc += "[" + indirectkiller + "]"; if (needs_damage && damage > 0) desc += " " + damage_string(true); } else if (verbose) { bool done_damage = false; // paranoia if (!semiverbose && needs_damage && damage > 0) { desc += " " + damage_string(); needs_damage = false; done_damage = true; } if (death_type == KILLED_BY_LEAVING || death_type == KILLED_BY_WINNING) { if (num_runes > 0) { desc += _hiscore_newline_string(); desc += make_stringf("... %s %d rune%s", (death_type == KILLED_BY_WINNING) ? "and" : "with", num_runes, (num_runes > 1) ? 
"s" : ""); if (!semiverbose && death_time > 0 && !_hiscore_same_day(birth_time, death_time)) { desc += " on "; desc += _hiscore_date_string(death_time); } desc = _append_sentence_delimiter(desc, "!"); desc += _hiscore_newline_string(); } else desc = _append_sentence_delimiter(desc, "."); } else if (death_type != KILLED_BY_QUITTING && death_type != KILLED_BY_WIZMODE) { desc += _hiscore_newline_string(); if (death_type == KILLED_BY_MONSTER && !auxkilldata.empty()) { if (!semiverbose) { desc += make_stringf("... wielding %s", auxkilldata.c_str()); needs_damage = true; desc += _hiscore_newline_string(); } else desc += make_stringf(" (%s)", auxkilldata.c_str()); } else if (needs_beam_cause_line) { if (!semiverbose) { desc += auxkilldata == "damnation" ? "... with " : (is_vowel(auxkilldata[0])) ? "... with an " : "... with a "; desc += auxkilldata; desc += _hiscore_newline_string(); needs_damage = true; } else if (death_type == KILLED_BY_DRAINING || death_type == KILLED_BY_BURNING) { desc += make_stringf(" (%s)", auxkilldata.c_str()); } } else if (needs_called_by_monster_line) { desc += make_stringf("... %s by %s", death_type == KILLED_BY_COLLISION ? "caused" : auxkilldata == "by angry trees" ? "awakened" : auxkilldata == "by Freeze" ? "generated" : "invoked", death_source_name.c_str()); desc += _hiscore_newline_string(); needs_damage = true; } if (!killerpath.empty()) { vector<string> summoners = _xlog_split_fields(killerpath); for (const auto &sumname : summoners) { if (!semiverbose) { desc += "... " + sumname; desc += _hiscore_newline_string(); } else desc += " (" + sumname; } if (semiverbose) desc += string(summoners.size(), ')'); } if (!semiverbose) { if (needs_damage && !done_damage && damage > 0) desc += " " + damage_string(); if (needs_damage && !done_damage) desc += _hiscore_newline_string(); if (you.duration[DUR_PARALYSIS]) { desc += "... 
while paralysed";
                if (you.props.exists(PARALYSED_BY_KEY))
                {
                    desc += " by "
                            + you.props[PARALYSED_BY_KEY].get_string();
                }
                desc += _hiscore_newline_string();
            }
            else if (you.duration[DUR_PETRIFIED])
            {
                desc += "... while petrified";
                if (you.props.exists(PETRIFIED_BY_KEY))
                {
                    desc += " by "
                            + you.props[PETRIFIED_BY_KEY].get_string();
                }
                desc += _hiscore_newline_string();
            }
        }
    }
    }

    // Escape-with-orb / left-the-dungeon lines get their own terminal
    // punctuation in normal verbosity.
    if (!oneline)
    {
        if (death_type == KILLED_BY_LEAVING
            || death_type == KILLED_BY_WINNING)
        {
            // TODO: strcat "after reaching level %d"; for LEAVING
            if (verbosity == DDV_NORMAL)
            {
                desc = _append_sentence_delimiter(desc,
                                                  num_runes > 0? "!" : ".");
            }
            desc += _hiscore_newline_string();
        }
    }

    if (death_type == KILLED_BY_DEATH_EXPLOSION && !terse && !auxkilldata.empty())
    {
        desc += "... ";
        desc += auxkilldata;
        desc += "\n";
        desc += " ";
    }

    if (terse)
    {
        trim_string(desc);
        desc = strip_article_a(desc);
    }

    return desc;
}

//////////////////////////////////////////////////////////////////////////////
// xlog_fields

xlog_fields::xlog_fields() : fields(), fieldmap()
{
}

// Construct directly from a raw xlogfile line ("k=v:k=v:...").
xlog_fields::xlog_fields(const string &line) : fields(), fieldmap()
{
    init(line);
}

// xlogfile escape: s/:/::/g
static string _xlog_escape(const string &s)
{
    return replace_all(s, ":", "::");
}

// xlogfile unescape: s/::/:/g
static string _xlog_unescape(const string &s)
{
    return replace_all(s, "::", ":");
}

// Find the next single ':' field separator at or after start, skipping
// over escaped "::" pairs (recursing past each one).
static string::size_type _xlog_next_separator(const string &s,
                                              string::size_type start)
{
    string::size_type p = s.find(':', start);
    if (p != string::npos && p < s.length() - 1 && s[p + 1] == ':')
        return _xlog_next_separator(s, p + 2);

    return p;
}

// Split an xlog line on unescaped ':' separators; the trailing segment
// (after the last separator) is included if non-empty.
static vector<string> _xlog_split_fields(const string &s)
{
    string::size_type start = 0, end = 0;
    vector<string> fs;

    for (; (end = _xlog_next_separator(s, start)) != string::npos;
         start = end + 1)
    {
        fs.push_back(s.substr(start, end - start));
    }

    if (start < s.length())
        fs.push_back(s.substr(start));

    return fs;
}

// Parse a raw xlog line into ordered key/value pairs; fields without
// an '=' are silently skipped. Rebuilds the lookup map afterwards.
void xlog_fields::init(const string &line)
{
    for (const string &field :
         _xlog_split_fields(line))
    {
        string::size_type st = field.find('=');
        if (st == string::npos)
            continue;
        fields.emplace_back(field.substr(0, st),
                            _xlog_unescape(field.substr(st + 1)));
    }

    map_fields();
}

// Append a printf-formatted field and mirror it into the lookup map.
void xlog_fields::add_field(const string &key, const char *format, ...)
{
    va_list args;
    va_start(args, format);
    string buf = vmake_stringf(format, args);
    va_end(args);

    fields.emplace_back(key, buf);
    fieldmap[key] = buf;
}

// Value for key s, or "" if the field is absent.
string xlog_fields::str_field(const string &s) const
{
    return lookup(fieldmap, s, "");
}

// Integer value for key s (0 if absent or non-numeric, per atoi).
int xlog_fields::int_field(const string &s) const
{
    string field = str_field(s);
    return atoi(field.c_str());
}

// Rebuild the key->value lookup map from the ordered field list.
void xlog_fields::map_fields() const
{
    fieldmap.clear();
    for (const pair<string, string> &f : fields)
        fieldmap[f.first] = f.second;
}

// Serialise all non-empty fields back into escaped "k=v:k=v" xlog form.
string xlog_fields::xlog_line() const
{
    string line;

    for (const pair<string, string> &f : fields)
    {
        // Don't write empty fields.
        if (f.second.empty())
            continue;

        if (!line.empty())
            line += ":";

        line += f.first;
        line += "=";
        line += _xlog_escape(f.second);
    }

    return line;
}

///////////////////////////////////////////////////////////////////////////////
// Milestones

/**
 * @brief Record the player reaching a milestone, if ::DGL_MILESTONES is defined.
 * @callergraph
 */
void mark_milestone(const string &type, const string &milestone,
                    const string &origin_level, time_t milestone_time)
{
#ifdef DGL_MILESTONES
    static string lasttype, lastmilestone;
    static long lastturn = -1;

    if (crawl_state.game_is_arena()
        || !crawl_state.need_save
        // Suppress duplicate milestones on the same turn.
|| (lastturn == you.num_turns
            && lasttype == type
            && lastmilestone == milestone)
#ifndef SCORE_WIZARD_CHARACTERS
        // Don't mark normal milestones in wizmode or explore mode
        || (type != "crash"
            && (you.wizard || you.suppress_wizard || you.explore))
#endif
        )
    {
        return;
    }

    // Remember what we just logged so an identical milestone on the
    // same turn is suppressed by the check above.
    lasttype = type;
    lastmilestone = milestone;
    lastturn = you.num_turns;

    const string milestone_file =
        catpath(Options.save_dir,
                "milestones" + crawl_state.game_type_qualifier());

    // Build a skeleton scorefile entry just to reuse its xlog fields.
    const scorefile_entry se(0, MID_NOBODY, KILL_MISC, nullptr);
    se.set_base_xlog_fields();
    xlog_fields xl = se.get_fields();
    if (!origin_level.empty())
    {
        xl.add_field("oplace", "%s",
                     ((origin_level == "parent")
                      ? current_level_parent().describe()
                      : origin_level).c_str());
    }
    xl.add_field("time", "%s",
                 make_date_string(milestone_time ? milestone_time
                                  : se.get_death_time()).c_str());
    xl.add_field("type", "%s", type.c_str());
    xl.add_field("milestone", "%s", milestone.c_str());
    const string xlog_line = xl.xlog_line();

    // Append under the dgamelaunch file lock.
    if (FILE *fp = lk_open("a", milestone_file))
    {
        fprintf(fp, "%s\n", xlog_line.c_str());
        lk_close(fp);
    }
#else
    UNUSED(type, milestone, origin_level, milestone_time);
#endif // DGL_MILESTONES
}

#ifdef DGL_WHEREIS
// Current player status as an xlog line (for the dgamelaunch "whereis"
// service), timestamped with the current time.
string xlog_status_line()
{
    const scorefile_entry se(0, MID_NOBODY, KILL_MISC, nullptr);
    se.set_base_xlog_fields();
    xlog_fields xl = se.get_fields();
    xl.add_field("time", "%s", make_date_string(time(nullptr)).c_str());
    return xl.xlog_line();
}
#endif // DGL_WHEREIS
/*!
 * @copyright
 * Copyright (c) 2015-2019 Intel Corporation
 *
 * @copyright
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * @copyright
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * @copyright
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * */

#include "agent-framework/module/model/processor.hpp"
#include "agent-framework/module/constants/compute.hpp"

using namespace agent_framework::model;
using namespace agent_framework::model::utils;

// Static identity used by the generic resource framework.
const enums::Component Processor::component = enums::Component::Processor;
const enums::CollectionName Processor::collection_name = enums::CollectionName::Processors;

// A Processor is always created as a child of another resource (its parent
// UUID/type are forwarded to the Resource base).
Processor::Processor(const std::string& parent_uuid, enums::Component parent_type) :
    Resource(parent_uuid, parent_type) {}

Processor::~Processor() {}

/*!
 * Serialise this processor to JSON. Field-for-field mirror of from_json();
 * nested attributes (status, capabilities, CPU IDs, FPGA, OEM) delegate to
 * their own to_json().
 */
json::Json Processor::to_json() const {
    json::Json result = json::Json();
    result[literals::Processor::STATUS] = get_status().to_json();
    result[literals::Processor::SOCKET] = get_socket();
    result[literals::Processor::TYPE] = get_processor_type();
    result[literals::Processor::ARCHITECTURE] = get_processor_architecture();
    result[literals::Processor::INSTRUCTION_SET] = get_instruction_set();
    result[literals::Processor::CAPABILITIES] = get_capabilities().to_json();
    result[literals::Processor::MANUFACTURER] = get_manufacturer();
    result[literals::Processor::MODEL] = get_model();
    result[literals::Processor::MODEL_NAME] = get_model_name();
    result[literals::Processor::CPU_ID] = get_cpu_id().to_json();
    result[literals::Processor::EXTENDED_CPU_ID] = get_extended_cpu_id().to_json();
    result[literals::Processor::MAX_SPEED] = get_max_speed_mhz();
    result[literals::Processor::TOTAL_CORES] = get_total_cores();
    result[literals::Processor::ENABLED_CORES] = get_enabled_cores();
    result[literals::Processor::TOTAL_THREADS] = get_total_threads();
    result[literals::Processor::ENABLED_THREADS] = get_enabled_threads();
    result[literals::Processor::THERMAL_DESIGN_POWER_WATT] = get_tdp_watt();
    result[literals::Processor::FPGA] = get_fpga().to_json();
    result[literals::Processor::OEM] = get_oem().to_json();
    return result;
}

/*!
 * Deserialise a processor from JSON. Every key written by to_json() is read
 * back; missing keys follow json::Json's lookup behaviour for the setters.
 */
Processor Processor::from_json(const json::Json& json) {
    Processor processor{};
    processor.set_status(attribute::Status::from_json(json[literals::Processor::STATUS]));
    processor.set_socket(json[literals::Processor::SOCKET]);
    processor.set_processor_type(json[literals::Processor::TYPE]);
    processor.set_processor_architecture(json[literals::Processor::ARCHITECTURE]);
    processor.set_instruction_set(json[literals::Processor::INSTRUCTION_SET]);
    processor.set_capabilities(Capabilities::from_json(json[literals::Processor::CAPABILITIES]));
    processor.set_manufacturer(json[literals::Processor::MANUFACTURER]);
    processor.set_model(json[literals::Processor::MODEL]);
    processor.set_model_name(json[literals::Processor::MODEL_NAME]);
    processor.set_cpu_id(attribute::CpuId::from_json(json[literals::Processor::CPU_ID]));
    processor.set_extended_cpu_id(attribute::ExtendedCpuId::from_json(json[literals::Processor::EXTENDED_CPU_ID]));
    processor.set_max_speed_mhz(json[literals::Processor::MAX_SPEED]);
    processor.set_total_cores(json[literals::Processor::TOTAL_CORES]);
    processor.set_enabled_cores(json[literals::Processor::ENABLED_CORES]);
    processor.set_total_threads(json[literals::Processor::TOTAL_THREADS]);
    processor.set_enabled_threads(json[literals::Processor::ENABLED_THREADS]);
    processor.set_tdp_watt(json[literals::Processor::THERMAL_DESIGN_POWER_WATT]);
    processor.set_fpga(attribute::Fpga::from_json(json[literals::Processor::FPGA]));
    processor.set_oem(attribute::Oem::from_json(json[literals::Processor::OEM]));
    return processor;
}
//
// Created by Gabriele Gaetano Fronzé on 19/09/2017.
//

#include <TRandom.h>
#include "TROOT.h"
#include "TH1D.h"
#include "TFile.h"
#include "TSystFitParameter.h"
#include "TSystFitSettings.h"
#include "TSystFitter.h"
#include "TFitResult.h"
#include "TFitResultPtr.h"
#include "TVirtualFitter.h"
#include "TCanvas.h"
#include <vector>
#include <iostream>

using namespace std;

// ROOT macro: fit a charge spectrum ("h" in dati.root) with
// baseline-Gaussian + avalanche-Landau + streamer-Gaussian, first piecewise
// to seed the parameters, then globally, then run a systematic-variation
// scan of the global fit via TSystFitter.
void PerformRealSystFit(){

    TVirtualFitter::SetMaxIterations( 20000 );

    auto canv = new TCanvas("canv","canv");

    // Read the raw data from the file .root and plot the histogram
    TFile *f = new TFile("dati.root");
    f->ls();
    TCanvas *c = new TCanvas("c", "Signal charge", 800, 600);
    TH1F *histo = (TH1F*)f->Get("h");
    histo->Draw();
    histo->GetXaxis()->SetTitle("charge [pC/4]");
    histo->GetYaxis()->SetTitle("counts");
    histo->GetYaxis()->SetRangeUser(0, 120);
    histo->GetXaxis()->SetRangeUser(-50, 370);

    // Gaussian function for baseline (pedestal around 0)
    TF1 *f1 = new TF1("f1","[0]*Gaus(x, [1], [2])", -10, 15);
    f1->SetParameter(0, 70);
    f1->SetParameter(1, 0);
    f1->SetParameter(2, 3);
    histo->Fit("f1", "r");   // "r": restrict fit to the function's range

    // Landau function for avalanche peak
    TF1 *f2 = new TF1("f2","[0]*Landau(x, [1], [2])", 20, 80);
    f2->SetParameter(0, 533);
    f2->SetParameter(1, 25);
    f2->SetParameter(2, 5.2);
    histo->Fit("f2", "r");

    // Gaussian function for streamer peak
    TF1 *f3 = new TF1("f3","[0]*Gaus(x, [1], [2])", 150, 350);
    f3->SetParameter(0, 10);
    f3->SetParameter(1, 250);
    f3->SetParameter(2, 25);
    histo->Fit("f3", "r");

    // Global function: that's the sum of the previous functions; seeded with
    // the piecewise fit results above.
    TF1 *f4 = new TF1("f4","[0]*Gaus(x, [1], [2]) + [3]*Landau(x, [4], [5]) + [6]*Gaus(x, [7], [8])", -100, 400);
    f4->SetParameter(0, f1->GetParameter(0));
    f4->FixParameter(1, f1->GetParameter(1)); // fixed parameter (baseline mean)
    f4->SetParameter(2, f1->GetParameter(2));
    f4->SetParameter(3, f2->GetParameter(0));
    f4->SetParameter(4, f2->GetParameter(1));
    f4->SetParameter(5, f2->GetParameter(2));
    f4->SetParameter(6, f3->GetParameter(0));
    f4->SetParameter(7, f3->GetParameter(1));
    f4->SetParameter(8, f3->GetParameter(2));
    f4->SetNpx(1000);
    histo->Fit("f4", "re");

    // One entry per f4 parameter: how many systematic variations to sample
    // for that parameter (0 = keep the fitted value).
    std::vector<int> nSamples = {0,0,0,0,7,7,0,7,7};

    auto *systFitSettings = new TSystFitSettings(*f4,nSamples);
    // cout<<"Settings initialized"<<endl;
    //
    // systFitSettings->AddParameter(TSystFitParameter(ParamValue(20.,10.,30.),1));
    //
    // cout<<"Par0 initialized with "<<systFitSettings->GetParameter(0).GetNValues()<<" values"<<endl;
    //
    // systFitSettings->AddParameter(TSystFitParameter(ParamValue(17.,5.,25.),5));
    //
    // cout<<"Par1 initialized with "<<systFitSettings->GetParameter(1).GetNValues()<<" values"<<endl;
    //
    // systFitSettings->AddParameter(TSystFitParameter(new TF1("fa1","sin(x)/x",0.,50.),5));
    //
    // cout<<"Par2 initialized with "<<systFitSettings->GetParameter(2).GetNValues()<<" values"<<endl;
    //
    // Double_t par3Values[3] = {1.,0.,2.};
    // systFitSettings->AddParameter(TSystFitParameter(ParamValue(par3Values),5));
    //
    // cout<<"Par3 initialized with "<<systFitSettings->GetParameter(3).GetNValues()<<" values"<<endl;

    cout<<"A total of "<<systFitSettings->GetNConfigurations()<<" configurations will be tested"<<endl;

    systFitSettings->GenerateConfigurations();

    // return;

    auto *systFitter = new TSystFitter(histo);
    systFitter->SetSystFitSettings(systFitSettings);
    systFitter->SystFit(f4,"srliq","",-100.,400.);
    systFitter->PrintResults(canv);
}
#include<iostream>
using namespace std;

// Maximum number of vehicles (all types combined) the lot can hold.
static const int CAPACITY = 50;

// Attempt to park one vehicle.
//
// On success: increments the per-type counter, adds `fee` to the running
// total and bumps the overall occupancy `count`. When the lot is full a
// message is printed and nothing changes.
//
// Bug fix vs. original: the check was `count <= 50`, which admitted a 51st
// vehicle before reporting "Parking is full."; it is now `count < CAPACITY`.
static void try_park(int &count, int &amount, int &type_count, int fee)
{
    if (count < CAPACITY)
    {
        type_count++;
        amount = amount + fee;
        count = count + 1;
    }
    else
    {
        cout << "Parking is full." << endl;
    }
}

// Interactive menu loop for the parking system.
// Fees: rickshaw = 100, car = 200, bus = 300.
int main()
{
    int u_input, amount = 0, count = 0, r = 0, c = 0, b = 0;
    cout << "Welcome to Shaaf Car Parking System\n";
    cout << "Choose your vehicle type from given list\n";
    while (true)
    {
        cout << "1) Rickshaw. " << endl;
        cout << "2) Car. " << endl;
        cout << "3) Bus. " << endl;
        cout << "4) show the record. " << endl;
        cout << "5) Delete the record. " << endl;
        cout << "\noption = ";
        // Bug fix: on EOF or non-numeric input the original looped forever
        // re-printing the menu; bail out cleanly instead.
        if (!(cin >> u_input))
            break;
        if (u_input == 1)
        {
            try_park(count, amount, r, 100);
        }
        else if (u_input == 2)
        {
            try_park(count, amount, c, 200);
        }
        else if (u_input == 3)
        {
            try_park(count, amount, b, 300);
        }
        else if (u_input == 4)
        {
            // Show the running record.
            cout << "*************************************" << endl;
            cout << "Total Amount = " << amount << endl;
            // Fixed "numberc" typo from the original output.
            cout << "Total number of vehicles = " << count << endl;
            cout << "Total number of rikshaws parked = " << r << endl;
            cout << "Total number of cars parked = " << c << endl;
            cout << "Total number of parked Buses = " << b << endl;
            cout << "*************************************" << endl;
        }
        else if (u_input == 5)
        {
            // Reset all counters and the collected amount.
            amount = 0;
            count = 0;
            r = 0;
            c = 0;
            b = 0;
            cout << "*************************************" << endl;
            cout << "Record Deleted." << endl;
            cout << "*************************************" << endl;
        }
        else
        {
            cout << "*************************************" << endl;
            cout << "Invalid number." << endl;
            cout << "*************************************" << endl;
        }
    }
    return 0;
}
#include "CpOpenhomeOrgSubscriptionLongPoll1.h"
#include <OpenHome/Net/Core/CpProxy.h>
#include <OpenHome/Net/Private/CpiService.h>
#include <OpenHome/Private/Thread.h>
#include <OpenHome/Net/Private/AsyncPrivate.h>
#include <OpenHome/Net/Core/CpDevice.h>
#include <OpenHome/Net/Private/Error.h>
#include <OpenHome/Net/Private/CpiDevice.h>

// Generated-style control-point proxy for the openhome.org
// SubscriptionLongPoll:1 UPnP service. Each action X gets three entry
// points: SyncX (blocking), BeginX/EndX (async). The SyncX helpers below
// adapt the async pair into a blocking call via SyncProxyAction.

namespace OpenHome {
namespace Net {

// Blocking adapter for the Subscribe action; stores references to the
// caller's output slots and fills them in CompleteRequest.
class SyncSubscribeOpenhomeOrgSubscriptionLongPoll1 : public SyncProxyAction
{
public:
    SyncSubscribeOpenhomeOrgSubscriptionLongPoll1(CpProxyOpenhomeOrgSubscriptionLongPoll1& aProxy, Brh& aSid, TUint& aDuration);
    virtual void CompleteRequest(IAsync& aAsync);
private:
    CpProxyOpenhomeOrgSubscriptionLongPoll1& iService;
    Brh& iSid;
    TUint& iDuration;
};

// Blocking adapter for the Unsubscribe action (no outputs).
class SyncUnsubscribeOpenhomeOrgSubscriptionLongPoll1 : public SyncProxyAction
{
public:
    SyncUnsubscribeOpenhomeOrgSubscriptionLongPoll1(CpProxyOpenhomeOrgSubscriptionLongPoll1& aProxy);
    virtual void CompleteRequest(IAsync& aAsync);
private:
    CpProxyOpenhomeOrgSubscriptionLongPoll1& iService;
};

// Blocking adapter for the Renew action.
class SyncRenewOpenhomeOrgSubscriptionLongPoll1 : public SyncProxyAction
{
public:
    SyncRenewOpenhomeOrgSubscriptionLongPoll1(CpProxyOpenhomeOrgSubscriptionLongPoll1& aProxy, TUint& aDuration);
    virtual void CompleteRequest(IAsync& aAsync);
private:
    CpProxyOpenhomeOrgSubscriptionLongPoll1& iService;
    TUint& iDuration;
};

// Blocking adapter for the GetPropertyUpdates action.
class SyncGetPropertyUpdatesOpenhomeOrgSubscriptionLongPoll1 : public SyncProxyAction
{
public:
    SyncGetPropertyUpdatesOpenhomeOrgSubscriptionLongPoll1(CpProxyOpenhomeOrgSubscriptionLongPoll1& aProxy, Brh& aUpdates);
    virtual void CompleteRequest(IAsync& aAsync);
private:
    CpProxyOpenhomeOrgSubscriptionLongPoll1& iService;
    Brh& iUpdates;
};

} // namespace Net
} // namespace OpenHome

using namespace OpenHome;
using namespace OpenHome::Net;


// SyncSubscribeOpenhomeOrgSubscriptionLongPoll1

SyncSubscribeOpenhomeOrgSubscriptionLongPoll1::SyncSubscribeOpenhomeOrgSubscriptionLongPoll1(CpProxyOpenhomeOrgSubscriptionLongPoll1& aProxy, Brh& aSid, TUint& aDuration)
    : iService(aProxy)
    , iSid(aSid)
    , iDuration(aDuration)
{
}

void SyncSubscribeOpenhomeOrgSubscriptionLongPoll1::CompleteRequest(IAsync& aAsync)
{
    iService.EndSubscribe(aAsync, iSid, iDuration);
}


// SyncUnsubscribeOpenhomeOrgSubscriptionLongPoll1

SyncUnsubscribeOpenhomeOrgSubscriptionLongPoll1::SyncUnsubscribeOpenhomeOrgSubscriptionLongPoll1(CpProxyOpenhomeOrgSubscriptionLongPoll1& aProxy)
    : iService(aProxy)
{
}

void SyncUnsubscribeOpenhomeOrgSubscriptionLongPoll1::CompleteRequest(IAsync& aAsync)
{
    iService.EndUnsubscribe(aAsync);
}


// SyncRenewOpenhomeOrgSubscriptionLongPoll1

SyncRenewOpenhomeOrgSubscriptionLongPoll1::SyncRenewOpenhomeOrgSubscriptionLongPoll1(CpProxyOpenhomeOrgSubscriptionLongPoll1& aProxy, TUint& aDuration)
    : iService(aProxy)
    , iDuration(aDuration)
{
}

void SyncRenewOpenhomeOrgSubscriptionLongPoll1::CompleteRequest(IAsync& aAsync)
{
    iService.EndRenew(aAsync, iDuration);
}


// SyncGetPropertyUpdatesOpenhomeOrgSubscriptionLongPoll1

SyncGetPropertyUpdatesOpenhomeOrgSubscriptionLongPoll1::SyncGetPropertyUpdatesOpenhomeOrgSubscriptionLongPoll1(CpProxyOpenhomeOrgSubscriptionLongPoll1& aProxy, Brh& aUpdates)
    : iService(aProxy)
    , iUpdates(aUpdates)
{
}

void SyncGetPropertyUpdatesOpenhomeOrgSubscriptionLongPoll1::CompleteRequest(IAsync& aAsync)
{
    iService.EndGetPropertyUpdates(aAsync, iUpdates);
}


// CpProxyOpenhomeOrgSubscriptionLongPoll1

// Builds the action metadata for the four service actions. NOTE: the
// parameter registration order here defines the positional argument order
// used by every Begin*/End* method below - keep them in sync.
CpProxyOpenhomeOrgSubscriptionLongPoll1::CpProxyOpenhomeOrgSubscriptionLongPoll1(CpDevice& aDevice)
    : iCpProxy("openhome-org", "SubscriptionLongPoll", 1, aDevice.Device())
{
    OpenHome::Net::Parameter* param;

    iActionSubscribe = new Action("Subscribe");
    param = new OpenHome::Net::ParameterString("ClientId");
    iActionSubscribe->AddInputParameter(param);
    param = new OpenHome::Net::ParameterString("Udn");
    iActionSubscribe->AddInputParameter(param);
    param = new OpenHome::Net::ParameterString("Service");
    iActionSubscribe->AddInputParameter(param);
    param = new OpenHome::Net::ParameterUint("RequestedDuration");
    iActionSubscribe->AddInputParameter(param);
    param = new OpenHome::Net::ParameterString("Sid");
    iActionSubscribe->AddOutputParameter(param);
    param = new OpenHome::Net::ParameterUint("Duration");
    iActionSubscribe->AddOutputParameter(param);

    iActionUnsubscribe = new Action("Unsubscribe");
    param = new OpenHome::Net::ParameterString("Sid");
    iActionUnsubscribe->AddInputParameter(param);

    iActionRenew = new Action("Renew");
    param = new OpenHome::Net::ParameterString("Sid");
    iActionRenew->AddInputParameter(param);
    param = new OpenHome::Net::ParameterUint("RequestedDuration");
    iActionRenew->AddInputParameter(param);
    param = new OpenHome::Net::ParameterUint("Duration");
    iActionRenew->AddOutputParameter(param);

    iActionGetPropertyUpdates = new Action("GetPropertyUpdates");
    param = new OpenHome::Net::ParameterString("ClientId");
    iActionGetPropertyUpdates->AddInputParameter(param);
    param = new OpenHome::Net::ParameterString("Updates");
    iActionGetPropertyUpdates->AddOutputParameter(param);
}

CpProxyOpenhomeOrgSubscriptionLongPoll1::~CpProxyOpenhomeOrgSubscriptionLongPoll1()
{
    DestroyService();
    delete iActionSubscribe;
    delete iActionUnsubscribe;
    delete iActionRenew;
    delete iActionGetPropertyUpdates;
}

// Blocking Subscribe: waits for the invocation to complete and fills
// aSid/aDuration from the action's outputs.
void CpProxyOpenhomeOrgSubscriptionLongPoll1::SyncSubscribe(const Brx& aClientId, const Brx& aUdn, const Brx& aService, TUint aRequestedDuration, Brh& aSid, TUint& aDuration)
{
    SyncSubscribeOpenhomeOrgSubscriptionLongPoll1 sync(*this, aSid, aDuration);
    BeginSubscribe(aClientId, aUdn, aService, aRequestedDuration, sync.Functor());
    sync.Wait();
}

// Starts an async Subscribe; aFunctor is run on completion and must call
// EndSubscribe to collect results / surface errors.
void CpProxyOpenhomeOrgSubscriptionLongPoll1::BeginSubscribe(const Brx& aClientId, const Brx& aUdn, const Brx& aService, TUint aRequestedDuration, FunctorAsync& aFunctor)
{
    Invocation* invocation = iCpProxy.GetService().Invocation(*iActionSubscribe, aFunctor);
    TUint inIndex = 0;
    const Action::VectorParameters& inParams = iActionSubscribe->InputParameters();
    invocation->AddInput(new ArgumentString(*inParams[inIndex++], aClientId));
    invocation->AddInput(new ArgumentString(*inParams[inIndex++], aUdn));
    invocation->AddInput(new ArgumentString(*inParams[inIndex++], aService));
    invocation->AddInput(new ArgumentUint(*inParams[inIndex++], aRequestedDuration));
    TUint outIndex = 0;
    const Action::VectorParameters& outParams = iActionSubscribe->OutputParameters();
    invocation->AddOutput(new ArgumentString(*outParams[outIndex++]));
    invocation->AddOutput(new ArgumentUint(*outParams[outIndex++]));
    iCpProxy.GetInvocable().InvokeAction(*invocation);
}

// Completes a Subscribe invocation; throws a proxy error if the action
// failed, otherwise transfers Sid/Duration to the caller.
void CpProxyOpenhomeOrgSubscriptionLongPoll1::EndSubscribe(IAsync& aAsync, Brh& aSid, TUint& aDuration)
{
    ASSERT(((Async&)aAsync).Type() == Async::eInvocation);
    Invocation& invocation = (Invocation&)aAsync;
    ASSERT(invocation.Action().Name() == Brn("Subscribe"));

    Error::ELevel level;
    TUint code;
    const TChar* ignore;
    if (invocation.Error(level, code, ignore)) {
        THROW_PROXYERROR(level, code);
    }
    TUint index = 0;
    ((ArgumentString*)invocation.OutputArguments()[index++])->TransferTo(aSid);
    aDuration = ((ArgumentUint*)invocation.OutputArguments()[index++])->Value();
}

// Blocking Unsubscribe.
void CpProxyOpenhomeOrgSubscriptionLongPoll1::SyncUnsubscribe(const Brx& aSid)
{
    SyncUnsubscribeOpenhomeOrgSubscriptionLongPoll1 sync(*this);
    BeginUnsubscribe(aSid, sync.Functor());
    sync.Wait();
}

void CpProxyOpenhomeOrgSubscriptionLongPoll1::BeginUnsubscribe(const Brx& aSid, FunctorAsync& aFunctor)
{
    Invocation* invocation = iCpProxy.GetService().Invocation(*iActionUnsubscribe, aFunctor);
    TUint inIndex = 0;
    const Action::VectorParameters& inParams = iActionUnsubscribe->InputParameters();
    invocation->AddInput(new ArgumentString(*inParams[inIndex++], aSid));
    iCpProxy.GetInvocable().InvokeAction(*invocation);
}

// Completes Unsubscribe; only checks for errors (no outputs).
void CpProxyOpenhomeOrgSubscriptionLongPoll1::EndUnsubscribe(IAsync& aAsync)
{
    ASSERT(((Async&)aAsync).Type() == Async::eInvocation);
    Invocation& invocation = (Invocation&)aAsync;
    ASSERT(invocation.Action().Name() == Brn("Unsubscribe"));

    Error::ELevel level;
    TUint code;
    const TChar* ignore;
    if (invocation.Error(level, code, ignore)) {
        THROW_PROXYERROR(level, code);
    }
}

// Blocking Renew.
void CpProxyOpenhomeOrgSubscriptionLongPoll1::SyncRenew(const Brx& aSid, TUint aRequestedDuration, TUint& aDuration)
{
    SyncRenewOpenhomeOrgSubscriptionLongPoll1 sync(*this, aDuration);
    BeginRenew(aSid, aRequestedDuration, sync.Functor());
    sync.Wait();
}

void CpProxyOpenhomeOrgSubscriptionLongPoll1::BeginRenew(const Brx& aSid, TUint aRequestedDuration, FunctorAsync& aFunctor)
{
    Invocation* invocation = iCpProxy.GetService().Invocation(*iActionRenew, aFunctor);
    TUint inIndex = 0;
    const Action::VectorParameters& inParams = iActionRenew->InputParameters();
    invocation->AddInput(new ArgumentString(*inParams[inIndex++], aSid));
    invocation->AddInput(new ArgumentUint(*inParams[inIndex++], aRequestedDuration));
    TUint outIndex = 0;
    const Action::VectorParameters& outParams = iActionRenew->OutputParameters();
    invocation->AddOutput(new ArgumentUint(*outParams[outIndex++]));
    iCpProxy.GetInvocable().InvokeAction(*invocation);
}

// Completes Renew; returns the granted duration.
void CpProxyOpenhomeOrgSubscriptionLongPoll1::EndRenew(IAsync& aAsync, TUint& aDuration)
{
    ASSERT(((Async&)aAsync).Type() == Async::eInvocation);
    Invocation& invocation = (Invocation&)aAsync;
    ASSERT(invocation.Action().Name() == Brn("Renew"));

    Error::ELevel level;
    TUint code;
    const TChar* ignore;
    if (invocation.Error(level, code, ignore)) {
        THROW_PROXYERROR(level, code);
    }
    TUint index = 0;
    aDuration = ((ArgumentUint*)invocation.OutputArguments()[index++])->Value();
}

// Blocking GetPropertyUpdates.
void CpProxyOpenhomeOrgSubscriptionLongPoll1::SyncGetPropertyUpdates(const Brx& aClientId, Brh& aUpdates)
{
    SyncGetPropertyUpdatesOpenhomeOrgSubscriptionLongPoll1 sync(*this, aUpdates);
    BeginGetPropertyUpdates(aClientId, sync.Functor());
    sync.Wait();
}

void CpProxyOpenhomeOrgSubscriptionLongPoll1::BeginGetPropertyUpdates(const Brx& aClientId, FunctorAsync& aFunctor)
{
    Invocation* invocation = iCpProxy.GetService().Invocation(*iActionGetPropertyUpdates, aFunctor);
    TUint inIndex = 0;
    const Action::VectorParameters& inParams = iActionGetPropertyUpdates->InputParameters();
    invocation->AddInput(new ArgumentString(*inParams[inIndex++], aClientId));
    TUint outIndex = 0;
    const Action::VectorParameters& outParams = iActionGetPropertyUpdates->OutputParameters();
    invocation->AddOutput(new ArgumentString(*outParams[outIndex++]));
    iCpProxy.GetInvocable().InvokeAction(*invocation);
}

// Completes GetPropertyUpdates; transfers the updates document.
void CpProxyOpenhomeOrgSubscriptionLongPoll1::EndGetPropertyUpdates(IAsync& aAsync, Brh& aUpdates)
{
    ASSERT(((Async&)aAsync).Type() == Async::eInvocation);
    Invocation& invocation = (Invocation&)aAsync;
    ASSERT(invocation.Action().Name() == Brn("GetPropertyUpdates"));

    Error::ELevel level;
    TUint code;
    const TChar* ignore;
    if (invocation.Error(level, code, ignore)) {
        THROW_PROXYERROR(level, code);
    }
    TUint index = 0;
    ((ArgumentString*)invocation.OutputArguments()[index++])->TransferTo(aUpdates);
}

// Thin delegations to the underlying CpProxy.

void CpProxyOpenhomeOrgSubscriptionLongPoll1::Subscribe()
{
  iCpProxy.Subscribe();
}

void CpProxyOpenhomeOrgSubscriptionLongPoll1::Unsubscribe()
{
 iCpProxy.Unsubscribe();
}

void CpProxyOpenhomeOrgSubscriptionLongPoll1::SetPropertyChanged(Functor& aFunctor)
{
  iCpProxy.SetPropertyChanged(aFunctor);
}

void CpProxyOpenhomeOrgSubscriptionLongPoll1::SetPropertyInitialEvent(Functor& aFunctor)
{
  iCpProxy.SetPropertyInitialEvent(aFunctor);
}
void CpProxyOpenhomeOrgSubscriptionLongPoll1::AddProperty(Property* aProperty)
{
  iCpProxy.AddProperty(aProperty);
}

void CpProxyOpenhomeOrgSubscriptionLongPoll1::DestroyService()
{
  iCpProxy.DestroyService();
}

void CpProxyOpenhomeOrgSubscriptionLongPoll1::ReportEvent(Functor aFunctor)
{
  iCpProxy.ReportEvent(aFunctor);
}

TUint CpProxyOpenhomeOrgSubscriptionLongPoll1::Version() const
{
  return iCpProxy.Version();
}
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

// Local Response Normalization. ACROSS_CHANNELS normalizes each value by a
// sum of squares over a window of `size_` adjacent channels; WITHIN_CHANNEL
// implements the same formula spatially by composing split/square/pool/
// power/eltwise sub-layers.

template <typename Dtype>
void LRNLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  size_ = this->layer_param_.lrn_param().local_size();
  CHECK_EQ(size_ % 2, 1) << "LRN only supports odd values for local_size";
  pre_pad_ = (size_ - 1) / 2;
  alpha_ = this->layer_param_.lrn_param().alpha();
  beta_ = this->layer_param_.lrn_param().beta();
  if (this->layer_param_.lrn_param().norm_region() ==
      LRNParameter_NormRegion_WITHIN_CHANNEL) {
    // Set up split_layer_ to use inputs in the numerator and denominator.
    split_top_vec_.clear();
    split_top_vec_.push_back(&product_input_);
    split_top_vec_.push_back(&square_input_);
    LayerParameter split_param;
    split_layer_.reset(new SplitLayer<Dtype>(split_param));
    split_layer_->SetUp(bottom, &split_top_vec_);
    // Set up square_layer_ to square the inputs.
    square_bottom_vec_.clear();
    square_top_vec_.clear();
    square_bottom_vec_.push_back(&square_input_);
    square_top_vec_.push_back(&square_output_);
    LayerParameter square_param;
    square_param.mutable_power_param()->set_power(Dtype(2));
    square_layer_.reset(new PowerLayer<Dtype>(square_param));
    square_layer_->SetUp(square_bottom_vec_, &square_top_vec_);
    // Set up pool_layer_ to sum over square neighborhoods of the input.
    pool_top_vec_.clear();
    pool_top_vec_.push_back(&pool_output_);
    LayerParameter pool_param;
    pool_param.mutable_pooling_param()->set_pool(
        PoolingParameter_PoolMethod_AVE);
    pool_param.mutable_pooling_param()->set_pad(pre_pad_);
    pool_param.mutable_pooling_param()->set_kernel_size(size_);
    pool_layer_.reset(new PoolingLayer<Dtype>(pool_param));
    pool_layer_->SetUp(square_top_vec_, &pool_top_vec_);
    // Set up power_layer_ to compute (1 + alpha_/N^2 s)^-beta_, where s is
    // the sum of a squared neighborhood (the output of pool_layer_).
    power_top_vec_.clear();
    power_top_vec_.push_back(&power_output_);
    LayerParameter power_param;
    power_param.mutable_power_param()->set_power(-beta_);
    power_param.mutable_power_param()->set_scale(alpha_);
    power_param.mutable_power_param()->set_shift(Dtype(1));
    power_layer_.reset(new PowerLayer<Dtype>(power_param));
    power_layer_->SetUp(pool_top_vec_, &power_top_vec_);
    // Set up a product_layer_ to compute outputs by multiplying inputs by the
    // inverse denominator computed by the power layer.
    product_bottom_vec_.clear();
    product_bottom_vec_.push_back(&product_input_);
    product_bottom_vec_.push_back(&power_output_);
    LayerParameter product_param;
    EltwiseParameter* eltwise_param = product_param.mutable_eltwise_param();
    eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD);
    product_layer_.reset(new EltwiseLayer<Dtype>(product_param));
    product_layer_->SetUp(product_bottom_vec_, top);
  }
}

// Cache input dimensions and reshape either the scale buffer
// (ACROSS_CHANNELS) or the composed sub-layers (WITHIN_CHANNEL).
template <typename Dtype>
void LRNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  num_ = bottom[0]->num();
  channels_ = bottom[0]->channels();
  height_ = bottom[0]->height();
  width_ = bottom[0]->width();
  switch (this->layer_param_.lrn_param().norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    (*top)[0]->Reshape(num_, channels_, height_, width_);
    scale_.Reshape(num_, channels_, height_, width_);
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    split_layer_->Reshape(bottom, &split_top_vec_);
    square_layer_->Reshape(square_bottom_vec_, &square_top_vec_);
    pool_layer_->Reshape(square_top_vec_, &pool_top_vec_);
    power_layer_->Reshape(pool_top_vec_, &power_top_vec_);
    product_layer_->Reshape(product_bottom_vec_, top);
    break;
  }
}

// Dispatch on the configured normalization region.
template <typename Dtype>
void LRNLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  switch (this->layer_param_.lrn_param().norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    CrossChannelForward_cpu(bottom, top);
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    WithinChannelForward(bottom, top);
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}

// Computes top = bottom * scale^-beta, where
// scale[c] = 1 + (alpha/size) * sum of bottom^2 over the `size_`-channel
// window centered at c. The per-channel sums are maintained incrementally:
// add the entering channel, subtract the leaving one.
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelForward_cpu(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = (*top)[0]->mutable_cpu_data();
  Dtype* scale_data = scale_.mutable_cpu_data();
  // start with the constant value
  for (int i = 0; i < scale_.count(); ++i) {
    scale_data[i] = 1.;
  }
  // Channel-padded scratch buffer so the sliding window never needs
  // boundary checks (zero padding contributes nothing to the sums).
  Blob<Dtype> padded_square(1, channels_ + size_ - 1, height_, width_);
  Dtype* padded_square_data = padded_square.mutable_cpu_data();
  caffe_set(padded_square.count(), Dtype(0), padded_square_data);
  Dtype alpha_over_size = alpha_ / size_;
  // go through the images
  for (int n = 0; n < num_; ++n) {
    // compute the padded square
    caffe_sqr(channels_ * height_ * width_,
        bottom_data + bottom[0]->offset(n),
        padded_square_data + padded_square.offset(0, pre_pad_));
    // Create the first channel scale
    for (int c = 0; c < size_; ++c) {
      caffe_axpy<Dtype>(height_ * width_, alpha_over_size,
          padded_square_data + padded_square.offset(0, c),
          scale_data + scale_.offset(n, 0));
    }
    for (int c = 1; c < channels_; ++c) {
      // copy previous scale
      caffe_copy<Dtype>(height_ * width_,
          scale_data + scale_.offset(n, c - 1),
          scale_data + scale_.offset(n, c));
      // add head
      caffe_axpy<Dtype>(height_ * width_, alpha_over_size,
          padded_square_data + padded_square.offset(0, c + size_ - 1),
          scale_data + scale_.offset(n, c));
      // subtract tail
      caffe_axpy<Dtype>(height_ * width_, -alpha_over_size,
          padded_square_data + padded_square.offset(0, c - 1),
          scale_data + scale_.offset(n, c));
    }
    // for (int i = 0; i < scale_.count(); ++i) {
    //   if (scale_data[i] < 0 )
    //     LOG(FATAL) << "found negative norm term " << scale_data[i] << " @ " << i;
    // }
  }
  // In the end, compute output
  caffe_powx<Dtype>(scale_.count(), scale_data, -beta_, top_data);
  caffe_mul<Dtype>(scale_.count(), top_data, bottom_data, top_data);
}

// Forward the chain split -> square -> pool -> power -> product.
template <typename Dtype>
void LRNLayer<Dtype>::WithinChannelForward(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  split_layer_->Forward(bottom, &split_top_vec_);
  square_layer_->Forward(square_bottom_vec_, &square_top_vec_);
  pool_layer_->Forward(square_top_vec_, &pool_top_vec_);
  power_layer_->Forward(pool_top_vec_, &power_top_vec_);
  product_layer_->Forward(product_bottom_vec_, top);
}

// Dispatch on the configured normalization region.
template <typename Dtype>
void LRNLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  switch (this->layer_param_.lrn_param().norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    CrossChannelBackward_cpu(top, propagate_down, bottom);
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    WithinChannelBackward(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}

// Analytic gradient of cross-channel LRN: the first term
// top_diff * scale^-beta is direct; the second subtracts the contribution
// of each input to its neighbors' scale terms, accumulated with the same
// sliding-window technique as the forward pass.
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  const Dtype* top_data = top[0]->cpu_data();
  const Dtype* bottom_data = (*bottom)[0]->cpu_data();
  const Dtype* scale_data = scale_.cpu_data();
  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
  Blob<Dtype> padded_ratio(1, channels_ + size_ - 1, height_, width_);
  Blob<Dtype> accum_ratio(1, 1, height_, width_);
  Dtype* padded_ratio_data = padded_ratio.mutable_cpu_data();
  Dtype* accum_ratio_data = accum_ratio.mutable_cpu_data();
  // We hack a little bit by using the diff() to store an additional result
  Dtype* accum_ratio_times_bottom = accum_ratio.mutable_cpu_diff();
  caffe_set(padded_ratio.count(), Dtype(0), padded_ratio_data);
  Dtype cache_ratio_value = 2. * alpha_ * beta_ / size_;
  caffe_powx<Dtype>(scale_.count(), scale_data, -beta_, bottom_diff);
  caffe_mul<Dtype>(scale_.count(), top_diff, bottom_diff, bottom_diff);
  // go through individual data
  int inverse_pre_pad = size_ - (size_ + 1) / 2;
  for (int n = 0; n < num_; ++n) {
    int block_offset = scale_.offset(n);
    // first, compute diff_i * y_i / s_i
    caffe_mul<Dtype>(channels_ * height_ * width_,
        top_diff + block_offset, top_data + block_offset,
        padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad));
    caffe_div<Dtype>(channels_ * height_ * width_,
        padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad),
        scale_data + block_offset,
        padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad));
    // Now, compute the accumulated ratios and the bottom diff
    caffe_set(accum_ratio.count(), Dtype(0), accum_ratio_data);
    for (int c = 0; c < size_ - 1; ++c) {
      caffe_axpy<Dtype>(height_ * width_, 1.,
          padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data);
    }
    for (int c = 0; c < channels_; ++c) {
      caffe_axpy<Dtype>(height_ * width_, 1.,
          padded_ratio_data + padded_ratio.offset(0, c + size_ - 1),
          accum_ratio_data);
      // compute bottom diff
      caffe_mul<Dtype>(height_ * width_,
          bottom_data + top[0]->offset(n, c),
          accum_ratio_data, accum_ratio_times_bottom);
      caffe_axpy<Dtype>(height_ * width_, -cache_ratio_value,
          accum_ratio_times_bottom, bottom_diff + top[0]->offset(n, c));
      caffe_axpy<Dtype>(height_ * width_, -1.,
          padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data);
    }
  }
}

// Backward through the composed sub-layers in reverse order; the eltwise
// product needs gradients for both of its bottoms.
template <typename Dtype>
void LRNLayer<Dtype>::WithinChannelBackward(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  if (propagate_down[0]) {
    vector<bool> product_propagate_down(2, true);
    product_layer_->Backward(top, product_propagate_down,
                             &product_bottom_vec_);
    power_layer_->Backward(power_top_vec_, propagate_down, &pool_top_vec_);
    pool_layer_->Backward(pool_top_vec_, propagate_down, &square_top_vec_);
    square_layer_->Backward(square_top_vec_, propagate_down,
                            &square_bottom_vec_);
    split_layer_->Backward(split_top_vec_, propagate_down, bottom);
  }
}

#ifdef CPU_ONLY
STUB_GPU(LRNLayer);
STUB_GPU_FORWARD(LRNLayer, CrossChannelForward);
STUB_GPU_BACKWARD(LRNLayer, CrossChannelBackward);
#endif

INSTANTIATE_CLASS(LRNLayer);

}  // namespace caffe
/*
 * Copyright (C) 2017-2020 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "shared/source/compiler_interface/compiler_interface.h"
#include "shared/source/device/device.h"
#include "shared/source/helpers/file_io.h"
#include "opencl/test/unit_test/helpers/kernel_binary_helper.h"
#include "opencl/test/unit_test/helpers/test_files.h"
#include "opencl/test/unit_test/mocks/mock_kernel.h"

#include "cl_api_tests.h"

using namespace NEO;

// Fixture that builds the CopyBuffer_simd16 kernel from source so every test
// has a real cl_kernel to query. Parameterised over the
// cl_kernel_work_group_info values listed in paramNames below.
struct clGetKernelWorkGroupInfoTests : public ApiFixture<>,
                                       public ::testing::TestWithParam<uint32_t /*cl_kernel_work_group_info*/> {
    typedef ApiFixture BaseClass;

    // Compiles CopyBuffer_simd16.cl and creates the "CopyBuffer" kernel.
    void SetUp() override {
        BaseClass::SetUp();

        std::unique_ptr<char[]> pSource = nullptr;
        size_t sourceSize = 0;
        std::string testFile;

        // false: use the source build path rather than a prebuilt binary.
        kbHelper = new KernelBinaryHelper("CopyBuffer_simd16", false);
        testFile.append(clFiles);
        testFile.append("CopyBuffer_simd16.cl");
        ASSERT_EQ(true, fileExists(testFile));

        pSource = loadDataFromFile(
            testFile.c_str(),
            sourceSize);
        ASSERT_NE(0u, sourceSize);
        ASSERT_NE(nullptr, pSource);

        const char *sources[1] = {pSource.get()};
        pProgram = clCreateProgramWithSource(
            pContext,
            1,
            sources,
            &sourceSize,
            &retVal);
        EXPECT_NE(nullptr, pProgram);
        ASSERT_EQ(CL_SUCCESS, retVal);

        // Source buffer is no longer needed once the program holds a copy.
        pSource.reset();

        retVal = clBuildProgram(
            pProgram,
            num_devices,
            devices,
            nullptr,
            nullptr,
            nullptr);
        ASSERT_EQ(CL_SUCCESS, retVal);

        kernel = clCreateKernel(pProgram, "CopyBuffer", &retVal);
        ASSERT_EQ(CL_SUCCESS, retVal);
    }

    // Releases the kernel/program created in SetUp.
    void TearDown() override {
        retVal = clReleaseKernel(kernel);
        EXPECT_EQ(CL_SUCCESS, retVal);

        retVal = clReleaseProgram(pProgram);
        EXPECT_EQ(CL_SUCCESS, retVal);
        delete kbHelper;
        BaseClass::TearDown();
    }

    cl_program pProgram = nullptr;
    cl_kernel kernel = nullptr;
    KernelBinaryHelper *kbHelper;
};

namespace ULT {

// Size query (param_value == nullptr) must succeed and report a non-zero
// required size for every supported info enum.
TEST_P(clGetKernelWorkGroupInfoTests, GivenValidParametersWhenGettingKernelWorkGroupInfoThenSuccessIsReturned) {
    size_t paramValueSizeRet;
    retVal = clGetKernelWorkGroupInfo(
        kernel,
        devices[testedRootDeviceIndex],
        GetParam(),
        0,
        nullptr,
        &paramValueSizeRet);

    EXPECT_EQ(CL_SUCCESS, retVal);
    EXPECT_NE(0u, paramValueSizeRet);
}

// CL_KERNEL_SPILL_MEM_SIZE_INTEL must reflect the kernel's scratch space.
TEST_F(clGetKernelWorkGroupInfoTests, GivenKernelRequiringScratchSpaceWhenGettingKernelWorkGroupInfoThenCorrectSpillMemSizeIsReturned) {
    size_t paramValueSizeRet;
    cl_ulong param_value;
    auto pDevice = castToObject<ClDevice>(devices[testedRootDeviceIndex]);

    MockKernelWithInternals mockKernel(*pDevice);
    SPatchMediaVFEState mediaVFEstate;

    mediaVFEstate.PerThreadScratchSpace = 1024; //whatever greater than 0
    mockKernel.kernelInfo.patchInfo.mediavfestate = &mediaVFEstate;

    cl_ulong scratchSpaceSize = static_cast<cl_ulong>(mockKernel.mockKernel->getScratchSize());
    EXPECT_EQ(scratchSpaceSize, 1024u);

    retVal = clGetKernelWorkGroupInfo(
        mockKernel,
        pDevice,
        CL_KERNEL_SPILL_MEM_SIZE_INTEL,
        sizeof(cl_ulong),
        &param_value,
        &paramValueSizeRet);

    EXPECT_EQ(retVal, CL_SUCCESS);
    EXPECT_EQ(paramValueSizeRet, sizeof(cl_ulong));
    EXPECT_EQ(param_value, scratchSpaceSize);
}

// CL_KERNEL_PRIVATE_MEM_SIZE must report the patched private surface size.
TEST_F(clGetKernelWorkGroupInfoTests, givenKernelHavingPrivateMemoryAllocationWhenAskedForPrivateAllocationSizeThenProperSizeIsReturned) {
    size_t paramValueSizeRet;
    cl_ulong param_value;
    auto pDevice = castToObject<ClDevice>(devices[testedRootDeviceIndex]);

    MockKernelWithInternals mockKernel(*pDevice);
    SPatchAllocateStatelessPrivateSurface privateAllocation;

    privateAllocation.PerThreadPrivateMemorySize = 1024;
    mockKernel.kernelInfo.patchInfo.pAllocateStatelessPrivateSurface = &privateAllocation;

    retVal = clGetKernelWorkGroupInfo(
        mockKernel,
        pDevice,
        CL_KERNEL_PRIVATE_MEM_SIZE,
        sizeof(cl_ulong),
        &param_value,
        &paramValueSizeRet);

    EXPECT_EQ(retVal, CL_SUCCESS);
    EXPECT_EQ(paramValueSizeRet, sizeof(cl_ulong));
    EXPECT_EQ(param_value, privateAllocation.PerThreadPrivateMemorySize);
}

// Without a private surface patch the reported private mem size is zero.
TEST_F(clGetKernelWorkGroupInfoTests, givenKernelNotHavingPrivateMemoryAllocationWhenAskedForPrivateAllocationSizeThenZeroIsReturned) {
    size_t paramValueSizeRet;
    cl_ulong param_value;
    auto pDevice = castToObject<ClDevice>(devices[testedRootDeviceIndex]);
    MockKernelWithInternals mockKernel(*pDevice);

    retVal = clGetKernelWorkGroupInfo(
        mockKernel,
        pDevice,
        CL_KERNEL_PRIVATE_MEM_SIZE,
        sizeof(cl_ulong),
        &param_value,
        &paramValueSizeRet);

    EXPECT_EQ(retVal, CL_SUCCESS);
    EXPECT_EQ(paramValueSizeRet, sizeof(cl_ulong));
    EXPECT_EQ(param_value, 0u);
}

// Info enums exercised by the TEST_P size-query test above.
static cl_kernel_work_group_info paramNames[] = {
    CL_KERNEL_WORK_GROUP_SIZE,
    CL_KERNEL_COMPILE_WORK_GROUP_SIZE,
    CL_KERNEL_LOCAL_MEM_SIZE,
    CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
    CL_KERNEL_SPILL_MEM_SIZE_INTEL,
    CL_KERNEL_PRIVATE_MEM_SIZE};

INSTANTIATE_TEST_CASE_P(
    api,
    clGetKernelWorkGroupInfoTests,
    testing::ValuesIn(paramNames));
} // namespace ULT
// ------------------------------------------------------------------------------------------------- // Copyright 2016 - NumScale SAS // // Distributed under the Boost Software License, Version 1.0. // See accompanying file LICENSE.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // ------------------------------------------------------------------------------------------------- #include <simd_bench.hpp> #include <boost/simd/function/simd/expm1.hpp> #include <boost/simd/pack.hpp> namespace nsb = ns::bench; namespace bs = boost::simd; DEFINE_SIMD_BENCH(simd_expm1, bs::expm1); DEFINE_BENCH_MAIN() { nsb::for_each<simd_expm1, NS_BENCH_IEEE_TYPES>(-10, 10); }
#ifndef P2P_AbstractValue_hxx
#define P2P_AbstractValue_hxx

#include "Signable.hxx"

namespace p2p
{

// Base interface for a stored (kind, generation) value that can be signed.
class AbstractValue : public Signable
{
   public:
      // Kind code identifying the type of this value.
      int getKind() const;

      // Generation counter of this value.
      UInt64 getGeneration() const;

      // NOTE(review): a setter declared const cannot modify the object —
      // the const qualifier looks unintended; confirm against the .cxx.
      void setKind(int kind) const;

      // NOTE(review): named "get" yet takes a value and returns void — this
      // appears to be a misnamed setGeneration(); confirm against the .cxx
      // before relying on it.
      void getGeneration(UInt64 generation) const;
};

} // p2p

#endif // P2P_AbstractValue_hxx

/* ======================================================================
 *  Copyright (c) 2008, Various contributors to the Resiprocate project
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *      - Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 *      - The names of the project's contributors may not be used to
 *        endorse or promote products derived from this software without
 *        specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 *  IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *  THE POSSIBILITY OF SUCH DAMAGE.
*====================================================================== */
// This class implements a 8 bit unsigned DAC #ifndef DACSOUND8U_HH #define DACSOUND8U_HH #include "DACSound16S.hh" namespace openmsx { class DACSound8U final : public DACSound16S { public: DACSound8U(string_view name, string_view desc, const DeviceConfig& config); void writeDAC(uint8_t value, EmuTime::param time); }; } // namespace openmsx #endif
#include<iostream>
using namespace std;

// Sum of the decimal digits of n.
int sumdigits(int n)
{
    int total = 0;
    for (; n != 0; n /= 10)
        total += n % 10;
    return total;
}

// Print successive digit sums of n until a single digit remains
// (each intermediate sum is printed on its own line).
void g(int n)
{
    if (n / 10 == 0)
    {
        cout << n << endl;
    }
    else
    {
        cout << sumdigits(n) << endl;
        g(sumdigits(n));
    }
}

// Read numbers until 0 is entered; for each, print its digit-sum chain.
int main()
{
    int n;
    while (true)
    {
        cin >> n;
        if (n == 0)
            break;
        g(n);
    }
    return 0;
}
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "base/path_service.h" #include "chrome/browser/extensions/extension_apitest.h" #include "content/public/common/content_paths.h" #include "net/base/test_data_directory.h" #include "net/dns/mock_host_resolver.h" IN_PROC_BROWSER_TEST_F(ExtensionApiTest, WebSocket) { ASSERT_TRUE(StartWebSocketServer(net::GetWebSocketTestDataDirectory())); ASSERT_TRUE(RunExtensionTest("websocket")) << message_; }
#include<bits/stdc++.h>
#define boost ios_base::sync_with_stdio(0);cin.tie(0);cout.tie(0);
#define ll long long
#define deb(x) cout << #x << "=" << x << endl
using namespace std;

//Height of a Binary Tree

// A binary tree node holding an int value and two child links.
struct Node
{
    int data;
    struct Node* left;
    struct Node* right;

    Node(int x)
    {
        data = x;
        left = right = NULL;
    }
};

// Height counted in nodes: empty tree -> 0, single node -> 1.
int height(Node* root)
{
    if (root == NULL)
        return 0;
    return 1 + max(height(root->left), height(root->right));
}

int main()
{
    boost;
    return 0;
}
// // Created by Electrux Redsworth on 9/8/17. // #ifndef HASHTABLEIMPLEMENTATION_HASHKEY_HPP #define HASHTABLEIMPLEMENTATION_HASHKEY_HPP #include <string> #include "Constants.hpp" #include "TemplateDefs.hpp" namespace Electrux { // Class definition for Hashkey. template < typename T > class HashKey { COUNTTYPE table; T key; // Generate the hash value from the member variable - key. void GenHash() { hashval = GetHash( key ) % TABLE_SIZE; } public: // Member to store the hash value so that it is not needed to find again // and again, thereby saving time. int hashval; // Constructor for setting the default table to -1. HashKey() { table = -1; } // Constructor which sets the key as wll. HashKey( T key ) { table = -1; this->key = key; this->GenHash(); } // Fetch the key. T GetKey() const { return key; } // Fetch the table in which this key resides. COUNTTYPE GetTable() const { return table; } // Overloaded operator which returns the hash value. // This enables the (semi) Generalization of the HashKey class. COUNTTYPE operator ()() const { return hashval; } // Sets the key. void SetKey( T key ) { this->key = key; this->GenHash(); } // Set the table of the key instance. void SetTable( int table ) { this->table = table; } // Set the key using assignment operator. void operator =( T key ) { this->table = -1; this->key = key; this->GenHash(); } }; } #endif //HASHTABLEIMPLEMENTATION_HASHKEY_HPP
#include "../../include/EnemyTypes/Mimic.h"

// Mimic: modest damage, large coin reward.
Mimic::Mimic()
{
	name = "Mimic";
	ExperienceAmount = 10;
	CoinsDrop = 150 + rand() % 51;   // 150..200 coins
}

EnemyType Mimic::GetType()
{
	return etMimic;
}

// Regular attack: 3..9 damage.
int Mimic::ReturnDamage()
{
	return 3 + rand() % 7;
}

// Risky attack: even odds of a heavy hit (13) or a weak one (2).
int Mimic::ReturnRiskAttackDamage()
{
	return (rand() % 2 == 0) ? 13 : 2;
}

// Heal amount: 2..10.
int Mimic::ReturnHealAmount()
{
	return 2 + rand() % 9;
}

std::string Mimic::GetIntro()
{
	return "Is this your lucky day??";
}
/* * Copyright (C) 2014, 2018 Pavel Kirienko <pavel.kirienko@gmail.com> * Kinetis Port Author David Sidrane <david_s5@nscdg.com> */ #pragma once #include <uavcan_kinetis/build_config.hpp> #if UAVCAN_KINETIS_NUTTX # include <nuttx/arch.h> # include "up_arch.h" # include <arch/board/board.h> # include <hardware/kinetis_pit.h> # include <hardware/kinetis_sim.h> # include <syslog.h> #else # error "Unknown OS" #endif /** * Debug output */ #ifndef UAVCAN_KINETIS_LOG # if 1 # define UAVCAN_KINETIS_LOG(fmt, ...) syslog(LOG_INFO, "uavcan_kinetis: " fmt "\n", ## __VA_ARGS__) # else # define UAVCAN_KINETIS_LOG(...) ((void)0) # endif #endif /** * IRQ handler macros */ #define UAVCAN_KINETIS_IRQ_HANDLER(id) int id(int irq, FAR void* context, FAR void *arg) /** * Glue macros */ #define UAVCAN_KINETIS_GLUE2_(A, B) A ## B #define UAVCAN_KINETIS_GLUE2(A, B) UAVCAN_KINETIS_GLUE2_(A, B) #define UAVCAN_KINETIS_GLUE3_(A, B, C) A ## B ## C #define UAVCAN_KINETIS_GLUE3(A, B, C) UAVCAN_KINETIS_GLUE3_(A, B, C) namespace uavcan_kinetis { #if UAVCAN_KINETIS_NUTTX struct CriticalSectionLocker { const irqstate_t flags_; CriticalSectionLocker() : flags_(enter_critical_section()) { } ~CriticalSectionLocker() { leave_critical_section(flags_); } }; #endif namespace clock { uavcan::uint64_t getUtcUSecFromCanInterrupt(); } }
// Copyright (c) 2014-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "chain.h"
#include "random.h"
#include "util.h"
#include "test/test_mia.h"

#include <vector>

#include <boost/test/unit_test.hpp>

#define SKIPLIST_LENGTH 300000

BOOST_FIXTURE_TEST_SUITE(skiplist_tests, BasicTestingSetup)

BOOST_AUTO_TEST_CASE(skiplist_test)
{
    // Build a SKIPLIST_LENGTH-block chain where every index links its
    // predecessor and builds a skip pointer.
    std::vector<CBlockIndex> vIndex(SKIPLIST_LENGTH);

    for (int i=0; i<SKIPLIST_LENGTH; i++) {
        vIndex[i].nHeight = i;
        vIndex[i].pprev = (i == 0) ? NULL : &vIndex[i - 1];
        vIndex[i].BuildSkip();
    }

    // Every skip pointer must land on the ancestor at its own recorded
    // height, which is strictly below the current height; genesis has none.
    for (int i=0; i<SKIPLIST_LENGTH; i++) {
        if (i > 0) {
            BOOST_CHECK(vIndex[i].pskip == &vIndex[vIndex[i].pskip->nHeight]);
            BOOST_CHECK(vIndex[i].pskip->nHeight < i);
        } else {
            BOOST_CHECK(vIndex[i].pskip == NULL);
        }
    }

    // Random GetAncestor() queries must resolve to the exact index entry.
    for (int i=0; i < 1000; i++) {
        int from = insecure_rand() % (SKIPLIST_LENGTH - 1);
        int to = insecure_rand() % (from + 1);

        BOOST_CHECK(vIndex[SKIPLIST_LENGTH - 1].GetAncestor(from) == &vIndex[from]);
        BOOST_CHECK(vIndex[from].GetAncestor(to) == &vIndex[to]);
        BOOST_CHECK(vIndex[from].GetAncestor(0) == &vIndex[0]);
    }
}

BOOST_AUTO_TEST_CASE(getlocator_test)
{
    // Build a main chain 100000 blocks long.
    std::vector<uint256> vHashMain(100000);
    std::vector<CBlockIndex> vBlocksMain(100000);
    for (unsigned int i=0; i<vBlocksMain.size(); i++) {
        vHashMain[i] = ArithToUint256(i); // Set the hash equal to the height, so we can quickly check the distances.
        vBlocksMain[i].nHeight = i;
        vBlocksMain[i].pprev = i ? &vBlocksMain[i - 1] : NULL;
        vBlocksMain[i].phashBlock = &vHashMain[i];
        vBlocksMain[i].BuildSkip();
        BOOST_CHECK_EQUAL((int)UintToArith256(vBlocksMain[i].GetBlockHash()).GetLow64(), vBlocksMain[i].nHeight);
        BOOST_CHECK(vBlocksMain[i].pprev == NULL || vBlocksMain[i].nHeight == vBlocksMain[i].pprev->nHeight + 1);
    }

    // Build a branch that splits off at block 49999, 50000 blocks long.
    std::vector<uint256> vHashSide(50000);
    std::vector<CBlockIndex> vBlocksSide(50000);
    for (unsigned int i=0; i<vBlocksSide.size(); i++) {
        vHashSide[i] = ArithToUint256(i + 50000 + (arith_uint256(1) << 128)); // Add 1<<128 to the hashes, so GetLow64() still returns the height.
        vBlocksSide[i].nHeight = i + 50000;
        vBlocksSide[i].pprev = i ? &vBlocksSide[i - 1] : &vBlocksMain[49999];
        vBlocksSide[i].phashBlock = &vHashSide[i];
        vBlocksSide[i].BuildSkip();
        BOOST_CHECK_EQUAL((int)UintToArith256(vBlocksSide[i].GetBlockHash()).GetLow64(), vBlocksSide[i].nHeight);
        BOOST_CHECK(vBlocksSide[i].pprev == NULL || vBlocksSide[i].nHeight == vBlocksSide[i].pprev->nHeight + 1);
    }

    // Build a CChain for the main branch.
    CChain chain;
    chain.SetTip(&vBlocksMain.back());

    // Test 100 random starting points for locators.
    for (int n=0; n<100; n++) {
        int r = insecure_rand() % 150000;
        CBlockIndex* tip = (r < 100000) ? &vBlocksMain[r] : &vBlocksSide[r - 100000];
        CBlockLocator locator = chain.GetLocator(tip);

        // The first result must be the block itself, the last one must be genesis.
        BOOST_CHECK(locator.vHave.front() == tip->GetBlockHash());
        BOOST_CHECK(locator.vHave.back() == vBlocksMain[0].GetBlockHash());

        // Entries 1 through 11 (inclusive) go back one step each.
        for (unsigned int i = 1; i < 12 && i < locator.vHave.size() - 1; i++) {
            BOOST_CHECK_EQUAL(UintToArith256(locator.vHave[i]).GetLow64(), tip->nHeight - i);
        }

        // The further ones (excluding the last one) go back with exponential steps.
        unsigned int dist = 2;
        for (unsigned int i = 12; i < locator.vHave.size() - 1; i++) {
            BOOST_CHECK_EQUAL(UintToArith256(locator.vHave[i - 1]).GetLow64() - UintToArith256(locator.vHave[i]).GetLow64(), dist);
            dist *= 2;
        }
    }
}

BOOST_AUTO_TEST_SUITE_END()
// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/common/extensions/user_script.h"

#include "base/pickle.h"
#include "base/string_util.h"

namespace {

// True if |url| matches at least one pattern in |patterns|.
bool UrlMatchesPatterns(const UserScript::PatternList* patterns,
                        const GURL& url) {
  for (UserScript::PatternList::const_iterator pattern = patterns->begin();
       pattern != patterns->end(); ++pattern) {
    if (pattern->MatchesUrl(url))
      return true;
  }
  return false;
}

// True if |url|'s spec matches at least one glob in |globs|.
bool UrlMatchesGlobs(const std::vector<std::string>* globs,
                     const GURL& url) {
  for (std::vector<std::string>::const_iterator glob = globs->begin();
       glob != globs->end(); ++glob) {
    if (MatchPattern(url.spec(), *glob))
      return true;
  }
  return false;
}

}  // namespace

// static
const char UserScript::kFileExtension[] = ".user.js";

// static
const int UserScript::kValidUserScriptSchemes =
    URLPattern::SCHEME_HTTP |
    URLPattern::SCHEME_HTTPS |
    URLPattern::SCHEME_FILE |
    URLPattern::SCHEME_FTP;

bool UserScript::HasUserScriptFileExtension(const GURL& url) {
  return EndsWith(url.ExtractFileName(), kFileExtension, false);
}

bool UserScript::HasUserScriptFileExtension(const FilePath& path) {
  static FilePath extension(FilePath().AppendASCII(kFileExtension));
  return EndsWith(path.BaseName().value(), extension.value(), false);
}

UserScript::File::File(const FilePath& extension_root,
                       const FilePath& relative_path,
                       const GURL& url)
    : extension_root_(extension_root),
      relative_path_(relative_path),
      url_(url) {
}

UserScript::File::File() {}

UserScript::File::~File() {}

UserScript::UserScript()
    : run_location_(DOCUMENT_IDLE), emulate_greasemonkey_(false),
      match_all_frames_(false), incognito_enabled_(false),
      allow_file_access_(false) {
}

UserScript::~UserScript() {
}

void UserScript::add_url_pattern(const URLPattern& pattern) {
  url_patterns_.push_back(pattern);
}

void UserScript::clear_url_patterns() { url_patterns_.clear(); }

// A URL matches when it passes every configured filter: at least one URL
// pattern (if any are set), at least one include glob (if any are set),
// and no exclude glob.
bool UserScript::MatchesUrl(const GURL& url) const {
  if (url_patterns_.size() > 0) {
    if (!UrlMatchesPatterns(&url_patterns_, url))
      return false;
  }

  if (globs_.size() > 0) {
    if (!UrlMatchesGlobs(&globs_, url))
      return false;
  }

  if (exclude_globs_.size() > 0) {
    if (UrlMatchesGlobs(&exclude_globs_, url))
      return false;
  }

  return true;
}

void UserScript::File::Pickle(::Pickle* pickle) const {
  pickle->WriteString(url_.spec());
  // Do not write path. It's not needed in the renderer.
  // Do not write content. It will be serialized by other means.
}

void UserScript::File::Unpickle(const ::Pickle& pickle, void** iter) {
  // Read url.
  std::string url;
  CHECK(pickle.ReadString(iter, &url));
  set_url(GURL(url));
}

// NOTE: the write order here must stay in lockstep with Unpickle() below —
// the stream is positional with no field tags.
void UserScript::Pickle(::Pickle* pickle) const {
  // Write simple types.
  pickle->WriteInt(run_location());
  pickle->WriteString(extension_id());
  pickle->WriteBool(emulate_greasemonkey());
  pickle->WriteBool(match_all_frames());
  pickle->WriteBool(is_incognito_enabled());
  pickle->WriteBool(allow_file_access());

  // Write globs.
  std::vector<std::string>::const_iterator glob;
  pickle->WriteSize(globs_.size());
  for (glob = globs_.begin(); glob != globs_.end(); ++glob) {
    pickle->WriteString(*glob);
  }
  pickle->WriteSize(exclude_globs_.size());
  for (glob = exclude_globs_.begin(); glob != exclude_globs_.end(); ++glob) {
    pickle->WriteString(*glob);
  }

  // Write url patterns.
  pickle->WriteSize(url_patterns_.size());
  for (PatternList::const_iterator pattern = url_patterns_.begin();
       pattern != url_patterns_.end(); ++pattern) {
    pickle->WriteInt(pattern->valid_schemes());
    pickle->WriteString(pattern->GetAsString());
  }

  // Write js scripts.
  pickle->WriteSize(js_scripts_.size());
  for (FileList::const_iterator file = js_scripts_.begin();
       file != js_scripts_.end(); ++file) {
    file->Pickle(pickle);
  }

  // Write css scripts.
  pickle->WriteSize(css_scripts_.size());
  for (FileList::const_iterator file = css_scripts_.begin();
       file != css_scripts_.end(); ++file) {
    file->Pickle(pickle);
  }
}

// Deserializes in the exact order written by Pickle(); every read is
// CHECKed because the data comes from the trusted browser process.
void UserScript::Unpickle(const ::Pickle& pickle, void** iter) {
  // Read the run location.
  int run_location = 0;
  CHECK(pickle.ReadInt(iter, &run_location));
  CHECK(run_location >= 0 && run_location < RUN_LOCATION_LAST);
  run_location_ = static_cast<RunLocation>(run_location);

  CHECK(pickle.ReadString(iter, &extension_id_));
  CHECK(pickle.ReadBool(iter, &emulate_greasemonkey_));
  CHECK(pickle.ReadBool(iter, &match_all_frames_));
  CHECK(pickle.ReadBool(iter, &incognito_enabled_));
  CHECK(pickle.ReadBool(iter, &allow_file_access_));

  // Read globs.
  size_t num_globs = 0;
  CHECK(pickle.ReadSize(iter, &num_globs));
  globs_.clear();
  for (size_t i = 0; i < num_globs; ++i) {
    std::string glob;
    CHECK(pickle.ReadString(iter, &glob));
    globs_.push_back(glob);
  }

  CHECK(pickle.ReadSize(iter, &num_globs));
  exclude_globs_.clear();
  for (size_t i = 0; i < num_globs; ++i) {
    std::string glob;
    CHECK(pickle.ReadString(iter, &glob));
    exclude_globs_.push_back(glob);
  }

  // Read url patterns.
  size_t num_patterns = 0;
  CHECK(pickle.ReadSize(iter, &num_patterns));

  url_patterns_.clear();
  for (size_t i = 0; i < num_patterns; ++i) {
    int valid_schemes;
    CHECK(pickle.ReadInt(iter, &valid_schemes));

    std::string pattern_str;
    URLPattern pattern(valid_schemes);
    CHECK(pickle.ReadString(iter, &pattern_str));
    CHECK(URLPattern::PARSE_SUCCESS == pattern.Parse(pattern_str));
    url_patterns_.push_back(pattern);
  }

  // Read js scripts.
  size_t num_js_files = 0;
  CHECK(pickle.ReadSize(iter, &num_js_files));
  js_scripts_.clear();
  for (size_t i = 0; i < num_js_files; ++i) {
    File file;
    file.Unpickle(pickle, iter);
    js_scripts_.push_back(file);
  }

  // Read css scripts.
  size_t num_css_files = 0;
  CHECK(pickle.ReadSize(iter, &num_css_files));
  css_scripts_.clear();
  for (size_t i = 0; i < num_css_files; ++i) {
    File file;
    file.Unpickle(pickle, iter);
    css_scripts_.push_back(file);
  }
}
/* * This file is part of the Simutrans-Extended project under the Artistic License. * (see LICENSE.txt) */ #include <stdio.h> #include <windows.h> #include <mmsystem.h> #include "sound.h" /* * Hajo: flag if sound module should be used * with Win32 the number+1 of the device used */ static int use_sound = 0; /* this list contains all the samples */ static void *samples[1024]; static int sample_number = 0; /** * Sound initialisation routine */ bool dr_init_sound() { use_sound = 1; return true; } /** * loads a single sample * @return a handle for that sample or -1 on failure * @author Hj. Malthaner */ int dr_load_sample(char const* filename) { if(use_sound && sample_number>=0 && sample_number < 1024) { if (FILE* const fIn = fopen(filename, "rb")) { long len; fseek( fIn, 0, SEEK_END ); len = ftell( fIn ); if(len>0) { samples[sample_number] = GlobalLock( GlobalAlloc( GMEM_MOVEABLE, (len+4)&0x7FFFFFFCu ) ); rewind( fIn ); fread( samples[sample_number], len, 1, fIn ); fclose( fIn ); return sample_number++; } } } return -1; } /** * plays a sample * @param key the key for the sample to be played * @author Hj. Malthaner */ void dr_play_sample(int sample_number, int volume) { if(use_sound!=0 && sample_number>=0 /*&& sample_number<64 */ && volume>1) { // Too late: DirectSound is deprecated. /* TODO: Use DirectSound to render the sound in Windows 32-bit * so as to enable better quality sound playback without * interfering with system volume levels. HWND this_window = NULL; bool mixing = true; GetFocusWindow(this_window, &mixing); SetCooperativeLevel(this_window, DSSCL_NORMAL);*/ static int oldvol = -1; volume = (volume<<8)-1; if(oldvol!=volume) { long vol = (volume<<16)|volume; waveOutSetVolume( 0, vol ); oldvol = volume; } UINT flags = SND_MEMORY | SND_ASYNC | SND_NODEFAULT; // Terminate the current sound, if not already the requested one. 
static int last_sample_nr = -1; if (last_sample_nr == sample_number) flags |= SND_NOSTOP; last_sample_nr = sample_number; sndPlaySound(static_cast<TCHAR const*>(samples[sample_number]), flags); } }
/* * DragonMotorControllerFactory.cpp * */ #include <iostream> #include <map> #include <string> #include <subsys/IMechanism.h> #include <hw/factories/DragonMotorControllerFactory.h> #include <hw/usages/MotorControllerUsage.h> #include <xmlhw/MotorDefn.h> #include <hw/DragonTalon.h> #include <hw/DragonSparkMax.h> #include <utils/Logger.h> #include <ctre/phoenix/MotorControl/CAN/TalonSRX.h> #include <ctre/phoenix/MotorControl/FeedbackDevice.h> #include <rev/CANSparkMax.h> using namespace std; DragonMotorControllerFactory* DragonMotorControllerFactory::m_instance = nullptr; DragonMotorControllerFactory* DragonMotorControllerFactory::GetInstance() { if ( DragonMotorControllerFactory::m_instance == nullptr ) { DragonMotorControllerFactory::m_instance = new DragonMotorControllerFactory(); } return DragonMotorControllerFactory::m_instance; } DragonMotorControllerFactory::DragonMotorControllerFactory() { for ( auto inx=0; inx<63; ++inx ) { m_canControllers[inx] = nullptr; } MotorControllerUsage::GetInstance(); CreateTypeMap(); } //======================================================================================= // Method: CreateMotorController // Description: Create a motor controller from the inputs // Returns: Void //======================================================================================= shared_ptr<IDragonMotorController> DragonMotorControllerFactory::CreateMotorController ( string mtype, int canID, int pdpID, string usage, bool inverted, bool sensorInverted, ctre::phoenix::motorcontrol::FeedbackDevice feedbackDevice, int countsPerRev, float gearRatio, bool brakeMode, int slaveTo, int peakCurrentDuration, int continuousCurrentLimit, int peakCurrentLimit, bool enableCurrentLimit ) { shared_ptr<IDragonMotorController> controller; auto hasError = false; auto type = m_typeMap.find(mtype)->second; if ( type == MOTOR_TYPE::TALONSRX ) { // TODO:: set PDP ID auto talon = new DragonTalon( MotorControllerUsage::GetInstance()->GetUsage(usage), canID, pdpID, 
countsPerRev, gearRatio ); talon->EnableBrakeMode( brakeMode ); talon->Invert( inverted ); talon->SetSensorInverted( sensorInverted ); talon->ConfigSelectedFeedbackSensor( feedbackDevice, 0, 50 ); talon->ConfigSelectedFeedbackSensor( feedbackDevice, 1, 50 ); talon->ConfigPeakCurrentLimit( peakCurrentLimit, 50 ); talon->ConfigPeakCurrentDuration( peakCurrentDuration, 50 ); talon->ConfigContinuousCurrentLimit( continuousCurrentLimit, 50 ); talon->EnableCurrentLimiting( enableCurrentLimit ); if ( slaveTo > -1 ) { talon->SetAsSlave( slaveTo ); } controller.reset( talon ); } else if ( type == MOTOR_TYPE::BRUSHED_SPARK_MAX || type == MOTOR_TYPE::BRUSHLESS_SPARK_MAX ) { auto brushedBrushless = (type == MOTOR_TYPE::BRUSHED_SPARK_MAX) ? rev::CANSparkMax::MotorType::kBrushed : rev::CANSparkMax::MotorType::kBrushless; auto smax = new DragonSparkMax( canID, pdpID, MotorControllerUsage::GetInstance()->GetUsage(usage), brushedBrushless, gearRatio ); smax->Invert( inverted ); smax->EnableBrakeMode( brakeMode ); smax->InvertEncoder( sensorInverted ); smax->EnableCurrentLimiting( enableCurrentLimit ); smax->SetSmartCurrentLimiting( continuousCurrentLimit ); if ( slaveTo > -1 ) { DragonSparkMax* master = nullptr; if ( GetController( slaveTo ) != nullptr ) { master = dynamic_cast<DragonSparkMax*>( GetController( slaveTo ).get() ); } if ( master != nullptr ) { smax->Follow( master ); } else { string msg = "invalid Slave to ID "; msg += to_string( slaveTo ); Logger::GetLogger()->LogError( "DragonMotorControllerFactory::CreateMotorController", msg ); } } controller.reset( smax ); } else { hasError = true; } if ( !hasError ) { m_canControllers[ canID ] = controller; } return controller; } //======================================================================================= // Method: GetController // Description: return motor controller // Returns: IDragonMotorController* may be nullptr if there isn't a controller // with this CAN ID. 
//======================================================================================= shared_ptr<IDragonMotorController> DragonMotorControllerFactory::GetController ( int canID /// Motor controller CAN ID ) const { shared_ptr<IDragonMotorController> controller; if ( canID > -1 && canID < 63 ) { controller = m_canControllers[ canID ]; } else { string msg = "invalid CAN ID "; msg += to_string( canID ); Logger::GetLogger()->LogError( "DragonMotorControllerFactory::GetController", msg ); } return controller; } void DragonMotorControllerFactory::CreateTypeMap() { m_typeMap["TALONSRX"] = DragonMotorControllerFactory::MOTOR_TYPE::TALONSRX; m_typeMap["BRUSHLESS_SPARK_MAX"] = DragonMotorControllerFactory::MOTOR_TYPE::BRUSHLESS_SPARK_MAX; m_typeMap["BRUSHED_SPARK_MAX"] = DragonMotorControllerFactory::MOTOR_TYPE::BRUSHED_SPARK_MAX; }
/**
 * @file rw_select_insert.cpp Checks changes of COM_SELECT and COM_INSERT after queris to check if RWSplit sends queries to master or to slave depending on if it is write or read only query
 * - connect to RWSplit, create table
 * - execute SELECT using RWSplit
 * - check COM_SELECT and COM_INSERT change on all nodes
 * - execute INSERT using RWSplit
 * - check COM_SELECT and COM_INSERT change on all nodes
 * - repeat previous steps one more time (now SELECT extracts real date, in the first case table was empty)
 * - execute SELECT 100 times, check COM_SELECT and COM_INSERT after every query (tolerate 2*N+1 queries)
 * - execute INSERT 100 times, check COM_SELECT and COM_INSERT after every query (tolerate 2*N+1 queries)
 */

#include "testconnections.h"
#include "get_com_select_insert.h"
#include "maxadmin_operations.h"

/**
 * @brief check_com_select Checks if COM_SELECT increase takes place only on one slave node and there is no COM_INSERT increase
 * @param new_selects COM_SELECT after query
 * @param new_inserts COM_INSERT after query
 * @param selects COM_SELECT before query
 * @param inserts COM_INSERT before query
 * @param Nodes pointer to Mariadb_nodes object that contains references to Master/Slave setup
 * @param expected expected total COM_SELECT increase across all nodes
 * @return 0 if COM_SELECT increased only on slave node and there is no COM_INSERT increase anywhere
 */
int check_com_select(long int *new_selects, long int *new_inserts, long int *selects, long int *inserts,
                     Mariadb_nodes * Nodes, int expected)
{
    int i;
    int result = 0;
    int sum_selects = 0;
    int NodesNum = Nodes->N;

    // Node 0 is the master: a read-only query must not be routed there.
    if (new_selects[0] - selects[0] != 0)
    {
        result = 1;
        // FIX: the message previously claimed COM_INSERT increased, but this
        // branch detects a COM_SELECT increase on the master
        printf("SELECT query executed, but COM_SELECT increased on master\n");
    }

    for (i = 0; i < NodesNum; i++)
    {
        if (new_inserts[i] - inserts[i] != 0)
        {
            result = 1;
            printf("SELECT query executed, but COM_INSERT increased\n");
        }

        int diff = new_selects[i] - selects[i];
        sum_selects += diff;

        // Roll the "before" counters forward for the next check.
        selects[i] = new_selects[i];
        inserts[i] = new_inserts[i];
    }

    if (sum_selects != expected)
    {
        printf("Expected %d SELECT queries executed, got %d\n", expected, sum_selects);
        result = 1;
    }

    if (result)
    {
        printf("COM_SELECT increase FAIL\n");
    }

    return result;
}

/**
 * @brief Checks if COM_INSERT increase takes places on all nodes and there is no COM_SELECT increase
 * @param new_selects COM_SELECT after query
 * @param new_inserts COM_INSERT after query
 * @param selects COM_SELECT before query
 * @param inserts COM_INSERT before query
 * @param Nodes pointer to Mariadb_nodes object that contains references to Master/Slave setup
 * @param expected expected COM_INSERT increase on the master node
 * @return 0 if COM_INSERT increases on all nodes and there is no COM_SELECT increate anywhere
 */
int check_com_insert(long int *new_selects, long int *new_inserts, long int *selects, long int *inserts,
                     Mariadb_nodes * Nodes, int expected)
{
    int result = 0;
    // Only the master (node 0) is inspected: writes go there and replicate.
    int diff_ins = new_inserts[0] - inserts[0];
    int diff_sel = new_selects[0] - selects[0];

    if (diff_ins == 0)
    {
        result = 1;
        printf("INSERT query executed, but COM_INSERT did not increase\n");
    }

    if (diff_sel != 0)
    {
        printf("INSERT query executed, but COM_SELECT increase is %d\n", diff_sel);
        result = 1;
    }

    // Roll the "before" counters forward for the next check.
    selects[0] = new_selects[0];
    inserts[0] = new_inserts[0];

    if (diff_ins != expected)
    {
        printf("Expected %d INSERT queries executed, got %d\n", expected, diff_ins);
        result = 1;
    }

    if (result)
    {
        printf("COM_INSERT increase FAIL\n");
    }

    return result;
}

int main(int argc, char *argv[])
{
    long int selects[256];
    long int inserts[256];
    long int new_selects[256];
    long int new_inserts[256];
    int silent = 1;
    int i;

    TestConnections * Test = new TestConnections(argc, argv);
    Test->set_timeout(120);
    Test->repl->connect();

    Test->tprintf("Connecting to RWSplit %s\n", Test->maxscales->IP[0]);
    Test->maxscales->connect_rwsplit(0);

    // Stop monitoring so the counters are not disturbed by monitor queries.
    for (i = 0; i < Test->maxscales->N; i++)
    {
        Test->maxscales->execute_maxadmin_command(i, (char *) "shutdown monitor MySQL-Monitor");
    }

    get_global_status_allnodes(&selects[0], &inserts[0], Test->repl, silent);
    Test->tprintf("Creating table t1\n");
    fflush(stdout);
    Test->try_query(Test->maxscales->conn_rwsplit[0], "DROP TABLE IF EXISTS t1;");
    Test->try_query(Test->maxscales->conn_rwsplit[0], "create table t1 (x1 int);");
    Test->repl->sync_slaves();

    // Round 1: single SELECT against the (empty) table.
    printf("Trying SELECT * FROM t1\n");
    fflush(stdout);
    get_global_status_allnodes(&selects[0], &inserts[0], Test->repl, silent);
    Test->try_query(Test->maxscales->conn_rwsplit[0], "select * from t1;");
    get_global_status_allnodes(&new_selects[0], &new_inserts[0], Test->repl, silent);
    Test->add_result(check_com_select(&new_selects[0], &new_inserts[0], &selects[0], &inserts[0],
                                      Test->repl, 1),
                     "Wrong check_com_select result\n");

    // Round 1: single INSERT.
    printf("Trying INSERT INTO t1 VALUES(1);\n");
    fflush(stdout);
    get_global_status_allnodes(&selects[0], &inserts[0], Test->repl, silent);
    Test->try_query(Test->maxscales->conn_rwsplit[0], "insert into t1 values(1);");
    get_global_status_allnodes(&new_selects[0], &new_inserts[0], Test->repl, silent);
    Test->add_result(check_com_insert(&new_selects[0], &new_inserts[0], &selects[0], &inserts[0],
                                      Test->repl, 1),
                     "Wrong check_com_insert result\n");

    Test->stop_timeout();
    Test->repl->sync_slaves();

    // Round 2: repeat now that the table contains data.
    printf("Trying SELECT * FROM t1\n");
    fflush(stdout);
    get_global_status_allnodes(&selects[0], &inserts[0], Test->repl, silent);
    execute_query(Test->maxscales->conn_rwsplit[0], "select * from t1;");
    get_global_status_allnodes(&new_selects[0], &new_inserts[0], Test->repl, silent);
    Test->add_result(check_com_select(&new_selects[0], &new_inserts[0], &selects[0], &inserts[0],
                                      Test->repl, 1),
                     "Wrong check_com_select result\n");

    printf("Trying INSERT INTO t1 VALUES(1);\n");
    fflush(stdout);
    get_global_status_allnodes(&selects[0], &inserts[0], Test->repl, silent);
    execute_query(Test->maxscales->conn_rwsplit[0], "insert into t1 values(1);");
    get_global_status_allnodes(&new_selects[0], &new_inserts[0], Test->repl, silent);
    Test->add_result(check_com_insert(&new_selects[0], &new_inserts[0], &selects[0], &inserts[0],
                                      Test->repl, 1),
                     "Wrong check_com_insert result\n");

    Test->stop_timeout();
    Test->repl->sync_slaves();

    // Bulk round: 100 SELECTs, counters checked once at the end.
    Test->tprintf("Doing 100 selects\n");
    get_global_status_allnodes(&selects[0], &inserts[0], Test->repl, silent);
    for (i = 0; i < 100; i++)
    {
        Test->set_timeout(20);
        Test->try_query(Test->maxscales->conn_rwsplit[0], "select * from t1;");
    }
    Test->stop_timeout();
    Test->repl->sync_slaves();
    get_global_status_allnodes(&new_selects[0], &new_inserts[0], Test->repl, silent);
    Test->add_result(check_com_select(&new_selects[0], &new_inserts[0], &selects[0], &inserts[0],
                                      Test->repl, 100),
                     "Wrong check_com_select result\n");

    // Bulk round: 100 INSERTs.
    Test->set_timeout(20);
    get_global_status_allnodes(&selects[0], &inserts[0], Test->repl, silent);
    Test->tprintf("Doing 100 inserts\n");
    for (i = 0; i < 100; i++)
    {
        Test->set_timeout(20);
        Test->try_query(Test->maxscales->conn_rwsplit[0], "insert into t1 values(1);");
    }
    Test->stop_timeout();
    Test->repl->sync_slaves();
    get_global_status_allnodes(&new_selects[0], &new_inserts[0], Test->repl, silent);
    Test->add_result(check_com_insert(&new_selects[0], &new_inserts[0], &selects[0], &inserts[0],
                                      Test->repl, 100),
                     "Wrong check_com_insert result\n");

    Test->maxscales->close_rwsplit(0);
    int rval = Test->global_result;
    delete Test;
    return rval;
}
//+------------------------------------------------------------------------- // // Microsoft Windows // Copyright (C) Microsoft Corporation, 1994 - 1998. // // File: waitmult.hxx // // Contents: Encapsulates a Win32 WaitForMultipleObjects // // History: 04-Aug-94 DwightKr Created // //-------------------------------------------------------------------------- #pragma once //+------------------------------------------------------------------------- // // Class: CWaitForMultipleObjects // // Purpose: Constructor // // History: 07-Jun-94 DwightKr Created // //-------------------------------------------------------------------------- class CWaitForMultipleObjects { public : inline CWaitForMultipleObjects(ULONG cMaxHandles); ~CWaitForMultipleObjects() { delete [] _pHandles; } inline void AddEvent( HANDLE hEvent ); inline HANDLE Get( DWORD i ) const; inline DWORD Wait( DWORD dwTimeout ); void ResetCount() { _cNumHandles = 0; } private: HANDLE * _pHandles; // Doesn't own handles in _pHandles ULONG _cMaxHandles; ULONG _cNumHandles; }; //+------------------------------------------------------------------------- // // Method: CWaitForMultipleObjects::CWaitForMultipleObjects, public // // Purpose: Constructor // // History: 07-Jun-94 DwightKr Created // //-------------------------------------------------------------------------- inline CWaitForMultipleObjects::CWaitForMultipleObjects( ULONG cMaxHandles ) : _cMaxHandles(cMaxHandles), _cNumHandles(0), _pHandles(0) { _pHandles = new HANDLE[_cMaxHandles]; } //+------------------------------------------------------------------------- // // Method: CWaitForMultipleObjects::AddEvent, public // // Purpose: Adds an handle to be waited on // // Arguments: [hEvent] -- Handle to add // // History: 07-Jun-94 DwightKr Created // //-------------------------------------------------------------------------- inline void CWaitForMultipleObjects::AddEvent( HANDLE hEvent ) { Win4Assert( _cNumHandles < _cMaxHandles ); _pHandles[ _cNumHandles ] = 
hEvent; _cNumHandles++; } //+------------------------------------------------------------------------- // // Method: CWaitForMultipleObjects::Wait, public // // Purpose: Waits for one of the handles to be signalled, or timeout // // History: 07-Jun-94 DwightKr Created // //-------------------------------------------------------------------------- inline DWORD CWaitForMultipleObjects::Wait( DWORD dwTimeout ) { return WaitForMultipleObjects( _cNumHandles, _pHandles, FALSE, dwTimeout ); } //+------------------------------------------------------------------------- // // Method: CWaitForMultipleObjects::Get, public // // Synopsis: Retrieves the I-th handle // // Arguments: [i] -- i // // Returns: The I-th handle // // History: 23-Jun-98 KyleP Created // //-------------------------------------------------------------------------- inline HANDLE CWaitForMultipleObjects::Get( DWORD i ) const { Win4Assert( i < _cNumHandles ); return _pHandles[ i ]; }
#include "gtest/gtest.h"
#include "sum.hpp"

// Instantiates one test per implementation under test.  Each test checks
// the contract ys[i] <- ys[i] + xs[i] * scale over five elements, with
// xs = {0..4}, ys = {1..5}, scale = 2, so ys[i] must end up (i+1) + i*2.
#define TEST_FOR(func_name)                        \
    TEST(TestSum, func_name)                       \
    {                                              \
        int xs[] { 0, 1, 2, 3, 4 };                \
        int ys[5] { 1, 2, 3, 4, 5 };               \
                                                   \
        func_name(ys, 5, xs, 2);                   \
                                                   \
        for (int i = 0; i < 5; ++i) {              \
            EXPECT_EQ(ys[i], (i + 1) + i * 2);     \
        }                                          \
    }

TEST_FOR(scalar_add)
TEST_FOR(pure_simd_add)

// Bit variant: each bit of x acts as a 0/1 multiplier for the scale value.
// x = 0b01010111 reads LSB-first as 1,1,1,0,1,0,1,0 -> scaled by 2.
TEST(TestSum, ScalarAddBits)
{
    std::uint8_t x = 0b01010111;
    int ys[8]{};

    scalar_add_bits(ys, 8, &x, 2);

    const int expected[8] = { 2, 2, 2, 0, 2, 0, 2, 0 };
    for (int i = 0; i < 8; ++i) {
        EXPECT_EQ(ys[i], expected[i]);
    }
}

// Same input and expectations as ScalarAddBits, against the SIMD path.
TEST(TestSum, PureSIMDAddBits)
{
    std::uint8_t x = 0b01010111;
    int ys[8]{};

    pure_simd_add_bits(ys, 8, &x, 2);

    const int expected[8] = { 2, 2, 2, 0, 2, 0, 2, 0 };
    for (int i = 0; i < 8; ++i) {
        EXPECT_EQ(ys[i], expected[i]);
    }
}
/*============================================================================= Copyright (c) 2009 Christopher Schmidt Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ==============================================================================*/ #ifndef BOOST_FUSION_VIEW_FILTER_VIEW_DETAIL_DEREF_DATA_IMPL_HPP #define BOOST_FUSION_VIEW_FILTER_VIEW_DETAIL_DEREF_DATA_IMPL_HPP #include <boost/fusion/iterator/deref_data.hpp> #include <boost/fusion/support/config.hpp> namespace boost { namespace fusion { namespace extension { template <typename> struct deref_data_impl; template <> struct deref_data_impl<filter_view_iterator_tag> { template <typename It> struct apply { typedef typename result_of::deref_data<typename It::first_type>::type type; BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED static type call(It const &it) { return fusion::deref_data(it.first); } }; }; } // namespace extension } // namespace fusion } // namespace boost #endif
/*
 * Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
 */

#include <cstdio>
#include <iomanip>
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <dirent.h>

#include "base/logging.h"
#include <base/contrail_ports.h>
#include <bind/bind_util.h>
#include <cfg/dns_config.h>
#include <mgr/dns_oper.h>
#include "named_config.h"
#include <cmn/dns.h>

using namespace std;

// Process-wide singleton; created by Init(), destroyed by Shutdown().
NamedConfig *NamedConfig::singleton_;
const string NamedConfig::NamedZoneFileSuffix = "zone";
const string NamedConfig::NamedZoneNSPrefix = "contrail-ns";
const string NamedConfig::NamedZoneMXPrefix = "contrail-mx";
const char NamedConfig::pid_file_name[] = "contrail-named.pid";
const char NamedConfig::sessionkey_file_name[] = "session.key";

// Creates the singleton and writes a fresh named/rndc configuration
// (via Reset()).  Must be called exactly once before any other use.
void NamedConfig::Init(const std::string& named_config_dir,
                       const std::string& named_config_file,
                       const std::string& named_log_file,
                       const std::string& rndc_config_file,
                       const std::string& rndc_secret,
                       const std::string& named_max_cache_size) {
    assert(singleton_ == NULL);
    singleton_ = new NamedConfig(named_config_dir, named_config_file,
                                 named_log_file, rndc_config_file, rndc_secret,
                                 named_max_cache_size);
    singleton_->Reset();
}

// Destroys the singleton.
// NOTE(review): singleton_ is not set back to NULL here, so a subsequent
// Init() would trip the assert on a dangling pointer -- confirm intended.
void NamedConfig::Shutdown() {
    assert(singleton_);
    delete singleton_;
}

// Reset bind config
// Rewrites rndc.conf and named.conf from scratch (reset_flag_ makes
// WriteViewConfig emit only the default view), then deletes every
// "*.zone*" file left in the config directory.
void NamedConfig::Reset() {
    reset_flag_ = true;
    CreateRndcConf();
    UpdateNamedConf();
    DIR *dir = opendir(named_config_dir_.c_str());
    if (dir) {
        struct dirent *file;
        while ((file = readdir(dir)) != NULL) {
            std::string str(named_config_dir_);
            str.append(file->d_name);
            // Matches ".zone" anywhere in the path, which also catches
            // journal files like "x.zone.jnl".
            if (str.find(".zone") != std::string::npos) {
                remove(str.c_str());
            }
        }
        closedir(dir);
    }
    reset_flag_ = false;
}

// A new virtual-DNS view: regenerate named.conf (zone files for the new
// view are created inside WriteViewConfig when curr_vdns == updated_vdns).
void NamedConfig::AddView(const VirtualDnsConfig *vdns) {
    UpdateNamedConf(vdns);
}

// A changed view: regenerate named.conf and, if the domain name changed,
// drop the zone files of the old domain.
void NamedConfig::ChangeView(const VirtualDnsConfig *vdns) {
    UpdateNamedConf(vdns);
    std::string old_domain = vdns->GetOldDomainName();
    if (vdns->GetDomainName() != old_domain) {
        ZoneList zones;
        zones.push_back(old_domain);
        RemoveZoneFiles(vdns, zones);
    }
}

// A deleted view: regenerating named.conf is enough; WriteViewConfig
// removes zone files of deleted views.
void NamedConfig::DelView(const VirtualDnsConfig *vdns) {
    UpdateNamedConf(vdns);
}

// Regenerates named.conf with zone files rebuilt for every view
// (all_zone_files_ forces AddZoneFiles for each view).
void NamedConfig::AddAllViews() {
    all_zone_files_ = true;
    UpdateNamedConf();
    all_zone_files_ = false;
}

// Creates zone files for the reverse zones of |subnet| and regenerates
// named.conf.  Zones whose file already exists are left untouched.
void NamedConfig::AddZone(const Subnet &subnet, const VirtualDnsConfig *vdns) {
    ZoneList zones;
    subnet.GetReverseZones(zones);
    // Ignore zone files which already exist
    // NOTE(review): the existence probe opens zones[i] directly rather
    // than GetZoneFilePath(view, zone) -- looks like it checks the wrong
    // path; confirm against callers.
    for (unsigned int i = 0; i < zones.size();) {
        std::ifstream file(zones[i].c_str());
        if (file.is_open()) {
            if (file.good()) {
                zones.erase(zones.begin() + i);
                file.close();
                continue;
            }
            file.close();
        }
        i++;
    }
    AddZoneFiles(zones, vdns);
    UpdateNamedConf();
}

// Removes the zone files for |subnet|'s reverse zones, except those that
// are still referenced by the view's remaining subnets.
void NamedConfig::DelZone(const Subnet &subnet, const VirtualDnsConfig *vdns) {
    UpdateNamedConf();
    ZoneList vdns_zones, snet_zones;
    MakeZoneList(vdns, vdns_zones);
    subnet.GetReverseZones(snet_zones);
    // Ignore zones which are still in use
    for (unsigned int i = 0; i < snet_zones.size();) {
        unsigned int j;
        for (j = 0; j < vdns_zones.size(); j++) {
            if (snet_zones[i] == vdns_zones[j]) {
                snet_zones.erase(snet_zones.begin() + i);
                break;
            }
        }
        // Only advance when nothing was erased at position i.
        if (j == vdns_zones.size())
            i++;
    }
    RemoveZoneFiles(vdns, snet_zones);
}

// Rewrites named.conf and asks the running named to reload it: either
// through the site-provided helper script (if present) or directly via
// contrail-rndc reconfig.
void NamedConfig::UpdateNamedConf(const VirtualDnsConfig *updated_vdns) {
    CreateNamedConf(updated_vdns);
#ifndef _WIN32
    sync();
#endif
    ifstream pyscript("/etc/contrail/dns/applynamedconfig.py");
    if (!pyscript.good()) {
        std::stringstream str;
        str << "/usr/bin/contrail-rndc -c " << rndc_config_file_ << " -p ";
        str << ContrailPorts::DnsRndc();
        str << " reconfig";
        int res = system(str.str().c_str());
        if (res) {
            LOG(WARN, "/usr/bin/contrail-rndc command failed");
        }
    } else {
        std::stringstream str;
        // execute the helper script to apply named config
        str << "python /etc/contrail/dns/applynamedconfig.py";
        int res = system(str.str().c_str());
        if (res) {
            LOG(ERROR, "Applying named configuration failed");
        }
    }
}

// Emits the complete named.conf: options, rndc key/controls, logging,
// then one view section per virtual DNS.
void NamedConfig::CreateNamedConf(const VirtualDnsConfig *updated_vdns) {
    GetDefaultForwarders();
    file_.open(named_config_file_.c_str());
    WriteOptionsConfig();
    WriteRndcConfig();
    WriteLoggingConfig();
    WriteViewConfig(updated_vdns);
    file_.flush();
    file_.close();
}

// Emits the rndc client configuration (key + default server/port).
void NamedConfig::CreateRndcConf() {
    file_.open(rndc_config_file_.c_str());
    file_ << "key \"rndc-key\" {" << endl;
    file_ << " algorithm hmac-md5;" << endl;
    file_ << " secret \"" << rndc_secret_ << "\";" << endl;
    file_ << "};" << endl << endl;
    file_ << "options {" << endl;
    file_ << " default-key \"rndc-key\";" << endl;
    file_ << " default-server 127.0.0.1;" << endl;
    file_ << " default-port " << ContrailPorts::DnsRndc() << ";" << endl;
    file_ << "};" << endl << endl;
    file_.flush();
    file_.close();
}

// Emits the global "options { ... }" stanza of named.conf.
void NamedConfig::WriteOptionsConfig() {
    file_ << "options {" << endl;
    file_ << " directory \"" << named_config_dir_ << "\";" << endl;
    file_ << " managed-keys-directory \"" << named_config_dir_ << "\";" << endl;
    file_ << " empty-zones-enable no;" << endl;
    file_ << " pid-file \"" << GetPidFilePath() << "\";" << endl;
    file_ << " session-keyfile \"" << GetSessionKeyFilePath() << "\";" << endl;
    file_ << " listen-on port " << Dns::GetDnsPort() << " { any; };" << endl;
    file_ << " allow-query { any; };" << endl;
    file_ << " allow-recursion { any; };" << endl;
    file_ << " allow-query-cache { any; };" << endl;
    // max-cache-size is optional; omitted when not configured.
    if (!named_max_cache_size_.empty())
        file_ << " max-cache-size " << named_max_cache_size_ << ";" << endl;
    file_ << "};" << endl << endl;
}

// Emits the named-side rndc key and "controls" stanza so rndc commands
// from localhost are accepted.
void NamedConfig::WriteRndcConfig() {
    file_ << "key \"rndc-key\" {" << endl;
    file_ << " algorithm hmac-md5;" << endl;
    file_ << " secret \"" << rndc_secret_ << "\";" << endl;
    file_ << "};" << endl << endl;
    file_ << "controls {" << endl;
    file_ << " inet 127.0.0.1 port "<< ContrailPorts::DnsRndc() << endl;
    file_ << " allow { 127.0.0.1; } keys { \"rndc-key\"; };" << endl;
    file_ << "};" << endl << endl;
}

// Emits the "logging { ... }" stanza: one rotating debug channel used by
// both the default and the queries categories.
void NamedConfig::WriteLoggingConfig() {
    file_ << "logging {" << endl;
    file_ << " channel debug_log {" << endl;
    file_ << " file \"" << named_log_file_ << "\" versions 3 size 5m;" << endl;
    file_ << " severity debug;" << endl;
    file_ << " print-time yes;" << endl;
    file_ << " print-severity yes;" << endl;
    file_ << " print-category yes;" << endl;
    file_ << " };" << endl;
    file_ << " category default {" << endl;
    file_ << " debug_log;" << endl;
    file_ << " };" << endl;
    file_ << " category queries {" << endl;
    file_ << " debug_log;" << endl;
    file_ << " };" << endl;
    file_ << "};" << endl << endl;
}

// Emits one "view" stanza per live virtual DNS, plus the trailing default
// view.  Also removes zone files of deleted views, and (re)creates zone
// files for the view being updated (or all views when all_zone_files_).
void NamedConfig::WriteViewConfig(const VirtualDnsConfig *updated_vdns) {
    ZoneViewMap zone_view_map;

    // During Reset() only the default view is written.
    if (reset_flag_) {
        WriteDefaultView(zone_view_map);
        return;
    }

    VirtualDnsConfig::DataMap vdns = VirtualDnsConfig::GetVirtualDnsMap();
    for (VirtualDnsConfig::DataMap::iterator it = vdns.begin();
         it != vdns.end(); ++it) {
        VirtualDnsConfig *curr_vdns = it->second;
        ZoneList zones;
        MakeZoneList(curr_vdns, zones);

        if (curr_vdns->IsDeleted() || !curr_vdns->IsNotified()) {
            RemoveZoneFiles(curr_vdns, zones);
            continue;
        }

        std::string view_name = curr_vdns->GetViewName();
        file_ << "view \"" << view_name << "\" {" << endl;

        std::string order = curr_vdns->GetRecordOrder();
        if (!order.empty()) {
            // BIND uses "cyclic" for what the API calls "round-robin".
            if (order == "round-robin")
                order = "cyclic";
            file_ << " rrset-order {order " << order << ";};" << endl;
        }

        std::string next_dns = curr_vdns->GetNextDns();
        if (!next_dns.empty()) {
            // A literal IPv4 next-DNS becomes a forwarder; anything else
            // is treated as the name of another virtual DNS view.
            boost::system::error_code ec;
            boost::asio::ip::address_v4
                next_addr(boost::asio::ip::address_v4::from_string(next_dns, ec));
            if (!ec.value()) {
                file_ << " forwarders {" << next_addr.to_string() << ";};" << endl;
            } else {
                file_ << " virtual-forwarder \"" << next_dns << "\";" << endl;
            }
        } else if (!default_forwarders_.empty()) {
            file_ << " forwarders {" << default_forwarders_ << "};" << endl;
        }

        bool reverse_resolution = curr_vdns->IsReverseResolutionEnabled();
        for (unsigned int i = 0; i < zones.size(); i++) {
            WriteZone(view_name, zones[i], true, reverse_resolution, next_dns);
            // update the zone view map, to be used to generate default view
            if (curr_vdns->IsExternalVisible())
                zone_view_map.insert(ZoneViewPair(zones[i], view_name));
        }

        file_ << "};" << endl << endl;

        // Zone files are rewritten only for the view that changed, unless
        // a full rebuild (all_zone_files_) was requested.
        if (curr_vdns == updated_vdns || all_zone_files_)
            AddZoneFiles(zones, curr_vdns);
    }
    WriteDefaultView(zone_view_map);
}

// Emits the catch-all "_default_view_" stanza, containing static-stub
// references to every externally visible zone collected above.
void NamedConfig::WriteDefaultView(ZoneViewMap &zone_view_map) {
    // Create a default view first for any requests which do not have
    // view name TXT record
    file_ << "view \"_default_view_\" {" << endl;
    file_ << " match-clients {any;};" << endl;
    file_ << " match-destinations {any;};" << endl;
    file_ << " match-recursive-only no;" << endl;
    if (!default_forwarders_.empty()) {
        file_ << " forwarders {" << default_forwarders_ << "};" << endl;
    }
    for (ZoneViewMap::iterator it = zone_view_map.begin();
         it != zone_view_map.end(); ++it) {
        WriteZone(it->second, it->first, false, false, "");
    }
    file_ << "};" << endl << endl;
}

// Emits a single "zone" stanza.  is_master selects a master zone backed
// by a zone file; otherwise a static-stub zone pointing back at this
// server under the owning view's name.
void NamedConfig::WriteZone(const string &vdns, const string &name,
                            bool is_master, bool is_rr, const string &next_dns) {
    file_ << " zone \"" << name << "\" IN {" << endl;
    if (is_master) {
        file_ << " type master;" << endl;
        file_ << " file \"" << GetZoneFilePath(vdns, name) << "\";" << endl;
        file_ << " allow-update {127.0.0.1;};" << endl;
        if (!next_dns.empty()) {
            // Reverse zones don't follow next-DNS when reverse resolution
            // is disabled: empty forwarders overrides the view default.
            if (!is_rr && BindUtil::IsReverseZone(name)) {
                file_ << " forwarders { };" << endl;
            }
        } else {
            file_ << " forwarders { };" << endl;
        }
    } else {
        file_ << " type static-stub;" << endl;
        file_ << " virtual-server-name \"" << vdns << "\";" << endl;
        file_ << " server-addresses {127.0.0.1;};" << endl;
    }
    file_ << " };" << endl;
}

// Creates a zone file for each zone; the NS A record is written only for
// forward zones.
void NamedConfig::AddZoneFiles(ZoneList &zones, const VirtualDnsConfig *vdns) {
    for (unsigned int i = 0; i < zones.size(); i++) {
        CreateZoneFile(zones[i], vdns, !BindUtil::IsReverseZone(zones[i]));
    }
}

// Deletes the zone file (and journal) of every zone in |zones|.
void NamedConfig::RemoveZoneFiles(const VirtualDnsConfig *vdns,
                                  ZoneList &zones) {
    for (unsigned int i = 0; i < zones.size(); i++) {
        RemoveZoneFile(vdns, zones[i]);
    }
}

// Deletes one zone file plus its ".jnl" dynamic-update journal.
void NamedConfig::RemoveZoneFile(const VirtualDnsConfig *vdns, string &zone) {
    string zfile_name = GetZoneFilePath(vdns->GetViewName(), zone);
    remove(zfile_name.c_str());
    zfile_name.append(".jnl");
    remove(zfile_name.c_str());
}

// "<view>.<zone>.zone"; avoids a double dot when the zone name is
// already fully qualified (trailing '.').
string NamedConfig::GetZoneFileName(const string &vdns, const string &name) {
    if (name.size() && name.at(name.size() - 1) == '.')
        return (vdns + "." + name + NamedZoneFileSuffix);
    else
        return (vdns + "." + name + "." + NamedZoneFileSuffix);
}

// Absolute path of a zone file inside the named config directory.
string NamedConfig::GetZoneFilePath(const string &vdns, const string &name) {
    return (named_config_dir_ + GetZoneFileName(vdns, name));
}

string NamedConfig::GetPidFilePath() {
    return (named_config_dir_ + pid_file_name);
}

string NamedConfig::GetSessionKeyFilePath() {
    return (named_config_dir_ + sessionkey_file_name);
}

// Name of the NS host record for a domain, e.g. "contrail-ns.<domain>".
string NamedConfig::GetZoneNSName(const string domain_name) {
    return (NamedZoneNSPrefix + "." + domain_name);
}

// Name used in the SOA RNAME field, e.g. "contrail-mx.<domain>".
string NamedConfig::GetZoneMXName(const string domain_name) {
    return (NamedZoneMXPrefix + "." + domain_name);
}

// Writes a fresh zone file: $TTL, SOA (with per-vdns or default timers),
// the mandatory NS record, and - for forward zones (|ns| true) - an A
// record mapping the NS name to this server.
void NamedConfig::CreateZoneFile(std::string &zone_name,
                                 const VirtualDnsConfig *vdns, bool ns) {
    ofstream zfile;
    string ns_name;
    string zone_filename = GetZoneFilePath(vdns->GetViewName(), zone_name);

    zfile.open(zone_filename.c_str());
    zfile << "$ORIGIN ." << endl;
    if (vdns->GetTtl() > 0) {
        zfile << "$TTL " << vdns->GetTtl() << endl;
    } else {
        zfile << "$TTL " << Defaults::GlobalTTL << endl;
    }
    zfile << left << setw(NameWidth) << zone_name << " IN  SOA " <<
             GetZoneNSName(vdns->GetDomainName()) << " " <<
             GetZoneMXName(vdns->GetDomainName()) << " (" << endl;
    zfile << setw(NameWidth + 8) << "" << setw(NumberWidth) <<
             Defaults::Serial << endl;
    zfile << setw(NameWidth + 8) << "" << setw(NumberWidth) <<
             Defaults::Refresh << endl;
    zfile << setw(NameWidth + 8) << "" << setw(NumberWidth) <<
             Defaults::Retry << endl;
    zfile << setw(NameWidth + 8) << "" << setw(NumberWidth) <<
             Defaults::Expire << endl;
    // SOA minimum / negative-cache TTL: per-vdns value when set.
    if (vdns->GetNegativeCacheTtl() > 0 ) {
        zfile << setw(NameWidth + 8) << "" << setw(NumberWidth) <<
                 vdns->GetNegativeCacheTtl() << endl;
    } else {
        zfile << setw(NameWidth + 8) << "" << setw(NumberWidth) <<
                 Defaults::Minimum << endl;
    }
    zfile << setw(NameWidth + 8) << "" << ")" << endl;
    /* NS records are mandatory in zone file. They are required for the
       following reasons
       1. Name servers returns NS RR in responses to queries, in the
          authority section of the DNS message.
       2. Name servers use the NS records to determine where to send
          NOTIFY messages.
     */
    zfile << setw(NameWidth + 4) << "" << setw(TypeWidth) << " NS " <<
             setw(NameWidth) << GetZoneNSName(vdns->GetDomainName()) << endl;
    zfile << "$ORIGIN " << zone_name << endl;
    //Write the NS record
    if (ns)
        zfile << setw(NameWidth) << NamedZoneNSPrefix << " IN  A " <<
                 Dns::GetSelfIp() << endl;
    zfile.flush();
    zfile.close();
}

// Create a list of zones for the virtual DNS
void NamedConfig::MakeZoneList(const VirtualDnsConfig *vdns_config,
                               ZoneList &zones) {
    // always take domain name in lower case, to avoid differences due to case
    std::string dns_domain = boost::to_lower_copy(vdns_config->GetDomainName());
    if (dns_domain.empty()) {
        return;
    }
    // Forward Zone
    zones.push_back(dns_domain);
    // Reverse zones
    MakeReverseZoneList(vdns_config, zones);
}

// Collects the reverse zones of every live subnet in every live IPAM /
// VN attached to this virtual DNS, de-duplicated.
void NamedConfig::MakeReverseZoneList(const VirtualDnsConfig *vdns_config,
                                      ZoneList &zones) {
    const VirtualDnsConfig::IpamList &ipams = vdns_config->GetIpamList();
    for (VirtualDnsConfig::IpamList::const_iterator ipam_it = ipams.begin();
         ipam_it != ipams.end(); ++ipam_it) {
        if ((*ipam_it)->IsDeleted() || !(*ipam_it)->IsValid()) {
            continue;
        }
        const IpamConfig::VnniList &vnni_list = (*ipam_it)->GetVnniList();
        for (IpamConfig::VnniList::iterator vnni_it = vnni_list.begin();
             vnni_it != vnni_list.end(); ++vnni_it) {
            if ((*vnni_it)->IsDeleted() || !(*vnni_it)->IsValid()) {
                continue;
            }
            const Subnets &subnets = (*vnni_it)->GetSubnets();
            for (unsigned int i = 0; i < subnets.size(); ++i) {
                const Subnet &subnet = subnets[i];
                if (subnet.IsDeleted())
                    continue;
                subnet.GetReverseZones(zones);
            }
        }
    }
    // If same subnet is used in different VNs, remove duplicates
    std::sort(zones.begin(), zones.end());
    ZoneList::iterator it = std::unique(zones.begin(), zones.end());
    zones.resize(std::distance(zones.begin(), it));
}

// Parses the resolver configuration file (GetResolveFile()) and collects
// valid IPv4 "nameserver" entries into default_forwarders_ as a
// semicolon-separated list suitable for a BIND forwarders clause.
void NamedConfig::GetDefaultForwarders() {
    default_forwarders_.clear();
    std::ifstream fd;
    fd.open(GetResolveFile().c_str());
    if (!fd.is_open()) {
        return;
    }
    std::string line;
    while (getline(fd, line)) {
        // Strip trailing comment, then read "nameserver <ip>".
        std::size_t pos = line.find_first_of("#");
        std::stringstream ss(line.substr(0, pos));
        std::string key;
        ss >> key;
        if (key == "nameserver") {
            std::string ip;
            ss >> ip;
            boost::system::error_code ec;
            boost::asio::ip::address_v4::from_string(ip, ec);
            if (!ec.value()) {
                default_forwarders_ += ip + "; ";
            }
        }
    }
    fd.close();
}

///////////////////////////////////////////////////////////////////////////////

// Starts a periodic timer that polls whether contrail-named is running;
// the first poll fires after kInitTimeout, later ones after
// kBindStatusTimeout (see CheckBindStatus).
BindStatus::BindStatus(BindEventHandler handler)
    : named_pid_(-1), handler_(handler), change_timeout_(true) {
    status_timer_ = TimerManager::CreateTimer(
                        *Dns::GetEventManager()->io_service(),
                        "BindStatusTimer",
                        TaskScheduler::GetInstance()->GetTaskId("dns::BindStatus"),
                        0);
    status_timer_->Start(kInitTimeout,
                         boost::bind(&BindStatus::CheckBindStatus, this));
}

BindStatus::~BindStatus() {
    status_timer_->Cancel();
    TimerManager::DeleteTimer(status_timer_);
}

// Check if a given pid belongs to contrail-named
// Reads /proc/<pid>/cmdline and looks for the contrail-named binary path.
bool BindStatus::IsBindPid(uint32_t pid) {
    bool ret = false;
    std::stringstream str;
    str << "/proc/" << pid << "/cmdline";
    ifstream ifile(str.str().c_str());
    if (ifile.is_open()) {
        if (ifile.good()) {
            std::string cmdline;
            cmdline.assign((istreambuf_iterator<char>(ifile)),
                           istreambuf_iterator<char>());
            istringstream cmdstream(cmdline);
            if (cmdstream.str().find("/usr/bin/contrail-named") !=
                std::string::npos) {
                ret = true;
            }
        }
        ifile.close();
    }
    return ret;
}

// Timer callback: reads the named pid file and reports Up/Down
// transitions to handler_.  Returns true so the timer keeps firing;
// after the first run the period is switched to kBindStatusTimeout.
bool BindStatus::CheckBindStatus() {
    // (uint32_t)-1 is the "no pid" sentinel throughout this method.
    uint32_t new_pid = -1;
    NamedConfig *ncfg = NamedConfig::GetNamedConfigObject();
    if (ncfg) {
        std::ifstream pid_file(ncfg->GetPidFilePath().c_str());
        if (pid_file.is_open()) {
            if (pid_file.good()) {
                pid_file >> new_pid;
            }
            pid_file.close();
        }
    }
    if (new_pid == (uint32_t) -1) {
        handler_(Down);
    } else if (!IsBindPid(new_pid)) {
        // Stale pid file: only report Down on the transition.
        if (named_pid_ != (uint32_t) -1) {
            named_pid_ = -1;
            handler_(Down);
        }
    } else {
        if (named_pid_ != new_pid) {
            named_pid_ = new_pid;
            handler_(Up);
        }
    }
    if (change_timeout_) {
        change_timeout_ = false;
        status_timer_->Reschedule(kBindStatusTimeout);
    }
    return true;
}
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chromeos/network/onc/onc_merger.h" #include <set> #include <string> #include <utility> #include <vector> #include "base/logging.h" #include "base/macros.h" #include "base/memory/ptr_util.h" #include "base/values.h" #include "chromeos/network/onc/onc_signature.h" #include "components/onc/onc_constants.h" namespace chromeos { namespace onc { namespace { typedef std::unique_ptr<base::DictionaryValue> DictionaryPtr; // Returns true if the field is the identifier of a configuration, i.e. the GUID // of a network or a certificate. bool IsIdentifierField(const OncValueSignature& value_signature, const std::string& field_name) { if (&value_signature == &kNetworkConfigurationSignature) return field_name == ::onc::network_config::kGUID; if (&value_signature == &kCertificateSignature) return field_name == ::onc::certificate::kGUID; return false; } // Identifier fields and other read-only fields (specifically Type) are // handled specially during merging because they are always identical for the // various setting sources. bool IsReadOnlyField(const OncValueSignature& value_signature, const std::string& field_name) { if (IsIdentifierField(value_signature, field_name)) return true; if (&value_signature == &kNetworkConfigurationSignature) return field_name == ::onc::network_config::kType; return false; } // Inserts |true| at every field name in |result| that is recommended in // |policy|. 
void MarkRecommendedFieldnames(const base::DictionaryValue& policy,
                               base::DictionaryValue* result) {
  const base::ListValue* recommended_value = NULL;
  // Absence of the Recommended list means nothing is user-editable here.
  if (!policy.GetListWithoutPathExpansion(::onc::kRecommended,
                                          &recommended_value))
    return;
  for (base::ListValue::const_iterator it = recommended_value->begin();
       it != recommended_value->end(); ++it) {
    std::string entry;
    // Non-string entries in the Recommended list are ignored.
    if ((*it)->GetAsString(&entry))
      result->SetBooleanWithoutPathExpansion(entry, true);
  }
}

// Returns a dictionary which contains |true| at each path that is editable by
// the user. No other fields are set.
DictionaryPtr GetEditableFlags(const base::DictionaryValue& policy) {
  DictionaryPtr result_editable(new base::DictionaryValue);
  MarkRecommendedFieldnames(policy, result_editable.get());

  // Recurse into nested dictionaries.
  for (base::DictionaryValue::Iterator it(policy); !it.IsAtEnd();
       it.Advance()) {
    const base::DictionaryValue* child_policy = NULL;
    if (it.key() == ::onc::kRecommended ||
        !it.value().GetAsDictionary(&child_policy)) {
      continue;
    }

    // release(): ownership transfers to |result_editable|.
    result_editable->SetWithoutPathExpansion(
        it.key(), GetEditableFlags(*child_policy).release());
  }
  return result_editable;
}

// This is the base class for merging a list of DictionaryValues in
// parallel. See MergeDictionaries function.
class MergeListOfDictionaries {
 public:
  typedef std::vector<const base::DictionaryValue*> DictPtrs;

  MergeListOfDictionaries() {
  }

  virtual ~MergeListOfDictionaries() {
  }

  // For each path in any of the dictionaries |dicts|, the function
  // MergeListOfValues is called with the list of values that are located at
  // that path in each of the dictionaries. This function returns a new
  // dictionary containing all results of MergeListOfValues at the respective
  // paths. The resulting dictionary doesn't contain empty dictionaries.
  DictionaryPtr MergeDictionaries(const DictPtrs &dicts) {
    DictionaryPtr result(new base::DictionaryValue);
    // Tracks keys already merged so each key is processed only once even
    // if several of the input dictionaries contain it.
    std::set<std::string> visited;
    for (DictPtrs::const_iterator it_outer = dicts.begin();
         it_outer != dicts.end(); ++it_outer) {
      if (!*it_outer)
        continue;

      for (base::DictionaryValue::Iterator field(**it_outer); !field.IsAtEnd();
           field.Advance()) {
        const std::string& key = field.key();
        // kRecommended is merge metadata, not payload; skip it.
        if (key == ::onc::kRecommended || !visited.insert(key).second)
          continue;

        std::unique_ptr<base::Value> merged_value;
        if (field.value().IsType(base::Value::TYPE_DICTIONARY)) {
          // Dictionary-valued field: gather the nested dictionary from
          // every source (NULL where missing) and recurse.
          DictPtrs nested_dicts;
          for (DictPtrs::const_iterator it_inner = dicts.begin();
               it_inner != dicts.end(); ++it_inner) {
            const base::DictionaryValue* nested_dict = NULL;
            if (*it_inner)
              (*it_inner)->GetDictionaryWithoutPathExpansion(key,
                                                             &nested_dict);
            nested_dicts.push_back(nested_dict);
          }
          DictionaryPtr merged_dict(MergeNestedDictionaries(key,
                                                            nested_dicts));
          // Empty merge results are dropped so |result| never contains
          // empty dictionaries.
          if (!merged_dict->empty())
            merged_value = std::move(merged_dict);
        } else {
          // Leaf field: gather the value from every source (NULL where
          // missing) and delegate to the subclass.
          std::vector<const base::Value*> values;
          for (DictPtrs::const_iterator it_inner = dicts.begin();
               it_inner != dicts.end(); ++it_inner) {
            const base::Value* value = NULL;
            if (*it_inner)
              (*it_inner)->GetWithoutPathExpansion(key, &value);
            values.push_back(value);
          }
          merged_value = MergeListOfValues(key, values);
        }

        if (merged_value)
          result->SetWithoutPathExpansion(key, merged_value.release());
      }
    }
    return result;
  }

 protected:
  // This function is called by MergeDictionaries for each list of values that
  // are located at the same path in each of the dictionaries. The order of the
  // values is the same as of the given dictionaries |dicts|. If a dictionary
  // doesn't contain a path then it's value is NULL.
  virtual std::unique_ptr<base::Value> MergeListOfValues(
      const std::string& key,
      const std::vector<const base::Value*>& values) = 0;

  // Hook for subclasses; by default nested dictionaries are merged with
  // the same algorithm as the top level.
  virtual DictionaryPtr MergeNestedDictionaries(const std::string& key,
                                                const DictPtrs &dicts) {
    return MergeDictionaries(dicts);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(MergeListOfDictionaries);
};

// This is the base class for merging policies and user settings.
class MergeSettingsAndPolicies : public MergeListOfDictionaries {
 public:
  // One slot per settings source for a single field; NULL means the
  // source did not provide a value for that field.
  struct ValueParams {
    const base::Value* user_policy;
    const base::Value* device_policy;
    const base::Value* user_setting;
    const base::Value* shared_setting;
    const base::Value* active_setting;
    bool user_editable;
    bool device_editable;
  };

  MergeSettingsAndPolicies() {}

  // Merge the provided dictionaries. For each path in any of the dictionaries,
  // MergeValues is called. Its results are collected in a new dictionary which
  // is then returned. The resulting dictionary never contains empty
  // dictionaries.
  DictionaryPtr MergeDictionaries(
      const base::DictionaryValue* user_policy,
      const base::DictionaryValue* device_policy,
      const base::DictionaryValue* user_settings,
      const base::DictionaryValue* shared_settings,
      const base::DictionaryValue* active_settings) {
    hasUserPolicy_ = (user_policy != NULL);
    hasDevicePolicy_ = (device_policy != NULL);

    // Derive per-field editability dictionaries from the Recommended
    // lists of each policy; these ride along as two extra sources.
    DictionaryPtr user_editable;
    if (user_policy != NULL)
      user_editable = GetEditableFlags(*user_policy);

    DictionaryPtr device_editable;
    if (device_policy != NULL)
      device_editable = GetEditableFlags(*device_policy);

    // Slot order must match the enum below; NULL marks absent sources.
    std::vector<const base::DictionaryValue*> dicts(kLastIndex, NULL);
    dicts[kUserPolicyIndex] = user_policy;
    dicts[kDevicePolicyIndex] = device_policy;
    dicts[kUserSettingsIndex] = user_settings;
    dicts[kSharedSettingsIndex] = shared_settings;
    dicts[kActiveSettingsIndex] = active_settings;
    dicts[kUserEditableIndex] = user_editable.get();
    dicts[kDeviceEditableIndex] = device_editable.get();
    return MergeListOfDictionaries::MergeDictionaries(dicts);
  }

 protected:
  // This function is called by MergeDictionaries for each list of values that
  // are located at the same path in each of the dictionaries. Implementations
  // can use the Has*Policy functions.
  virtual std::unique_ptr<base::Value> MergeValues(
      const std::string& key,
      const ValueParams& values) = 0;

  // Whether a user policy was provided.
  bool HasUserPolicy() {
    return hasUserPolicy_;
  }

  // Whether a device policy was provided.
  bool HasDevicePolicy() {
    return hasDevicePolicy_;
  }

  // MergeListOfDictionaries override.  Repackages the positional value
  // list into a ValueParams and forwards to MergeValues.
  std::unique_ptr<base::Value> MergeListOfValues(
      const std::string& key,
      const std::vector<const base::Value*>& values) override {
    // With no policy at all, every field is editable by default.
    bool user_editable = !HasUserPolicy();
    if (values[kUserEditableIndex])
      values[kUserEditableIndex]->GetAsBoolean(&user_editable);

    bool device_editable = !HasDevicePolicy();
    if (values[kDeviceEditableIndex])
      values[kDeviceEditableIndex]->GetAsBoolean(&device_editable);

    ValueParams params;
    params.user_policy = values[kUserPolicyIndex];
    params.device_policy = values[kDevicePolicyIndex];
    params.user_setting = values[kUserSettingsIndex];
    params.shared_setting = values[kSharedSettingsIndex];
    params.active_setting = values[kActiveSettingsIndex];
    params.user_editable = user_editable;
    params.device_editable = device_editable;
    return MergeValues(key, params);
  }

 private:
  // Indices into the positional dictionary/value vectors above.
  enum {
    kUserPolicyIndex,
    kDevicePolicyIndex,
    kUserSettingsIndex,
    kSharedSettingsIndex,
    kActiveSettingsIndex,
    kUserEditableIndex,
    kDeviceEditableIndex,
    kLastIndex
  };

  bool hasUserPolicy_, hasDevicePolicy_;
  DISALLOW_COPY_AND_ASSIGN(MergeSettingsAndPolicies);
};

// Call MergeDictionaries to merge policies and settings to the effective
// values. This ignores the active settings of Shill. See the description of
// MergeSettingsAndPoliciesToEffective.
class MergeToEffective : public MergeSettingsAndPolicies {
 public:
  MergeToEffective() {}

 protected:
  // Merges |values| to the effective value (Mandatory policy overwrites user
  // settings overwrites shared settings overwrites recommended policy).
  // |which| is set to the respective onc::kAugmentation* constant that
  // indicates which source of settings is effective. Note that this function
  // may return a NULL pointer and set |which| to
  // ::onc::kAugmentationUserPolicy, which means that the user policy didn't
  // set a value but also didn't recommend it, thus enforcing the empty value.
  std::unique_ptr<base::Value> MergeValues(const std::string& key,
                                           const ValueParams& values,
                                           std::string* which) {
    const base::Value* result = NULL;
    which->clear();
    // Precedence, highest first: mandatory user policy, mandatory device
    // policy, user setting, shared setting, recommended user policy,
    // recommended device policy.
    if (!values.user_editable) {
      result = values.user_policy;
      *which = ::onc::kAugmentationUserPolicy;
    } else if (!values.device_editable) {
      result = values.device_policy;
      *which = ::onc::kAugmentationDevicePolicy;
    } else if (values.user_setting) {
      result = values.user_setting;
      *which = ::onc::kAugmentationUserSetting;
    } else if (values.shared_setting) {
      result = values.shared_setting;
      *which = ::onc::kAugmentationSharedSetting;
    } else if (values.user_policy) {
      result = values.user_policy;
      *which = ::onc::kAugmentationUserPolicy;
    } else if (values.device_policy) {
      result = values.device_policy;
      *which = ::onc::kAugmentationDevicePolicy;
    } else {
      // Can be reached if the current field is recommended, but none of the
      // dictionaries contained a value for it.
    }
    if (result)
      return base::WrapUnique(result->DeepCopy());
    return std::unique_ptr<base::Value>();
  }

  // MergeSettingsAndPolicies override.
  std::unique_ptr<base::Value> MergeValues(const std::string& key,
                                           const ValueParams& values) override {
    std::string which;
    return MergeValues(key, values, &which);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(MergeToEffective);
};

namespace {

// Returns true if all not-null values in |values| are equal to |value|.
// Returns true if every non-null value in |values| equals |value|.
// Used only for DCHECK-ing read-only fields (GUID/Type), which must agree
// across all sources.
bool AllPresentValuesEqual(const MergeSettingsAndPolicies::ValueParams& values,
                           const base::Value& value) {
  if (values.user_policy && !value.Equals(values.user_policy))
    return false;
  if (values.device_policy && !value.Equals(values.device_policy))
    return false;
  if (values.user_setting && !value.Equals(values.user_setting))
    return false;
  if (values.shared_setting && !value.Equals(values.shared_setting))
    return false;
  if (values.active_setting && !value.Equals(values.active_setting))
    return false;
  return true;
}

}  // namespace

// Call MergeDictionaries to merge policies and settings to an augmented
// dictionary which contains a dictionary for each value in the original
// dictionaries. See the description of MergeSettingsAndPoliciesToAugmented.
class MergeToAugmented : public MergeToEffective {
 public:
  // NOTE(review): |signature_| is not initialized here; MergeDictionaries()
  // must be the first call on an instance so that |signature_| is set before
  // any merge callback reads it — confirm all callers do this.
  MergeToAugmented() {}

  // Entry point. Stores |signature| for field lookups, then delegates to the
  // base-class merge. This overload *hides* (does not override) the
  // base-class MergeDictionaries, which takes no signature.
  DictionaryPtr MergeDictionaries(
      const OncValueSignature& signature,
      const base::DictionaryValue* user_policy,
      const base::DictionaryValue* device_policy,
      const base::DictionaryValue* user_settings,
      const base::DictionaryValue* shared_settings,
      const base::DictionaryValue* active_settings) {
    signature_ = &signature;
    return MergeToEffective::MergeDictionaries(user_policy, device_policy,
                                               user_settings, shared_settings,
                                               active_settings);
  }

 protected:
  // MergeSettingsAndPolicies override.
  // Builds the augmented representation of one field: either the plain active
  // value (field unknown to the signature), the bare effective value
  // (read-only fields), or a dictionary keyed by ::onc::kAugmentation*
  // constants listing every source that provided a value.
  std::unique_ptr<base::Value> MergeValues(const std::string& key,
                                           const ValueParams& values) override {
    const OncFieldSignature* field = NULL;
    if (signature_)
      field = GetFieldSignature(*signature_, key);
    if (!field) {
      // This field is not part of the provided ONCSignature, thus it cannot be
      // controlled by policy. Return the plain active value instead of an
      // augmented dictionary.
      if (values.active_setting)
        return base::WrapUnique(values.active_setting->DeepCopy());
      return nullptr;
    }
    // This field is part of the provided ONCSignature, thus it can be
    // controlled by policy.
    std::string which_effective;
    // Reuse the effective-merge logic to pick the winning value and learn
    // which source supplied it.
    std::unique_ptr<base::Value> effective_value =
        MergeToEffective::MergeValues(key, values, &which_effective);
    if (IsReadOnlyField(*signature_, key)) {
      // Don't augment read-only fields (GUID and Type).
      if (effective_value) {
        // DCHECK that all provided fields are identical.
        DCHECK(AllPresentValuesEqual(values, *effective_value))
            << "Values do not match: " << key
            << " Effective: " << *effective_value;
        // Return the un-augmented field.
        return effective_value;
      }
      if (values.active_setting) {
        // Unmanaged networks have assigned (active) values.
        return base::WrapUnique(values.active_setting->DeepCopy());
      }
      LOG(ERROR) << "Field has no effective value: " << key;
      return nullptr;
    }
    // Regular managed field: emit one sub-entry per source that has a value.
    std::unique_ptr<base::DictionaryValue> augmented_value(
        new base::DictionaryValue);
    if (values.active_setting) {
      augmented_value->SetWithoutPathExpansion(
          ::onc::kAugmentationActiveSetting, values.active_setting->DeepCopy());
    }
    if (!which_effective.empty()) {
      augmented_value->SetStringWithoutPathExpansion(
          ::onc::kAugmentationEffectiveSetting, which_effective);
    }
    // Prevent credentials from being forwarded in cleartext to
    // UI. User/shared credentials are not stored separately, so they cannot
    // leak here.
    bool is_credential = onc::FieldIsCredential(*signature_, key);
    if (!is_credential) {
      if (values.user_policy) {
        augmented_value->SetWithoutPathExpansion(
            ::onc::kAugmentationUserPolicy, values.user_policy->DeepCopy());
      }
      if (values.device_policy) {
        augmented_value->SetWithoutPathExpansion(
            ::onc::kAugmentationDevicePolicy, values.device_policy->DeepCopy());
      }
    }
    if (values.user_setting) {
      augmented_value->SetWithoutPathExpansion(
          ::onc::kAugmentationUserSetting, values.user_setting->DeepCopy());
    }
    if (values.shared_setting) {
      augmented_value->SetWithoutPathExpansion(
          ::onc::kAugmentationSharedSetting, values.shared_setting->DeepCopy());
    }
    // Editability flags are only meaningful when the corresponding policy
    // source exists at all.
    if (HasUserPolicy() && values.user_editable) {
      augmented_value->SetBooleanWithoutPathExpansion(
          ::onc::kAugmentationUserEditable, true);
    }
    if (HasDevicePolicy() && values.device_editable) {
      augmented_value->SetBooleanWithoutPathExpansion(
          ::onc::kAugmentationDeviceEditable, true);
    }
    // An entirely empty augmentation collapses to "no value".
    if (augmented_value->empty())
      augmented_value.reset();
    return std::move(augmented_value);
  }

  // MergeListOfDictionaries override.
  // Descends into a nested ONC dictionary: temporarily swaps |signature_| to
  // the child field's signature (or NULL if unknown) for the recursive merge,
  // then restores it.
  DictionaryPtr MergeNestedDictionaries(const std::string& key,
                                        const DictPtrs& dicts) override {
    DictionaryPtr result;
    if (signature_) {
      const OncValueSignature* enclosing_signature = signature_;
      signature_ = NULL;
      const OncFieldSignature* field =
          GetFieldSignature(*enclosing_signature, key);
      if (field)
        signature_ = field->value_signature;
      result = MergeToEffective::MergeNestedDictionaries(key, dicts);
      signature_ = enclosing_signature;
    } else {
      result = MergeToEffective::MergeNestedDictionaries(key, dicts);
    }
    return result;
  }

 private:
  // Signature of the dictionary level currently being merged; set by
  // MergeDictionaries and swapped per-level by MergeNestedDictionaries.
  const OncValueSignature* signature_;

  DISALLOW_COPY_AND_ASSIGN(MergeToAugmented);
};

}  // namespace

// Merges to plain effective values, ignoring Shill's active settings
// (NULL is passed for them).
DictionaryPtr MergeSettingsAndPoliciesToEffective(
    const base::DictionaryValue* user_policy,
    const base::DictionaryValue* device_policy,
    const base::DictionaryValue* user_settings,
    const base::DictionaryValue* shared_settings) {
  MergeToEffective merger;
  return merger.MergeDictionaries(user_policy, device_policy, user_settings,
                                  shared_settings, NULL);
}

// Merges to the augmented per-field dictionaries described above; |signature|
// drives which fields are policy-controllable.
DictionaryPtr MergeSettingsAndPoliciesToAugmented(
    const OncValueSignature& signature,
    const base::DictionaryValue* user_policy,
    const base::DictionaryValue* device_policy,
    const base::DictionaryValue* user_settings,
    const base::DictionaryValue* shared_settings,
    const base::DictionaryValue* active_settings) {
  MergeToAugmented merger;
  return merger.MergeDictionaries(signature, user_policy, device_policy,
                                  user_settings, shared_settings,
                                  active_settings);
}

}  // namespace onc
}  // namespace chromeos
// <Snippet1>

#using <system.dll>
#using <system.messaging.dll>

using namespace System;
using namespace System::Messaging;

// Demonstrates three ways of configuring a queue's
// MessageReadPropertyFilter before receiving: defaults only, all
// properties, and an explicitly selected subset.
ref class MyNewQueue
{
public:

   //*************************************************
   // Retrieves the default properties for a Message.
   //*************************************************
   void RetrieveDefaultProperties()
   {
      // Connect to a message queue.
      MessageQueue^ myQueue = gcnew MessageQueue( ".\\myQueue" );

      // Specify to retrieve the default properties only.
      myQueue->MessageReadPropertyFilter->SetDefaults();

      // Set the formatter for the Message. The queue's body is
      // deserialized as a String.
      array<Type^>^p = gcnew array<Type^>(1);
      p[ 0 ] = String::typeid;
      myQueue->Formatter = gcnew XmlMessageFormatter( p );

      // Receive the first message in the queue.
      Message^ myMessage = myQueue->Receive();

      // Display selected properties.
      Console::WriteLine( "Label: {0}", myMessage->Label );
      Console::WriteLine( "Body: {0}", static_cast<String^>(myMessage->Body) );
      return;
   }

   //*************************************************
   // Retrieves all properties for a Message.
   //*************************************************
   void RetrieveAllProperties()
   {
      // Connect to a message queue.
      MessageQueue^ myQueue = gcnew MessageQueue( ".\\myQueue" );

      // Specify to retrieve all properties.
      myQueue->MessageReadPropertyFilter->SetAll();

      // Set the formatter for the Message.
      array<Type^>^p = gcnew array<Type^>(1);
      p[ 0 ] = String::typeid;
      myQueue->Formatter = gcnew XmlMessageFormatter( p );

      // Receive the first message in the queue.
      Message^ myMessage = myQueue->Receive();

      // Display selected properties. EncryptionAlgorithm is only
      // available because SetAll() was used above.
      Console::WriteLine( "Encryption algorithm: {0}",
         myMessage->EncryptionAlgorithm.ToString() );
      Console::WriteLine( "Body: {0}", myMessage->Body );
      return;
   }

   //*************************************************
   // Retrieves application-specific properties for a
   // Message.
   //*************************************************
   void RetrieveSelectedProperties()
   {
      // Connect to a message queue.
      MessageQueue^ myQueue = gcnew MessageQueue( ".\\myQueue" );

      // Specify to retrieve selected properties: start from an empty
      // filter, then opt in property by property.
      MessagePropertyFilter^ myFilter = gcnew MessagePropertyFilter;
      myFilter->ClearAll();

      // The following list is a random subset of available properties.
      myFilter->Body = true;
      myFilter->Label = true;
      myFilter->MessageType = true;
      myFilter->Priority = true;
      myQueue->MessageReadPropertyFilter = myFilter;

      // Set the formatter for the Message.
      array<Type^>^p = gcnew array<Type^>(1);
      p[ 0 ] = String::typeid;
      myQueue->Formatter = gcnew XmlMessageFormatter( p );

      // Receive the first message in the queue.
      Message^ myMessage = myQueue->Receive();

      // Display selected properties.
      Console::WriteLine( "Message type: {0}",
         myMessage->MessageType.ToString() );
      Console::WriteLine( "Priority: {0}", myMessage->Priority.ToString() );
      return;
   }
};

//*************************************************
// Provides an entry point into the application.
//
// This example retrieves specific groups of Message
// properties.
//*************************************************
int main()
{
   // Create a new instance of the class.
   MyNewQueue^ myNewQueue = gcnew MyNewQueue;

   // Retrieve specific sets of Message properties.
   myNewQueue->RetrieveDefaultProperties();
   myNewQueue->RetrieveAllProperties();
   myNewQueue->RetrieveSelectedProperties();
   return 0;
}
// </Snippet1>
/*========================================================================= Program: Visualization Toolkit Module: TestTriangle.cxx Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ // .NAME // .SECTION Description // this program tests the Triangle #include "vtkNew.h" #include "vtkPoints.h" #include "vtkSmartPointer.h" #include "vtkTriangle.h" #include <limits> template <class A> bool fuzzyCompare(A a, A b) { return fabs(a - b) < std::numeric_limits<A>::epsilon(); } int TestTriangle(int, char*[]) { // three vertices making a triangle double pnt0[3] = { 0, 2, 0 }; double pnt1[3] = { 4, 2, 0 }; double pnt2[3] = { 0, 6, 0 }; // points to be tested against the triangle double pnts[][3] = { // squared error tolerance // = 0.0001 * 0.0001 = 0.00000001 // outside the triangle { 0, 1.999, 0 }, { -0.001, 2, 0 }, { 4, 1.999, 0 }, { 4, 2.001, 0 }, { 4.001, 2, 0 }, { 0, 6.001, 0 }, { 0.001, 6, 0 }, { -0.001, 6, 0 }, { -0.001, 2.001, 0 }, { -0.001, 1.999, 0 }, { 0.001, 1.999, 0 }, { 4.001, 2.001, 0 }, { 4.001, 1.999, 0 }, { 3.999, 1.999, 0 }, { -0.001, 5.999, 0 }, { -0.001, 6.001, 0 }, { 0.001, 6.001, 0 }, // inside the triangle { 0, 2.001, 0 }, { 0.001, 2, 0 }, { 0.001, 2.001, 0 }, { 3.999, 2.001, 0 }, { 3.999, 2, 0 }, { 0, 5.999, 0 }, { 0.001, 5.999, 0 }, { 0, 2, 0 }, { 4, 2, 0 }, { 0, 6, 0 }, { 2, 2, 0 }, { 2, 4, 0 }, { 0, 4, 0 }, { 1.333, 3.333, 0 }, }; int inside; for (int i = 0; i < 31; i++) { inside = vtkTriangle::PointInTriangle(pnts[i], pnt0, pnt1, pnt2, 0.00000001); if (inside && i < 17) { cerr << "ERROR: point #" << i << ", an outside-point, considered to be inside the triangle!!!" 
<< endl; cerr << "Squared error tolerance: 0.00000001" << endl; return EXIT_FAILURE; } else if (!inside && i > 16) { cerr << "ERROR: point #" << i << ", an inside-point, considered to be outside the triangle!!!" << endl; cerr << "Squared error tolerance: 0.00000001" << endl; return EXIT_FAILURE; } } cout << "Passed: 17 points outside and 14 points inside the triangle." << endl; vtkSmartPointer<vtkTriangle> triangle = vtkSmartPointer<vtkTriangle>::New(); triangle->GetPoints()->SetPoint(0, 0.0, 0.0, 0.0); triangle->GetPoints()->SetPoint(1, 1.0, 0.0, 0.0); triangle->GetPoints()->SetPoint(2, 0.0, 1.0, 0.0); double area = triangle->ComputeArea(); if (!fuzzyCompare(area, 0.5)) { cerr << "ERROR: triangle area is " << area << ", should be 0.5" << endl; return EXIT_FAILURE; } // Testing degenerated triangle double pntDeg0[3] = { 0, 0, -10 }; double pntDeg1[3] = { 0, 0, 0 }; double pntDeg2[3] = { 0, 0, 10 }; vtkNew<vtkTriangle> triangleDeg; triangleDeg->GetPoints()->SetPoint(0, pntDeg0); triangleDeg->GetPoints()->SetPoint(1, pntDeg1); triangleDeg->GetPoints()->SetPoint(2, pntDeg2); double p1[3] = { 0, 1, 1 }; double p2[3] = { 0, -1, 1 }; double t; double x[3]; double pcoords[3]; int subId; double dEpsilon = std::numeric_limits<double>::epsilon(); if (triangleDeg->IntersectWithLine(p1, p2, dEpsilon, t, x, pcoords, subId) != 1 || x[0] != 0 || x[1] != 0 || x[2] != 1 || t != 0.5 || pcoords[0] != 1.1 || pcoords[1] != 0.55 || pcoords[2] != 0) { cerr << "Error while intersecting degenerated triangle" << endl; return EXIT_FAILURE; } double p1b[3] = { 0, 1, 10.001 }; double p2b[3] = { 0, -1, 10.001 }; if (triangleDeg->IntersectWithLine(p1b, p2b, dEpsilon, t, x, pcoords, subId) != 0) { cerr << "Error while intersecting degenerated triangle" << endl; return EXIT_FAILURE; } return EXIT_SUCCESS; }
#include "atk/toolkit.h"
#include "atkui/framework.h"
#include "atkui/skeleton_drawer.h"

#include <algorithm>
#include <cmath>
#include <string>

using namespace glm;
using namespace atk;
using namespace atkui;

// Interactive viewer that plays back a BVH walking motion and lets the user
// re-position (W/A/S/D) and re-orient (left/right arrows) the whole clip.
// Every edit rebuilds the reoriented motion from the original and restarts
// playback.
class AReorient : public atkui::Framework {
 public:
  AReorient() : atkui::Framework(atkui::Perspective) {}
  virtual ~AReorient() {}

  // Loads the source motion and initializes the offset so the character
  // keeps the original root height (only y is taken from frame 0).
  void setup() {
    BVHReader reader;
    reader.load("../motions/Beta/walking.bvh", _skeleton, _motion);
    _heading = 0;
    _offset = vec3(0);
    _offset[1] = _motion.getKey(0).rootPos[1];
    _time = 0;
    _reoriented = reorient(_motion, _offset, _heading);
  }

  // Returns a copy of |motion| rotated by |heading| radians around the world
  // up axis and translated so its first-frame root lands at |pos|.
  // Each frame's root transform is re-expressed as desired * I * origin,
  // where I moves the clip's first-frame root to the world origin first.
  Motion reorient(const Motion& motion, const vec3& pos, float heading) {
    Motion result;
    result.setFramerate(motion.getFramerate());

    // compute transformations
    quat desiredRot = glm::angleAxis(heading, vec3(0,1,0));
    Transform desired = Transform::Rot(desiredRot);
    desired.setT(pos);
    Transform I = Transform::Translate(-motion.getKey(0).rootPos);

    for (int i = 0; i < motion.getNumKeys(); i++) {
      Pose pose = motion.getKey(i);
      vec3 d = pose.rootPos;
      quat rot = pose.jointRots[0];
      Transform origin = Transform();
      origin.setR(rot);
      origin.setT(d);
      // new root = desired * (root relative to first frame)
      Transform move = desired * I * origin;
      pose.jointRots[0] = move.r();
      pose.rootPos = move.t();
      result.appendKey(pose);
    }

    return result;
  }

  // Advances playback at half speed and poses the skeleton accordingly.
  void update() {
    _time += dt() * 0.5;
    _reoriented.update(_skeleton, _time);
  }

  // Per-frame draw callback: advance the animation, then draw the skeleton.
  void scene() {
    update();
    SkeletonDrawer drawer;
    drawer.draw(_skeleton, *this);
  }

  // Keyboard controls. Arrows rotate the clip by pi/8; W/S move along z,
  // A/D along x (25 units per press). Every change rebuilds _reoriented
  // from the untouched _motion and restarts playback (_time = 0).
  void keyUp(int key, int mods) {
    if (key == GLFW_KEY_LEFT) {
      _heading += M_PI/8;
      _reoriented = reorient(_motion, _offset, _heading);
      _time = 0;
    } else if (key == GLFW_KEY_RIGHT) {
      _heading -= M_PI/8;
      _reoriented = reorient(_motion, _offset, _heading);
      _time = 0;
    }

    if (key == 'W') {
      _offset[2] += 25;
      _reoriented = reorient(_motion, _offset, _heading);
      std::cout << _offset << std::endl;
      _time = 0;
    } else if (key == 'S') {
      _offset[2] -= 25;
      _reoriented = reorient(_motion, _offset, _heading);
      _time = 0;
      std::cout << _offset << std::endl;
    } else if (key == 'A') {
      _offset[0] += 25;
      _reoriented = reorient(_motion, _offset, _heading);
      _time = 0;
      std::cout << _offset << std::endl;
    } else if (key == 'D') {
      _offset[0] -= 25;
      _reoriented = reorient(_motion, _offset, _heading);
      _time = 0;
      std::cout << _offset << std::endl;
    }
  }

  Skeleton _skeleton;    // posed and drawn each frame
  Motion _motion;        // original clip, never modified after load
  Motion _reoriented;    // clip after current offset/heading are applied
  vec3 _offset;          // desired first-frame root position
  float _heading;        // rotation about the up axis, radians
  float _time;           // playback clock, seconds (half real-time)
};

int main(int argc, char** argv) {
  AReorient viewer;
  viewer.run();
  return 0;
}
// Implementation of NTP1SendTxData: selects NTP1 token inputs, distributes
// token amounts to recipients, accumulates change, and produces the
// intermediary transfer instructions (TIs) used to build the transaction.
//
// Fixes in this revision:
//  - removed the duplicated #include "ntp1sendtxdata.h"
//  - added the stdlib headers this file uses directly
//    (<cmath>, <deque>, <numeric>, <unordered_set>)
//  - corrected misspelled error messages ("spicified"/"speicified")
//  - seeded std::accumulate with a uint64_t instead of the int literal 0,
//    which made the accumulator an int
#include "ntp1sendtxdata.h"

#include "init.h"
#include "util.h"
#include "wallet.h"
#include "json/json_spirit.h"

#include <algorithm>
#include <cmath>
#include <deque>
#include <numeric>
#include <random>
#include <unordered_set>

const std::string NTP1SendTxData::NEBL_TOKEN_ID = "NEBL";
// token id of new non-existent token (placeholder)
const std::string NTP1SendTxData::TO_ISSUE_TOKEN_ID = "NEW";

// Inputs chosen by the last successful selectNTP1Tokens() call.
std::vector<NTP1OutPoint> NTP1SendTxData::getUsedInputs() const
{
    if (!ready)
        throw std::runtime_error("NTP1SendTxData not ready; cannot get used inputs");
    return tokenSourceInputs;
}

// Token-id -> leftover amount that must be returned as change.
std::map<std::string, NTP1Int> NTP1SendTxData::getChangeTokens() const
{
    if (!ready)
        throw std::runtime_error("NTP1SendTxData not ready; cannot get change amounts");
    return totalChangeTokens;
}

NTP1SendTxData::NTP1SendTxData() { /*fee = 0;*/ }

// Sums recipient amounts per token id. Issuance placeholders are skipped:
// newly minted tokens need no pre-existing balance.
std::map<std::string, NTP1Int>
CalculateRequiredTokenAmounts(const std::vector<NTP1SendTokensOneRecipientData>& recipients)
{
    std::map<std::string, NTP1Int> required_amounts;
    for (const auto& r : recipients) {
        if (r.tokenId == NTP1SendTxData::TO_ISSUE_TOKEN_ID) {
            // there's no required NTP1 token amount for issuance, unlike transfer, because
            // we're minting
            continue;
        }
        if (required_amounts.find(r.tokenId) == required_amounts.end()) {
            required_amounts[r.tokenId] = 0;
        }
        required_amounts[r.tokenId] += r.amount;
    }
    return required_amounts;
}

// Throws unless the recipients list is consistent with the registered
// issuance data: at most one issuance recipient, and issuance data and an
// issuance recipient must appear together.
void NTP1SendTxData::verifyNTP1IssuanceRecipientsValidity(
    const std::vector<NTP1SendTokensOneRecipientData>& recipients)
{
    int issuanceCount = 0;
    for (const NTP1SendTokensOneRecipientData& r : recipients) {
        if (r.tokenId == NTP1SendTxData::TO_ISSUE_TOKEN_ID) {
            issuanceCount++;
        }
    }
    if (issuanceCount > 1) {
        throw std::runtime_error("Only one recipient of an issuance transaction can be present.");
    }
    if (issuanceCount > 0 && !tokenToIssueData) {
        // fixed typos: "spicified" -> "specified", "speicified" -> "specified"
        throw std::runtime_error("While a recipient was specified to receive newly minted tokens, no "
                                 "issuance data was specified.");
    }
    if (issuanceCount == 0 && tokenToIssueData) {
        throw std::runtime_error("While issuance data was provided, no recipient for issued/minted "
                                 "tokens was specified in the list of recipients.");
    }
}

// get available balances, either from inputs (if provided) or from the wallet
std::map<std::string, NTP1Int> GetAvailableTokenBalances(NTP1WalletPtr wallet,
                                                         const std::vector<NTP1OutPoint>& inputs,
                                                         bool useBalancesFromWallet)
{
    std::map<std::string, NTP1Int> balancesMap;
    if (useBalancesFromWallet) {
        // get token balances from the wallet
        balancesMap = wallet->getBalancesMap();
    } else {
        // loop over all inputs and collect the total amount of tokens available
        for (const auto& input : inputs) {
            const std::unordered_map<NTP1OutPoint, NTP1Transaction> availableOutputsMap =
                wallet->getWalletOutputsWithTokens();
            auto it = availableOutputsMap.find(input);
            if (it != availableOutputsMap.end()) {
                const NTP1Transaction& ntp1tx = it->second;
                if (input.getIndex() + 1 > ntp1tx.getTxOutCount()) {
                    throw std::runtime_error(
                        "An output you have of transaction " + ntp1tx.getTxHash().ToString() +
                        " claims that you have an invalid output number: " +
                        ::ToString(input.getIndex()));
                }
                // loop over tokens
                for (int i = 0; i < (int)ntp1tx.getTxOut(input.getIndex()).tokenCount(); i++) {
                    const NTP1TokenTxData& tokenT = ntp1tx.getTxOut(input.getIndex()).getToken(i);
                    const auto balanceIt = balancesMap.find(tokenT.getTokenId());
                    if (balanceIt == balancesMap.end()) {
                        balancesMap[tokenT.getTokenId()] = 0;
                    }
                    balancesMap[tokenT.getTokenId()] += tokenT.getAmount();
                }
            }
        }
    }
    return balancesMap;
}

// Sums the nebl value of the given wallet outputs (duplicates are removed
// first; the vector is deliberately taken by value for that).
int64_t CalculateTotalNeblsInInputs(std::vector<NTP1OutPoint> inputs)
{
    {
        std::unordered_set<NTP1OutPoint> inputsSet(inputs.begin(), inputs.end());
        inputs = std::vector<NTP1OutPoint>(inputsSet.begin(), inputsSet.end());
    }

    int64_t currentTotalNeblsInSelectedInputs = 0;
    for (const auto& input : inputs) {
        auto it = pwalletMain->mapWallet.find(input.getHash());
        if (it == pwalletMain->mapWallet.end()) {
            throw std::runtime_error("The transaction: " + input.getHash().ToString() +
                                     " was not found in the wallet.");
        }
        const CTransaction& tx = it->second;
        if (input.getIndex() + 1 > tx.vout.size()) {
            throw std::runtime_error("An invalid output index: " + ::ToString(input.getIndex()) +
                                     " of transaction " + input.getHash().ToString() + " was used.");
        }
        currentTotalNeblsInSelectedInputs +=
            static_cast<int64_t>(tx.vout.at(input.getIndex()).nValue);
    }
    return currentTotalNeblsInSelectedInputs;
}

// Convenience overload: converts COutPoints to NTP1OutPoints and forwards.
void NTP1SendTxData::selectNTP1Tokens(NTP1WalletPtr wallet, const std::vector<COutPoint>& inputs,
                                      const std::vector<NTP1SendTokensOneRecipientData>& recipients,
                                      bool addMoreInputsIfRequired)
{
    std::vector<NTP1OutPoint> ntp1OutPoints;
    std::transform(inputs.begin(), inputs.end(), std::back_inserter(ntp1OutPoints),
                   [](const COutPoint& o) { return NTP1OutPoint(o.hash, o.n); });
    selectNTP1Tokens(wallet, ntp1OutPoints, recipients, addMoreInputsIfRequired);
}

// Registers issuance data; must be called before selectNTP1Tokens().
void NTP1SendTxData::issueNTP1Token(const IssueTokenData& data)
{
    if (ready) {
        throw std::runtime_error("You should register issuing a token before processing NTP1 tokens, "
                                 "in order for the new tokens to be taken into account");
    }
    tokenToIssueData = data;
}

boost::optional<IssueTokenData> NTP1SendTxData::getNTP1TokenIssuanceData() const
{
    return tokenToIssueData;
}

bool NTP1SendTxData::getWhetherIssuanceExists() const { return tokenToIssueData.is_initialized(); }

// Main selection routine. Picks inputs covering the recipients' token
// amounts (optionally pulling more from the wallet), fills intermediaryTIs
// with transfer instructions, and records change. Sets ready=true on success.
// |inputs| and |recipients| are taken by value: both are filtered/deduped.
void NTP1SendTxData::selectNTP1Tokens(NTP1WalletPtr wallet, std::vector<NTP1OutPoint> inputs,
                                      std::vector<NTP1SendTokensOneRecipientData> recipients,
                                      bool addMoreInputsIfRequired)
{
    // start from a clean state so the object can be reused
    totalTokenAmountsInSelectedInputs.clear();
    tokenSourceInputs.clear();
    totalChangeTokens.clear();
    intermediaryTIs.clear();
    recipientsList.clear();
    usedWallet.reset();

    // remove non-NTP1 recipients (nebl recipients)
    recipients.erase(std::remove_if(recipients.begin(), recipients.end(),
                                    [](const NTP1SendTokensOneRecipientData& r) {
                                        return (r.tokenId == NTP1SendTxData::NEBL_TOKEN_ID);
                                    }),
                     recipients.end());

    verifyNTP1IssuanceRecipientsValidity(recipients);

    // remove inputs duplicates
    {
        std::unordered_set<NTP1OutPoint> inputsSet(inputs.begin(), inputs.end());
        inputs = std::vector<NTP1OutPoint>(inputsSet.begin(), inputsSet.end());
    }

    // collect all required amounts in one map, with tokenId vs amount
    const std::map<std::string, NTP1Int> targetAmounts = CalculateRequiredTokenAmounts(recipients);

    // get available balances, either from inputs (if provided) or from the wallet
    const std::map<std::string, NTP1Int> balancesMap =
        GetAvailableTokenBalances(wallet, inputs, addMoreInputsIfRequired);

    // check whether the required amounts can be covered by the available balances
    for (const auto& required_amount : targetAmounts) {
        if (required_amount.first == NTP1SendTxData::NEBL_TOKEN_ID) {
            // ignore nebls, deal only with tokens
            continue;
        }
        if (required_amount.first == NTP1SendTxData::TO_ISSUE_TOKEN_ID) {
            // ignore newly issued tokens, as no inputs will ever satisfy them
            continue;
        }
        auto available_balance = balancesMap.find(required_amount.first);
        if (available_balance != balancesMap.end()) {
            if (required_amount.second > available_balance->second) {
                throw std::runtime_error(
                    "Your balance/selected inputs is not sufficient to cover for " +
                    wallet->getTokenName(required_amount.first));
            }
        } else {
            throw std::runtime_error("You're trying to spend tokens that you don't own or are not "
                                     "included in the inputs you selected; namely: " +
                                     wallet->getTokenName(required_amount.first));
        }
    }

    // calculate reserved balances to be used in this transaction
    const std::unordered_map<NTP1OutPoint, NTP1Transaction> walletOutputsMap =
        wallet->getWalletOutputsWithTokens();

    std::deque<NTP1OutPoint> availableOutputs;
    if (addMoreInputsIfRequired) {
        // assume that inputs automatically has to be gathered from the wallet
        for (const auto& el : walletOutputsMap) {
            availableOutputs.push_back(el.first);
        }
        for (const auto& el : inputs) {
            tokenSourceInputs.push_back(el);
        }
    } else {
        for (const auto& el : inputs) {
            tokenSourceInputs.push_back(el);
            availableOutputs.push_back(el);
        }
    }

    // remove inputs duplicates
    {
        std::unordered_set<NTP1OutPoint> inputsSet(tokenSourceInputs.begin(),
                                                   tokenSourceInputs.end());
        tokenSourceInputs = std::vector<NTP1OutPoint>(inputsSet.begin(), inputsSet.end());
    }

    {
        std::random_device rd;
        std::mt19937       g(rd());
        // to improve privacy, shuffle inputs; pseudo-random is good enough here
        std::shuffle(availableOutputs.begin(), availableOutputs.end(), g);
    }

    // this container will be filled and must have tokens that are higher than the required
    // amounts; reset fulfilled amounts and change to zero
    for (const std::pair<const std::string, NTP1Int>& el : targetAmounts) {
        totalTokenAmountsInSelectedInputs[el.first] = 0;
    }

    // fill tokenSourceInputs if inputs are not given
    for (const std::pair<const std::string, NTP1Int>& targetAmount : targetAmounts) {
        for (int i = 0; i < (int)availableOutputs.size(); i++) {
            const auto& output   = availableOutputs.at(i);
            auto        ntp1TxIt = walletOutputsMap.find(output);
            if (ntp1TxIt == walletOutputsMap.end()) {
                // if the output is not found in the NTP1 wallet outputs, it means that it doesn't
                // have NTP1 tokens, so skip
                continue;
            }
            const NTP1Transaction& txData    = ntp1TxIt->second;
            const NTP1TxOut&       ntp1txOut = txData.getTxOut(output.getIndex());

            auto numOfTokensInOutput = ntp1txOut.tokenCount();

            bool takeThisOutput = false;
            if (addMoreInputsIfRequired) {
                for (auto j = 0u; j < numOfTokensInOutput; j++) {
                    std::string outputTokenId = ntp1txOut.getToken(j).getTokenId();
                    // if token id matches in the transaction with the required one, take it into
                    // account
                    NTP1Int required_amount_still =
                        targetAmount.second - totalTokenAmountsInSelectedInputs[outputTokenId];
                    if (targetAmount.first == outputTokenId && required_amount_still > 0) {
                        takeThisOutput = true;
                        break;
                    }
                }
            } else {
                // take all prev outputs
                takeThisOutput = true;
            }

            // take this transaction by
            // 1. remove it from the vector of available outputs
            // 2. add its values to fulfilledTokenAmounts
            // 3. add the address to the list of inputs to use (pointless if a list of inputs was
            //    provided)
            if (takeThisOutput) {
                for (auto j = 0u; j < numOfTokensInOutput; j++) {
                    std::string outputTokenId = ntp1txOut.getToken(j).getTokenId();
                    totalTokenAmountsInSelectedInputs[outputTokenId] +=
                        ntp1txOut.getToken(j).getAmount();
                }
                tokenSourceInputs.push_back(output);
                availableOutputs.erase(availableOutputs.begin() + i);
                i--;
                if (availableOutputs.size() == 0) {
                    break;
                }
            }
        }
    }

    recipientsList.assign(recipients.begin(), recipients.end());

    // remove empty elements from total from inputs
    for (auto it = totalTokenAmountsInSelectedInputs.begin();
         it != totalTokenAmountsInSelectedInputs.end();) {
        if (it->second == 0)
            it = totalTokenAmountsInSelectedInputs.erase(it);
        else
            ++it;
    }

    // remove inputs duplicates
    {
        std::unordered_set<NTP1OutPoint> inputsSet(tokenSourceInputs.begin(),
                                                   tokenSourceInputs.end());
        tokenSourceInputs = std::vector<NTP1OutPoint>(inputsSet.begin(), inputsSet.end());
    }

    // sort inputs by which has more tokens first
    std::sort(tokenSourceInputs.begin(), tokenSourceInputs.end(),
              [&walletOutputsMap](const NTP1OutPoint& o1, const NTP1OutPoint& o2) {
                  auto it1    = walletOutputsMap.find(o1);
                  auto it2    = walletOutputsMap.find(o2);
                  int  count1 = 0;
                  int  count2 = 0;
                  if (it1 != walletOutputsMap.end()) {
                      const NTP1Transaction& tx1 = it1->second;
                      if (o1.getIndex() + 1 > tx1.getTxOutCount()) {
                          throw std::runtime_error("While sorting inputs in NTP1 selector, output "
                                                   "index is out of range for: " +
                                                   o1.getHash().ToString() + ":" +
                                                   ::ToString(o1.getIndex()));
                      }
                      count1 = tx1.getTxOut(o1.getIndex()).tokenCount();
                  }
                  if (it2 != walletOutputsMap.end()) {
                      const NTP1Transaction& tx2 = it2->second;
                      if (o2.getIndex() + 1 > tx2.getTxOutCount()) {
                          throw std::runtime_error("While sorting inputs in NTP1 selector, output "
                                                   "index is out of range for: " +
                                                   o2.getHash().ToString() + ":" +
                                                   ::ToString(o2.getIndex()));
                      }
                      count2 = tx2.getTxOut(o2.getIndex()).tokenCount();
                  }
                  return count1 > count2;
              });

    // this map has depletable balances to be consumed while filling TIs
    std::unordered_map<NTP1OutPoint, NTP1TxOut> decreditMap;
    for (const auto& in : tokenSourceInputs) {
        // get the output
        auto it = walletOutputsMap.find(in);
        if (it == walletOutputsMap.end()) {
            // No NTP1 token in this input
            continue;
        }
        // extract the transaction from the output
        const NTP1Transaction& ntp1tx = it->second;
        if (in.getIndex() + 1 > ntp1tx.getTxOutCount()) {
            throw std::runtime_error(
                "While attempting to credit recipients, input index is out of range for: " +
                in.getHash().ToString() + ":" + ::ToString(in.getIndex()));
        }
        decreditMap[in] = ntp1tx.getTxOut(in.getIndex());
    }

    // if this is an issuance transaction, add the issuance TI
    if (tokenToIssueData.is_initialized()) {
        IntermediaryTI iti;

        NTP1Script::TransferInstruction ti;
        // issuance output is always the first one (will be transformed in CreateTransaction)
        ti.outputIndex = 0;
        ti.skipInput   = false;
        ti.amount      = tokenToIssueData.get().amount;

        iti.isNTP1TokenIssuance = true;
        iti.TIs.push_back(ti);
        intermediaryTIs.push_back(iti);
    }

    // copy of the recipients to deduce the amounts they received
    std::vector<NTP1SendTokensOneRecipientData> recps = recipients;

    // for every input, for every NTP1 token kind, move them to the recipients
    // loop u: looping over inputs
    // loop i: looping over token kinds inside input "u"
    // loop j: looping over recipients, and give them the tokens they require,
    //         from input "u", and token kind "i"
    for (int u = 0; u < (int)tokenSourceInputs.size(); u++) {
        const auto& in = tokenSourceInputs[u];

        IntermediaryTI iti;
        iti.input = in;

        // "in" is guaranteed to be in the map because it comes from tokenSourceInputs
        NTP1TxOut& ntp1txOut = decreditMap[in];
        for (int i = 0; i < (int)ntp1txOut.tokenCount(); i++) {
            NTP1TokenTxData& token = ntp1txOut.getToken(i);
            for (int j = 0; j < (int)recps.size(); j++) {
                // if the token id matches and the recipient needs more, give them that amount (by
                // subtracting the amount from the recipient)
                if (ntp1txOut.getToken(i).getTokenId() == recps[j].tokenId &&
                    recps[j].amount > 0) {
                    if (recps[j].tokenId == TO_ISSUE_TOKEN_ID) {
                        throw std::runtime_error(
                            "An issuance transaction cannot have transfer elements "
                            "in it except for the issued transaction. Everything "
                            "else should go into change.");
                    }

                    NTP1Script::TransferInstruction ti;

                    // there's still more for the recipient. Aggregate from possible adjacent
                    // tokens! aggregation: loop over inputs and tokens, check the ids, and add
                    // them to the current recipient
                    bool stop = false;
                    for (int v = u; v < (int)tokenSourceInputs.size(); v++) {
                        // "inComp" is guaranteed to be in the map because it comes from
                        // tokenSourceInputs
                        const auto& inComp        = tokenSourceInputs[v];
                        NTP1TxOut&  ntp1txOutComp = decreditMap[inComp];
                        for (int k = (v == u ? i : 0); k < (int)ntp1txOutComp.tokenCount(); k++) {
                            // if the adjacent token id is not the same, break and move on
                            if (ntp1txOut.getToken(i).getTokenId() !=
                                ntp1txOutComp.getToken(k).getTokenId()) {
                                stop = true;
                                break;
                            }
                            // the token slot that the recipient will take from for aggregation
                            NTP1TokenTxData& tokenComp = ntp1txOutComp.getToken(k);
                            if (recps[j].amount >= tokenComp.getAmount()) {
                                // the token amount required by the recipient is larger than the
                                // amount in the token slot, hence the amount in the slot is set
                                // to zero
                                recps[j].amount -= tokenComp.getAmount();
                                ti.amount += tokenComp.getAmount();
                                tokenComp.setAmount(0);
                            } else {
                                // the token amount required by the recipient is smaller than the
                                // amount in the token slot, hence the recipient is set to zero
                                tokenComp.setAmount(tokenComp.getAmount() - recps[j].amount);
                                ti.amount += recps[j].amount;
                                recps[j].amount = 0;
                                // recipient amount is fulfilled. Break and move on
                                stop = true;
                                break;
                            }
                        }
                        if (stop) {
                            break;
                        }
                    }

                    // add that this input will go to recipient j
                    ti.outputIndex = j;
                    ti.skipInput   = false;
                    if (ti.amount > 0) {
                        iti.TIs.push_back(ti);
                    }
                }
            }
            // after having gone through all recipients and given them all their amounts of the
            // token "in", now we see if there's more to be added to change
            if (token.getAmount() > 0) {
                NTP1Script::TransferInstruction ti;
                // Aggregate adjacent change tokens. Aggregate from possible adjacent tokens!
                bool stop = false;
                for (int v = u; v < (int)tokenSourceInputs.size(); v++) {
                    // "inComp" is guaranteed to be in the map because it comes from
                    // tokenSourceInputs
                    const auto& inComp        = tokenSourceInputs[v];
                    NTP1TxOut&  ntp1txOutComp = decreditMap[inComp];
                    for (int k = (v == u ? i : 0); k < (int)ntp1txOutComp.tokenCount(); k++) {
                        // if the adjacent token id is not the same, break and move on
                        if (ntp1txOut.getToken(i).getTokenId() !=
                            ntp1txOutComp.getToken(k).getTokenId()) {
                            stop = true;
                            break;
                        }
                        // the token slot that the recipient will take from for aggregation
                        NTP1TokenTxData& tokenComp = ntp1txOutComp.getToken(k);
                        ti.amount += tokenComp.getAmount();
                        // add change to total change
                        const std::string tokenId = ntp1txOut.getToken(i).getTokenId();
                        if (totalChangeTokens.find(tokenId) == totalChangeTokens.end()) {
                            totalChangeTokens[tokenId] = 0;
                        }
                        totalChangeTokens[tokenId] += tokenComp.getAmount();
                        tokenComp.setAmount(0);
                    }
                    if (stop) {
                        break;
                    }
                }
                // route the leftover to the (not-yet-known) change output
                ti.outputIndex = IntermediaryTI::CHANGE_OUTPUT_FAKE_INDEX;
                ti.skipInput   = false;
                iti.TIs.push_back(ti);
            }
        }
        // ITIs can have zero TIs, because they carry important input information still
        intermediaryTIs.push_back(iti);
    }

    // make sure that all recipients have received their tokens
    for (const auto& r : recps) {
        // we don't select nebls
        if (r.tokenId == NTP1SendTxData::NEBL_TOKEN_ID) {
            continue;
        }
        // we ignore tokens to issue, those are to be minted
        if (r.tokenId == NTP1SendTxData::TO_ISSUE_TOKEN_ID) {
            continue;
        }
        if (r.amount != 0) {
            throw std::runtime_error("The recipient " + r.destination + "; of token: " + r.tokenId +
                                     "; still has an unfulfilled amount: " + ::ToString(r.amount) +
                                     ". This should've been spotted earlier.");
        }
    }

    // remove empty elements from change
    for (auto it = totalChangeTokens.begin(); it != totalChangeTokens.end();) {
        if (it->second == 0)
            it = totalChangeTokens.erase(it);
        else
            ++it;
    }

    usedWallet = wallet;
    ready      = true;
}

std::map<std::string, NTP1Int> NTP1SendTxData::getTotalTokensInInputs() const
{
    if (!ready)
        throw std::runtime_error("NTP1SendTxData not ready; cannot get total tokens in inputs");
    return totalTokenAmountsInSelectedInputs;
}

bool NTP1SendTxData::isReady() const { return ready; }

std::vector<NTP1SendTokensOneRecipientData> NTP1SendTxData::getNTP1TokenRecipientsList() const
{
    if (!ready)
        throw std::runtime_error("NTP1SendTxData not ready; cannot get the recipients list");
    return recipientsList;
}

NTP1WalletPtr NTP1SendTxData::getWallet() const
{
    if (!ready)
        throw std::runtime_error(
            "NTP1SendTxData not ready; cannot get the wallet used in calculations");
    return usedWallet;
}

std::vector<IntermediaryTI> NTP1SendTxData::getIntermediaryTIs() const { return intermediaryTIs; }

// True if any intermediary TI carries at least one transfer instruction.
bool NTP1SendTxData::hasNTP1Tokens() const
{
    // fix: seed accumulate with uint64_t{0}; the previous int literal made the
    // accumulator an int despite the uint64_t lambda parameters
    uint64_t total =
        std::accumulate(intermediaryTIs.begin(), intermediaryTIs.end(), uint64_t{0},
                        [](uint64_t curr, const IntermediaryTI& iti) {
                            return curr + iti.TIs.size();
                        });
    return (total != 0);
}

// Minimum nebl amount the outputs must carry: one MIN_TX_FEE per recipient,
// one for OP_RETURN, one for change (if any), plus the issuance fee.
uint64_t NTP1SendTxData::getRequiredNeblsForOutputs() const
{
    if (!ready)
        throw std::runtime_error("NTP1SendTxData not ready; cannot get required fees");
    if (intermediaryTIs.size() > 0) {
        int64_t issuanceFee = (tokenToIssueData.is_initialized() ? NTP1Transaction::IssuanceFee : 0);
        int64_t changeCount = (this->getChangeTokens().size() > 0 ? 1 : 0);
        // + 1 is for OP_RETURN output
        return MIN_TX_FEE * (recipientsList.size() + 1 + changeCount) + issuanceFee;
    } else {
        return 0;
    }
}

// Classic size heuristic: ~181 bytes per input, 34 per output, 10 overhead.
int64_t NTP1SendTxData::EstimateTxSizeInBytes(int64_t num_of_inputs, int64_t num_of_outputs)
{
    return num_of_inputs * 181 + num_of_outputs * 34 + 10;
}

// Fee proportional to the estimated size, rounded up to the nearest 10000.
int64_t NTP1SendTxData::EstimateTxFee(int64_t num_of_inputs, int64_t num_of_outputs)
{
    double Fee = static_cast<double>(MIN_TX_FEE) *
                 (static_cast<double>(EstimateTxSizeInBytes(num_of_inputs, num_of_outputs)) / 1000.);
    // nearest 10000
    return static_cast<int64_t>(std::ceil(Fee / 10000) * 10000);
}

// Rewrites the placeholder change index with the real change output index
// once the final transaction layout is known.
void NTP1SendTxData::FixTIsChangeOutputIndex(std::vector<NTP1Script::TransferInstruction>& TIs,
                                             int changeOutputIndex)
{
    for (auto& ti : TIs) {
        if (ti.outputIndex == IntermediaryTI::CHANGE_OUTPUT_FAKE_INDEX) {
            ti.outputIndex = changeOutputIndex;
        }
    }
}
//===- RegAllocGreedy.cpp - greedy register allocator ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <queue>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

// Pass-wide statistics, printed with -stats.
STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

// Command-line knobs for tuning and debugging the allocator.
static cl::opt<SplitEditor::ComplementSpillMode> SplitSpillMode(
    "split-spill-mode", cl::Hidden,
    cl::desc("Spill mode for splitting live ranges"),
    cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
               clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),
               clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")),
    cl::init(SplitEditor::SM_Speed))
;

static cl::opt<unsigned>
LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
                             cl::desc("Last chance recoloring max depth"),
                             cl::init(5));

static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
    "lcr-max-interf", cl::Hidden,
    cl::desc("Last chance recoloring maximum number of considered"
             " interference at a time"),
    cl::init(8));

static cl::opt<bool> ExhaustiveSearch(
    "exhaustive-register-search", cl::NotHidden,
    cl::desc("Exhaustive Search for 
registers bypassing the depth "
             "and interference cutoffs of last chance recoloring"),
    cl::Hidden);

static cl::opt<bool> EnableLocalReassignment(
    "enable-local-reassign", cl::Hidden,
    cl::desc("Local reassignment can yield better allocation decisions, but "
             "may be compile time intensive"),
    cl::init(false));

static cl::opt<bool> EnableDeferredSpilling(
    "enable-deferred-spilling", cl::Hidden,
    cl::desc("Instead of spilling a variable right away, defer the actual "
             "code insertion to the end of the allocation. That way the "
             "allocator might still find a suitable coloring for this "
             "variable because of other evicted variables."),
    cl::init(false));

// FIXME: Find a good default for this flag and remove the flag.
static cl::opt<unsigned>
    CSRFirstTimeCost("regalloc-csr-first-time-cost",
                     cl::desc("Cost for first time use of callee-saved register."),
                     cl::init(0), cl::Hidden);

static cl::opt<bool> ConsiderLocalIntervalCost(
    "consider-local-interval-cost", cl::Hidden,
    cl::desc("Consider the cost of local intervals created by a split "
             "candidate when choosing the best split candidate."),
    cl::init(false));

// Register this allocator under the name "greedy" so -regalloc=greedy selects
// it through the RegAllocRegistry.
static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {

/// The greedy register allocator: a MachineFunctionPass driven by
/// RegAllocBase's priority-queue framework, also acting as the delegate for
/// LiveRangeEdit so it is notified when live ranges are erased/shrunk/cloned.
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // Convenient shortcuts.
  using PQueue = std::priority_queue<std::pair<unsigned, unsigned>>;
  using SmallLISet = SmallPtrSet<LiveInterval *, 4>;
  using SmallVirtRegSet = SmallSet<Register, 16>;

  // context
  MachineFunction *MF;

  // Shortcuts to some useful interface.
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  RegisterClassInfo RCI;

  // analyses
  // NOTE(review): raw pointers below look like non-owning references to
  // pass-manager-owned analyses — presumably wired up in runOnMachineFunction,
  // which is outside this chunk; confirm before relying on lifetimes.
  SlotIndexes *Indexes;
  MachineBlockFrequencyInfo *MBFI;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  MachineOptimizationRemarkEmitter *ORE;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;
  AliasAnalysis *AA;

  // state
  std::unique_ptr<Spiller> SpillerInstance;
  PQueue Queue;
  unsigned NextCascade;
  std::unique_ptr<VirtRegAuxInfo> VRAI;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// Live range is in memory. Because of other evictions, it might get moved
    /// in a register in the end.
    RS_Memory,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  // Enum CutOffStage to keep a track whether the register allocation failed
  // because of the cutoffs encountered in last chance recoloring.
  // Note: This is used as bitmask. New value should be next power of 2.
enum CutOffStage {
    // No cutoffs encountered
    CO_None = 0,

    // lcr-max-depth cutoff encountered
    CO_Depth = 1,

    // lcr-max-interf cutoff encountered
    CO_Interf = 2
  };

  // Bitmask of CutOffStage flags accumulated while recoloring.
  uint8_t CutOffInfo;

#ifndef NDEBUG
  static const char *const StageName[];
#endif

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage = RS_New;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade = 0;

    RegInfo() = default;
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg()].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg()].Stage = Stage;
  }

  // Set the stage of a whole range of virtual registers, but only promote
  // entries that are still RS_New.
  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      Register Reg = *Begin;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints = 0; ///< Total number of broken hints.
    float MaxWeight = 0;      ///< Maximum spill weight evicted.

    EvictionCost() = default;

    bool isMax() const { return BrokenHints == ~0u; }

    void setMax() { BrokenHints = ~0u; }

    void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }

    // Lexicographic order: broken hints dominate, then max evicted weight.
    bool operator<(const EvictionCost &O) const {
      return std::tie(BrokenHints, MaxWeight) <
             std::tie(O.BrokenHints, O.MaxWeight);
    }
  };

  /// EvictionTrack - Keeps track of past evictions in order to optimize region
  /// split decision.
  class EvictionTrack {

  public:
    using EvictorInfo =
        std::pair<Register /* evictor */, MCRegister /* physreg */>;
    using EvicteeInfo = llvm::DenseMap<Register /* evictee */, EvictorInfo>;

  private:
    /// Each Vreg that has been evicted in the last stage of selectOrSplit will
    /// be mapped to the evictor Vreg and the PhysReg it was evicted from.
    EvicteeInfo Evictees;

  public:
    /// Clear all eviction information.
    void clear() { Evictees.clear(); }

    /// Clear eviction information for the given evictee Vreg.
    /// E.g. when Vreg get's a new allocation, the old eviction info is no
    /// longer relevant.
    /// \param Evictee The evictee Vreg for whom we want to clear collected
    ///        eviction info.
    void clearEvicteeInfo(Register Evictee) { Evictees.erase(Evictee); }

    /// Track new eviction.
    /// The Evictor vreg has evicted the Evictee vreg from Physreg.
    /// \param PhysReg The physical register Evictee was evicted from.
    /// \param Evictor The evictor Vreg that evicted Evictee.
    /// \param Evictee The evictee Vreg.
    void addEviction(MCRegister PhysReg, Register Evictor, Register Evictee) {
      Evictees[Evictee].first = Evictor;
      Evictees[Evictee].second = PhysReg;
    }

    /// Return the Evictor Vreg which evicted Evictee Vreg from PhysReg.
    /// \param Evictee The evictee vreg.
    /// \return The Evictor vreg which evicted Evictee vreg from PhysReg. 0 if
    ///         nobody has evicted Evictee from PhysReg.
    EvictorInfo getEvictor(Register Evictee) {
      if (Evictees.count(Evictee)) {
        return Evictees[Evictee];
      }

      return EvictorInfo(0, 0);
    }
  };

  // Keeps track of past evictions in order to optimize region split decision.
  EvictionTrack LastEvicted;

  // splitting state.
  std::unique_ptr<SplitAnalysis> SA;
  std::unique_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    MCRegister PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, MCRegister Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[I] = C for every live bundle where B[I] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (unsigned I : LiveBundles.set_bits())
        if (B[I] == NoCand) {
          B[I] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  // Sentinel candidate index: "no candidate".
  enum : unsigned { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

  /// Callee-save register cost, calculated once per machine function.
  BlockFrequency CSRCost;

  /// Run or not the local reassignment heuristic. This information is
  /// obtained from the TargetSubtargetInfo.
  bool EnableLocalReassign;

  /// Enable or not the consideration of the cost of local intervals created
  /// by a split candidate when choosing the best split candidate.
  bool EnableAdvancedRASplitCost;

  /// Set of broken hints that may be reconciled later because of eviction.
  SmallSetVector<LiveInterval *, 8> SetOfBrokenHints;

  /// The register cost values. This list will be recreated for each Machine
  /// Function
  ArrayRef<uint8_t> RegCosts;

public:
  RAGreedy();

  /// Return the pass name.
  StringRef getPassName() const override { return "Greedy Register Allocator"; }

  /// RAGreedy analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void releaseMemory() override;

  Spiller &spiller() override { return *SpillerInstance; }

  void enqueue(LiveInterval *LI) override;

  LiveInterval *dequeue() override;

  MCRegister selectOrSplit(LiveInterval &,
                           SmallVectorImpl<Register> &) override;

  void aboutToRemoveInterval(LiveInterval &) override;

  /// Perform register allocation.
  bool runOnMachineFunction(MachineFunction &mf) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoPHIs);
  }

  MachineFunctionProperties getClearedProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::IsSSA);
  }

  static char ID;

private:
  MCRegister selectOrSplitImpl(LiveInterval &, SmallVectorImpl<Register> &,
                               SmallVirtRegSet &, unsigned = 0);

  // LiveRangeEdit::Delegate callbacks.
  bool LRE_CanEraseVirtReg(Register) override;
  void LRE_WillShrinkVirtReg(Register) override;
  void LRE_DidCloneVirtReg(Register, Register) override;

  void enqueue(PQueue &CurQueue, LiveInterval *LI);
  LiveInterval *dequeue(PQueue &CurQueue);

  // Region/global splitting helpers.
  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
  bool addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  bool growRegion(GlobalSplitCandidate &Cand);
  bool splitCanCauseEvictionChain(Register Evictee, GlobalSplitCandidate &Cand,
                                  unsigned BBNumber,
                                  const AllocationOrder &Order);
  bool splitCanCauseLocalSpill(unsigned VirtRegToSplit,
                               GlobalSplitCandidate &Cand, unsigned BBNumber,
                               const AllocationOrder &Order);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate &,
                                     const AllocationOrder &Order,
                                     bool *CanCauseEvictionChain);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(MCRegister, SmallVectorImpl<float> &);
  Register canReassign(LiveInterval &VirtReg, Register PrevReg) const;
  bool shouldEvict(LiveInterval &A, bool,
LiveInterval &B, bool) const;
  // Interference-eviction queries and actions.
  bool canEvictInterference(LiveInterval &, MCRegister, bool, EvictionCost &,
                            const SmallVirtRegSet &) const;
  bool canEvictInterferenceInRange(const LiveInterval &VirtReg,
                                   MCRegister PhysReg, SlotIndex Start,
                                   SlotIndex End, EvictionCost &MaxCost) const;
  MCRegister getCheapestEvicteeWeight(const AllocationOrder &Order,
                                      const LiveInterval &VirtReg,
                                      SlotIndex Start, SlotIndex End,
                                      float *BestEvictWeight) const;
  void evictInterference(LiveInterval &, MCRegister,
                         SmallVectorImpl<Register> &);
  bool mayRecolorAllInterferences(MCRegister PhysReg, LiveInterval &VirtReg,
                                  SmallLISet &RecoloringCandidates,
                                  const SmallVirtRegSet &FixedRegisters);

  // Allocation strategies tried, roughly in order of preference.
  MCRegister tryAssign(LiveInterval&, AllocationOrder&,
                       SmallVectorImpl<Register>&,
                       const SmallVirtRegSet&);
  MCRegister tryEvict(LiveInterval &, AllocationOrder &,
                      SmallVectorImpl<Register> &, uint8_t,
                      const SmallVirtRegSet &);
  MCRegister tryRegionSplit(LiveInterval &, AllocationOrder &,
                            SmallVectorImpl<Register> &);
  /// Calculate cost of region splitting.
  unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
                                    AllocationOrder &Order,
                                    BlockFrequency &BestCost,
                                    unsigned &NumCands, bool IgnoreCSR,
                                    bool *CanCauseEvictionChain = nullptr);
  /// Perform region splitting.
  unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                         bool HasCompact,
                         SmallVectorImpl<Register> &NewVRegs);
  /// Check other options before using a callee-saved register for the first
  /// time.
  MCRegister tryAssignCSRFirstTime(LiveInterval &VirtReg,
                                   AllocationOrder &Order, MCRegister PhysReg,
                                   uint8_t &CostPerUseLimit,
                                   SmallVectorImpl<Register> &NewVRegs);
  void initializeCSRCost();
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<Register>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<Register>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<Register>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<Register>&,
                    const SmallVirtRegSet&);
  unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
                                   SmallVectorImpl<Register> &,
                                   SmallVirtRegSet &, unsigned);
  bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<Register> &,
                               SmallVirtRegSet &, unsigned);
  void tryHintRecoloring(LiveInterval &);
  void tryHintsRecoloring();

  /// Model the information carried by one end of a copy.
  struct HintInfo {
    /// The frequency of the copy.
    BlockFrequency Freq;
    /// The virtual register or physical register.
    Register Reg;
    /// Its currently assigned register.
    /// In case of a physical register Reg == PhysReg.
    MCRegister PhysReg;

    HintInfo(BlockFrequency Freq, Register Reg, MCRegister PhysReg)
        : Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
  };
  using HintsInfo = SmallVector<HintInfo, 4>;

  BlockFrequency getBrokenHintFreq(const HintsInfo &, MCRegister);
  void collectHintInfo(Register, HintsInfo &);

  bool isUnusedCalleeSavedReg(MCRegister PhysReg) const;

  /// Greedy RA statistic to remark.
struct RAGreedyStats {
    unsigned Reloads = 0;
    unsigned FoldedReloads = 0;
    unsigned ZeroCostFoldedReloads = 0;
    unsigned Spills = 0;
    unsigned FoldedSpills = 0;
    unsigned Copies = 0;
    float ReloadsCost = 0.0f;
    float FoldedReloadsCost = 0.0f;
    float SpillsCost = 0.0f;
    float FoldedSpillsCost = 0.0f;
    float CopiesCost = 0.0f;

    bool isEmpty() {
      return !(Reloads || FoldedReloads || Spills || FoldedSpills ||
               ZeroCostFoldedReloads || Copies);
    }

    // Accumulate another block's/loop's statistics into this one.
    void add(RAGreedyStats other) {
      Reloads += other.Reloads;
      FoldedReloads += other.FoldedReloads;
      ZeroCostFoldedReloads += other.ZeroCostFoldedReloads;
      Spills += other.Spills;
      FoldedSpills += other.FoldedSpills;
      Copies += other.Copies;
      ReloadsCost += other.ReloadsCost;
      FoldedReloadsCost += other.FoldedReloadsCost;
      SpillsCost += other.SpillsCost;
      FoldedSpillsCost += other.FoldedSpillsCost;
      CopiesCost += other.CopiesCost;
    }

    void report(MachineOptimizationRemarkMissed &R);
  };

  /// Compute statistic for a basic block.
  RAGreedyStats computeStats(MachineBasicBlock &MBB);

  /// Compute and report statistic through a remark.
  RAGreedyStats reportStats(MachineLoop *L);

  /// Report the statistic for each loop.
  void reportStats();
};

} // end anonymous namespace

char RAGreedy::ID = 0;
char &llvm::RAGreedyID = RAGreedy::ID;

// Declare the analyses this pass transitively depends on so the legacy pass
// manager schedules them before RAGreedy runs.
INITIALIZE_PASS_BEGIN(RAGreedy, "greedy",
                "Greedy Register Allocator", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
INITIALIZE_PASS_DEPENDENCY(MachineScheduler)
INITIALIZE_PASS_DEPENDENCY(LiveStacks)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_DEPENDENCY(LiveRegMatrix)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(SpillPlacement)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(RAGreedy, "greedy",
                "Greedy Register Allocator", false, false)

#ifndef NDEBUG
// Keep in sync with the LiveRangeStage enum (same names, same order).
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Memory",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = (2007 / 2048.0f); // 0.97998046875

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(Register VirtReg) {
  LiveInterval &LI = LIS->getInterval(VirtReg);
  if (VRM->hasPhys(VirtReg)) {
    // Assigned register: unassign first so the matrix stays consistent.
    Matrix->unassign(LI);
    aboutToRemoveInterval(LI);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  // Nonetheless, clear the live-range so that the debug
  // dump will show the right state for that VirtReg.
  LI.clear();
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(Register VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(Register New, Register Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it to
  // be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  // same stage as the parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset();
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

// Public enqueue delegates to the member priority queue.
void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }

void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const Register Reg = LI->reg();
  assert(Reg.isVirtual() && "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else if (ExtraRegInfo[Reg].Stage == RS_Memory) {
    // Memory operand should be considered last.
    // Change the priority such that Memory operand are assigned in
    // the reverse order that they came in.
    // TODO: Make this a member variable and probably do something about hints.
    static unsigned MemOp = 0;
    Prio = MemOp++;
  } else {
    // Giant live ranges fall back to the global assignment heuristic, which
    // prevents excessive spilling in pathological cases.
bool ReverseLocal = TRI->reverseLocalAssignment();
    const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
    bool ForceGlobal = !ReverseLocal &&
      (Size / SlotIndex::InstrDist) > (2 * RC.getNumRegs());

    if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      if (!ReverseLocal)
        Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
      else {
        // Allocating bottom up may allow many short LRGs to be assigned first
        // to one of the cheap registers. This could be much faster for very
        // large blocks on targets with many physical registers.
        Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
      }
      Prio |= RC.AllocationPriority << 24;
    } else {
      // Allocate global and split ranges in long->short order. Long ranges that
      // don't fit should be spilled (or split) ASAP so they don't create
      // interference.  Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
  // (~Reg inverts the vreg ordering inside the max-heap; dequeue undoes it.)
  CurQueue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }

LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
  if (CurQueue.empty())
    return nullptr;
  LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
  CurQueue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
MCRegister RAGreedy::tryAssign(LiveInterval &VirtReg, AllocationOrder &Order,
                               SmallVectorImpl<Register> &NewVRegs,
                               const SmallVirtRegSet &FixedRegisters) {
  // First interference-free register in allocation order; hints win outright.
  MCRegister PhysReg;
  for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) {
    assert(*I);
    if (!Matrix->checkInterference(VirtReg, *I)) {
      if (I.isHint())
        return *I;
      else
        PhysReg = *I;
    }
  }
  if (!PhysReg.isValid())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (Register Hint = MRI->getSimpleHint(VirtReg.reg()))
    if (Order.isHint(Hint)) {
      MCRegister PhysHint = Hint.asMCReg();
      LLVM_DEBUG(dbgs() << "missed hint " << printReg(PhysHint, TRI) << '\n');
      EvictionCost MaxCost;
      MaxCost.setBrokenHints(1);
      if (canEvictInterference(VirtReg, PhysHint, true, MaxCost,
                               FixedRegisters)) {
        evictInterference(VirtReg, PhysHint, NewVRegs);
        return PhysHint;
      }
      // Record the missed hint, we may be able to recover
      // at the end if the surrounding allocation changed.
      SetOfBrokenHints.insert(&VirtReg);
    }

  // Try to evict interference from a cheaper alternative.
  uint8_t Cost = RegCosts[PhysReg];

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost "
                    << Cost << '\n');
  MCRegister CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost, FixedRegisters);
  return CheapReg ?
CheapReg : PhysReg;
}

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

Register RAGreedy::canReassign(LiveInterval &VirtReg, Register PrevReg) const {
  auto Order =
      AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix);
  MCRegister PhysReg;
  for (auto I = Order.begin(), E = Order.end(); I != E && !PhysReg; ++I) {
    if ((*I).id() == PrevReg.id())
      continue;

    MCRegUnitIterator Units(*I, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      PhysReg = *I;
  }
  if (PhysReg)
    LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
                      << printReg(PrevReg, TRI) << " to "
                      << printReg(PhysReg, TRI) << '\n');
  return PhysReg;
}

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) const {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  if (A.weight() > B.weight()) {
    LLVM_DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight() << '\n');
    return true;
  }
  return false;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(
    LiveInterval &VirtReg, MCRegister PhysReg, bool IsHint,
    EvictionCost &MaxCost, const SmallVirtRegSet &FixedRegisters) const {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg()].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there is 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
for (LiveInterval *Intf : reverse(Q.interferingVRegs())) {
      assert(Register::isVirtualRegister(Intf->reg()) &&
             "Only expecting virtual register interference from query");

      // Do not allow eviction of a virtual register if we are in the middle
      // of last-chance recoloring and this virtual register is one that we
      // have scavenged a physical register for.
      if (FixedRegisters.count(Intf->reg()))
        return false;

      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent =
          !VirtReg.isSpillable() &&
          (Intf->isSpillable() ||
           RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg())) <
               RegClassInfo.getNumAllocatableRegs(
                   MRI->getRegClass(Intf->reg())));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg()].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;

        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg());
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight());
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // Apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to suboptimal
      // coloring.
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
        return false;
      }
    }
  }
  MaxCost = Cost;
  return true;
}

/// Return true if all interferences between VirtReg and PhysReg between
/// Start and End can be evicted.
///
/// \param VirtReg Live range that is about to be assigned.
/// \param PhysReg Desired register for assignment.
/// \param Start   Start of range to look for interferences.
/// \param End     End of range to look for interferences.
/// \param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// \return True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterferenceInRange(const LiveInterval &VirtReg,
                                           MCRegister PhysReg, SlotIndex Start,
                                           SlotIndex End,
                                           EvictionCost &MaxCost) const {
  EvictionCost Cost;

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    Q.collectInterferingVRegs();

    // Check if any interfering live range is heavier than MaxWeight.
    for (const LiveInterval *Intf : reverse(Q.interferingVRegs())) {
      // Check if interference overlaps the segment of interest.
      if (!Intf->overlaps(Start, End))
        continue;

      // Cannot evict non virtual reg interference.
      if (!Register::isVirtualRegister(Intf->reg()))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;

      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg());
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight());
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
    }
  }

  // A zero max weight means there was no evictable interference in the range.
  if (Cost.MaxWeight == 0)
    return false;

  MaxCost = Cost;
  return true;
}

/// Return the physical register that will be best
/// candidate for eviction by a local split interval that will be created
/// between Start and End.
///
/// \param Order            The allocation order
/// \param VirtReg          Live range that is about to be assigned.
/// \param Start            Start of range to look for interferences
/// \param End              End of range to look for interferences
/// \param BestEvictweight  The eviction cost of that eviction
/// \return The PhysReg which is the best candidate for eviction and the
/// eviction cost in BestEvictweight
MCRegister RAGreedy::getCheapestEvicteeWeight(const AllocationOrder &Order,
                                              const LiveInterval &VirtReg,
                                              SlotIndex Start, SlotIndex End,
                                              float *BestEvictweight) const {
  EvictionCost BestEvictCost;
  BestEvictCost.setMax();
  BestEvictCost.MaxWeight = VirtReg.weight();
  MCRegister BestEvicteePhys;

  // Go over all physical registers and find the best candidate for eviction
  for (MCRegister PhysReg : Order.getOrder()) {

    // canEvictInterferenceInRange only succeeds (and lowers BestEvictCost)
    // when evicting from PhysReg is cheaper than the best seen so far.
    if (!canEvictInterferenceInRange(VirtReg, PhysReg, Start, End,
                                     BestEvictCost))
      continue;

    // Best so far.
    BestEvicteePhys = PhysReg;
  }
  *BestEvictweight = BestEvictCost.MaxWeight;
  return BestEvicteePhys;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to Physreg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, MCRegister PhysReg,
                                 SmallVectorImpl<Register> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg()].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg()].Cascade = NextCascade++;

  LLVM_DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI)
                    << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // We usually have the interfering VRegs cached so collectInterferingVRegs()
    // should be fast, we may need to recalculate it when different physregs
    // overlap the same register unit so we had different SubRanges queried
    // against it.
    Q.collectInterferingVRegs();
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (LiveInterval *Intf : Intfs) {
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg()))
      continue;

    LastEvicted.addEviction(PhysReg, VirtReg.reg(), Intf->reg());

    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg()].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg()].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg());
  }
}

/// Returns true if the given \p PhysReg is a callee saved register and has not
/// been used for allocation yet.
bool RAGreedy::isUnusedCalleeSavedReg(MCRegister PhysReg) const {
  MCRegister CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
  if (!CSR)
    return false;

  return !Matrix->isPhysRegUsed(PhysReg);
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
MCRegister RAGreedy::tryEvict(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<Register> &NewVRegs,
                              uint8_t CostPerUseLimit,
                              const SmallVirtRegSet &FixedRegisters) {
  NamedRegionTimer T("evict", "Evict", TimerGroupName, TimerGroupDescription,
                     TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost;
  BestCost.setMax();
  MCRegister BestPhys;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < uint8_t(~0u)) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight();

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg());
    uint8_t MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = "
                        << MinCost << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (RegCosts[Order.getOrder().back()] >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit
                        << " regs.\n");
    }
  }

  for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit); I != E;
       ++I) {
    MCRegister PhysReg = *I;
    assert(PhysReg);
    if (RegCosts[PhysReg] >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
      LLVM_DEBUG(
          dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
                 << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
                 << '\n');
      continue;
    }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost,
                              FixedRegisters))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (I.isHint())
      break;
  }

  if (BestPhys.isValid())
    evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}

//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned I = 0; I != UseBlocks.size(); ++I) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[I];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[I];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = (BI.LiveOut &&
               !LIS->getInstructionFromIndex(BI.LastInstr)->isImplicitDef())
                  ? SpillPlacement::PrefReg
                  : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) {
        BC.Entry = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.first() < BI.FirstInstr) {
        BC.Entry = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.first() < BI.LastInstr) {
        ++Ins;
      }

      // Abort if the spill cannot be inserted at the MBB's start
      if (((BC.Entry == SpillPlacement::MustSpill) ||
           (BC.Entry == SpillPlacement::PrefSpill)) &&
          SlotIndex::isEarlierInstr(BI.FirstInstr,
                                    SA->getFirstSplitPoint(BC.Number)))
        return false;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) {
        BC.Exit = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.last() > BI.LastInstr) {
        BC.Exit = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.last() > BI.FirstInstr) {
        ++Ins;
      }
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
bool RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  // Constraints and links are flushed to SpillPlacer in groups of GroupSize to
  // amortize call overhead.
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned Number : Blocks) {
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Abort if the spill cannot be inserted at the MBB's start
    MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
    auto FirstNonDebugInstr = MBB->getFirstNonDebugInstr();
    if (FirstNonDebugInstr != MBB->end() &&
        SlotIndex::isEarlierInstr(LIS->getInstructionIndex(*FirstNonDebugInstr),
                                  SA->getFirstSplitPoint(Number)))
      return false;
    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      SpillPlacer->addConstraints(makeArrayRef(BCS, B));
      B = 0;
    }
  }

  // Flush whatever is left in the partially-filled groups.
  SpillPlacer->addConstraints(makeArrayRef(BCS, B));
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
  return true;
}

bool RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  while (true) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (unsigned Bundle : NewBundles) {
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (unsigned Block : Blocks) {
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg) {
      if (!addThroughConstraints(Cand.Intf, NewBlocks))
        return false;
    } else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  LLVM_DEBUG(dbgs() << ", v=" << Visited);
  return true;
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions.  Compact
/// regions can be computed without looking at interference.  They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, MCRegister::NoRegister);

  LLVM_DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    LLVM_DEBUG(dbgs() << ", none.\n");
    return false;
  }

  if (!growRegion(Cand)) {
    LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
    return false;
  }

  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    LLVM_DEBUG(dbgs() << ", none.\n");
    return false;
  }

  LLVM_DEBUG({
    for (int I : Cand.LiveBundles.set_bits())
      dbgs() << " EB#" << I;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
  BlockFrequency Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (const SplitAnalysis::BlockInfo &BI : UseBlocks) {
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// Check if splitting Evictee will create a local split interval in
/// basic block number BBNumber that may cause a bad eviction chain. This is
/// intended to prevent bad eviction sequences like:
/// movl	%ebp, 8(%esp)           # 4-byte Spill
/// movl	%ecx, %ebp
/// movl	%ebx, %ecx
/// movl	%edi, %ebx
/// movl	%edx, %edi
/// cltd
/// idivl	%esi
/// movl	%edi, %edx
/// movl	%ebx, %edi
/// movl	%ecx, %ebx
/// movl	%ebp, %ecx
/// movl	16(%esp), %ebp          # 4 - byte Reload
///
/// Such sequences are created in 2 scenarios:
///
/// Scenario #1:
/// %0 is evicted from physreg0 by %1.
/// Evictee %0 is intended for region splitting with split candidate
/// physreg0 (the reg %0 was evicted from).
/// Region splitting creates a local interval because of interference with the
/// evictor %1 (normally region splitting creates 2 interval, the "by reg"
/// and "by stack" intervals and local interval created when interference
/// occurs).
/// One of the split intervals ends up evicting %2 from physreg1.
/// Evictee %2 is intended for region splitting with split candidate
/// physreg1.
/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// Scenario #2
/// %0 is evicted from physreg0 by %1.
/// %2 is evicted from physreg2 by %3 etc.
/// Evictee %0 is intended for region splitting with split candidate
/// physreg1.
/// Region splitting creates a local interval because of interference with the
/// evictor %1.
/// One of the split intervals ends up evicting back original evictor %1
/// from physreg0 (the reg %0 was evicted from).
/// Another evictee %2 is intended for region splitting with split candidate
/// physreg1.
/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// \param Evictee  The register considered to be split.
/// \param Cand     The split candidate that determines the physical register
///                 we are splitting for and the interferences.
/// \param BBNumber The number of a BB for which the region split process will
///                 create a local split interval.
/// \param Order    The physical registers that may get evicted by a split
///                 artifact of Evictee.
/// \return True if splitting Evictee may cause a bad eviction chain, false
/// otherwise.
bool RAGreedy::splitCanCauseEvictionChain(Register Evictee,
                                          GlobalSplitCandidate &Cand,
                                          unsigned BBNumber,
                                          const AllocationOrder &Order) {
  EvictionTrack::EvictorInfo VregEvictorInfo = LastEvicted.getEvictor(Evictee);
  unsigned Evictor = VregEvictorInfo.first;
  MCRegister PhysReg = VregEvictorInfo.second;

  // No actual evictor.
  if (!Evictor || !PhysReg)
    return false;

  float MaxWeight = 0;
  MCRegister FutureEvictedPhysReg =
      getCheapestEvicteeWeight(Order, LIS->getInterval(Evictee),
                               Cand.Intf.first(), Cand.Intf.last(), &MaxWeight);

  // The bad eviction chain occurs when either the split candidate is the
  // evicting reg or one of the split artifact will evict the evicting reg.
  if ((PhysReg != Cand.PhysReg) && (PhysReg != FutureEvictedPhysReg))
    return false;

  Cand.Intf.moveToBlock(BBNumber);

  // Check to see if the Evictor contains interference (with Evictee) in the
  // given BB. If so, this interference caused the eviction of Evictee from
  // PhysReg. This suggests that we will create a local interval during the
  // region split to avoid this interference. This local interval may cause a
  // bad eviction chain.
  if (!LIS->hasInterval(Evictor))
    return false;
  LiveInterval &EvictorLI = LIS->getInterval(Evictor);
  if (EvictorLI.FindSegmentContaining(Cand.Intf.first()) == EvictorLI.end())
    return false;

  // Now, check to see if the local interval we will create is going to be
  // expensive enough to evict somebody. If so, this may cause a bad eviction
  // chain.
  float splitArtifactWeight =
      VRAI->futureWeight(LIS->getInterval(Evictee),
                         Cand.Intf.first().getPrevIndex(), Cand.Intf.last());
  if (splitArtifactWeight >= 0 && splitArtifactWeight < MaxWeight)
    return false;

  return true;
}

/// Check if splitting VirtRegToSplit will create a local split interval
/// in basic block number BBNumber that may cause a spill.
///
/// \param VirtRegToSplit The register considered to be split.
/// \param Cand           The split candidate that determines the physical
///                       register we are splitting for and the interferences.
/// \param BBNumber       The number of a BB for which the region split process
///                       will create a local split interval.
/// \param Order          The physical registers that may get evicted by a
///                       split artifact of VirtRegToSplit.
/// \return True if splitting VirtRegToSplit may cause a spill, false
/// otherwise.
bool RAGreedy::splitCanCauseLocalSpill(unsigned VirtRegToSplit,
                                       GlobalSplitCandidate &Cand,
                                       unsigned BBNumber,
                                       const AllocationOrder &Order) {
  Cand.Intf.moveToBlock(BBNumber);

  // Check if the local interval will find a non interfering assignment.
  for (auto PhysReg : Order.getOrder()) {
    if (!Matrix->checkInterference(Cand.Intf.first().getPrevIndex(),
                                   Cand.Intf.last(), PhysReg))
      return false;
  }

  // The local interval is not able to find non interfering assignment
  // and not able to evict a less worthy interval, therefore, it can cause a
  // spill.
  return true;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
                                             const AllocationOrder &Order,
                                             bool *CanCauseEvictionChain) {
  BlockFrequency GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  Register VirtRegToSplit = SA->getParent().reg();
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned I = 0; I != UseBlocks.size(); ++I) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[I];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[I];
    bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, false)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, true)];
    unsigned Ins = 0;

    Cand.Intf.moveToBlock(BC.Number);
    // Check whether a local interval is going to be created during the region
    // split. Calculate advanced split cost (cost of local intervals) if option
    // is enabled.
    if (EnableAdvancedRASplitCost && Cand.Intf.hasInterference() && BI.LiveIn &&
        BI.LiveOut && RegIn && RegOut) {

      if (CanCauseEvictionChain &&
          splitCanCauseEvictionChain(VirtRegToSplit, Cand, BC.Number, Order)) {
        // This interference causes our eviction from this assignment, we might
        // evict somebody else and eventually someone will spill, add that cost.
        // See splitCanCauseEvictionChain for detailed description of scenarios.
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);

        *CanCauseEvictionChain = true;

      } else if (splitCanCauseLocalSpill(VirtRegToSplit, Cand, BC.Number,
                                         Order)) {
        // This interference causes local interval to spill, add that cost.
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
      }
    }

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    while (Ins--)
      GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned Number : Cand.ActiveBlocks) {
    bool RegIn = LiveBundles[Bundles->getBundle(Number, false)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, true)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference()) {
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
        GlobalCost += SpillPlacer->getBlockFrequency(Number);

        // Check whether a local interval is going to be created during the
        // region split.
        if (EnableAdvancedRASplitCost && CanCauseEvictionChain &&
            splitCanCauseEvictionChain(VirtRegToSplit, Cand, Number, Order)) {
          // This interference causes our eviction from this assignment, we
          // might evict somebody else, add that cost.
          // See splitCanCauseEvictionChain for detailed description of
          // scenarios.
          GlobalCost += SpillPlacer->getBlockFrequency(Number);
          GlobalCost += SpillPlacer->getBlockFrequency(Number);

          *CanCauseEvictionChain = true;
        }
      }
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
/// /// Before calling this function, GlobalCand and BundleCand must be initialized /// so each bundle is assigned to a valid candidate, or NoCand for the /// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor /// objects must be initialized for the current live range, and intervals /// created for the used candidates. /// /// @param LREdit The LiveRangeEdit object handling the current split. /// @param UsedCands List of used GlobalCand entries. Every BundleCand value /// must appear in this list. void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit, ArrayRef<unsigned> UsedCands) { // These are the intervals created for new global ranges. We may create more // intervals for local ranges. const unsigned NumGlobalIntvs = LREdit.size(); LLVM_DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n"); assert(NumGlobalIntvs && "No global intervals configured"); // Isolate even single instructions when dealing with a proper sub-class. // That guarantees register class inflation for the stack interval because it // is all copies. Register Reg = SA->getParent().reg(); bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg)); // First handle all the blocks with uses. 
ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); for (const SplitAnalysis::BlockInfo &BI : UseBlocks) { unsigned Number = BI.MBB->getNumber(); unsigned IntvIn = 0, IntvOut = 0; SlotIndex IntfIn, IntfOut; if (BI.LiveIn) { unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)]; if (CandIn != NoCand) { GlobalSplitCandidate &Cand = GlobalCand[CandIn]; IntvIn = Cand.IntvIdx; Cand.Intf.moveToBlock(Number); IntfIn = Cand.Intf.first(); } } if (BI.LiveOut) { unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)]; if (CandOut != NoCand) { GlobalSplitCandidate &Cand = GlobalCand[CandOut]; IntvOut = Cand.IntvIdx; Cand.Intf.moveToBlock(Number); IntfOut = Cand.Intf.last(); } } // Create separate intervals for isolated blocks with multiple uses. if (!IntvIn && !IntvOut) { LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n"); if (SA->shouldSplitSingleBlock(BI, SingleInstrs)) SE->splitSingleBlock(BI); continue; } if (IntvIn && IntvOut) SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut); else if (IntvIn) SE->splitRegInBlock(BI, IntvIn, IntfIn); else SE->splitRegOutBlock(BI, IntvOut, IntfOut); } // Handle live-through blocks. The relevant live-through blocks are stored in // the ActiveBlocks list with each candidate. We need to filter out // duplicates. 
BitVector Todo = SA->getThroughBlocks(); for (unsigned c = 0; c != UsedCands.size(); ++c) { ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks; for (unsigned Number : Blocks) { if (!Todo.test(Number)) continue; Todo.reset(Number); unsigned IntvIn = 0, IntvOut = 0; SlotIndex IntfIn, IntfOut; unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)]; if (CandIn != NoCand) { GlobalSplitCandidate &Cand = GlobalCand[CandIn]; IntvIn = Cand.IntvIdx; Cand.Intf.moveToBlock(Number); IntfIn = Cand.Intf.first(); } unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)]; if (CandOut != NoCand) { GlobalSplitCandidate &Cand = GlobalCand[CandOut]; IntvOut = Cand.IntvIdx; Cand.Intf.moveToBlock(Number); IntfOut = Cand.Intf.last(); } if (!IntvIn && !IntvOut) continue; SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut); } } ++NumGlobalSplits; SmallVector<unsigned, 8> IntvMap; SE->finish(&IntvMap); DebugVars->splitRegister(Reg, LREdit.regs(), *LIS); ExtraRegInfo.resize(MRI->getNumVirtRegs()); unsigned OrigBlocks = SA->getNumLiveBlocks(); // Sort out the new intervals created by splitting. We get four kinds: // - Remainder intervals should not be split again. // - Candidate intervals can be assigned to Cand.PhysReg. // - Block-local splits are candidates for local splitting. // - DCE leftovers should go back on the queue. for (unsigned I = 0, E = LREdit.size(); I != E; ++I) { LiveInterval &Reg = LIS->getInterval(LREdit.get(I)); // Ignore old intervals from DCE. if (getStage(Reg) != RS_New) continue; // Remainder interval. Don't try splitting again, spill if it doesn't // allocate. if (IntvMap[I] == 0) { setStage(Reg, RS_Spill); continue; } // Global intervals. Allow repeated splitting as long as the number of live // blocks is strictly decreasing. 
if (IntvMap[I] < NumGlobalIntvs) { if (SA->countLiveBlocks(&Reg) >= OrigBlocks) { LLVM_DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks << " blocks as original.\n"); // Don't allow repeated splitting as a safe guard against looping. setStage(Reg, RS_Split2); } continue; } // Other intervals are treated as new. This includes local intervals created // for blocks with multiple uses, and anything created by DCE. } if (VerifyEnabled) MF->verify(this, "After splitting live range around region"); } MCRegister RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order, SmallVectorImpl<Register> &NewVRegs) { if (!TRI->shouldRegionSplitForVirtReg(*MF, VirtReg)) return MCRegister::NoRegister; unsigned NumCands = 0; BlockFrequency SpillCost = calcSpillCost(); BlockFrequency BestCost; // Check if we can split this live range around a compact region. bool HasCompact = calcCompactRegion(GlobalCand.front()); if (HasCompact) { // Yes, keep GlobalCand[0] as the compact region candidate. NumCands = 1; BestCost = BlockFrequency::getMaxFrequency(); } else { // No benefit from the compact region, our fallback will be per-block // splitting. Make sure we find a solution that is cheaper than spilling. BestCost = SpillCost; LLVM_DEBUG(dbgs() << "Cost of isolating all blocks = "; MBFI->printBlockFreq(dbgs(), BestCost) << '\n'); } bool CanCauseEvictionChain = false; unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands, false /*IgnoreCSR*/, &CanCauseEvictionChain); // Split candidates with compact regions can cause a bad eviction sequence. // See splitCanCauseEvictionChain for detailed description of scenarios. // To avoid it, we need to comapre the cost with the spill cost and not the // current max frequency. if (HasCompact && (BestCost > SpillCost) && (BestCand != NoCand) && CanCauseEvictionChain) { return MCRegister::NoRegister; } // No solutions found, fall back to single block splitting. 
if (!HasCompact && BestCand == NoCand) return MCRegister::NoRegister; return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs); } unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg, AllocationOrder &Order, BlockFrequency &BestCost, unsigned &NumCands, bool IgnoreCSR, bool *CanCauseEvictionChain) { unsigned BestCand = NoCand; for (MCPhysReg PhysReg : Order) { assert(PhysReg); if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg)) continue; // Discard bad candidates before we run out of interference cache cursors. // This will only affect register classes with a lot of registers (>32). if (NumCands == IntfCache.getMaxCursors()) { unsigned WorstCount = ~0u; unsigned Worst = 0; for (unsigned CandIndex = 0; CandIndex != NumCands; ++CandIndex) { if (CandIndex == BestCand || !GlobalCand[CandIndex].PhysReg) continue; unsigned Count = GlobalCand[CandIndex].LiveBundles.count(); if (Count < WorstCount) { Worst = CandIndex; WorstCount = Count; } } --NumCands; GlobalCand[Worst] = GlobalCand[NumCands]; if (BestCand == NumCands) BestCand = Worst; } if (GlobalCand.size() <= NumCands) GlobalCand.resize(NumCands+1); GlobalSplitCandidate &Cand = GlobalCand[NumCands]; Cand.reset(IntfCache, PhysReg); SpillPlacer->prepare(Cand.LiveBundles); BlockFrequency Cost; if (!addSplitConstraints(Cand.Intf, Cost)) { LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tno positive bundles\n"); continue; } LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tstatic = "; MBFI->printBlockFreq(dbgs(), Cost)); if (Cost >= BestCost) { LLVM_DEBUG({ if (BestCand == NoCand) dbgs() << " worse than no bundles\n"; else dbgs() << " worse than " << printReg(GlobalCand[BestCand].PhysReg, TRI) << '\n'; }); continue; } if (!growRegion(Cand)) { LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n"); continue; } SpillPlacer->finish(); // No live bundles, defer to splitSingleBlocks(). 
if (!Cand.LiveBundles.any()) { LLVM_DEBUG(dbgs() << " no bundles.\n"); continue; } bool HasEvictionChain = false; Cost += calcGlobalSplitCost(Cand, Order, &HasEvictionChain); LLVM_DEBUG({ dbgs() << ", total = "; MBFI->printBlockFreq(dbgs(), Cost) << " with bundles"; for (int I : Cand.LiveBundles.set_bits()) dbgs() << " EB#" << I; dbgs() << ".\n"; }); if (Cost < BestCost) { BestCand = NumCands; BestCost = Cost; // See splitCanCauseEvictionChain for detailed description of bad // eviction chain scenarios. if (CanCauseEvictionChain) *CanCauseEvictionChain = HasEvictionChain; } ++NumCands; } if (CanCauseEvictionChain && BestCand != NoCand) { // See splitCanCauseEvictionChain for detailed description of bad // eviction chain scenarios. LLVM_DEBUG(dbgs() << "Best split candidate of vreg " << printReg(VirtReg.reg(), TRI) << " may "); if (!(*CanCauseEvictionChain)) LLVM_DEBUG(dbgs() << "not "); LLVM_DEBUG(dbgs() << "cause bad eviction chain\n"); } return BestCand; } unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand, bool HasCompact, SmallVectorImpl<Register> &NewVRegs) { SmallVector<unsigned, 8> UsedCands; // Prepare split editor. LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats); SE->reset(LREdit, SplitSpillMode); // Assign all edge bundles to the preferred candidate, or NoCand. BundleCand.assign(Bundles->getNumBundles(), NoCand); // Assign bundles for the best candidate region. if (BestCand != NoCand) { GlobalSplitCandidate &Cand = GlobalCand[BestCand]; if (unsigned B = Cand.getBundles(BundleCand, BestCand)) { UsedCands.push_back(BestCand); Cand.IntvIdx = SE->openIntv(); LLVM_DEBUG(dbgs() << "Split for " << printReg(Cand.PhysReg, TRI) << " in " << B << " bundles, intv " << Cand.IntvIdx << ".\n"); (void)B; } } // Assign bundles for the compact region. 
if (HasCompact) { GlobalSplitCandidate &Cand = GlobalCand.front(); assert(!Cand.PhysReg && "Compact region has no physreg"); if (unsigned B = Cand.getBundles(BundleCand, 0)) { UsedCands.push_back(0); Cand.IntvIdx = SE->openIntv(); LLVM_DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv " << Cand.IntvIdx << ".\n"); (void)B; } } splitAroundRegion(LREdit, UsedCands); return 0; } //===----------------------------------------------------------------------===// // Per-Block Splitting //===----------------------------------------------------------------------===// /// tryBlockSplit - Split a global live range around every block with uses. This /// creates a lot of local live ranges, that will be split by tryLocalSplit if /// they don't allocate. unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order, SmallVectorImpl<Register> &NewVRegs) { assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed"); Register Reg = VirtReg.reg(); bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg)); LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats); SE->reset(LREdit, SplitSpillMode); ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); for (const SplitAnalysis::BlockInfo &BI : UseBlocks) { if (SA->shouldSplitSingleBlock(BI, SingleInstrs)) SE->splitSingleBlock(BI); } // No blocks were split. if (LREdit.empty()) return 0; // We did split for some blocks. SmallVector<unsigned, 8> IntvMap; SE->finish(&IntvMap); // Tell LiveDebugVariables about the new ranges. DebugVars->splitRegister(Reg, LREdit.regs(), *LIS); ExtraRegInfo.resize(MRI->getNumVirtRegs()); // Sort out the new intervals created by splitting. The remainder interval // goes straight to spilling, the new local ranges get to stay RS_New. 
  // Interval 0 (IntvMap[I] == 0) is the remainder; demote it to RS_Spill so
  // it is spilled rather than split again.
  for (unsigned I = 0, E = LREdit.size(); I != E; ++I) {
    LiveInterval &LI = LIS->getInterval(LREdit.get(I));
    if (getStage(LI) == RS_New && IntvMap[I] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}

//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// Get the number of allocatable registers that match the constraints of \p Reg
/// on \p MI and that are also in \p SuperRC.
static unsigned getNumAllocatableRegsForConstraints(
    const MachineInstr *MI, Register Reg, const TargetRegisterClass *SuperRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
    const RegisterClassInfo &RCI) {
  assert(SuperRC && "Invalid register class");

  const TargetRegisterClass *ConstrainedRC =
      MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
                                             /* ExploreBundle */ true);
  // A null constrained class means the operand constraints cannot be
  // satisfied inside SuperRC at all.
  if (!ConstrainedRC)
    return 0;
  return RCI.getNumAllocatableRegs(ConstrainedRC);
}

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned RAGreedy::tryInstructionSplit(LiveInterval &VirtReg,
                                       AllocationOrder &Order,
                                       SmallVectorImpl<Register> &NewVRegs) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg());
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(CurRC))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  // A single use cannot be split around; bail out.
  if (Uses.size() <= 1)
    return 0;

  LLVM_DEBUG(dbgs() << "Split around " << Uses.size()
                    << " individual instrs.\n");

  const TargetRegisterClass *SuperRC =
      TRI->getLargestLegalSuperClass(CurRC, *MF);
  unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
  // Split around every non-copy instruction if this split will relax
  // the constraints on the virtual register.
  // Otherwise, splitting just inserts uncoalescable copies that do not help
  // the allocation.
  for (const auto &Use : Uses) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Use))
      if (MI->isFullCopy() ||
          SuperRCNumAllocatableRegs ==
              getNumAllocatableRegsForConstraints(MI, VirtReg.reg(), SuperRC,
                                                  TII, TRI, RCI)) {
        LLVM_DEBUG(dbgs() << " skip:\t" << Use << '\t' << *MI);
        continue;
      }
    // Open a tiny interval covering just this one use.
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Use);
    SlotIndex SegStop = SE->leaveIntvAfter(Use);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    LLVM_DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS);
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}

//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[I] represents the gap between UseSlots[I] and UseSlots[I + 1].
///
void RAGreedy::calcGapWeights(MCRegister PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
          .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
      Matrix->getLiveUnions()[*Units] .find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      // Record the interfering segment's weight into every gap it spans.
      const float weight = IntI.value()->weight();
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveRange &LR = LIS->getRegUnit(*Units);
    LiveRange::const_iterator I = LR.find(StartIdx);
    LiveRange::const_iterator E = LR.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    // huge_valf makes those gaps unusable: fixed (regunit) interference
    // can never be evicted.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = huge_valf;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<Register> &NewVRegs) {
  // TODO: the function currently only handles a single UseBlock; it should be
  // possible to generalize.
  if (SA->getUseBlocks().size() != 1)
    return 0;

  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  // Fewer than three uses cannot produce a useful local split.
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  LLVM_DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (const auto &Use : Uses)
      dbgs() << ' ' << Use;
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS =
      LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    LLVM_DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned RI =
        llvm::lower_bound(RMS, Uses.front().getRegSlot()) - RMS.begin();
    unsigned RE = RMS.size();
    for (unsigned I = 0; I != NumGaps && RI != RE; ++I) {
      // Look for Uses[I] <= RMS <= Uses[I + 1].
      assert(!SlotIndex::isEarlierInstr(RMS[RI], Uses[I]));
      if (SlotIndex::isEarlierInstr(Uses[I + 1], RMS[RI]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[I + 1], RMS[RI]) && I + 1 == NumGaps)
        break;
      LLVM_DEBUG(dbgs() << ' ' << RMS[RI] << ':' << Uses[I] << '-'
                        << Uses[I + 1]);
      RegMaskGaps.push_back(I);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (RI != RE && SlotIndex::isEarlierInstr(RMS[RI], Uses[I + 1]))
        ++RI;
    }
    LLVM_DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have less instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  // BestBefore == NumGaps is the "no candidate found" sentinel checked below.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq =
    SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
    (1.0f / MBFI->getEntryFreq());
  SmallVector<float, 8> GapWeight;

  for (MCPhysReg PhysReg : Order) {
    assert(PhysReg);
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[I] and UseSlots[I + 1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned I = 0, E = RegMaskGaps.size(); I != E; ++I)
        GapWeight[RegMaskGaps[I]] = huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    // Sliding-window search: alternately shrink the window from the left or
    // extend it to the right, scoring each candidate [SplitBefore,SplitAfter].
    while (true) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << ' ' << Uses[SplitBefore]
                        << '-' << Uses[SplitAfter] << " I=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        LLVM_DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(
            blockFreq * (NewGaps + 1),
            Uses[SplitBefore].distance(Uses[SplitAfter]) +
                (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
            1);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        LLVM_DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          // Hysteresis dampens BestDiff so near-equal later candidates don't
          // keep displacing the current best.
          if (Diff > BestDiff) {
            LLVM_DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          LLVM_DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned I = SplitBefore + 1; I != SplitAfter; ++I)
              MaxGap = std::max(MaxGap, GapWeight[I]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        LLVM_DEBUG(dbgs() << " end\n");
        break;
      }

      LLVM_DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  LLVM_DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore] << '-'
                    << Uses[BestAfter] << ", " << BestDiff << ", "
                    << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit);

  // Materialize the chosen split: one new interval covering
  // Uses[BestBefore] .. Uses[BestAfter].
  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg(), LREdit.regs(), *LIS);

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    LLVM_DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned I = 0, E = IntvMap.size(); I != E; ++I)
      if (IntvMap[I] == 1) {
        setStage(LIS->getInterval(LREdit.get(I)), RS_Split2);
        LLVM_DEBUG(dbgs() << printReg(LREdit.get(I)));
      }
    LLVM_DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<Register> &NewVRegs,
                            const SmallVirtRegSet &FixedRegisters) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("local_split", "Local Splitting", TimerGroupName,
                       TimerGroupDescription, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    Register PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    // Fall back to per-instruction splitting when local splitting produced
    // nothing.
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("global_split", "Global Splitting", TimerGroupName,
                     TimerGroupDescription, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (Register PhysReg = tryAssign(VirtReg, Order, NewVRegs, FixedRegisters))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    MCRegister PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}

//===----------------------------------------------------------------------===//
//                          Last Chance Recoloring
//===----------------------------------------------------------------------===//

/// Return true if \p reg has any tied def operand.
static bool hasTiedDef(MachineRegisterInfo *MRI, unsigned reg) {
  for (const MachineOperand &MO : MRI->def_operands(reg))
    if (MO.isTied())
      return true;

  return false;
}

/// mayRecolorAllInterferences - Check if the virtual registers that
/// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
/// recolored to free \p PhysReg.
/// When true is returned, \p RecoloringCandidates has been augmented with all
/// the live intervals that need to be recolored in order to free \p PhysReg
/// for \p VirtReg.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
bool RAGreedy::mayRecolorAllInterferences(
    MCRegister PhysReg, LiveInterval &VirtReg, SmallLISet &RecoloringCandidates,
    const SmallVirtRegSet &FixedRegisters) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg());

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there is LastChanceRecoloringMaxInterference or more interferences,
    // chances are one would not be recolorable.
    // ExhaustiveSearch (a flag, see its declaration) disables this cutoff.
    if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
        LastChanceRecoloringMaxInterference && !ExhaustiveSearch) {
      LLVM_DEBUG(dbgs() << "Early abort: too many interferences.\n");
      CutOffInfo |= CO_Interf;
      return false;
    }
    for (LiveInterval *Intf : reverse(Q.interferingVRegs())) {
      // If Intf is done and sit on the same register class as VirtReg,
      // it would not be recolorable as it is in the same state as VirtReg.
      // However, if VirtReg has tied defs and Intf doesn't, then
      // there is still a point in examining if it can be recolorable.
      if (((getStage(*Intf) == RS_Done &&
            MRI->getRegClass(Intf->reg()) == CurRC) &&
           !(hasTiedDef(MRI, VirtReg.reg()) &&
             !hasTiedDef(MRI, Intf->reg()))) ||
          FixedRegisters.count(Intf->reg())) {
        LLVM_DEBUG(
            dbgs() << "Early abort: the interference is not recolorable.\n");
        return false;
      }
      RecoloringCandidates.insert(Intf);
    }
  }
  return true;
}

/// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
/// its interferences.
/// Last chance recoloring chooses a color for \p VirtReg and recolors every
/// virtual register that was using it. The recoloring process may recursively
/// use the last chance recoloring. Therefore, when a virtual register has been
/// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
/// be last-chance-recolored again during this recoloring "session".
/// E.g.,
/// Let
/// vA can use {R1, R2    }
/// vB can use {    R2, R3}
/// vC can use {R1        }
/// Where vA, vB, and vC cannot be split anymore (they are reloads for
/// instance) and they all interfere.
///
/// vA is assigned R1
/// vB is assigned R2
/// vC tries to evict vA but vA is already done.
/// Regular register allocation fails.
///
/// Last chance recoloring kicks in:
/// vC does as if vA was evicted => vC uses R1.
/// vC is marked as fixed.
/// vA needs to find a color.
/// None are available.
/// vA cannot evict vC: vC is a fixed virtual register now.
/// vA does as if vB was evicted => vA uses R2.
/// vB needs to find a color.
/// R3 is available.
/// Recoloring => vC = R1, vA = R2, vB = R3
///
/// \p Order defines the preferred allocation order for \p VirtReg.
/// \p NewRegs will contain any new virtual register that have been created
/// (split, spill) during the process and that must be assigned.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
/// \p Depth gives the current depth of the last chance recoloring.
/// \return a physical register that can be used for VirtReg or ~0u if none
/// exists.
unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
                                           AllocationOrder &Order,
                                           SmallVectorImpl<Register> &NewVRegs,
                                           SmallVirtRegSet &FixedRegisters,
                                           unsigned Depth) {
  // Give the target a chance to opt out of the whole mechanism first.
  if (!TRI->shouldUseLastChanceRecoloringForVirtReg(*MF, VirtReg))
    return ~0u;

  LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
  // Ranges must be Done.
  assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
         "Last chance recoloring should really be last chance");
  // Set the max depth to LastChanceRecoloringMaxDepth.
  // We may want to reconsider that if we end up with a too large search space
  // for target with hundreds of registers.
  // Indeed, in that case we may want to cut the search space earlier.
  if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
    LLVM_DEBUG(dbgs() << "Abort because max depth has been reached.\n");
    CutOffInfo |= CO_Depth;
    return ~0u;
  }

  // Set of Live intervals that will need to be recolored.
  SmallLISet RecoloringCandidates;
  // Record the original mapping virtual register to physical register in case
  // the recoloring fails.
  DenseMap<Register, MCRegister> VirtRegToPhysReg;
  // Mark VirtReg as fixed, i.e., it will not be recolored pass this point in
  // this recoloring "session".
  assert(!FixedRegisters.count(VirtReg.reg()));
  FixedRegisters.insert(VirtReg.reg());
  SmallVector<Register, 4> CurrentNewVRegs;

  for (MCRegister PhysReg : Order) {
    assert(PhysReg.isValid());
    LLVM_DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
                      << printReg(PhysReg, TRI) << '\n');
    // Per-PhysReg scratch state: reset before each attempt.
    RecoloringCandidates.clear();
    VirtRegToPhysReg.clear();
    CurrentNewVRegs.clear();

    // It is only possible to recolor virtual register interference.
    if (Matrix->checkInterference(VirtReg, PhysReg) >
        LiveRegMatrix::IK_VirtReg) {
      LLVM_DEBUG(
          dbgs() << "Some interferences are not with virtual registers.\n");

      continue;
    }

    // Early give up on this PhysReg if it is obvious we cannot recolor all
    // the interferences.
    if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
                                    FixedRegisters)) {
      LLVM_DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
      continue;
    }

    // RecoloringCandidates contains all the virtual registers that interfer
    // with VirtReg on PhysReg (or one of its aliases).
    // Enqueue them for recoloring and perform the actual recoloring.
    PQueue RecoloringQueue;
    for (LiveInterval *RC : RecoloringCandidates) {
      Register ItVirtReg = RC->reg();
      enqueue(RecoloringQueue, RC);
      assert(VRM->hasPhys(ItVirtReg) &&
             "Interferences are supposed to be with allocated variables");

      // Record the current allocation.
      VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
      // unset the related struct.
      Matrix->unassign(*RC);
    }

    // Do as if VirtReg was assigned to PhysReg so that the underlying
    // recoloring has the right information about the interferes and
    // available colors.
    Matrix->assign(VirtReg, PhysReg);

    // Save the current recoloring state.
    // If we cannot recolor all the interferences, we will have to start again
    // at this point for the next physical register.
    SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
    if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
                                FixedRegisters, Depth)) {
      // Push the queued vregs into the main queue.
      for (Register NewVReg : CurrentNewVRegs)
        NewVRegs.push_back(NewVReg);
      // Do not mess up with the global assignment process.
      // I.e., VirtReg must be unassigned.
      Matrix->unassign(VirtReg);
      return PhysReg;
    }

    LLVM_DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
                      << printReg(PhysReg, TRI) << '\n');

    // The recoloring attempt failed, undo the changes.
    FixedRegisters = SaveFixedRegisters;
    Matrix->unassign(VirtReg);

    // For a newly created vreg which is also in RecoloringCandidates,
    // don't add it to NewVRegs because its physical register will be restored
    // below. Other vregs in CurrentNewVRegs are created by calling
    // selectOrSplit and should be added into NewVRegs.
    for (Register &R : CurrentNewVRegs) {
      if (RecoloringCandidates.count(&LIS->getInterval(R)))
        continue;
      NewVRegs.push_back(R);
    }

    // Restore every candidate to the assignment recorded in VirtRegToPhysReg
    // before this attempt.
    for (LiveInterval *RC : RecoloringCandidates) {
      Register ItVirtReg = RC->reg();
      if (VRM->hasPhys(ItVirtReg))
        Matrix->unassign(*RC);
      MCRegister ItPhysReg = VirtRegToPhysReg[ItVirtReg];
      Matrix->assign(*RC, ItPhysReg);
    }
  }

  // Last chance recoloring did not worked either, give up.
  return ~0u;
}

/// tryRecoloringCandidates - Try to assign a new color to every register
/// in \RecoloringQueue.
/// \p NewRegs will contain any new virtual register created during the
/// recoloring process.
/// \p FixedRegisters[in/out] contains all the registers that have been
/// recolored.
/// \return true if all virtual registers in RecoloringQueue were successfully
/// recolored, false otherwise.
bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
                                       SmallVectorImpl<Register> &NewVRegs,
                                       SmallVirtRegSet &FixedRegisters,
                                       unsigned Depth) {
  while (!RecoloringQueue.empty()) {
    LiveInterval *LI = dequeue(RecoloringQueue);
    LLVM_DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
    // Recurse into the main allocation path; Depth + 1 feeds the recoloring
    // depth cutoff.
    MCRegister PhysReg =
        selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
    // When splitting happens, the live-range may actually be empty.
    // In that case, this is okay to continue the recoloring even
    // if we did not find an alternative color for it. Indeed,
    // there will not be anything to color for LI in the end.
    if (PhysReg == ~0u || (!PhysReg && !LI->empty()))
      return false;

    if (!PhysReg) {
      assert(LI->empty() && "Only empty live-range do not require a register");
      LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
                        << " succeeded. Empty LI.\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
                      << " succeeded with: " << printReg(PhysReg, TRI) << '\n');

    // Commit the new color and pin LI for the rest of this session.
    Matrix->assign(*LI, PhysReg);
    FixedRegisters.insert(LI->reg());
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

MCRegister RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                   SmallVectorImpl<Register> &NewVRegs) {
  CutOffInfo = CO_None;
  LLVMContext &Ctx = MF->getFunction().getContext();
  SmallVirtRegSet FixedRegisters;
  MCRegister Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
  // ~0U from selectOrSplitImpl means allocation failed; report which
  // recoloring cutoff(s) were responsible, if any.
  if (Reg == ~0U && (CutOffInfo != CO_None)) {
    uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
    if (CutOffEncountered == CO_Depth)
      Ctx.emitError("register allocation failed: maximum depth for recoloring "
                    "reached. Use -fexhaustive-register-search to skip "
                    "cutoffs");
    else if (CutOffEncountered == CO_Interf)
      Ctx.emitError("register allocation failed: maximum interference for "
                    "recoloring reached. Use -fexhaustive-register-search "
                    "to skip cutoffs");
    else if (CutOffEncountered == (CO_Depth | CO_Interf))
      Ctx.emitError("register allocation failed: maximum interference and "
                    "depth for recoloring reached. Use "
                    "-fexhaustive-register-search to skip cutoffs");
  }
  return Reg;
}

/// Using a CSR for the first time has a cost because it causes push|pop
/// to be added to prologue|epilogue. Splitting a cold section of the live
/// range can have lower cost than using the CSR for the first time;
/// Spilling a live range in the cold path can have lower cost than using
/// the CSR for the first time. Returns the physical register if we decide
/// to use the CSR; otherwise return 0.
MCRegister RAGreedy::tryAssignCSRFirstTime(
    LiveInterval &VirtReg, AllocationOrder &Order, MCRegister PhysReg,
    uint8_t &CostPerUseLimit, SmallVectorImpl<Register> &NewVRegs) {
  if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
    // We choose spill over using the CSR for the first time if the spill cost
    // is lower than CSRCost.
    SA->analyze(&VirtReg);
    if (calcSpillCost() >= CSRCost)
      return PhysReg;

    // We are going to spill, set CostPerUseLimit to 1 to make sure that
    // we will not use a callee-saved register in tryEvict.
    CostPerUseLimit = 1;
    return 0;
  }
  if (getStage(VirtReg) < RS_Split) {
    // We choose pre-splitting over using the CSR for the first time if
    // the cost of splitting is lower than CSRCost.
    SA->analyze(&VirtReg);
    unsigned NumCands = 0;
    BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
    unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
                                                 NumCands, true /*IgnoreCSR*/);
    if (BestCand == NoCand)
      // Use the CSR if we can't find a region split below CSRCost.
      return PhysReg;

    // Perform the actual pre-splitting.
    doRegionSplit(VirtReg, BestCand, false/*HasCompact*/, NewVRegs);
    return 0;
  }
  return PhysReg;
}

void RAGreedy::aboutToRemoveInterval(LiveInterval &LI) {
  // Do not keep invalid information around.
  SetOfBrokenHints.remove(&LI);
}

void RAGreedy::initializeCSRCost() {
  // We use the larger one out of the command-line option and the value report
  // by TRI.
  CSRCost = BlockFrequency(
      std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
  // A zero cost disables the CSR-first-time heuristic entirely.
  if (!CSRCost.getFrequency())
    return;

  // Raw cost is relative to Entry == 2^14; scale it appropriately.
  uint64_t ActualEntry = MBFI->getEntryFreq();
  if (!ActualEntry) {
    CSRCost = 0;
    return;
  }
  uint64_t FixedEntry = 1 << 14;
  if (ActualEntry < FixedEntry)
    CSRCost *= BranchProbability(ActualEntry, FixedEntry);
  else if (ActualEntry <= UINT32_MAX)
    // Invert the fraction and divide.
    CSRCost /= BranchProbability(FixedEntry, ActualEntry);
  else
    // Can't use BranchProbability in general, since it takes 32-bit numbers.
    CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
}

/// Collect the hint info for \p Reg.
/// The results are stored into \p Out.
/// \p Out is not cleared before being populated.
void RAGreedy::collectHintInfo(Register Reg, HintsInfo &Out) {
  // Only full copies are considered hint edges; partial/subreg copies are
  // skipped by isFullCopy().
  for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
    if (!Instr.isFullCopy())
      continue;
    // Look for the other end of the copy.
    Register OtherReg = Instr.getOperand(0).getReg();
    if (OtherReg == Reg) {
      OtherReg = Instr.getOperand(1).getReg();
      // Identity copy: nothing to record.
      if (OtherReg == Reg)
        continue;
    }
    // Get the current assignment.
    MCRegister OtherPhysReg =
        OtherReg.isPhysical() ? OtherReg.asMCReg() : VRM->getPhys(OtherReg);
    // Push the collected information.
    Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
                           OtherPhysReg));
  }
}

/// Using the given \p List, compute the cost of the broken hints if
/// \p PhysReg was used.
/// \return The cost of \p List for \p PhysReg.
BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
                                           MCRegister PhysReg) {
  BlockFrequency Cost = 0;
  // Every hint whose other end is NOT on PhysReg becomes a real (non-identity)
  // copy; charge its block frequency.
  for (const HintInfo &Info : List) {
    if (Info.PhysReg != PhysReg)
      Cost += Info.Freq;
  }
  return Cost;
}

/// Using the register assigned to \p VirtReg, try to recolor
/// all the live ranges that are copy-related with \p VirtReg.
/// The recoloring is then propagated to all the live-ranges that have
/// been recolored and so on, until no more copies can be coalesced or
/// it is not profitable.
/// For a given live range, profitability is determined by the sum of the /// frequencies of the non-identity copies it would introduce with the old /// and new register. void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) { // We have a broken hint, check if it is possible to fix it by // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted // some register and PhysReg may be available for the other live-ranges. SmallSet<Register, 4> Visited; SmallVector<unsigned, 2> RecoloringCandidates; HintsInfo Info; Register Reg = VirtReg.reg(); MCRegister PhysReg = VRM->getPhys(Reg); // Start the recoloring algorithm from the input live-interval, then // it will propagate to the ones that are copy-related with it. Visited.insert(Reg); RecoloringCandidates.push_back(Reg); LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI) << '(' << printReg(PhysReg, TRI) << ")\n"); do { Reg = RecoloringCandidates.pop_back_val(); // We cannot recolor physical register. if (Register::isPhysicalRegister(Reg)) continue; assert(VRM->hasPhys(Reg) && "We have unallocated variable!!"); // Get the live interval mapped with this virtual register to be able // to check for the interference with the new color. LiveInterval &LI = LIS->getInterval(Reg); MCRegister CurrPhys = VRM->getPhys(Reg); // Check that the new color matches the register class constraints and // that it is free for this live range. if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) || Matrix->checkInterference(LI, PhysReg))) continue; LLVM_DEBUG(dbgs() << printReg(Reg, TRI) << '(' << printReg(CurrPhys, TRI) << ") is recolorable.\n"); // Gather the hint info. Info.clear(); collectHintInfo(Reg, Info); // Check if recoloring the live-range will increase the cost of the // non-identity copies. 
if (CurrPhys != PhysReg) { LLVM_DEBUG(dbgs() << "Checking profitability:\n"); BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys); BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg); LLVM_DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency() << "\nNew Cost: " << NewCopiesCost.getFrequency() << '\n'); if (OldCopiesCost < NewCopiesCost) { LLVM_DEBUG(dbgs() << "=> Not profitable.\n"); continue; } // At this point, the cost is either cheaper or equal. If it is // equal, we consider this is profitable because it may expose // more recoloring opportunities. LLVM_DEBUG(dbgs() << "=> Profitable.\n"); // Recolor the live-range. Matrix->unassign(LI); Matrix->assign(LI, PhysReg); } // Push all copy-related live-ranges to keep reconciling the broken // hints. for (const HintInfo &HI : Info) { if (Visited.insert(HI.Reg).second) RecoloringCandidates.push_back(HI.Reg); } } while (!RecoloringCandidates.empty()); } /// Try to recolor broken hints. /// Broken hints may be repaired by recoloring when an evicted variable /// freed up a register for a larger live-range. /// Consider the following example: /// BB1: /// a = /// b = /// BB2: /// ... /// = b /// = a /// Let us assume b gets split: /// BB1: /// a = /// b = /// BB2: /// c = b /// ... /// d = c /// = d /// = a /// Because of how the allocation work, b, c, and d may be assigned different /// colors. Now, if a gets evicted later: /// BB1: /// a = /// st a, SpillSlot /// b = /// BB2: /// c = b /// ... /// d = c /// = d /// e = ld SpillSlot /// = e /// This is likely that we can assign the same register for b, c, and d, /// getting rid of 2 copies. void RAGreedy::tryHintsRecoloring() { for (LiveInterval *LI : SetOfBrokenHints) { assert(Register::isVirtualRegister(LI->reg()) && "Recoloring is possible only for virtual registers"); // Some dead defs may be around (e.g., because of debug uses). // Ignore those. 
if (!VRM->hasPhys(LI->reg())) continue; tryHintRecoloring(*LI); } } MCRegister RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg, SmallVectorImpl<Register> &NewVRegs, SmallVirtRegSet &FixedRegisters, unsigned Depth) { uint8_t CostPerUseLimit = uint8_t(~0u); // First try assigning a free register. auto Order = AllocationOrder::create(VirtReg.reg(), *VRM, RegClassInfo, Matrix); if (MCRegister PhysReg = tryAssign(VirtReg, Order, NewVRegs, FixedRegisters)) { // If VirtReg got an assignment, the eviction info is no longer relevant. LastEvicted.clearEvicteeInfo(VirtReg.reg()); // When NewVRegs is not empty, we may have made decisions such as evicting // a virtual register, go with the earlier decisions and use the physical // register. if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) && NewVRegs.empty()) { MCRegister CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg, CostPerUseLimit, NewVRegs); if (CSRReg || !NewVRegs.empty()) // Return now if we decide to use a CSR or create new vregs due to // pre-splitting. return CSRReg; } else return PhysReg; } LiveRangeStage Stage = getStage(VirtReg); LLVM_DEBUG(dbgs() << StageName[Stage] << " Cascade " << ExtraRegInfo[VirtReg.reg()].Cascade << '\n'); // Try to evict a less worthy live range, but only for ranges from the primary // queue. The RS_Split ranges already failed to do this, and they should not // get a second chance until they have been split. if (Stage != RS_Split) if (Register PhysReg = tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit, FixedRegisters)) { Register Hint = MRI->getSimpleHint(VirtReg.reg()); // If VirtReg has a hint and that hint is broken record this // virtual register as a recoloring candidate for broken hint. // Indeed, since we evicted a variable in its neighborhood it is // likely we can at least partially recolor some of the // copy-related live-ranges. 
if (Hint && Hint != PhysReg) SetOfBrokenHints.insert(&VirtReg); // If VirtReg eviction someone, the eviction info for it as an evictee is // no longer relevant. LastEvicted.clearEvicteeInfo(VirtReg.reg()); return PhysReg; } assert((NewVRegs.empty() || Depth) && "Cannot append to existing NewVRegs"); // The first time we see a live range, don't try to split or spill. // Wait until the second time, when all smaller ranges have been allocated. // This gives a better picture of the interference to split around. if (Stage < RS_Split) { setStage(VirtReg, RS_Split); LLVM_DEBUG(dbgs() << "wait for second round\n"); NewVRegs.push_back(VirtReg.reg()); return 0; } if (Stage < RS_Spill) { // Try splitting VirtReg or interferences. unsigned NewVRegSizeBefore = NewVRegs.size(); Register PhysReg = trySplit(VirtReg, Order, NewVRegs, FixedRegisters); if (PhysReg || (NewVRegs.size() - NewVRegSizeBefore)) { // If VirtReg got split, the eviction info is no longer relevant. LastEvicted.clearEvicteeInfo(VirtReg.reg()); return PhysReg; } } // If we couldn't allocate a register from spilling, there is probably some // invalid inline assembly. The base class will report it. if (Stage >= RS_Done || !VirtReg.isSpillable()) return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters, Depth); // Finally spill VirtReg itself. if ((EnableDeferredSpilling || TRI->shouldUseDeferredSpillingForVirtReg(*MF, VirtReg)) && getStage(VirtReg) < RS_Memory) { // TODO: This is experimental and in particular, we do not model // the live range splitting done by spilling correctly. // We would need a deep integration with the spiller to do the // right thing here. Anyway, that is still good for early testing. 
setStage(VirtReg, RS_Memory); LLVM_DEBUG(dbgs() << "Do as if this register is in memory\n"); NewVRegs.push_back(VirtReg.reg()); } else { NamedRegionTimer T("spill", "Spiller", TimerGroupName, TimerGroupDescription, TimePassesIsEnabled); LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats); spiller().spill(LRE); setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done); // Tell LiveDebugVariables about the new ranges. Ranges not being covered by // the new regs are kept in LDV (still mapping to the old register), until // we rewrite spilled locations in LDV at a later stage. DebugVars->splitRegister(VirtReg.reg(), LRE.regs(), *LIS); if (VerifyEnabled) MF->verify(this, "After spilling"); } // The live virtual register requesting allocation was spilled, so tell // the caller not to allocate anything during this round. return 0; } void RAGreedy::RAGreedyStats::report(MachineOptimizationRemarkMissed &R) { using namespace ore; if (Spills) { R << NV("NumSpills", Spills) << " spills "; R << NV("TotalSpillsCost", SpillsCost) << " total spills cost "; } if (FoldedSpills) { R << NV("NumFoldedSpills", FoldedSpills) << " folded spills "; R << NV("TotalFoldedSpillsCost", FoldedSpillsCost) << " total folded spills cost "; } if (Reloads) { R << NV("NumReloads", Reloads) << " reloads "; R << NV("TotalReloadsCost", ReloadsCost) << " total reloads cost "; } if (FoldedReloads) { R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads "; R << NV("TotalFoldedReloadsCost", FoldedReloadsCost) << " total folded reloads cost "; } if (ZeroCostFoldedReloads) R << NV("NumZeroCostFoldedReloads", ZeroCostFoldedReloads) << " zero cost folded reloads "; if (Copies) { R << NV("NumVRCopies", Copies) << " virtual registers copies "; R << NV("TotalCopiesCost", CopiesCost) << " total copies cost "; } } RAGreedy::RAGreedyStats RAGreedy::computeStats(MachineBasicBlock &MBB) { RAGreedyStats Stats; const MachineFrameInfo &MFI = MF->getFrameInfo(); int FI; auto isSpillSlotAccess = 
[&MFI](const MachineMemOperand *A) { return MFI.isSpillSlotObjectIndex(cast<FixedStackPseudoSourceValue>( A->getPseudoValue())->getFrameIndex()); }; auto isPatchpointInstr = [](const MachineInstr &MI) { return MI.getOpcode() == TargetOpcode::PATCHPOINT || MI.getOpcode() == TargetOpcode::STACKMAP || MI.getOpcode() == TargetOpcode::STATEPOINT; }; for (MachineInstr &MI : MBB) { if (MI.isCopy()) { MachineOperand &Dest = MI.getOperand(0); MachineOperand &Src = MI.getOperand(1); if (Dest.isReg() && Src.isReg() && Dest.getReg().isVirtual() && Src.getReg().isVirtual()) ++Stats.Copies; continue; } SmallVector<const MachineMemOperand *, 2> Accesses; if (TII->isLoadFromStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI)) { ++Stats.Reloads; continue; } if (TII->isStoreToStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI)) { ++Stats.Spills; continue; } if (TII->hasLoadFromStackSlot(MI, Accesses) && llvm::any_of(Accesses, isSpillSlotAccess)) { if (!isPatchpointInstr(MI)) { Stats.FoldedReloads += Accesses.size(); continue; } // For statepoint there may be folded and zero cost folded stack reloads. std::pair<unsigned, unsigned> NonZeroCostRange = TII->getPatchpointUnfoldableRange(MI); SmallSet<unsigned, 16> FoldedReloads; SmallSet<unsigned, 16> ZeroCostFoldedReloads; for (unsigned Idx = 0, E = MI.getNumOperands(); Idx < E; ++Idx) { MachineOperand &MO = MI.getOperand(Idx); if (!MO.isFI() || !MFI.isSpillSlotObjectIndex(MO.getIndex())) continue; if (Idx >= NonZeroCostRange.first && Idx < NonZeroCostRange.second) FoldedReloads.insert(MO.getIndex()); else ZeroCostFoldedReloads.insert(MO.getIndex()); } // If stack slot is used in folded reload it is not zero cost then. 
for (unsigned Slot : FoldedReloads) ZeroCostFoldedReloads.erase(Slot); Stats.FoldedReloads += FoldedReloads.size(); Stats.ZeroCostFoldedReloads += ZeroCostFoldedReloads.size(); continue; } Accesses.clear(); if (TII->hasStoreToStackSlot(MI, Accesses) && llvm::any_of(Accesses, isSpillSlotAccess)) { Stats.FoldedSpills += Accesses.size(); } } // Set cost of collected statistic by multiplication to relative frequency of // this basic block. float RelFreq = MBFI->getBlockFreqRelativeToEntryBlock(&MBB); Stats.ReloadsCost = RelFreq * Stats.Reloads; Stats.FoldedReloadsCost = RelFreq * Stats.FoldedReloads; Stats.SpillsCost = RelFreq * Stats.Spills; Stats.FoldedSpillsCost = RelFreq * Stats.FoldedSpills; Stats.CopiesCost = RelFreq * Stats.Copies; return Stats; } RAGreedy::RAGreedyStats RAGreedy::reportStats(MachineLoop *L) { RAGreedyStats Stats; // Sum up the spill and reloads in subloops. for (MachineLoop *SubLoop : *L) Stats.add(reportStats(SubLoop)); for (MachineBasicBlock *MBB : L->getBlocks()) // Handle blocks that were not included in subloops. if (Loops->getLoopFor(MBB) == L) Stats.add(computeStats(*MBB)); if (!Stats.isEmpty()) { using namespace ore; ORE->emit([&]() { MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReloadCopies", L->getStartLoc(), L->getHeader()); Stats.report(R); R << "generated in loop"; return R; }); } return Stats; } void RAGreedy::reportStats() { if (!ORE->allowExtraAnalysis(DEBUG_TYPE)) return; RAGreedyStats Stats; for (MachineLoop *L : *Loops) Stats.add(reportStats(L)); // Process non-loop blocks. 
for (MachineBasicBlock &MBB : *MF) if (!Loops->getLoopFor(&MBB)) Stats.add(computeStats(MBB)); if (!Stats.isEmpty()) { using namespace ore; ORE->emit([&]() { DebugLoc Loc; if (auto *SP = MF->getFunction().getSubprogram()) Loc = DILocation::get(SP->getContext(), SP->getLine(), 1, SP); MachineOptimizationRemarkMissed R(DEBUG_TYPE, "SpillReloadCopies", Loc, &MF->front()); Stats.report(R); R << "generated in function"; return R; }); } } bool RAGreedy::runOnMachineFunction(MachineFunction &mf) { LLVM_DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n" << "********** Function: " << mf.getName() << '\n'); MF = &mf; TRI = MF->getSubtarget().getRegisterInfo(); TII = MF->getSubtarget().getInstrInfo(); RCI.runOnMachineFunction(mf); EnableLocalReassign = EnableLocalReassignment || MF->getSubtarget().enableRALocalReassignment( MF->getTarget().getOptLevel()); EnableAdvancedRASplitCost = ConsiderLocalIntervalCost.getNumOccurrences() ? ConsiderLocalIntervalCost : MF->getSubtarget().enableAdvancedRASplitCost(); if (VerifyEnabled) MF->verify(this, "Before greedy register allocator"); RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>(), getAnalysis<LiveRegMatrix>()); Indexes = &getAnalysis<SlotIndexes>(); MBFI = &getAnalysis<MachineBlockFrequencyInfo>(); DomTree = &getAnalysis<MachineDominatorTree>(); ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE(); Loops = &getAnalysis<MachineLoopInfo>(); Bundles = &getAnalysis<EdgeBundles>(); SpillPlacer = &getAnalysis<SpillPlacement>(); DebugVars = &getAnalysis<LiveDebugVariables>(); AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); initializeCSRCost(); RegCosts = TRI->getRegisterCosts(*MF); VRAI = std::make_unique<VirtRegAuxInfo>(*MF, *LIS, *VRM, *Loops, *MBFI); SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM, *VRAI)); VRAI->calculateSpillWeightsAndHints(); LLVM_DEBUG(LIS->dump()); SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops)); SE.reset(new SplitEditor(*SA, *AA, 
*LIS, *VRM, *DomTree, *MBFI, *VRAI)); ExtraRegInfo.clear(); ExtraRegInfo.resize(MRI->getNumVirtRegs()); NextCascade = 1; IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI); GlobalCand.resize(32); // This will grow as needed. SetOfBrokenHints.clear(); LastEvicted.clear(); allocatePhysRegs(); tryHintsRecoloring(); if (VerifyEnabled) MF->verify(this, "Before post optimization"); postOptimization(); reportStats(); releaseMemory(); return true; }
// Copyright 1998-2017 Epic Games, Inc. All Rights Reserved. #include "OpenMesh.h" #include "Runtime/Projects/Public/Interfaces/IPluginManager.h" #define LOCTEXT_NAMESPACE "FOpenMeshModule" void FOpenMeshModule::StartupModule() { // This code will execute after your module is loaded into memory; the exact timing is specified in the .uplugin file per-module //FString PluginBaseDir = IPluginManager::Get().FindPlugin("RuntimeMeshLoader")->GetBaseDir(); } void FOpenMeshModule::ShutdownModule() { // This function may be called during shutdown to clean up your module. For modules that support dynamic reloading, // we call this function before unloading the module. } #undef LOCTEXT_NAMESPACE IMPLEMENT_MODULE(FOpenMeshModule, OpenMesh)
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "google/cloud/testing_util/example_driver.h" #include "google/cloud/internal/getenv.h" #include "google/cloud/log.h" #include <iostream> #if GOOGLE_CLOUD_CPP_HAVE_EXCEPTIONS namespace google { namespace cloud { GOOGLE_CLOUD_CPP_INLINE_NAMESPACE_BEGIN namespace testing_util { Example::Example(std::map<std::string, CommandType> commands) : commands_(std::move(commands)) { // Force each command to generate its Usage string, so we can provide a good // usage string for the whole program. 
for (auto const& kv : commands_) { if (kv.first == "auto") continue; try { kv.second({"--help"}); } catch (Usage const& u) { full_usage_ += " "; full_usage_ += u.what(); full_usage_ += "\n"; } } } int Example::Run(int argc, char const* const argv[]) try { bool auto_run = google::cloud::internal::GetEnv("GOOGLE_CLOUD_CPP_AUTO_RUN_EXAMPLES") .value_or("") == "yes"; if (argc == 1 && auto_run) { auto entry = commands_.find("auto"); if (entry == commands_.end()) { PrintUsage(argv[0], "Requested auto run but there is no 'auto' command"); return 1; } entry->second({}); return 0; } if (argc < 2) { PrintUsage(argv[0], "Missing command"); return 1; } std::string const command_name = argv[1]; auto command = commands_.find(command_name); if (commands_.end() == command) { PrintUsage(argv[0], "Unknown command: " + command_name); return 1; } command->second({argv + 2, argv + argc}); return 0; } catch (Usage const& u) { PrintUsage(argv[0], u.what()); return 1; } catch (std::exception const& ex) { std::cerr << "Standard exception raised: " << ex.what() << "\n"; google::cloud::LogSink::Instance().Flush(); throw; } void Example::PrintUsage(std::string const& cmd, std::string const& msg) { auto last_slash = cmd.find_last_of('/'); auto program = cmd.substr(last_slash + 1); std::cerr << msg << "\nUsage: " << program << " <command> [arguments]\n\n" << "Commands:\n" << full_usage_ << "\n"; } void CheckEnvironmentVariablesAreSet(std::vector<std::string> const& vars) { for (auto const& var : vars) { auto const value = google::cloud::internal::GetEnv(var.c_str()); if (!value) { throw std::runtime_error("The " + var + " environment variable is not set"); } if (value->empty()) { throw std::runtime_error("The " + var + " environment variable has an empty value"); } } } } // namespace testing_util GOOGLE_CLOUD_CPP_INLINE_NAMESPACE_END } // namespace cloud } // namespace google #endif // GOOGLE_CLOUD_CPP_HAVE_EXCEPTIONS
//===-- CommandObjectMultiword.cpp ------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "lldb/Interpreter/CommandObjectMultiword.h" #include "lldb/Core/Debugger.h" #include "lldb/Interpreter/CommandInterpreter.h" #include "lldb/Interpreter/CommandReturnObject.h" #include "lldb/Interpreter/Options.h" using namespace lldb; using namespace lldb_private; //------------------------------------------------------------------------- // CommandObjectMultiword //------------------------------------------------------------------------- CommandObjectMultiword::CommandObjectMultiword(CommandInterpreter &interpreter, const char *name, const char *help, const char *syntax, uint32_t flags) : CommandObject(interpreter, name, help, syntax, flags), m_can_be_removed(false) {} CommandObjectMultiword::~CommandObjectMultiword() = default; CommandObjectSP CommandObjectMultiword::GetSubcommandSP(llvm::StringRef sub_cmd, StringList *matches) { CommandObjectSP return_cmd_sp; CommandObject::CommandMap::iterator pos; if (!m_subcommand_dict.empty()) { pos = m_subcommand_dict.find(sub_cmd); if (pos != m_subcommand_dict.end()) { // An exact match; append the sub_cmd to the 'matches' string list. if (matches) matches->AppendString(sub_cmd); return_cmd_sp = pos->second; } else { StringList local_matches; if (matches == nullptr) matches = &local_matches; int num_matches = AddNamesMatchingPartialString(m_subcommand_dict, sub_cmd, *matches); if (num_matches == 1) { // Cleaner, but slightly less efficient would be to call back into this // function, since I now know I have an exact match... 
sub_cmd = matches->GetStringAtIndex(0); pos = m_subcommand_dict.find(sub_cmd); if (pos != m_subcommand_dict.end()) return_cmd_sp = pos->second; } } } return return_cmd_sp; } CommandObject * CommandObjectMultiword::GetSubcommandObject(llvm::StringRef sub_cmd, StringList *matches) { return GetSubcommandSP(sub_cmd, matches).get(); } bool CommandObjectMultiword::LoadSubCommand(llvm::StringRef name, const CommandObjectSP &cmd_obj) { if (cmd_obj) assert((&GetCommandInterpreter() == &cmd_obj->GetCommandInterpreter()) && "tried to add a CommandObject from a different interpreter"); CommandMap::iterator pos; bool success = true; pos = m_subcommand_dict.find(name); if (pos == m_subcommand_dict.end()) { m_subcommand_dict[name] = cmd_obj; } else success = false; return success; } bool CommandObjectMultiword::Execute(const char *args_string, CommandReturnObject &result) { Args args(args_string); const size_t argc = args.GetArgumentCount(); if (argc == 0) { this->CommandObject::GenerateHelpText(result); return result.Succeeded(); } auto sub_command = args[0].ref; if (sub_command.empty()) return result.Succeeded(); if (sub_command.equals_lower("help")) { this->CommandObject::GenerateHelpText(result); return result.Succeeded(); } if (m_subcommand_dict.empty()) { result.AppendErrorWithFormat("'%s' does not have any subcommands.\n", GetCommandName().str().c_str()); result.SetStatus(eReturnStatusFailed); return false; } StringList matches; CommandObject *sub_cmd_obj = GetSubcommandObject(sub_command, &matches); if (sub_cmd_obj != nullptr) { // Now call CommandObject::Execute to process options in `rest_of_line`. // From there the command-specific version of Execute will be called, with // the processed arguments. 
args.Shift(); sub_cmd_obj->Execute(args_string, result); return result.Succeeded(); } std::string error_msg; const size_t num_subcmd_matches = matches.GetSize(); if (num_subcmd_matches > 0) error_msg.assign("ambiguous command "); else error_msg.assign("invalid command "); error_msg.append("'"); error_msg.append(GetCommandName()); error_msg.append(" "); error_msg.append(sub_command); error_msg.append("'."); if (num_subcmd_matches > 0) { error_msg.append(" Possible completions:"); for (size_t i = 0; i < matches.GetSize(); i++) { error_msg.append("\n\t"); error_msg.append(matches.GetStringAtIndex(i)); } } error_msg.append("\n"); result.AppendRawError(error_msg.c_str()); result.SetStatus(eReturnStatusFailed); return false; } void CommandObjectMultiword::GenerateHelpText(Stream &output_stream) { // First time through here, generate the help text for the object and push it // to the return result object as well CommandObject::GenerateHelpText(output_stream); output_stream.PutCString("\nThe following subcommands are supported:\n\n"); CommandMap::iterator pos; uint32_t max_len = FindLongestCommandWord(m_subcommand_dict); if (max_len) max_len += 4; // Indent the output by 4 spaces. 
for (pos = m_subcommand_dict.begin(); pos != m_subcommand_dict.end(); ++pos) { std::string indented_command(" "); indented_command.append(pos->first); if (pos->second->WantsRawCommandString()) { std::string help_text(pos->second->GetHelp()); help_text.append(" Expects 'raw' input (see 'help raw-input'.)"); m_interpreter.OutputFormattedHelpText(output_stream, indented_command.c_str(), "--", help_text.c_str(), max_len); } else m_interpreter.OutputFormattedHelpText(output_stream, indented_command.c_str(), "--", pos->second->GetHelp(), max_len); } output_stream.PutCString("\nFor more help on any particular subcommand, type " "'help <command> <subcommand>'.\n"); } int CommandObjectMultiword::HandleCompletion(CompletionRequest &request) { // Any of the command matches will provide a complete word, otherwise the // individual completers will override this. request.SetWordComplete(true); auto arg0 = request.GetParsedLine()[0].ref; if (request.GetCursorIndex() == 0) { StringList new_matches, descriptions; AddNamesMatchingPartialString(m_subcommand_dict, arg0, new_matches, &descriptions); request.AddCompletions(new_matches, descriptions); if (new_matches.GetSize() == 1 && new_matches.GetStringAtIndex(0) != nullptr && (arg0 == new_matches.GetStringAtIndex(0))) { StringList temp_matches; CommandObject *cmd_obj = GetSubcommandObject(arg0, &temp_matches); if (cmd_obj != nullptr) { if (request.GetParsedLine().GetArgumentCount() == 1) { request.SetWordComplete(true); } else { request.GetParsedLine().Shift(); request.SetCursorCharPosition(0); request.GetParsedLine().AppendArgument(llvm::StringRef()); return cmd_obj->HandleCompletion(request); } } } return new_matches.GetSize(); } else { StringList new_matches; CommandObject *sub_command_object = GetSubcommandObject(arg0, &new_matches); if (sub_command_object == nullptr) { request.AddCompletions(new_matches); return request.GetNumberOfMatches(); } else { // Remove the one match that we got from calling GetSubcommandObject. 
new_matches.DeleteStringAtIndex(0); request.AddCompletions(new_matches); request.GetParsedLine().Shift(); request.SetCursorIndex(request.GetCursorIndex() - 1); return sub_command_object->HandleCompletion(request); } } } const char *CommandObjectMultiword::GetRepeatCommand(Args &current_command_args, uint32_t index) { index++; if (current_command_args.GetArgumentCount() <= index) return nullptr; CommandObject *sub_command_object = GetSubcommandObject(current_command_args[index].ref); if (sub_command_object == nullptr) return nullptr; return sub_command_object->GetRepeatCommand(current_command_args, index); } void CommandObjectMultiword::AproposAllSubCommands(llvm::StringRef prefix, llvm::StringRef search_word, StringList &commands_found, StringList &commands_help) { CommandObject::CommandMap::const_iterator pos; for (pos = m_subcommand_dict.begin(); pos != m_subcommand_dict.end(); ++pos) { const char *command_name = pos->first.c_str(); CommandObject *sub_cmd_obj = pos->second.get(); StreamString complete_command_name; complete_command_name << prefix << " " << command_name; if (sub_cmd_obj->HelpTextContainsWord(search_word)) { commands_found.AppendString(complete_command_name.GetString()); commands_help.AppendString(sub_cmd_obj->GetHelp()); } if (sub_cmd_obj->IsMultiwordObject()) sub_cmd_obj->AproposAllSubCommands(complete_command_name.GetString(), search_word, commands_found, commands_help); } } CommandObjectProxy::CommandObjectProxy(CommandInterpreter &interpreter, const char *name, const char *help, const char *syntax, uint32_t flags) : CommandObject(interpreter, name, help, syntax, flags) {} CommandObjectProxy::~CommandObjectProxy() = default; llvm::StringRef CommandObjectProxy::GetHelpLong() { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->GetHelpLong(); return llvm::StringRef(); } bool CommandObjectProxy::IsRemovable() const { const CommandObject *proxy_command = const_cast<CommandObjectProxy 
*>(this)->GetProxyCommandObject(); if (proxy_command) return proxy_command->IsRemovable(); return false; } bool CommandObjectProxy::IsMultiwordObject() { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->IsMultiwordObject(); return false; } CommandObjectMultiword *CommandObjectProxy::GetAsMultiwordCommand() { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->GetAsMultiwordCommand(); return nullptr; } void CommandObjectProxy::GenerateHelpText(Stream &result) { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->GenerateHelpText(result); } lldb::CommandObjectSP CommandObjectProxy::GetSubcommandSP(llvm::StringRef sub_cmd, StringList *matches) { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->GetSubcommandSP(sub_cmd, matches); return lldb::CommandObjectSP(); } CommandObject *CommandObjectProxy::GetSubcommandObject(llvm::StringRef sub_cmd, StringList *matches) { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->GetSubcommandObject(sub_cmd, matches); return nullptr; } void CommandObjectProxy::AproposAllSubCommands(llvm::StringRef prefix, llvm::StringRef search_word, StringList &commands_found, StringList &commands_help) { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->AproposAllSubCommands(prefix, search_word, commands_found, commands_help); } bool CommandObjectProxy::LoadSubCommand( llvm::StringRef cmd_name, const lldb::CommandObjectSP &command_sp) { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->LoadSubCommand(cmd_name, command_sp); return false; } bool CommandObjectProxy::WantsRawCommandString() { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->WantsRawCommandString(); return false; } bool 
CommandObjectProxy::WantsCompletion() { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->WantsCompletion(); return false; } Options *CommandObjectProxy::GetOptions() { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->GetOptions(); return nullptr; } int CommandObjectProxy::HandleCompletion(CompletionRequest &request) { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->HandleCompletion(request); return 0; } int CommandObjectProxy::HandleArgumentCompletion( CompletionRequest &request, OptionElementVector &opt_element_vector) { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->HandleArgumentCompletion(request, opt_element_vector); return 0; } const char *CommandObjectProxy::GetRepeatCommand(Args &current_command_args, uint32_t index) { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->GetRepeatCommand(current_command_args, index); return nullptr; } bool CommandObjectProxy::Execute(const char *args_string, CommandReturnObject &result) { CommandObject *proxy_command = GetProxyCommandObject(); if (proxy_command) return proxy_command->Execute(args_string, result); result.AppendError("command is not implemented"); result.SetStatus(eReturnStatusFailed); return false; }
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/ndarray/ndarray_util.h"
#include "oneflow/user/kernels/loss_kernel_util.h"

namespace oneflow {
namespace user_op {
namespace {

using namespace loss;

// Forward pass of the (unreduced) negative log likelihood loss.
//
// For each of the `num_instances` rows it picks the log-probability at the
// target class and negates it:
//   out[i] = -input[i * num_classes + target[i]] * w(target[i])
// where w(label) is weight[label] when a `weight` array is supplied, else 1.
// Instances whose label equals `ignore_index` produce out[i] == 0 and do not
// contribute to *total_weight. *total_weight accumulates the per-instance
// weights of all non-ignored instances (used by callers for reduction).
// T: floating-point value type; K: integer index/label type.
template<typename T, typename K>
void ComputeNllOut(int64_t num_instances, K num_classes, K ignore_index, const T* input,
                   const K* target, T* out, const T* weight, T* total_weight) {
  *total_weight = 0;
  FOR_RANGE(int64_t, i, 0, num_instances) {
    K label = target[i];
    if (label == ignore_index) {
      out[i] = 0;
      continue;
    }
    // Labels outside [0, num_classes) are programmer errors, not data errors.
    CHECK_GE(label, 0);
    CHECK_LT(label, num_classes);
    T cur_weight = weight == nullptr ? 1 : weight[label];
    *total_weight += cur_weight;
    out[i] = -input[i * num_classes + label] * cur_weight;
  }
}

// Backward pass of the NLL loss: scatters the upstream gradient dy into dx.
// Only the element at the target class of each row receives a gradient:
//   dx[i * num_classes + target[i]] = dy[i] * (-w(target[i]))
// matching d(out[i])/d(input) of ComputeNllOut above. Ignored instances are
// skipped entirely (their dx row stays at whatever the caller initialized it
// to — the grad kernel below zero-fills dx first). `total_weight` is accepted
// for interface symmetry but not read here.
template<typename T, typename K>
void ComputeNllGradOut(int64_t num_instances, K num_classes, K ignore_index, const K* target,
                       const T* dy, T* dx, const T* weight, const T* total_weight) {
  FOR_RANGE(int64_t, i, 0, num_instances) {
    K label = target[i];
    if (label == ignore_index) { continue; }
    CHECK_GE(label, 0);
    CHECK_LT(label, num_classes);
    // Negated weight: gradient of -input * w with respect to input.
    T cur_weight = weight == nullptr ? -1 : -weight[label];
    dx[i * num_classes + label] = dy[i] * cur_weight;
  }
}

// CPU user-op kernel for "nll". Reads "input" (num_instances x num_classes),
// "target" (num_instances) and optional "weight" (num_classes); writes "out"
// (num_instances) and the scalar "total_weight".
template<typename T, typename K>
class NllKernel final : public user_op::OpKernel {
 public:
  NllKernel() = default;
  ~NllKernel() = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const auto* input_blob = ctx->Tensor4ArgNameAndIndex("input", 0);
    const auto* target_blob = ctx->Tensor4ArgNameAndIndex("target", 0);
    auto* out_blob = ctx->Tensor4ArgNameAndIndex("out", 0);
    auto* total_weight_blob = ctx->Tensor4ArgNameAndIndex("total_weight", 0);
    // num_classes is derived from the flattened shapes: input must hold an
    // integral number of class rows per target element.
    const int64_t num_instances = target_blob->shape().elem_cnt();
    CHECK_EQ(input_blob->shape().elem_cnt() % num_instances, 0);
    const K num_classes = static_cast<K>(input_blob->shape().elem_cnt() / num_instances);
    const K ignore_index = static_cast<K>(ctx->Attr<int64_t>("ignore_index"));
    const T* input = input_blob->dptr<T>();
    const K* target = target_blob->dptr<K>();
    T* out = out_blob->mut_dptr<T>();
    T* total_weight = total_weight_blob->mut_dptr<T>();
    // "weight" is an optional input; pass nullptr to mean "all ones".
    const T* weight =
        ctx->has_input("weight", 0) ? ctx->Tensor4ArgNameAndIndex("weight", 0)->dptr<T>() : nullptr;
    ComputeNllOut(num_instances, num_classes, ignore_index, input, target, out, weight,
                  total_weight);
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};

// CPU user-op kernel for "nll_grad". Zero-fills "dx" then scatters dy through
// ComputeNllGradOut.
template<typename T, typename K>
class NllGradKernel final : public user_op::OpKernel {
 public:
  NllGradKernel() = default;
  ~NllGradKernel() = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const auto* input_blob = ctx->Tensor4ArgNameAndIndex("input", 0);
    const auto* target_blob = ctx->Tensor4ArgNameAndIndex("target", 0);
    const auto* dy_blob = ctx->Tensor4ArgNameAndIndex("dy", 0);
    auto* dx_blob = ctx->Tensor4ArgNameAndIndex("dx", 0);
    auto* total_weight_blob = ctx->Tensor4ArgNameAndIndex("total_weight", 0);
    const int64_t num_instances = target_blob->shape().elem_cnt();
    const int64_t input_elem_cnt = input_blob->shape().elem_cnt();
    CHECK_EQ(input_elem_cnt % num_instances, 0);
    const K num_classes = static_cast<K>(input_elem_cnt / num_instances);
    const K ignore_index = static_cast<K>(ctx->Attr<int64_t>("ignore_index"));
    const T* dy = dy_blob->dptr<T>();
    const K* target = target_blob->dptr<K>();
    const T* total_weight = total_weight_blob->dptr<T>();
    T* dx = dx_blob->mut_dptr<T>();
    const T* weight =
        ctx->has_input("weight", 0) ? ctx->Tensor4ArgNameAndIndex("weight", 0)->dptr<T>() : nullptr;
    // NOTE(review): GetCudaAlignedSize rounds the byte count up even though
    // this is a CPU memset — presumably dx buffers are allocated with the
    // same aligned size; confirm against the allocator before changing.
    Memset<DeviceType::kCPU>(ctx->stream(), dx, 0, GetCudaAlignedSize(input_elem_cnt * sizeof(T)));
    ComputeNllGradOut(num_instances, num_classes, ignore_index, target, dy, dx, weight,
                      total_weight);
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};

}  // namespace

// Registers NllKernel for every (floating dtype) x (index dtype) pair,
// matched on CPU device and on the target/out tensor dtypes.
#define REGISTER_NLL_KERNEL(dtype_pair, ltype_pair)                                           \
  REGISTER_USER_KERNEL("nll")                                                                 \
      .SetCreateFn<NllKernel<OF_PP_PAIR_FIRST(dtype_pair), OF_PP_PAIR_FIRST(ltype_pair)>>()   \
      .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCPU)                         \
                       && (user_op::HobDataType("target", 0) == OF_PP_PAIR_SECOND(ltype_pair)) \
                       && (user_op::HobDataType("out", 0) == OF_PP_PAIR_SECOND(dtype_pair)));

// Same product registration for the backward kernel; additionally matches the
// dy/dx dtypes.
#define REGISTER_NLL_GRAD_KERNEL(dtype_pair, ltype_pair)                                        \
  REGISTER_USER_KERNEL("nll_grad")                                                              \
      .SetCreateFn<NllGradKernel<OF_PP_PAIR_FIRST(dtype_pair), OF_PP_PAIR_FIRST(ltype_pair)>>() \
      .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCPU)                           \
                       && (user_op::HobDataType("target", 0) == OF_PP_PAIR_SECOND(ltype_pair))  \
                       && (user_op::HobDataType("dy", 0) == OF_PP_PAIR_SECOND(dtype_pair))      \
                       && (user_op::HobDataType("dx", 0) == OF_PP_PAIR_SECOND(dtype_pair)));

OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_NLL_KERNEL, FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_NLL_GRAD_KERNEL, FLOATING_DATA_TYPE_SEQ,
                                 INDEX_DATA_TYPE_SEQ)

}  // namespace user_op
}  // namespace oneflow
// This sample can go in TrackBarRenderer class overview. // - Snippet2 can go in GetTopPointingThumbSize, and possibly other Gets // - Snippet4 can go in IsSupported, DrawHorizontalTrack, and DrawTopPointingThumb // - Snippet6 can go in DrawVerticalTick; see below about bug in the meantime, though //<Snippet0> #using <System.Drawing.dll> #using <System.Windows.Forms.dll> #using <System.dll> using namespace System; using namespace System::Drawing; using namespace System::Windows::Forms; using namespace System::Windows::Forms::VisualStyles; namespace TrackBarRendererSample { ref class CustomTrackBar : public Control { private: int numberTicks; Rectangle trackRectangle; Rectangle ticksRectangle; Rectangle thumbRectangle; int currentTickPosition; float tickSpace; bool thumbClicked; TrackBarThumbState thumbState; public: CustomTrackBar(int ticks, System::Drawing::Size trackBarSize) { this->Location = Point(10, 10); this->Size = trackBarSize; this->numberTicks = ticks; this->BackColor = Color::DarkCyan; this->DoubleBuffered = true; numberTicks = 10; thumbState = TrackBarThumbState::Normal; // Calculate the initial sizes of the bar, // thumb and ticks. SetupTrackBar(); } //<Snippet2> //<Snippet6> // Calculate the sizes of the bar, thumb, and ticks rectangle. private: void SetupTrackBar() { if (!TrackBarRenderer::IsSupported) { return; } Graphics^ g = this->CreateGraphics(); // Calculate the size of the track bar. trackRectangle.X = ClientRectangle.X + 2; trackRectangle.Y = ClientRectangle.Y + 28; trackRectangle.Width = ClientRectangle.Width - 4; trackRectangle.Height = 4; // Calculate the size of the rectangle in which to // draw the ticks. ticksRectangle.X = trackRectangle.X + 4; ticksRectangle.Y = trackRectangle.Y - 8; ticksRectangle.Width = trackRectangle.Width - 8; ticksRectangle.Height = 4; tickSpace = ((float)ticksRectangle.Width - 1) / ((float)numberTicks - 1); // Calculate the size of the thumb. 
thumbRectangle.Size = TrackBarRenderer::GetTopPointingThumbSize(g, TrackBarThumbState::Normal); thumbRectangle.X = CurrentTickXCoordinate(); thumbRectangle.Y = trackRectangle.Y - 8; } //</Snippet2> private: int CurrentTickXCoordinate() { if (tickSpace == 0) { return 0; } else { return ((int)Math::Round(tickSpace) * currentTickPosition); } } //<Snippet4> // Draw the track bar. protected: virtual void OnPaint(PaintEventArgs^ e) override { if (!TrackBarRenderer::IsSupported) { this->Parent->Text = "CustomTrackBar Disabled"; return; } this->Parent->Text = "CustomTrackBar Enabled"; TrackBarRenderer::DrawHorizontalTrack(e->Graphics, trackRectangle); TrackBarRenderer::DrawTopPointingThumb(e->Graphics, thumbRectangle, thumbState); TrackBarRenderer::DrawHorizontalTicks(e->Graphics, ticksRectangle, numberTicks, EdgeStyle::Raised); } //</Snippet6> // Determine whether the user has clicked the track bar thumb. protected: virtual void OnMouseDown(MouseEventArgs^ e) override { if (!TrackBarRenderer::IsSupported) { return; } if (this->thumbRectangle.Contains(e->Location)) { thumbClicked = true; thumbState = TrackBarThumbState::Pressed; } this->Invalidate(); } //</Snippet4> // Redraw the track bar thumb if the user has moved it. protected: virtual void OnMouseUp(MouseEventArgs^ e) override { if (!TrackBarRenderer::IsSupported) { return; } if (thumbClicked == true) { if (e->Location.X > trackRectangle.X && e->Location.X < (trackRectangle.X + trackRectangle.Width - thumbRectangle.Width)) { thumbClicked = false; thumbState = TrackBarThumbState::Hot; this->Invalidate(); } thumbClicked = false; } } // Track cursor movements. protected: virtual void OnMouseMove(MouseEventArgs^ e) override { if (!TrackBarRenderer::IsSupported) { return; } // The user is moving the thumb. if (thumbClicked == true) { // Track movements to the next tick to the right, if // the cursor has moved halfway to the next tick. 
if (currentTickPosition < numberTicks - 1 && e->Location.X > CurrentTickXCoordinate() + (int)(tickSpace)) { currentTickPosition++; } // Track movements to the next tick to the left, if // cursor has moved halfway to the next tick. else if (currentTickPosition > 0 && e->Location.X < CurrentTickXCoordinate() - (int)(tickSpace / 2)) { currentTickPosition--; } thumbRectangle.X = CurrentTickXCoordinate(); } // The cursor is passing over the track. else { if (thumbRectangle.Contains(e->Location)) { thumbState = TrackBarThumbState::Hot; } else { thumbState = TrackBarThumbState::Normal; } } Invalidate(); } }; ref class Form1 : public Form { public: Form1() { CustomTrackBar^ TrackBar1 = gcnew CustomTrackBar(19, System::Drawing::Size(300, 50)); this->Width = 500; this->Controls->Add(TrackBar1); } }; } [STAThread] int main() { // Note that the call to EnableVisualStyles below does // not affect whether TrackBarRenderer.IsSupported is true; // as long as visual styles are enabled by the operating system, // IsSupported is true. Application::EnableVisualStyles(); Application::Run(gcnew TrackBarRendererSample::Form1()); return 0; } //</Snippet0>
#include "GPUFilterRectMesh.h"
#include "GPUFilterTool.h"

GPUFilterRectMesh::GPUFilterRectMesh(QObject* parent)
    :GPUFilterMesh(parent)
{

}

GPUFilterRectMesh::~GPUFilterRectMesh()
{

}

// Build a full-screen quad out of two triangles: six vertices with a uniform
// red color, a +Z facing normal, and matching texture coordinates, then
// upload the mesh via create().
void GPUFilterRectMesh::init(void)
{
    static const int kVertexCount = 6;

    // Add Vertex (two CCW triangles covering NDC [-1, 1] x [-1, 1])
    static const QVector3D positions[kVertexCount] = {
        QVector3D(-1.0f, -1.0f, 0.0f),
        QVector3D( 1.0f, -1.0f, 0.0f),
        QVector3D( 1.0f,  1.0f, 0.0f),
        QVector3D( 1.0f,  1.0f, 0.0f),
        QVector3D(-1.0f,  1.0f, 0.0f),
        QVector3D(-1.0f, -1.0f, 0.0f)
    };
    for (int i = 0; i < kVertexCount; ++i)
        this->addVertexPostion(positions[i]);

    // Add Color (uniform red for every vertex)
    const QVector3D red(1.0f, 0.0f, 0.0f);
    for (int i = 0; i < kVertexCount; ++i)
        this->addColor(red);

    // Add Normal (all faces point toward the viewer, +Z)
    const QVector3D forward(0.0f, 0.0f, 1.0f);
    for (int i = 0; i < kVertexCount; ++i)
        this->addNormal(forward);

    // Add Texture Coords (same triangle layout as positions, mapped to [0,1])
    static const QVector3D texCoords[kVertexCount] = {
        QVector3D(0.0f, 0.0f, 0.0f),
        QVector3D(1.0f, 0.0f, 0.0f),
        QVector3D(1.0f, 1.0f, 0.0f),
        QVector3D(1.0f, 1.0f, 0.0f),
        QVector3D(0.0f, 1.0f, 0.0f),
        QVector3D(0.0f, 0.0f, 0.0f)
    };
    for (int i = 0; i < kVertexCount; ++i)
        this->addCoord(texCoords[i]);

    this->create();
}

void GPUFilterRectMesh::draw(void)
{
    GPUFilterMesh::draw();
}
/* * This file is part of the Dronecode Camera Manager * * Copyright (C) 2017 Intel Corporation. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstring> #include "CameraComponent.h" #include "CameraDeviceV4l2.h" #include "ImageCaptureGst.h" #include "VideoCaptureGst.h" #ifdef ENABLE_MAVLINK #include "mavlink_server.h" #endif #include "util.h" #include <algorithm> #ifdef ENABLE_GAZEBO #include "CameraDeviceGazebo.h" #include "VideoStreamUdp.h" #endif CameraComponent::CameraComponent(std::string camdev_name) : mCamDevName(camdev_name) , mCamInfo{} , mStoreInfo{} , mImgPath("") , mVidPath("") { log_debug("%s path:%s", __func__, camdev_name.c_str()); // Create a camera device based on device path mCamDev = create_camera_device(camdev_name); if (!mCamDev) return; // TODO :: Raise exception // Get info from the camera device mCamDev->getInfo(mCamInfo); // append uri-null info to the structure // Get list of Parameters supported & its default value mCamDev->init(mCamParam); // start the camera device mCamDev->start(); initStorageInfo(mStoreInfo); #ifdef ENABLE_GAZEBO mVidStream = std::make_shared<VideoStreamUdp>(mCamDev); mVidStream->init(); mVidStream->start(); #endif } CameraComponent::CameraComponent(std::string camdev_name, std::string camdef_uri) : mCamDevName(camdev_name) , mCamInfo{} , mStoreInfo{} , mCamDefURI(camdef_uri) , mImgPath("") { log_debug("%s path:%s with Camera Definition", __func__, camdev_name.c_str()); // Create a camera device 
based on device path mCamDev = create_camera_device(camdev_name); if (!mCamDev) return; // TODO :: Raise exception // Get info from the camera device mCamDev->getInfo(mCamInfo); if (sizeof(mCamInfo.cam_definition_uri) > mCamDefURI.size()) { strcpy((char *)mCamInfo.cam_definition_uri, mCamDefURI.c_str()); } else { log_error("URI length bigger than permitted"); // TODO::Continue with no parameter support } // Get list of Parameters supported & its default value mCamDev->init(mCamParam); // start the camera device mCamDev->start(); initStorageInfo(mStoreInfo); #ifdef ENABLE_GAZEBO mVidStream = std::make_shared<VideoStreamUdp>(mCamDev); mVidStream->init(); mVidStream->start(); #endif } CameraComponent::~CameraComponent() { log_debug("%s", __func__); #ifdef ENABLE_GAZEBO if (mVidStream) { mVidStream->stop(); mVidStream->uninit(); } #endif if (mVidCap) { mVidCap->stop(); mVidCap->uninit(); mVidCap.reset(); } if (mImgCap) { mImgCap->stop(); mImgCap->uninit(); mImgCap.reset(); } // stop the camera device mCamDev->stop(); // Uninit the camera device mCamDev->uninit(); } const CameraInfo &CameraComponent::getCameraInfo() const { return mCamInfo; } const StorageInfo &CameraComponent::getStorageInfo() const { return mStoreInfo; } const std::map<std::string, std::string> &CameraComponent::getParamList() const { return mCamParam.getParameterList(); } void CameraComponent::initStorageInfo(struct StorageInfo &storeInfo) { // TODO:: Fill storage details with real values storeInfo.storage_id = 1; storeInfo.storage_count = 1; storeInfo.status = 2; /*formatted*/ storeInfo.total_capacity = 50.0; storeInfo.used_capacity = 0.0; storeInfo.available_capacity = 50.0; storeInfo.read_speed = 128; storeInfo.write_speed = 128; } int CameraComponent::getParamType(const char *param_id, size_t id_size) { if (!param_id) return 0; return mCamParam.getParameterType(toString(param_id, id_size)); } int CameraComponent::getParam(const char *param_id, size_t id_size, char *param_value, size_t value_size) 
{ // query the value set in the map and fill the output, return appropriate value if (!param_id || !param_value || value_size == 0) return 1; std::string value = mCamParam.getParameter(toString(param_id, id_size)); if (!value.empty()) return 1; mem_cpy(param_value, value_size, value.data(), value.size(), value_size); return 0; } int CameraComponent::setParam(const char *param_name, size_t id_size, const char *param_value, size_t value_size, int param_type) { int ret = 1; std::string param = toString(param_name, id_size); ret = mCamDev->setParam(mCamParam, param, param_value, value_size, param_type); return ret; } int CameraComponent::setCameraMode(uint32_t mode) { mCamDev->setMode(mode); return 0; } int CameraComponent::resetCameraSettings() { int ret = mCamDev->resetParams(mCamParam); if (ret != 0) log_debug("Error in reset of camera parameters. Could not open the device."); return ret; } int CameraComponent::getCameraMode() { return mCamDev->getMode(); } int CameraComponent::setImageCaptureLocation(std::string imgPath) { mImgPath = imgPath; return 0; } int CameraComponent::setImageCaptureSettings(ImageSettings &imgSetting) { if (mImgSetting) mImgSetting.reset(); mImgSetting = std::make_shared<ImageSettings>(); *mImgSetting = imgSetting; return 0; } /* 0: idle, 1: capture in progress, 2: interval set but idle, 3: interval set and capture in * progress */ void CameraComponent::getImageCaptureStatus(uint8_t &status, int &interval) { if (!mImgCap) { status = 0; interval = 0; return; } // get interval interval = mImgCap->getInterval(); switch (mImgCap->getState()) { case ImageCapture::STATE_ERROR: case ImageCapture::STATE_IDLE: case ImageCapture::STATE_INIT: status = 0; break; case ImageCapture::STATE_RUN: if (interval > 0) status = 3; // or 2? 
else status = 1; break; default: status = 0; break; } log_debug("%s Status:%d Interval:%d", __func__, status, interval); return; } int CameraComponent::startImageCapture(int interval, int count, capture_callback_t cb) { int ret = 0; // TODO :: Check if video capture or video streaming is running mImgCapCB = cb; // Delete imgCap instance if already exists // This could be because of no StopImageCapture call after done // Or new startImageCapture call while prev call is still not done if (mImgCap) mImgCap.reset(); // check if settings are available if (mImgSetting) mImgCap = std::make_shared<ImageCaptureGst>(mCamDev, *mImgSetting); else mImgCap = std::make_shared<ImageCaptureGst>(mCamDev); if (!mImgPath.empty()) mImgCap->setLocation(mImgPath); ret = mImgCap->init(); if (!ret) { ret = mImgCap->start(interval, count, std::bind(&CameraComponent::cbImageCaptured, this, std::placeholders::_1, std::placeholders::_2)); if (ret) { mImgCap->uninit(); mImgCap.reset(); } } return ret; } int CameraComponent::stopImageCapture() { if (!mImgCap) return 0; mImgCap->stop(); mImgCap->uninit(); mImgCap.reset(); return 0; } void CameraComponent::cbImageCaptured(int result, int seq_num) { log_debug("%s result:%d sequenc:%d", __func__, result, seq_num); // TODO :: Get the file path of the image and host it via http if (mImgCapCB) mImgCapCB(result, seq_num); } int CameraComponent::setVideoCaptureLocation(std::string vidPath) { mVidPath = vidPath; return 0; } int CameraComponent::setVideoCaptureSettings(VideoSettings &vidSetting) { if (mVidSetting) mVidSetting.reset(); mVidSetting = std::make_shared<VideoSettings>(); *mVidSetting = vidSetting; return 0; } int CameraComponent::startVideoCapture(int status_freq) { int ret = 0; if (mVidCap) mVidCap.reset(); // TODO :: Check if video capture or video streaming is running // check if settings are available if (mVidSetting) mVidCap = std::make_shared<VideoCaptureGst>(mCamDev, *mVidSetting); else mVidCap = 
std::make_shared<VideoCaptureGst>(mCamDev); if (!mVidPath.empty()) mVidCap->setLocation(mVidPath); ret = mVidCap->init(); if (!ret) { ret = mVidCap->start(); if (ret) { mVidCap->uninit(); mVidCap.reset(); } } return ret; } int CameraComponent::stopVideoCapture() { int ret = 0; if (!mVidCap) return 0; mVidCap->stop(); mVidCap->uninit(); mVidCap.reset(); return ret; } /* 0: idle, 1: capture in progress */ uint8_t CameraComponent::getVideoCaptureStatus() { uint8_t ret = 0; if (!mVidCap) return 0; switch (mVidCap->getState()) { case VideoCapture::STATE_ERROR: case VideoCapture::STATE_IDLE: case VideoCapture::STATE_INIT: ret = 0; break; case VideoCapture::STATE_RUN: ret = 1; break; default: ret = 0; break; } log_debug("%s Status:%d", __func__, ret); return ret; } int CameraComponent::setVideoSize(uint32_t param_value) { return 0; } int CameraComponent::setVideoFrameFormat(uint32_t param_value) { return 0; } // TODO:: Move this operation to a factory class std::shared_ptr<CameraDevice> CameraComponent::create_camera_device(std::string camdev_name) { if (camdev_name.find("/dev/video") != std::string::npos) { log_debug("V4L2 device : %s", camdev_name.c_str()); return std::make_shared<CameraDeviceV4l2>(camdev_name); } else if (camdev_name.find("camera/image") != std::string::npos) { log_debug("Gazebo device : %s", camdev_name.c_str()); #ifdef ENABLE_GAZEBO return std::make_shared<CameraDeviceGazebo>(camdev_name); #else log_error("Gazebo device not supported"); return nullptr; #endif } else { log_error("Camera device not found"); return nullptr; } } /* Input string can be either null-terminated or not */ std::string CameraComponent::toString(const char *buf, size_t buf_size) { const char *end = std::find(buf, buf + buf_size, '\0'); return std::string(buf, end); }
//===- RISCVVEmitter.cpp - Generate riscv_vector.h for use with clang -----===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This tablegen backend is responsible for emitting riscv_vector.h which // includes a declaration and definition of each intrinsic functions specified // in https://github.com/riscv/rvv-intrinsic-doc. // // See also the documentation in include/clang/Basic/riscv_vector.td. // //===----------------------------------------------------------------------===// #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/ADT/Twine.h" #include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" #include <numeric> using namespace llvm; using BasicType = char; using VScaleVal = Optional<unsigned>; namespace { // Exponential LMUL struct LMULType { int Log2LMUL; LMULType(int Log2LMUL); // Return the C/C++ string representation of LMUL std::string str() const; Optional<unsigned> getScale(unsigned ElementBitwidth) const; void MulLog2LMUL(int Log2LMUL); LMULType &operator*=(uint32_t RHS); }; // This class is compact representation of a valid and invalid RVVType. class RVVType { enum ScalarTypeKind : uint32_t { Void, Size_t, Ptrdiff_t, UnsignedLong, SignedLong, Boolean, SignedInteger, UnsignedInteger, Float, Invalid, }; BasicType BT; ScalarTypeKind ScalarType = Invalid; LMULType LMUL; bool IsPointer = false; // IsConstant indices are "int", but have the constant expression. bool IsImmediate = false; // Const qualifier for pointer to const object or object of const type. 
bool IsConstant = false; unsigned ElementBitwidth = 0; VScaleVal Scale = 0; bool Valid; std::string BuiltinStr; std::string ClangBuiltinStr; std::string Str; std::string ShortStr; public: RVVType() : RVVType(BasicType(), 0, StringRef()) {} RVVType(BasicType BT, int Log2LMUL, StringRef prototype); // Return the string representation of a type, which is an encoded string for // passing to the BUILTIN() macro in Builtins.def. const std::string &getBuiltinStr() const { return BuiltinStr; } // Return the clang buitlin type for RVV vector type which are used in the // riscv_vector.h header file. const std::string &getClangBuiltinStr() const { return ClangBuiltinStr; } // Return the C/C++ string representation of a type for use in the // riscv_vector.h header file. const std::string &getTypeStr() const { return Str; } // Return the short name of a type for C/C++ name suffix. const std::string &getShortStr() { // Not all types are used in short name, so compute the short name by // demanded. if (ShortStr.empty()) initShortStr(); return ShortStr; } bool isValid() const { return Valid; } bool isScalar() const { return Scale.hasValue() && Scale.getValue() == 0; } bool isVector() const { return Scale.hasValue() && Scale.getValue() != 0; } bool isFloat() const { return ScalarType == ScalarTypeKind::Float; } bool isSignedInteger() const { return ScalarType == ScalarTypeKind::SignedInteger; } bool isFloatVector(unsigned Width) const { return isVector() && isFloat() && ElementBitwidth == Width; } bool isFloat(unsigned Width) const { return isFloat() && ElementBitwidth == Width; } private: // Verify RVV vector type and set Valid. bool verifyType() const; // Creates a type based on basic types of TypeRange void applyBasicType(); // Applies a prototype modifier to the current type. The result maybe an // invalid type. void applyModifier(StringRef prototype); // Compute and record a string for legal type. void initBuiltinStr(); // Compute and record a builtin RVV vector type string. 
void initClangBuiltinStr(); // Compute and record a type string for used in the header. void initTypeStr(); // Compute and record a short name of a type for C/C++ name suffix. void initShortStr(); }; using RVVTypePtr = RVVType *; using RVVTypes = std::vector<RVVTypePtr>; enum RISCVExtension : uint8_t { Basic = 0, F = 1 << 1, D = 1 << 2, Zfh = 1 << 3, Zvamo = 1 << 4, Zvlsseg = 1 << 5, }; // TODO refactor RVVIntrinsic class design after support all intrinsic // combination. This represents an instantiation of an intrinsic with a // particular type and prototype class RVVIntrinsic { private: std::string Name; // Builtin name std::string MangledName; std::string IRName; bool HasSideEffects; bool IsMask; bool HasMaskedOffOperand; bool HasVL; bool HasPolicy; bool HasNoMaskedOverloaded; bool HasAutoDef; // There is automiatic definition in header std::string ManualCodegen; RVVTypePtr OutputType; // Builtin output type RVVTypes InputTypes; // Builtin input types // The types we use to obtain the specific LLVM intrinsic. They are index of // InputTypes. -1 means the return type. 
std::vector<int64_t> IntrinsicTypes; uint8_t RISCVExtensions = 0; unsigned NF = 1; public: RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName, StringRef MangledSuffix, StringRef IRName, bool HasSideEffects, bool IsMask, bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types, const std::vector<int64_t> &IntrinsicTypes, StringRef RequiredExtension, unsigned NF); ~RVVIntrinsic() = default; StringRef getName() const { return Name; } StringRef getMangledName() const { return MangledName; } bool hasSideEffects() const { return HasSideEffects; } bool hasMaskedOffOperand() const { return HasMaskedOffOperand; } bool hasVL() const { return HasVL; } bool hasPolicy() const { return HasPolicy; } bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; } bool hasManualCodegen() const { return !ManualCodegen.empty(); } bool hasAutoDef() const { return HasAutoDef; } bool isMask() const { return IsMask; } StringRef getIRName() const { return IRName; } StringRef getManualCodegen() const { return ManualCodegen; } uint8_t getRISCVExtensions() const { return RISCVExtensions; } unsigned getNF() const { return NF; } // Return the type string for a BUILTIN() macro in Builtins.def. std::string getBuiltinTypeStr() const; // Emit the code block for switch body in EmitRISCVBuiltinExpr, it should // init the RVVIntrinsic ID and IntrinsicTypes. void emitCodeGenSwitchBody(raw_ostream &o) const; // Emit the define macors for mask intrinsics using _mt intrinsics. void emitIntrinsicMaskMacro(raw_ostream &o) const; // Emit the macros for mapping C/C++ intrinsic function to builtin functions. void emitIntrinsicMacro(raw_ostream &o) const; // Emit the mangled function definition. 
void emitMangledFuncDef(raw_ostream &o) const; }; class RVVEmitter { private: RecordKeeper &Records; std::string HeaderCode; // Concat BasicType, LMUL and Proto as key StringMap<RVVType> LegalTypes; StringSet<> IllegalTypes; public: RVVEmitter(RecordKeeper &R) : Records(R) {} /// Emit riscv_vector.h void createHeader(raw_ostream &o); /// Emit all the __builtin prototypes and code needed by Sema. void createBuiltins(raw_ostream &o); /// Emit all the information needed to map builtin -> LLVM IR intrinsic. void createCodeGen(raw_ostream &o); std::string getSuffixStr(char Type, int Log2LMUL, StringRef Prototypes); private: /// Create all intrinsics and add them to \p Out void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out); /// Create Headers and add them to \p Out void createRVVHeaders(raw_ostream &OS); /// Compute output and input types by applying different config (basic type /// and LMUL with type transformers). It also record result of type in legal /// or illegal set to avoid compute the same config again. The result maybe /// have illegal RVVType. Optional<RVVTypes> computeTypes(BasicType BT, int Log2LMUL, unsigned NF, ArrayRef<std::string> PrototypeSeq); Optional<RVVTypePtr> computeType(BasicType BT, int Log2LMUL, StringRef Proto); /// Emit Acrh predecessor definitions and body, assume the element of Defs are /// sorted by extension. void emitArchMacroAndBody( std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &o, std::function<void(raw_ostream &, const RVVIntrinsic &)>); // Emit the architecture preprocessor definitions. Return true when emits // non-empty string. bool emitExtDefStr(uint8_t Extensions, raw_ostream &o); // Slice Prototypes string into sub prototype string and process each sub // prototype string individually in the Handler. 
void parsePrototypes(StringRef Prototypes, std::function<void(StringRef)> Handler); }; } // namespace //===----------------------------------------------------------------------===// // Type implementation //===----------------------------------------------------------------------===// LMULType::LMULType(int NewLog2LMUL) { // Check Log2LMUL is -3, -2, -1, 0, 1, 2, 3 assert(NewLog2LMUL <= 3 && NewLog2LMUL >= -3 && "Bad LMUL number!"); Log2LMUL = NewLog2LMUL; } std::string LMULType::str() const { if (Log2LMUL < 0) return "mf" + utostr(1ULL << (-Log2LMUL)); return "m" + utostr(1ULL << Log2LMUL); } VScaleVal LMULType::getScale(unsigned ElementBitwidth) const { int Log2ScaleResult = 0; switch (ElementBitwidth) { default: break; case 8: Log2ScaleResult = Log2LMUL + 3; break; case 16: Log2ScaleResult = Log2LMUL + 2; break; case 32: Log2ScaleResult = Log2LMUL + 1; break; case 64: Log2ScaleResult = Log2LMUL; break; } // Illegal vscale result would be less than 1 if (Log2ScaleResult < 0) return None; return 1 << Log2ScaleResult; } void LMULType::MulLog2LMUL(int log2LMUL) { Log2LMUL += log2LMUL; } LMULType &LMULType::operator*=(uint32_t RHS) { assert(isPowerOf2_32(RHS)); this->Log2LMUL = this->Log2LMUL + Log2_32(RHS); return *this; } RVVType::RVVType(BasicType BT, int Log2LMUL, StringRef prototype) : BT(BT), LMUL(LMULType(Log2LMUL)) { applyBasicType(); applyModifier(prototype); Valid = verifyType(); if (Valid) { initBuiltinStr(); initTypeStr(); if (isVector()) { initClangBuiltinStr(); } } } // clang-format off // boolean type are encoded the ratio of n (SEW/LMUL) // SEW/LMUL | 1 | 2 | 4 | 8 | 16 | 32 | 64 // c type | vbool64_t | vbool32_t | vbool16_t | vbool8_t | vbool4_t | vbool2_t | vbool1_t // IR type | nxv1i1 | nxv2i1 | nxv4i1 | nxv8i1 | nxv16i1 | nxv32i1 | nxv64i1 // type\lmul | 1/8 | 1/4 | 1/2 | 1 | 2 | 4 | 8 // -------- |------ | -------- | ------- | ------- | -------- | -------- | -------- // i64 | N/A | N/A | N/A | nxv1i64 | nxv2i64 | nxv4i64 | nxv8i64 // i32 | N/A | 
N/A | nxv1i32 | nxv2i32 | nxv4i32 | nxv8i32 | nxv16i32 // i16 | N/A | nxv1i16 | nxv2i16 | nxv4i16 | nxv8i16 | nxv16i16 | nxv32i16 // i8 | nxv1i8 | nxv2i8 | nxv4i8 | nxv8i8 | nxv16i8 | nxv32i8 | nxv64i8 // double | N/A | N/A | N/A | nxv1f64 | nxv2f64 | nxv4f64 | nxv8f64 // float | N/A | N/A | nxv1f32 | nxv2f32 | nxv4f32 | nxv8f32 | nxv16f32 // half | N/A | nxv1f16 | nxv2f16 | nxv4f16 | nxv8f16 | nxv16f16 | nxv32f16 // clang-format on bool RVVType::verifyType() const { if (ScalarType == Invalid) return false; if (isScalar()) return true; if (!Scale.hasValue()) return false; if (isFloat() && ElementBitwidth == 8) return false; unsigned V = Scale.getValue(); switch (ElementBitwidth) { case 1: case 8: // Check Scale is 1,2,4,8,16,32,64 return (V <= 64 && isPowerOf2_32(V)); case 16: // Check Scale is 1,2,4,8,16,32 return (V <= 32 && isPowerOf2_32(V)); case 32: // Check Scale is 1,2,4,8,16 return (V <= 16 && isPowerOf2_32(V)); case 64: // Check Scale is 1,2,4,8 return (V <= 8 && isPowerOf2_32(V)); } return false; } void RVVType::initBuiltinStr() { assert(isValid() && "RVVType is invalid"); switch (ScalarType) { case ScalarTypeKind::Void: BuiltinStr = "v"; return; case ScalarTypeKind::Size_t: BuiltinStr = "z"; if (IsImmediate) BuiltinStr = "I" + BuiltinStr; if (IsPointer) BuiltinStr += "*"; return; case ScalarTypeKind::Ptrdiff_t: BuiltinStr = "Y"; return; case ScalarTypeKind::UnsignedLong: BuiltinStr = "ULi"; return; case ScalarTypeKind::SignedLong: BuiltinStr = "Li"; return; case ScalarTypeKind::Boolean: assert(ElementBitwidth == 1); BuiltinStr += "b"; break; case ScalarTypeKind::SignedInteger: case ScalarTypeKind::UnsignedInteger: switch (ElementBitwidth) { case 8: BuiltinStr += "c"; break; case 16: BuiltinStr += "s"; break; case 32: BuiltinStr += "i"; break; case 64: BuiltinStr += "Wi"; break; default: llvm_unreachable("Unhandled ElementBitwidth!"); } if (isSignedInteger()) BuiltinStr = "S" + BuiltinStr; else BuiltinStr = "U" + BuiltinStr; break; case 
ScalarTypeKind::Float: switch (ElementBitwidth) { case 16: BuiltinStr += "x"; break; case 32: BuiltinStr += "f"; break; case 64: BuiltinStr += "d"; break; default: llvm_unreachable("Unhandled ElementBitwidth!"); } break; default: llvm_unreachable("ScalarType is invalid!"); } if (IsImmediate) BuiltinStr = "I" + BuiltinStr; if (isScalar()) { if (IsConstant) BuiltinStr += "C"; if (IsPointer) BuiltinStr += "*"; return; } BuiltinStr = "q" + utostr(Scale.getValue()) + BuiltinStr; // Pointer to vector types. Defined for Zvlsseg load intrinsics. // Zvlsseg load intrinsics have pointer type arguments to store the loaded // vector values. if (IsPointer) BuiltinStr += "*"; } void RVVType::initClangBuiltinStr() { assert(isValid() && "RVVType is invalid"); assert(isVector() && "Handle Vector type only"); ClangBuiltinStr = "__rvv_"; switch (ScalarType) { case ScalarTypeKind::Boolean: ClangBuiltinStr += "bool" + utostr(64 / Scale.getValue()) + "_t"; return; case ScalarTypeKind::Float: ClangBuiltinStr += "float"; break; case ScalarTypeKind::SignedInteger: ClangBuiltinStr += "int"; break; case ScalarTypeKind::UnsignedInteger: ClangBuiltinStr += "uint"; break; default: llvm_unreachable("ScalarTypeKind is invalid"); } ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() + "_t"; } void RVVType::initTypeStr() { assert(isValid() && "RVVType is invalid"); if (IsConstant) Str += "const "; auto getTypeString = [&](StringRef TypeStr) { if (isScalar()) return Twine(TypeStr + Twine(ElementBitwidth) + "_t").str(); return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() + "_t") .str(); }; switch (ScalarType) { case ScalarTypeKind::Void: Str = "void"; return; case ScalarTypeKind::Size_t: Str = "size_t"; if (IsPointer) Str += " *"; return; case ScalarTypeKind::Ptrdiff_t: Str = "ptrdiff_t"; return; case ScalarTypeKind::UnsignedLong: Str = "unsigned long"; return; case ScalarTypeKind::SignedLong: Str = "long"; return; case ScalarTypeKind::Boolean: if (isScalar()) Str += "bool"; else // 
Vector bool is special case, the formulate is // `vbool<N>_t = MVT::nxv<64/N>i1` ex. vbool16_t = MVT::4i1 Str += "vbool" + utostr(64 / Scale.getValue()) + "_t"; break; case ScalarTypeKind::Float: if (isScalar()) { if (ElementBitwidth == 64) Str += "double"; else if (ElementBitwidth == 32) Str += "float"; else if (ElementBitwidth == 16) Str += "_Float16"; else llvm_unreachable("Unhandled floating type."); } else Str += getTypeString("float"); break; case ScalarTypeKind::SignedInteger: Str += getTypeString("int"); break; case ScalarTypeKind::UnsignedInteger: Str += getTypeString("uint"); break; default: llvm_unreachable("ScalarType is invalid!"); } if (IsPointer) Str += " *"; } void RVVType::initShortStr() { switch (ScalarType) { case ScalarTypeKind::Boolean: assert(isVector()); ShortStr = "b" + utostr(64 / Scale.getValue()); return; case ScalarTypeKind::Float: ShortStr = "f" + utostr(ElementBitwidth); break; case ScalarTypeKind::SignedInteger: ShortStr = "i" + utostr(ElementBitwidth); break; case ScalarTypeKind::UnsignedInteger: ShortStr = "u" + utostr(ElementBitwidth); break; default: PrintFatalError("Unhandled case!"); } if (isVector()) ShortStr += LMUL.str(); } void RVVType::applyBasicType() { switch (BT) { case 'c': ElementBitwidth = 8; ScalarType = ScalarTypeKind::SignedInteger; break; case 's': ElementBitwidth = 16; ScalarType = ScalarTypeKind::SignedInteger; break; case 'i': ElementBitwidth = 32; ScalarType = ScalarTypeKind::SignedInteger; break; case 'l': ElementBitwidth = 64; ScalarType = ScalarTypeKind::SignedInteger; break; case 'x': ElementBitwidth = 16; ScalarType = ScalarTypeKind::Float; break; case 'f': ElementBitwidth = 32; ScalarType = ScalarTypeKind::Float; break; case 'd': ElementBitwidth = 64; ScalarType = ScalarTypeKind::Float; break; default: PrintFatalError("Unhandled type code!"); } assert(ElementBitwidth != 0 && "Bad element bitwidth!"); } void RVVType::applyModifier(StringRef Transformer) { if (Transformer.empty()) return; // Handle 
primitive type transformer auto PType = Transformer.back(); switch (PType) { case 'e': Scale = 0; break; case 'v': Scale = LMUL.getScale(ElementBitwidth); break; case 'w': ElementBitwidth *= 2; LMUL *= 2; Scale = LMUL.getScale(ElementBitwidth); break; case 'q': ElementBitwidth *= 4; LMUL *= 4; Scale = LMUL.getScale(ElementBitwidth); break; case 'o': ElementBitwidth *= 8; LMUL *= 8; Scale = LMUL.getScale(ElementBitwidth); break; case 'm': ScalarType = ScalarTypeKind::Boolean; Scale = LMUL.getScale(ElementBitwidth); ElementBitwidth = 1; break; case '0': ScalarType = ScalarTypeKind::Void; break; case 'z': ScalarType = ScalarTypeKind::Size_t; break; case 't': ScalarType = ScalarTypeKind::Ptrdiff_t; break; case 'u': ScalarType = ScalarTypeKind::UnsignedLong; break; case 'l': ScalarType = ScalarTypeKind::SignedLong; break; default: PrintFatalError("Illegal primitive type transformers!"); } Transformer = Transformer.drop_back(); // Extract and compute complex type transformer. It can only appear one time. 
if (Transformer.startswith("(")) { size_t Idx = Transformer.find(')'); assert(Idx != StringRef::npos); StringRef ComplexType = Transformer.slice(1, Idx); Transformer = Transformer.drop_front(Idx + 1); assert(Transformer.find('(') == StringRef::npos && "Only allow one complex type transformer"); auto UpdateAndCheckComplexProto = [&]() { Scale = LMUL.getScale(ElementBitwidth); const StringRef VectorPrototypes("vwqom"); if (!VectorPrototypes.contains(PType)) PrintFatalError("Complex type transformer only supports vector type!"); if (Transformer.find_first_of("PCKWS") != StringRef::npos) PrintFatalError( "Illegal type transformer for Complex type transformer"); }; auto ComputeFixedLog2LMUL = [&](StringRef Value, std::function<bool(const int32_t &, const int32_t &)> Compare) { int32_t Log2LMUL; Value.getAsInteger(10, Log2LMUL); if (!Compare(Log2LMUL, LMUL.Log2LMUL)) { ScalarType = Invalid; return false; } // Update new LMUL LMUL = LMULType(Log2LMUL); UpdateAndCheckComplexProto(); return true; }; auto ComplexTT = ComplexType.split(":"); if (ComplexTT.first == "Log2EEW") { uint32_t Log2EEW; ComplexTT.second.getAsInteger(10, Log2EEW); // update new elmul = (eew/sew) * lmul LMUL.MulLog2LMUL(Log2EEW - Log2_32(ElementBitwidth)); // update new eew ElementBitwidth = 1 << Log2EEW; ScalarType = ScalarTypeKind::SignedInteger; UpdateAndCheckComplexProto(); } else if (ComplexTT.first == "FixedSEW") { uint32_t NewSEW; ComplexTT.second.getAsInteger(10, NewSEW); // Set invalid type if src and dst SEW are same. 
if (ElementBitwidth == NewSEW) { ScalarType = Invalid; return; } // Update new SEW ElementBitwidth = NewSEW; UpdateAndCheckComplexProto(); } else if (ComplexTT.first == "LFixedLog2LMUL") { // New LMUL should be larger than old if (!ComputeFixedLog2LMUL(ComplexTT.second, std::greater<int32_t>())) return; } else if (ComplexTT.first == "SFixedLog2LMUL") { // New LMUL should be smaller than old if (!ComputeFixedLog2LMUL(ComplexTT.second, std::less<int32_t>())) return; } else { PrintFatalError("Illegal complex type transformers!"); } } // Compute the remain type transformers for (char I : Transformer) { switch (I) { case 'P': if (IsConstant) PrintFatalError("'P' transformer cannot be used after 'C'"); if (IsPointer) PrintFatalError("'P' transformer cannot be used twice"); IsPointer = true; break; case 'C': if (IsConstant) PrintFatalError("'C' transformer cannot be used twice"); IsConstant = true; break; case 'K': IsImmediate = true; break; case 'U': ScalarType = ScalarTypeKind::UnsignedInteger; break; case 'I': ScalarType = ScalarTypeKind::SignedInteger; break; case 'F': ScalarType = ScalarTypeKind::Float; break; case 'S': LMUL = LMULType(0); // Update ElementBitwidth need to update Scale too. 
Scale = LMUL.getScale(ElementBitwidth); break; default: PrintFatalError("Illegal non-primitive type transformer!"); } } } //===----------------------------------------------------------------------===// // RVVIntrinsic implementation //===----------------------------------------------------------------------===// RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix, StringRef NewMangledName, StringRef MangledSuffix, StringRef IRName, bool HasSideEffects, bool IsMask, bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes, StringRef RequiredExtension, unsigned NF) : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask), HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), HasPolicy(HasPolicy), HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF) { // Init Name and MangledName Name = NewName.str(); if (NewMangledName.empty()) MangledName = NewName.split("_").first.str(); else MangledName = NewMangledName.str(); if (!Suffix.empty()) Name += "_" + Suffix.str(); if (!MangledSuffix.empty()) MangledName += "_" + MangledSuffix.str(); if (IsMask) { Name += "_m"; if (HasPolicy) Name += "t"; } // Init RISC-V extensions for (const auto &T : OutInTypes) { if (T->isFloatVector(16) || T->isFloat(16)) RISCVExtensions |= RISCVExtension::Zfh; else if (T->isFloatVector(32) || T->isFloat(32)) RISCVExtensions |= RISCVExtension::F; else if (T->isFloatVector(64) || T->isFloat(64)) RISCVExtensions |= RISCVExtension::D; } if (RequiredExtension == "Zvamo") RISCVExtensions |= RISCVExtension::Zvamo; if (RequiredExtension == "Zvlsseg") RISCVExtensions |= RISCVExtension::Zvlsseg; // Init OutputType and InputTypes OutputType = OutInTypes[0]; InputTypes.assign(OutInTypes.begin() + 1, OutInTypes.end()); // IntrinsicTypes is nonmasked version index. 
Need to update it // if there is maskedoff operand (It is always in first operand). IntrinsicTypes = NewIntrinsicTypes; if (IsMask && HasMaskedOffOperand) { for (auto &I : IntrinsicTypes) { if (I >= 0) I += NF; } } } std::string RVVIntrinsic::getBuiltinTypeStr() const { std::string S; S += OutputType->getBuiltinStr(); for (const auto &T : InputTypes) { S += T->getBuiltinStr(); } return S; } void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const { if (!getIRName().empty()) OS << " ID = Intrinsic::riscv_" + getIRName() + ";\n"; if (NF >= 2) OS << " NF = " + utostr(getNF()) + ";\n"; if (hasManualCodegen()) { OS << ManualCodegen; OS << "break;\n"; return; } if (isMask()) { if (hasVL()) { if (hasPolicy()) OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);\n"; else OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n"; } else { OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n"; } } OS << " IntrinsicTypes = {"; ListSeparator LS; for (const auto &Idx : IntrinsicTypes) { if (Idx == -1) OS << LS << "ResultType"; else OS << LS << "Ops[" << Idx << "]->getType()"; } // VL could be i64 or i32, need to encode it in IntrinsicTypes. VL is // always last operand. 
if (hasVL()) OS << ", Ops.back()->getType()"; OS << "};\n"; OS << " break;\n"; } void RVVIntrinsic::emitIntrinsicMacro(raw_ostream &OS) const { OS << "#define " << getName() << "("; if (!InputTypes.empty()) { ListSeparator LS; for (unsigned i = 0, e = InputTypes.size(); i != e; ++i) OS << LS << "op" << i; } OS << ") \\\n"; OS << "__builtin_rvv_" << getName() << "("; if (!InputTypes.empty()) { ListSeparator LS; for (unsigned i = 0, e = InputTypes.size(); i != e; ++i) OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")"; } OS << ")\n"; } void RVVIntrinsic::emitIntrinsicMaskMacro(raw_ostream &OS) const { OS << "#define " << getName().drop_back() << "("; if (!InputTypes.empty()) { ListSeparator LS; for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i) OS << LS << "op" << i; } OS << ") \\\n"; OS << "__builtin_rvv_" << getName() << "("; ListSeparator LS; if (!InputTypes.empty()) { for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i) OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")"; } OS << LS << "(size_t)VE_TAIL_AGNOSTIC"; OS << ")\n"; } void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const { OS << "__attribute__((clang_builtin_alias("; OS << "__builtin_rvv_" << getName() << ")))\n"; OS << OutputType->getTypeStr() << " " << getMangledName() << "("; // Emit function arguments if (!InputTypes.empty()) { ListSeparator LS; for (unsigned i = 0; i < InputTypes.size(); ++i) OS << LS << InputTypes[i]->getTypeStr() << " op" << i; } OS << ");\n\n"; } //===----------------------------------------------------------------------===// // RVVEmitter implementation //===----------------------------------------------------------------------===// void RVVEmitter::createHeader(raw_ostream &OS) { OS << "/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics " "-------------------===\n" " *\n" " *\n" " * Part of the LLVM Project, under the Apache License v2.0 with LLVM " "Exceptions.\n" " * See https://llvm.org/LICENSE.txt for 
license information.\n" " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n" " *\n" " *===-----------------------------------------------------------------" "------===\n" " */\n\n"; OS << "#ifndef __RISCV_VECTOR_H\n"; OS << "#define __RISCV_VECTOR_H\n\n"; OS << "#include <stdint.h>\n"; OS << "#include <stddef.h>\n\n"; OS << "#ifndef __riscv_vector\n"; OS << "#error \"Vector intrinsics require the vector extension.\"\n"; OS << "#endif\n\n"; OS << "#ifdef __cplusplus\n"; OS << "extern \"C\" {\n"; OS << "#endif\n\n"; createRVVHeaders(OS); std::vector<std::unique_ptr<RVVIntrinsic>> Defs; createRVVIntrinsics(Defs); // Print header code if (!HeaderCode.empty()) { OS << HeaderCode; } auto printType = [&](auto T) { OS << "typedef " << T->getClangBuiltinStr() << " " << T->getTypeStr() << ";\n"; }; constexpr int Log2LMULs[] = {-3, -2, -1, 0, 1, 2, 3}; // Print RVV boolean types. for (int Log2LMUL : Log2LMULs) { auto T = computeType('c', Log2LMUL, "m"); if (T.hasValue()) printType(T.getValue()); } // Print RVV int/float types. for (char I : StringRef("csil")) { for (int Log2LMUL : Log2LMULs) { auto T = computeType(I, Log2LMUL, "v"); if (T.hasValue()) { printType(T.getValue()); auto UT = computeType(I, Log2LMUL, "Uv"); printType(UT.getValue()); } } } OS << "#if defined(__riscv_zfh)\n"; for (int Log2LMUL : Log2LMULs) { auto T = computeType('x', Log2LMUL, "v"); if (T.hasValue()) printType(T.getValue()); } OS << "#endif\n"; OS << "#if defined(__riscv_f)\n"; for (int Log2LMUL : Log2LMULs) { auto T = computeType('f', Log2LMUL, "v"); if (T.hasValue()) printType(T.getValue()); } OS << "#endif\n"; OS << "#if defined(__riscv_d)\n"; for (int Log2LMUL : Log2LMULs) { auto T = computeType('d', Log2LMUL, "v"); if (T.hasValue()) printType(T.getValue()); } OS << "#endif\n\n"; // The same extension include in the same arch guard marco. 
std::stable_sort(Defs.begin(), Defs.end(), [](const std::unique_ptr<RVVIntrinsic> &A, const std::unique_ptr<RVVIntrinsic> &B) { return A->getRISCVExtensions() < B->getRISCVExtensions(); }); // Print intrinsic functions with macro emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) { Inst.emitIntrinsicMacro(OS); }); // Use _mt to implement _m intrinsics. emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) { if (Inst.isMask() && Inst.hasPolicy()) Inst.emitIntrinsicMaskMacro(OS); }); OS << "#define __riscv_v_intrinsic_overloading 1\n"; // Print Overloaded APIs OS << "#define __rvv_overloaded static inline " "__attribute__((__always_inline__, __nodebug__, __overloadable__))\n"; emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) { if (!Inst.isMask() && !Inst.hasNoMaskedOverloaded()) return; OS << "__rvv_overloaded "; Inst.emitMangledFuncDef(OS); }); OS << "\n#ifdef __cplusplus\n"; OS << "}\n"; OS << "#endif // __riscv_vector\n"; OS << "#endif // __RISCV_VECTOR_H\n"; } void RVVEmitter::createBuiltins(raw_ostream &OS) { std::vector<std::unique_ptr<RVVIntrinsic>> Defs; createRVVIntrinsics(Defs); OS << "#if defined(TARGET_BUILTIN) && !defined(RISCVV_BUILTIN)\n"; OS << "#define RISCVV_BUILTIN(ID, TYPE, ATTRS) TARGET_BUILTIN(ID, TYPE, " "ATTRS, \"experimental-v\")\n"; OS << "#endif\n"; for (auto &Def : Defs) { OS << "RISCVV_BUILTIN(__builtin_rvv_" << Def->getName() << ",\"" << Def->getBuiltinTypeStr() << "\", "; if (!Def->hasSideEffects()) OS << "\"n\")\n"; else OS << "\"\")\n"; } OS << "#undef RISCVV_BUILTIN\n"; } void RVVEmitter::createCodeGen(raw_ostream &OS) { std::vector<std::unique_ptr<RVVIntrinsic>> Defs; createRVVIntrinsics(Defs); // IR name could be empty, use the stable sort preserves the relative order. 
std::stable_sort(Defs.begin(), Defs.end(), [](const std::unique_ptr<RVVIntrinsic> &A, const std::unique_ptr<RVVIntrinsic> &B) { return A->getIRName() < B->getIRName(); }); // Print switch body when the ir name or ManualCodegen changes from previous // iteration. RVVIntrinsic *PrevDef = Defs.begin()->get(); for (auto &Def : Defs) { StringRef CurIRName = Def->getIRName(); if (CurIRName != PrevDef->getIRName() || (Def->getManualCodegen() != PrevDef->getManualCodegen())) { PrevDef->emitCodeGenSwitchBody(OS); } PrevDef = Def.get(); OS << "case RISCV::BI__builtin_rvv_" << Def->getName() << ":\n"; } Defs.back()->emitCodeGenSwitchBody(OS); OS << "\n"; } void RVVEmitter::parsePrototypes(StringRef Prototypes, std::function<void(StringRef)> Handler) { const StringRef Primaries("evwqom0ztul"); while (!Prototypes.empty()) { size_t Idx = 0; // Skip over complex prototype because it could contain primitive type // character. if (Prototypes[0] == '(') Idx = Prototypes.find_first_of(')'); Idx = Prototypes.find_first_of(Primaries, Idx); assert(Idx != StringRef::npos); Handler(Prototypes.slice(0, Idx + 1)); Prototypes = Prototypes.drop_front(Idx + 1); } } std::string RVVEmitter::getSuffixStr(char Type, int Log2LMUL, StringRef Prototypes) { SmallVector<std::string> SuffixStrs; parsePrototypes(Prototypes, [&](StringRef Proto) { auto T = computeType(Type, Log2LMUL, Proto); SuffixStrs.push_back(T.getValue()->getShortStr()); }); return join(SuffixStrs, "_"); } void RVVEmitter::createRVVIntrinsics( std::vector<std::unique_ptr<RVVIntrinsic>> &Out) { std::vector<Record *> RV = Records.getAllDerivedDefinitions("RVVBuiltin"); for (auto *R : RV) { StringRef Name = R->getValueAsString("Name"); StringRef SuffixProto = R->getValueAsString("Suffix"); StringRef MangledName = R->getValueAsString("MangledName"); StringRef MangledSuffixProto = R->getValueAsString("MangledSuffix"); StringRef Prototypes = R->getValueAsString("Prototype"); StringRef TypeRange = R->getValueAsString("TypeRange"); bool 
HasMask = R->getValueAsBit("HasMask"); bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand"); bool HasVL = R->getValueAsBit("HasVL"); bool HasPolicy = R->getValueAsBit("HasPolicy"); bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded"); bool HasSideEffects = R->getValueAsBit("HasSideEffects"); std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL"); StringRef ManualCodegen = R->getValueAsString("ManualCodegen"); StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask"); std::vector<int64_t> IntrinsicTypes = R->getValueAsListOfInts("IntrinsicTypes"); StringRef RequiredExtension = R->getValueAsString("RequiredExtension"); StringRef IRName = R->getValueAsString("IRName"); StringRef IRNameMask = R->getValueAsString("IRNameMask"); unsigned NF = R->getValueAsInt("NF"); StringRef HeaderCodeStr = R->getValueAsString("HeaderCode"); bool HasAutoDef = HeaderCodeStr.empty(); if (!HeaderCodeStr.empty()) { HeaderCode += HeaderCodeStr.str(); } // Parse prototype and create a list of primitive type with transformers // (operand) in ProtoSeq. ProtoSeq[0] is output operand. SmallVector<std::string> ProtoSeq; parsePrototypes(Prototypes, [&ProtoSeq](StringRef Proto) { ProtoSeq.push_back(Proto.str()); }); // Compute Builtin types SmallVector<std::string> ProtoMaskSeq = ProtoSeq; if (HasMask) { // If HasMaskedOffOperand, insert result type as first input operand. if (HasMaskedOffOperand) { if (NF == 1) { ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, ProtoSeq[0]); } else { // Convert // (void, op0 address, op1 address, ...) // to // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...) for (unsigned I = 0; I < NF; ++I) ProtoMaskSeq.insert( ProtoMaskSeq.begin() + NF + 1, ProtoSeq[1].substr(1)); // Use substr(1) to skip '*' } } if (HasMaskedOffOperand && NF > 1) { // Convert // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...) 
// to // (void, op0 address, op1 address, ..., mask, maskedoff0, maskedoff1, // ...) ProtoMaskSeq.insert(ProtoMaskSeq.begin() + NF + 1, "m"); } else { // If HasMask, insert 'm' as first input operand. ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m"); } } // If HasVL, append 'z' to the operand list. if (HasVL) { ProtoSeq.push_back("z"); ProtoMaskSeq.push_back("z"); } if (HasPolicy) { ProtoMaskSeq.push_back("Kz"); } // Create Intrinsics for each type and LMUL. for (char I : TypeRange) { for (int Log2LMUL : Log2LMULList) { Optional<RVVTypes> Types = computeTypes(I, Log2LMUL, NF, ProtoSeq); // Ignored to create new intrinsic if there are any illegal types. if (!Types.hasValue()) continue; auto SuffixStr = getSuffixStr(I, Log2LMUL, SuffixProto); auto MangledSuffixStr = getSuffixStr(I, Log2LMUL, MangledSuffixProto); // Create a non-mask intrinsic Out.push_back(std::make_unique<RVVIntrinsic>( Name, SuffixStr, MangledName, MangledSuffixStr, IRName, HasSideEffects, /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(), IntrinsicTypes, RequiredExtension, NF)); if (HasMask) { // Create a mask intrinsic Optional<RVVTypes> MaskTypes = computeTypes(I, Log2LMUL, NF, ProtoMaskSeq); Out.push_back(std::make_unique<RVVIntrinsic>( Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask, HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask, MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF)); } } // end for Log2LMULList } // end for TypeRange } } void RVVEmitter::createRVVHeaders(raw_ostream &OS) { std::vector<Record *> RVVHeaders = Records.getAllDerivedDefinitions("RVVHeader"); for (auto *R : RVVHeaders) { StringRef HeaderCodeStr = R->getValueAsString("HeaderCode"); OS << HeaderCodeStr.str(); } } Optional<RVVTypes> RVVEmitter::computeTypes(BasicType BT, int Log2LMUL, unsigned NF, ArrayRef<std::string> PrototypeSeq) { // 
LMUL x NF must be less than or equal to 8. if ((Log2LMUL >= 1) && (1 << Log2LMUL) * NF > 8) return llvm::None; RVVTypes Types; for (const std::string &Proto : PrototypeSeq) { auto T = computeType(BT, Log2LMUL, Proto); if (!T.hasValue()) return llvm::None; // Record legal type index Types.push_back(T.getValue()); } return Types; } Optional<RVVTypePtr> RVVEmitter::computeType(BasicType BT, int Log2LMUL, StringRef Proto) { std::string Idx = Twine(Twine(BT) + Twine(Log2LMUL) + Proto).str(); // Search first auto It = LegalTypes.find(Idx); if (It != LegalTypes.end()) return &(It->second); if (IllegalTypes.count(Idx)) return llvm::None; // Compute type and record the result. RVVType T(BT, Log2LMUL, Proto); if (T.isValid()) { // Record legal type index and value. LegalTypes.insert({Idx, T}); return &(LegalTypes[Idx]); } // Record illegal type index. IllegalTypes.insert(Idx); return llvm::None; } void RVVEmitter::emitArchMacroAndBody( std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &OS, std::function<void(raw_ostream &, const RVVIntrinsic &)> PrintBody) { uint8_t PrevExt = (*Defs.begin())->getRISCVExtensions(); bool NeedEndif = emitExtDefStr(PrevExt, OS); for (auto &Def : Defs) { uint8_t CurExt = Def->getRISCVExtensions(); if (CurExt != PrevExt) { if (NeedEndif) OS << "#endif\n\n"; NeedEndif = emitExtDefStr(CurExt, OS); PrevExt = CurExt; } if (Def->hasAutoDef()) PrintBody(OS, *Def); } if (NeedEndif) OS << "#endif\n\n"; } bool RVVEmitter::emitExtDefStr(uint8_t Extents, raw_ostream &OS) { if (Extents == RISCVExtension::Basic) return false; OS << "#if "; ListSeparator LS(" && "); if (Extents & RISCVExtension::F) OS << LS << "defined(__riscv_f)"; if (Extents & RISCVExtension::D) OS << LS << "defined(__riscv_d)"; if (Extents & RISCVExtension::Zfh) OS << LS << "defined(__riscv_zfh)"; if (Extents & RISCVExtension::Zvamo) OS << LS << "defined(__riscv_zvamo)"; if (Extents & RISCVExtension::Zvlsseg) OS << LS << "defined(__riscv_zvlsseg)"; OS << "\n"; return true; } 
namespace clang {

// TableGen backend entry point: emit the contents of riscv_vector.h
// (type definitions, intrinsic macros, and overloaded declarations).
void EmitRVVHeader(RecordKeeper &Records, raw_ostream &OS) {
  RVVEmitter(Records).createHeader(OS);
}

// TableGen backend entry point: emit the __builtin prototypes (BUILTIN()
// macro lines) needed by Sema.
void EmitRVVBuiltins(RecordKeeper &Records, raw_ostream &OS) {
  RVVEmitter(Records).createBuiltins(OS);
}

// TableGen backend entry point: emit the information needed to map each
// builtin to its LLVM IR intrinsic (switch body for EmitRISCVBuiltinExpr).
void EmitRVVBuiltinCG(RecordKeeper &Records, raw_ostream &OS) {
  RVVEmitter(Records).createCodeGen(OS);
}

} // End namespace clang
// Include-guard fix: the original macro (_DEF_OMEGLOND3D_CFILE_HPP) began
// with an underscore followed by an uppercase letter, an identifier reserved
// for the implementation by the C++ standard ([lex.name]).
#ifndef OMGL3D_UTILS_CFILE_HPP
#define OMGL3D_UTILS_CFILE_HPP

#include <string>

namespace OMGL3D {
namespace UTILS {

/// Thin value wrapper around a file name that can decompose the name into
/// its components and test for existence. All member functions are declared
/// here and defined in the corresponding .cpp.
class CFile {
public:
    /// Construct from a full file name.
    /// NOTE(review): single-argument constructors are intentionally left
    /// non-explicit to preserve any implicit conversions existing callers
    /// rely on.
    CFile(const std::string & name);
    CFile(const char * name);
    CFile(const CFile & file);

    /// @return whether the file denoted by the stored name exists
    ///         (implementation not visible here — presumably checks on disk).
    bool IsExists() const;

    /// @return the full name exactly as stored.
    const std::string & GetFullName() const;
    /// @return the file-name component of the stored name.
    const std::string GetFileName() const;
    /// @return the extension component of the stored name.
    const std::string GetExtension() const;
    /// @return the path component of the stored name.
    const std::string GetPath() const;

private:
    std::string _name; // full file name; the getters above derive from it
};

} // namespace UTILS
} // namespace OMGL3D

#endif // OMGL3D_UTILS_CFILE_HPP
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Unit tests for ExtractErrorDetails / SetErrorDetails, which convert between
// grpc::Status and the richer google::rpc::Status proto.

#include <grpcpp/support/error_details.h>
#include <gtest/gtest.h>

#include "src/proto/grpc/status/status.pb.h"
#include "src/proto/grpc/testing/echo_messages.pb.h"

namespace grpc {
namespace {

// Round-trip: serialize a google::rpc::Status into a grpc::Status'
// error_details and extract it back out.
TEST(ExtractTest, Success) {
  google::rpc::Status expected;
  expected.set_code(13);  // INTERNAL
  expected.set_message("I am an error message");
  testing::EchoRequest expected_details;
  // Embedded NUL bytes verify the payload is treated as binary, not C-string.
  expected_details.set_message(grpc::string(100, '\0'));
  expected.add_details()->PackFrom(expected_details);

  google::rpc::Status to;
  grpc::string error_details = expected.SerializeAsString();
  Status from(static_cast<StatusCode>(expected.code()), expected.message(),
              error_details);
  EXPECT_TRUE(ExtractErrorDetails(from, &to).ok());
  EXPECT_EQ(expected.code(), to.code());
  EXPECT_EQ(expected.message(), to.message());
  EXPECT_EQ(1, to.details_size());
  testing::EchoRequest details;
  // Fix: UnpackTo returns false on type mismatch / parse failure; the return
  // value was previously ignored, so a failed unpack would silently compare
  // two default-constructed messages below.
  EXPECT_TRUE(to.details(0).UnpackTo(&details));
  EXPECT_EQ(expected_details.message(), details.message());
}

// A null output pointer must be rejected up front.
TEST(ExtractTest, NullInput) {
  EXPECT_EQ(StatusCode::FAILED_PRECONDITION,
            ExtractErrorDetails(Status(), nullptr).error_code());
}

// error_details that is not a serialized google::rpc::Status must be rejected.
TEST(ExtractTest, Unparsable) {
  grpc::string error_details("I am not a status object");
  Status from(StatusCode::INTERNAL, "", error_details);
  google::rpc::Status to;
  EXPECT_EQ(StatusCode::INVALID_ARGUMENT,
            ExtractErrorDetails(from, &to).error_code());
}

// SetErrorDetails copies code, message, and the serialized proto into the
// grpc::Status.
TEST(SetTest, Success) {
  google::rpc::Status expected;
  expected.set_code(13);  // INTERNAL
  expected.set_message("I am an error message");
  testing::EchoRequest expected_details;
  expected_details.set_message(grpc::string(100, '\0'));
  expected.add_details()->PackFrom(expected_details);

  Status to;
  Status s = SetErrorDetails(expected, &to);
  EXPECT_TRUE(s.ok());
  EXPECT_EQ(expected.code(), to.error_code());
  EXPECT_EQ(expected.message(), to.error_message());
  EXPECT_EQ(expected.SerializeAsString(), to.error_details());
}

// A null output pointer must be rejected up front.
TEST(SetTest, NullInput) {
  EXPECT_EQ(StatusCode::FAILED_PRECONDITION,
            SetErrorDetails(google::rpc::Status(), nullptr).error_code());
}

// Codes outside the StatusCode enum collapse to UNKNOWN, but message and
// details are still carried through.
TEST(SetTest, OutOfScopeErrorCode) {
  google::rpc::Status expected;
  expected.set_code(17);  // Out of scope (UNAUTHENTICATED is 16).
  expected.set_message("I am an error message");
  testing::EchoRequest expected_details;
  expected_details.set_message(grpc::string(100, '\0'));
  expected.add_details()->PackFrom(expected_details);

  Status to;
  Status s = SetErrorDetails(expected, &to);
  EXPECT_TRUE(s.ok());
  EXPECT_EQ(StatusCode::UNKNOWN, to.error_code());
  EXPECT_EQ(expected.message(), to.error_message());
  EXPECT_EQ(expected.SerializeAsString(), to.error_details());
}

// Every in-range code (OK..UNAUTHENTICATED) is preserved verbatim.
TEST(SetTest, ValidScopeErrorCode) {
  for (int c = StatusCode::OK; c <= StatusCode::UNAUTHENTICATED; c++) {
    google::rpc::Status expected;
    expected.set_code(c);
    expected.set_message("I am an error message");
    testing::EchoRequest expected_details;
    expected_details.set_message(grpc::string(100, '\0'));
    expected.add_details()->PackFrom(expected_details);

    Status to;
    Status s = SetErrorDetails(expected, &to);
    EXPECT_TRUE(s.ok());
    EXPECT_EQ(c, to.error_code());
    EXPECT_EQ(expected.message(), to.error_message());
    EXPECT_EQ(expected.SerializeAsString(), to.error_details());
  }
}

}  // namespace
}  // namespace grpc

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
// Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include <config/bitcoin-config.h> #endif #include <randomenv.h> #include <clientversion.h> #include <compat/cpuid.h> #include <crypto/sha512.h> #include <support/cleanse.h> #include <util/time.h> // for GetTime() #ifdef WIN32 #include <compat.h> // for Windows API #endif #include <algorithm> #include <atomic> #include <chrono> #include <climits> #include <thread> #include <vector> #include <cstdint> #include <cstring> #ifndef WIN32 #include <sys/types.h> // must go before a number of other headers #include <fcntl.h> #include <netinet/in.h> #include <sys/resource.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/time.h> #include <sys/utsname.h> #include <unistd.h> #endif #ifdef __MACH__ #include <mach/clock.h> #include <mach/mach.h> #include <mach/mach_time.h> #endif #if HAVE_DECL_GETIFADDRS #include <ifaddrs.h> #endif #if HAVE_SYSCTL #include <sys/sysctl.h> #if HAVE_VM_VM_PARAM_H #include <vm/vm_param.h> #endif #if HAVE_SYS_RESOURCES_H #include <sys/resources.h> #endif #if HAVE_SYS_VMMETER_H #include <sys/vmmeter.h> #endif #endif #ifdef __linux__ #include <sys/auxv.h> #endif //! 
Necessary on some platforms extern char **environ; namespace { void RandAddSeedPerfmon(CSHA512 &hasher) { #ifdef WIN32 // Seed with the entire set of perfmon data // This can take up to 2 seconds, so only do it every 10 minutes static std::atomic<std::chrono::seconds> last_perfmon{ std::chrono::seconds{0}}; auto last_time = last_perfmon.load(); auto current_time = GetTime<std::chrono::seconds>(); if (current_time < last_time + std::chrono::minutes{10}) { return; } last_perfmon = current_time; std::vector<uint8_t> vData(250000, 0); long ret = 0; unsigned long nSize = 0; // Bail out at more than 10MB of performance data const size_t nMaxSize = 10000000; while (true) { nSize = vData.size(); ret = RegQueryValueExA(HKEY_PERFORMANCE_DATA, "Global", nullptr, nullptr, vData.data(), &nSize); if (ret != ERROR_MORE_DATA || vData.size() >= nMaxSize) { break; } // Grow size of buffer exponentially vData.resize(std::max((vData.size() * 3) / 2, nMaxSize)); } RegCloseKey(HKEY_PERFORMANCE_DATA); if (ret == ERROR_SUCCESS) { hasher.Write(vData.data(), nSize); memory_cleanse(vData.data(), nSize); } else { // Performance data is only a best-effort attempt at improving the // situation when the OS randomness (and other sources) aren't // adequate. As a result, failure to read it is isn't considered // critical, so we don't call RandFailure(). // TODO: Add logging when the logger is made functional before global // constructors have been invoked. } #endif } /** Helper to easily feed data into a CSHA512. * * Note that this does not serialize the passed object (like stream.h's << * operators do). Its raw memory representation is used directly. 
*/ template <typename T> CSHA512 &operator<<(CSHA512 &hasher, const T &data) { static_assert( !std::is_same<typename std::decay<T>::type, char *>::value, "Calling operator<<(CSHA512, char*) is probably not what you want"); static_assert( !std::is_same<typename std::decay<T>::type, uint8_t *>::value, "Calling operator<<(CSHA512, uint8_t*) is probably not what you " "want"); static_assert( !std::is_same<typename std::decay<T>::type, const char *>::value, "Calling operator<<(CSHA512, const char*) is probably not what you " "want"); static_assert( !std::is_same<typename std::decay<T>::type, const uint8_t *>::value, "Calling operator<<(CSHA512, const uint8_t*) is " "probably not what you want"); hasher.Write((const uint8_t *)&data, sizeof(data)); return hasher; } #ifndef WIN32 void AddSockaddr(CSHA512 &hasher, const struct sockaddr *addr) { if (addr == nullptr) { return; } switch (addr->sa_family) { case AF_INET: hasher.Write((const uint8_t *)addr, sizeof(sockaddr_in)); break; case AF_INET6: hasher.Write((const uint8_t *)addr, sizeof(sockaddr_in6)); break; default: hasher.Write((const uint8_t *)&addr->sa_family, sizeof(addr->sa_family)); } } void AddFile(CSHA512 &hasher, const char *path) { struct stat sb = {}; int f = open(path, O_RDONLY); size_t total = 0; if (f != -1) { uint8_t fbuf[4096]; int n; hasher.Write((const uint8_t *)&f, sizeof(f)); if (fstat(f, &sb) == 0) { hasher << sb; } do { n = read(f, fbuf, sizeof(fbuf)); if (n > 0) { hasher.Write(fbuf, n); } total += n; /* not bothering with EINTR handling. */ } while (n == sizeof(fbuf) && total < 1048576); // Read only the first 1 Mbyte close(f); } } void AddPath(CSHA512 &hasher, const char *path) { struct stat sb = {}; if (stat(path, &sb) == 0) { hasher.Write((const uint8_t *)path, strlen(path) + 1); hasher << sb; } } #endif #if HAVE_SYSCTL template <int... 
S> void AddSysctl(CSHA512 &hasher) { int CTL[sizeof...(S)] = {S...}; uint8_t buffer[65536]; size_t siz = 65536; int ret = sysctl(CTL, sizeof...(S), buffer, &siz, nullptr, 0); if (ret == 0 || (ret == -1 && errno == ENOMEM)) { hasher << sizeof(CTL); hasher.Write((const uint8_t *)CTL, sizeof(CTL)); if (siz > sizeof(buffer)) { siz = sizeof(buffer); } hasher << siz; hasher.Write(buffer, siz); } } #endif #ifdef HAVE_GETCPUID void inline AddCPUID(CSHA512 &hasher, uint32_t leaf, uint32_t subleaf, uint32_t &ax, uint32_t &bx, uint32_t &cx, uint32_t &dx) { GetCPUID(leaf, subleaf, ax, bx, cx, dx); hasher << leaf << subleaf << ax << bx << cx << dx; } void AddAllCPUID(CSHA512 &hasher) { uint32_t ax, bx, cx, dx; // Iterate over all standard leaves // Returns max leaf in ax AddCPUID(hasher, 0, 0, ax, bx, cx, dx); uint32_t max = ax; for (uint32_t leaf = 1; leaf <= max && leaf <= 0xFF; ++leaf) { uint32_t maxsub = 0; for (uint32_t subleaf = 0; subleaf <= 0xFF; ++subleaf) { AddCPUID(hasher, leaf, subleaf, ax, bx, cx, dx); // Iterate subleafs for leaf values 4, 7, 11, 13 if (leaf == 4) { if ((ax & 0x1f) == 0) { break; } } else if (leaf == 7) { if (subleaf == 0) { maxsub = ax; } if (subleaf == maxsub) { break; } } else if (leaf == 11) { if ((cx & 0xff00) == 0) { break; } } else if (leaf == 13) { if (ax == 0 && bx == 0 && cx == 0 && dx == 0) { break; } } else { // For any other leaf, stop after subleaf 0. break; } } } // Iterate over all extended leaves // Returns max extended leaf in ax AddCPUID(hasher, 0x80000000, 0, ax, bx, cx, dx); uint32_t ext_max = ax; for (uint32_t leaf = 0x80000001; leaf <= ext_max && leaf <= 0x800000FF; ++leaf) { AddCPUID(hasher, leaf, 0, ax, bx, cx, dx); } } #endif } // namespace void RandAddDynamicEnv(CSHA512 &hasher) { RandAddSeedPerfmon(hasher); // Various clocks #ifdef WIN32 FILETIME ftime; GetSystemTimeAsFileTime(&ftime); hasher << ftime; #else #ifndef __MACH__ // On non-MacOS systems, use various clock_gettime() calls. 
struct timespec ts = {}; #ifdef CLOCK_MONOTONIC clock_gettime(CLOCK_MONOTONIC, &ts); hasher << ts; #endif #ifdef CLOCK_REALTIME clock_gettime(CLOCK_REALTIME, &ts); hasher << ts; #endif #ifdef CLOCK_BOOTTIME clock_gettime(CLOCK_BOOTTIME, &ts); hasher << ts.tv_sec << ts.tv_nsec; #endif #else // On MacOS use mach_absolute_time (number of CPU ticks since boot) as a // replacement for CLOCK_MONOTONIC, and clock_get_time for CALENDAR_CLOCK as // a replacement for CLOCK_REALTIME. hasher << mach_absolute_time(); // From https://gist.github.com/jbenet/1087739 clock_serv_t cclock; mach_timespec_t mts = {}; if (host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock) == KERN_SUCCESS && clock_get_time(cclock, &mts) == KERN_SUCCESS) { hasher << mts; mach_port_deallocate(mach_task_self(), cclock); } #endif // gettimeofday is available on all UNIX systems, but only has microsecond // precision. struct timeval tv = {}; gettimeofday(&tv, nullptr); hasher << tv; #endif // Probably redundant, but also use all the clocks C++11 provides: hasher << std::chrono::system_clock::now().time_since_epoch().count(); hasher << std::chrono::steady_clock::now().time_since_epoch().count(); hasher << std::chrono::high_resolution_clock::now().time_since_epoch().count(); #ifndef WIN32 // Current resource usage. 
struct rusage usage = {}; if (getrusage(RUSAGE_SELF, &usage) == 0) { hasher << usage; } #endif #ifdef __linux__ AddFile(hasher, "/proc/diskstats"); AddFile(hasher, "/proc/vmstat"); AddFile(hasher, "/proc/schedstat"); AddFile(hasher, "/proc/zoneinfo"); AddFile(hasher, "/proc/meminfo"); AddFile(hasher, "/proc/softirqs"); AddFile(hasher, "/proc/stat"); AddFile(hasher, "/proc/self/schedstat"); AddFile(hasher, "/proc/self/status"); #endif #if HAVE_SYSCTL #ifdef CTL_KERN #if defined(KERN_PROC) && defined(KERN_PROC_ALL) AddSysctl<CTL_KERN, KERN_PROC, KERN_PROC_ALL>(hasher); #endif #endif #ifdef CTL_HW #ifdef HW_DISKSTATS AddSysctl<CTL_HW, HW_DISKSTATS>(hasher); #endif #endif #ifdef CTL_VM #ifdef VM_LOADAVG AddSysctl<CTL_VM, VM_LOADAVG>(hasher); #endif #ifdef VM_TOTAL AddSysctl<CTL_VM, VM_TOTAL>(hasher); #endif #ifdef VM_METER AddSysctl<CTL_VM, VM_METER>(hasher); #endif #endif #endif // Stack and heap location void *addr = malloc(4097); hasher << &addr << addr; free(addr); } void RandAddStaticEnv(CSHA512 &hasher) { // Some compile-time static properties hasher << (CHAR_MIN < 0) << sizeof(void *) << sizeof(long) << sizeof(int); #if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) hasher << __GNUC__ << __GNUC_MINOR__ << __GNUC_PATCHLEVEL__; #endif #ifdef _MSC_VER hasher << _MSC_VER; #endif hasher << __cplusplus; #ifdef _XOPEN_VERSION hasher << _XOPEN_VERSION; #endif #ifdef __VERSION__ const char *COMPILER_VERSION = __VERSION__; hasher.Write((const uint8_t *)COMPILER_VERSION, strlen(COMPILER_VERSION) + 1); #endif // Bitcoin client version hasher << CLIENT_VERSION; #ifdef __linux__ // Information available through getauxval() #ifdef AT_HWCAP hasher << getauxval(AT_HWCAP); #endif #ifdef AT_HWCAP2 hasher << getauxval(AT_HWCAP2); #endif #ifdef AT_RANDOM const uint8_t *random_aux = (const uint8_t *)getauxval(AT_RANDOM); if (random_aux) { hasher.Write(random_aux, 16); } #endif #ifdef AT_PLATFORM const char *platform_str = (const char 
*)getauxval(AT_PLATFORM); if (platform_str) { hasher.Write((const uint8_t *)platform_str, strlen(platform_str) + 1); } #endif #ifdef AT_EXECFN const char *exec_str = (const char *)getauxval(AT_EXECFN); if (exec_str) { hasher.Write((const uint8_t *)exec_str, strlen(exec_str) + 1); } #endif #endif // __linux__ #ifdef HAVE_GETCPUID AddAllCPUID(hasher); #endif // Memory locations hasher << &hasher << &RandAddStaticEnv << &malloc << &errno << &environ; // Hostname char hname[256]; if (gethostname(hname, 256) == 0) { hasher.Write((const uint8_t *)hname, strnlen(hname, 256)); } #if HAVE_DECL_GETIFADDRS // Network interfaces struct ifaddrs *ifad = NULL; getifaddrs(&ifad); struct ifaddrs *ifit = ifad; while (ifit != NULL) { hasher.Write((const uint8_t *)&ifit, sizeof(ifit)); hasher.Write((const uint8_t *)ifit->ifa_name, strlen(ifit->ifa_name) + 1); hasher.Write((const uint8_t *)&ifit->ifa_flags, sizeof(ifit->ifa_flags)); AddSockaddr(hasher, ifit->ifa_addr); AddSockaddr(hasher, ifit->ifa_netmask); AddSockaddr(hasher, ifit->ifa_dstaddr); ifit = ifit->ifa_next; } freeifaddrs(ifad); #endif #ifndef WIN32 // UNIX kernel information struct utsname name; if (uname(&name) != -1) { hasher.Write((const uint8_t *)&name.sysname, strlen(name.sysname) + 1); hasher.Write((const uint8_t *)&name.nodename, strlen(name.nodename) + 1); hasher.Write((const uint8_t *)&name.release, strlen(name.release) + 1); hasher.Write((const uint8_t *)&name.version, strlen(name.version) + 1); hasher.Write((const uint8_t *)&name.machine, strlen(name.machine) + 1); } /* Path and filesystem provided data */ AddPath(hasher, "/"); AddPath(hasher, "."); AddPath(hasher, "/tmp"); AddPath(hasher, "/home"); AddPath(hasher, "/proc"); #ifdef __linux__ AddFile(hasher, "/proc/cmdline"); AddFile(hasher, "/proc/cpuinfo"); AddFile(hasher, "/proc/version"); #endif AddFile(hasher, "/etc/passwd"); AddFile(hasher, "/etc/group"); AddFile(hasher, "/etc/hosts"); AddFile(hasher, "/etc/resolv.conf"); AddFile(hasher, "/etc/timezone"); 
AddFile(hasher, "/etc/localtime"); #endif // For MacOS/BSDs, gather data through sysctl instead of /proc. Not all of // these will exist on every system. #if HAVE_SYSCTL #ifdef CTL_HW #ifdef HW_MACHINE AddSysctl<CTL_HW, HW_MACHINE>(hasher); #endif #ifdef HW_MODEL AddSysctl<CTL_HW, HW_MODEL>(hasher); #endif #ifdef HW_NCPU AddSysctl<CTL_HW, HW_NCPU>(hasher); #endif #ifdef HW_PHYSMEM AddSysctl<CTL_HW, HW_PHYSMEM>(hasher); #endif #ifdef HW_USERMEM AddSysctl<CTL_HW, HW_USERMEM>(hasher); #endif #ifdef HW_MACHINE_ARCH AddSysctl<CTL_HW, HW_MACHINE_ARCH>(hasher); #endif #ifdef HW_REALMEM AddSysctl<CTL_HW, HW_REALMEM>(hasher); #endif #ifdef HW_CPU_FREQ AddSysctl<CTL_HW, HW_CPU_FREQ>(hasher); #endif #ifdef HW_BUS_FREQ AddSysctl<CTL_HW, HW_BUS_FREQ>(hasher); #endif #ifdef HW_CACHELINE AddSysctl<CTL_HW, HW_CACHELINE>(hasher); #endif #endif #ifdef CTL_KERN #ifdef KERN_BOOTFILE AddSysctl<CTL_KERN, KERN_BOOTFILE>(hasher); #endif #ifdef KERN_BOOTTIME AddSysctl<CTL_KERN, KERN_BOOTTIME>(hasher); #endif #ifdef KERN_CLOCKRATE AddSysctl<CTL_KERN, KERN_CLOCKRATE>(hasher); #endif #ifdef KERN_HOSTID AddSysctl<CTL_KERN, KERN_HOSTID>(hasher); #endif #ifdef KERN_HOSTUUID AddSysctl<CTL_KERN, KERN_HOSTUUID>(hasher); #endif #ifdef KERN_HOSTNAME AddSysctl<CTL_KERN, KERN_HOSTNAME>(hasher); #endif #ifdef KERN_OSRELDATE AddSysctl<CTL_KERN, KERN_OSRELDATE>(hasher); #endif #ifdef KERN_OSRELEASE AddSysctl<CTL_KERN, KERN_OSRELEASE>(hasher); #endif #ifdef KERN_OSREV AddSysctl<CTL_KERN, KERN_OSREV>(hasher); #endif #ifdef KERN_OSTYPE AddSysctl<CTL_KERN, KERN_OSTYPE>(hasher); #endif #ifdef KERN_POSIX1 AddSysctl<CTL_KERN, KERN_OSREV>(hasher); #endif #ifdef KERN_VERSION AddSysctl<CTL_KERN, KERN_VERSION>(hasher); #endif #endif #endif // Env variables if (environ) { for (size_t i = 0; environ[i]; ++i) { hasher.Write((const uint8_t *)environ[i], strlen(environ[i])); } } // Process, thread, user, session, group, ... ids. 
#ifdef WIN32 hasher << GetCurrentProcessId() << GetCurrentThreadId(); #else hasher << getpid() << getppid() << getsid(0) << getpgid(0) << getuid() << geteuid() << getgid() << getegid(); #endif hasher << std::this_thread::get_id(); }
class Solution { public: int rangeBitwiseAnd(int left, int right) { const int MAX_BIT = 30; int rangeAND = 0; for(int bit = MAX_BIT; bit >= 0; --bit){ int leftBitVal = (left >> bit) & 1; int rightBitVal = (right >> bit) & 1; if(leftBitVal == rightBitVal){ if(rightBitVal == 1){ rangeAND |= (1 << bit); } }else{ break; } } return rangeAND; } };
#ifndef SEGMATCH_PARAMETERS_HPP_
#define SEGMATCH_PARAMETERS_HPP_

#include <string>
#include <vector>

namespace segmatch {

// Plain parameter aggregates for the segmatch pipeline. Fields without a
// default initializer must be set by the caller before use.
// TODO: Check that parameter values are reasonable when setting them.

// Parameters controlling how keypoints are selected from a point cloud.
struct KeypointSelectionParams {
  // Name of the selection strategy to use.
  std::string keypoint_selection;
  double uniform_sample_size;
  int minimum_point_number_per_voxel;
  double harris_threshold;
  // Minimum spacing enforced between selected keypoints.
  // NOTE(review): units are presumably meters — confirm against callers.
  double minimum_keypoint_distance;
}; // struct KeypointSelectionParams

// Parameters for the descriptor extraction stage.
struct DescriptorsParameters {
  // Names of the descriptors to compute (see also the convenience copy in
  // ClassifierParams).
  std::vector<std::string> descriptor_types;
  // FastPointFeatureHistograms parameters.
  double fast_point_feature_histograms_search_radius = 0.8;
  double fast_point_feature_histograms_normals_search_radius = 0.5;
  // PointFeatureHistograms parameters.
  double point_feature_histograms_search_radius = 0.8;
  double point_feature_histograms_normals_search_radius = 0.5;
  // CNN parameters.
  // Sentinel defaults: these paths are invalid on purpose and must be
  // overridden by configuration.
  std::string cnn_model_path = "MUST_BE_SET";
  std::string semantics_nn_path = "MUST_BE_SET";
}; // struct DescriptorsParameters

// Parameters for the point-cloud segmentation stage.
struct SegmenterParameters {
  // Region growing segmenter parameters.
  std::string segmenter_type = "IncrementalEuclideanDistance";
  int min_cluster_size;
  int max_cluster_size;
  float radius_for_growing;
  // Parameters specific for the SmoothnessConstraint growing policy.
  float sc_smoothness_threshold_deg;
  float sc_curvature_threshold;
}; // struct SegmenterParameters

// Parameters for the segment-match classifier.
struct ClassifierParams {
  std::string classifier_filename;
  double threshold_to_accept_match;
  // OpenCv random forest parameters.
  int rf_max_depth;
  double rf_min_sample_ratio;
  double rf_regression_accuracy;
  bool rf_use_surrogates;
  int rf_max_categories;
  std::vector<double> rf_priors;
  bool rf_calc_var_importance;
  int rf_n_active_vars;
  int rf_max_num_of_trees;
  double rf_accuracy;
  // A convenience copy from DescriptorsParameters.
  std::vector<std::string> descriptor_types;
  // k-NN candidate retrieval settings.
  int n_nearest_neighbours;
  bool enable_two_stage_retrieval;
  int knn_feature_dim;
  bool apply_hard_threshold_on_feature_distance;
  double feature_distance_threshold;
  bool normalize_eigen_for_knn;
  bool normalize_eigen_for_hard_threshold;
  std::vector<double> max_eigen_features_values;
  bool do_not_use_cars;
}; // struct ClassifierParams

// Parameters for candidate correspondence generation.
// NOTE(review): identifier is missing an 'n' ("Correspondence"); kept as-is
// for source compatibility with existing callers.
struct CorrespondeceParams {
  std::string matching_method;
  // Squared distance threshold on descriptor features.
  double corr_sqr_dist_thresh = 500.0;
  int n_neighbours;
}; // struct CorrespondeceParams

// Parameters for geometric-consistency verification of matches.
struct GeometricConsistencyParams {
  // Type of recognizer.
  std::string recognizer_type;
  // Higher resolutions lead to higher tolerances.
  double resolution = 0.2;
  // Minimum number of matches necessary to consider cluster.
  int min_cluster_size = 10;
  // Maximum consistency distance between two matches in order for them to be cached as candidates.
  // Used in the incremental recognizer only.
  float max_consistency_distance_for_caching = 10.0f;
}; // struct GeometricConsistencyParams

// Parameters used when evaluating matches against ground truth.
struct GroundTruthParameters {
  double overlap_radius = 0.4;
  double significance_percentage = 50.0;
  int number_nearest_segments;
  double maximum_centroid_distance_m;
}; // struct GroundTruthParameters

// Top-level bundle of the parameter sets passed through the pipeline.
// Note: only a subset of the structs above is aggregated here.
struct Parameters {
  DescriptorsParameters descriptors_parameters;
  SegmenterParameters segmenter_parameters;
  GroundTruthParameters ground_truth_parameters;
};

} // namespace segmatch

#endif // SEGMATCH_PARAMETERS_HPP_
// Copyright (c) the JPEG XL Project Authors. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "lib/extras/codec_pgx.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <algorithm> #include <string> #include <utility> #include <vector> #include "lib/jxl/base/bits.h" #include "lib/jxl/base/byte_order.h" #include "lib/jxl/base/compiler_specific.h" #include "lib/jxl/base/file_io.h" #include "lib/jxl/base/printf_macros.h" #include "lib/jxl/color_management.h" #include "lib/jxl/dec_external_image.h" #include "lib/jxl/enc_external_image.h" #include "lib/jxl/enc_image_bundle.h" #include "lib/jxl/fields.h" // AllDefault #include "lib/jxl/image.h" #include "lib/jxl/image_bundle.h" #include "lib/jxl/luminance.h" namespace jxl { namespace extras { namespace { struct HeaderPGX { // NOTE: PGX is always grayscale size_t xsize; size_t ysize; size_t bits_per_sample; bool big_endian; bool is_signed; }; class Parser { public: explicit Parser(const Span<const uint8_t> bytes) : pos_(bytes.data()), end_(pos_ + bytes.size()) {} // Sets "pos" to the first non-header byte/pixel on success. Status ParseHeader(HeaderPGX* header, const uint8_t** pos) { // codec.cc ensures we have at least two bytes => no range check here. 
if (pos_[0] != 'P' || pos_[1] != 'G') return false; pos_ += 2; return ParseHeaderPGX(header, pos); } // Exposed for testing Status ParseUnsigned(size_t* number) { if (pos_ == end_) return JXL_FAILURE("PGX: reached end before number"); if (!IsDigit(*pos_)) return JXL_FAILURE("PGX: expected unsigned number"); *number = 0; while (pos_ < end_ && *pos_ >= '0' && *pos_ <= '9') { *number *= 10; *number += *pos_ - '0'; ++pos_; } return true; } private: static bool IsDigit(const uint8_t c) { return '0' <= c && c <= '9'; } static bool IsLineBreak(const uint8_t c) { return c == '\r' || c == '\n'; } static bool IsWhitespace(const uint8_t c) { return IsLineBreak(c) || c == '\t' || c == ' '; } Status SkipSpace() { if (pos_ == end_) return JXL_FAILURE("PGX: reached end before space"); const uint8_t c = *pos_; if (c != ' ') return JXL_FAILURE("PGX: expected space"); ++pos_; return true; } Status SkipLineBreak() { if (pos_ == end_) return JXL_FAILURE("PGX: reached end before line break"); // Line break can be either "\n" (0a) or "\r\n" (0d 0a). 
if (*pos_ == '\n') { pos_++; return true; } else if (*pos_ == '\r' && pos_ + 1 != end_ && *(pos_ + 1) == '\n') { pos_ += 2; return true; } return JXL_FAILURE("PGX: expected line break"); } Status SkipSingleWhitespace() { if (pos_ == end_) return JXL_FAILURE("PGX: reached end before whitespace"); if (!IsWhitespace(*pos_)) return JXL_FAILURE("PGX: expected whitespace"); ++pos_; return true; } Status ParseHeaderPGX(HeaderPGX* header, const uint8_t** pos) { JXL_RETURN_IF_ERROR(SkipSpace()); if (pos_ + 2 > end_) return JXL_FAILURE("PGX: header too small"); if (*pos_ == 'M' && *(pos_ + 1) == 'L') { header->big_endian = true; } else if (*pos_ == 'L' && *(pos_ + 1) == 'M') { header->big_endian = false; } else { return JXL_FAILURE("PGX: invalid endianness"); } pos_ += 2; JXL_RETURN_IF_ERROR(SkipSpace()); if (pos_ == end_) return JXL_FAILURE("PGX: header too small"); if (*pos_ == '+') { header->is_signed = false; } else if (*pos_ == '-') { header->is_signed = true; } else { return JXL_FAILURE("PGX: invalid signedness"); } pos_++; // Skip optional space if (pos_ < end_ && *pos_ == ' ') pos_++; JXL_RETURN_IF_ERROR(ParseUnsigned(&header->bits_per_sample)); JXL_RETURN_IF_ERROR(SkipSingleWhitespace()); JXL_RETURN_IF_ERROR(ParseUnsigned(&header->xsize)); JXL_RETURN_IF_ERROR(SkipSingleWhitespace()); JXL_RETURN_IF_ERROR(ParseUnsigned(&header->ysize)); // 0xa, or 0xd 0xa. JXL_RETURN_IF_ERROR(SkipLineBreak()); if (header->bits_per_sample > 16) { return JXL_FAILURE("PGX: >16 bits not yet supported"); } // TODO(lode): support signed integers. This may require changing the way // external_image works. if (header->is_signed) { return JXL_FAILURE("PGX: signed not yet supported"); } size_t numpixels = header->xsize * header->ysize; size_t bytes_per_pixel = header->bits_per_sample <= 8 ? 1 : header->bits_per_sample <= 16 ? 
2 : 4; if (pos_ + numpixels * bytes_per_pixel > end_) { return JXL_FAILURE("PGX: data too small"); } *pos = pos_; return true; } const uint8_t* pos_; const uint8_t* const end_; }; constexpr size_t kMaxHeaderSize = 200; Status EncodeHeader(const ImageBundle& ib, const size_t bits_per_sample, char* header, int* JXL_RESTRICT chars_written) { if (ib.HasAlpha()) return JXL_FAILURE("PGX: can't store alpha"); if (!ib.IsGray()) return JXL_FAILURE("PGX: must be grayscale"); // TODO(lode): verify other bit depths: for other bit depths such as 1 or 4 // bits, have a test case to verify it works correctly. For bits > 16, we may // need to change the way external_image works. if (bits_per_sample != 8 && bits_per_sample != 16) { return JXL_FAILURE("PGX: bits other than 8 or 16 not yet supported"); } // Use ML (Big Endian), LM may not be well supported by all decoders. *chars_written = snprintf(header, kMaxHeaderSize, "PG ML + %" PRIuS " %" PRIuS " %" PRIuS "\n", bits_per_sample, ib.xsize(), ib.ysize()); JXL_RETURN_IF_ERROR(static_cast<unsigned int>(*chars_written) < kMaxHeaderSize); return true; } } // namespace Status DecodeImagePGX(const Span<const uint8_t> bytes, const ColorHints& color_hints, const SizeConstraints& constraints, PackedPixelFile* ppf) { Parser parser(bytes); HeaderPGX header = {}; const uint8_t* pos; if (!parser.ParseHeader(&header, &pos)) return false; JXL_RETURN_IF_ERROR( VerifyDimensions(&constraints, header.xsize, header.ysize)); if (header.bits_per_sample == 0 || header.bits_per_sample > 32) { return JXL_FAILURE("PGX: bits_per_sample invalid"); } JXL_RETURN_IF_ERROR(ApplyColorHints(color_hints, /*color_already_set=*/false, /*is_gray=*/true, ppf)); ppf->info.xsize = header.xsize; ppf->info.ysize = header.ysize; // Original data is uint, so exponent_bits_per_sample = 0. 
ppf->info.bits_per_sample = header.bits_per_sample; ppf->info.exponent_bits_per_sample = 0; ppf->info.uses_original_profile = true; // No alpha in PGX ppf->info.alpha_bits = 0; ppf->info.alpha_exponent_bits = 0; ppf->info.num_color_channels = 1; // Always grayscale ppf->info.orientation = JXL_ORIENT_IDENTITY; JxlDataType data_type; if (header.bits_per_sample > 16) { data_type = JXL_TYPE_UINT32; } else if (header.bits_per_sample > 8) { data_type = JXL_TYPE_UINT16; } else { data_type = JXL_TYPE_UINT8; } const JxlPixelFormat format{ /*num_channels=*/1, /*data_type=*/data_type, /*endianness=*/header.big_endian ? JXL_BIG_ENDIAN : JXL_LITTLE_ENDIAN, /*align=*/0, }; ppf->frames.clear(); // Allocates the frame buffer. ppf->frames.emplace_back(header.xsize, header.ysize, format); const auto& frame = ppf->frames.back(); size_t pgx_remaining_size = bytes.data() + bytes.size() - pos; if (pgx_remaining_size < frame.color.pixels_size) { return JXL_FAILURE("PGX file too small"); } memcpy(frame.color.pixels(), pos, frame.color.pixels_size); return true; } Status EncodeImagePGX(const CodecInOut* io, const ColorEncoding& c_desired, size_t bits_per_sample, ThreadPool* pool, PaddedBytes* bytes) { if (!Bundle::AllDefault(io->metadata.m)) { JXL_WARNING("PGX encoder ignoring metadata - use a different codec"); } if (!c_desired.IsSRGB()) { JXL_WARNING( "PGX encoder cannot store custom ICC profile; decoder\n" "will need hint key=color_space to get the same values"); } ImageBundle ib = io->Main().Copy(); ImageMetadata metadata = io->metadata.m; ImageBundle store(&metadata); const ImageBundle* transformed; JXL_RETURN_IF_ERROR( TransformIfNeeded(ib, c_desired, pool, &store, &transformed)); PaddedBytes pixels(ib.xsize() * ib.ysize() * (bits_per_sample / kBitsPerByte)); size_t stride = ib.xsize() * (bits_per_sample / kBitsPerByte); JXL_RETURN_IF_ERROR( ConvertToExternal(*transformed, bits_per_sample, /*float_out=*/false, /*num_channels=*/1, JXL_BIG_ENDIAN, stride, pool, pixels.data(), 
pixels.size(), /*out_callback=*/nullptr, /*out_opaque=*/nullptr, metadata.GetOrientation())); char header[kMaxHeaderSize]; int header_size = 0; JXL_RETURN_IF_ERROR(EncodeHeader(ib, bits_per_sample, header, &header_size)); bytes->resize(static_cast<size_t>(header_size) + pixels.size()); memcpy(bytes->data(), header, static_cast<size_t>(header_size)); memcpy(bytes->data() + header_size, pixels.data(), pixels.size()); return true; } } // namespace extras } // namespace jxl
/* * intrusive_list.cxx * Copyright© 2017 rsw0x * * Distributed under terms of the MIT license. */ #include "../intrusive_list.hpp" #include "doctest.h" #include <algorithm> #include <array> #include <memory> #include <numeric> struct S { int i; pep::intrusive_node n; friend bool operator==(const S& lhs, const S& rhs) { return &lhs == &rhs; } ~S() {} S(const S&) = delete; S& operator=(const S&) = delete; S(S&&) = default; S& operator=(S&&) = default; S() = default; }; using sl = pep::intrusive_list<S, &S::n>; struct empty {}; template <typename iter> void reverse(iter first, iter last) { while (true) if (first == last || first == --last) return; else { std::printf("SWAP BEGIN.\n"); std::swap(*first, *last); std::printf("SWAP END.\n"); // std::iter_swap(first, last); ++first; } } TEST_CASE("types") { using C = pep::intrusive_list<S, &S::n>; static_assert((std::is_same<C::value_type, S>::value)); static_assert((std::is_same<C::reference, S&>::value)); static_assert((std::is_same<C::const_reference, const S&>::value)); static_assert((std::is_same<C::size_type, std::make_unsigned<C::difference_type>::type>::value)); static_assert((std::is_same<C::difference_type, std::ptrdiff_t>::value)); static_assert((std::is_signed<typename C::difference_type>::value)); static_assert((std::is_unsigned<typename C::size_type>::value)); static_assert( (std::is_same<typename C::difference_type, typename std::iterator_traits<typename C::iterator>::difference_type>::value)); static_assert( (std::is_same< typename C::difference_type, typename std::iterator_traits<typename C::const_iterator>::difference_type>::value)); } TEST_CASE("ilist") { std::array<S, 10> arr; sl sl_; REQUIRE(sl_.empty()); SUBCASE("1") { S s; REQUIRE(sl_.empty()); sl_.push_back(s); REQUIRE(!sl_.empty()); sl_.pop_front(); REQUIRE(sl_.empty()); sl_.push_back(s); REQUIRE(!sl_.empty()); sl_.pop_front(); REQUIRE(sl_.empty()); sl_.push_front(s); REQUIRE(!sl_.empty()); sl_.pop_front(); REQUIRE(sl_.empty()); sl_.push_front(s); 
REQUIRE(&sl_.back() == &sl_.front()); sl sl3{}; for (auto& v : sl_) { (void)v; } for (auto& v : sl3) { (void)v; } int i = 0; for (auto& e : sl_) { (void)e; ++i; } REQUIRE(i == 1); sl_.pop_back(); REQUIRE(sl_.empty()); } SUBCASE("2") { REQUIRE(std::distance(sl_.begin(), sl_.end()) == 0); REQUIRE(std::distance(sl_.cbegin(), sl_.cend()) == 0); S s; sl_.push_front(s); REQUIRE(std::distance(sl_.begin(), sl_.end()) == 1); REQUIRE(std::distance(sl_.cbegin(), sl_.cend()) == 1); sl_.pop_front(); REQUIRE(std::distance(sl_.begin(), sl_.end()) == 0); REQUIRE(std::distance(sl_.cbegin(), sl_.cend()) == 0); REQUIRE(sl_.empty()); } SUBCASE("3") { std::for_each(arr.begin(), arr.end(), [&](S& s) { sl_.push_back(s); }); REQUIRE(sl_.begin() == sl_.begin()); REQUIRE(sl_.begin() != ++sl_.begin()); REQUIRE(sl_.end() == sl_.end()); REQUIRE((++sl_.begin()).ptr_ != nullptr); } SUBCASE("4") { std::for_each(arr.begin(), arr.end(), [&](S& s) { sl_.push_back(s); }); REQUIRE(std::equal(sl_.begin(), sl_.end(), arr.begin())); } SUBCASE("5") { std::for_each(arr.begin(), arr.end(), [&](S& s) { sl_.push_back(s); }); std::for_each(sl_.begin(), sl_.end(), [](S&) {}); for (auto& s : sl_) { (void)s; } } } TEST_CASE("iterators") { S a, b, c; sl sl_; sl_.push_front(a); sl_.push_front(b); sl_.push_front(c); SUBCASE("prefix") { auto it = sl_.begin(); REQUIRE(&(*it) == &c); ++it; REQUIRE(&(*it) == &b); ++it; REQUIRE(&(*it) == &a); ++it; REQUIRE(it == sl_.end()); --it; REQUIRE(&(*it) == &a); --it; REQUIRE(&(*it) == &b); --it; REQUIRE(&(*it) == &c); } SUBCASE("postfix") { auto it = sl_.begin(); REQUIRE(&(*it) == &c); it++; REQUIRE(&(*it) == &b); it++; REQUIRE(&(*it) == &a); it++; REQUIRE(it == sl_.end()); it--; REQUIRE(&(*it) == &a); it--; REQUIRE(&(*it) == &b); it--; REQUIRE(&(*it) == &c); } } TEST_CASE("non-empty list destructor") { S a, b, c; { sl sl_; REQUIRE(sl_.empty()); sl_.push_front(a); sl_.push_front(b); sl_.push_front(c); REQUIRE(!sl_.empty()); // REQUIRE(sl_.size() == 3); REQUIRE(a.n.is_linked()); 
REQUIRE(b.n.is_linked()); REQUIRE(c.n.is_linked()); } REQUIRE(!a.n.is_linked()); REQUIRE(!b.n.is_linked()); REQUIRE(!c.n.is_linked()); } TEST_CASE("size, iterators") { sl sl_; const sl& csl = sl_; S a{1, {}}, b{2, {}}, c{3, {}}; REQUIRE(sl_.is_empty()); // REQUIRE(sl_.size() == 0); REQUIRE(std::distance(sl_.begin(), sl_.end()) == 0); REQUIRE(std::distance(sl_.cbegin(), sl_.cend()) == 0); REQUIRE(sl_.begin() == sl_.end()); REQUIRE(csl.is_empty()); // REQUIRE(csl.size() == 0); REQUIRE(std::distance(csl.begin(), csl.end()) == 0); REQUIRE(std::distance(csl.cbegin(), csl.cend()) == 0); REQUIRE(csl.begin() == csl.end()); sl_.push_front(a); REQUIRE(!sl_.is_empty()); // REQUIRE(sl_.size() == 1); REQUIRE(!csl.is_empty()); // REQUIRE(csl.size() == 1); sl_.push_front(b); REQUIRE(!sl_.is_empty()); // REQUIRE(sl_.size() == 2); REQUIRE(!csl.is_empty()); // REQUIRE(csl.size() == 2); sl_.push_front(c); REQUIRE(!sl_.is_empty()); // REQUIRE(sl_.size() == 3); REQUIRE(!csl.is_empty()); // REQUIRE(csl.size() == 3); REQUIRE(std::distance(sl_.begin(), sl_.end()) == 3); REQUIRE(std::distance(sl_.cbegin(), sl_.cend()) == 3); REQUIRE(sl_.begin() != sl_.end()); REQUIRE(std::distance(csl.begin(), csl.end()) == 3); REQUIRE(std::distance(csl.cbegin(), csl.cend()) == 3); REQUIRE(csl.begin() != csl.end()); REQUIRE(std::accumulate(sl_.begin(), sl_.end(), 0, [](int a, const S& b) { return a + b.i; }) == 6); REQUIRE(std::accumulate(csl.begin(), csl.end(), 0, [](int a, const S& b) { return a + b.i; }) == 6); sl_.pop_front(); REQUIRE(!sl_.is_empty()); // REQUIRE(sl_.size() == 2); REQUIRE(!csl.is_empty()); // REQUIRE(csl.size() == 2); sl_.pop_front(); REQUIRE(!sl_.is_empty()); // REQUIRE(sl_.size() == 1); REQUIRE(!csl.is_empty()); // REQUIRE(csl.size() == 1); sl_.pop_front(); REQUIRE(sl_.is_empty()); // REQUIRE(sl_.size() == 0); REQUIRE(std::distance(sl_.begin(), sl_.end()) == 0); REQUIRE(std::distance(sl_.cbegin(), sl_.cend()) == 0); REQUIRE(sl_.begin() == sl_.end()); REQUIRE(csl.is_empty()); // 
REQUIRE(csl.size() == 0); REQUIRE(std::distance(csl.begin(), csl.end()) == 0); REQUIRE(std::distance(csl.cbegin(), csl.cend()) == 0); REQUIRE(csl.begin() == csl.end()); REQUIRE(std::accumulate(sl_.begin(), sl_.end(), 0, [](int a, const S& b) { return a + b.i; }) == 0); REQUIRE(std::accumulate(csl.begin(), csl.end(), 0, [](int a, const S& b) { return a + b.i; }) == 0); } TEST_CASE("move ctor") { S a{1, {}}, b{2, {}}, c{3, {}}; sl sl_; { sl_.push_front(a); sl_.push_front(b); sl_.push_front(c); S d{std::move(a)}; REQUIRE(!a.n.is_linked()); REQUIRE(d.n.is_linked()); } } TEST_CASE("list move ctor"){ S a{1, {}}, b{2, {}}, c{3, {}}; sl sl_; sl_.push_front(a); sl_.push_front(b); sl_.push_front(c); REQUIRE(!sl_.empty()); REQUIRE(&sl_.front() == &c); REQUIRE(&sl_.back() == &a); /* sl sl2 = sl_; shouldn't compile */ sl sl2 = std::move(sl_); REQUIRE(sl_.empty()); REQUIRE(!sl2.empty()); REQUIRE(&sl2.front() == &c); REQUIRE(&sl2.back() == &a); sl2 = std::move(sl_); REQUIRE(sl_.empty()); REQUIRE(sl2.empty()); }
// Copyright (C) 2015 The Regents of the University of California (Regents). // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents or University of California nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Please contact the author of this library if you have any questions. 
// Author: Chris Sweeney (cmsweeney@cs.ucsb.edu)

#include <Eigen/Core>
#include <Eigen/Geometry>
#include <glog/logging.h>
#include "gtest/gtest.h"

#include "theia/math/util.h"
#include "theia/sfm/camera/camera.h"
#include "theia/sfm/estimators/camera_and_feature_correspondence_2d_3d.h"
#include "theia/sfm/estimators/estimate_similarity_transformation_2d_3d.h"
#include "theia/sfm/pose/test_util.h"
#include "theia/sfm/pose/util.h"
#include "theia/sfm/similarity_transformation.h"
#include "theia/solvers/sample_consensus_estimator.h"
#include "theia/test/test_utils.h"
#include "theia/util/random.h"

namespace theia {

// Shared test constants: synthetic camera intrinsics, the RANSAC inlier
// threshold in pixels, and the number of synthetic camera/point pairs.
static const double kFocalLength = 1000.0;
static const double kReprojectionError = 4.0;
static const int kNumCameras = 100;

// Fixed seed so every test in this file is deterministic.
RandomNumberGenerator rng(154);

// Builds a camera with a random position/orientation and fixed intrinsics.
inline Camera RandomCamera() {
  Camera camera;
  camera.SetPosition(rng.RandVector3d(-10.0, 10.0));
  camera.SetOrientationFromRotationMatrix(RandomRotation(10.0, &rng));
  camera.SetImageSize(1000, 1000);
  camera.SetFocalLength(kFocalLength);
  camera.SetPrincipalPoint(kFocalLength / 2.0, kFocalLength / 2.0);
  return camera;
}

// Generates synthetic 2D-3D correspondences (optionally noisy, with a
// fraction of outliers), applies `similarity_transformation` to the 3D
// points, runs the RANSAC estimator, and asserts the recovered transform
// matches the ground truth to within `tolerance`.
void ExecuteRandomTest(
    const RansacParameters& params,
    const SimilarityTransformation& similarity_transformation,
    const double inlier_ratio,
    const double noise,
    const double tolerance) {
  std::vector<CameraAndFeatureCorrespondence2D3D> correspondences;
  for (int i = 0; i < kNumCameras; i++) {
    CameraAndFeatureCorrespondence2D3D correspondence;
    // Set up random camera.
    correspondence.camera = RandomCamera();
    // Set up random 3D point and reproject it into the image. Make sure the
    // point is in front of the camera.
    double depth = -1;
    do {
      // Create a 3D point randomly, and try to ensure that it is in front of
      // the camera.
      correspondence.point3d = Eigen::Vector4d(rng.RandDouble(-5, 5),
                                               rng.RandDouble(-5, 5),
                                               rng.RandDouble(10, 20),
                                               1.0);
      depth = correspondence.camera.ProjectPoint(correspondence.point3d,
                                                 &correspondence.observation);
    } while (depth < 0);
    correspondences.emplace_back(std::move(correspondence));
  }

  // Add noise to the image observations.
  if (noise) {
    for (int i = 0; i < kNumCameras; i++) {
      correspondences[i].observation += noise * rng.RandVector2d();
    }
  }

  // Add outliers: every correspondence past the inlier fraction gets a
  // completely random observation.
  for (int i = 0; i < kNumCameras; i++) {
    if (i > inlier_ratio * kNumCameras) {
      correspondences[i].observation = kFocalLength * rng.RandVector2d();
    }
  }

  // Apply the similarity transformation to the 3d points.
  for (int i = 0; i < kNumCameras; i++) {
    const Eigen::Vector3d old_point = correspondences[i].point3d.hnormalized();
    const Eigen::Vector3d new_point =
        similarity_transformation.scale * similarity_transformation.rotation *
            old_point +
        similarity_transformation.translation;
    correspondences[i].point3d = new_point.homogeneous();
  }

  // Estimate the similarity transformation;
  SimilarityTransformation estimated_similarity_transformation;
  RansacSummary ransac_summary;
  EXPECT_TRUE(EstimateSimilarityTransformation2D3D(
      params, RansacType::RANSAC, correspondences,
      &estimated_similarity_transformation, &ransac_summary));

  // We should have found at least one good solution.
  EXPECT_GT(ransac_summary.inliers.size(), 4);

  // Expect poses are near.  Rotation/translation are compared up to scale;
  // scale uses a looser (10x) tolerance.
  EXPECT_TRUE(test::ArraysEqualUpToScale(
      9, estimated_similarity_transformation.rotation.data(),
      similarity_transformation.rotation.data(), tolerance));
  EXPECT_TRUE(test::ArraysEqualUpToScale(
      3, estimated_similarity_transformation.translation.data(),
      similarity_transformation.translation.data(), tolerance));
  EXPECT_NEAR(estimated_similarity_transformation.scale,
              similarity_transformation.scale, 10 * tolerance);
}

// All correspondences are inliers and observations are noise-free, so the
// estimate should be accurate to a tight tolerance.
TEST(EstimateSimilarityTransformation2D3D, AllInliersNoNoise) {
  RansacParameters options;
  options.rng = std::make_shared<RandomNumberGenerator>(rng);
  options.use_mle = true;
  options.error_thresh = kReprojectionError;
  options.failure_probability = 0.001;
  options.max_iterations = 1000;
  const double kInlierRatio = 1.0;
  const double kNoise = 0.0;
  const double kPoseTolerance = 1e-4;

  SimilarityTransformation similarity_transformation;
  similarity_transformation.rotation =
      Eigen::AngleAxisd(DegToRad(12.0),
                        Eigen::Vector3d(1.0, 0.2, -0.8).normalized())
          .toRotationMatrix();
  similarity_transformation.translation = Eigen::Vector3d(-1.3, 2.1, 0.5);
  similarity_transformation.scale = 0.8;
  ExecuteRandomTest(options, similarity_transformation, kInlierRatio, kNoise,
                    kPoseTolerance);
}

// All inliers but with 1px observation noise; tolerance is loosened.
TEST(EstimateSimilarityTransformation2D3D, AllInliersWithNoise) {
  RansacParameters options;
  options.rng = std::make_shared<RandomNumberGenerator>(rng);
  options.use_mle = true;
  options.error_thresh = kReprojectionError;
  options.failure_probability = 0.001;
  options.max_iterations = 1000;
  const double kInlierRatio = 1.0;
  const double kNoise = 1.0;
  const double kPoseTolerance = 1e-2;

  SimilarityTransformation similarity_transformation;
  similarity_transformation.rotation =
      Eigen::AngleAxisd(DegToRad(12.0),
                        Eigen::Vector3d(1.0, 0.2, -0.8).normalized())
          .toRotationMatrix();
  similarity_transformation.translation = Eigen::Vector3d(-1.3, 2.1, 0.5);
  similarity_transformation.scale = 0.8;
  ExecuteRandomTest(options, similarity_transformation, kInlierRatio, kNoise,
                    kPoseTolerance);
}
// 20% outliers, noise-free observations; RANSAC should still recover the
// transform to a tight tolerance.
TEST(EstimateSimilarityTransformation2D3D, OutliersNoNoise) {
  RansacParameters options;
  options.rng = std::make_shared<RandomNumberGenerator>(rng);
  options.use_mle = true;
  options.error_thresh = kReprojectionError;
  options.failure_probability = 0.001;
  options.max_iterations = 1000;
  const double kInlierRatio = 0.8;
  const double kNoise = 0.0;
  const double kPoseTolerance = 1e-4;

  SimilarityTransformation similarity_transformation;
  similarity_transformation.rotation =
      Eigen::AngleAxisd(DegToRad(12.0),
                        Eigen::Vector3d(1.0, 0.2, -0.8).normalized())
          .toRotationMatrix();
  similarity_transformation.translation = Eigen::Vector3d(-1.3, 2.1, 0.5);
  similarity_transformation.scale = 0.8;
  ExecuteRandomTest(options, similarity_transformation, kInlierRatio, kNoise,
                    kPoseTolerance);
}

// Hardest case: 20% outliers plus 1px noise; loosest tolerance.
TEST(EstimateSimilarityTransformation2D3D, OutliersWithNoise) {
  RansacParameters options;
  options.rng = std::make_shared<RandomNumberGenerator>(rng);
  options.use_mle = true;
  options.error_thresh = kReprojectionError;
  options.failure_probability = 0.001;
  options.max_iterations = 1000;
  const double kInlierRatio = 0.8;
  const double kNoise = 1.0;
  const double kPoseTolerance = 1e-2;

  SimilarityTransformation similarity_transformation;
  similarity_transformation.rotation =
      Eigen::AngleAxisd(DegToRad(12.0),
                        Eigen::Vector3d(1.0, 0.2, -0.8).normalized())
          .toRotationMatrix();
  similarity_transformation.translation = Eigen::Vector3d(-1.3, 2.1, 0.5);
  similarity_transformation.scale = 0.8;
  ExecuteRandomTest(options, similarity_transformation, kInlierRatio, kNoise,
                    kPoseTolerance);
}

}  // namespace theia
#include <node/BoxPack.h>
#include <node/BasicChar.h>
#include <node/AxisCenter.h>
#include <node/MakeVBox.h>
#include "Delimiter.h"
#include "Const.h"

namespace formulae {
namespace node {

// Maps an ASCII delimiter character to its slot in the EX (extension) font.
// Only '(' and ')' are handled so far; anything else throws NotImplemented.
charCode_t getExCharCode(charCode_t ch) {
  if (ch.value() == '(')
    return charCode_t{0};
  else if (ch.value() == ')')
    return charCode_t{1};
  else
    throw NotImplemented{};
}

// Looks up a delimiter glyph in the EX family and reports whether a larger
// variant exists.  A larger variant is detected by the returned glyph's char
// code differing from the requested one (the font chains size variants).
std::tuple<NodeRef, bool> getLargerDelimiter(style_t st, bool larger, charCode_t ch) {
  NodeRef node;
  std::tie(node, std::ignore) = basicChar(st, larger, family_t::EX, ch);
  bool hasLarger = (node.as<Char_t>().charCode != ch);
  return std::make_tuple(node, hasLarger);
}

// Builds a delimiter node at least `delimiterSize` tall: walks the font's
// chain of ever-larger glyph variants, and if even the largest is too small,
// stacks the glyph with its extension piece (char code + 16) into a vbox.
// The result is centered on the math axis.  A null delimiter yields a kern.
NodeRef varDelimiter(style_t st, dist_t delimiterSize, delim_t delim) {
  if (delim == delim_t(0))
    return Kern_t::create(GV::nullDelimiterSpace);
  else{
    /*--------------------------improve this part-----------------------------*/
    charCode_t ch = getExCharCode(delim.to<charCode_t>());
    NodeRef node;
    bool hasLarger;
    std::tie(node, hasLarger) = getLargerDelimiter(st, false, ch);
    // NOTE(review): the for-init below clobbers the hasLarger value obtained
    // from the first lookup -- presumably intentional so the loop body runs
    // at least once; confirm against the font variant tables.
    for ( hasLarger = true; node.vsize() < delimiterSize and hasLarger; ) {
      std::tie(node, hasLarger) = getLargerDelimiter(st, true, node.as<Char_t>().charCode);
    }
    if (not hasLarger) {
      // No larger variant: stack the glyph above its extension piece, which
      // by convention lives 16 char codes further -- TODO confirm offset.
      auto hl = hlist_t{node};
      std::tie(node, std::ignore) = getLargerDelimiter(st, false, node.as<Char_t>().charCode+charCode_t{16});
      hl.push_back(node);
      auto box = above(hl[0],zero(), zero(), hl[1]);
      auto centeredBox = axisCenter(st, box.as<Box_t>().box);
      return centeredBox;
    }
    else {
      // A single glyph variant was tall enough; just box and center it.
      auto box = boxList(hlist_t{node});
      auto centeredBox = axisCenter(st, box);
      return centeredBox;
    }
  }
}

// Convenience wrapper: delimiter sized per the current style's font metric.
NodeRef makeDelimiter(style_t st, delim_t delim) {
  return varDelimiter(st, font::Delim(st), delim);
}

}
}
// This file is derivative work from Ulrich Germann's Tightly Packed Tries // package (TPTs and related software). // // Original Copyright: // Copyright 2005-2009 Ulrich Germann; all rights reserved. // Under licence to NRC. // // Copyright for modifications: // Technologies langagieres interactives / Interactive Language Technologies // Inst. de technologie de l'information / Institute for Information Technology // Conseil national de recherches Canada / National Research Council Canada // Copyright 2008-2010, Sa Majeste la Reine du Chef du Canada / // Copyright 2008-2010, Her Majesty in Right of Canada /** * @author Darlene Stewart * @file tpt_utils.h Utility functions for the tpt module. * * Technologies langagieres interactives / Interactive Language Technologies * Inst. de technologie de l'information / Institute for Information Technology * Conseil national de recherches Canada / National Research Council Canada * Copyright 2010, Sa Majeste la Reine du Chef du Canada / * Copyright 2010, Her Majesty in Right of Canada */ #include <sys/stat.h> #include <cstring> #include <cerrno> #include "tpt_utils.h" #include "tpt_typedefs.h" #if IN_PORTAGE #include "file_utils.h" #endif namespace ugdiss { #if defined(Darwin) || defined(CYGWIN) #define stat64 stat #endif uint64_t getFileSize(const std::string& fname) { struct stat64 buf; if (stat64(fname.c_str(),&buf) < 0) return -1; return buf.st_size; } void open_mapped_file_source(bio::mapped_file_source& mfs, const string& fname) { #if IN_PORTAGE error_unless_exists(fname, false, "memory mapped"); #endif try { mfs.open(fname); if (!mfs.is_open()) throw std::exception(); } catch(std::exception& e) { cerr << efatal << "Unable to open memory mapped file '" << fname << "' for reading." << endl << e.what() << endl << "errno=" << errno << ": " << strerror(errno) << exit_1; } } } // namespace ugdiss
/// Trivial demo function: always yields the constant 0xBABA (47802 decimal).
int my_function()
{
    // Named constant instead of a bare literal; the returned value is
    // unchanged.
    constexpr int kMagicResult = 0xbaba;
    return kMagicResult;
}
/* +------------------------------------------------------------------------+
   |                Mobile Robot Programming Toolkit (MRPT)                 |
   |                         http://www.mrpt.org/                           |
   |                                                                        |
   | Copyright (c) 2005-2018, Individual contributors, see AUTHORS file     |
   | See: http://www.mrpt.org/Authors - All rights reserved.                |
   | Released under BSD License. See details in http://www.mrpt.org/License |
   +------------------------------------------------------------------------+ */

#include "mrpt_bridge/map.h"
#include "mrpt_bridge/pose.h"
#include <nav_msgs/OccupancyGrid.h>
#include <ros/console.h>

// Only include MRPT classes that are really used to avoid slow down compilation
#include <mrpt/random.h>
#if MRPT_VERSION >= 0x199
#include <mrpt/config/CConfigFile.h>
#include <mrpt/io/CFileGZInputStream.h>
using namespace mrpt::config;
using namespace mrpt::io;
#else
#include <mrpt/utils/CConfigFile.h>
#include <mrpt/utils/CFileGZInputStream.h>
using namespace mrpt::utils;
#endif
#include <mrpt/system/filesystem.h>  // for fileExists()
#include <mrpt/system/string_utils.h>  // for lowerCase()
#include <mrpt/version.h>

#include <mrpt/maps/COccupancyGridMap2D.h>
#include <mrpt/maps/CMultiMetricMap.h>
#include <mrpt/maps/CSimpleMap.h>
using mrpt::maps::CLogOddsGridMapLUT;
using mrpt::maps::CMultiMetricMap;
using mrpt::maps::COccupancyGridMap2D;
using mrpt::maps::CSimpleMap;

#if MRPT_VERSION >= 0x199
#include <mrpt/serialization/CArchive.h>
#endif

#ifndef INT8_MAX  // avoid duplicated #define's
#define INT8_MAX 0x7f
#define INT8_MIN (-INT8_MAX - 1)
#define INT16_MAX 0x7fff
#define INT16_MIN (-INT16_MAX - 1)
#endif  // INT8_MAX

namespace mrpt_bridge
{
MapHdl* MapHdl::instance_ = NULL;

// Builds the two lookup tables that translate between MRPT log-odds grid
// cells and ROS occupancy values (0..100, -1 = unknown).
MapHdl::MapHdl()
{
	/// creation of the lookup table and pointers
	CLogOddsGridMapLUT<COccupancyGridMap2D::cellType> table;
#ifdef OCCUPANCY_GRIDMAP_CELL_SIZE_8BITS
	lut_cellmrpt2rosPtr = lut_cellmrpt2ros + INT8_MAX + 1;  // center the pointer
	lut_cellros2mrptPtr = lut_cellros2mrpt + INT8_MAX + 1;  // center the pointer
	// NOTE(review): `i < INT8_MAX` never fills the entry for INT8_MAX itself;
	// confirm whether the top cell value can occur in practice.
	for (int i = INT8_MIN; i < INT8_MAX; i++)
	{
#else
	lut_cellmrpt2rosPtr = lut_cellmrpt2ros + INT16_MAX + 1;  // center the pointer
	// FIX: the loop condition was `INT16_MIN < INT16_MAX`, which is constant
	// true — an infinite loop writing past the end of the table in the
	// 16-bit-cell build.  It must compare the loop variable.
	// NOTE(review): lut_cellros2mrptPtr is never initialized in this branch
	// but is written by the second loop below — confirm against map.h and
	// center it here if the ros2mrpt table exists in the 16-bit config.
	for (int i = INT16_MIN; i < INT16_MAX; i++)
	{
#endif
		// MRPT cell -> ROS occupancy: p(occupied) scaled to 0..100.
		float p = 1.0 - table.l2p(i);
		int idx = round(p * 100.);
		lut_cellmrpt2rosPtr[i] = idx;
		// printf("- cell -> ros = %4i -> %4i, p=%4.3f\n", i, idx, p);
	}
	// ROS occupancy (int8, -128..126 here) -> MRPT log-odds cell.  Values
	// outside 0..100 are treated as unknown and map to p = 0.5.
	for (int i = INT8_MIN; i < INT8_MAX; i++)
	{
		float v = i;
		if (v > 100) v = 50;
		if (v < 0) v = 50;
		float p = 1.0 - (v / 100.0);
		int idx = table.p2l(p);
		if (i < 0)
			lut_cellros2mrptPtr[i] = table.p2l(0.5);
		else if (i > 100)
			lut_cellros2mrptPtr[i] = table.p2l(0.5);
		else
			lut_cellros2mrptPtr[i] = idx;
		// printf("- ros -> cell = %4i -> %4i, p=%4.3f\n", i, idx, p);
		fflush(stdout);
	}
}

MapHdl::~MapHdl() {}

// Lazily created singleton; not thread-safe (first call must happen before
// concurrent use).
MapHdl* MapHdl::instance()
{
	if (instance_ == NULL) instance_ = new MapHdl();
	return instance_;
}

// ROS occupancy grid -> MRPT grid map.  Rotated origins are rejected.
bool convert(const nav_msgs::OccupancyGrid& src, COccupancyGridMap2D& des)
{
	if ((src.info.origin.orientation.x != 0) ||
		(src.info.origin.orientation.y != 0) ||
		(src.info.origin.orientation.z != 0) ||
		(src.info.origin.orientation.w != 1))
	{
		std::cerr << "Rotated maps are not supported by mrpt!" << std::endl;
		return false;
	}
	float xmin = src.info.origin.position.x;
	float ymin = src.info.origin.position.y;
	float xmax = xmin + src.info.width * src.info.resolution;
	float ymax = ymin + src.info.height * src.info.resolution;

	MRPT_START
	des.setSize(xmin, xmax, ymin, ymax, src.info.resolution);
	MRPT_END
	// printf("--------convert: %i x %i, %4.3f, %4.3f, %4.3f, %4.3f,
	// r:%4.3f\n",des.getSizeX(), des.getSizeY(), des.getXMin(), des.getXMax(),
	// des.getYMin(), des.getYMax(), des.getResolution());

	/// I hope the data is allways aligned
	for (unsigned int h = 0; h < src.info.height; h++)
	{
		COccupancyGridMap2D::cellType* pDes = des.getRow(h);
		const int8_t* pSrc = &src.data[h * src.info.width];
		for (unsigned int w = 0; w < src.info.width; w++)
		{
			*pDes++ = MapHdl::instance()->cellRos2Mrpt(*pSrc++);
		}
	}
	return true;
}

// MRPT grid map -> ROS occupancy grid, stamping the supplied header.
bool convert(
	const COccupancyGridMap2D& src, nav_msgs::OccupancyGrid& des,
	const std_msgs::Header& header)
{
	des.header = header;
	return convert(src, des);
}

// MRPT grid map -> ROS occupancy grid (metadata + per-cell translation).
bool convert(const COccupancyGridMap2D& src, nav_msgs::OccupancyGrid& des)
{
	// printf("--------mrpt2ros: %f, %f, %f, %f, r:%f\n",src.getXMin(),
	// src.getXMax(), src.getYMin(), src.getYMax(), src.getResolution());
	des.info.width = src.getSizeX();
	des.info.height = src.getSizeY();
	des.info.resolution = src.getResolution();

	des.info.origin.position.x = src.getXMin();
	des.info.origin.position.y = src.getYMin();
	des.info.origin.position.z = 0;

	des.info.origin.orientation.x = 0;
	des.info.origin.orientation.y = 0;
	des.info.origin.orientation.z = 0;
	des.info.origin.orientation.w = 1;

	/// I hope the data is allways aligned
	des.data.resize(des.info.width * des.info.height);
	for (unsigned int h = 0; h < des.info.height; h++)
	{
		const COccupancyGridMap2D::cellType* pSrc = src.getRow(h);
		int8_t* pDes = &des.data[h * des.info.width];
		for (unsigned int w = 0; w < des.info.width; w++)
		{
			*pDes++ = MapHdl::instance()->cellMrpt2Ros(*pSrc++);
		}
	}
	return true;
}

// Loads a metric map from a .simplemap or .gridmap file as configured by
// `_config_file`/`_section_name`.  Returns false when no usable map file is
// given; throws on an unknown extension.
const bool MapHdl::loadMap(
	CMultiMetricMap& _metric_map, const CConfigFile& _config_file,
	const std::string& _map_file, const std::string& _section_name,
	bool _debug)
{
	using namespace mrpt::maps;

	TSetOfMetricMapInitializers mapInitializers;
	mapInitializers.loadFromConfigFile(_config_file, _section_name);

	CSimpleMap simpleMap;

	// Load the set of metric maps to consider in the experiments:
	_metric_map.setListOfMaps(mapInitializers);
	if (_debug) mapInitializers.dumpToConsole();

#if MRPT_VERSION >= 0x199
	auto& r = mrpt::random::getRandomGenerator();
#else
	auto& r = mrpt::random::randomGenerator;
#endif
	r.randomize();

	if (_debug)
		printf(
			"%s, _map_file.size() = %zu\n", _map_file.c_str(),
			_map_file.size());

	// Load the map (if any):
	if (_map_file.size() < 3)
	{
		if (_debug) printf("No mrpt map file!\n");
		return false;
	}
	else
	{
		ASSERT_(mrpt::system::fileExists(_map_file));

		// Detect file extension:
		std::string mapExt =
			mrpt::system::lowerCase(mrpt::system::extractFileExtension(
				_map_file, true));  // Ignore possible .gz extensions

		if (!mapExt.compare("simplemap"))
		{
			// It's a ".simplemap":
			if (_debug) printf("Loading '.simplemap' file...");
			CFileGZInputStream f(_map_file);
#if MRPT_VERSION >= 0x199
			mrpt::serialization::archiveFrom(f) >> simpleMap;
#else
			f >> simpleMap;
#endif
			printf("Ok\n");

			ASSERTMSG_(
				simpleMap.size() > 0,
				"Simplemap was aparently loaded OK, but it is empty!");

			// Build metric map:
			if (_debug) printf("Building metric map(s) from '.simplemap'...");
			_metric_map.loadFromSimpleMap(simpleMap);
			if (_debug) printf("Ok\n");
		}
		else if (!mapExt.compare("gridmap"))
		{
			// It's a ".gridmap":
			if (_debug) printf("Loading gridmap from '.gridmap'...");
			ASSERTMSG_(
#if MRPT_VERSION >= 0x199
				_metric_map.countMapsByClass<COccupancyGridMap2D>() == 1,
#else
				_metric_map.m_gridMaps.size() == 1,
#endif
				"Error: Trying to load a gridmap into a multi-metric map "
				"requires 1 gridmap member.");
			CFileGZInputStream fm(_map_file);
#if MRPT_VERSION >= 0x199
			mrpt::serialization::archiveFrom(fm) >>
				(*_metric_map.mapByClass<COccupancyGridMap2D>());
#else
			fm >> (*_metric_map.m_gridMaps[0]);
#endif
			if (_debug) printf("Ok\n");
		}
		else
		{
			THROW_EXCEPTION(mrpt::format(
				"Map file has unknown extension: '%s'", mapExt.c_str()));
			return false;
		}
	}
	return true;
}

}  // namespace mrpt_bridge
#pragma once #include "c/struct-stat.h" #include "Result.hxx" #define EBADF 9 #define EFAULT 14 #define EIO 5 #define EOVERFLOW 98 #define __NR_fstat 189 namespace freebsd { static inline auto fstat(int fd, struct stat* sb) noexcept { enum Error { _E(BADF), _E(FAULT), _E(IO), _E(OVERFLOW), }; Result<void, Error> result; #if defined(__x86_64__) asm volatile ("syscall" "\nsbb %1, %1" : "=a" (result.__word), "=r" (result.__is_error) : "a" (__NR_fstat), "D" (fd), "S" (sb) : "memory"); #else # error #endif return result; } }
/*
 * basetree.cpp
 *
 *  Created on: 08-Jan-2019
 *      Author: P. Sashittal
 */

#include "basetree.h"
#include <lemon/bfs.h>

// Default constructor: empty tree, no unsampled hosts.
BaseTree::BaseTree()
    : _tree()
    , _nodeToId(_tree)
    , _idToNode()
    , _nodeToIndex(_tree)
    , _indexToNode()
    , _arcToIndex(_tree)
    , _timestamp(_tree)
    , _label(_tree)
    , _hostLabel()
    , _nhosts()
    , _unshosts()
    , _enttime()
    , _remtime()
    , _maxInf()
    , _totalTime()
{
    _unshosts = 0;
}

// Constructor with a fixed number of unsampled hosts.
BaseTree::BaseTree(int unhosts)
    : _tree()
    , _nodeToId(_tree)
    , _idToNode()
    , _nodeToIndex(_tree)
    , _indexToNode()
    , _arcToIndex(_tree)
    , _timestamp(_tree)
    , _label(_tree)
    , _hostLabel()
    , _nhosts()
    , _unshosts(unhosts)
    , _enttime()
    , _remtime()
    , _maxInf()
    , _totalTime()
{
}

// Fallback writer used when no solution exists.
void BaseTree::writePtree(std::ostream& out) const
{
    out << "no solution";
}

// Fallback writer emitting an arbitrary message.
void BaseTree::writePtree(std::ostream& out, const std::string& msg) const
{
    out << msg;
}

// Writes the ptree with host labels taken from `sol_label` for internal
// nodes; leaves keep their input labels.  One row per node, in id order:
// leaves as "time 0 0 host", internal nodes as "time child-ids... host".
void BaseTree::writePtree(std::ostream& out, const IntNodeMap& sol_label) const
{
    int ntotal = lemon::countNodes(_tree);
    for (int i = 1; i <= ntotal; ++i)
    {
        assert(_idToNode.count(std::to_string(i)));
        Node v = _idToNode.find(std::to_string(i))->second;
        if (lemon::countOutArcs(_tree, v) == 0)
        {
            // is leaf
            out << _timestamp[v] << "\t0\t0\t" << _label[v] + 1 << std::endl;
        }
        else
        {
            // is internal node
            out << _timestamp[v] << "\t";
            for (OutArcIt e(_tree, v); e != lemon::INVALID; ++e)
            {
                out << _nodeToId[_tree.target(e)] << "\t";
            }
            //out << _label[_idToNode[std::to_string(i)]] + 1 << std::endl;
            out << sol_label[v] + 1 << std::endl;
        }
    }
}

// Writes an expanded ptree in which every arc also contributes a row holding
// its transmission time (xi_time) and host (xi_label); node rows reference
// the row numbers of their children.
void BaseTree::writePtree(std::ostream& out, const IntNodeMap& sol_label,
                          const IntArcMap& xi_label,
                          const DoubleArcMap& xi_time) const
{
    int ntotal = lemon::countNodes(_tree);
    int leaf_counter = 0;  // count the number of leaves written
    int row_counter = 0;   // count the number of rows written
    for (int i = 1; i <= ntotal; ++i)
    {
        assert(_idToNode.count(std::to_string(i)));
        Node v = _idToNode.find(std::to_string(i))->second;
        if (lemon::countOutArcs(_tree, v) == 0)
        {
            // is leaf
            out << _timestamp[v] << "\t0\t0\t" << _label[v] + 1 << std::endl;
            ++leaf_counter;
            ++row_counter;
        }
        else
        {
            // is internal node
            // first write xi of each outgoing arc of the node
            for (OutArcIt e(_tree, v); e != lemon::INVALID; ++e)
            {
                out << xi_time[e] << "\t";
                // Leaf targets keep their id; internal targets are remapped
                // to account for the extra per-arc rows inserted above them.
                if (std::stoi(_nodeToId[_tree.target(e)]) <= leaf_counter)
                {
                    out << _nodeToId[_tree.target(e)] << "\t0\t";
                }
                else
                {
                    out << 3*(std::stoi(_nodeToId[_tree.target(e)])) - 2*leaf_counter << "\t0\t";
                }
                out << xi_label[e] + 1 << std::endl;
                ++row_counter;
            }
            out << _timestamp[v] << "\t" << row_counter - 1 << "\t"
                << row_counter << "\t" << sol_label[v] + 1 << std::endl;
            ++row_counter;
        }
    }
}

// Dumps every arc of the tree as "source-id target-id", one per line.
void BaseTree::writeTtree(std::ostream& out) const
{
    for (ArcIt a(_tree); a != lemon::INVALID; ++a)
    {
        Node u = _tree.source(a);
        Node v = _tree.target(a);
        out << _nodeToId[u] << " " << _nodeToId[v] << std::endl;
    }
}

// Reads host removal times from a 4-column transmission-tree file (only
// column 1, the time, is used here); entry times default to 0.  Initializes
// _nhosts and _totalTime.  Returns false on a malformed line.
bool BaseTree::readTtree(std::istream& in)
{
    int idx = 0;
    while (in.good())
    {
        std::string line;
        getline(in, line);
        if (line.empty()) break;

        StringVector s;
        boost::split(s, line, boost::is_any_of("\t "));

        if (s.size() != 4)
        {
            std::cerr << "Error: line '" << line << "' incorrect number of columns" << std::endl;
            return false;
        }

        double time = std::stod(s[1]);
        _enttime.push_back(0);
        _remtime.push_back(time);
        idx++;
    }

    /// initialize nhosts
    _nhosts = idx;

    /// initiaize totalTime
    _totalTime = *std::max_element(_remtime.begin(), _remtime.end());

    /*
    std::cout << "number of hosts is: " << _nhosts << std::endl;
    std::cout << "removal times are: " << std::endl;
    for (int s = 0; s < _nhosts; ++s)
    {
        std::cout << "host " << s+1 << " has " << _remtime[s] << std::endl;
    }
    std::cout<< "total time is " << _totalTime << std::endl;
    */

    return true;
}

// Writes the transmission network N as a GraphViz digraph: one node per
// host (colored from a fixed palette), one edge per distinct transmission
// event group, labelled with the group's arc count.
void BaseTree::writeDOT(const ArcMatrix& N, const IntNodeMap& l, std::ostream& out) const
{
    StringVector colorMap({ "#3243BA", "#0363E1", "#0D75DC", "#1485D4",
                            "#0998D1", "#06A7C6", "#15B1B4", "#38B99E",
                            "#65BE86", "#92BF73", "#B7BD64", "#D9BA56",
                            "#F8BB44", "#FCCE2E", "#F5E41D", "#F9FB0E" });

    out << "digraph N {" << std::endl;
    out << "\toverlap=\"false\"" << std::endl;
    out << "\trankdir=\"LR\"" << std::endl;

    for (int s = 0; s < _nhosts; ++s)
    {
        out << "\t" << s << " [label=\"" << _hostLabel[s]
            << "\",width=1.2,height=1.2,style=\"\",penwidth=3,color=\""
            << colorMap[s % colorMap.size()] << "\"]" << std::endl;
    }

    for (const ArcVector& event : N)
    {
        int s = l[_tree.source(event.front())];
        int t = l[_tree.target(event.front())];
        out << "\t" << s << " -> " << t
            << " [penwidth=1,color=black,label=\"" << event.size() << "\"]" << std::endl;
    }

    out << "}" << std::endl;
}

// Reads hosts from a 3-column file "label entry-time removal-time", then
// appends _unshosts unsampled hosts spanning [0, _totalTime].  Initializes
// _hostLabel, _enttime, _remtime, _nhosts and _totalTime.
bool BaseTree::readHost(std::istream& in)
{
    // TODO: Clear everything before starting
    int idx = 0;
    _hostLabel.clear();
    while (in.good())
    {
        std::string line;
        getline(in, line);
        if (line.empty()) break;

        StringVector s;
        boost::split(s, line, boost::is_any_of("\t "));

        if (s.size() != 3)
        {
            std::cerr << "Error: line '" << line << "' incorrect number of columns" << std::endl;
            return false;
        }

        double etime = std::stod(s[1]);
        double rtime = std::stod(s[2]);
        _hostLabel.push_back(s[0]);
        _enttime.push_back(etime);
        _remtime.push_back(rtime);
        idx++;
    }

    /// initiaize totalTime
    _totalTime = *std::max_element(_remtime.begin(), _remtime.end());

    /// add unsampled hosts
    for (int s = 0; s < _unshosts; ++s)
    {
        _enttime.push_back(0);
        _remtime.push_back(_totalTime);
        idx++;
    }

    // initialize nhosts
    _nhosts = idx;

    std::cout << "number of hosts is: " << _nhosts << std::endl;

    /*
    std::cout << "removal times are: " << std::endl;
    for (int s = 0; s < _nhosts; ++s)
    {
        std::cout << "host " << s+1 << " has " << _enttime[s] << " and " << _remtime[s] << std::endl;
    }
    */

    std::cout<< "total time is " << _totalTime << std::endl;

    return true;
}

// Parses the phylogenetic tree.  Binary format rows are
// "time child1 child2 host" (children "0 0" marks a leaf); the general
// format is "time host [child-ids...]".  Children must appear before their
// parent.  Also verifies a unique root and builds all node/arc indices.
bool BaseTree::readPtree(std::istream& in, bool binaryTree)
{
    _idToNode.clear();
    int idx = 1;
    while (in.good())
    {
        std::string line;
        getline(in, line);
        if (line.empty()) break;

        StringVector s;
        boost::split(s, line, boost::is_any_of("\t "));

        if (binaryTree)
        {
            if (s.size() != 4)
            {
                std::cerr << "Error: line '" << line << "' incorrect number of columns" << std::endl;
                return false;
            }

            double time = std::stod(s[0]);
            std::string child1 = s[1];
            std::string child2 = s[2];
            int vlabel = std::stoi(s[3]) - 1;

            if((child1 == "0") && (child2 == "0"))
            {
                // node is a leaf
                Node u = _tree.addNode();
                _label[u] = vlabel;
                _timestamp[u] = time;
                _nodeToId[u] = std::to_string(idx);
                _nodeToIndex[u] = idx - 1;
                _indexToNode.push_back(u);
                _idToNode[std::to_string(idx)] = u;
            }
            else
            {
                // node is internal
                Node u = _tree.addNode();
                _label[u] = vlabel;
                _timestamp[u] = time;
                _tree.addArc(u, _idToNode[child1]);
                _tree.addArc(u, _idToNode[child2]);
                _nodeToId[u] = std::to_string(idx);
                _nodeToIndex[u] = idx - 1;
                _indexToNode.push_back(u);
                _idToNode[std::to_string(idx)] = u;
            }
        }
        else
        {
            if (s.size() < 2)
            {
                std::cerr << "Error: line '" << line << "' incorrect number of columns" << std::endl;
                return false;
            }

            double time = std::stod(s[0]);
            int vlabel = std::stoi(s[1]) - 1;

            if (s.size() == 2)
            {
                // is leaf
                Node u = _tree.addNode();
                _label[u] = vlabel;
                _timestamp[u] = time;
                _nodeToId[u] = std::to_string(idx);
                _nodeToIndex[u] = idx - 1;
                _indexToNode.push_back(u);
                _idToNode[std::to_string(idx)] = u;
            }
            else
            {
                // is internal node
                Node u = _tree.addNode();
                _label[u] = vlabel;
                _timestamp[u] = time;
                // FIX: loop index was a signed int compared against the
                // unsigned s.size(); use size_t (behavior unchanged).
                for (size_t i = 2; i < s.size(); ++i)
                {
                    _tree.addArc(u, _idToNode[s[i]]);
                }
                _nodeToId[u] = std::to_string(idx);
                _nodeToIndex[u] = idx - 1;
                _indexToNode.push_back(u);
                _idToNode[std::to_string(idx)] = u;
            }
        }
        ++idx;
    }

    // check that tree has a single root
    _root = lemon::INVALID;
    for (NodeIt node(_tree); node != lemon::INVALID; ++node)
    {
        if (InArcIt(_tree, node) == lemon::INVALID)
        {
            if (_root != lemon::INVALID)
            {
                std::cerr << "Error: multiple root node '" << _nodeToId[node]
                          << "' and '" << _nodeToId[_root] << "'" << std::endl;
                return false;
            }
            _root = node;
        }
    }

    // check if tree is empty
    if (_idToNode.empty())
    {
        std::cerr << "Error: empty tree" << std::endl;
        return false;
    }

    // initialize arc-to-index
    idx = 0;
    for (ArcIt a(_tree); a!= lemon::INVALID; ++a)
    {
        _arcToIndex[a] = idx;
        ++idx;
    }

    return true;
}

// Counts arcs whose endpoints carry different host labels (number of
// transmission arcs under labeling l).
int BaseTree::getMu(const IntNodeMap& l) const
{
    int mu = 0;
    for (ArcIt a(_tree); a != lemon::INVALID; ++a)
    {
        Node u = _tree.source(a);
        Node v = _tree.target(a);
        if (l[u] != l[v]) ++mu;
    }
    return mu;
}

// Groups the transmission arcs between every ordered host pair (s, t) into
// transmission events: arcs whose time intervals overlap the earliest-ending
// interval form one event, are recorded in N, and are removed; repeat until
// no intervals remain for the pair.
ArcMatrix BaseTree::getN(const IntNodeMap& l) const
{
    ArcMatrix N;
    const int nrInfectedHosts = getNrHost();
    // FIX: removed two unused locals from the original: the write-only
    // counter `gamma` and the never-used vector `timeIntervalsToRemove`.
    for (int s = 0; s < nrInfectedHosts; ++s)
    {
        for (int t = 0; t < nrInfectedHosts; ++t)
        {
            if (s != t)
            {
                // timeIntervals - ((endtime, starttime), arc)
                std::vector<std::pair<std::pair<double, double>, Arc> > timeIntervals;
                for (ArcIt eij(_tree); eij != lemon::INVALID; ++eij)
                {
                    Node vi = _tree.source(eij);
                    Node vj = _tree.target(eij);
                    if (l[vi] == s && l[vj] == t)
                    {
                        timeIntervals.push_back(std::make_pair(std::make_pair(getTime(vj), getTime(vi)), eij));
                    }
                }
                while (timeIntervals.size() > 0)
                {
                    // Interval with the earliest end time anchors the event.
                    auto minTime = *std::min_element(timeIntervals.cbegin(), timeIntervals.cend());

                    N.push_back(ArcVector());
                    for (const auto& x : timeIntervals)
                    {
                        if (x.first.second <= minTime.first.first)
                        {
                            N.back().push_back(x.second);
                        }
                    }
                    timeIntervals.erase(std::remove_if(timeIntervals.begin(), timeIntervals.end(),
                                                       [minTime](const std::pair<std::pair<double, double>, Arc>& x)
                                                       { return x.first.second <= minTime.first.first; }),
                                        timeIntervals.end());
                }
            }
        }
    }
    return N;
}

// Convenience loader: host file + ptree file, with `unsampled_hosts` extra
// "Unsampled" hosts appended.
void BaseTree::readInputWithHosts(std::istream& hfile, std::istream& pfile,
                                  int unsampled_hosts, bool binaryTree,
                                  BaseTree& B)
{
    // construct the basetree
    B.setUnHosts(unsampled_hosts);

    // read the host file
    B.readHost(hfile);

    for (int i = 0; i < unsampled_hosts; ++i)
    {
        B._hostLabel.push_back("Unsampled");
    }

    // read the pfile
    B.readPtree(pfile, binaryTree);
}

// Convenience loader: transmission-tree file + (non-binary) ptree file.
void BaseTree::readInputWithTransmission(std::istream& tfile,
                                         std::istream& pfile, BaseTree& B)
{
    // read the tfile
    B.readTtree(tfile);

    // read the pfile
    B.readPtree(pfile, false);
}

// Number of transmission events induced by labeling l.
int BaseTree::getGamma(const IntNodeMap& l) const
{
    return getN(l).size();
}
/*----------------------------------------------------------------------------*/
/* Copyright (c) 2018 FIRST. All Rights Reserved.                             */
/* Open Source Software - may be modified and shared by FRC teams. The code   */
/* must be accompanied by the FIRST BSD license file in the root directory of */
/* the project.                                                               */
/*----------------------------------------------------------------------------*/

#include "wpi/uv/Tcp.h"

#include <cstring>

#include "wpi/uv/util.h"

namespace wpi {
namespace uv {

// Creates a TCP handle attached to `loop`.  On libuv failure the error is
// reported through the loop and nullptr is returned.
std::shared_ptr<Tcp> Tcp::Create(Loop& loop, unsigned int flags) {
  auto h = std::make_shared<Tcp>(private_init{});
  int err = uv_tcp_init_ex(loop.GetRaw(), h->GetRaw(), flags);
  if (err < 0) {
    loop.ReportError(err);
    return nullptr;
  }
  h->Keep();  // handle keeps itself alive while registered with the loop
  return h;
}

// Closes the handle and, from the close callback, re-initializes it on the
// same loop and invokes `callback` — allows reusing the Tcp object for a
// fresh connection.
void Tcp::Reuse(std::function<void()> callback, unsigned int flags) {
  if (IsClosing()) return;
  if (!m_reuseData) m_reuseData = std::make_unique<ReuseData>();
  m_reuseData->callback = callback;
  m_reuseData->flags = flags;
  uv_close(GetRawHandle(), [](uv_handle_t* handle) {
    Tcp& h = *static_cast<Tcp*>(handle->data);
    if (!h.m_reuseData) return;  // just in case
    auto data = std::move(h.m_reuseData);
    int err = uv_tcp_init_ex(h.GetLoopRef().GetRaw(), h.GetRaw(), data->flags);
    if (err < 0) {
      h.ReportError(err);
      return;
    }
    data->callback();
  });
}

// Accepts a pending connection into a newly created client handle; returns
// nullptr (and releases the client) on failure.
std::shared_ptr<Tcp> Tcp::Accept() {
  auto client = Create(GetLoopRef());
  if (!client) return nullptr;
  if (!Accept(client)) {
    client->Release();
    return nullptr;
  }
  return client;
}

// NOTE(review): returns a raw pointer out of a temporary shared_ptr; the
// handle presumably stays alive only because Create() called Keep() — confirm
// callers never use the pointer after the handle is closed.
Tcp* Tcp::DoAccept() { return Accept().get(); }

// Resolves `ip`:`port` (IPv4) and binds; resolution errors are reported to
// the loop instead of thrown.
void Tcp::Bind(const Twine& ip, unsigned int port, unsigned int flags) {
  sockaddr_in addr;
  int err = NameToAddr(ip, port, &addr);
  if (err < 0)
    ReportError(err);
  else
    Bind(reinterpret_cast<const sockaddr&>(addr), flags);
}

// IPv6 variant of Bind().
void Tcp::Bind6(const Twine& ip, unsigned int port, unsigned int flags) {
  sockaddr_in6 addr;
  int err = NameToAddr(ip, port, &addr);
  if (err < 0)
    ReportError(err);
  else
    Bind(reinterpret_cast<const sockaddr&>(addr), flags);
}

// Returns the locally bound address; zeroed struct on failure.
sockaddr_storage Tcp::GetSock() {
  sockaddr_storage name;
  int len = sizeof(name);
  if (!Invoke(&uv_tcp_getsockname, GetRaw(), reinterpret_cast<sockaddr*>(&name),
              &len))
    std::memset(&name, 0, sizeof(name));
  return name;
}

// Returns the connected peer's address; zeroed struct on failure.
sockaddr_storage Tcp::GetPeer() {
  sockaddr_storage name;
  int len = sizeof(name);
  if (!Invoke(&uv_tcp_getpeername, GetRaw(), reinterpret_cast<sockaddr*>(&name),
              &len))
    std::memset(&name, 0, sizeof(name));
  return name;
}

// Starts a connect; on completion the request's `connected` signal fires (or
// the error is reported), then the request releases itself.
void Tcp::Connect(const sockaddr& addr,
                  const std::shared_ptr<TcpConnectReq>& req) {
  if (Invoke(&uv_tcp_connect, req->GetRaw(), GetRaw(), &addr,
             [](uv_connect_t* req, int status) {
               auto& h = *static_cast<TcpConnectReq*>(req->data);
               if (status < 0)
                 h.ReportError(status);
               else
                 h.connected();
               h.Release();  // this is always a one-shot
             }))
    req->Keep();
}

// Convenience overload: wraps `callback` in a fresh connect request.
void Tcp::Connect(const sockaddr& addr, std::function<void()> callback) {
  auto req = std::make_shared<TcpConnectReq>();
  req->connected.connect(callback);
  Connect(addr, req);
}

// IPv4 name-resolving connect (request form).
void Tcp::Connect(const Twine& ip, unsigned int port,
                  const std::shared_ptr<TcpConnectReq>& req) {
  sockaddr_in addr;
  int err = NameToAddr(ip, port, &addr);
  if (err < 0)
    ReportError(err);
  else
    Connect(reinterpret_cast<const sockaddr&>(addr), req);
}

// IPv4 name-resolving connect (callback form).
void Tcp::Connect(const Twine& ip, unsigned int port,
                  std::function<void()> callback) {
  sockaddr_in addr;
  int err = NameToAddr(ip, port, &addr);
  if (err < 0)
    ReportError(err);
  else
    Connect(reinterpret_cast<const sockaddr&>(addr), callback);
}

// IPv6 name-resolving connect (request form).
void Tcp::Connect6(const Twine& ip, unsigned int port,
                   const std::shared_ptr<TcpConnectReq>& req) {
  sockaddr_in6 addr;
  int err = NameToAddr(ip, port, &addr);
  if (err < 0)
    ReportError(err);
  else
    Connect(reinterpret_cast<const sockaddr&>(addr), req);
}

// IPv6 name-resolving connect (callback form).
void Tcp::Connect6(const Twine& ip, unsigned int port,
                   std::function<void()> callback) {
  sockaddr_in6 addr;
  int err = NameToAddr(ip, port, &addr);
  if (err < 0)
    ReportError(err);
  else
    Connect(reinterpret_cast<const sockaddr&>(addr), callback);
}

}  // namespace uv
}  // namespace wpi
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h"

#include <list>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
#include "gpu/command_buffer/service/safe_shared_memory_pool.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_preference.h"
#include "ui/gl/scoped_binders.h"

namespace gpu {
namespace {

const char kAsyncTransferThreadName[] = "AsyncTransferThread";

// Runs on the transfer thread: swaps the caller's shared-memory handle for
// the pooled (pinned) one, then notifies the observer that all prior
// transfers touching this memory are done.
void PerformNotifyCompletion(
    AsyncMemoryParams mem_params,
    ScopedSafeSharedMemory* safe_shared_memory,
    scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
  TRACE_EVENT0("gpu", "PerformNotifyCompletion");
  AsyncMemoryParams safe_mem_params = mem_params;
  safe_mem_params.shared_memory = safe_shared_memory->shared_memory();
  observer->DidComplete(safe_mem_params);
}

// TODO(backer): Factor out common thread scheduling logic from the EGL and
// ShareGroup implementations. http://crbug.com/239889

// Dedicated upload thread owning an offscreen GL surface/context in the
// decoder's share group.  Created lazily (leaky singleton below) and never
// torn down.
class TransferThread : public base::Thread {
 public:
  TransferThread() : base::Thread(kAsyncTransferThreadName),
                     initialized_(false) {
    Start();
#if defined(OS_ANDROID) || defined(OS_LINUX)
    SetPriority(base::kThreadPriority_Background);
#endif
  }
  virtual ~TransferThread() {
    // The only instance of this class was declared leaky.
    NOTREACHED();
  }

  // Called on the main thread; blocks until the transfer thread has created
  // its GL surface/context (or failed to).
  void InitializeOnMainThread(gfx::GLContext* parent_context) {
    TRACE_EVENT0("gpu", "TransferThread::InitializeOnMainThread");
    if (initialized_)
      return;

    base::WaitableEvent wait_for_init(true, false);
    message_loop_proxy()->PostTask(
        FROM_HERE,
        base::Bind(&TransferThread::InitializeOnTransferThread,
                   base::Unretained(this),
                   base::Unretained(parent_context),
                   &wait_for_init));
    wait_for_init.Wait();
  }

  virtual void CleanUp() OVERRIDE {
    surface_ = NULL;
    context_ = NULL;
  }

  SafeSharedMemoryPool* safe_shared_memory_pool() {
    return &safe_shared_memory_pool_;
  }

 private:
  bool initialized_;

  scoped_refptr<gfx::GLSurface> surface_;
  scoped_refptr<gfx::GLContext> context_;
  SafeSharedMemoryPool safe_shared_memory_pool_;

  // Runs on the transfer thread; signals |caller_wait| on every exit path so
  // InitializeOnMainThread never blocks forever.
  void InitializeOnTransferThread(gfx::GLContext* parent_context,
                                  base::WaitableEvent* caller_wait) {
    TRACE_EVENT0("gpu", "InitializeOnTransferThread");

    if (!parent_context) {
      LOG(ERROR) << "No parent context provided.";
      caller_wait->Signal();
      return;
    }

    surface_ = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1));
    if (!surface_.get()) {
      LOG(ERROR) << "Unable to create GLSurface";
      caller_wait->Signal();
      return;
    }

    // TODO(backer): This is coded for integrated GPUs. For discrete GPUs
    // we would probably want to use a PBO texture upload for a true async
    // upload (that would hopefully be optimized as a DMA transfer by the
    // driver).
    context_ = gfx::GLContext::CreateGLContext(parent_context->share_group(),
                                               surface_.get(),
                                               gfx::PreferIntegratedGpu);
    if (!context_.get()) {
      LOG(ERROR) << "Unable to create GLContext.";
      caller_wait->Signal();
      return;
    }

    context_->MakeCurrent(surface_.get());
    initialized_ = true;
    caller_wait->Signal();
  }

  DISALLOW_COPY_AND_ASSIGN(TransferThread);
};

base::LazyInstance<TransferThread>::Leaky
    g_transfer_thread = LAZY_INSTANCE_INITIALIZER;

base::MessageLoopProxy* transfer_message_loop_proxy() {
  return g_transfer_thread.Pointer()->message_loop_proxy().get();
}

SafeSharedMemoryPool* safe_shared_memory_pool() {
  return g_transfer_thread.Pointer()->safe_shared_memory_pool();
}

// A single upload closure that may be executed on either thread, exactly
// once.  The main thread races via TryRun(); the upload thread runs it via
// BindAndRun().  |task_pending_| lets waiters block until one side finishes.
class PendingTask : public base::RefCountedThreadSafe<PendingTask> {
 public:
  explicit PendingTask(const base::Closure& task)
      : task_(task), task_pending_(true, false) {}

  bool TryRun() {
    // This is meant to be called on the main thread where the texture
    // is already bound.
    DCHECK(checker_.CalledOnValidThread());
    if (task_lock_.Try()) {
      // Only run once.
      if (!task_.is_null())
        task_.Run();
      task_.Reset();

      task_lock_.Release();
      task_pending_.Signal();
      return true;
    }
    return false;
  }

  void BindAndRun(GLuint texture_id) {
    // This is meant to be called on the upload thread where we don't have to
    // restore the previous texture binding.
    DCHECK(!checker_.CalledOnValidThread());
    base::AutoLock locked(task_lock_);
    if (!task_.is_null()) {
      glBindTexture(GL_TEXTURE_2D, texture_id);
      task_.Run();
      task_.Reset();
      glBindTexture(GL_TEXTURE_2D, 0);
      // Flush for synchronization between threads.
      glFlush();
      task_pending_.Signal();
    }
  }

  void Cancel() {
    base::AutoLock locked(task_lock_);
    task_.Reset();
    task_pending_.Signal();
  }

  bool TaskIsInProgress() {
    return !task_pending_.IsSignaled();
  }

  void WaitForTask() {
    task_pending_.Wait();
  }

 private:
  friend class base::RefCountedThreadSafe<PendingTask>;

  virtual ~PendingTask() {}

  base::ThreadChecker checker_;

  base::Lock task_lock_;
  base::Closure task_;
  base::WaitableEvent task_pending_;

  DISALLOW_COPY_AND_ASSIGN(PendingTask);
};

// Class which holds async pixel transfers state.
// The texture_id is accessed by either thread, but everything
// else accessed only on the main thread.
class TransferStateInternal
    : public base::RefCountedThreadSafe<TransferStateInternal> {
 public:
  TransferStateInternal(GLuint texture_id,
                        const AsyncTexImage2DParams& define_params)
      : texture_id_(texture_id), define_params_(define_params) {}

  bool TransferIsInProgress() {
    return pending_upload_task_.get() &&
           pending_upload_task_->TaskIsInProgress();
  }

  void BindTransfer() {
    TRACE_EVENT2("gpu", "BindAsyncTransfer",
                 "width", define_params_.width,
                 "height", define_params_.height);
    DCHECK(texture_id_);

    glBindTexture(GL_TEXTURE_2D, texture_id_);
    bind_callback_.Run();
  }

  void WaitForTransferCompletion() {
    TRACE_EVENT0("gpu", "WaitForTransferCompletion");
    DCHECK(pending_upload_task_.get());
    // Try to steal the task onto this (main) thread; otherwise block until
    // the upload thread finishes it.
    if (!pending_upload_task_->TryRun()) {
      pending_upload_task_->WaitForTask();
    }
    pending_upload_task_ = NULL;
  }

  void CancelUpload() {
    TRACE_EVENT0("gpu", "CancelUpload");
    if (pending_upload_task_.get())
      pending_upload_task_->Cancel();
    pending_upload_task_ = NULL;
  }

  void ScheduleAsyncTexImage2D(
      const AsyncTexImage2DParams tex_params,
      const AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats,
      const base::Closure& bind_callback) {
    pending_upload_task_ = new PendingTask(base::Bind(
        &TransferStateInternal::PerformAsyncTexImage2D,
        this,
        tex_params,
        mem_params,
        // Duplicate the shared memory so there is no way we can get
        // a use-after-free of the raw pixels.
        base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(),
                                               mem_params.shared_memory,
                                               mem_params.shm_size)),
        texture_upload_stats));
    transfer_message_loop_proxy()->PostTask(
        FROM_HERE,
        base::Bind(
            &PendingTask::BindAndRun, pending_upload_task_, texture_id_));

    // Save the late bind callback, so we can notify the client when it is
    // bound.
    bind_callback_ = bind_callback;
  }

  void ScheduleAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    pending_upload_task_ = new PendingTask(base::Bind(
        &TransferStateInternal::PerformAsyncTexSubImage2D,
        this,
        tex_params,
        mem_params,
        // Pin the shared memory for the lifetime of the posted task.
        base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(),
                                               mem_params.shared_memory,
                                               mem_params.shm_size)),
        texture_upload_stats));
    transfer_message_loop_proxy()->PostTask(
        FROM_HERE,
        base::Bind(
            &PendingTask::BindAndRun, pending_upload_task_, texture_id_));
  }

 private:
  friend class base::RefCountedThreadSafe<TransferStateInternal>;

  virtual ~TransferStateInternal() {
  }

  // Runs on either thread (via PendingTask) with the texture already bound.
  void PerformAsyncTexImage2D(
      AsyncTexImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      ScopedSafeSharedMemory* safe_shared_memory,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu", "PerformAsyncTexImage",
                 "width", tex_params.width,
                 "height", tex_params.height);
    DCHECK_EQ(0, tex_params.level);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::HighResNow();

    void* data =
        AsyncPixelTransferDelegate::GetAddress(safe_shared_memory, mem_params);

    {
      TRACE_EVENT0("gpu", "glTexImage2D");
      glTexImage2D(GL_TEXTURE_2D,
                   tex_params.level,
                   tex_params.internal_format,
                   tex_params.width,
                   tex_params.height,
                   tex_params.border,
                   tex_params.format,
                   tex_params.type,
                   data);
    }

    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
                                      begin_time);
    }
  }

  // Runs on either thread (via PendingTask) with the texture already bound.
  void PerformAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      ScopedSafeSharedMemory* safe_shared_memory,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu", "PerformAsyncTexSubImage2D",
                 "width", tex_params.width,
                 "height", tex_params.height);
    DCHECK_EQ(0, tex_params.level);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::HighResNow();

    void* data =
        AsyncPixelTransferDelegate::GetAddress(safe_shared_memory, mem_params);

    {
      TRACE_EVENT0("gpu", "glTexSubImage2D");
      glTexSubImage2D(GL_TEXTURE_2D,
                      tex_params.level,
                      tex_params.xoffset,
                      tex_params.yoffset,
                      tex_params.width,
                      tex_params.height,
                      tex_params.format,
                      tex_params.type,
                      data);
    }

    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
                                      begin_time);
    }
  }

  scoped_refptr<PendingTask> pending_upload_task_;

  GLuint texture_id_;

  // Definition params for texture that needs binding.
  AsyncTexImage2DParams define_params_;

  // Callback to invoke when AsyncTexImage2D is complete
  // and the client can safely use the texture. This occurs
  // during BindCompletedAsyncTransfers().
  base::Closure bind_callback_;
};

}  // namespace

// Per-texture delegate; forwards scheduling to the ref-counted internal
// state so in-flight uploads survive delegate destruction.
class AsyncPixelTransferDelegateShareGroup
    : public AsyncPixelTransferDelegate,
      public base::SupportsWeakPtr<AsyncPixelTransferDelegateShareGroup> {
 public:
  AsyncPixelTransferDelegateShareGroup(
      AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
      GLuint texture_id,
      const AsyncTexImage2DParams& define_params);
  virtual ~AsyncPixelTransferDelegateShareGroup();

  void BindTransfer() { state_->BindTransfer(); }

  // Implement AsyncPixelTransferDelegate:
  virtual void AsyncTexImage2D(
      const AsyncTexImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params,
      const base::Closure& bind_callback) OVERRIDE;
  virtual void AsyncTexSubImage2D(
      const AsyncTexSubImage2DParams& tex_params,
      const AsyncMemoryParams& mem_params) OVERRIDE;
  virtual bool TransferIsInProgress() OVERRIDE;
  virtual void WaitForTransferCompletion() OVERRIDE;

 private:
  // A raw pointer is safe because the SharedState is owned by the Manager,
  // which owns this Delegate.
  AsyncPixelTransferManagerShareGroup::SharedState* shared_state_;
  scoped_refptr<TransferStateInternal> state_;

  DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateShareGroup);
};

AsyncPixelTransferDelegateShareGroup::AsyncPixelTransferDelegateShareGroup(
    AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
    GLuint texture_id,
    const AsyncTexImage2DParams& define_params)
    : shared_state_(shared_state),
      state_(new TransferStateInternal(texture_id, define_params)) {}

AsyncPixelTransferDelegateShareGroup::~AsyncPixelTransferDelegateShareGroup() {
  TRACE_EVENT0("gpu", " ~AsyncPixelTransferDelegateShareGroup");
  state_->CancelUpload();
}

bool AsyncPixelTransferDelegateShareGroup::TransferIsInProgress() {
  return state_->TransferIsInProgress();
}

void AsyncPixelTransferDelegateShareGroup::WaitForTransferCompletion() {
  if (state_->TransferIsInProgress()) {
    state_->WaitForTransferCompletion();
    DCHECK(!state_->TransferIsInProgress());
  }

  // Fast track the BindTransfer, if applicable.
  for (AsyncPixelTransferManagerShareGroup::SharedState::TransferQueue::iterator
           iter = shared_state_->pending_allocations.begin();
       iter != shared_state_->pending_allocations.end();
       ++iter) {
    if (iter->get() != this)
      continue;

    shared_state_->pending_allocations.erase(iter);
    BindTransfer();
    break;
  }
}

void AsyncPixelTransferDelegateShareGroup::AsyncTexImage2D(
    const AsyncTexImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params,
    const base::Closure& bind_callback) {
  DCHECK(mem_params.shared_memory);
  DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
            mem_params.shm_size);
  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  // Mark the transfer in progress and save the late bind
  // callback, so we can notify the client when it is bound.
  shared_state_->pending_allocations.push_back(AsWeakPtr());
  state_->ScheduleAsyncTexImage2D(tex_params,
                                  mem_params,
                                  shared_state_->texture_upload_stats,
                                  bind_callback);
}

void AsyncPixelTransferDelegateShareGroup::AsyncTexSubImage2D(
    const AsyncTexSubImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params) {
  TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
               "width", tex_params.width,
               "height", tex_params.height);
  DCHECK(!state_->TransferIsInProgress());
  DCHECK(mem_params.shared_memory);
  DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
            mem_params.shm_size);
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  state_->ScheduleAsyncTexSubImage2D(
      tex_params, mem_params, shared_state_->texture_upload_stats);
}

AsyncPixelTransferManagerShareGroup::SharedState::SharedState()
    // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
    : texture_upload_stats(new AsyncPixelTransferUploadStats) {}

AsyncPixelTransferManagerShareGroup::SharedState::~SharedState() {}

AsyncPixelTransferManagerShareGroup::AsyncPixelTransferManagerShareGroup(
    gfx::GLContext* context) {
  g_transfer_thread.Pointer()->InitializeOnMainThread(context);
}

AsyncPixelTransferManagerShareGroup::~AsyncPixelTransferManagerShareGroup() {}

void AsyncPixelTransferManagerShareGroup::BindCompletedAsyncTransfers() {
  scoped_ptr<gfx::ScopedTextureBinder> texture_binder;

  while (!shared_state_.pending_allocations.empty()) {
    // Drop entries whose delegate was destroyed (weak pointer cleared).
    if (!shared_state_.pending_allocations.front().get()) {
      shared_state_.pending_allocations.pop_front();
      continue;
    }
    AsyncPixelTransferDelegateShareGroup* delegate =
        shared_state_.pending_allocations.front().get();
    // Terminate early, as all transfers finish in order, currently.
    if (delegate->TransferIsInProgress())
      break;

    if (!texture_binder)
      texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));

    // Used to set tex info from the gles2 cmd decoder once upload has
    // finished (it'll bind the texture and call a callback).
    delegate->BindTransfer();

    shared_state_.pending_allocations.pop_front();
  }
}

void AsyncPixelTransferManagerShareGroup::AsyncNotifyCompletion(
    const AsyncMemoryParams& mem_params,
    AsyncPixelTransferCompletionObserver* observer) {
  DCHECK(mem_params.shared_memory);
  DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
            mem_params.shm_size);

  // Post a PerformNotifyCompletion task to the upload thread. This task
  // will run after all async transfers are complete.
  transfer_message_loop_proxy()->PostTask(
      FROM_HERE,
      base::Bind(&PerformNotifyCompletion,
                 mem_params,
                 base::Owned(
                     new ScopedSafeSharedMemory(safe_shared_memory_pool(),
                                                mem_params.shared_memory,
                                                mem_params.shm_size)),
                 make_scoped_refptr(observer)));
}

uint32 AsyncPixelTransferManagerShareGroup::GetTextureUploadCount() {
  return shared_state_.texture_upload_stats->GetStats(NULL);
}

base::TimeDelta
AsyncPixelTransferManagerShareGroup::GetTotalTextureUploadTime() {
  base::TimeDelta total_texture_upload_time;
  shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
  return total_texture_upload_time;
}

// This implementation runs uploads eagerly on the transfer thread, so there
// is never deferred work to process on the main thread.
void AsyncPixelTransferManagerShareGroup::ProcessMorePendingTransfers() {
}

bool AsyncPixelTransferManagerShareGroup::NeedsProcessMorePendingTransfers() {
  return false;
}

AsyncPixelTransferDelegate*
AsyncPixelTransferManagerShareGroup::CreatePixelTransferDelegateImpl(
    gles2::TextureRef* ref,
    const AsyncTexImage2DParams& define_params) {
  return new AsyncPixelTransferDelegateShareGroup(
      &shared_state_, ref->service_id(), define_params);
}

}  // namespace gpu
// Copyright (c) 2018-2019 The Pexa Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <memory> #include <boost/test/unit_test.hpp> #include <fs.h> #include <test/util/setup_common.h> #include <wallet/db.h> BOOST_FIXTURE_TEST_SUITE(db_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(getwalletenv_file) { std::string test_name = "test_name.dat"; const fs::path datadir = GetDataDir(); fs::path file_path = datadir / test_name; std::ofstream f(file_path.BOOST_FILESYSTEM_C_STR); f.close(); std::string filename; std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, filename); BOOST_CHECK(filename == test_name); BOOST_CHECK(env->Directory() == datadir); } BOOST_AUTO_TEST_CASE(getwalletenv_directory) { std::string expected_name = "wallet.dat"; const fs::path datadir = GetDataDir(); std::string filename; std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(datadir, filename); BOOST_CHECK(filename == expected_name); BOOST_CHECK(env->Directory() == datadir); } BOOST_AUTO_TEST_CASE(getwalletenv_g_dbenvs_multiple) { fs::path datadir = GetDataDir() / "1"; fs::path datadir_2 = GetDataDir() / "2"; std::string filename; std::shared_ptr<BerkeleyEnvironment> env_1 = GetWalletEnv(datadir, filename); std::shared_ptr<BerkeleyEnvironment> env_2 = GetWalletEnv(datadir, filename); std::shared_ptr<BerkeleyEnvironment> env_3 = GetWalletEnv(datadir_2, filename); BOOST_CHECK(env_1 == env_2); BOOST_CHECK(env_2 != env_3); } BOOST_AUTO_TEST_CASE(getwalletenv_g_dbenvs_free_instance) { fs::path datadir = GetDataDir() / "1"; fs::path datadir_2 = GetDataDir() / "2"; std::string filename; std::shared_ptr <BerkeleyEnvironment> env_1_a = GetWalletEnv(datadir, filename); std::shared_ptr <BerkeleyEnvironment> env_2_a = GetWalletEnv(datadir_2, filename); env_1_a.reset(); std::shared_ptr<BerkeleyEnvironment> env_1_b = GetWalletEnv(datadir, filename); 
std::shared_ptr<BerkeleyEnvironment> env_2_b = GetWalletEnv(datadir_2, filename); BOOST_CHECK(env_1_a != env_1_b); BOOST_CHECK(env_2_a == env_2_b); } BOOST_AUTO_TEST_SUITE_END()
// Copyright (c) 2012-2016 The Bitcoin Core developers
// Copyright (c) 2017-2019 The Sheet Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "coins.h"

#include "consensus/consensus.h"
#include "memusage.h"
#include "random.h"
#include "util.h"
#include "validation.h"
#include "tinyformat.h"
#include "base58.h"

#include <assert.h>
#include <assets/assets.h>
#include <wallet/wallet.h>

// Abstract-base defaults: an empty view that knows about no coins.
bool CCoinsView::GetCoin(const COutPoint &outpoint, Coin &coin) const { return false; }
uint256 CCoinsView::GetBestBlock() const { return uint256(); }
std::vector<uint256> CCoinsView::GetHeadBlocks() const { return std::vector<uint256>(); }
bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return false; }
CCoinsViewCursor *CCoinsView::Cursor() const { return nullptr; }

// Default HaveCoin in terms of GetCoin (copies the coin; subclasses may
// override with a cheaper existence check).
bool CCoinsView::HaveCoin(const COutPoint &outpoint) const
{
    Coin coin;
    return GetCoin(outpoint, coin);
}

// CCoinsViewBacked: pure pass-through to a backing view.
CCoinsViewBacked::CCoinsViewBacked(CCoinsView *viewIn) : base(viewIn) { }
bool CCoinsViewBacked::GetCoin(const COutPoint &outpoint, Coin &coin) const { return base->GetCoin(outpoint, coin); }
bool CCoinsViewBacked::HaveCoin(const COutPoint &outpoint) const { return base->HaveCoin(outpoint); }
uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); }
std::vector<uint256> CCoinsViewBacked::GetHeadBlocks() const { return base->GetHeadBlocks(); }
void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; }
bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return base->BatchWrite(mapCoins, hashBlock); }
CCoinsViewCursor *CCoinsViewBacked::Cursor() const { return base->Cursor(); }
size_t CCoinsViewBacked::EstimateSize() const { return base->EstimateSize(); }

// Randomly salted hasher so an attacker cannot force hash collisions in the
// coins map.
SaltedOutpointHasher::SaltedOutpointHasher() : k0(GetRand(std::numeric_limits<uint64_t>::max())), k1(GetRand(std::numeric_limits<uint64_t>::max())) {}

CCoinsViewCache::CCoinsViewCache(CCoinsView *baseIn) : CCoinsViewBacked(baseIn), cachedCoinsUsage(0) {}

size_t CCoinsViewCache::DynamicMemoryUsage() const {
    return memusage::DynamicUsage(cacheCoins) + cachedCoinsUsage;
}

// Looks up a coin in the cache, pulling it up from the base view on a miss.
// Returns cacheCoins.end() when the coin does not exist anywhere.
CCoinsMap::iterator CCoinsViewCache::FetchCoin(const COutPoint &outpoint) const {
    CCoinsMap::iterator it = cacheCoins.find(outpoint);
    if (it != cacheCoins.end())
        return it;
    Coin tmp;
    if (!base->GetCoin(outpoint, tmp))
        return cacheCoins.end();
    CCoinsMap::iterator ret = cacheCoins.emplace(std::piecewise_construct, std::forward_as_tuple(outpoint), std::forward_as_tuple(std::move(tmp))).first;
    if (ret->second.coin.IsSpent()) {
        // The parent only has an empty entry for this outpoint; we can consider our
        // version as fresh.
        ret->second.flags = CCoinsCacheEntry::FRESH;
    }
    cachedCoinsUsage += ret->second.coin.DynamicMemoryUsage();
    return ret;
}

// Copies the coin out; returns false if absent or already spent.
bool CCoinsViewCache::GetCoin(const COutPoint &outpoint, Coin &coin) const {
    CCoinsMap::const_iterator it = FetchCoin(outpoint);
    if (it != cacheCoins.end()) {
        coin = it->second.coin;
        return !coin.IsSpent();
    }
    return false;
}

// Adds a coin to the cache.  |possible_overwrite| must be set when the caller
// knows a same-outpoint coin may legitimately already exist (duplicate
// pre-BIP30 coinbases); otherwise overwriting an unspent coin throws.
void CCoinsViewCache::AddCoin(const COutPoint &outpoint, Coin&& coin, bool possible_overwrite) {
    assert(!coin.IsSpent());
    if (coin.out.scriptPubKey.IsUnspendable()) return;
    CCoinsMap::iterator it;
    bool inserted;
    std::tie(it, inserted) = cacheCoins.emplace(std::piecewise_construct, std::forward_as_tuple(outpoint), std::tuple<>());
    bool fresh = false;
    if (!inserted) {
        cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage();
    }
    if (!possible_overwrite) {
        if (!it->second.coin.IsSpent()) {
            throw std::logic_error("Adding new coin that replaces non-pruned entry");
        }
        // FRESH only if the base view cannot already contain this coin
        // (i.e. the spent placeholder was never flushed as DIRTY).
        fresh = !(it->second.flags & CCoinsCacheEntry::DIRTY);
    }
    it->second.coin = std::move(coin);
    it->second.flags |= CCoinsCacheEntry::DIRTY | (fresh ? CCoinsCacheEntry::FRESH : 0);
    cachedCoinsUsage += it->second.coin.DynamicMemoryUsage();
}

// Adds all outputs of |tx| to the cache and, when the asset system is
// deployed and |assetsCache| is provided, applies the transaction's asset
// side effects (creation, reissue, transfer, qualifiers, restrictions).
// |undoAssetData| receives prior state needed to undo a reissue.
void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight, uint256 blockHash, bool check, CAssetsCache* assetsCache, std::pair<std::string, CBlockAssetUndo>* undoAssetData) {
    bool fCoinbase = tx.IsCoinBase();
    const uint256& txid = tx.GetHash();

    /** SHTX START */
    if (AreAssetsDeployed()) {
        if (assetsCache) {
            if (tx.IsNewAsset()) {
                // This works are all new root assets, sub asset, and restricted assets
                CNewAsset asset;
                std::string strAddress;
                AssetFromTransaction(tx, asset, strAddress);

                std::string ownerName;
                std::string ownerAddress;
                OwnerFromTransaction(tx, ownerName, ownerAddress);

                // Add the new asset to cache
                if (!assetsCache->AddNewAsset(asset, strAddress, nHeight, blockHash))
                    error("%s : Failed at adding a new asset to our cache. asset: %s", __func__, asset.strName);

                // Add the owner asset to cache
                if (!assetsCache->AddOwnerAsset(ownerName, ownerAddress))
                    error("%s : Failed at adding a new asset to our cache. asset: %s", __func__, asset.strName);

            } else if (tx.IsReissueAsset()) {
                CReissueAsset reissue;
                std::string strAddress;
                ReissueAssetFromTransaction(tx, reissue, strAddress);

                // By convention the reissue output is the last vout.
                int reissueIndex = tx.vout.size() - 1;

                // Get the asset before we change it
                CNewAsset asset;
                if (!assetsCache->GetAssetMetaDataIfExists(reissue.strName, asset))
                    error("%s: Failed to get the original asset that is getting reissued. Asset Name : %s", __func__, reissue.strName);

                if (!assetsCache->AddReissueAsset(reissue, strAddress, COutPoint(txid, reissueIndex)))
                    error("%s: Failed to reissue an asset. Asset Name : %s", __func__, reissue.strName);

                // Check to see if we are reissuing a restricted asset
                bool fFoundRestrictedAsset = false;
                AssetType type;
                IsAssetNameValid(asset.strName, type);
                if (type == AssetType::RESTRICTED) {
                    fFoundRestrictedAsset = true;
                }

                // Set the old IPFSHash for the blockundo
                bool fIPFSChanged = !reissue.strIPFSHash.empty();
                bool fUnitsChanged = reissue.nUnits != -1;
                bool fVerifierChanged = false;
                std::string strOldVerifier = "";

                // If we are reissuing a restricted asset, we need to check to see if the verifier string is being reissued
                if (fFoundRestrictedAsset) {
                    CNullAssetTxVerifierString verifier;
                    // Search through all outputs until you find a restricted verifier change.
                    for (auto index: tx.vout) {
                        if (index.scriptPubKey.IsNullAssetVerifierTxDataScript()) {
                            if (!AssetNullVerifierDataFromScript(index.scriptPubKey, verifier)) {
                                error("%s: Failed to get asset null verifier data and add it to the coins CTxOut: %s", __func__, index.ToString());
                                break;
                            }
                            fVerifierChanged = true;
                            break;
                        }
                    }

                    CNullAssetTxVerifierString oldVerifer{strOldVerifier};
                    if (fVerifierChanged && !assetsCache->GetAssetVerifierStringIfExists(asset.strName, oldVerifer))
                        error("%s : Failed to get asset original verifier string that is getting reissued, Asset Name: %s", __func__, asset.strName);

                    if (fVerifierChanged) {
                        strOldVerifier = oldVerifer.verifier_string;
                    }

                    // Add the verifier to the cache if there was one found
                    // NOTE(review): format string below has three %s specifiers but only
                    // two arguments (no __func__) — likely a missing argument; confirm
                    // against upstream before changing.
                    if (fVerifierChanged && !assetsCache->AddRestrictedVerifier(asset.strName, verifier.verifier_string))
                        error("%s : Failed at adding a restricted verifier to our cache: asset: %s, verifier : %s", asset.strName, verifier.verifier_string);
                }

                // If any of the following items were changed by reissuing, we need to database the old values so it can be undone correctly
                if (fIPFSChanged || fUnitsChanged || fVerifierChanged) {
                    undoAssetData->first = reissue.strName; // Asset Name
                    undoAssetData->second = CBlockAssetUndo {fIPFSChanged, fUnitsChanged, asset.strIPFSHash, asset.units, ASSET_UNDO_INCLUDES_VERIFIER_STRING, fVerifierChanged, strOldVerifier}; // ipfschanged, unitchanged, Old Assets IPFSHash, old units
                }
            } else if (tx.IsNewUniqueAsset()) {
                // A single transaction can mint several unique assets, one per output.
                for (int n = 0; n < (int)tx.vout.size(); n++) {
                    auto out = tx.vout[n];

                    CNewAsset asset;
                    std::string strAddress;

                    if (IsScriptNewUniqueAsset(out.scriptPubKey)) {
                        AssetFromScript(out.scriptPubKey, asset, strAddress);

                        // Add the new asset to cache
                        if (!assetsCache->AddNewAsset(asset, strAddress, nHeight, blockHash))
                            error("%s : Failed at adding a new asset to our cache. asset: %s", __func__, asset.strName);
                    }
                }
            } else if (tx.IsNewMsgChannelAsset()) {
                CNewAsset asset;
                std::string strAddress;
                MsgChannelAssetFromTransaction(tx, asset, strAddress);

                // Add the new asset to cache
                if (!assetsCache->AddNewAsset(asset, strAddress, nHeight, blockHash))
                    error("%s : Failed at adding a new asset to our cache. asset: %s", __func__, asset.strName);
            } else if (tx.IsNewQualifierAsset()) {
                CNewAsset asset;
                std::string strAddress;
                QualifierAssetFromTransaction(tx, asset, strAddress);

                // Add the new asset to cache
                if (!assetsCache->AddNewAsset(asset, strAddress, nHeight, blockHash))
                    error("%s : Failed at adding a new qualifier asset to our cache. asset: %s", __func__, asset.strName);
            } else if (tx.IsNewRestrictedAsset()) {
                CNewAsset asset;
                std::string strAddress;
                RestrictedAssetFromTransaction(tx, asset, strAddress);

                // Add the new asset to cache
                if (!assetsCache->AddNewAsset(asset, strAddress, nHeight, blockHash))
                    error("%s : Failed at adding a new restricted asset to our cache. asset: %s", __func__, asset.strName);

                // Find the restricted verifier string and cache it
                CNullAssetTxVerifierString verifier;
                // Search through all outputs until you find a restricted verifier change.
                for (auto index: tx.vout) {
                    if (index.scriptPubKey.IsNullAssetVerifierTxDataScript()) {
                        CNullAssetTxVerifierString verifier;
                        if (!AssetNullVerifierDataFromScript(index.scriptPubKey, verifier))
                            error("%s: Failed to get asset null data and add it to the coins CTxOut: %s", __func__, index.ToString());

                        // Add the verifier to the cache
                        // NOTE(review): same three-%s / two-argument mismatch as above —
                        // __func__ appears to be missing; confirm against upstream.
                        if (!assetsCache->AddRestrictedVerifier(asset.strName, verifier.verifier_string))
                            error("%s : Failed at adding a restricted verifier to our cache: asset: %s, verifier : %s", asset.strName, verifier.verifier_string);

                        break;
                    }
                }
            }
        }
    }
    /** SHTX END */

    for (size_t i = 0; i < tx.vout.size(); ++i) {
        bool overwrite = check ? cache.HaveCoin(COutPoint(txid, i)) : fCoinbase;
        // Always set the possible_overwrite flag to AddCoin for coinbase txn, in order to correctly
        // deal with the pre-BIP30 occurrences of duplicate coinbase transactions.
        cache.AddCoin(COutPoint(txid, i), Coin(tx.vout[i], nHeight, fCoinbase), overwrite);

        /** SHTX START */
        if (AreAssetsDeployed()) {
            if (assetsCache) {
                CAssetOutputEntry assetData;
                if (GetAssetData(tx.vout[i].scriptPubKey, assetData)) {

                    // If this is a transfer asset, and the amount is greater than zero
                    // We want to make sure it is added to the asset addresses database
                    // NOTE(review): the transfer branch (including the messaging
                    // subscription below) only runs when fAssetIndex is enabled —
                    // confirm that is intended rather than an indentation slip.
                    if (fAssetIndex == true)
                    if (assetData.type == TX_TRANSFER_ASSET && assetData.nAmount > 0) {
                        // Create the objects needed from the assetData
                        CAssetTransfer assetTransfer(assetData.assetName, assetData.nAmount, assetData.message, assetData.expireTime);
                        std::string address = EncodeDestination(assetData.destination);

                        // Add the transfer asset data to the asset cache
                        if (!assetsCache->AddTransferAsset(assetTransfer, address, COutPoint(txid, i), tx.vout[i]))
                            LogPrintf("%s : ERROR - Failed to add transfer asset CTxOut: %s\n", __func__, tx.vout[i].ToString());

                        /** Subscribe to new message channels if they are sent to a new address, or they are the owner token or message channel */
#ifdef ENABLE_WALLET
                        if (fMessaging && pMessageSubscribedChannelsCache) {
                            LOCK(cs_messaging);
                            if (vpwallets.size() && vpwallets[0]->IsMine(tx.vout[i]) == ISMINE_SPENDABLE) {
                                AssetType aType;
                                IsAssetNameValid(assetTransfer.strName, aType);

                                if (aType == AssetType::ROOT || aType == AssetType::SUB) {
                                    if (!IsChannelSubscribed(GetParentName(assetTransfer.strName) + OWNER_TAG)) {
                                        if (!IsAddressSeen(address)) {
                                            AddChannel(GetParentName(assetTransfer.strName) + OWNER_TAG);
                                            AddAddressSeen(address);
                                        }
                                    }
                                } else if (aType == AssetType::OWNER || aType == AssetType::MSGCHANNEL) {
                                    AddChannel(assetTransfer.strName);
                                    AddAddressSeen(address);
                                }
                            }
                        }
#endif
                    } else if (assetData.type == TX_NEW_ASSET) {
                        /** Subscribe to new message channels if they are assets you created, or are new msgchannels of channels already being watched */
#ifdef ENABLE_WALLET
                        if (fMessaging && pMessageSubscribedChannelsCache) {
                            LOCK(cs_messaging);
                            if (vpwallets.size()) {
                                AssetType aType;
                                IsAssetNameValid(assetData.assetName, aType);
                                if (vpwallets[0]->IsMine(tx.vout[i]) == ISMINE_SPENDABLE) {
                                    if (aType == AssetType::ROOT || aType == AssetType::SUB) {
                                        AddChannel(assetData.assetName + OWNER_TAG);
                                        AddAddressSeen(EncodeDestination(assetData.destination));
                                    } else if (aType == AssetType::OWNER || aType == AssetType::MSGCHANNEL) {
                                        AddChannel(assetData.assetName);
                                        AddAddressSeen(EncodeDestination(assetData.destination));
                                    }
                                } else {
                                    if (aType == AssetType::MSGCHANNEL) {
                                        if (IsChannelSubscribed(GetParentName(assetData.assetName) + OWNER_TAG)) {
                                            AddChannel(assetData.assetName);
                                        }
                                    }
                                }
                            }
                        }
#endif
                    }
                }

                // Apply null-asset scripts: address freezes/qualifiers and
                // global restriction toggles.
                CScript script = tx.vout[i].scriptPubKey;
                if (script.IsNullAsset()) {
                    if (script.IsNullAssetTxDataScript()) {
                        CNullAssetTxData data;
                        std::string address;
                        AssetNullDataFromScript(script, data, address);

                        AssetType type;
                        IsAssetNameValid(data.asset_name, type);

                        if (type == AssetType::RESTRICTED) {
                            assetsCache->AddRestrictedAddress(data.asset_name, address, data.flag ? RestrictedType::FREEZE_ADDRESS : RestrictedType::UNFREEZE_ADDRESS);
                        } else if (type == AssetType::QUALIFIER || type == AssetType::SUB_QUALIFIER) {
                            assetsCache->AddQualifierAddress(data.asset_name, address, data.flag ? QualifierType::ADD_QUALIFIER : QualifierType::REMOVE_QUALIFIER);
                        }
                    } else if (script.IsNullGlobalRestrictionAssetTxDataScript()) {
                        CNullAssetTxData data;
                        GlobalAssetNullDataFromScript(script, data);
                        assetsCache->AddGlobalRestricted(data.asset_name, data.flag ? RestrictedType::GLOBAL_FREEZE : RestrictedType::GLOBAL_UNFREEZE);
                    }
                }
            }
        }
        /** SHTX END */
    }
}

// Spends a coin, optionally moving it into |moveout| for undo data, and
// mirrors the spend into the asset cache when assets are deployed.
bool CCoinsViewCache::SpendCoin(const COutPoint &outpoint, Coin* moveout, CAssetsCache* assetsCache) {
    CCoinsMap::iterator it = FetchCoin(outpoint);
    if (it == cacheCoins.end())
        return false;
    cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage();

    /** SHTX START */
    // Copy before the move below so the asset cache still sees the output.
    Coin tempCoin = it->second.coin;
    /** SHTX END */

    if (moveout) {
        *moveout = std::move(it->second.coin);
    }
    if (it->second.flags & CCoinsCacheEntry::FRESH) {
        // FRESH means the base view never saw this coin; just drop the entry.
        cacheCoins.erase(it);
    } else {
        it->second.flags |= CCoinsCacheEntry::DIRTY;
        it->second.coin.Clear();
    }

    /** SHTX START */
    if (AreAssetsDeployed()) {
        if (assetsCache) {
            if (!assetsCache->TrySpendCoin(outpoint, tempCoin.out)) {
                return error("%s : Failed to try and spend the asset. COutPoint : %s", __func__, outpoint.ToString());
            }
        }
    }
    /** SHTX END */

    return true;
}

static const Coin coinEmpty;

// Returns a reference to the coin, or to a shared empty coin when absent.
// The reference is only valid until the next cache modification.
const Coin& CCoinsViewCache::AccessCoin(const COutPoint &outpoint) const {
    CCoinsMap::const_iterator it = FetchCoin(outpoint);
    if (it == cacheCoins.end()) {
        return coinEmpty;
    } else {
        return it->second.coin;
    }
}

bool CCoinsViewCache::HaveCoin(const COutPoint &outpoint) const {
    CCoinsMap::const_iterator it = FetchCoin(outpoint);
    return (it != cacheCoins.end() && !it->second.coin.IsSpent());
}

// Like HaveCoin but never consults the base view.
bool CCoinsViewCache::HaveCoinInCache(const COutPoint &outpoint) const {
    CCoinsMap::const_iterator it = cacheCoins.find(outpoint);
    return (it != cacheCoins.end() && !it->second.coin.IsSpent());
}

uint256 CCoinsViewCache::GetBestBlock() const {
    if (hashBlock.IsNull())
        hashBlock = base->GetBestBlock();
    return hashBlock;
}

void CCoinsViewCache::SetBestBlock(const uint256 &hashBlockIn) {
    hashBlock = hashBlockIn;
}

// Absorbs a child cache's DIRTY entries into this cache, maintaining the
// FRESH/DIRTY invariants (FRESH = base view has no unspent version).
bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlockIn) {
    for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) {
        if (it->second.flags & CCoinsCacheEntry::DIRTY) { // Ignore non-dirty entries (optimization).
            CCoinsMap::iterator itUs = cacheCoins.find(it->first);
            if (itUs == cacheCoins.end()) {
                // The parent cache does not have an entry, while the child does
                // We can ignore it if it's both FRESH and pruned in the child
                if (!(it->second.flags & CCoinsCacheEntry::FRESH && it->second.coin.IsSpent())) {
                    // Otherwise we will need to create it in the parent
                    // and move the data up and mark it as dirty
                    CCoinsCacheEntry& entry = cacheCoins[it->first];
                    entry.coin = std::move(it->second.coin);
                    cachedCoinsUsage += entry.coin.DynamicMemoryUsage();
                    entry.flags = CCoinsCacheEntry::DIRTY;
                    // We can mark it FRESH in the parent if it was FRESH in the child
                    // Otherwise it might have just been flushed from the parent's cache
                    // and already exist in the grandparent
                    if (it->second.flags & CCoinsCacheEntry::FRESH)
                        entry.flags |= CCoinsCacheEntry::FRESH;
                }
            } else {
                // Assert that the child cache entry was not marked FRESH if the
                // parent cache entry has unspent outputs. If this ever happens,
                // it means the FRESH flag was misapplied and there is a logic
                // error in the calling code.
                if ((it->second.flags & CCoinsCacheEntry::FRESH) && !itUs->second.coin.IsSpent())
                    throw std::logic_error("FRESH flag misapplied to cache entry for base transaction with spendable outputs");

                // Found the entry in the parent cache
                if ((itUs->second.flags & CCoinsCacheEntry::FRESH) && it->second.coin.IsSpent()) {
                    // The grandparent does not have an entry, and the child is
                    // modified and being pruned. This means we can just delete
                    // it from the parent.
                    cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage();
                    cacheCoins.erase(itUs);
                } else {
                    // A normal modification.
                    cachedCoinsUsage -= itUs->second.coin.DynamicMemoryUsage();
                    itUs->second.coin = std::move(it->second.coin);
                    cachedCoinsUsage += itUs->second.coin.DynamicMemoryUsage();
                    itUs->second.flags |= CCoinsCacheEntry::DIRTY;
                    // NOTE: It is possible the child has a FRESH flag here in
                    // the event the entry we found in the parent is pruned.
But // we must not copy that FRESH flag to the parent as that // pruned state likely still needs to be communicated to the // grandparent. } } } CCoinsMap::iterator itOld = it++; mapCoins.erase(itOld); } hashBlock = hashBlockIn; return true; } bool CCoinsViewCache::Flush() { bool fOk = base->BatchWrite(cacheCoins, hashBlock); cacheCoins.clear(); cachedCoinsUsage = 0; return fOk; } void CCoinsViewCache::Uncache(const COutPoint& hash) { CCoinsMap::iterator it = cacheCoins.find(hash); if (it != cacheCoins.end() && it->second.flags == 0) { cachedCoinsUsage -= it->second.coin.DynamicMemoryUsage(); cacheCoins.erase(it); } } unsigned int CCoinsViewCache::GetCacheSize() const { return cacheCoins.size(); } CAmount CCoinsViewCache::GetValueIn(const CTransaction& tx) const { if (tx.IsCoinBase()) return 0; CAmount nResult = 0; for (unsigned int i = 0; i < tx.vin.size(); i++) nResult += AccessCoin(tx.vin[i].prevout).out.nValue; return nResult; } bool CCoinsViewCache::HaveInputs(const CTransaction& tx) const { if (!tx.IsCoinBase()) { for (unsigned int i = 0; i < tx.vin.size(); i++) { if (!HaveCoin(tx.vin[i].prevout)) { return false; } } } return true; } static const size_t MIN_TRANSACTION_OUTPUT_WEIGHT = WITNESS_SCALE_FACTOR * ::GetSerializeSize(CTxOut(), SER_NETWORK, PROTOCOL_VERSION); //static const size_t MAX_OUTPUTS_PER_BLOCK = MAX_BLOCK_WEIGHT / MIN_TRANSACTION_OUTPUT_WEIGHT; const Coin& AccessByTxid(const CCoinsViewCache& view, const uint256& txid) { COutPoint iter(txid, 0); while (iter.n < GetMaxBlockWeight() / MIN_TRANSACTION_OUTPUT_WEIGHT) { const Coin& alternate = view.AccessCoin(iter); if (!alternate.IsSpent()) return alternate; ++iter.n; } return coinEmpty; }
#pragma once

#include <functional>
#include <map>
#include <set>
#include <string>
#include <vector>

#include "google/protobuf/descriptor.h"

namespace principia {
namespace tools {
namespace internal_journal_proto_processor {

using ::google::protobuf::Descriptor;
using ::google::protobuf::FieldDescriptor;
using ::google::protobuf::FieldOptions;
using ::google::protobuf::FileDescriptor;

// Traverses the journal protocol buffer descriptors and generates the C# and
// C++ glue code (interface declarations, interchange-type declarations, and
// serialization/journalling implementations) that the getters below expose,
// one vector entry per generated snippet.
class JournalProtoProcessor final {
 public:
  // Runs the whole traversal; must be called before any of the getters.
  void ProcessMessages();

  // ksp_plugin_adapter/interface.cs
  std::vector<std::string> GetCsInterfaceMethodDeclarations() const;
  std::vector<std::string> GetCsInterfaceTypeDeclarations() const;

  // ksp_plugin/interface.hpp
  std::vector<std::string> GetCxxInterfaceMethodDeclarations() const;
  std::vector<std::string> GetCxxInterfaceTypeDeclarations() const;

  // journal/profiles.{hpp,cpp}
  std::vector<std::string> GetCxxInterchangeImplementations() const;
  std::vector<std::string> GetCxxMethodImplementations() const;
  std::vector<std::string> GetCxxMethodTypes() const;

  // journal/player.cpp
  std::vector<std::string> GetCxxPlayStatements() const;

 private:
  // Per-field-shape handlers; dispatched to by ProcessField and friends below.
  void ProcessRepeatedMessageField(FieldDescriptor const* descriptor);

  void ProcessOptionalNonStringField(FieldDescriptor const* descriptor,
                                     std::string const& cs_boxed_type,
                                     std::string const& cs_unboxed_type,
                                     std::string const& cxx_type);
  void ProcessOptionalDoubleField(FieldDescriptor const* descriptor);
  void ProcessOptionalInt32Field(FieldDescriptor const* descriptor);
  void ProcessOptionalMessageField(FieldDescriptor const* descriptor);

  void ProcessRequiredFixed32Field(FieldDescriptor const* descriptor);
  void ProcessRequiredFixed64Field(FieldDescriptor const* descriptor);
  void ProcessRequiredMessageField(FieldDescriptor const* descriptor);
  void ProcessRequiredBoolField(FieldDescriptor const* descriptor);
  void ProcessRequiredBytesField(FieldDescriptor const* descriptor);
  void ProcessRequiredDoubleField(FieldDescriptor const* descriptor);
  void ProcessRequiredInt32Field(FieldDescriptor const* descriptor);
  void ProcessRequiredInt64Field(FieldDescriptor const* descriptor);
  void ProcessRequiredUint32Field(FieldDescriptor const* descriptor);

  void ProcessSingleStringField(FieldDescriptor const* descriptor);

  // Dispatch by field label; each forwards to the per-type handlers above.
  void ProcessOptionalField(FieldDescriptor const* descriptor);
  void ProcessRepeatedField(FieldDescriptor const* descriptor);
  void ProcessRequiredField(FieldDescriptor const* descriptor);

  void ProcessField(FieldDescriptor const* descriptor);

  // Message-level traversal entry points.
  void ProcessInOut(Descriptor const* descriptor,
                    std::vector<FieldDescriptor const*>* field_descriptors);
  void ProcessReturn(Descriptor const* descriptor);

  void ProcessInterchangeMessage(Descriptor const* descriptor);
  void ProcessMethodExtension(Descriptor const* descriptor);

  // As the recursive methods above traverse the protocol buffer type
  // declarations, they enter in the following maps (and set) various pieces of
  // information to help in generating C++ and C# code.  For the simplest use
  // cases (mostly, the generation of declarations), the values are merely one
  // or several strings for C++ or C# code snippets.  For more complex use
  // cases (the generation of implementation code) the values are lambdas which
  // transform one or two code snippets by wrapping them in a more complex
  // structure.

  // We use cxx to designate C++ code and cs to designate C# code.

  // The fields that are in.  Note that the out fields present in |in_out_| are
  // not in |in_|.
  std::set<FieldDescriptor const*> in_;

  // The fields that are in-out, i.e. for which fields of the same name exist
  // in both the In and the Out messages.  Note that both fields are present in
  // this set.  Those fields are transmitted through the interface with an
  // extra level of indirection.
  std::set<FieldDescriptor const*> in_out_;

  // The fields that are out.  Those fields are transmitted through the
  // interface with an extra level of indirection.  Note that the in fields
  // present in |in_out_| are not in |out_|.
  std::set<FieldDescriptor const*> out_;

  // For fields that have a (size) option, the name of the size member variable
  // in the In or Out struct.  Special processing is required when filling
  // those fields from the struct members.  No data for other fields.  This map
  // is language-independent.
  std::map<FieldDescriptor const*, std::string> size_member_name_;

  // For all fields, a lambda that takes the name of a local variable
  // containing data extracted (and deserialized) from the field and returns a
  // list of expressions to be passed to the interface.  Deals with passing
  // address and size for fields that have a size member, and with passing by
  // reference for fields that are in-out or optional.
  std::map<FieldDescriptor const*,
           std::function<std::vector<std::string>(
               std::string const& identifier)>> field_cxx_arguments_fn_;

  // For all fields, a lambda that takes a serialized expression |expr| and a
  // protocol buffer denoted by |prefix| and returns a statement to assign
  // |expr| to the proper field of |prefix|.  |prefix| must be suitable as a
  // prefix of a call, i.e., it must be a pointer followed by "->" or a
  // reference followed by ".".  The lambda calls |field_cxx_serializer_fn_| to
  // serialize expressions as necessary; thus, |expr| must *not* be serialized.
  std::map<FieldDescriptor const*,
           std::function<std::string(std::string const& prefix,
                                     std::string const& expr)>>
      field_cxx_assignment_fn_;

  // For fields that have an (is_consumed) or (is_consumed_if) option, a lambda
  // producing a statement to call Delete() to remove the appropriate entry
  // from the pointer_map.  |expr| is a uint64 expression for the entry to be
  // removed (typically something like |message.in().bar()|).  No data for
  // other fields.
  std::map<FieldDescriptor const*,
           std::function<std::string(std::string const& expr)>>
      field_cxx_deleter_fn_;

  // For all fields, a lambda that takes an expression for reading a protobuf
  // field (typically something like |message.in().bar()|) and returns an
  // expression for the deserialized form of |expr| suitable for storing in a
  // local variable (typically a call to some Deserialize function, but other
  // transformations are possible).
  std::map<FieldDescriptor const*,
           std::function<std::string(std::string const& expr)>>
      field_cxx_deserializer_fn_;

  // For all fields, a lambda that takes an expression for a struct member and
  // returns an expression that dereferences it if the field uses a level of
  // indirection (e.g., is optional or in-out).
  std::map<FieldDescriptor const*,
           std::function<std::string(std::string const& expr)>>
      field_cxx_indirect_member_get_fn_;

  // For fields that have an (is_produced) or (is_produced_if) option, a lambda
  // producing a statement to call Insert() to enter the appropriate entry into
  // the pointer_map.  |expr1| is an uint64 expression for the serialized value
  // of the pointer (typically something like |message.in().bar()|), |expr2| is
  // a pointer expression for the current value of the pointer (typically the
  // name of a local variable).  No data for other fields.
  std::map<FieldDescriptor const*,
           std::function<std::string(std::string const& expr1,
                                     std::string const& expr2)>>
      field_cxx_inserter_fn_;

  // For all fields, a lambda that takes a C# parameter type as stored in
  // |field_cs_type_|, and adds a mode to it.
  std::map<FieldDescriptor const*,
           std::function<std::string(std::string const& type)>>
      field_cs_mode_fn_;

  // For all fields, a lambda that takes a C++ parameter or member type as
  // stored in |field_cxx_type_|, and adds a mode to it.
  std::map<FieldDescriptor const*,
           std::function<std::string(std::string const& type)>>
      field_cxx_mode_fn_;

  // For all fields, a lambda that takes a pointer expression and a statement
  // generated by |field_cxx_assignment_fn_|.  If the field is optional,
  // returns an if statement that only executes |stmt| if |expr| in nonnull.
  std::map<FieldDescriptor const*,
           std::function<std::string(std::string const& expr,
                                     std::string const& stmt)>>
      field_cxx_optional_assignment_fn_;

  // For all fields, a lambda that takes a condition to take for the presence
  // of an optional field (typically something like |message.in().has_bar()|)
  // and a deserialized expression for reading the field (typically the result
  // of |field_cxx_deserializer_fn_|) and returns a conditional expression for
  // either a pointer to the deserialized value or nullptr.
  std::map<FieldDescriptor const*,
           std::function<std::string(std::string const& condition,
                                     std::string const& expr)>>
      field_cxx_optional_pointer_fn_;

  // For all fields, a lambda that takes an expression for reading a local
  // variable (possibly with dereferencing) and returns a protocol buffer
  // expression suitable for assigning to some field either using set_bar() or
  // mutable_bar() (typically the result is a call to some Serialize function).
  std::map<FieldDescriptor const*,
           std::function<std::string(std::string const& expr)>>
      field_cxx_serializer_fn_;

  // The C# attribute for marshalling a field.
  std::map<FieldDescriptor const*, std::string> field_cs_marshal_;

  // The C# type for a field, suitable for use in a private member when the
  // actual data cannot be exposed directly (think bool).
  std::map<FieldDescriptor const*, std::string> field_cs_private_type_;

  // The C#/C++ type for a field, suitable for use in a member or parameter
  // declaration, in a typedef, etc.
  std::map<FieldDescriptor const*, std::string> field_cs_type_;
  std::map<FieldDescriptor const*, std::string> field_cxx_type_;

  // The C#/C++ declaration of an interface method corresponding to a method
  // message.  The key is a descriptor for a method message.
  std::map<Descriptor const*, std::string> cs_interface_method_declaration_;
  std::map<Descriptor const*, std::string> cxx_interface_method_declaration_;

  // A list of C#/C++ parameters for an interface method.  The key is a
  // descriptor for an In or Out message.  Produced but not used for Out
  // messages.
  std::map<Descriptor const*, std::vector<std::string>>
      cs_interface_parameters_;
  std::map<Descriptor const*, std::vector<std::string>>
      cxx_interface_parameters_;

  // The C#/C++ return type of an interface method.  The key is a descriptor
  // for a Return message.
  std::map<Descriptor const*, std::string> cs_interface_return_type_;
  std::map<Descriptor const*, std::string> cxx_interface_return_type_;

  // The C# attribute for marshalling the return value of an interface method.
  // The key is a descriptor for a Return message.
  std::map<Descriptor const*, std::string> cs_interface_return_marshal_;

  // The C#/C++ definition of a type corresponding to an interchange message.
  // The key is a descriptor for an interchange message.
  std::map<Descriptor const*, std::string> cs_interface_type_declaration_;
  std::map<Descriptor const*, std::string> cxx_interface_type_declaration_;

  // The definitions of the Serialize and Deserialize functions for interchange
  // messages.  The key is a descriptor for an interchange message.
  std::map<Descriptor const*, std::string> cxx_deserialize_definition_;
  std::map<Descriptor const*, std::string> cxx_serialize_definition_;

  // The statements to be included in the body of the Play function.
  std::map<Descriptor const*, std::string> cxx_play_statement_;

  // The entire sequence of statements for the body of a Fill function.  The
  // key is a descriptor for an In or Out message.
  std::map<Descriptor const*, std::string> cxx_fill_body_;

  // A code snippet that goes before the call to the interface in the
  // body of the Run function.  The key is a descriptor for an In or Out
  // message.  Produced but not used for Out messages.
  std::map<Descriptor const*, std::string> cxx_run_body_prolog_;

  // A list of code snippets for arguments to be passed to the interface in the
  // body of the Run function.
  std::map<Descriptor const*, std::vector<std::string>> cxx_run_arguments_;

  // A code snippet that goes after the call to the interface in the
  // body of the Run function.  The key is a descriptor for an In or Out
  // message.
  std::map<Descriptor const*, std::string> cxx_run_body_epilog_;

  // A code snippet for the implementation of the Fill and Run functions.  The
  // key is a descriptor for a method message.
  std::map<Descriptor const*, std::string> cxx_functions_implementation_;

  // A code snippet for the declaration of the top-level struct for a method.
  // The key is a descriptor for a method message.
  std::map<Descriptor const*, std::string> cxx_toplevel_type_declaration_;

  // A code snippet for the declaration of a nested In or Out struct or a
  // Return typedef.  The key is a descriptor for an In, Out or Return message.
  std::map<Descriptor const*, std::string> cxx_nested_type_declaration_;
};

}  // namespace internal_journal_proto_processor

using internal_journal_proto_processor::JournalProtoProcessor;

}  // namespace tools
}  // namespace principia
#include "ffmpegs.h"
#include <QtDebug>
#include <QFile>

extern "C" {
// Resampling API
#include <libswresample/swresample.h>
// Utility API (e.g. error handling)
#include <libavutil/avutil.h>
}

// Macro: turn an FFmpeg error code into a readable message in a local errbuf.
#define ERROR_BUF(ret) \
    char errbuf[1024]; \
    av_strerror(ret, errbuf, sizeof (errbuf));

FFmpegs::FFmpegs() {

}

// Convenience overload: unpacks the spec structs and forwards to the
// parameterized overload below.
void FFmpegs::resampleAudio(ResampleAudioSpec &in, ResampleAudioSpec &out) {
    resampleAudio(in.filename, in.sampleRate, in.sampleFmt, in.chLayout,
                  out.filename, out.sampleRate, out.sampleFmt, out.chLayout);
}

// Resamples raw PCM read from inFilename (with the given rate/format/layout)
// into outFilename using libswresample.  All buffers, files and the context
// are released at the `end` label (goto-based cleanup).
void FFmpegs::resampleAudio(const char *inFilename, int inSampleRate, AVSampleFormat inSampleFmt, int inChLayout,
                            const char *outFilename, int outSampleRate, AVSampleFormat outSampleFmt, int outChLayout) {
    // Round down: AV_ROUND_DOWN(2.66) = 2
    // qDebug() << av_rescale_rnd(8, 1, 3, AV_ROUND_DOWN);
    // Round up: AV_ROUND_UP(1.25) = 2
    // qDebug() << av_rescale_rnd(5, 1, 4, AV_ROUND_UP);

    // File objects
    QFile inFile(inFilename);
    QFile outFile(outFilename);

    // Input buffer
    // Pointer to the buffer planes
    uint8_t **inData = nullptr;
    // Size of the buffer in bytes
    int inLinesize = 0;
    // Number of channels
    int inChs = av_get_channel_layout_nb_channels(inChLayout);
    // Size of one sample across all channels
    int inBytesPerSample = inChs * av_get_bytes_per_sample(inSampleFmt);
    // Number of samples the buffer can hold
    int inSamples = 1024;
    // Number of bytes read from the input file
    int len = 0;

    // Output buffer
    // Pointer to the buffer planes
    uint8_t **outData = nullptr;
    // Size of the buffer in bytes
    int outLinesize = 0;
    // Number of channels
    int outChs = av_get_channel_layout_nb_channels(outChLayout);
    // Size of one sample across all channels
    int outBytesPerSample = outChs * av_get_bytes_per_sample(outSampleFmt);
    // Number of samples the output buffer can hold (rounded up)
    int outSamples = av_rescale_rnd(outSampleRate, inSamples, inSampleRate, AV_ROUND_UP);

    /*
     inSampleRate     inSamples
     ------------- = -----------
     outSampleRate    outSamples

     outSamples = outSampleRate * inSamples / inSampleRate
     */

    qDebug() << "输入缓冲区" << inSampleRate << inSamples;
    qDebug() << "输出缓冲区" << outSampleRate << outSamples;

    // Return code
    int ret = 0;

    // Create the resampling context
    SwrContext *ctx = swr_alloc_set_opts(nullptr,
                                         // output parameters
                                         outChLayout, outSampleFmt, outSampleRate,
                                         // input parameters
                                         inChLayout, inSampleFmt, inSampleRate,
                                         0, nullptr);
    if (!ctx) {
        qDebug() << "swr_alloc_set_opts error";
        goto end;
    }

    // Initialize the resampling context
    ret = swr_init(ctx);
    if (ret < 0) {
        ERROR_BUF(ret);
        qDebug() << "swr_init error:" << errbuf;
        goto end;
    }

    /*
     Pointer types (64bit, 8 bytes each)
     int *;
     double *;
     void *;
     int **;
     int ***;
     int ******;
     */
    // int *p;
    // *(p + i) == p[i]
    // *(p + 0) == p[0]
    // *p == p[0]

    // int *p = new int[15];
    // int *p = av_calloc(15, sizeof (int));
    // int **pp = av_calloc(7, sizeof (int *));
    // uint8_t **inData = av_calloc(1, sizeof(uint8_t *));

    // Create the input buffer
    ret = av_samples_alloc_array_and_samples(
              &inData,
              &inLinesize,
              inChs,
              inSamples,
              inSampleFmt,
              1);
    if (ret < 0) {
        ERROR_BUF(ret);
        qDebug() << "av_samples_alloc_array_and_samples error:" << errbuf;
        goto end;
    }

    // Create the output buffer
    ret = av_samples_alloc_array_and_samples(
              &outData,
              &outLinesize,
              outChs,
              outSamples,
              outSampleFmt,
              1);
    if (ret < 0) {
        ERROR_BUF(ret);
        qDebug() << "av_samples_alloc_array_and_samples error:" << errbuf;
        goto end;
    }

    // Open the files
    if (!inFile.open(QFile::ReadOnly)) {
        qDebug() << "file open error:" << inFilename;
        goto end;
    }
    if (!outFile.open(QFile::WriteOnly)) {
        qDebug() << "file open error:" << outFilename;
        goto end;
    }

    // Read data from the input file
    // inData[0] == *inData
    while ((len = inFile.read((char *) inData[0], inLinesize)) > 0) {
        // Number of samples actually read
        inSamples = len / inBytesPerSample;

        // Resample (the return value is the number of converted samples)
        ret = swr_convert(ctx,
                          outData, outSamples,
                          (const uint8_t **) inData, inSamples);
        if (ret < 0) {
            ERROR_BUF(ret);
            qDebug() << "swr_convert error:" << errbuf;
            goto end;
        }

        // Option 1: write the converted data to the output file
        // int size = av_samples_get_buffer_size(nullptr, outChs, ret, outSampleFmt, 1);
        // outFile.write((char *) outData[0], size);

        // Option 2: write the converted data to the output file
        // outData[0] == *outData
        outFile.write((char *) outData[0], ret * outBytesPerSample);
    }

    // Drain any samples still buffered in the resampler (already converted)
    while ((ret = swr_convert(ctx, outData, outSamples, nullptr, 0)) > 0) {
        int size = av_samples_get_buffer_size(nullptr, outChs, ret, outSampleFmt, 1);
        outFile.write((char *) outData[0], size);
    }

end:
    // Release resources
    // Close the files
    inFile.close();
    outFile.close();

    // Free the input buffer
    if (inData) {
        av_freep(&inData[0]);
    }
    av_freep(&inData);

    // Free the output buffer
    if (outData) {
        av_freep(&outData[0]);
    }
    av_freep(&outData);

    // Free the resampling context
    swr_free(&ctx);

    //void freep(void **ptr) {
    //    free(*ptr);
    //    *ptr = nullptr;
    //}
}
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/*
 Copyright (C) 2000, 2001, 2002, 2003 RiskMap srl
 Copyright (C) 2003, 2004 Ferdinando Ametrano
 Copyright (C) 2007, 2008 StatPro Italia srl

 This file is part of QuantLib, a free-software/open-source library
 for financial quantitative analysts and developers - http://quantlib.org/

 QuantLib is free software: you can redistribute it and/or modify it
 under the terms of the QuantLib license.  You should have received a
 copy of the license along with this program; if not, please email
 <quantlib-dev@lists.sf.net>. The license is also available online at
 <http://quantlib.org/license.shtml>.

 This program is distributed in the hope that it will be useful, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the license for more details.
*/

/*! \file mc_discr_geom_av_price.hpp
    \brief Monte Carlo engine for discrete geometric average price Asian
*/

#ifndef quantlib_mc_discrete_geometric_average_price_asian_engine_h
#define quantlib_mc_discrete_geometric_average_price_asian_engine_h

#include <ql/pricingengines/asian/mcdiscreteasianengine.hpp>
#include <ql/termstructures/volatility/equityfx/blackconstantvol.hpp>
#include <ql/termstructures/volatility/equityfx/blackvariancecurve.hpp>
#include <ql/exercise.hpp>

namespace QuantLib {

    //! Monte Carlo pricing engine for discrete geometric average price Asian
    /*! \ingroup asianengines

        \test the correctness of the returned value is tested by
              reproducing results available in literature.
    */
    template <class RNG = PseudoRandom, class S = Statistics>
    class MCDiscreteGeometricAPEngine
        : public MCDiscreteAveragingAsianEngine<RNG,S> {
      public:
        typedef typename MCDiscreteAveragingAsianEngine<RNG,S>::path_generator_type
            path_generator_type;
        typedef typename MCDiscreteAveragingAsianEngine<RNG,S>::path_pricer_type
            path_pricer_type;
        typedef typename MCDiscreteAveragingAsianEngine<RNG,S>::stats_type
            stats_type;
        // constructor
        MCDiscreteGeometricAPEngine(
             const ext::shared_ptr<GeneralizedBlackScholesProcess>& process,
             bool brownianBridge,
             bool antitheticVariate,
             Size requiredSamples,
             Real requiredTolerance,
             Size maxSamples,
             BigNatural seed);
      protected:
        // Builds the geometric-average path pricer used by the base engine.
        ext::shared_ptr<path_pricer_type> pathPricer() const;
    };

    // Prices one simulated path for a geometric average price Asian option,
    // taking any past fixings into account via runningProduct/pastFixings.
    class GeometricAPOPathPricer : public PathPricer<Path> {
      public:
        GeometricAPOPathPricer(Option::Type type,
                               Real strike,
                               DiscountFactor discount,
                               Real runningProduct = 1.0,
                               Size pastFixings = 0);
        Real operator()(const Path& path) const;
      private:
        PlainVanillaPayoff payoff_;
        DiscountFactor discount_;
        Real runningProduct_;
        Size pastFixings_;
    };


    // inline definitions

    template <class RNG, class S>
    inline
    MCDiscreteGeometricAPEngine<RNG,S>::MCDiscreteGeometricAPEngine(
             const ext::shared_ptr<GeneralizedBlackScholesProcess>& process,
             bool brownianBridge,
             bool antitheticVariate,
             Size requiredSamples,
             Real requiredTolerance,
             Size maxSamples,
             BigNatural seed)
    // The hard-coded false disables the control variate in the base engine.
    : MCDiscreteAveragingAsianEngine<RNG,S>(process,
                                            brownianBridge,
                                            antitheticVariate,
                                            false,
                                            requiredSamples,
                                            requiredTolerance,
                                            maxSamples,
                                            seed) {}

    template <class RNG, class S>
    inline
    ext::shared_ptr<
            typename MCDiscreteGeometricAPEngine<RNG,S>::path_pricer_type>
        MCDiscreteGeometricAPEngine<RNG,S>::pathPricer() const {

        // Only plain-vanilla payoffs and European exercise are supported.
        ext::shared_ptr<PlainVanillaPayoff> payoff =
            ext::dynamic_pointer_cast<PlainVanillaPayoff>(
                this->arguments_.payoff);
        QL_REQUIRE(payoff, "non-plain payoff given");

        ext::shared_ptr<EuropeanExercise> exercise =
            ext::dynamic_pointer_cast<EuropeanExercise>(
                this->arguments_.exercise);
        QL_REQUIRE(exercise, "wrong exercise given");

        return ext::shared_ptr<typename
            MCDiscreteGeometricAPEngine<RNG,S>::path_pricer_type>(
                new GeometricAPOPathPricer(
                    payoff->optionType(),
                    payoff->strike(),
                    this->process_->riskFreeRate()->discount(
                                                       exercise->lastDate()),
                    this->arguments_.runningAccumulator,
                    this->arguments_.pastFixings));
    }

    // Builder with named parameters for MCDiscreteGeometricAPEngine.
    template <class RNG = PseudoRandom, class S = Statistics>
    class MakeMCDiscreteGeometricAPEngine {
      public:
        MakeMCDiscreteGeometricAPEngine(
            const ext::shared_ptr<GeneralizedBlackScholesProcess>& process);
        // named parameters
        MakeMCDiscreteGeometricAPEngine& withBrownianBridge(bool b = true);
        MakeMCDiscreteGeometricAPEngine& withSamples(Size samples);
        MakeMCDiscreteGeometricAPEngine& withAbsoluteTolerance(Real tolerance);
        MakeMCDiscreteGeometricAPEngine& withMaxSamples(Size samples);
        MakeMCDiscreteGeometricAPEngine& withSeed(BigNatural seed);
        MakeMCDiscreteGeometricAPEngine& withAntitheticVariate(bool b = true);
        // conversion to pricing engine
        operator ext::shared_ptr<PricingEngine>() const;
      private:
        ext::shared_ptr<GeneralizedBlackScholesProcess> process_;
        bool antithetic_;
        Size samples_, maxSamples_;
        Real tolerance_;
        bool brownianBridge_;
        BigNatural seed_;
    };

    template <class RNG, class S>
    inline
    MakeMCDiscreteGeometricAPEngine<RNG,S>::MakeMCDiscreteGeometricAPEngine(
             const ext::shared_ptr<GeneralizedBlackScholesProcess>& process)
    : process_(process), antithetic_(false),
      samples_(Null<Size>()), maxSamples_(Null<Size>()),
      tolerance_(Null<Real>()), brownianBridge_(true), seed_(0) {}

    template <class RNG, class S>
    inline MakeMCDiscreteGeometricAPEngine<RNG,S>&
    MakeMCDiscreteGeometricAPEngine<RNG,S>::withSamples(Size samples) {
        // Sample count and tolerance are mutually exclusive stopping criteria.
        QL_REQUIRE(tolerance_ == Null<Real>(),
                   "tolerance already set");
        samples_ = samples;
        return *this;
    }

    template <class RNG, class S>
    inline MakeMCDiscreteGeometricAPEngine<RNG,S>&
    MakeMCDiscreteGeometricAPEngine<RNG,S>::withAbsoluteTolerance(
                                                             Real tolerance) {
        QL_REQUIRE(samples_ == Null<Size>(),
                   "number of samples already set");
        QL_REQUIRE(RNG::allowsErrorEstimate,
                   "chosen random generator policy "
                   "does not allow an error estimate");
        tolerance_ = tolerance;
        return *this;
    }

    template <class RNG, class S>
    inline MakeMCDiscreteGeometricAPEngine<RNG,S>&
    MakeMCDiscreteGeometricAPEngine<RNG,S>::withMaxSamples(Size samples) {
        maxSamples_ = samples;
        return *this;
    }

    template <class RNG, class S>
    inline MakeMCDiscreteGeometricAPEngine<RNG,S>&
    MakeMCDiscreteGeometricAPEngine<RNG,S>::withSeed(BigNatural seed) {
        seed_ = seed;
        return *this;
    }

    template <class RNG, class S>
    inline MakeMCDiscreteGeometricAPEngine<RNG,S>&
    MakeMCDiscreteGeometricAPEngine<RNG,S>::withBrownianBridge(bool b) {
        brownianBridge_ = b;
        return *this;
    }

    template <class RNG, class S>
    inline MakeMCDiscreteGeometricAPEngine<RNG,S>&
    MakeMCDiscreteGeometricAPEngine<RNG,S>::withAntitheticVariate(bool b) {
        antithetic_ = b;
        return *this;
    }

    template <class RNG, class S>
    inline
    MakeMCDiscreteGeometricAPEngine<RNG,S>::operator
    ext::shared_ptr<PricingEngine>() const {
        return ext::shared_ptr<PricingEngine>(new
            MCDiscreteGeometricAPEngine<RNG,S>(process_,
                                               brownianBridge_,
                                               antithetic_,
                                               samples_, tolerance_,
                                               maxSamples_,
                                               seed_));
    }

}

#endif
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "pch.h"

#include "AdaptiveCardGetResourceStreamArgs.h"

// Layout constants for the default play-button overlay drawn on the poster.
const double c_playIconSize = 30;
const double c_playIconCornerRadius = 5;
const double c_playIconOpacity = .5;

// Media formats accepted as a playback source.
const winrt::hstring supportedMimeTypes[] = {L"video/mp4", L"audio/mp4", L"audio/aac", L"audio/mpeg"};

// NOTE(review): these look like file extensions, yet they are compared
// against CaptionSource.MimeType() below — confirm the card schema really
// carries "vtt"/"srt" in that property.
const std::unordered_set<winrt::hstring> supportedCaptionTypes = {L"vtt", L"srt"};

namespace AdaptiveCards::Rendering::Uwp::MediaHelpers
{
    // Renders the media element's poster (falling back to the host config's
    // default poster) through the registered "Image" element renderer.
    // Returns nullptr when no poster URL is available or rendering fails.
    winrt::Image GetMediaPosterAsImage(winrt::AdaptiveRenderContext const& renderContext,
                                       winrt::AdaptiveRenderArgs const& renderArgs,
                                       winrt::AdaptiveMedia const& adaptiveMedia)
    {
        auto posterString = adaptiveMedia.Poster();

        if (posterString.empty())
        {
            // No poster on the card element; try the host-configured default.
            auto hostConfig = renderContext.HostConfig();
            auto mediaConfig = hostConfig.Media();
            posterString = mediaConfig.DefaultPoster();

            if (posterString.empty())
            {
                return nullptr;
            }
        }

        // Wrap the poster URL in a synthetic AdaptiveImage so it goes through
        // the normal image rendering pipeline (resolvers, sizing, alt text).
        winrt::AdaptiveImage adaptiveImage{};
        adaptiveImage.Url(posterString);

        auto altText = adaptiveMedia.AltText();
        adaptiveImage.AltText(altText);

        auto elementRenderers = renderContext.ElementRenderers();
        auto imageRenderer = elementRenderers.Get(L"Image");

        if (const auto posterUiElement = imageRenderer.Render(adaptiveImage, renderContext, renderArgs))
        {
            return posterUiElement.as<winrt::Image>();
        }

        // Not logging a warning because if we get nullptr from imageRendere - it will log warning for us.
        return nullptr;
    }

    // Draws the built-in play button: a rounded translucent white rectangle
    // with a dark Play glyph, both centered on the poster panel.
    void AddDefaultPlayIcon(winrt::Panel const& posterPanel,
                            winrt::AdaptiveHostConfig const& hostConfig,
                            winrt::AdaptiveRenderArgs const& renderArgs)
    {
        // Create a rectangle
        winrt::Rectangle rectangle{};

        // Set the size
        rectangle.Height(c_playIconSize);
        rectangle.Width(c_playIconSize);

        // Round the corners
        rectangle.RadiusX(c_playIconCornerRadius);
        rectangle.RadiusY(c_playIconCornerRadius);

        // Set it's fill and opacity
        winrt::Windows::UI::Color whiteBrushColor{0xFF, 0xFF, 0xFF, 0xFF};
        rectangle.Fill(winrt::SolidColorBrush{whiteBrushColor});
        rectangle.Opacity(c_playIconOpacity);

        // Outline it in the Dark color
        auto containerStyle = renderArgs.ContainerStyle();
        auto darkBrushColor =
            GetColorFromAdaptiveColor(hostConfig, winrt::ForegroundColor::Dark, containerStyle, false, false);
        winrt::SolidColorBrush darkBrush{darkBrushColor};
        rectangle.Stroke(darkBrush);

        // Create a play symbol icon
        winrt::SymbolIcon playIcon{winrt::Symbol::Play};
        playIcon.Foreground(darkBrush);

        // Add rectangle first so the glyph renders on top of it.
        ::AdaptiveCards::Rendering::Uwp::XamlHelpers::AppendXamlElementToPanel(rectangle, posterPanel);
        winrt::RelativePanel::SetAlignVerticalCenterWithPanel(rectangle, true);
        winrt::RelativePanel::SetAlignHorizontalCenterWithPanel(rectangle, true);

        ::AdaptiveCards::Rendering::Uwp::XamlHelpers::AppendXamlElementToPanel(playIcon, posterPanel);
        winrt::RelativePanel::SetAlignHorizontalCenterWithPanel(playIcon, true);
        winrt::RelativePanel::SetAlignVerticalCenterWithPanel(playIcon, true);
    }

    // Renders a host-supplied play-button image and centers it on the panel.
    // Silently does nothing if the image renderer yields no FrameworkElement.
    void AddCustomPlayIcon(winrt::Panel const& posterPanel,
                           winrt::hstring const& playIconString,
                           winrt::AdaptiveRenderContext const& renderContext,
                           winrt::AdaptiveRenderArgs const& renderArgs)
    {
        // Render the custom play icon using the image renderer
        winrt::AdaptiveImage playIconAdaptiveImage{};
        playIconAdaptiveImage.Url(playIconString);

        auto imageRenderer = renderContext.ElementRenderers().Get(L"Image");
        auto playIconUIElement = imageRenderer.Render(playIconAdaptiveImage, renderContext, renderArgs);

        if (const auto playIconAsFrameworkElement = playIconUIElement.try_as<winrt::FrameworkElement>())
        {
            // Constrain height only; width follows the image's aspect ratio.
            playIconAsFrameworkElement.Height(c_playIconSize);

            // Add it to the panel and center it
            ::AdaptiveCards::Rendering::Uwp::XamlHelpers::AppendXamlElementToPanel(playIconUIElement, posterPanel);
            winrt::RelativePanel::SetAlignHorizontalCenterWithPanel(playIconUIElement, true);
            winrt::RelativePanel::SetAlignVerticalCenterWithPanel(playIconUIElement, true);
        }
    }

    // Chooses between the host-configured custom play button and the
    // built-in default overlay.
    void AddPlayIcon(winrt::Panel const& posterPanel,
                     winrt::AdaptiveRenderContext const& renderContext,
                     winrt::AdaptiveRenderArgs const& renderArgs)
    {
        auto hostConfig = renderContext.HostConfig();
        auto mediaConfig = hostConfig.Media();
        winrt::hstring customPlayIconString = mediaConfig.PlayButton();

        if (customPlayIconString.empty())
        {
            AddDefaultPlayIcon(posterPanel, hostConfig, renderArgs);
        }
        else
        {
            AddCustomPlayIcon(posterPanel, customPlayIconString, renderContext, renderArgs);
        }
    }

    // Stacks the poster image (if any) and a play icon in a RelativePanel.
    winrt::UIElement CreatePosterContainerWithPlayButton(winrt::Image const& posterImage,
                                                         winrt::AdaptiveRenderContext const& renderContext,
                                                         winrt::AdaptiveRenderArgs const& renderArgs)
    {
        winrt::RelativePanel posterRelativePanel{};

        if (posterImage)
        {
            ::AdaptiveCards::Rendering::Uwp::XamlHelpers::AppendXamlElementToPanel(posterImage, posterRelativePanel);
        }

        AddPlayIcon(posterRelativePanel, renderContext, renderArgs);

        return posterRelativePanel;
    }

    // Picks a playable source from the media element and returns its
    // (resolved URL, MIME type). Both are null/empty if nothing matched.
    std::tuple<winrt::Uri, winrt::hstring> GetMediaSource(winrt::AdaptiveHostConfig const& hostConfig,
                                                          winrt::AdaptiveMedia const& adaptiveMedia)
    {
        winrt::Uri mediaSourceUrl{nullptr};
        winrt::hstring mimeType{};

        auto sources = adaptiveMedia.Sources();

        winrt::AdaptiveMediaSource selectedSource{nullptr};
        // NOTE(review): the break below only exits the inner loop, so a later
        // supported source overwrites an earlier one — the LAST supported
        // source wins. Confirm that is intended (first-match seems likelier).
        for (auto source : sources)
        {
            winrt::hstring currentMimeType = source.MimeType();
            for (uint32_t i = 0; i < std::size(supportedMimeTypes); i++)
            {
                if (currentMimeType == supportedMimeTypes[i])
                {
                    selectedSource = source;
                    break;
                }
            }
        }

        if (selectedSource)
        {
            mediaSourceUrl = GetUrlFromString(hostConfig, selectedSource.Url());
            mimeType = selectedSource.MimeType();
        }

        return {mediaSourceUrl, mimeType};
    }

    // Attaches any supported caption tracks to mediaSrc (via the resource
    // resolvers when one is registered for the caption URL's scheme), wraps
    // it in a MediaPlaybackItem that auto-presents new timed-metadata
    // tracks, and hands it to the MediaElement.
    void SetMediaSourceHelper(winrt::MediaElement const& mediaElement,
                              winrt::AdaptiveMedia const& adaptiveMedia,
                              winrt::AdaptiveRenderContext const& renderContext,
                              winrt::MediaSource const& mediaSrc)
    {
        if (adaptiveMedia.CaptionSources().Size() > 0)
        {
            for (const auto captionSource : adaptiveMedia.CaptionSources())
            {
                if (const auto search = supportedCaptionTypes.find(captionSource.MimeType());
                    search != supportedCaptionTypes.end())
                {
                    const auto timedTextURL = GetUrlFromString(renderContext.HostConfig(), captionSource.Url());

                    winrt::IAdaptiveCardResourceResolver resourceResolver{nullptr};
                    if (const auto resourceResolvers = renderContext.ResourceResolvers())
                    {
                        resourceResolver = resourceResolvers.Get(timedTextURL.SchemeName());
                    }

                    // Label the first track with the card-provided caption label
                    // once the source resolves successfully.
                    const auto timedTextSrcResolvedHelper =
                        [label = captionSource.Label()](winrt::TimedTextSource const& /*sender*/,
                                                        winrt::TimedTextSourceResolveResultEventArgs const& args)
                    {
                        if (!args.Error())
                        {
                            args.Tracks().GetAt(0).Label(label);
                        }
                    };

                    if (!resourceResolver)
                    {
                        const auto timedTextSrc = winrt::TimedTextSource::CreateFromUri(timedTextURL);
                        timedTextSrc.Resolved(timedTextSrcResolvedHelper);
                        mediaSrc.ExternalTimedTextSources().Append(timedTextSrc);
                    }
                    else
                    {
                        auto args = winrt::make<winrt::implementation::AdaptiveCardGetResourceStreamArgs>(timedTextURL);
                        const auto randomAccessStream = resourceResolver.GetResourceStreamAsync(args);
                        // NOTE(review): .get() blocks on the async operation;
                        // confirm this helper never runs on the UI thread.
                        auto timedTextSrc = winrt::TimedTextSource::CreateFromStream(randomAccessStream.get());
                        timedTextSrc.Resolved(timedTextSrcResolvedHelper);
                        mediaSrc.ExternalTimedTextSources().Append(timedTextSrc);
                    }
                }
            }
        }

        winrt::MediaPlaybackItem playbackItem{mediaSrc};
        // Whenever tracks appear, present track 0 (the captions added above).
        playbackItem.TimedMetadataTracksChanged(
            [playbackItem](winrt::IInspectable const& /*sender*/, winrt::IInspectable const& /*args*/)
            {
                playbackItem.TimedMetadataTracks().SetPresentationMode(0, winrt::TimedMetadataTrackPresentationMode::PlatformPresented);
            });

        mediaElement.SetPlaybackSource(playbackItem);
    }

    // Completion handler for the resolver path of HandleMediaClick: turns the
    // resolved stream into a MediaSource and wires it into the MediaElement.
    // Errors and cancellations are ignored (no source is set).
    void HandleMediaResourceResolverCompleted(winrt::IAsyncOperation<winrt::IRandomAccessStream> const& operation,
                                              winrt::AsyncStatus status,
                                              winrt::MediaElement const& mediaElement,
                                              winrt::hstring const& mimeType,
                                              winrt::AdaptiveMedia const& adaptiveMedia,
                                              winrt::AdaptiveRenderContext const& renderContext)
    {
        if (status == winrt::AsyncStatus::Completed)
        {
            // Get the random access stream
            if (const auto randomAccessStream = operation.GetResults())
            {
                auto mediaSrc = winrt::MediaSource::CreateFromStream(randomAccessStream, mimeType);
                SetMediaSourceHelper(mediaElement, adaptiveMedia, renderContext, mediaSrc);
            }
        }
    }

    // Click handler for the media element. With an inline MediaElement:
    // hides the poster, loads the source (directly or through a registered
    // resource resolver) and plays on MediaOpened. Without one, raises the
    // host's media-clicked event instead.
    void HandleMediaClick(winrt::AdaptiveRenderContext const& renderContext,
                          winrt::AdaptiveMedia const& adaptiveMedia,
                          winrt::MediaElement const& mediaElement,
                          winrt::UIElement const& posterContainer,
                          winrt::Uri const& mediaSourceUrl,
                          winrt::hstring const& mimeType,
                          winrt::AdaptiveMediaEventInvoker const& mediaInvoker)
    {
        if (mediaElement)
        {
            posterContainer.Visibility(winrt::Visibility::Collapsed);
            mediaElement.Visibility(winrt::Visibility::Visible);

            winrt::IAdaptiveCardResourceResolver resourceResolver{nullptr};
            if (const auto resourceResolvers = renderContext.ResourceResolvers())
            {
                resourceResolver = resourceResolvers.Get(mediaSourceUrl.SchemeName());
            }

            if (resourceResolver == nullptr)
            {
                auto mediaSrc = winrt::MediaSource::CreateFromUri(mediaSourceUrl);
                SetMediaSourceHelper(mediaElement, adaptiveMedia, renderContext, mediaSrc);
            }
            else
            {
                auto args = winrt::make<winrt::implementation::AdaptiveCardGetResourceStreamArgs>(mediaSourceUrl);
                auto getResourceStreamOperation = resourceResolver.GetResourceStreamAsync(args);

                getResourceStreamOperation.Completed(
                    [mediaElement, mimeType, adaptiveMedia, renderContext](winrt::IAsyncOperation<winrt::IRandomAccessStream> operation,
                                                                           winrt::AsyncStatus status) -> void
                    {
                        return HandleMediaResourceResolverCompleted(operation, status, mediaElement, mimeType, adaptiveMedia, renderContext);
                    });
            }

            mediaElement.MediaOpened(
                [](winrt::IInspectable const& sender, winrt::RoutedEventArgs const& /*args*/) -> void
                {
                    // Auto-play once the source has opened.
                    if (const auto mediaElement = sender.try_as<winrt::MediaElement>())
                    {
                        mediaElement.Play();
                    }
                });
        }
        else
        {
            mediaInvoker.SendMediaClickedEvent(adaptiveMedia);
        }
    }
}
/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "Benchmark.h"
#include "SkBlurMask.h"
#include "SkCanvas.h"
#include "SkPaint.h"
#include "SkRandom.h"
#include "SkShader.h"
#include "SkString.h"

// Shared scaffolding for bitmap-scaling benchmarks: allocates an input and
// an output bitmap of the requested sizes and runs doScaleImage() in a loop.
// Fix: onGetName/onPreDraw/onDraw now carry `override` like the other
// virtuals in this file (onGetSize, doScaleImage), so a base-signature
// mismatch becomes a compile error instead of a silent new virtual.
class BitmapScaleBench: public Benchmark {
    int fLoopCount;
    int fInputSize;
    int fOutputSize;
    SkString fName;
public:
    // is/os: input and output edge lengths in pixels (bitmaps are square).
    BitmapScaleBench( int is, int os)  {
        fInputSize = is;
        fOutputSize = os;
        fLoopCount = 20;
    }

protected:
    SkBitmap fInputBitmap, fOutputBitmap;
    SkMatrix fMatrix;

    const char* onGetName() override {
        return fName.c_str();
    }

    int inputSize() const {
        return fInputSize;
    }

    int outputSize() const {
        return fOutputSize;
    }

    // Scale factor applied by fMatrix (output edge / input edge).
    float scale() const {
        return float(outputSize())/inputSize();
    }

    SkIPoint onGetSize() override {
        return SkIPoint::Make( fOutputSize, fOutputSize );
    }

    // Builds "bitmap_scale_<variant>_<in>_<out>" for the benchmark registry.
    void setName(const char * name) {
        fName.printf( "bitmap_scale_%s_%d_%d", name, fInputSize, fOutputSize );
    }

    void onPreDraw() override {
        // Allocate once up front so the timed loop measures scaling only.
        fInputBitmap.allocN32Pixels(fInputSize, fInputSize, true);
        fInputBitmap.eraseColor(SK_ColorWHITE);
        fOutputBitmap.allocN32Pixels(fOutputSize, fOutputSize, true);
        fMatrix.setScale( scale(), scale() );
    }

    void onDraw(const int loops, SkCanvas*) override {
        SkPaint paint;
        this->setupPaint(&paint);
        preBenchSetup();
        for (int i = 0; i < loops; i++) {
            doScaleImage();
        }
    }

    // One timed scaling operation; implemented by concrete benches.
    virtual void doScaleImage() = 0;
    // Optional untimed per-run setup hook.
    virtual void preBenchSetup() {}
private:
    typedef Benchmark INHERITED;
};

// Benchmarks high-quality filtered bitmap scaling through SkCanvas.
class BitmapFilterScaleBench: public BitmapScaleBench {
public:
    BitmapFilterScaleBench( int is, int os) : INHERITED(is, os) {
        setName( "filter" );
    }
protected:
    void doScaleImage() override {
        SkCanvas canvas( fOutputBitmap );
        SkPaint paint;
        paint.setFilterQuality(kHigh_SkFilterQuality);
        // Bump the pixel generation ID so the scaler cannot reuse a cached
        // scaled copy from the previous iteration.
        fInputBitmap.notifyPixelsChanged();
        canvas.concat(fMatrix);
        canvas.drawBitmap(fInputBitmap, 0, 0, &paint );
    }
private:
    typedef BitmapScaleBench INHERITED;
};

DEF_BENCH(return new BitmapFilterScaleBench(10, 90);)
DEF_BENCH(return new BitmapFilterScaleBench(30, 90);)
DEF_BENCH(return new BitmapFilterScaleBench(80, 90);)
DEF_BENCH(return new BitmapFilterScaleBench(90, 90);)
DEF_BENCH(return new BitmapFilterScaleBench(90, 80);)
DEF_BENCH(return new BitmapFilterScaleBench(90, 30);)
DEF_BENCH(return new BitmapFilterScaleBench(90, 10);)
DEF_BENCH(return new BitmapFilterScaleBench(256, 64);)
DEF_BENCH(return new BitmapFilterScaleBench(64, 256);)
// Copyright (c) 2012-2015 The Bitcoin Core developers
// Copyright (c) 2014-2017 The Dash Core developers
// Copyright (c) 2017-2018 The FXRate Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "netbase.h"
#include "test/test_fxrate.h"

#include <string>

#include <boost/assign/list_of.hpp>
#include <boost/test/unit_test.hpp>

using namespace std;

BOOST_FIXTURE_TEST_SUITE(netbase_tests, BasicTestingSetup)

// Address-family classification, including the OnionCat mapping of Tor
// addresses into the fd87:d87e:eb43::/48 IPv6 range.
BOOST_AUTO_TEST_CASE(netbase_networks)
{
    BOOST_CHECK(CNetAddr("127.0.0.1").GetNetwork()                              == NET_UNROUTABLE);
    BOOST_CHECK(CNetAddr("::1").GetNetwork()                                    == NET_UNROUTABLE);
    BOOST_CHECK(CNetAddr("8.8.8.8").GetNetwork()                                == NET_IPV4);
    BOOST_CHECK(CNetAddr("2001::8888").GetNetwork()                             == NET_IPV6);
    BOOST_CHECK(CNetAddr("FD87:D87E:EB43:edb1:8e4:3588:e546:35ca").GetNetwork() == NET_TOR);
}

// One representative address per RFC-defined special range.
BOOST_AUTO_TEST_CASE(netbase_properties)
{
    BOOST_CHECK(CNetAddr("127.0.0.1").IsIPv4());
    BOOST_CHECK(CNetAddr("::FFFF:192.168.1.1").IsIPv4());   // IPv4-mapped IPv6
    BOOST_CHECK(CNetAddr("::1").IsIPv6());
    BOOST_CHECK(CNetAddr("10.0.0.1").IsRFC1918());
    BOOST_CHECK(CNetAddr("192.168.1.1").IsRFC1918());
    BOOST_CHECK(CNetAddr("172.31.255.255").IsRFC1918());
    BOOST_CHECK(CNetAddr("2001:0DB8::").IsRFC3849());
    BOOST_CHECK(CNetAddr("169.254.1.1").IsRFC3927());
    BOOST_CHECK(CNetAddr("2002::1").IsRFC3964());
    BOOST_CHECK(CNetAddr("FC00::").IsRFC4193());
    BOOST_CHECK(CNetAddr("2001::2").IsRFC4380());
    BOOST_CHECK(CNetAddr("2001:10::").IsRFC4843());
    BOOST_CHECK(CNetAddr("FE80::").IsRFC4862());
    BOOST_CHECK(CNetAddr("64:FF9B::").IsRFC6052());
    BOOST_CHECK(CNetAddr("FD87:D87E:EB43:edb1:8e4:3588:e546:35ca").IsTor());
    BOOST_CHECK(CNetAddr("127.0.0.1").IsLocal());
    BOOST_CHECK(CNetAddr("::1").IsLocal());
    BOOST_CHECK(CNetAddr("8.8.8.8").IsRoutable());
    BOOST_CHECK(CNetAddr("2001::1").IsRoutable());
    BOOST_CHECK(CNetAddr("127.0.0.1").IsValid());
}

// Returns true iff SplitHostPort(test) yields exactly (host, port);
// port == -1 means "no port present in the input".
bool static TestSplitHost(string test, string host, int port)
{
    string hostOut;
    int portOut = -1;
    SplitHostPort(test, portOut, hostOut);
    return hostOut == host && port == portOut;
}

BOOST_AUTO_TEST_CASE(netbase_splithost)
{
    BOOST_CHECK(TestSplitHost("www.bitcoin.org", "www.bitcoin.org", -1));
    BOOST_CHECK(TestSplitHost("[www.bitcoin.org]", "www.bitcoin.org", -1));
    BOOST_CHECK(TestSplitHost("www.bitcoin.org:80", "www.bitcoin.org", 80));
    BOOST_CHECK(TestSplitHost("[www.bitcoin.org]:80", "www.bitcoin.org", 80));
    BOOST_CHECK(TestSplitHost("127.0.0.1", "127.0.0.1", -1));
    BOOST_CHECK(TestSplitHost("127.0.0.1:34222", "127.0.0.1", 34222));
    BOOST_CHECK(TestSplitHost("[127.0.0.1]", "127.0.0.1", -1));
    BOOST_CHECK(TestSplitHost("[127.0.0.1]:34222", "127.0.0.1", 34222));
    BOOST_CHECK(TestSplitHost("::ffff:127.0.0.1", "::ffff:127.0.0.1", -1));
    BOOST_CHECK(TestSplitHost("[::ffff:127.0.0.1]:34222", "::ffff:127.0.0.1", 34222));
    BOOST_CHECK(TestSplitHost("[::]:34222", "::", 34222));
    // Unbracketed "::34222" is ambiguous and must parse as an IPv6 host.
    BOOST_CHECK(TestSplitHost("::34222", "::34222", -1));
    BOOST_CHECK(TestSplitHost(":34222", "", 34222));
    BOOST_CHECK(TestSplitHost("[]:34222", "", 34222));
    BOOST_CHECK(TestSplitHost("", "", -1));
}

// Returns true iff LookupNumeric(src, default port 65535) canonicalizes to
// `canon`; an empty `canon` means the lookup is expected to fail.
bool static TestParse(string src, string canon)
{
    CService addr;
    if (!LookupNumeric(src.c_str(), addr, 65535))
        return canon == "";
    return canon == addr.ToString();
}

BOOST_AUTO_TEST_CASE(netbase_lookupnumeric)
{
    BOOST_CHECK(TestParse("127.0.0.1", "127.0.0.1:65535"));
    BOOST_CHECK(TestParse("127.0.0.1:34222", "127.0.0.1:34222"));
    BOOST_CHECK(TestParse("::ffff:127.0.0.1", "127.0.0.1:65535"));
    BOOST_CHECK(TestParse("::", "[::]:65535"));
    BOOST_CHECK(TestParse("[::]:34222", "[::]:34222"));
    BOOST_CHECK(TestParse("[127.0.0.1]", "127.0.0.1:65535"));
    BOOST_CHECK(TestParse(":::", ""));
}

// The .onion hostname and its OnionCat IPv6 encoding must compare equal.
BOOST_AUTO_TEST_CASE(onioncat_test)
{
    // values from https://web.archive.org/web/20121122003543/http://www.cypherpunk.at/onioncat/wiki/OnionCat
    CNetAddr addr1("5wyqrzbvrdsumnok.onion");
    CNetAddr addr2("FD87:D87E:EB43:edb1:8e4:3588:e546:35ca");
    BOOST_CHECK(addr1 == addr2);
    BOOST_CHECK(addr1.IsTor());
    BOOST_CHECK(addr1.ToStringIP() == "5wyqrzbvrdsumnok.onion");
    BOOST_CHECK(addr1.IsRoutable());
}

// CSubNet parsing (CIDR and dotted-netmask forms), matching, validity, and
// canonical ToString() rendering.
BOOST_AUTO_TEST_CASE(subnet_test)
{
    BOOST_CHECK(CSubNet("1.2.3.0/24") == CSubNet("1.2.3.0/255.255.255.0"));
    BOOST_CHECK(CSubNet("1.2.3.0/24") != CSubNet("1.2.4.0/255.255.255.0"));
    BOOST_CHECK(CSubNet("1.2.3.0/24").Match(CNetAddr("1.2.3.4")));
    BOOST_CHECK(!CSubNet("1.2.2.0/24").Match(CNetAddr("1.2.3.4")));
    BOOST_CHECK(CSubNet("1.2.3.4").Match(CNetAddr("1.2.3.4")));
    BOOST_CHECK(CSubNet("1.2.3.4/32").Match(CNetAddr("1.2.3.4")));
    BOOST_CHECK(!CSubNet("1.2.3.4").Match(CNetAddr("5.6.7.8")));
    BOOST_CHECK(!CSubNet("1.2.3.4/32").Match(CNetAddr("5.6.7.8")));
    BOOST_CHECK(CSubNet("::ffff:127.0.0.1").Match(CNetAddr("127.0.0.1")));
    BOOST_CHECK(CSubNet("1:2:3:4:5:6:7:8").Match(CNetAddr("1:2:3:4:5:6:7:8")));
    BOOST_CHECK(!CSubNet("1:2:3:4:5:6:7:8").Match(CNetAddr("1:2:3:4:5:6:7:9")));
    BOOST_CHECK(CSubNet("1:2:3:4:5:6:7:0/112").Match(CNetAddr("1:2:3:4:5:6:7:1234")));
    BOOST_CHECK(CSubNet("192.168.0.1/24").Match(CNetAddr("192.168.0.2")));
    BOOST_CHECK(CSubNet("192.168.0.20/29").Match(CNetAddr("192.168.0.18")));
    BOOST_CHECK(CSubNet("1.2.2.1/24").Match(CNetAddr("1.2.2.4")));
    BOOST_CHECK(CSubNet("1.2.2.110/31").Match(CNetAddr("1.2.2.111")));
    BOOST_CHECK(CSubNet("1.2.2.20/26").Match(CNetAddr("1.2.2.63")));
    // All-Matching IPv6 Matches arbitrary IPv4 and IPv6
    BOOST_CHECK(CSubNet("::/0").Match(CNetAddr("1:2:3:4:5:6:7:1234")));
    BOOST_CHECK(CSubNet("::/0").Match(CNetAddr("1.2.3.4")));
    // All-Matching IPv4 does not Match IPv6
    BOOST_CHECK(!CSubNet("0.0.0.0/0").Match(CNetAddr("1:2:3:4:5:6:7:1234")));
    // Invalid subnets Match nothing (not even invalid addresses)
    BOOST_CHECK(!CSubNet().Match(CNetAddr("1.2.3.4")));
    BOOST_CHECK(!CSubNet("").Match(CNetAddr("4.5.6.7")));
    BOOST_CHECK(!CSubNet("bloop").Match(CNetAddr("0.0.0.0")));
    BOOST_CHECK(!CSubNet("bloop").Match(CNetAddr("hab")));
    // Check valid/invalid
    BOOST_CHECK(CSubNet("1.2.3.0/0").IsValid());
    BOOST_CHECK(!CSubNet("1.2.3.0/-1").IsValid());
    BOOST_CHECK(CSubNet("1.2.3.0/32").IsValid());
    BOOST_CHECK(!CSubNet("1.2.3.0/33").IsValid());
    BOOST_CHECK(CSubNet("1:2:3:4:5:6:7:8/0").IsValid());
    BOOST_CHECK(CSubNet("1:2:3:4:5:6:7:8/33").IsValid());
    BOOST_CHECK(!CSubNet("1:2:3:4:5:6:7:8/-1").IsValid());
    BOOST_CHECK(CSubNet("1:2:3:4:5:6:7:8/128").IsValid());
    BOOST_CHECK(!CSubNet("1:2:3:4:5:6:7:8/129").IsValid());
    BOOST_CHECK(!CSubNet("fuzzy").IsValid());

    //CNetAddr constructor test
    BOOST_CHECK(CSubNet(CNetAddr("127.0.0.1")).IsValid());
    BOOST_CHECK(CSubNet(CNetAddr("127.0.0.1")).Match(CNetAddr("127.0.0.1")));
    BOOST_CHECK(!CSubNet(CNetAddr("127.0.0.1")).Match(CNetAddr("127.0.0.2")));
    BOOST_CHECK(CSubNet(CNetAddr("127.0.0.1")).ToString() == "127.0.0.1/32");
    BOOST_CHECK(CSubNet(CNetAddr("1:2:3:4:5:6:7:8")).IsValid());
    BOOST_CHECK(CSubNet(CNetAddr("1:2:3:4:5:6:7:8")).Match(CNetAddr("1:2:3:4:5:6:7:8")));
    BOOST_CHECK(!CSubNet(CNetAddr("1:2:3:4:5:6:7:8")).Match(CNetAddr("1:2:3:4:5:6:7:9")));
    BOOST_CHECK(CSubNet(CNetAddr("1:2:3:4:5:6:7:8")).ToString() == "1:2:3:4:5:6:7:8/128");

    // Every contiguous IPv4 netmask must round-trip to its CIDR form, with
    // host bits below the mask cleared in the printed base address.
    CSubNet subnet = CSubNet("1.2.3.4/255.255.255.255");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.3.4/32");
    subnet = CSubNet("1.2.3.4/255.255.255.254");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.3.4/31");
    subnet = CSubNet("1.2.3.4/255.255.255.252");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.3.4/30");
    subnet = CSubNet("1.2.3.4/255.255.255.248");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.3.0/29");
    subnet = CSubNet("1.2.3.4/255.255.255.240");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.3.0/28");
    subnet = CSubNet("1.2.3.4/255.255.255.224");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.3.0/27");
    subnet = CSubNet("1.2.3.4/255.255.255.192");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.3.0/26");
    subnet = CSubNet("1.2.3.4/255.255.255.128");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.3.0/25");
    subnet = CSubNet("1.2.3.4/255.255.255.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.3.0/24");
    subnet = CSubNet("1.2.3.4/255.255.254.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.2.0/23");
    subnet = CSubNet("1.2.3.4/255.255.252.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.0.0/22");
    subnet = CSubNet("1.2.3.4/255.255.248.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.0.0/21");
    subnet = CSubNet("1.2.3.4/255.255.240.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.0.0/20");
    subnet = CSubNet("1.2.3.4/255.255.224.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.0.0/19");
    subnet = CSubNet("1.2.3.4/255.255.192.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.0.0/18");
    subnet = CSubNet("1.2.3.4/255.255.128.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.0.0/17");
    subnet = CSubNet("1.2.3.4/255.255.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.0.0/16");
    subnet = CSubNet("1.2.3.4/255.254.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.0.0/15");
    subnet = CSubNet("1.2.3.4/255.252.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.0.0.0/14");
    subnet = CSubNet("1.2.3.4/255.248.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.0.0.0/13");
    subnet = CSubNet("1.2.3.4/255.240.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.0.0.0/12");
    subnet = CSubNet("1.2.3.4/255.224.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.0.0.0/11");
    subnet = CSubNet("1.2.3.4/255.192.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.0.0.0/10");
    subnet = CSubNet("1.2.3.4/255.128.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.0.0.0/9");
    subnet = CSubNet("1.2.3.4/255.0.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.0.0.0/8");
    subnet = CSubNet("1.2.3.4/254.0.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "0.0.0.0/7");
    subnet = CSubNet("1.2.3.4/252.0.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "0.0.0.0/6");
    subnet = CSubNet("1.2.3.4/248.0.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "0.0.0.0/5");
    subnet = CSubNet("1.2.3.4/240.0.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "0.0.0.0/4");
    subnet = CSubNet("1.2.3.4/224.0.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "0.0.0.0/3");
    subnet = CSubNet("1.2.3.4/192.0.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "0.0.0.0/2");
    subnet = CSubNet("1.2.3.4/128.0.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "0.0.0.0/1");
    subnet = CSubNet("1.2.3.4/0.0.0.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "0.0.0.0/0");

    // IPv6 netmask forms.
    subnet = CSubNet("1:2:3:4:5:6:7:8/ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1:2:3:4:5:6:7:8/128");
    subnet = CSubNet("1:2:3:4:5:6:7:8/ffff:0000:0000:0000:0000:0000:0000:0000");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1::/16");
    subnet = CSubNet("1:2:3:4:5:6:7:8/0000:0000:0000:0000:0000:0000:0000:0000");
    BOOST_CHECK_EQUAL(subnet.ToString(), "::/0");
    // Non-contiguous masks cannot be expressed as a prefix length and are
    // printed back in netmask form.
    subnet = CSubNet("1.2.3.4/255.255.232.0");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1.2.0.0/255.255.232.0");
    subnet = CSubNet("1:2:3:4:5:6:7:8/ffff:ffff:ffff:fffe:ffff:ffff:ffff:ff0f");
    BOOST_CHECK_EQUAL(subnet.ToString(), "1:2:3:4:5:6:7:8/ffff:ffff:ffff:fffe:ffff:ffff:ffff:ff0f");
}

// Netgroup bucketing used for address-manager diversification.
BOOST_AUTO_TEST_CASE(netbase_getgroup)
{
    BOOST_CHECK(CNetAddr("127.0.0.1").GetGroup() == boost::assign::list_of(0)); // Local -> !Routable()
    BOOST_CHECK(CNetAddr("257.0.0.1").GetGroup() == boost::assign::list_of(0)); // !Valid -> !Routable()
    BOOST_CHECK(CNetAddr("10.0.0.1").GetGroup() == boost::assign::list_of(0)); // RFC1918 -> !Routable()
    BOOST_CHECK(CNetAddr("169.254.1.1").GetGroup() == boost::assign::list_of(0)); // RFC3927 -> !Routable()
    BOOST_CHECK(CNetAddr("1.2.3.4").GetGroup() == boost::assign::list_of((unsigned char)NET_IPV4)(1)(2)); // IPv4
    BOOST_CHECK(CNetAddr("::FFFF:0:102:304").GetGroup() == boost::assign::list_of((unsigned char)NET_IPV4)(1)(2)); // RFC6145
    BOOST_CHECK(CNetAddr("64:FF9B::102:304").GetGroup() == boost::assign::list_of((unsigned char)NET_IPV4)(1)(2)); // RFC6052
    BOOST_CHECK(CNetAddr("2002:102:304:9999:9999:9999:9999:9999").GetGroup() == boost::assign::list_of((unsigned char)NET_IPV4)(1)(2)); // RFC3964
    BOOST_CHECK(CNetAddr("2001:0:9999:9999:9999:9999:FEFD:FCFB").GetGroup() == boost::assign::list_of((unsigned char)NET_IPV4)(1)(2)); // RFC4380
    BOOST_CHECK(CNetAddr("FD87:D87E:EB43:edb1:8e4:3588:e546:35ca").GetGroup() == boost::assign::list_of((unsigned char)NET_TOR)(239)); // Tor
    BOOST_CHECK(CNetAddr("2001:470:abcd:9999:9999:9999:9999:9999").GetGroup() == boost::assign::list_of((unsigned char)NET_IPV6)(32)(1)(4)(112)(175)); //he.net
    BOOST_CHECK(CNetAddr("2001:2001:9999:9999:9999:9999:9999:9999").GetGroup() == boost::assign::list_of((unsigned char)NET_IPV6)(32)(1)(32)(1)); //IPv6
}

BOOST_AUTO_TEST_SUITE_END()
/**************************************************************************/
/*                                                                        */
/*                              WWIV Version 5.x                          */
/*             Copyright (C)2007-2021, WWIV Software Services             */
/*                                                                        */
/*    Licensed  under the  Apache License, Version  2.0 (the "License");  */
/*    you may not use this  file  except in compliance with the License.  */
/*    You may obtain a copy of the License at                             */
/*                                                                        */
/*                http://www.apache.org/licenses/LICENSE-2.0              */
/*                                                                        */
/*    Unless  required  by  applicable  law  or agreed to  in  writing,   */
/*    software  distributed  under  the  License  is  distributed on an   */
/*    "AS IS"  BASIS, WITHOUT  WARRANTIES  OR  CONDITIONS OF ANY  KIND,   */
/*    either  express  or implied.  See  the  License for  the specific   */
/*    language governing permissions and limitations under the License.   */
/*                                                                        */
/**************************************************************************/
#include "gtest/gtest.h"

#include "bbs/stuffin.h"
#include "bbs_test/bbs_helper.h"
#include "core/file.h"
#include "core/strings.h"
#include "sdk/filenames.h"

using std::cout;
using std::endl;
using std::ostringstream;
using std::string;

using namespace wwiv::core;
using namespace wwiv::strings;

// Fixture for stuff_in() substitution tests: local (non-comm) session with
// modem speed 0.
class StuffInTest : public testing::Test {
protected:
  void SetUp() override {
    helper.SetUp();
    a()->sess().incom(false);
    a()->modem_speed_ = 0;
  }

public:
  // Expected full path of a dropfile `name` in the session temp directory.
  static std::string t(const std::string& name) {
    return FilePath(a()->sess().dirs().temp_directory(), name).string();
  }

  BbsHelper helper;
};

// %1/%2 substitute the passed-in params, %c the chain.txt dropfile path,
// %k the gfiles comment file.
TEST_F(StuffInTest, SimpleCase) {
  const auto actual = stuff_in("foo %1 %c %2 %k", "one", "two", "", "", "");

  ostringstream os;
  os << "foo one " << t(DROPFILE_CHAIN_TXT) << " two " << helper.gfiles() << COMMENT_TXT;
  const auto expected = os.str();

  EXPECT_EQ(expected, actual);
}

TEST_F(StuffInTest, Empty) {
  const auto actual = stuff_in("", "", "", "", "", "");
  EXPECT_TRUE(actual.empty());
}

// Param    Description                 Example
// ---------------------------------------------------------------------
// %%       A single '%'                "%"
// %1-%5    Specified passed-in parameter
TEST_F(StuffInTest, AllNumbers) {
  // %0 and %6 are not valid placeholders and expand to nothing.
  const auto actual = stuff_in("%0%1%2%3%4%5%6%%", "1", "2", "3", "4", "5");
  const string expected = "12345%";

  EXPECT_EQ(expected, actual);
}

// Param    Description                 Example
// ---------------------------------------------------------------------
// %A       callinfo full pathname      "c:\wwiv\temp\callinfo.bbs"
// %C       chain.txt full pathname     "c:\wwiv\temp\chain.txt"
// %D       doorinfo full pathname      "c:\wwiv\temp\dorinfo1.def"
// %E       door32.sys full pathname    "C:\wwiv\temp\door32.sys"
// %O       pcboard full pathname       "c:\wwiv\temp\pcboard.sys"
// %R       door full pathname          "c:\wwiv\temp\door.sys"
// Dropfile placeholders are case-insensitive.
TEST_F(StuffInTest, AllDropFiles) {
  const auto actual_lower = stuff_in("%a %c %d %e %o %r ", "", "", "", "", "");
  const auto actual_upper = stuff_in("%A %C %D %E %O %R ", "", "", "", "", "");

  ostringstream expected;
  expected << t("callinfo.bbs") << " " << t(DROPFILE_CHAIN_TXT) << " " << t("dorinfo1.def") << " "
           << t("door32.sys") << " " << t("pcboard.sys") << " " << t("door.sys") << " ";

  EXPECT_EQ(expected.str(), actual_lower);
  EXPECT_EQ(expected.str(), actual_upper);
}

// Param    Description                 Example
// ---------------------------------------------------------------------
// %N       Instance number             "1"
// %P       Com port number             "1"
TEST_F(StuffInTest, PortAndNode) {
  // Com port is 0 for a local session, 1 when a caller is connected.
  a()->sess().incom(false);
  EXPECT_EQ(string("0"), stuff_in("%P", "", "", "", "", ""));

  a()->sess().incom(true);
  EXPECT_EQ(string("1"), stuff_in("%P", "", "", "", "", ""));

  EXPECT_EQ(string("42"), stuff_in("%N", "", "", "", "", ""));
}

// Param    Description                 Example
// ---------------------------------------------------------------------
// %M       Modem baud rate             "14400"
// %S       Com port baud rate          "38400"
TEST_F(StuffInTest, Speeds) {
  EXPECT_EQ(string("0"), stuff_in("%M", "", "", "", "", ""));
  EXPECT_EQ(string("0"), stuff_in("%S", "", "", "", "", ""));

  a()->modem_speed_ = 38400;
  EXPECT_EQ(string("38400"), stuff_in("%M", "", "", "", "", ""));
  EXPECT_EQ(string("38400"), stuff_in("%S", "", "", "", "", ""));
}
#ifndef RENDERBOI__CORE__INTERFACES__BASIS_PROVIDER_HPP #define RENDERBOI__CORE__INTERFACES__BASIS_PROVIDER_HPP #include <memory> #include <glm/vec3.hpp> namespace Renderboi { /// @brief Interface for a class able to provide basis vectors. class BasisProvider { public: /// @brief Get the X vector of the basis, potentially in terms of a /// different basis. /// /// @return X vector of the basis. virtual glm::vec3 left() const = 0; /// @brief Get the Y vector of the basis, potentially in terms of a /// different basis. /// /// @return Y vector of the basis. virtual glm::vec3 up() const = 0; /// @brief Get the Z vector of the basis, potentially in terms of a /// different basis. /// /// @return Z vector of the basis. virtual glm::vec3 forward() const = 0; }; using BasisProviderPtr = std::shared_ptr<BasisProvider>; }//namespace Renderboi #endif//RENDERBOI__CORE__INTERFACES__BASIS_PROVIDER_HPP
/* Copyright (c) 2012 Patrick Ruoff
 * Copyright (c) 2014-2015 Stanislaw Halik <sthalik@misaki.pl>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 */

#include "ftnoir_tracker_pt_dialog.h"

#include "compat/math.hpp"
#include "video/camera.hpp"

#include <QString>
#include <QtGlobal>
#include <QDebug>

using namespace options;

// Qt resources must be initialized from a plain function when the tracker
// is built as a plugin library.
static void init_resources() { Q_INIT_RESOURCE(tracker_pt_base); }

namespace pt_impl {

// Settings dialog for the PointTracker: binds every UI widget to its
// persisted setting and drives the translation-calibration state machine.
TrackerDialog_PT::TrackerDialog_PT(const QString& module_name) :
    s(module_name),
    tracker(nullptr),
    timer(this),
    trans_calib(1, 2)
{
    init_resources();

    ui.setupUi(this);

    for (const QString& str : video::camera_names())
        ui.camdevice_combo->addItem(str);

    // Camera selection and capture parameters.
    tie_setting(s.camera_name, ui.camdevice_combo);
    tie_setting(s.cam_res_x, ui.res_x_spin);
    tie_setting(s.cam_res_y, ui.res_y_spin);
    tie_setting(s.cam_fps, ui.fps_spin);
    tie_setting(s.use_mjpeg, ui.use_mjpeg);

    tie_setting(s.threshold_slider, ui.threshold_slider);

    tie_setting(s.min_point_size, ui.mindiam_spin);
    tie_setting(s.max_point_size, ui.maxdiam_spin);

    // Clip / cap / custom model geometry (one tab per model type).
    tie_setting(s.clip_by, ui.clip_bheight_spin);
    tie_setting(s.clip_bz, ui.clip_blength_spin);
    tie_setting(s.clip_ty, ui.clip_theight_spin);
    tie_setting(s.clip_tz, ui.clip_tlength_spin);

    tie_setting(s.cap_x, ui.cap_width_spin);
    tie_setting(s.cap_y, ui.cap_height_spin);
    tie_setting(s.cap_z, ui.cap_length_spin);

    tie_setting(s.m01_x, ui.m1x_spin);
    tie_setting(s.m01_y, ui.m1y_spin);
    tie_setting(s.m01_z, ui.m1z_spin);

    tie_setting(s.m02_x, ui.m2x_spin);
    tie_setting(s.m02_y, ui.m2y_spin);
    tie_setting(s.m02_z, ui.m2z_spin);

    // Model-to-head translation offset (filled in by calibration).
    tie_setting(s.t_MH_x, ui.tx_spin);
    tie_setting(s.t_MH_y, ui.ty_spin);
    tie_setting(s.t_MH_z, ui.tz_spin);

    tie_setting(s.fov, ui.fov);
    tie_setting(s.active_model_panel, ui.model_tabs);

    tie_setting(s.dynamic_pose, ui.dynamic_pose);
    tie_setting(s.init_phase_timeout, ui.init_phase_timeout);

    tie_setting(s.auto_threshold, ui.auto_threshold);

    connect(ui.tcalib_button,SIGNAL(toggled(bool)), this, SLOT(startstop_trans_calib(bool)));

    connect(ui.buttonBox, SIGNAL(accepted()), this, SLOT(doOK()));
    connect(ui.buttonBox, SIGNAL(rejected()), this, SLOT(doCancel()));

    connect(ui.camdevice_combo, &QComboBox::currentTextChanged, this, &TrackerDialog_PT::set_camera_settings_available);
    set_camera_settings_available(ui.camdevice_combo->currentText());
    connect(ui.camera_settings, &QPushButton::clicked, this, &TrackerDialog_PT::show_camera_settings);

    // Status poll every 250 ms; calibration sampling every 35 ms.
    connect(&timer, &QTimer::timeout, this, &TrackerDialog_PT::poll_tracker_info_impl);
    timer.setInterval(250);

    connect(&calib_timer, &QTimer::timeout, this, &TrackerDialog_PT::trans_calib_step);
    calib_timer.setInterval(35);

    poll_tracker_info_impl();

    // Combo index -> extraction channel mapping; order must match the .ui
    // combo box entries.
    constexpr pt_color_type color_types[] = {
        pt_color_average,
        pt_color_natural,
        pt_color_red_only,
        pt_color_green_only,
        pt_color_blue_only,
        pt_color_red_chromakey,
        pt_color_green_chromakey,
        pt_color_blue_chromakey,
        pt_color_cyan_chromakey,
        pt_color_yellow_chromakey,
        pt_color_magenta_chromakey,
    };

    for (unsigned k = 0; k < std::size(color_types); k++)
        ui.blob_color->setItemData(k, int(color_types[k]));

    tie_setting(s.blob_color, ui.blob_color);

    tie_setting(s.threshold_slider, ui.threshold_value_display, [this](const slider_value& val) {
        return threshold_display_text(int(val));
    });

    // refresh threshold display on auto-threshold checkbox state change
    tie_setting(s.auto_threshold, this, [this](bool) { s.threshold_slider.notify(); });

    tie_setting(s.enable_point_filter, ui.enable_point_filter);
    tie_setting(s.point_filter_coefficient, ui.point_filter_slider);

    connect(&s.point_filter_coefficient, value_::value_changed<slider_value>(),
            ui.point_filter_label, [this] { ui.point_filter_label->setValue(*s.point_filter_coefficient); });
    ui.point_filter_label->setValue(*s.point_filter_coefficient);
}

// Human-readable label for the threshold slider: raw brightness in manual
// mode, the equivalent LED radius (in pixels) in auto-threshold mode.
QString TrackerDialog_PT::threshold_display_text(int threshold_value)
{
    if (!s.auto_threshold)
        return tr("Brightness %1/255").arg(threshold_value);
    else
    {
        pt_camera_info info;
        int w = s.cam_res_x, h = s.cam_res_y;

        if (w * h <= 0)
        {
            // No configured resolution; assume VGA as a baseline.
            w = 640, h = 480;
        }

        // Prefer the live camera resolution when the tracker is running.
        if (tracker && tracker->get_cam_info(info) && info.res_x * info.res_y != 0)
        {
            w = info.res_x;
            h = info.res_y;
        }

        double value = (double)pt_point_extractor::threshold_radius_value(w, h, threshold_value);

        return tr("LED radius %1 pixels").arg(value, 0, 'f', 2);
    }
}

// Slot for the calibration toggle button. On start: clear the offset and
// begin sampling. On stop: commit the estimated offset and report whether
// enough yaw/pitch samples were gathered.
void TrackerDialog_PT::startstop_trans_calib(bool start)
{
    QMutexLocker l(&calibrator_mutex);

    if (start)
    {
        qDebug() << "pt: starting translation calibration";
        calib_timer.start();
        trans_calib.reset();
        s.t_MH_x = 0;
        s.t_MH_y = 0;
        s.t_MH_z = 0;

        ui.sample_count_display->setText(QString());
    }
    else
    {
        calib_timer.stop();
        qDebug() << "pt: stopping translation calibration";
        {
            auto [tmp, nsamples] = trans_calib.get_estimate();
            s.t_MH_x = int(tmp[0]);
            s.t_MH_y = int(tmp[1]);
            s.t_MH_z = int(tmp[2]);

            constexpr int min_yaw_samples = 15;
            constexpr int min_pitch_samples = 15;
            constexpr int min_samples = min_yaw_samples+min_pitch_samples;

            // Don't bother counting roll samples. Roll calibration is hard enough
            // that it's a hidden unsupported feature anyway.

            QString sample_feedback;
            if (nsamples[0] < min_yaw_samples)
                sample_feedback = tr("%1 yaw samples. Yaw more to %2 samples for stable calibration.").arg(nsamples[0]).arg(min_yaw_samples);
            else if (nsamples[1] < min_pitch_samples)
                sample_feedback = tr("%1 pitch samples. Pitch more to %2 samples for stable calibration.").arg(nsamples[1]).arg(min_pitch_samples);
            else
            {
                const int nsamples_total = nsamples[0] + nsamples[1];
                sample_feedback = tr("%1 samples. Over %2, good!").arg(nsamples_total).arg(min_samples);
            }

            ui.sample_count_display->setText(sample_feedback);
        }
    }
    // Manual offset entry is disabled while calibration is running.
    ui.tx_spin->setEnabled(!start);
    ui.ty_spin->setEnabled(!start);
    ui.tz_spin->setEnabled(!start);

    if (start)
        ui.tcalib_button->setText(tr("Stop calibration"));
    else
        ui.tcalib_button->setText(tr("Start calibration"));
}

// Periodic status refresh: camera resolution/FPS and detected point count.
void TrackerDialog_PT::poll_tracker_info_impl()
{
    pt_camera_info info;
    if (tracker && tracker->get_cam_info(info))
    {
        ui.caminfo_label->setText(tr("%1x%2 @ %3 FPS").arg(info.res_x).arg(info.res_y).arg(iround(info.fps)));

        // display point info
        const int n_points = tracker->get_n_points();
        ui.pointinfo_label->setText((n_points == 3 ? tr("%1 OK!") : tr("%1 BAD!")).arg(n_points));
    }
    else
    {
        ui.caminfo_label->setText(tr("Tracker offline"));
        ui.pointinfo_label->setText(QString());
    }
}

void TrackerDialog_PT::set_camera_settings_available(const QString& /* camera_name */)
{
    // All backends currently expose a settings dialog.
    ui.camera_settings->setEnabled(true);
}

// Open the camera's native settings dialog — through the running tracker
// when active (it owns the device), otherwise directly.
void TrackerDialog_PT::show_camera_settings()
{
    if (tracker)
        tracker->open_camera_dialog_flag = true;
    else
        (void)video::show_dialog(s.camera_name);
}

// One calibration sample: feed the current pose into the estimator; if the
// tracker disappeared mid-calibration, abort cleanly.
void TrackerDialog_PT::trans_calib_step()
{
    QMutexLocker l(&calibrator_mutex);

    if (tracker)
    {
        Affine X_CM = tracker->pose();
        trans_calib.update(X_CM.R, X_CM.t);
    }
    else
        startstop_trans_calib(false);
}

void TrackerDialog_PT::save()
{
    s.b->save();
}

void TrackerDialog_PT::doOK()
{
    save();
    close();
}

void TrackerDialog_PT::doCancel()
{
    close();
}

// Called when the tracker starts: enable calibration and begin polling.
void TrackerDialog_PT::register_tracker(ITracker *t)
{
    tracker = static_cast<Tracker_PT*>(t);
    ui.tcalib_button->setEnabled(true);
    poll_tracker_info_impl();
    timer.start();
}

// Called when the tracker stops: disable calibration and stop polling.
void TrackerDialog_PT::unregister_tracker()
{
    tracker = nullptr;
    ui.tcalib_button->setEnabled(false);
    poll_tracker_info_impl();
    timer.stop();
}

} // ns pt_impl
#include <iostream> #include <fstream> #include <utility> #include <set> #include <CGAL/Random.h> #include <CGAL/Simple_cartesian.h> #include <CGAL/Exact_predicates_inexact_constructions_kernel.h> #include <CGAL/Polyhedron_3.h> #include <CGAL/Polyhedron_items_with_id_3.h> #include <CGAL/IO/Polyhedron_iostream.h> #include <CGAL/boost/graph/graph_traits_Polyhedron_3.h> #include <CGAL/boost/graph/iterator.h> #include <CGAL/Surface_mesh_shortest_path/Surface_mesh_shortest_path_traits.h> #include <CGAL/Surface_mesh_shortest_path/Surface_mesh_shortest_path.h> #include <CGAL/Surface_mesh_shortest_path/function_objects.h> #include <CGAL/Surface_mesh_shortest_path/barycentric.h> #include <CGAL/Surface_mesh_shortest_path/internal/misc_functions.h> #include <CGAL/test_util.h> #include "check.h" int main(int argc, char* argv[]) { typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel; typedef CGAL::Polyhedron_3<Kernel, CGAL::Polyhedron_items_with_id_3> Polyhedron_3; typedef CGAL::Surface_mesh_shortest_path_traits<Kernel, Polyhedron_3> Traits; typedef Traits::Barycentric_coordinate Barycentric_coordinate; typedef Traits::FT FT; typedef boost::graph_traits<Polyhedron_3> Graph_traits; typedef Graph_traits::vertex_descriptor vertex_descriptor; typedef Graph_traits::vertex_iterator vertex_iterator; typedef Graph_traits::face_descriptor face_descriptor; typedef Graph_traits::face_iterator face_iterator; typedef CGAL::Surface_mesh_shortest_path<Traits> Surface_mesh_shortest_path; typedef Surface_mesh_shortest_path::Face_location Face_location; typedef boost::property_map<Polyhedron_3, boost::vertex_index_t>::type VIM; typedef boost::property_map<Polyhedron_3, boost::halfedge_index_t>::type HIM; typedef boost::property_map<Polyhedron_3, boost::face_index_t>::type FIM; Traits traits; std::string mesh(argv[1]); int randSeed = 4983304; const size_t numTests = 15; if (argc > 2) { randSeed = std::atoi(argv[2]); } CGAL::Random rand(randSeed); Polyhedron_3 polyhedron; 
std::ifstream in(mesh.c_str()); in >> polyhedron; in.close(); CGAL::set_halfedgeds_items_id(polyhedron); VIM vertexIndexMap(get(boost::vertex_index, polyhedron)); HIM halfedgeIndexMap(get(boost::halfedge_index, polyhedron)); FIM faceIndexMap(get(boost::face_index, polyhedron)); face_iterator facesStart; face_iterator facesEnd; std::vector<face_descriptor> faces; boost::tie(facesStart, facesEnd) = CGAL::faces(polyhedron); for (face_iterator it = facesStart; it != facesEnd; ++it) { faces.push_back(*it); } Surface_mesh_shortest_path shortestPaths(polyhedron, traits); const size_t numInitialLocations = 10; std::vector<Face_location> sourcePoints; std::vector<Surface_mesh_shortest_path::Source_point_iterator> handles; // First, try adding a few locations for (size_t i = 0; i < numInitialLocations; ++i) { size_t faceId = rand.get_int(0, faces.size()); sourcePoints.push_back(Face_location(faces[faceId], CGAL::test::random_coordinate<Traits>(rand))); shortestPaths.add_source_point(sourcePoints.back().first, sourcePoints.back().second); } BOOST_CHECK_EQUAL(numInitialLocations, shortestPaths.number_of_source_points()); size_t checkNumLocations = 0; for (Surface_mesh_shortest_path::Source_point_iterator it = shortestPaths.source_points_begin(); it != shortestPaths.source_points_end(); ++it) { handles.push_back(it); ++checkNumLocations; } BOOST_CHECK_EQUAL(checkNumLocations, shortestPaths.number_of_source_points()); for (Surface_mesh_shortest_path::Source_point_iterator it = shortestPaths.source_points_begin(); it != shortestPaths.source_points_end(); ++it) { Surface_mesh_shortest_path::Shortest_path_result result = shortestPaths.shortest_distance_to_source_points(it->first, it->second); BOOST_CHECK_CLOSE(FT(0.0), result.first, FT(0.000001)); assert(result.second == it); } size_t currentCounter = 0; // Then, remove half of them for (size_t i = 0; i < handles.size(); ++i) { if (i % 2 == 0) { shortestPaths.remove_source_point(handles[i]); } } 
BOOST_CHECK_EQUAL(numInitialLocations / 2, shortestPaths.number_of_source_points()); // and ensure that they are indeed removed for (size_t i = 0; i < sourcePoints.size(); ++i) { Surface_mesh_shortest_path::Shortest_path_result result = shortestPaths.shortest_distance_to_source_points(sourcePoints[i].first, sourcePoints[i].second); if (i % 2 != 0) { BOOST_CHECK_CLOSE(FT(0.0), result.first, FT(0.000001)); assert(handles[i] == result.second); } else { BOOST_CHECK_MESSAGE(result.first < FT(0.0) || result.first > FT(0.00001), "Incorrect resulting distance: " << result.first); } } // add a few back for (size_t i = 0; i < handles.size(); ++i) { if (i % 2 == 0) { handles[i] = shortestPaths.add_source_point(sourcePoints[i]); } } // ... and remove some others for (size_t i = 0; i < handles.size(); ++i) { if (i % 3 == 0) { shortestPaths.remove_source_point(handles[i]); } } // and check it once again for (size_t i = 0; i < sourcePoints.size(); ++i) { Surface_mesh_shortest_path::Shortest_path_result result = shortestPaths.shortest_distance_to_source_points(sourcePoints[i].first, sourcePoints[i].second); if (i % 3 != 0) { BOOST_CHECK_CLOSE(FT(0.0), result.first, FT(0.000001)); assert(handles[i] == result.second); } else { BOOST_CHECK_MESSAGE(result.first < FT(0.0) || result.first > FT(0.00001), "Resulted distance: " << result.first); } } return 0; }
/* The MIT License (MIT)

Copyright (c) 2016 British Broadcasting Corporation.
This software is provided by Lancaster University by arrangement with the BBC.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/

/**
  * Class definition for the AKHILAFLEXMessageBus.
  *
  * The AKHILAFLEXMessageBus is the common mechanism to deliver asynchronous events on the
  * AKHILAFLEX platform. It serves a number of purposes:
  *
  * 1) It provides an eventing abstraction that is independent of the underlying substrate.
  *
  * 2) It provides a mechanism to decouple user code from trusted system code
  *    i.e. the basis of a message passing nano kernel.
  *
  * 3) It allows a common high level eventing abstraction across a range of hardware types, e.g. buttons, BLE...
  *
  * 4) It provides a mechanism for extensibility - new devices added via I/O pins can have OO based
  *    drivers and communicate via the message bus with minimal impact on user level languages.
  *
  * 5) It allows for the possibility of event / data aggregation, which in turn can save energy.
 *
 * It has the following design principles:
 *
 * 1) Maintain a low RAM footprint where possible
 *
 * 2) Make few assumptions about the underlying platform, but allow optimizations where possible.
 */
#include "AKHILAFLEXConfig.h"
#include "AKHILAFLEXMessageBus.h"
#include "AKHILAFLEXFiber.h"
#include "ErrorNo.h"

/**
  * Default constructor.
  *
  * Adds itself as a fiber component, and also configures itself to be the
  * default EventModel if defaultEventBus is NULL.
  */
AKHILAFLEXMessageBus::AKHILAFLEXMessageBus()
{
    this->listeners = NULL;
    this->evt_queue_head = NULL;
    this->evt_queue_tail = NULL;
    this->queueLength = 0;

    // Register for idle callbacks so queued events get drained in idleTick().
    fiber_add_idle_component(this);

    if(EventModel::defaultEventBus == NULL)
        EventModel::defaultEventBus = this;
}

/**
  * Invokes a callback on a given AKHILAFLEXListener
  *
  * Internal wrapper function, used to enable
  * parameterised callbacks through the fiber scheduler.
  */
void async_callback(void *param)
{
    AKHILAFLEXListener *listener = (AKHILAFLEXListener *)param;

    // OK, now we need to decide how to behave depending on our configuration.
    // If there is a fiber already active within this listener then check our
    // configuration to determine the correct course of action.
    //

    if (listener->flags & MESSAGE_BUS_LISTENER_BUSY)
    {
        // Drop this event, if that's how we've been configured.
        if (listener->flags & MESSAGE_BUS_LISTENER_DROP_IF_BUSY)
            return;

        // Queue this event up for later, if that's how we've been configured.
        if (listener->flags & MESSAGE_BUS_LISTENER_QUEUE_IF_BUSY)
        {
#if (MESSAGE_BUS_CONCURRENCY_MODE == MESSAGE_BUS_CONCURRENT_LISTENERS)
            listener->queue(listener->evt);
            return;
#endif
        }
    }

    // Determine the calling convention for the callback, and invoke...
    // C++ is really bad at this! Especially as the ARM compiler is yet to support C++ 11 :-/

#if (MESSAGE_BUS_CONCURRENCY_MODE == MESSAGE_BUS_CONCURRENT_EVENTS)
    // Serialize all listener invocations through the listener's lock.
    listener->lock.wait();
#endif

    // Record that we have a fiber going into this listener...
    listener->flags |= MESSAGE_BUS_LISTENER_BUSY;

    while (1)
    {
        // Firstly, check for a method callback into an object.
        if (listener->flags & MESSAGE_BUS_LISTENER_METHOD)
            listener->cb_method->fire(listener->evt);

        // Now a parameterised C function
        else if (listener->flags & MESSAGE_BUS_LISTENER_PARAMETERISED)
            listener->cb_param(listener->evt, listener->cb_arg);

        // We must have a plain C function
        else
            listener->cb(listener->evt);

        // If there are more events to process, dequeue the next one and process it.
        if ((listener->flags & MESSAGE_BUS_LISTENER_QUEUE_IF_BUSY) && listener->evt_queue)
        {
            AKHILAFLEXEventQueueItem *item = listener->evt_queue;

            listener->evt = item->evt;
            listener->evt_queue = listener->evt_queue->next;
            delete item;

            // We spin the scheduler here, to prevent any particular event handler from continuously holding onto resources.
            schedule();
        }
        else
            break;
    }

    // The fiber is exiting... clear our state.
    listener->flags &= ~MESSAGE_BUS_LISTENER_BUSY;

#if (MESSAGE_BUS_CONCURRENCY_MODE == MESSAGE_BUS_CONCURRENT_EVENTS)
    listener->lock.notify();
#endif
}

/**
  * Queue the given event for processing at a later time.
  * Add the given event at the tail of our queue.
  *
  * @param The event to queue.
  */
void AKHILAFLEXMessageBus::queueEvent(AKHILAFLEXEvent &evt)
{
    int processingComplete;

    // Snapshot the tail now: events generated while processing URGENT
    // handlers below must be queued *after* this event (see comment below).
    AKHILAFLEXEventQueueItem *prev = evt_queue_tail;

    // Now process all handlers registered as URGENT.
    // These pre-empt the queue, and are useful for fast, high priority services.
    processingComplete = this->process(evt, true);

    // If we've already processed all event handlers, we're all done.
    // No need to queue the event.
    if (processingComplete)
        return;

    // If we need to queue, but there is no space, then there's nothing we can do.
    if (queueLength >= MESSAGE_BUS_LISTENER_MAX_QUEUE_DEPTH)
        return;

    // Otherwise, we need to queue this event for later processing...
    // We queue this event at the tail of the queue at the point where we entered queueEvent()
    // This is important as the processing above *may* have generated further events, and
    // we want to maintain ordering of events.
    AKHILAFLEXEventQueueItem *item = new AKHILAFLEXEventQueueItem(evt);

    // Queue manipulation must be atomic w.r.t. interrupt handlers that also send events.
    __disable_irq();

    if (prev == NULL)
    {
        // The queue was empty when we entered this function, so queue our event at the start of the queue.
        item->next = evt_queue_head;
        evt_queue_head = item;
    }
    else
    {
        item->next = prev->next;
        prev->next = item;
    }

    if (item->next == NULL)
        evt_queue_tail = item;

    queueLength++;

    __enable_irq();
}

/**
  * Extract the next event from the front of the event queue (if present).
  *
  * @return a pointer to the AKHILAFLEXEventQueueItem that is at the head of the list.
  */
AKHILAFLEXEventQueueItem* AKHILAFLEXMessageBus::dequeueEvent()
{
    AKHILAFLEXEventQueueItem *item = NULL;

    // Atomic pop: the queue may be appended to from interrupt context.
    __disable_irq();

    if (evt_queue_head != NULL)
    {
        item = evt_queue_head;
        evt_queue_head = item->next;

        if (evt_queue_head == NULL)
            evt_queue_tail = NULL;

        queueLength--;
    }

    __enable_irq();

    return item;
}

/**
  * Cleanup any AKHILAFLEXListeners marked for deletion from the list.
  *
  * Listeners are only unlinked and freed once no fiber is actively running
  * inside them (i.e. the BUSY flag is clear).
  *
  * @return The number of listeners removed from the list.
  */
int AKHILAFLEXMessageBus::deleteMarkedListeners()
{
    AKHILAFLEXListener *l, *p;
    int removed = 0;

    l = listeners;
    p = NULL;

    // Walk this list of event handlers. Delete any that match the given listener.
    while (l != NULL)
    {
        if ((l->flags & MESSAGE_BUS_LISTENER_DELETING) && !(l->flags & MESSAGE_BUS_LISTENER_BUSY))
        {
            if (p == NULL)
                listeners = l->next;
            else
                p->next = l->next;

            // delete the listener.
            AKHILAFLEXListener *t = l;
            l = l->next;

            delete t;
            removed++;

            continue;
        }

        p = l;
        l = l->next;
    }

    return removed;
}

// Scratch storage used to hand the current event to process_sequentially()
// when concurrency mode serializes event processing through a single fiber.
AKHILAFLEXEvent last_event;
void process_sequentially(void *param)
{
    AKHILAFLEXMessageBus *m = (AKHILAFLEXMessageBus *)param;
    m->process(last_event);
}

/**
  * Periodic callback from AKHILAFLEX.
  *
  * Process at least one event from the event queue, if it is not empty.
  * We then continue processing events until something appears on the runqueue.
  */
void AKHILAFLEXMessageBus::idleTick()
{
    // Clear out any listeners marked for deletion
    this->deleteMarkedListeners();

    AKHILAFLEXEventQueueItem *item = this->dequeueEvent();

    // Whilst there are events to process and we have no useful other work to do, pull them off the queue and process them.
    while (item)
    {
        // send the event to all standard event listeners.
#if (MESSAGE_BUS_CONCURRENCY_MODE == MESSAGE_BUS_CONCURRENT_EVENTS)
        last_event = item->evt;
        invoke(process_sequentially,this);
#else
        this->process(item->evt);
#endif

        // Free the queue item.
        delete item;

        // If we have created some useful work to do, we stop processing.
        // This helps to minimise the number of blocked fibers we create at any point in time, therefore
        // also reducing the RAM footprint.
        if(!scheduler_runqueue_empty())
            break;

        // Pull the next event to process, if there is one.
        item = this->dequeueEvent();
    }
}

/**
  * Queues the given event to be sent to all registered recipients.
  *
  * @param evt The event to send.
  *
  * @code
  * AKHILAFLEXMessageBus bus;
  *
  * // Creates and sends the AKHILAFLEXEvent using bus.
  * AKHILAFLEXEvent evt(AKHILAFLEX_ID_BUTTON_A, AKHILAFLEX_BUTTON_EVT_CLICK);
  *
  * // Creates the AKHILAFLEXEvent, but delays the sending of that event.
  * AKHILAFLEXEvent evt1(AKHILAFLEX_ID_BUTTON_A, AKHILAFLEX_BUTTON_EVT_CLICK, CREATE_ONLY);
  *
  * bus.send(evt1);
  *
  * // This has the same effect!
  * evt1.fire()
  * @endcode
  */
int AKHILAFLEXMessageBus::send(AKHILAFLEXEvent evt)
{
    // We simply queue processing of the event until we're scheduled in normal thread context.
    // We do this to avoid the possibility of executing event handler code in IRQ context, which may bring
    // hidden race conditions to kids code. Queuing all events ensures causal ordering (total ordering in fact).
    this->queueEvent(evt);
    return AKHILAFLEX_OK;
}

/**
  * Internal function, used to deliver the given event to all relevant recipients.
  * Normally, this is called once an event has been removed from the event queue.
  *
  * @param evt The event to send.
  *
  * @param urgent The type of listeners to process (optional). If set to true, only listeners defined as urgent and non-blocking will be processed
  *               otherwise, all other (standard) listeners will be processed. Defaults to false.
  *
  * @return 1 if all matching listeners were processed, 0 if further processing is required.
  *
  * @note It is recommended that all external code uses the send() function instead of this function,
  *       or the constructors provided by AKHILAFLEXEvent.
  */
int AKHILAFLEXMessageBus::process(AKHILAFLEXEvent evt, bool urgent)
{
    AKHILAFLEXListener *l;
    int complete = 1;
    bool listenerUrgent;

    l = listeners;
    while (l != NULL)
    {
        // Match on source/value, with ANY acting as a wildcard on either field.
        if((l->id == evt.source || l->id == AKHILAFLEX_ID_ANY) && (l->value == evt.value || l->value == AKHILAFLEX_EVT_ANY))
        {
            // If we're running under the fiber scheduler, then derive the THREADING_MODE for the callback based on the
            // metadata in the listener itself.
            if (fiber_scheduler_running())
                listenerUrgent = (l->flags & MESSAGE_BUS_LISTENER_IMMEDIATE) == MESSAGE_BUS_LISTENER_IMMEDIATE;
            else
                listenerUrgent = true;

            // If we should process this event handler in this pass, then activate the listener.
            if(listenerUrgent == urgent && !(l->flags & MESSAGE_BUS_LISTENER_DELETING))
            {
                l->evt = evt;

                // OK, if this handler has registered itself as non-blocking, we just execute it directly...
                // This is normally only done for trusted system components.
                // Otherwise, we invoke it in a 'fork on block' context, that will automatically create a fiber
                // should the event handler attempt a blocking operation, but doesn't have the overhead
                // of creating a fiber needlessly. (cool huh?)
#if (MESSAGE_BUS_CONCURRENCY_MODE == MESSAGE_BUS_CONCURRENT_LISTENERS)
                if (!(l->flags & MESSAGE_BUS_LISTENER_NONBLOCKING) && fiber_scheduler_running())
                    invoke(async_callback, l);
                else
#endif
                    async_callback(l);
            }
            else
            {
                // Listener matched but wasn't eligible on this pass; caller must run the other pass.
                complete = 0;
            }
        }

        l = l->next;
    }

    return complete;
}

/**
  * Add the given AKHILAFLEXListener to the list of event handlers, unconditionally.
  *
  * @param listener The AKHILAFLEXListener to add.
  *
  * @return AKHILAFLEX_OK if the listener is valid, AKHILAFLEX_INVALID_PARAMETER otherwise.
  */
int AKHILAFLEXMessageBus::add(AKHILAFLEXListener *newListener)
{
    AKHILAFLEXListener *l, *p;
    int methodCallback;

    //handler can't be NULL!
    if (newListener == NULL)
        return AKHILAFLEX_INVALID_PARAMETER;

    l = listeners;

    // Firstly, we treat a listener as an idempotent operation. Ensure we don't already have this handler
    // registered in a way that will already capture these events. If we do, silently ignore.

    // We always check the ID, VALUE and CB_METHOD fields.
    // If we have a callback to a method, check the cb_method class. Otherwise, the cb function pointer is sufficient.
    while (l != NULL)
    {
        methodCallback = (newListener->flags & MESSAGE_BUS_LISTENER_METHOD) && (l->flags & MESSAGE_BUS_LISTENER_METHOD);

        if (l->id == newListener->id && l->value == newListener->value && (methodCallback ? *l->cb_method == *newListener->cb_method : l->cb == newListener->cb) && newListener->cb_arg == l->cb_arg)
        {
            // We have a perfect match for this event listener already registered.
            // If it's marked for deletion, we simply resurrect the listener, and we're done.
            // Either way, we return an error code, as the *new* listener should be released...
            if(l->flags & MESSAGE_BUS_LISTENER_DELETING)
                l->flags &= ~MESSAGE_BUS_LISTENER_DELETING;

            return AKHILAFLEX_NOT_SUPPORTED;
        }

        l = l->next;
    }

    // We have a valid, new event handler. Add it to the list.
    // if listeners is null - we can automatically add this listener to the list at the beginning...
    if (listeners == NULL)
    {
        listeners = newListener;
        AKHILAFLEXEvent(AKHILAFLEX_ID_MESSAGE_BUS_LISTENER, newListener->id);

        return AKHILAFLEX_OK;
    }

    // We maintain an ordered list of listeners.
    // The chain is held strictly in increasing order of ID (first level), then value code (second level).
    // Find the correct point in the chain for this event.
    // Adding a listener is a rare occurrence, so we just walk the list...

    p = listeners;
    l = listeners;

    while (l != NULL && l->id < newListener->id)
    {
        p = l;
        l = l->next;
    }

    while (l != NULL && l->id == newListener->id && l->value <= newListener->value)
    {
        p = l;
        l = l->next;
    }

    //add at front of list
    if (p == listeners && (newListener->id < p->id || (p->id == newListener->id && p->value > newListener->value)))
    {
        newListener->next = p;

        //this new listener is now the front!
        listeners = newListener;
    }

    //add after p
    else
    {
        newListener->next = p->next;
        p->next = newListener;
    }

    AKHILAFLEXEvent(AKHILAFLEX_ID_MESSAGE_BUS_LISTENER, newListener->id);

    return AKHILAFLEX_OK;
}

/**
  * Remove the given AKHILAFLEXListener from the list of event handlers.
  *
  * Matching listeners are only *marked* for deletion here; the actual unlink
  * and free happens later in deleteMarkedListeners(), once they are not busy.
  *
  * @param listener The AKHILAFLEXListener to remove.
  *
  * @return AKHILAFLEX_OK if the listener is valid, AKHILAFLEX_INVALID_PARAMETER otherwise.
  */
int AKHILAFLEXMessageBus::remove(AKHILAFLEXListener *listener)
{
    AKHILAFLEXListener *l;
    int removed = 0;

    //handler can't be NULL!
    if (listener == NULL)
        return AKHILAFLEX_INVALID_PARAMETER;

    l = listeners;

    // Walk this list of event handlers. Delete any that match the given listener.
    while (l != NULL)
    {
        if ((listener->flags & MESSAGE_BUS_LISTENER_METHOD) == (l->flags & MESSAGE_BUS_LISTENER_METHOD))
        {
            if(((listener->flags & MESSAGE_BUS_LISTENER_METHOD) && (*l->cb_method == *listener->cb_method)) ||
              ((!(listener->flags & MESSAGE_BUS_LISTENER_METHOD) && l->cb == listener->cb)))
            {
                // A NULL cb_arg in the query acts as a wildcard on the listener's argument.
                if ((listener->id == AKHILAFLEX_ID_ANY || listener->id == l->id) &&
                    (listener->value == AKHILAFLEX_EVT_ANY || listener->value == l->value) &&
                    (listener->cb_arg == l->cb_arg || listener->cb_arg == NULL))
                {
                    // if notification of deletion has been requested, invoke the listener deletion callback.
                    if (listener_deletion_callback)
                        listener_deletion_callback(l);

                    // Found a match. mark this to be removed from the list.
                    l->flags |= MESSAGE_BUS_LISTENER_DELETING;
                    removed++;
                }
            }
        }

        l = l->next;
    }

    if (removed > 0)
        return AKHILAFLEX_OK;
    else
        return AKHILAFLEX_INVALID_PARAMETER;
}

/**
  * Returns the AKHILAFLEXListener with the given position in our list.
  *
  * @param n The position in the list to return.
  *
  * @return the AKHILAFLEXListener at postion n in the list, or NULL if the position is invalid.
  */
AKHILAFLEXListener* AKHILAFLEXMessageBus::elementAt(int n)
{
    AKHILAFLEXListener *l = listeners;

    while (n > 0)
    {
        if (l == NULL)
            return NULL;

        n--;
        l = l->next;
    }

    return l;
}

/**
  * Destructor for AKHILAFLEXMessageBus, where we deregister this instance from the array of fiber components.
  */
AKHILAFLEXMessageBus::~AKHILAFLEXMessageBus()
{
    fiber_remove_idle_component(this);
}
//===-- Array ---------------------------------------------------*- C++ -*-===// /// /// \file /// \brief Array /// \details STL compliant container wrapper for arrays of constant size. /// \details 符合 STL 的容器包装,用于恒定大小的数组。 /// /// \sa <https://boost.org/doc/libs/master/libs/array/> /// /// \version 2019-11-09 /// \since 2019-11-09 /// \authors zhengrr /// \copyright Unlicense /// //===----------------------------------------------------------------------===//
/// See ../../License.txt for license info. #pragma once #include <cmath> #include <vector> #include "../Core/Vector2d.hpp" namespace HANDYMATH_NS { struct AABB2d { Vector2d Min; Vector2d Max; AABB2d(); AABB2d(Vector2d const & point); AABB2d(std::vector<Vector2d> const & points); AABB2d(Vector2d const & min, Vector2d const & max); bool IsEverything() const; bool IsNothing() const; bool IsPoint() const; Vector2d PointCenter() const; Vector2d PointN() const; Vector2d PointS() const; Vector2d PointE() const; Vector2d PointW() const; Vector2d PointNW() const; Vector2d PointNE() const; Vector2d PointSE() const; Vector2d PointSW() const; Vector2d Size() const; Vector2d HalfSize() const; double Area() const; double Perimeter() const; void AddPoint(Vector2d const & p); void AddPoints(std::vector<Vector2d> const & ps); void AddAABB(AABB2d const & aabb); bool Envelopes (AABB2d const & aabb) const; bool Intersects(AABB2d const & aabb) const; bool IntersectsFastFinite(AABB2d const & aabb) const; bool Intersects(Vector2d const & v) const; AABB2d Intersection(AABB2d const & aabb) const; static AABB2d Everything(); static AABB2d Nothing(); }; FORCEINLINE AABB2d::AABB2d() : Min(Vector2d::NaN()), Max(Vector2d::NaN()) { } FORCEINLINE AABB2d::AABB2d(Vector2d const & point) : AABB2d() { AddPoint(point); } FORCEINLINE AABB2d::AABB2d(std::vector<Vector2d> const & points) : AABB2d() { AddPoints(points); } FORCEINLINE AABB2d::AABB2d(Vector2d const & min, Vector2d const & max) : Min(min), Max(max) { } FORCEINLINE bool AABB2d::IsEverything() const { return Min.IsNegativeInfinity() && Max.IsPositiveInfinity(); } FORCEINLINE bool AABB2d::IsNothing() const { return Min.IsNaN() || Max.IsNaN(); } FORCEINLINE bool AABB2d::IsPoint() const { return Min == Max; } FORCEINLINE Vector2d AABB2d::PointCenter() const { return (Min + Max) * 0.5f; } FORCEINLINE Vector2d AABB2d::PointN() const { return Vector2d((Min.X + Max.X) * 0.5f, Max.Y); } FORCEINLINE Vector2d AABB2d::PointS() const { return 
Vector2d((Min.X + Max.X) * 0.5f, Min.Y); } FORCEINLINE Vector2d AABB2d::PointE() const { return Vector2d( Max.X, (Min.Y + Max.Y) * 0.5f); } FORCEINLINE Vector2d AABB2d::PointW() const { return Vector2d( Min.X, (Min.Y + Max.Y) * 0.5f); } FORCEINLINE Vector2d AABB2d::PointNW() const { return Vector2d( Min.X, Max.Y); } FORCEINLINE Vector2d AABB2d::PointNE() const { return Max; } FORCEINLINE Vector2d AABB2d::PointSE() const { return Vector2d(Max.X, Min.Y); } FORCEINLINE Vector2d AABB2d::PointSW() const { return Min; } FORCEINLINE Vector2d AABB2d::Size() const { return (Max - Min); } FORCEINLINE Vector2d AABB2d::HalfSize() const { return (Max - Min) * 0.5f; } FORCEINLINE double AABB2d::Area() const { return Size().Product(); } FORCEINLINE double AABB2d::Perimeter() const { return Size().Sum() * 2.0f; } FORCEINLINE void AABB2d::AddPoint(Vector2d const & point) { if (point.HasNaN()) throw std::runtime_error("Cannot add NaN to AABB2d."); if (IsEverything())// || (IsNothing() && point.HasNaN())) return; if (IsNothing()) { Min = Max = point; return; } Min = Min.Min(point); Max = Max.Max(point); } FORCEINLINE void AABB2d::AddPoints(std::vector<Vector2d> const & points) { if (IsEverything()) return; for (auto const & point : points) AddPoint(point); } FORCEINLINE void AABB2d::AddAABB(AABB2d const & aabb) { if (aabb.IsNothing() || IsEverything()) return; if (aabb.IsEverything() || IsNothing()) { *this = aabb; return; } AddPoint(aabb.Min); AddPoint(aabb.Max); } FORCEINLINE bool AABB2d::Envelopes(AABB2d const & aabb) const { if (IsNothing() || aabb.IsNothing()) return false; if (IsEverything()) return true; return Min.X <= aabb.Min.X && Min.Y <= aabb.Min.Y && Max.X >= aabb.Max.X && Max.Y >= aabb.Max.Y; } FORCEINLINE bool AABB2d::Intersects(AABB2d const & aabb) const { if (IsNothing() || aabb.IsNothing()) return false; if (IsEverything() || aabb.IsEverything()) return true; return !(Max.X < aabb.Min.X || Min.X > aabb.Max.X || Max.Y < aabb.Min.Y || Min.Y > aabb.Max.Y); } 
FORCEINLINE bool AABB2d::IntersectsFastFinite(AABB2d const & aabb) const { //return !(Max.Compare<Math::CMP::AnyLT>(aabb.Min) || Min.Compare<Math::CMP::AnyGT>(aabb.Max)); return !(Max.X < aabb.Min.X || Min.X > aabb.Max.X || Max.Y < aabb.Min.Y || Min.Y > aabb.Max.Y); } FORCEINLINE AABB2d AABB2d::Intersection(AABB2d const & aabb) const { return AABB2d(Min.Max(aabb.Min), Max.Min(aabb.Max)); } FORCEINLINE bool AABB2d::Intersects(Vector2d const & v) const { return Intersects(AABB2d(v)); } FORCEINLINE /*static*/ AABB2d AABB2d::Everything() { return AABB2d(Vector2d::NegativeInfinity(), Vector2d::PositiveInfinity()); } FORCEINLINE /*static*/ AABB2d AABB2d::Nothing() { return AABB2d(Vector2d::NaN(), Vector2d::NaN()); } } namespace std { FORCEINLINE std::string to_string(::HANDYMATH_NS::AABB2d const & v) { return "["s + std::to_string(v.Min) + "; "s + std::to_string(v.Max) + "]"s; } }
#include "stdafx.h"
#include "CppUnitTest.h"
#include <vector>
#include <algorithm>
#include <list>
#include "../../../varray.hpp"

using namespace Microsoft::VisualStudio::CppUnitTestFramework;

namespace hmLib {
	// Unit tests for the fixed-size arithmetic array varray<T, N>.
	TEST_CLASS(test_varray) {
	public:
		// Fill-construction with 0.0: every element is zero.
		TEST_METHOD(construct) {
			varray<double, 4> Element(0.0);
			Assert::AreEqual(0.0, Element[0], 1.0e-10, L"ValueError");
			Assert::AreEqual(0.0, Element[1], 1.0e-10, L"ValueError");
			Assert::AreEqual(0.0, Element[2], 1.0e-10, L"ValueError");
			Assert::AreEqual(0.0, Element[3], 1.0e-10, L"ValueError");
		}
		// Fill-construction with a non-zero value: every element takes it.
		TEST_METHOD(construct_value) {
			varray<double, 4> Element(0.4);
			Assert::AreEqual(0.4, Element[0], 1.0e-10, L"ValueError");
			Assert::AreEqual(0.4, Element[1], 1.0e-10, L"ValueError");
			Assert::AreEqual(0.4, Element[2], 1.0e-10, L"ValueError");
			Assert::AreEqual(0.4, Element[3], 1.0e-10, L"ValueError");
		}
		// Brace-initializer construction assigns elements in order.
		TEST_METHOD(construct_initialize_list) {
			varray<double, 4> Element = { 4.0,5.0,6.0,7.0 };
			Assert::AreEqual(4.0, Element[0], 1.0e-10, L"ValueError");
			Assert::AreEqual(5.0, Element[1], 1.0e-10, L"ValueError");
			Assert::AreEqual(6.0, Element[2], 1.0e-10, L"ValueError");
			Assert::AreEqual(7.0, Element[3], 1.0e-10, L"ValueError");
		}
		// Element-wise addition of two arrays.
		TEST_METHOD(add) {
			varray<double, 3> e1{ 1.2,2.4,3.6 };
			varray<double, 3> e2{ 1.0,1.0,1.0 };
			auto e3 = e1 + e2;
			Assert::AreEqual(2.2, e3[0], 1.0e-10, L"ValueError");
			Assert::AreEqual(3.4, e3[1], 1.0e-10, L"ValueError");
			Assert::AreEqual(4.6, e3[2], 1.0e-10, L"ValueError");
		}
		// Scalar multiplication scales every element.
		TEST_METHOD(multiple) {
			varray<double, 3> e1{ 1.2,2.4,3.6 };
			auto e3 = e1 * 2;
			Assert::AreEqual(2.4, e3[0], 1.0e-10, L"ValueError");
			Assert::AreEqual(4.8, e3[1], 1.0e-10, L"ValueError");
			Assert::AreEqual(7.2, e3[2], 1.0e-10, L"ValueError");
		}
		// Result-type promotion rules for scalar and element-wise addition.
		TEST_METHOD(add_scalar) {
			varray<int, 3> ei{ 1,2,3 };
			auto eii = ei + 2;
			Assert::IsTrue(std::is_same_v<decltype(eii), varray<int, 3>>, L"add int+int = int");
			auto eif = ei + 2.0;
			Assert::IsTrue(std::is_same_v<decltype(eif), varray<double, 3>>, L"add int+double= double");
			auto eeii = ei + ei;
			// BUG FIX: message previously claimed "double+double" for this int+int case.
			Assert::IsTrue(std::is_same_v<decltype(eeii), varray<int, 3>>, L"add int+int = int");
			varray<double, 3> ef{ 1.0,2.0,3.0 };
			auto efi = ef + 2;
			Assert::IsTrue(std::is_same_v<decltype(efi), varray<double, 3>>, L"add double+int = double");
			auto eff = ef + 2.0;
			Assert::IsTrue(std::is_same_v<decltype(eff), varray<double, 3>>, L"add double+double= double");
			auto eeff = ei + ef;
			// BUG FIX: message previously claimed "double+double" for this int+double case.
			Assert::IsTrue(std::is_same_v<decltype(eeff), varray<double, 3>>, L"add int+double= double");
		}
		// operator% is only defined for integral element types.
		TEST_METHOD(divmod) {
			//varray<double, 3> e1{ 1.2,2.4,3.6 };
			//auto e1a = e1%3;	//compile error
			varray<int, 3> e2{ 5,6,7 };
			auto e2a = e2%3;
			Assert::AreEqual(2, e2a[0]);
			Assert::AreEqual(0, e2a[1]);
			Assert::AreEqual(1, e2a[2]);
		}
	};
}
// Copyright 2017,2018,2019,2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // array.cpp #include <nbla/array.hpp> #include <nbla/common.hpp> #include <nbla/exception.hpp> #include <vector> namespace nbla { using std::vector; Array::Array(const Size_t size, dtypes dtype, const Context &ctx, AllocatorMemory &&mem) : size_(size), dtype_(dtype), ctx_(ctx), mem_(std::move(mem)) {} Array::~Array() { wait_event(ctx_); } size_t Array::size_as_bytes(Size_t size, dtypes dtype) { return size * sizeof_dtype(dtype); } Context Array::filter_context(const Context &ctx) { NBLA_ERROR(error_code::not_implemented, "Array must implement filter_context(const Context&)."); } void Array::set_event(EventPtr e) { event_ = e; } void Array::wait_event(const Context ctx, const int async_flags) { if (event_) { event_->wait_event(ctx, async_flags); event_.reset(); } } bool Array::have_event() { return event_ != nullptr; } Array::Ptr Array::getptr() { return shared_from_this(); } }
/*=============================================================================
  Copyright (c) 2011-2019 Bolero MURAKAMI
  https://github.com/bolero-MURAKAMI/Sprout

  Distributed under the Boost Software License, Version 1.0. (See accompanying
  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#ifndef SPROUT_VALARRAY_TUPLE_HPP
#define SPROUT_VALARRAY_TUPLE_HPP

#include <type_traits>
#include <tuple>
#include <sprout/config.hpp>
#include <sprout/workaround/std/cstddef.hpp>
#include <sprout/valarray/valarray.hpp>
#include <sprout/utility/move.hpp>
#include <sprout/tuple/tuple/get.hpp>
#include <sprout/type_traits/integral_constant.hpp>
#include <sprout/type_traits/identity.hpp>
#include <sprout/detail/nil_base.hpp>

// Tuple-protocol adapters for sprout::valarray<T, N>: element access via
// tuple_get<I>, plus std::tuple_size / std::tuple_element specializations.
namespace sprout {
	//
	// tuple_get
	//
	// Compile-time-checked element access: the index bound is enforced with a
	// static_assert, and the element is returned by (const) reference.
	template<std::size_t I, typename T, std::size_t N>
	inline SPROUT_CONSTEXPR T&
	tuple_get(sprout::valarray<T, N>& t) SPROUT_NOEXCEPT {
		static_assert(I < N, "tuple_get: index out of range");
		return t[I];
	}
	template<std::size_t I, typename T, std::size_t N>
	inline SPROUT_CONSTEXPR T const&
	tuple_get(sprout::valarray<T, N> const& t) SPROUT_NOEXCEPT {
		static_assert(I < N, "tuple_get: index out of range");
		return t[I];
	}
	// Rvalue overload: delegates to sprout::tuples::get and moves the element
	// out. NOTE(review): unlike the lvalue overloads there is no static_assert
	// here — presumably sprout::tuples::get diagnoses out-of-range I; confirm.
	template<std::size_t I, typename T, std::size_t N>
	inline SPROUT_CONSTEXPR T&&
	tuple_get(sprout::valarray<T, N>&& t) SPROUT_NOEXCEPT {
		return sprout::move(sprout::tuples::get<I>(t));
	}
}	// namespace sprout

namespace std {
#if defined(__clang__)
#	pragma clang diagnostic push
#	pragma clang diagnostic ignored "-Wmismatched-tags"
#endif
	//
	// tuple_size
	//
	// A valarray<T, N> participates in the tuple protocol as N elements.
	template<typename T, std::size_t N>
	struct tuple_size<sprout::valarray<T, N> >
		: public sprout::integral_constant<std::size_t, N>
	{};
	//
	// tuple_element
	//
	// Every in-range element has type T; an out-of-range I selects
	// sprout::detail::nil_base instead, turning misuse into a compile error.
	template<std::size_t I, typename T, std::size_t N>
	struct tuple_element<I, sprout::valarray<T, N> >
		: public std::conditional<(I < N), sprout::identity<T>, sprout::detail::nil_base>::type
	{};
#if defined(__clang__)
#	pragma clang diagnostic pop
#endif
}	// namespace std

#endif	// #ifndef SPROUT_VALARRAY_TUPLE_HPP
/* This file is part of Cloudy and is copyright (C)1978-2019 by Gary J. Ferland and * others. For conditions of distribution and use see copyright notice in license.txt */ /* HyperfineCreat establish space for hf arrays, reads atomic data from hyperfine.dat */ /* HyperfineCS - returns collision strengths for hyperfine struc transitions */ /*H21cm computes rate for H 21 cm from upper to lower excitation by atomic hydrogen */ /*h21_t_ge_20 compute rate for H 21 cm from upper to lower excitation by atomic hydrogen */ /*h21_t_lt_20 compute rate for H 21 cm from upper to lower excitation by atomic hydrogen */ /*H21cm_electron compute H 21 cm rate from upper to lower excitation by electrons - call by CoolEvaluate */ /*H21cm_H_atom - evaluate H atom spin changing collision rate, called by CoolEvaluate */ /*H21cm_proton - evaluate proton spin changing H atom collision rate, */ #include "cddefines.h" #include "abund.h" #include "conv.h" #include "phycon.h" #include "dense.h" #include "rfield.h" #include "taulines.h" #include "iso.h" #include "trace.h" #include "hyperfine.h" #include "lines_service.h" #include "service.h" /* H21_cm_pops - fine level populations for 21 cm with Lya pumping included * called in CoolEvaluate */ void H21_cm_pops( void ) { /*atom_level2( HFLines[0] );*/ /*return;*/ /* things we know on entry to this routine: total population of 2p: iso_sp[ipH_LIKE][ipHYDROGEN].st[ipH2p].Pop total population of 1s: iso_sp[ipH_LIKE][ipHYDROGEN].st[ipH1s].Pop continuum pumping rate (lo-up) inside 21 cm line: HFLines[0].pump() upper to lower collision rate inside 21 cm line: HFLines[0].cs*dense.cdsqte occupation number inside Lya: OccupationNumberLine( &iso_sp[ipH_LIKE][ipHYDROGEN].trans(ipH2p,ipH1s) ) level populations (cm-3) must be computed: population of upper level of 21cm: HFLines[0].Hi->Pop population of lower level of 21cm: (*HFLines[0].Lo()).Pop stimulated emission corrected population of lower level: HFLines[0].Emis->PopOpc() */ double PopTot = 
iso_sp[ipH_LIKE][ipHYDROGEN].st[ipH1s].Pop(); /* population can be zero in certain tests where H is turned off, * also if initial solver does not see any obvious source of ionization * also possible to set H0 density to zero with element ionization command, * as is done in func_set_ion test case */ if( PopTot <0 ) TotalInsanity(); else if( PopTot == 0 ) { /*return after zeroing local variables */ (*HFLines[0].Hi()).Pop() = 0.; (*HFLines[0].Lo()).Pop() = 0.; HFLines[0].Emis().PopOpc() = 0.; HFLines[0].Emis().xIntensity() = 0.; HFLines[0].Emis().xObsIntensity() = 0.; HFLines[0].Emis().ColOvTot() = 0.; hyperfine.Tspin21cm = 0.; return; } double e1 = 0.; double e2 = HFLines[ 0 ].EnergyWN(); /* The 2p fine stucture energies are current with NIST as of May 14, 2019. */ double e2p12 = 82258.9191133; double e2p32 = 82259.2850014; /* The hyperfine splittings of the 2p fine structure levels are from * >>refer HI Bethe & Salpeter (1977) Section 22, page 110. */ double e2p12_splitting = e2 / 24.; double e2p32_splitting = e2 / 60.; /* The hyperfine states have statistical weights 2F+1, so they differ from * the unsplit level energy by: * El = E - dE * gu / (gu+gl) * Eu = E + dE * gl / (gu+gl) * where E is the unsplit energy, dE the hyperfine splitting, and gu, gl the * statistical weights of the hyperfine states. * For 2p1/2: g(F=1) = 3, g(F=0) = 1 * For 2p3/2: g(F=2) = 5, g(F=1) = 3 * The levels of interest here are the 2p1/2(F=1) and 2p3/2(F=1), the top * and bottom levels of the hyperfine states, resp. * >>refer HI Deguchi & Watson 1985 ApJ, 290, 578 * refcon see their Fig. 
1 */ double e3 = e2p12 + 0.25 * e2p12_splitting; double e4 = e2p32 - 0.625 * e2p32_splitting; double de31 = e3 - e1; double de32 = e3 - e2; double de41 = e4 - e1; double de42 = e4 - e2; if( false ) { fprintf( ioQQQ, "-------\n" ); fprintf( ioQQQ, "de32 = %.9e\n", de32 ); fprintf( ioQQQ, "de31 = %.9e\n", de31 ); fprintf( ioQQQ, "de42 = %.9e\n", de42 ); fprintf( ioQQQ, "de41 = %.9e\n", de41 ); fprintf( ioQQQ, "-------\n" ); } double a31 = 2.08e8; /* Einstein co-efficient for transition 1p1/2 to 0s1/2 */ double a32 = 4.16e8; /* Einstein co-efficient for transition 1p1/2 to 1s1/2 */ double a41 = 4.16e8; /* Einstein co-efficient for transition 1p3/2 to 0s1/2 */ double a42 = 2.08e8; /* Einstein co-efficient for transition 1p3/2 to 1s1/2 */ /* These A values are determined from eqn. 17.64 of "The theory of Atomic structure * and Spectra" by R. D. Cowan * A hyperfine level has degeneracy Gf=(2F + 1) * a2p1s = 6.24e8; Einstein co-efficient for transition 2p to 1s */ double a21 = HFLines[0].Emis().Aul(); /* Einstein co-efficient for transition 1s1/2 to 0s1/2 */ /* above is spontaneous rate - the net rate is this times escape and destruction * probabilities */ a21 *= HFLines[0].Emis().Ploss(); ASSERT( a21 >= 0. ); /* hyperfine.lgLya_pump_21cm is option to turn off Lya pump * of 21 cm, with 'no 21cm lya pump' command */ double occnu_lya = OccupationNumberLine( iso_sp[ipH_LIKE][ipHYDROGEN].trans(ipH2p,ipH1s) ) * hyperfine.lgLya_pump_21cm; if( !conv.lgSearch && occnu_lya < 0. ) { occnu_lya = 0.; fixit( "PopOpc <0 but Pesc > 0: We may need to review when Pesc is computed to get non-negative occupation numbers" ); } /* Lya occupation number for the hyperfine levels 0S1/2 and 1S1/2 can be different * this is related to the "Wouthuysen-Field coupling", * https://en.wikipedia.org/wiki/Wouthuysen%E2%80%93Field_coupling * which assumes that the variation of the Lya source function is the gas kinetic temperature. * Following Adams 1971 we assume variation is line excitation temperature. 
* Third possibility is that given in stellar atmosphere texts, S_nu = constant */ double occnu_lya_23 = occnu_lya, occnu_lya_13 = 0., occnu_lya_24 = 0., occnu_lya_14 = 0.; /* selected with SET LYA 21CM command */ if( hyperfine.LyaSourceFunctionShape_assumed == t_hyperfine::EXCITATION || hyperfine.LyaSourceFunctionShape_assumed == t_hyperfine::KINETIC ) { double Temp = phycon.te; if( hyperfine.LyaSourceFunctionShape_assumed == t_hyperfine::EXCITATION ) Temp = TexcLine( iso_sp[ipH_LIKE][ipHYDROGEN].trans(ipH2p,ipH1s) ); if( occnu_lya * Temp > 0. ) { /* If the continuum is described by a Planck function, then the continuum * within Lya seen by the two levels is not exactly of the same brightness. * They differ by the exp when Lya is on the Wien tail of black body, which * must be true if 21 cm is important. */ double texc1 = sexp( HFLines[0].EnergyK() / Temp ); double texc2 = sexp( ((e2p32-e2p12)*T1CM) / Temp ); occnu_lya_23 = occnu_lya; occnu_lya_13 = occnu_lya * texc1; occnu_lya_24 = occnu_lya * texc2; occnu_lya_14 = occnu_lya_13 * texc2; } enum { DEBUG_SPEC = false }; if( DEBUG_SPEC ) { fprintf(ioQQQ,"DEBUG texc %12.3e excitation %12.3e kinetic %12.3e\n", Temp, TexcLine( iso_sp[ipH_LIKE][ipHYDROGEN].trans(ipH2p,ipH1s) ) , phycon.te ); } } else if( hyperfine.LyaSourceFunctionShape_assumed == t_hyperfine::CONSTANT ) { occnu_lya_23 = occnu_lya; occnu_lya_13 = powi(de32/de31, 3) * occnu_lya; occnu_lya_24 = powi(de32/de42, 3) * occnu_lya; occnu_lya_14 = powi(de32/de41, 3) * occnu_lya; } else TotalInsanity(); if( false ) { fprintf( ioQQQ, "=======\n" ); fprintf( ioQQQ, "oc32 = %.9e\n", occnu_lya_23 ); fprintf( ioQQQ, "oc31 = %.9e\n", occnu_lya_13 ); fprintf( ioQQQ, "oc42 = %.9e\n", occnu_lya_24 ); fprintf( ioQQQ, "oc41 = %.9e\n", occnu_lya_14 ); fprintf( ioQQQ, "=======\n" ); } /* this is the 21 cm upward continuum pumping rate [s-1] for the attenuated incident and * local continuum and including line optical depths */ double pump12 = HFLines[0].Emis().pump(); double 
pump21 = pump12 * (*HFLines[0].Lo()).g() / (*HFLines[0].Hi()).g(); /* collision rates s-1 within 1s, * were multiplied by collider density when evaluated in CoolEvaluate */ /* ContBoltz is Boltzmann factor for wavelength of line */ ASSERT( HFLines[0].Coll().col_str()>0. ); double coll12 = HFLines[0].Coll().col_str()*dense.cdsqte/(*HFLines[0].Lo()).g()*rfield.ContBoltz[HFLines[0].ipCont()-1]; double coll21 = HFLines[0].Coll().col_str()*dense.cdsqte/(*HFLines[0].Hi()).g(); /* set up rate (s-1) equations * all process out of 1 that eventually go to 2 */ double rate12 = /* collision rate (s-1) from 1 to 2 */ coll12 + /* direct external continuum pumping (s-1) in 21 cm line - usually dominated by CMB */ pump12 + /* pump rate (s-1) up to 3, times fraction that decay to 2, hence net 1-2 */ 3.*a31*occnu_lya_13 *a32/(a31+a32)+ /* pump rate (s-1) up to 4, times fraction that decay to 2, hence net 1-2 */ /* >>chng 05 apr 04, GS, degeneracy corrected from 6 to 3 */ 3.*a41*occnu_lya_14 *a42/(a41+a42); /* set up rate (s-1) equations * all process out of 2 that eventually go to 1 */ /* spontaneous + induced 2 -> 1 by external continuum inside 21 cm line */ /* >>chng 04 dec 03, do not include spontaneous decay, for numerical stability */ double rate21 = /* collisional deexcitation */ coll21 + /* net spontaneous decay plus external continuum pumping in 21 cm line */ pump21 + /* rate from 2 to 3 time fraction that go back to 1, hence net 2 - 1 */ /* >>chng 05 apr 04,GS, degeneracy corrected from 2 to unity */ occnu_lya_23*a32 * a31/(a31+a32)+ occnu_lya_24*a42*a41/(a41+a42); /* x = (*HFLines[0].Hi()).Pop/(*HFLines[0].Lo()).Pop */ double x = rate12 / SDIV(a21 + rate21); ASSERT( x > 0. 
);
/* the Transitions term is the total population of 1s;
 * x is the upper/lower population ratio, so these two lines partition
 * PopTot between the two hyperfine levels */
(*HFLines[0].Hi()).Pop() = (x/(1.+x))* PopTot;
(*HFLines[0].Lo()).Pop() = (1./(1.+x))* PopTot;

/* the population with correction for stimulated emission
 * (the factors of 3 are presumably the 21 cm upper/lower statistical-weight
 * ratio — confirm against the g() values set in HyperfineCreate) */
HFLines[0].Emis().PopOpc() = (*HFLines[0].Lo()).Pop()*((3*rate21- rate12) + 3*a21)/SDIV(3*(a21+ rate21));

/* ratio of collisional to total (collisional + pumped) excitation */
HFLines[0].Emis().ColOvTot() = 0.;
if( rate12 > 0. )
	HFLines[0].Emis().ColOvTot() = coll12 / rate12;

/* set number of escaping line photons, used elsewhere for outward beam
 * and line intensity
 * NB: continuum subtraction is performed within PutLine() */
set_xIntensity(HFLines[0]);

/* finally save the spin temperature; default to the kinetic temperature
 * and replace with the line excitation temperature when the upper-level
 * population is significant */
hyperfine.Tspin21cm = phycon.te;
if( (*HFLines[0].Hi()).Pop() > SMALLFLOAT )
{
	hyperfine.Tspin21cm = TexcLine( HFLines[0] );
	/* this line must be non-zero - it does strongly mase in limit_compton_hi_t sim -
	 * in that sim pop ratio goes to unity for a float and TexcLine ret zero */
	if( hyperfine.Tspin21cm == 0. )
		hyperfine.Tspin21cm = phycon.te;
}
return;
}

/*H21cm_electron computes rate for H 21 cm from upper to lower excitation by electrons - call by CoolEvaluate
 * >>refer	H1	CS	Smith, F. J. 1966, Planet. Space Sci., 14, 929 */
double H21cm_electron( double temp )
{
	/* fit is clamped at 1e4 K */
	temp = MIN2(1e4 , temp );
	/* following fit is from */
	/* >>refer	H1	21cm	Liszt, H. 2001, A&A, 371, 698 */
	return exp10( -9.607 + log10( sqrt(temp)) * sexp( powpq(log10(temp),9,2) / 1800. ));
}

/* computes rate for H 21 cm from upper to lower excitation by atomic hydrogen
 * from
 * >>refer	H1	CS	Allison, A. C., & Dalgarno A. 1969, ApJ 158, 423 */
/* the following is the best current survey of 21 cm excitation */
/* >>refer	H1	21cm	Liszt, H.
2001, A&A, 371, 698 */
#if 0
STATIC double h21_t_ge_20( double temp )
{
	double y;
	double x1, teorginal = temp;
	/* data go up to 1,000K must not go above this */
	temp = MIN2( 1000.,temp );

	x1 =1.0/sqrt(temp);

	y =-21.70880995483007-13.76259674006133*x1;
	y = exp(y);

	/* >>chng 02 feb 14, extrapolate above 1e3 K as per Liszt 2001 recommendation
	 * page 699 of */
	/* >>refer	H1	21cm	Liszt, H. 2001, A&A, 371, 698 */
	if( teorginal > 1e3 )
	{
		y *= pow(teorginal/1e3 , 0.33 );
	}
	return( y );
}

/* this branch for T < 20K, data go down to 1 K */
STATIC double h21_t_lt_20( double temp )
{
	double y;
	double x1;

	/* must not go below 1K */
	temp = MAX2( 1., temp );

	x1 =temp*log(temp);
	y =9.720710314268267E-08+6.325515312006680E-08*x1;
	return(y*y);
}
#endif

/* >> chng 04 dec 15, GS. The fitted rate co-efficients (cm3s-1) in the temperature range 1K to 300K is from
 * >>refer	H1	CS	Zygelman, B. 2005, ApJ, 622, 1356
 * The rate is 4/3 times the Dalgarno (1969) rate for the temperature range 300K to 1000K.
 * Above 1000K, the rate is extrapolated according to Liszt 2001.*/
STATIC double h21_t_ge_10( double temp )
{
	double teorginal = temp;

	/* data go up to 300K; Zygelman (2005) fit used on [10 K, 300 K] */
	temp = MIN2( 300., temp );
	double y = 1.4341127e-9 + 9.4161077e-15 * temp - 9.2998995e-9 / log(temp) +
		6.9539411e-9 / sqrt(temp) + 1.7742293e-8 * log(temp)/pow2(temp);
	if( teorginal > 300. )
	{
		/* data go up to 1000;
		 * exponential fit scaled by 1.236686 (the ~4/3 factor noted above) */
		temp = MIN2( 1000., teorginal );
		y = -21.70880995483007 - 13.76259674006133 / sqrt(temp);
		y = 1.236686*exp(y);
	}
	if( teorginal > 1e3 )
	{
		/*data go above 1000 — power-law extrapolation per Liszt (2001) */
		y *= pow( teorginal/1e3 , 0.33 );
	}
	return( y );
}

/* this branch for T < 10K, data go down to 1 K */
STATIC double h21_t_lt_10( double temp )
{
	/* must not go below 1K */
	temp = MAX2(1., temp );
	return 8.5622857e-10 + 2.331358e-11 * temp + 9.5640586e-11 * pow2(log(temp)) -
		4.6220869e-10 * sqrt(temp) - 4.1719545e-10 / sqrt(temp);
}

/*H21cm_H_atom - evaluate H atom spin changing H atom collision rate,
 * called by CoolEvaluate
 * >>refer	H1	CS	Allison, A. C.
& Dalgarno, A. 1969, ApJ 158, 423 */
double H21cm_H_atom( double temp )
{
	/* dispatch on temperature — the two fitted branches join at 10 K */
	double hold;
	if( temp >= 10. )
	{
		hold = h21_t_ge_10( temp );
	}
	else
	{
		hold = h21_t_lt_10( temp );
	}
	return hold;
}

/*H21cm_proton - evaluate proton spin changing H atom collision rate,
 * called by CoolEvaluate */
double H21cm_proton( double temp )
{
	/*>>refer	21cm	p coll	Furlanetto, S. R. & Furlanetto, M. R. 2007, MNRAS, 379, 130
	 * previously had used proton rate, which is 3.2 times H0 rate according to
	 *>>refer	21cm	CS	Liszt, H. 2001, A&A, 371, 698 */
	/* fit to table 1 of first paper */
	/*--------------------------------------------------------------*
	TableCurve Function: c:\storage\litera~1\21cm\atomic~1\p21cm.c Jun 20, 2007 3:37:50 PM
	proton coll deex
	X= temperature (K)
	Y= rate coefficient (1e-9 cm3 s-1)
	Eqn# 4419  y=a+bx+cx^2+dx^(0.5)+elnx/x
	r2=0.9999445384690351
	r2adj=0.9999168077035526
	StdErr=5.559328579039901E-12
	Fstat=49581.16793656295
	a= 9.588389834316704E-11
	b= -5.158891920816405E-14
	c= 5.895348443553458E-19
	d= 2.05304960232429E-11
	e= 9.122617940315725E-10
	*--------------------------------------------------------------*/

	/* only fit this range, did not include T = 1K point which
	 * causes an inflection */
	temp = MAX2( 2. , temp );
	temp = MIN2( 2e4 , temp );

	/* within range of fitted rate coefficients */
	return 9.588389834316704E-11 -
		5.158891920816405E-14 * temp +
		5.895348443553458E-19 * temp * temp +
		2.053049602324290E-11 * sqrt(temp) +
		9.122617940315725E-10 * log(temp) / temp;
}

/*
 * HyperfineCreate, HyperfineCS written July 2001
 * William Goddard for Gary Ferland
 * This code calculates line intensities for known
 * hyperfine transitions.
 */

/* two products, the transition structure HFLines, which contains all information for the lines,
 * and nHFLines, the number of these lines.
* * these are in taulines.h * * info to create them contained in hyperfine.dat * * abundances of nuclei are also in hyperfine.dat, stored in */ /* Ion contains varying temperatures, specified above, used for */ /* calculating collision strengths. */ static int Ntemp = -1; static vector<double> csTemp; typedef struct { vector<double> cs; vector<double> cs2d; } t_ColStr; static vector<t_ColStr> colstr; const double ENERGY_MIN_WN = 1e-10; /* HyperfineCreate establish space for HFS arrays, reads atomic data from hyperfine.dat */ void HyperfineCreate(void) { vector<string> data; DEBUG_ENTRY( "HyperfineCreate()" ); /* list of ion collision strengths for the temperatures listed in table */ /* HFLines containing all the data in Hyperfine.dat, and transition is */ /* defined in cddefines.h */ /*transition *HFLines;*/ /* get the line data for the hyperfine lines */ if( trace.lgTrace ) fprintf( ioQQQ," Hyperfine opening hyperfine.dat:"); FILE *ioDATA = open_data( "hyperfine.dat", "r" ); /* first line is a version number and does not count */ string chLine; if( !read_whole_line( chLine, ioDATA ) ) { fprintf( ioQQQ, " Hyperfine could not read first line of hyperfine.dat.\n"); cdEXIT(EXIT_FAILURE); } /* count lines in the file, ignoring lines starting with '#', * and get temperature array for HFS collision strengths */ size_t nHFLines = 0; while( read_whole_line( chLine, ioDATA ) ) { if( chLine[0] == '#' ) { continue; } else if( chLine.find("TDATA:") == string::npos ) { Split(chLine, "\t", data, SPM_STRICT); int Aiso = atoi(data[0].c_str()); int nelem = atoi(data[1].c_str()); double wavelength = atof(data[3].c_str()); if( abund.IsoAbn[nelem-1].getAbn( Aiso ) > 0 && WAVNRYD/wavelength > rfield.emm() ) ++nHFLines; } else { Split(chLine, " ", data, SPM_STRICT); if (data.size() <= 1) { fprintf(ioQQQ, "HyperfineCreate: Error: Invalid number of temperatures in 'TDATA:': %d\n", (int) data.size()); cdEXIT(EXIT_FAILURE); } Ntemp = data.size() - 1; csTemp.resize(Ntemp); int i = 0; for 
(std::vector<string>::const_iterator it = data.begin(); it != data.end() && i <= Ntemp; it++, i++) { if(i == 0) continue; csTemp[i-1] = atof((*it).c_str()); } } data.resize(0); } ASSERT(nHFLines > 0 && Ntemp > 0); for(int i = 0; i < Ntemp; i++) { ASSERT( csTemp[i] > phycon.TEMP_LIMIT_LOW && csTemp[i] < phycon.TEMP_LIMIT_HIGH ); if( i > 0 ) ASSERT(csTemp[i] > csTemp[i-1]); // printf("i=%d\t t = %g\n", i, csTemp[i]); } /* allocate the transition HFLines array */ HFLines.resize(nHFLines); AllTransitions.push_back(HFLines); /* initialize array to impossible values to make sure eventually done right */ for( size_t i=0; i< HFLines.size(); ++i ) { HFLines[i].Junk(); HFLines[i].AddHiState(); HFLines[i].AddLoState(); HFLines[i].AddLine2Stack(); } colstr.resize(HFLines.size()); for (size_t j = 0; j < HFLines.size(); j++) { colstr[j].cs.resize(Ntemp); colstr[j].cs2d.resize(Ntemp); } hyperfine.HFLabundance.resize(HFLines.size()); /* now rewind the file so we can read it a second time*/ if( fseek( ioDATA , 0 , SEEK_SET ) != 0 ) { fprintf( ioQQQ, " Hyperfine could not rewind hyperfine.dat.\n"); cdEXIT(EXIT_FAILURE); } /* check that magic number is ok, read the line */ if( !read_whole_line( chLine, ioDATA ) ) { fprintf( ioQQQ, " Hyperfine could not read first line of hyperfine.dat.\n"); cdEXIT(EXIT_FAILURE); } /* check that magic number is ok, scan it in */ { long j = 1; bool lgEOL; int year = (int) FFmtRead(chLine.c_str(),&j,chLine.length(),&lgEOL); int month = (int) FFmtRead(chLine.c_str(),&j,chLine.length(),&lgEOL); int day = (int) FFmtRead(chLine.c_str(),&j,chLine.length(),&lgEOL); /* the following is the set of numbers that appear at the start of hyperfine.dat 13 02 09 */ const int iYR=13, iMN=10, iDY=18; if( ( year != iYR ) || ( month != iMN ) || ( day != iDY ) ) { fprintf( ioQQQ, " Hyperfine: the version of hyperfine.dat in the data directory is not the current version.\n" ); fprintf( ioQQQ, " I expected to find the number %i %i %i and got %i %i %i instead.\n" , iYR, iMN , 
iDY , year , month , day ); cdEXIT(EXIT_FAILURE); } } /* * scan the string taken from Hyperfine.dat, parsing into * needed variables. * nelem is the atomic number. * IonStg is the ionization stage. Atom = 1, Z+ = 2, Z++ = 3, etc. * Aul is used to find the oscillator strength in the function GetGF. * most of the variables are floats. */ size_t j = 0; while( j < HFLines.size() && read_whole_line( chLine, ioDATA ) ) { /* skip lines starting with '#' or containing the temperature array */ if( chLine[0] == '#' || chLine.find("TDATA:") != string::npos ) continue; Split(chLine, "\t", data, SPM_STRICT); int Aiso = atoi(data[0].c_str()); int nelem = atoi(data[1].c_str()); double wavelength = atof(data[3].c_str()); /* Ignore lines that fall beyond the lowest energy. */ if( ! ( abund.IsoAbn[nelem-1].getAbn( Aiso ) > 0 && WAVNRYD/wavelength > rfield.emm() ) ) { data.resize(0); continue; } (*HFLines[j].Hi()).nelem() = nelem; ASSERT((*HFLines[j].Hi()).nelem() > 0); (*HFLines[j].Hi()).IonStg() = atoi(data[2].c_str()); ASSERT((*HFLines[j].Hi()).IonStg() > 0); hyperfine.HFLabundance[j] = abund.IsoAbn[nelem-1].getAbn( Aiso ); ASSERT(hyperfine.HFLabundance[j] >= 0.0 && hyperfine.HFLabundance[j] <= 1.0); HFLines[j].Emis().Aul() = (realnum) atof(data[4].c_str()); HFLines[j].Emis().damp() = 1e-20f; (*HFLines[j].Hi()).g() = (realnum) (2*(abund.IsoAbn[nelem-1].getSpin( Aiso ) + .5) + 1); (*HFLines[j].Lo()).g() = (realnum) (2*(abund.IsoAbn[nelem-1].getSpin( Aiso ) - .5) + 1); /* account for inverted levels */ if( abund.IsoAbn[nelem-1].getMagMom( Aiso ) < 0 ) { double tmp = (*HFLines[j].Hi()).g(); (*HFLines[j].Hi()).g() = (*HFLines[j].Lo()).g(); (*HFLines[j].Lo()).g() = tmp; } double fenergyWN = MAX2(ENERGY_MIN_WN, 1.0 / wavelength); HFLines[j].WLAng() = (realnum)(wavelength * 1e8f); HFLines[j].EnergyWN() = (realnum) fenergyWN; HFLines[j].Emis().gf() = (realnum)(GetGF(HFLines[j].Emis().Aul(), fenergyWN, (*HFLines[j].Hi()).g())); ASSERT(HFLines[j].Emis().gf() > 0.0); 
(*HFLines[j].Lo()).nelem() = (*HFLines[j].Hi()).nelem(); (*HFLines[j].Lo()).IonStg() = (*HFLines[j].Hi()).IonStg(); // printf("line %3ld:\t A= %2d\t Z= %2d\t Spin= %3.1f\t MagMom= %8.5f\t IonStg= %2d\t Frac= %6.4f\t" // " Wl= %7.4f\t Aul= %.4e\t glo= %1.0f\t ghi= %1.0f\n", // j, Aiso, nelem, // abund.IsoAbn[nelem-1].getSpin( Aiso ), // abund.IsoAbn[nelem-1].getMagMom( Aiso ), // (*HFLines[j].Hi()).IonStg(), // hyperfine.HFLabundance[j], wavelength, HFLines[j].Emis().Aul(), // (*HFLines[j].Lo()).g(), (*HFLines[j].Hi()).g()); if( data.size() > 6 ) { // printf("data for line %ld\t %d\t %d:\t", j, nelem, (*HFLines[j].Hi()).IonStg()); for (int ij = 6, ii = 0; ij < (int) data.size() && ii < Ntemp; ij++, ii++) { colstr[j].cs[ii] = atof(data[ij].c_str()); ASSERT(colstr[j].cs[ii] >= 0.0); // printf("%g\t", colstr[j].cs[ii]); } // printf("\n"); spline(csTemp.data(), colstr[j].cs.data(), Ntemp, 2e31, 2e31, colstr[j].cs2d.data()); } else { MakeCS( HFLines[j] ); colstr[j].cs.clear(); colstr[j].cs2d.clear(); } data.resize(0); j++; } fclose(ioDATA); ASSERT( j == HFLines.size() ); /* Discard no-longer needed nuclear data */ for( long nelem = 0; nelem < LIMELM; nelem++ ) abund.IsoAbn[nelem].rm_nuc_data(); # if 0 /* for debugging and developing only */ /* calculating the luminosity for each isotope */ for(int i = 0; i < HFLines.size(); i++) { N = dense.xIonDense[(*HFLines[i].Hi()).nelem()-1][(*HFLines[i].Hi()).IonStg()-1]; Ne = dense.eden; h = 6.626076e-27; /* erg * sec */ c = 3e10; /* cm / sec */ k = 1.380658e-16; /* erg / K */ upsilon = HyperfineCS(i); /*statistical weights must still be identified */ q21 = COLL_CONST * upsilon / (phycon.sqrte * (*HFLines[i].Hi()).g()); q12 = (*HFLines[i].Hi()).g()/ (*HFLines[i].Lo()).g() * q21 * exp(-1 * h * c * HFLines[i].EnergyWN / (k * phycon.te)); x = Ne * q12 / (HFLines[i].Emis().Aul() * (1 + Ne * q21 / HFLines[i].Aul())); HFLines[i].xIntensity() = N * HFLines[i].Emis().Aul() * x / (1.0 + x) * h * c / (HFLines[i].EnergyAng() / 1e8); } # 
endif
	return;
}

/*HyperfineCS returns interpolated collision strength for transition index i */
double HyperfineCS( size_t i )
{
	double upsilon;

	DEBUG_ENTRY( "HyperfineCS()" );

	/* NOTE(review): i is size_t, so the i >= 0. half of this assert is
	 * always true; only the upper bound is a real check */
	ASSERT( i >= 0. && i < HFLines.size() );

	/* lines with no tabulated data fall back to the stored collision strength
	 * (set via MakeCS in HyperfineCreate) */
	if( colstr[i].cs.size() == 0 )
		return HFLines[i].Coll().col_str();

	if( phycon.te <= csTemp[0] )
	{
		/* constant CS, if temperature below bounds of table */
		upsilon = colstr[i].cs[0];
	}
	else if( phycon.te >= csTemp[Ntemp-1] )
	{
		/* extrapolate, if temperature above bounds of table:
		 * power law through the last two tabulated points (log-log slope) */
		int j = Ntemp - 1;
		double slope = log10(colstr[i].cs[j-1]/colstr[i].cs[j]) / log10(csTemp[j-1]/csTemp[j]);
		upsilon = log10(phycon.te/csTemp[j])*slope + log10(colstr[i].cs[j]);
		upsilon = exp10( upsilon);
	}
	else
	{
		/* spline interpolation within the tabulated temperature range,
		 * using the second derivatives precomputed in HyperfineCreate */
		splint( csTemp.data(), colstr[i].cs.data(), colstr[i].cs2d.data(), Ntemp, phycon.te, &upsilon );
	}
	return upsilon;
}
// Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2014 The Bitcoin developers // Copyright (c) 2014-2015 The Dash developers // Copyright (c) 2015-2017 The PIVX developers // Copyright (c) 2017 The Bitcoinlegend developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "main.h" #include "accumulators.h" #include "addrman.h" #include "alert.h" #include "chainparams.h" #include "checkpoints.h" #include "checkqueue.h" #include "init.h" #include "kernel.h" #include "masternode-budget.h" #include "masternode-payments.h" #include "masternodeman.h" #include "merkleblock.h" #include "net.h" #include "obfuscation.h" #include "pow.h" #include "spork.h" #include "sporkdb.h" #include "swifttx.h" #include "txdb.h" #include "txmempool.h" #include "ui_interface.h" #include "util.h" #include "utilmoneystr.h" #include "primitives/zerocoin.h" #include "libzerocoin/Denominations.h" #include <sstream> #include <boost/algorithm/string/replace.hpp> #include <boost/filesystem.hpp> #include <boost/filesystem/fstream.hpp> #include <boost/lexical_cast.hpp> #include <boost/thread.hpp> using namespace boost; using namespace std; using namespace libzerocoin; #if defined(NDEBUG) #error "Bitcoinlegend cannot be compiled without assertions." 
#endif

// 6 comes from OPCODE (1) + vch.size() (1) + BIGNUM size (4)
#define SCRIPT_OFFSET 6
// For Script size (BIGNUM/Uint256 size)
#define BIGNUM_SIZE 4

/**
 * Global state
 */
CCriticalSection cs_main;

BlockMap mapBlockIndex;
map<uint256, uint256> mapProofOfStake;
set<pair<COutPoint, unsigned int> > setStakeSeen;
map<unsigned int, unsigned int> mapHashedBlocks;
CChain chainActive;
CBlockIndex* pindexBestHeader = NULL;
int64_t nTimeBestReceived = 0;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
bool fImporting = false;
bool fReindex = false;
bool fTxIndex = true;
bool fIsBareMultisigStd = true;
bool fCheckBlockIndex = false;
bool fVerifyingBlocks = false;
unsigned int nCoinCacheSize = 5000;
bool fAlerts = DEFAULT_ALERTS;
// minimum stake age: 3 hours, in seconds
unsigned int nStakeMinAge = 3 * 60 * 60;
int64_t nReserveBalance = 0;

/** Fees smaller than this (in duffs) are considered zero fee (for relaying and mining)
 * We are ~100 times smaller then bitcoin now (2015-06-23), set minRelayTxFee only 10 times higher
 * so it's still 10 times lower comparing to bitcoin.
 */
CFeeRate minRelayTxFee = CFeeRate(10000);

CTxMemPool mempool(::minRelayTxFee);

// An orphan transaction (one whose inputs are not yet known) together with
// the peer it came from, so it can be evicted when that peer disconnects.
struct COrphanTx {
    CTransaction tx;
    NodeId fromPeer;
};
map<uint256, COrphanTx> mapOrphanTransactions;
map<uint256, set<uint256> > mapOrphanTransactionsByPrev;
map<uint256, int64_t> mapRejectedBlocks;
// forward declaration; defined below in this file
void EraseOrphansFor(NodeId peer);

static void CheckBlockIndex();

/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;

const string strMessageMagic = "DarkNet Signed Message:\n";

// Internal stuff
namespace
{
// Strict-weak ordering used to pick the "best" block index candidate:
// most chain work first, then earliest sequence id, then pointer address.
struct CBlockIndexWorkComparator {
    bool operator()(CBlockIndex* pa, CBlockIndex* pb) const
    {
        // First sort by most total work, ...
        if (pa->nChainWork > pb->nChainWork) return false;
        if (pa->nChainWork < pb->nChainWork) return true;

        // ... then by earliest time received, ...
        if (pa->nSequenceId < pb->nSequenceId) return false;
        if (pa->nSequenceId > pb->nSequenceId) return true;

        // Use pointer address as tie breaker (should only happen with blocks
        // loaded from disk, as those all have id 0).
        if (pa < pb) return false;
        if (pa > pb) return true;

        // Identical blocks.
        return false;
    }
};

CBlockIndex* pindexBestInvalid;

/**
 * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
 * as good as our current tip or better. Entries may be failed, though.
 */
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
/** All pairs A->B, where A (or one if its ancestors) misses transactions, but B has transactions. */
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;

CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;

/**
 * Every received block is assigned a unique and increasing identifier, so we
 * know which one to give priority in case of a fork.
 */
CCriticalSection cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
uint32_t nBlockSequenceId = 1;

/**
 * Sources of received blocks, to be able to send them reject messages or ban
 * them, if processing happens afterwards. Protected by cs_main.
 */
map<uint256, NodeId> mapBlockSource;

/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
    uint256 hash;
    CBlockIndex* pindex;        //! Optional.
    int64_t nTime;              //! Time of "getdata" request in microseconds.
    int nValidatedQueuedBefore; //! Number of blocks queued with validated headers (globally) at the time this one is requested.
    bool fValidatedHeaders;     //! Whether this block has validated headers at the time of request.
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;

/** Number of blocks in flight with validated headers.
 */
int nQueuedValidatedHeaders = 0;

/** Number of preferable block download peers. */
int nPreferredDownload = 0;

/** Dirty block index entries. */
set<CBlockIndex*> setDirtyBlockIndex;

/** Dirty block file entries. */
set<int> setDirtyFileInfo;
} // anon namespace

//////////////////////////////////////////////////////////////////////////////
//
// dispatching functions
//

// These functions dispatch to one or all registered wallets

namespace
{
// Bundle of signals fanned out to every registered CValidationInterface
// (wallets, etc.); see RegisterValidationInterface below.
struct CMainSignals {
    /** Notifies listeners of updated transaction data (transaction, and optionally the block it is found in. */
    boost::signals2::signal<void(const CTransaction&, const CBlock*)> SyncTransaction;
    /** Notifies listeners of an erased transaction (currently disabled, requires transaction replacement). */
    // XX42    boost::signals2::signal<void(const uint256&)> EraseTransaction;
    /** Notifies listeners of an updated transaction without new data (for now: a coinbase potentially becoming visible). */
    boost::signals2::signal<void(const uint256&)> UpdatedTransaction;
    /** Notifies listeners of a new active block chain. */
    boost::signals2::signal<void(const CBlockLocator&)> SetBestChain;
    /** Notifies listeners about an inventory item being seen on the network. */
    boost::signals2::signal<void(const uint256&)> Inventory;
    /** Tells listeners to broadcast their data.
     */
    boost::signals2::signal<void()> Broadcast;
    /** Notifies listeners of a block validation result */
    boost::signals2::signal<void(const CBlock&, const CValidationState&)> BlockChecked;
} g_signals;
} // anon namespace

// Connect a listener to every main signal.
void RegisterValidationInterface(CValidationInterface* pwalletIn)
{
    g_signals.SyncTransaction.connect(boost::bind(&CValidationInterface::SyncTransaction, pwalletIn, _1, _2));
    // XX42    g_signals.EraseTransaction.connect(boost::bind(&CValidationInterface::EraseFromWallet, pwalletIn, _1));
    g_signals.UpdatedTransaction.connect(boost::bind(&CValidationInterface::UpdatedTransaction, pwalletIn, _1));
    g_signals.SetBestChain.connect(boost::bind(&CValidationInterface::SetBestChain, pwalletIn, _1));
    g_signals.Inventory.connect(boost::bind(&CValidationInterface::Inventory, pwalletIn, _1));
    g_signals.Broadcast.connect(boost::bind(&CValidationInterface::ResendWalletTransactions, pwalletIn));
    g_signals.BlockChecked.connect(boost::bind(&CValidationInterface::BlockChecked, pwalletIn, _1, _2));
}

// Disconnect a listener from every main signal (reverse order of connect).
void UnregisterValidationInterface(CValidationInterface* pwalletIn)
{
    g_signals.BlockChecked.disconnect(boost::bind(&CValidationInterface::BlockChecked, pwalletIn, _1, _2));
    g_signals.Broadcast.disconnect(boost::bind(&CValidationInterface::ResendWalletTransactions, pwalletIn));
    g_signals.Inventory.disconnect(boost::bind(&CValidationInterface::Inventory, pwalletIn, _1));
    g_signals.SetBestChain.disconnect(boost::bind(&CValidationInterface::SetBestChain, pwalletIn, _1));
    g_signals.UpdatedTransaction.disconnect(boost::bind(&CValidationInterface::UpdatedTransaction, pwalletIn, _1));
    // XX42    g_signals.EraseTransaction.disconnect(boost::bind(&CValidationInterface::EraseFromWallet, pwalletIn, _1));
    g_signals.SyncTransaction.disconnect(boost::bind(&CValidationInterface::SyncTransaction, pwalletIn, _1, _2));
}

// Drop every listener from every main signal.
void UnregisterAllValidationInterfaces()
{
    g_signals.BlockChecked.disconnect_all_slots();
    g_signals.Broadcast.disconnect_all_slots();
    g_signals.Inventory.disconnect_all_slots();
    g_signals.SetBestChain.disconnect_all_slots();
    g_signals.UpdatedTransaction.disconnect_all_slots();
    // XX42    g_signals.EraseTransaction.disconnect_all_slots();
    g_signals.SyncTransaction.disconnect_all_slots();
}

// Push a transaction (and optionally its containing block) to all listeners.
void SyncWithWallets(const CTransaction& tx, const CBlock* pblock)
{
    g_signals.SyncTransaction(tx, pblock);
}

//////////////////////////////////////////////////////////////////////////////
//
// Registration of network node signals.
//

namespace
{
// A pending "reject" message to be sent to a peer about a block.
struct CBlockReject {
    unsigned char chRejectCode;
    string strRejectReason;
    uint256 hashBlock;
};

/**
 * Maintain validation-specific state about nodes, protected by cs_main, instead
 * of by CNode's own locks. This simplifies asynchronous operation, where
 * processing of incoming data is done after the ProcessMessage call returns,
 * and we're no longer holding the node's locks.
 */
struct CNodeState {
    //! The peer's address
    CService address;
    //! Whether we have a fully established connection.
    bool fCurrentlyConnected;
    //! Accumulated misbehaviour score for this peer.
    int nMisbehavior;
    //! Whether this peer should be disconnected and banned (unless whitelisted).
    bool fShouldBan;
    //! String name of this peer (debugging/logging purposes).
    std::string name;
    //! List of asynchronously-determined block rejections to notify this peer about.
    std::vector<CBlockReject> rejects;
    //! The best known block we know this peer has announced.
    CBlockIndex* pindexBestKnownBlock;
    //! The hash of the last unknown block this peer has announced.
    uint256 hashLastUnknownBlock;
    //! The last full block we both have.
    CBlockIndex* pindexLastCommonBlock;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! Since when we're stalling block download progress (in microseconds), or 0.
    int64_t nStallingSince;
    list<QueuedBlock> vBlocksInFlight;
    int nBlocksInFlight;
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload;

    CNodeState()
    {
        fCurrentlyConnected = false;
        nMisbehavior = 0;
        fShouldBan = false;
        pindexBestKnownBlock = NULL;
        hashLastUnknownBlock = uint256(0);
        pindexLastCommonBlock = NULL;
        fSyncStarted = false;
        nStallingSince = 0;
        nBlocksInFlight = 0;
        fPreferredDownload = false;
    }
};

/** Map maintaining per-node state. Requires cs_main. */
map<NodeId, CNodeState> mapNodeState;

// Requires cs_main. Returns NULL when no state exists for the node.
CNodeState* State(NodeId pnode)
{
    map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
    if (it == mapNodeState.end())
        return NULL;
    return &it->second;
}

// Spin (50 ms sleeps) until cs_main can be taken, then report the active
// chain height.
int GetHeight()
{
    while (true) {
        TRY_LOCK(cs_main, lockMain);
        if (!lockMain) {
            MilliSleep(50);
            continue;
        }
        return chainActive.Height();
    }
}

// Recompute this peer's preferred-download status and keep the global
// nPreferredDownload counter in sync with it.
void UpdatePreferredDownload(CNode* node, CNodeState* state)
{
    nPreferredDownload -= state->fPreferredDownload;

    // Whether this node should be marked as a preferred download node.
    state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;

    nPreferredDownload += state->fPreferredDownload;
}

// Create the per-node state entry when a peer connects.
void InitializeNode(NodeId nodeid, const CNode* pnode)
{
    LOCK(cs_main);
    CNodeState& state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second;
    state.name = pnode->addrName;
    state.address = pnode->addr;
}

// Tear down the per-node state entry when a peer disconnects: unwind its
// contributions to the global counters and in-flight/orphan maps.
void FinalizeNode(NodeId nodeid)
{
    LOCK(cs_main);
    CNodeState* state = State(nodeid);

    if (state->fSyncStarted)
        nSyncStarted--;

    if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
        AddressCurrentlyConnected(state->address);
    }

    BOOST_FOREACH (const QueuedBlock& entry, state->vBlocksInFlight)
        mapBlocksInFlight.erase(entry.hash);
    EraseOrphansFor(nodeid);
    nPreferredDownload -= state->fPreferredDownload;

    mapNodeState.erase(nodeid);
}

// Requires cs_main.
// Remove a block from the in-flight bookkeeping (both the global map and the
// owning peer's state) once it has been received or abandoned.
void MarkBlockAsReceived(const uint256& hash)
{
    map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState* state = State(itInFlight->second.first);
        nQueuedValidatedHeaders -= itInFlight->second.second->fValidatedHeaders;
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
    }
}

// Requires cs_main. Record that a block has been requested from the given peer.
void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, CBlockIndex* pindex = NULL)
{
    CNodeState* state = State(nodeid);
    assert(state != NULL);

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    QueuedBlock newentry = {hash, pindex, GetTimeMicros(), nQueuedValidatedHeaders, pindex != NULL};
    nQueuedValidatedHeaders += newentry.fValidatedHeaders;
    list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
    state->nBlocksInFlight++;
    mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
}

/** Check whether the last unknown block a peer advertized is not yet known. */
void ProcessBlockAvailability(NodeId nodeid)
{
    CNodeState* state = State(nodeid);
    assert(state != NULL);

    if (state->hashLastUnknownBlock != 0) {
        BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
        if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
            if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
                state->pindexBestKnownBlock = itOld->second;
            state->hashLastUnknownBlock = uint256(0);
        }
    }
}

/** Update tracking information about which blocks a peer is assumed to have.
 */
void UpdateBlockAvailability(NodeId nodeid, const uint256& hash)
{
    CNodeState* state = State(nodeid);
    assert(state != NULL);

    ProcessBlockAvailability(nodeid);

    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
            state->pindexBestKnownBlock = it->second;
    } else {
        // An unknown block was announced; just assume that the latest one is the best one.
        state->hashLastUnknownBlock = hash;
    }
}

/** Find the last common ancestor two blocks have.
 *  Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb)
{
    // First bring both walkers to the same height...
    if (pa->nHeight > pb->nHeight) {
        pa = pa->GetAncestor(pb->nHeight);
    } else if (pb->nHeight > pa->nHeight) {
        pb = pb->GetAncestor(pa->nHeight);
    }

    // ...then step back in lockstep until they meet.
    while (pa != pb && pa && pb) {
        pa = pa->pprev;
        pb = pb->pprev;
    }

    // Eventually all chain branches meet at the genesis block.
    assert(pa == pb);
    return pa;
}

/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
 *  at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller)
{
    if (count == 0)
        return;

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState* state = State(nodeid);
    assert(state != NULL);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == NULL) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking point.
        // Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
    // of their current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
        return;

    std::vector<CBlockIndex*> vToFetch;
    CBlockIndex* pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
    // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
    // download that next block if the window were 1 larger.
    int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
        // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
        // as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding the ones that
        // are not yet downloaded and not in flight to vBlocks. In the mean time, update
        // pindexLastCommonBlock as long as all ancestors are already downloaded.
        BOOST_FOREACH (CBlockIndex* pindex, vToFetch) {
            if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (pindex->nStatus & BLOCK_HAVE_DATA) {
                if (pindex->nChainTx)
                    state->pindexLastCommonBlock = pindex;
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}

} // anon namespace

// Fill stats with the validation-layer view of a peer; false if unknown.
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats)
{
    LOCK(cs_main);
    CNodeState* state = State(nodeid);
    if (state == NULL)
        return false;
    stats.nMisbehavior = state->nMisbehavior;
    stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
    stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
    BOOST_FOREACH (const QueuedBlock& queue, state->vBlocksInFlight) {
        if (queue.pindex)
            stats.vHeightInFlight.push_back(queue.pindex->nHeight);
    }
    return true;
}

// Hook this file's handlers into the network layer's signal set.
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.GetHeight.connect(&GetHeight);
    nodeSignals.ProcessMessages.connect(&ProcessMessages);
    nodeSignals.SendMessages.connect(&SendMessages);
    nodeSignals.InitializeNode.connect(&InitializeNode);
    nodeSignals.FinalizeNode.connect(&FinalizeNode);
}

void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
    nodeSignals.GetHeight.disconnect(&GetHeight);
    nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
    nodeSignals.SendMessages.disconnect(&SendMessages);
    nodeSignals.InitializeNode.disconnect(&InitializeNode);
    nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
}

// Walk a locator and return the first entry that is on our active chain,
// falling back to genesis.
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
    // Find the first block the caller has in the main chain
    BOOST_FOREACH (const uint256& hash, locator.vHave) {
        BlockMap::iterator mi = mapBlockIndex.find(hash);
        if (mi != mapBlockIndex.end()) {
            CBlockIndex* pindex = (*mi).second;
            if (chain.Contains(pindex))
                return pindex;
        }
    }
    return chain.Genesis();
}

CCoinsViewCache* pcoinsTip = NULL;
CBlockTreeDB* pblocktree = NULL;
CZerocoinDB* zerocoinDB = NULL;
CSporkDB* pSporkDB = NULL;

//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//

// Store an orphan transaction keyed by its hash; false if rejected
// (duplicate or oversized).
bool AddOrphanTx(const CTransaction& tx, NodeId peer)
{
    uint256 hash = tx.GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;

    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 10,000 orphans, each of which is at most 5,000 bytes big is
    // at most 500 megabytes of orphans:
    unsigned int sz = tx.GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
    if (sz > 5000) {
        LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
        return false;
    }

    mapOrphanTransactions[hash].tx = tx;
    mapOrphanTransactions[hash].fromPeer = peer;
    BOOST_FOREACH (const CTxIn& txin, tx.vin)
        mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);

    LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(), mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
    return true;
}

// Remove one orphan and its entries in the by-prevout index.
void static EraseOrphanTx(uint256 hash)
{
    map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return;
    BOOST_FOREACH (const CTxIn& txin, it->second.tx.vin) {
        map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(hash);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }
    mapOrphanTransactions.erase(it);
}

// Drop every orphan that came from the given (now disconnected) peer.
void EraseOrphansFor(NodeId peer)
{
    int nErased = 0;
    map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end()) {
        map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
        if (maybeErase->second.fromPeer == peer) {
            EraseOrphanTx(maybeErase->second.tx.GetHash());
            ++nErased;
        }
    }
    if (nErased > 0)
        LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
}

// Evict pseudo-random orphans until at most nMaxOrphans remain; returns the
// number evicted.
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
    unsigned int nEvicted = 0;
    while (mapOrphanTransactions.size() > nMaxOrphans) {
        // Evict a random orphan:
        uint256 randomhash = GetRandHash();
        map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
        if (it == mapOrphanTransactions.end())
            it = mapOrphanTransactions.begin();
        EraseOrphanTx(it->first);
        ++nEvicted;
    }
    return nEvicted;
}

// Relay-policy check; on failure `reason` is set to a short reject string.
bool IsStandardTx(const CTransaction& tx, string& reason)
{
    AssertLockHeld(cs_main);
    if (tx.nVersion > CTransaction::CURRENT_VERSION || tx.nVersion < 1) {
        reason = "version";
        return false;
    }

    // Treat non-final transactions as non-standard to prevent a specific type
    // of double-spend attack, as well as DoS attacks. (if the transaction
    // can't be mined, the attacker isn't expending resources broadcasting it)
    // Basically we don't want to propagate transactions that can't be included in
    // the next block.
    //
    // However, IsFinalTx() is confusing... Without arguments, it uses
    // chainActive.Height() to evaluate nLockTime; when a block is accepted, chainActive.Height()
    // is set to the value of nHeight in the block. However, when IsFinalTx()
    // is called within CBlock::AcceptBlock(), the height of the block *being*
    // evaluated is what is used. Thus if we want to know if a transaction can
    // be part of the *next* block, we need to call IsFinalTx() with one more
    // than chainActive.Height().
    //
    // Timestamps on the other hand don't get any special treatment, because we
    // can't know what timestamp the next block will have, and there aren't
    // timestamp applications where it matters.
    if (!IsFinalTx(tx, chainActive.Height() + 1)) {
        reason = "non-final";
        return false;
    }

    // Extremely large transactions with lots of inputs can cost the network
    // almost as much to process as they cost the sender in fees, because
    // computing signature hashes is O(ninputs*txsize). Limiting transactions
    // to MAX_STANDARD_TX_SIZE mitigates CPU exhaustion attacks.
    unsigned int sz = tx.GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
    unsigned int nMaxSize = tx.ContainsZerocoins() ? MAX_ZEROCOIN_TX_SIZE : MAX_STANDARD_TX_SIZE;
    if (sz >= nMaxSize) {
        reason = "tx-size";
        return false;
    }

    for (const CTxIn& txin : tx.vin) {
        if (txin.scriptSig.IsZerocoinSpend())
            continue;
        // Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed
        // keys. (remember the 520 byte limit on redeemScript size) That works
        // out to a (15*(33+1))+3=513 byte redeemScript, 513+1+15*(73+1)+3=1627
        // bytes of scriptSig, which we round off to 1650 bytes for some minor
        // future-proofing. That's also enough to spend a 20-of-20
        // CHECKMULTISIG scriptPubKey, though such a scriptPubKey is not
        // considered standard)
        if (txin.scriptSig.size() > 1650) {
            reason = "scriptsig-size";
            return false;
        }
        if (!txin.scriptSig.IsPushOnly()) {
            reason = "scriptsig-not-pushonly";
            return false;
        }
    }

    unsigned int nDataOut = 0;
    txnouttype whichType;
    BOOST_FOREACH (const CTxOut& txout, tx.vout) {
        if (!::IsStandard(txout.scriptPubKey, whichType)) {
            reason = "scriptpubkey";
            return false;
        }

        if (whichType == TX_NULL_DATA)
            nDataOut++;
        else if ((whichType == TX_MULTISIG) && (!fIsBareMultisigStd)) {
            reason = "bare-multisig";
            return false;
        } else if (txout.IsDust(::minRelayTxFee)) {
            reason = "dust";
            return false;
        }
    }

    // only one OP_RETURN txout is permitted
    if (nDataOut > 1) {
        reason = "multi-op-return";
        return false;
    }

    return true;
}

// True when the transaction's nLockTime is satisfied at the given height/time
// (0 means "use the current chain height / adjusted time").
bool IsFinalTx(const CTransaction& tx, int nBlockHeight, int64_t nBlockTime)
{
    AssertLockHeld(cs_main);
    // Time based nLockTime implemented in 0.1.6
    if (tx.nLockTime == 0)
        return true;
    if (nBlockHeight == 0)
        nBlockHeight = chainActive.Height();
    if (nBlockTime == 0)
        nBlockTime = GetAdjustedTime();
    // nLockTime below the threshold is a height; at or above it is a unix time.
    if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
        return true;
    BOOST_FOREACH (const CTxIn& txin, tx.vin)
        if (!txin.IsFinal())
            return false;
    return true;
}

/**
 * Check transaction inputs to mitigate two
 * potential denial-of-service attacks:
 *
 * 1.
scriptSigs with extra data stuffed into them, * not consumed by scriptPubKey (or P2SH script) * 2. P2SH scripts with a crazy number of expensive * CHECKSIG/CHECKMULTISIG operations */ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) { if (tx.IsCoinBase() || tx.IsZerocoinSpend()) return true; // coinbase has no inputs and zerocoinspend has a special input //todo should there be a check for a 'standard' zerocoinspend here? for (unsigned int i = 0; i < tx.vin.size(); i++) { const CTxOut& prev = mapInputs.GetOutputFor(tx.vin[i]); vector<vector<unsigned char> > vSolutions; txnouttype whichType; // get the scriptPubKey corresponding to this input: const CScript& prevScript = prev.scriptPubKey; if (!Solver(prevScript, whichType, vSolutions)) return false; int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions); if (nArgsExpected < 0) return false; // Transactions with extra stuff in their scriptSigs are // non-standard. Note that this EvalScript() call will // be quick, because if there are any operations // beside "push data" in the scriptSig // IsStandard() will have already returned false // and this method isn't called. vector<vector<unsigned char> > stack; if (!EvalScript(stack, tx.vin[i].scriptSig, false, BaseSignatureChecker())) return false; if (whichType == TX_SCRIPTHASH) { if (stack.empty()) return false; CScript subscript(stack.back().begin(), stack.back().end()); vector<vector<unsigned char> > vSolutions2; txnouttype whichType2; if (Solver(subscript, whichType2, vSolutions2)) { int tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2); if (tmpExpected < 0) return false; nArgsExpected += tmpExpected; } else { // Any other Script with less than 15 sigops OK: unsigned int sigops = subscript.GetSigOpCount(true); // ... 
extra data left on the stack after execution is OK, too: return (sigops <= MAX_P2SH_SIGOPS); } } if (stack.size() != (unsigned int)nArgsExpected) return false; } return true; } unsigned int GetLegacySigOpCount(const CTransaction& tx) { unsigned int nSigOps = 0; BOOST_FOREACH (const CTxIn& txin, tx.vin) { nSigOps += txin.scriptSig.GetSigOpCount(false); } BOOST_FOREACH (const CTxOut& txout, tx.vout) { nSigOps += txout.scriptPubKey.GetSigOpCount(false); } return nSigOps; } unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs) { if (tx.IsCoinBase() || tx.IsZerocoinSpend()) return 0; unsigned int nSigOps = 0; for (unsigned int i = 0; i < tx.vin.size(); i++) { const CTxOut& prevout = inputs.GetOutputFor(tx.vin[i]); if (prevout.scriptPubKey.IsPayToScriptHash()) nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig); } return nSigOps; } int GetInputAge(CTxIn& vin) { CCoinsView viewDummy; CCoinsViewCache view(&viewDummy); { LOCK(mempool.cs); CCoinsViewMemPool viewMempool(pcoinsTip, mempool); view.SetBackend(viewMempool); // temporarily switch cache backend to db+mempool view const CCoins* coins = view.AccessCoins(vin.prevout.hash); if (coins) { if (coins->nHeight < 0) return 0; return (chainActive.Tip()->nHeight + 1) - coins->nHeight; } else return -1; } } int GetInputAgeIX(uint256 nTXHash, CTxIn& vin) { int sigs = 0; int nResult = GetInputAge(vin); if (nResult < 0) nResult = 0; if (nResult < 6) { std::map<uint256, CTransactionLock>::iterator i = mapTxLocks.find(nTXHash); if (i != mapTxLocks.end()) { sigs = (*i).second.CountSignatures(); } if (sigs >= SWIFTTX_SIGNATURES_REQUIRED) { return nSwiftTXDepth + nResult; } } return -1; } int GetIXConfirmations(uint256 nTXHash) { int sigs = 0; std::map<uint256, CTransactionLock>::iterator i = mapTxLocks.find(nTXHash); if (i != mapTxLocks.end()) { sigs = (*i).second.CountSignatures(); } if (sigs >= SWIFTTX_SIGNATURES_REQUIRED) { return nSwiftTXDepth; } return 0; } // ppcoin: total coin age 
spent in transaction, in the unit of coin-days. // Only those coins meeting minimum age requirement counts. As those // transactions not in main chain are not currently indexed so we // might not find out about their coin age. Older transactions are // guaranteed to be in main chain by sync-checkpoint. This rule is // introduced to help nodes establish a consistent view of the coin // age (trust score) of competing branches. bool GetCoinAge(const CTransaction& tx, const unsigned int nTxTime, uint64_t& nCoinAge) { uint256 bnCentSecond = 0; // coin age in the unit of cent-seconds nCoinAge = 0; CBlockIndex* pindex = NULL; BOOST_FOREACH (const CTxIn& txin, tx.vin) { // First try finding the previous transaction in database CTransaction txPrev; uint256 hashBlockPrev; if (!GetTransaction(txin.prevout.hash, txPrev, hashBlockPrev, true)) { LogPrintf("GetCoinAge: failed to find vin transaction \n"); continue; // previous transaction not in main chain } BlockMap::iterator it = mapBlockIndex.find(hashBlockPrev); if (it != mapBlockIndex.end()) pindex = it->second; else { LogPrintf("GetCoinAge() failed to find block index \n"); continue; } // Read block header CBlockHeader prevblock = pindex->GetBlockHeader(); if (prevblock.nTime + nStakeMinAge > nTxTime) continue; // only count coins meeting min age requirement if (nTxTime < prevblock.nTime) { LogPrintf("GetCoinAge: Timestamp Violation: txtime less than txPrev.nTime"); return false; // Transaction timestamp violation } int64_t nValueIn = txPrev.vout[txin.prevout.n].nValue; bnCentSecond += uint256(nValueIn) * (nTxTime - prevblock.nTime); } uint256 bnCoinDay = bnCentSecond / COIN / (24 * 60 * 60); LogPrintf("coin age bnCoinDay=%s\n", bnCoinDay.ToString().c_str()); nCoinAge = bnCoinDay.GetCompact(); return true; } bool MoneyRange(CAmount nValueOut) { return nValueOut >= 0 && nValueOut <= Params().MaxMoneyOut(); } int GetZerocoinStartHeight() { return Params().Zerocoin_StartHeight(); } void FindMints(vector<CZerocoinMint> 
vMintsToFind, vector<CZerocoinMint>& vMintsToUpdate, vector<CZerocoinMint>& vMissingMints, bool fExtendedSearch) { // see which mints are in our public zerocoin database. The mint should be here if it exists, unless // something went wrong for (CZerocoinMint mint : vMintsToFind) { uint256 txHash; if (!zerocoinDB->ReadCoinMint(mint.GetValue(), txHash)) { vMissingMints.push_back(mint); continue; } // make sure the txhash and block height meta data are correct for this mint CTransaction tx; uint256 hashBlock; if (!GetTransaction(txHash, tx, hashBlock, true)) { LogPrintf("%s : cannot find tx %s\n", __func__, txHash.GetHex()); vMissingMints.push_back(mint); continue; } if (!mapBlockIndex.count(hashBlock)) { LogPrintf("%s : cannot find block %s\n", __func__, hashBlock.GetHex()); vMissingMints.push_back(mint); continue; } //see if this mint is spent uint256 hashTxSpend = 0; zerocoinDB->ReadCoinSpend(mint.GetSerialNumber(), hashTxSpend); bool fSpent = hashTxSpend != 0; //if marked as spent, check that it actually made it into the chain CTransaction txSpend; uint256 hashBlockSpend; if (fSpent && !GetTransaction(hashTxSpend, txSpend, hashBlockSpend, true)) { LogPrintf("%s : cannot find spend tx %s\n", __func__, hashTxSpend.GetHex()); zerocoinDB->EraseCoinSpend(mint.GetSerialNumber()); mint.SetUsed(false); vMintsToUpdate.push_back(mint); continue; } //The mint has been incorrectly labelled as spent in zerocoinDB and needs to be undone int nHeightTx = 0; if (fSpent && !IsSerialInBlockchain(mint.GetSerialNumber(), nHeightTx)) { LogPrintf("%s : cannot find block %s. 
Erasing coinspend from zerocoinDB.\n", __func__, hashBlockSpend.GetHex()); zerocoinDB->EraseCoinSpend(mint.GetSerialNumber()); mint.SetUsed(false); vMintsToUpdate.push_back(mint); continue; } // if meta data is correct, then no need to update if (mint.GetTxHash() == txHash && mint.GetHeight() == mapBlockIndex[hashBlock]->nHeight && mint.IsUsed() == fSpent) continue; //mark this mint for update mint.SetTxHash(txHash); mint.SetHeight(mapBlockIndex[hashBlock]->nHeight); mint.SetUsed(fSpent); vMintsToUpdate.push_back(mint); } if (fExtendedSearch) { // search the blockchain for the meta data on our missing mints int nZerocoinStartHeight = GetZerocoinStartHeight(); for (int i = nZerocoinStartHeight; i < chainActive.Height(); i++) { if(i % 1000 == 0) LogPrintf("%s : scanned %d blocks\n", __func__, i - nZerocoinStartHeight); if(chainActive[i]->vMintDenominationsInBlock.empty()) continue; CBlock block; if(!ReadBlockFromDisk(block, chainActive[i])) continue; list<CZerocoinMint> vMints; if(!BlockToZerocoinMintList(block, vMints)) continue; // search the blocks mints to see if it contains the mint that is requesting meta data updates for (CZerocoinMint mintBlockChain : vMints) { for (CZerocoinMint mintMissing : vMissingMints) { if (mintMissing.GetValue() == mintBlockChain.GetValue()) { LogPrintf("%s FOUND %s in block %d\n", __func__, mintMissing.GetValue().GetHex(), i); mintMissing.SetHeight(i); mintMissing.SetTxHash(mintBlockChain.GetTxHash()); vMintsToUpdate.push_back(mintMissing); } } } } } //remove any missing mints that were found for (CZerocoinMint mintMissing : vMissingMints) { for (CZerocoinMint mintFound : vMintsToUpdate) { if (mintMissing.GetValue() == mintFound.GetValue()) std::remove(vMissingMints.begin(), vMissingMints.end(), mintMissing); } } } bool GetZerocoinMint(const CBigNum& bnPubcoin, uint256& txHash) { txHash = 0; return zerocoinDB->ReadCoinMint(bnPubcoin, txHash); } bool IsSerialKnown(const CBigNum& bnSerial) { uint256 txHash = 0; return 
zerocoinDB->ReadCoinSpend(bnSerial, txHash);
}

// True when the serial's spend tx exists AND its containing block is on the
// active chain; on success nHeightTx receives that block's height.
bool IsSerialInBlockchain(const CBigNum& bnSerial, int& nHeightTx)
{
    uint256 txHash = 0;
    // if not in zerocoinDB then its not in the blockchain
    if (!zerocoinDB->ReadCoinSpend(bnSerial, txHash))
        return false;

    CTransaction tx;
    uint256 hashBlock;
    if (!GetTransaction(txHash, tx, hashBlock, true))
        return false;

    bool inChain = mapBlockIndex.count(hashBlock) && chainActive.Contains(mapBlockIndex[hashBlock]);
    if (inChain)
        nHeightTx = mapBlockIndex.at(hashBlock)->nHeight;

    return inChain;
}

// Delete the spend record for this serial from zerocoinDB.
bool RemoveSerialFromDB(const CBigNum& bnSerial)
{
    return zerocoinDB->EraseCoinSpend(bnSerial);
}

/** zerocoin transaction checks */
// Persist a (public coin -> mint tx hash) record. Idempotent when the same
// tx is recorded twice; fails when a DIFFERENT tx already claims the coin.
bool RecordMintToDB(PublicCoin publicZerocoin, const uint256& txHash)
{
    //Check the pubCoinValue didn't already store in the zerocoin database. todo: pubcoin memory map?
    //write the zerocoinmint to db if we don't already have it
    //note that many of the mint parameters are not set here because those params are private to the minter
    CZerocoinMint pubCoinTx;
    uint256 hashFromDB;
    if (zerocoinDB->ReadCoinMint(publicZerocoin.getValue(), hashFromDB)) {
        if (hashFromDB == txHash)
            return true;

        LogPrintf("RecordMintToDB: failed, we already have this public coin recorded\n");
        return false;
    }

    if (!zerocoinDB->WriteCoinMint(publicZerocoin, txHash)) {
        LogPrintf("RecordMintToDB: failed to record public coin to DB\n");
        return false;
    }

    return true;
}

// Parse a mint txout's scriptPubKey into a PublicCoin. The pubcoin value is
// the script bytes after SCRIPT_OFFSET; the denomination is derived from the
// output amount. Fails (DoS 100) when the amount maps to no denomination.
bool TxOutToPublicCoin(const CTxOut txout, PublicCoin& pubCoin, CValidationState& state)
{
    CBigNum publicZerocoin;
    vector<unsigned char> vchZeroMint;
    vchZeroMint.insert(vchZeroMint.end(), txout.scriptPubKey.begin() + SCRIPT_OFFSET,
        txout.scriptPubKey.begin() + txout.scriptPubKey.size());
    publicZerocoin.setvch(vchZeroMint);

    CoinDenomination denomination = AmountToZerocoinDenomination(txout.nValue);
    LogPrint("zero", "%s ZCPRINT denomination %d pubcoin %s\n", __func__, denomination, publicZerocoin.GetHex());
    if (denomination == ZQ_ERROR)
        return state.DoS(100, error("TxOutToPublicCoin : txout.nValue is not correct"));

    PublicCoin checkPubCoin(Params().Zerocoin_Params(), publicZerocoin, denomination);
    pubCoin = checkPubCoin;

    return true;
}

// Collect every mint output in the block as a PublicCoin.
// Returns false as soon as one output fails to parse.
bool BlockToPubcoinList(const CBlock& block, list<PublicCoin>& listPubcoins)
{
    for (const CTransaction tx : block.vtx) {
        if (!tx.IsZerocoinMint())
            continue;

        for (unsigned int i = 0; i < tx.vout.size(); i++) {
            const CTxOut txOut = tx.vout[i];
            if (!txOut.scriptPubKey.IsZerocoinMint())
                continue;

            CValidationState state;
            PublicCoin pubCoin(Params().Zerocoin_Params());
            if (!TxOutToPublicCoin(txOut, pubCoin, state))
                return false;

            listPubcoins.emplace_back(pubCoin);
        }
    }

    return true;
}

//return a list of zerocoin mints contained in a specific block
// Like BlockToPubcoinList, but wraps each coin in a CZerocoinMint carrying
// the creating tx hash (height/timestamp fields left zeroed).
bool BlockToZerocoinMintList(const CBlock& block, std::list<CZerocoinMint>& vMints)
{
    for (const CTransaction tx : block.vtx) {
        if (!tx.IsZerocoinMint())
            continue;

        for (unsigned int i = 0; i < tx.vout.size(); i++) {
            const CTxOut txOut = tx.vout[i];
            if (!txOut.scriptPubKey.IsZerocoinMint())
                continue;

            CValidationState state;
            PublicCoin pubCoin(Params().Zerocoin_Params());
            if (!TxOutToPublicCoin(txOut, pubCoin, state))
                return false;

            CZerocoinMint mint = CZerocoinMint(pubCoin.getDenomination(), pubCoin.getValue(), 0, 0, false);
            mint.SetTxHash(tx.GetHash());
            vMints.push_back(mint);
        }
    }

    return true;
}

// Collect the pubcoin values of every mint of the given denomination in the block.
bool BlockToMintValueVector(const CBlock& block, const CoinDenomination denom, vector<CBigNum>& vValues)
{
    for (const CTransaction tx : block.vtx) {
        if (!tx.IsZerocoinMint())
            continue;

        for (const CTxOut txOut : tx.vout) {
            if (!txOut.scriptPubKey.IsZerocoinMint())
                continue;

            CValidationState state;
            PublicCoin coin(Params().Zerocoin_Params());
            if (!TxOutToPublicCoin(txOut, coin, state))
                return false;

            if (coin.getDenomination() != denom)
                continue;

            vValues.push_back(coin.getValue());
        }
    }

    return true;
}

//return a list of zerocoin spends contained in a specific block, list may have many denominations
std::list<libzerocoin::CoinDenomination> ZerocoinSpendListFromBlock(const CBlock& block)
{
    std::list<libzerocoin::CoinDenomination> vSpends;
    for (const CTransaction tx : block.vtx) {
        if (!tx.IsZerocoinSpend())
            continue;

        for (const CTxIn txin : tx.vin) {
            if (!txin.scriptSig.IsZerocoinSpend())
                continue;

            // By convention the spend's denomination is carried in nSequence.
            libzerocoin::CoinDenomination c = libzerocoin::IntToZerocoinDenomination(txin.nSequence);
            vSpends.push_back(c);
        }
    }
    return vSpends;
}

// Validate a single mint output: parseable pubcoin, cryptographically valid
// coin, and (unless fCheckOnly) recorded into zerocoinDB.
bool CheckZerocoinMint(const uint256& txHash, const CTxOut& txout, CValidationState& state, bool fCheckOnly)
{
    PublicCoin pubCoin(Params().Zerocoin_Params());
    if (!TxOutToPublicCoin(txout, pubCoin, state))
        return state.DoS(100, error("CheckZerocoinMint(): TxOutToPublicCoin() failed"));

    if (!pubCoin.validate())
        return state.DoS(100, error("CheckZerocoinMint() : PubCoin does not validate\n"));

    if (!fCheckOnly && !RecordMintToDB(pubCoin, txHash))
        return state.DoS(100, error("CheckZerocoinMint(): RecordMintToDB() failed"));

    return true;
}

// Deserialize the CoinSpend proof embedded in a spend input's scriptSig.
// The serialized proof begins BIGNUM_SIZE bytes into the script.
CoinSpend TxInToZerocoinSpend(const CTxIn& txin)
{
    // Deserialize the CoinSpend intro a fresh object
    std::vector<char, zero_after_free_allocator<char> > dataTxIn;
    dataTxIn.insert(dataTxIn.end(), txin.scriptSig.begin() + BIGNUM_SIZE, txin.scriptSig.end());

    CDataStream serializedCoinSpend(dataTxIn, SER_NETWORK, PROTOCOL_VERSION);
    return CoinSpend(Params().Zerocoin_Params(), serializedCoinSpend);
}

// True when this serial is either unseen or already attributed to hashTx.
// NOTE(review): despite the "Is...Unknown" name this function has a WRITE side
// effect — an unseen serial is immediately recorded to zerocoinDB.
bool IsZerocoinSpendUnknown(CoinSpend coinSpend, uint256 hashTx, CValidationState& state)
{
    uint256 hashTxFromDB;
    if (zerocoinDB->ReadCoinSpend(coinSpend.getCoinSerialNumber(), hashTxFromDB))
        return hashTx == hashTxFromDB;

    if (!zerocoinDB->WriteCoinSpend(coinSpend.getCoinSerialNumber(), hashTx))
        return state.DoS(100, error("CheckZerocoinSpend(): Failed to write zerocoin mint to database"));

    return true;
}

// Full consensus validation of a zerocoinspend transaction: output count,
// denominations, SoK txout binding, accumulator proof (when fVerifySignature),
// duplicate serials, and redeemed-vs-spent value. (Body continues below.)
bool CheckZerocoinSpend(const CTransaction tx, bool fVerifySignature, CValidationState& state)
{
    //max needed non-mint outputs should be 2 - one for redemption address and a possible 2nd for change
    if (tx.vout.size() > 2) {
        int outs = 0;
        for (const CTxOut out : tx.vout) {
            if (out.IsZerocoinMint())
                continue;
            outs++;
        }
        if (outs > 2)
            return state.DoS(100, error("CheckZerocoinSpend(): over two non-mint outputs in a zerocoinspend transaction"));
    }

    //compute the txout hash that is used for the zerocoinspend signatures
    // (the signature-of-knowledge commits to the outputs via this hash)
    CMutableTransaction txTemp;
    for (const CTxOut out : tx.vout) {
        txTemp.vout.push_back(out);
    }
    uint256 hashTxOut = txTemp.GetHash();

    bool fValidated = false;
    set<CBigNum> serials;
    list<CoinSpend> vSpends;
    CAmount nTotalRedeemed = 0;
    for (const CTxIn& txin : tx.vin) {
        //only check txin that is a zcspend
        if (!txin.scriptSig.IsZerocoinSpend())
            continue;

        CoinSpend newSpend = TxInToZerocoinSpend(txin);
        vSpends.push_back(newSpend);

        //check that the denomination is valid
        if (newSpend.getDenomination() == ZQ_ERROR)
            return state.DoS(100, error("Zerocoinspend does not have the correct denomination"));

        //check that denomination is what it claims to be in nSequence
        if (newSpend.getDenomination() != txin.nSequence)
            return state.DoS(100, error("Zerocoinspend nSequence denomination does not match CoinSpend"));

        //make sure the txout has not changed
        if (newSpend.getTxOutHash() != hashTxOut)
            return state.DoS(100, error("Zerocoinspend does not use the same txout that was used in the SoK"));

        // Skip signature verification during initial block download
        if (fVerifySignature) {
            //see if we have record of the accumulator used in the spend tx
            CBigNum bnAccumulatorValue = 0;
            if (!zerocoinDB->ReadAccumulatorValue(newSpend.getAccumulatorChecksum(), bnAccumulatorValue))
                return state.DoS(100, error("Zerocoinspend could not find accumulator associated with checksum"));

            Accumulator accumulator(Params().Zerocoin_Params(), newSpend.getDenomination(), bnAccumulatorValue);

            //Check that the coin is on the accumulator
            if (!newSpend.Verify(accumulator))
                return state.DoS(100, error("CheckZerocoinSpend(): zerocoin spend did not verify"));
        }

        // Intra-transaction double-spend guard.
        if (serials.count(newSpend.getCoinSerialNumber()))
            return state.DoS(100, error("Zerocoinspend serial is used twice in the same tx"));
        serials.insert(newSpend.getCoinSerialNumber());

        //make sure that there is no over redemption of coins
        nTotalRedeemed += ZerocoinDenominationToAmount(newSpend.getDenomination());
        fValidated = true;
    }

    // Total redeemed must cover the transaction's outputs.
    if (nTotalRedeemed < tx.GetValueOut()) {
        LogPrintf("redeemed = %s , spend = %s \n", FormatMoney(nTotalRedeemed), FormatMoney(tx.GetValueOut()));
        return state.DoS(100, error("Transaction spend more than was redeemed in zerocoins"));
    }

    // Send signal to wallet if this is ours
    if (pwalletMain) {
        CWalletDB walletdb(pwalletMain->strWalletFile);
        list <CBigNum> listMySerials = walletdb.ListMintedCoinsSerial();
        for (const auto& newSpend : vSpends) {
            list<CBigNum>::iterator it = find(listMySerials.begin(), listMySerials.end(), newSpend.getCoinSerialNumber());
            if (it != listMySerials.end()) {
                LogPrintf("%s: %s detected spent zerocoin mint in transaction %s \n", __func__, it->GetHex(), tx.GetHash().GetHex());
                pwalletMain->NotifyZerocoinChanged(pwalletMain, it->GetHex(), "Used", CT_UPDATED);
            }
        }
    }

    // NOTE(review): fValidated is only true when at least one zcspend input was seen.
    return fValidated;
}

// Context-free transaction sanity checks: non-empty vin/vout, size limit,
// per-output and total value ranges, zerocoin mint/spend rules when active,
// and duplicate-input detection. (Body continues below.)
bool CheckTransaction(const CTransaction& tx, bool fZerocoinActive, bool fRejectBadUTXO, CValidationState& state)
{
    // Basic checks that don't depend on any context
    if (tx.vin.empty())
        return state.DoS(10, error("CheckTransaction() : vin empty"),
            REJECT_INVALID, "bad-txns-vin-empty");
    if (tx.vout.empty())
        return state.DoS(10, error("CheckTransaction() : vout empty"),
            REJECT_INVALID, "bad-txns-vout-empty");

    // Size limits
    unsigned int nMaxSize = MAX_ZEROCOIN_TX_SIZE;

    if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > nMaxSize)
        return state.DoS(100, error("CheckTransaction() : size limits failed"),
            REJECT_INVALID, "bad-txns-oversize");

    // Check for negative or overflow output values
    CAmount nValueOut = 0;
    int nZCSpendCount = 0;
    BOOST_FOREACH (const CTxOut& txout, tx.vout) {
        if (txout.IsEmpty() && !tx.IsCoinBase() && !tx.IsCoinStake())
            return state.DoS(100, error("CheckTransaction(): txout empty for user transaction"));

        if (txout.nValue < 0)
            return
                state.DoS(100, error("CheckTransaction() : txout.nValue negative"),
                    REJECT_INVALID, "bad-txns-vout-negative");
        if (txout.nValue > Params().MaxMoneyOut())
            return state.DoS(100, error("CheckTransaction() : txout.nValue too high"),
                REJECT_INVALID, "bad-txns-vout-toolarge");
        nValueOut += txout.nValue;
        // Running total must stay in range to catch overflow across outputs.
        if (!MoneyRange(nValueOut))
            return state.DoS(100, error("CheckTransaction() : txout total out of range"),
                REJECT_INVALID, "bad-txns-txouttotal-toolarge");
        if (fZerocoinActive && txout.IsZerocoinMint()) {
            // Mint failures are only fatal when bad UTXOs are to be rejected.
            if (!CheckZerocoinMint(tx.GetHash(), txout, state, false)) {
                if (fRejectBadUTXO)
                    return state.DoS(100, error("CheckTransaction() : invalid zerocoin mint"));
            }
        }
        if (fZerocoinActive && txout.scriptPubKey.IsZerocoinSpend())
            nZCSpendCount++;
    }

    if (fZerocoinActive) {
        if (nZCSpendCount > Params().Zerocoin_MaxSpendsPerTransaction())
            return state.DoS(100, error("CheckTransaction() : there are more zerocoin spends than are allowed in one transaction"));

        if (tx.IsZerocoinSpend()) {
            //require that a zerocoinspend only has inputs that are zerocoins
            for (const CTxIn in : tx.vin) {
                if (!in.scriptSig.IsZerocoinSpend())
                    return state.DoS(100, error("CheckTransaction() : zerocoinspend contains inputs that are not zerocoins"));
            }

            // Do not require signature verification if this is initial sync and a block over 24 hours old
            bool fVerifySignature = !IsInitialBlockDownload() && (GetTime() - chainActive.Tip()->GetBlockTime() < (60*60*24));
            if (!CheckZerocoinSpend(tx, fVerifySignature, state))
                return state.DoS(100, error("CheckTransaction() : invalid zerocoin spend"));
        }
    }

    // Check for duplicate inputs
    set<COutPoint> vInOutPoints;
    set<CBigNum> vZerocoinSpendSerials;
    for (const CTxIn& txin : tx.vin) {
        if (vInOutPoints.count(txin.prevout))
            return state.DoS(100, error("CheckTransaction() : duplicate inputs"),
                REJECT_INVALID, "bad-txns-inputs-duplicate");

        //duplicate zcspend serials are checked in CheckZerocoinSpend()
        if (!txin.scriptSig.IsZerocoinSpend())
            vInOutPoints.insert(txin.prevout);
    }

    if
    (tx.IsCoinBase()) {
        // Coinbase scriptSig must be within conventional size bounds.
        if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 150)
            return state.DoS(100, error("CheckTransaction() : coinbase script size=%d", tx.vin[0].scriptSig.size()),
                REJECT_INVALID, "bad-cb-length");
    } else if (fZerocoinActive && tx.IsZerocoinSpend()) {
        if (tx.vin.size() < 1 || static_cast<int>(tx.vin.size()) > Params().Zerocoin_MaxSpendsPerTransaction())
            return state.DoS(10, error("CheckTransaction() : Zerocoin Spend has more than allowed txin's"),
                REJECT_INVALID, "bad-zerocoinspend");
    } else {
        // Regular transactions may not reference the null outpoint
        // (zerocoin spends are exempt — they carry no real prevout).
        BOOST_FOREACH (const CTxIn& txin, tx.vin)
            if (txin.prevout.IsNull() && (fZerocoinActive && !txin.scriptSig.IsZerocoinSpend()))
                return state.DoS(10, error("CheckTransaction() : prevout is null"),
                    REJECT_INVALID, "bad-txns-prevout-null");
    }

    return true;
}

// Would this transaction be final in the NEXT block (nLockTime satisfied)?
bool CheckFinalTx(const CTransaction& tx, int flags)
{
    AssertLockHeld(cs_main);

    // By convention a negative value for flags indicates that the
    // current network-enforced consensus rules should be used. In
    // a future soft-fork scenario that would mean checking which
    // rules would be enforced for the next block and setting the
    // appropriate flags. At the present time no soft-forks are
    // scheduled, so no flags are set.
    flags = std::max(flags, 0);

    // CheckFinalTx() uses chainActive.Height()+1 to evaluate
    // nLockTime because when IsFinalTx() is called within
    // CBlock::AcceptBlock(), the height of the block *being*
    // evaluated is what is used. Thus if we want to know if a
    // transaction can be part of the *next* block, we need to call
    // IsFinalTx() with one more than chainActive.Height().
    const int nBlockHeight = chainActive.Height() + 1;

    // BIP113 will require that time-locked transactions have nLockTime set to
    // less than the median time of the previous block they're contained in.
    // When the next block is created its previous block will be the current
    // chain tip, so we use that to calculate the median time passed to
    // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
    const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST) ? chainActive.Tip()->GetMedianTimePast() : GetAdjustedTime();

    return IsFinalTx(tx, nBlockHeight, nBlockTime);
}

// Minimum relay fee for a transaction of nBytes, honoring mempool
// priority/fee deltas (which zero the fee) and the free-tx area.
CAmount GetMinRelayFee(const CTransaction& tx, unsigned int nBytes, bool fAllowFree)
{
    {
        LOCK(mempool.cs);
        uint256 hash = tx.GetHash();
        double dPriorityDelta = 0;
        CAmount nFeeDelta = 0;
        mempool.ApplyDeltas(hash, dPriorityDelta, nFeeDelta);
        // A prioritised transaction pays no minimum fee.
        if (dPriorityDelta > 0 || nFeeDelta > 0)
            return 0;
    }

    CAmount nMinFee = ::minRelayTxFee.GetFee(nBytes);

    if (fAllowFree) {
        // There is a free transaction area in blocks created by most miners,
        // * If we are relaying we allow transactions up to DEFAULT_BLOCK_PRIORITY_SIZE - 1000
        //   to be considered to fall into this category. We don't want to encourage sending
        //   multiple transactions instead of one big transaction to avoid fees.
        if (nBytes < (DEFAULT_BLOCK_PRIORITY_SIZE - 1000))
            nMinFee = 0;
    }

    if (!MoneyRange(nMinFee))
        nMinFee = Params().MaxMoneyOut();
    return nMinFee;
}

// Validate a loose transaction and, on success, add it to the mempool.
// Returns false (with reason in `state`) on any policy or consensus failure.
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState& state, const CTransaction& tx, bool fLimitFree, bool* pfMissingInputs, bool fRejectInsaneFee, bool ignoreFees)
{
    AssertLockHeld(cs_main);
    if (pfMissingInputs)
        *pfMissingInputs = false;

    //Temporarily disable zerocoin for maintenance
    if (GetAdjustedTime() > GetSporkValue(SPORK_16_ZEROCOIN_MAINTENANCE_MODE) && tx.ContainsZerocoins())
        return state.DoS(10, error("AcceptToMemoryPool : Zerocoin transactions are temporarily disabled for maintenance"), REJECT_INVALID, "bad-tx");

    if (!CheckTransaction(tx, chainActive.Height() >= Params().Zerocoin_AccumulatorStartHeight(), true, state))
        return state.DoS(100, error("AcceptToMemoryPool: : CheckTransaction failed"), REJECT_INVALID, "bad-tx");

    // Coinbase is only valid in a block, not as a loose transaction
    if (tx.IsCoinBase())
        return state.DoS(100, error("AcceptToMemoryPool: : coinbase as individual tx"),
            REJECT_INVALID, "coinbase");

    //Coinstake is also only valid in a block, not as a loose transaction
    if (tx.IsCoinStake())
        return state.DoS(100, error("AcceptToMemoryPool: coinstake as individual tx"), REJECT_INVALID, "coinstake");

    // Rather not work on nonstandard transactions (unless -testnet/-regtest)
    string reason;
    if (Params().RequireStandard() && !IsStandardTx(tx, reason))
        return state.DoS(0, error("AcceptToMemoryPool : nonstandard transaction: %s", reason),
            REJECT_NONSTANDARD, reason);

    // is it already in the memory pool?
    uint256 hash = tx.GetHash();
    if (pool.exists(hash)) {
        LogPrintf("%s tx already in mempool\n", __func__);
        return false;
    }

    // ----------- swiftTX transaction scanning -----------
    // Reject anything that conflicts with an active SwiftTX input lock.
    BOOST_FOREACH (const CTxIn& in, tx.vin) {
        if (mapLockedInputs.count(in.prevout)) {
            if (mapLockedInputs[in.prevout] != tx.GetHash()) {
                return state.DoS(0, error("AcceptToMemoryPool : conflicts with existing transaction lock: %s", reason),
                    REJECT_INVALID, "tx-lock-conflict");
            }
        }
    }

    // Check for conflicts with in-memory transactions
    if (!tx.IsZerocoinSpend()) {
        LOCK(pool.cs); // protect pool.mapNextTx
        for (unsigned int i = 0; i < tx.vin.size(); i++) {
            COutPoint outpoint = tx.vin[i].prevout;
            if (pool.mapNextTx.count(outpoint)) {
                // Disable replacement feature for now
                return false;
            }
        }
    }

    {
        CCoinsView dummy;
        CCoinsViewCache view(&dummy);

        CAmount nValueIn = 0;
        if (tx.IsZerocoinSpend()) {
            // Zerocoin spends have no conventional inputs: value-in comes from
            // the redeemed denominations, not from the UTXO set.
            nValueIn = tx.GetZerocoinSpent();

            //Check that txid is not already in the chain
            int nHeightTx = 0;
            if (IsTransactionInChain(tx.GetHash(), nHeightTx))
                return state.Invalid(error("AcceptToMemoryPool : zBcl spend tx %s already in block %d", tx.GetHash().GetHex(), nHeightTx),
                    REJECT_DUPLICATE, "bad-txns-inputs-spent");

            //Check for double spending of serial #'s
            for (const CTxIn& txIn : tx.vin) {
                if (!txIn.scriptSig.IsZerocoinSpend())
                    continue;
                CoinSpend spend = TxInToZerocoinSpend(txIn);
                int nHeightTx = 0;
                if (IsSerialInBlockchain(spend.getCoinSerialNumber(), nHeightTx))
                    return state.Invalid(error("%s : zBcl spend with serial %s is already in block %d\n", __func__,
                        spend.getCoinSerialNumber().GetHex(), nHeightTx));

                //Is serial in the acceptable range
                if (!spend.HasValidSerial(Params().Zerocoin_Params()))
                    return state.Invalid(error("%s : zBcl spend with serial %s from tx %s is not in valid range\n", __func__,
                        spend.getCoinSerialNumber().GetHex(), tx.GetHash().GetHex()));
            }
        } else {
            LOCK(pool.cs);
            CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
            view.SetBackend(viewMemPool);

            // do we already have it?
            if (view.HaveCoins(hash))
                return false;

            // do all inputs exist?
            // Note that this does not check for the presence of actual outputs (see the next check for that),
            // only helps filling in pfMissingInputs (to determine missing vs spent).
            for (const CTxIn txin : tx.vin) {
                if (!view.HaveCoins(txin.prevout.hash)) {
                    if (pfMissingInputs)
                        *pfMissingInputs = true;
                    return false;
                }
            }

            // are the actual inputs available?
            if (!view.HaveInputs(tx))
                return state.Invalid(error("AcceptToMemoryPool : inputs already spent"),
                    REJECT_DUPLICATE, "bad-txns-inputs-spent");

            // Bring the best block into scope
            view.GetBestBlock();

            nValueIn = view.GetValueIn(tx);

            // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
            view.SetBackend(dummy);
        }

        // Check for non-standard pay-to-script-hash in inputs
        if (Params().RequireStandard() && !AreInputsStandard(tx, view))
            return error("AcceptToMemoryPool: : nonstandard transaction input");

        // Check that the transaction doesn't have an excessive number of
        // sigops, making it impossible to mine. Since the coinbase transaction
        // itself can contain sigops MAX_TX_SIGOPS is less than
        // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
        // merely non-standard transaction.
        if (!tx.IsZerocoinSpend()) {
            unsigned int nSigOps = GetLegacySigOpCount(tx);
            unsigned int nMaxSigOps = MAX_TX_SIGOPS_CURRENT;
            nSigOps += GetP2SHSigOpCount(tx, view);
            if (nSigOps > nMaxSigOps)
                return state.DoS(0, error("AcceptToMemoryPool : too many sigops %s, %d > %d", hash.ToString(), nSigOps, nMaxSigOps),
                    REJECT_NONSTANDARD, "bad-txns-too-many-sigops");
        }

        CAmount nValueOut = tx.GetValueOut();
        CAmount nFees = nValueIn - nValueOut;
        double dPriority = 0;
        // NOTE(review): the return value of view.GetPriority() is discarded here,
        // so dPriority always stays 0 — compare AcceptableInputs(), which assigns it.
        // Presumably `dPriority = view.GetPriority(...)` was intended; confirm before fixing.
        if (!tx.IsZerocoinSpend())
            view.GetPriority(tx, chainActive.Height());

        CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height());
        unsigned int nSize = entry.GetTxSize();

        // Don't accept it if it can't get into a block
        // but prioritise dstx and don't check fees for it
        if (mapObfuscationBroadcastTxes.count(hash)) {
            mempool.PrioritiseTransaction(hash, hash.ToString(), 1000, 0.1 * COIN);
        } else if (!ignoreFees) {
            CAmount txMinFee = GetMinRelayFee(tx, nSize, true);
            if (fLimitFree && nFees < txMinFee && !tx.IsZerocoinSpend())
                return state.DoS(0, error("AcceptToMemoryPool : not enough fees %s, %d < %d", hash.ToString(), nFees, txMinFee),
                    REJECT_INSUFFICIENTFEE, "insufficient fee");

            // Require that free transactions have sufficient priority to be mined in the next block.
            if (tx.IsZerocoinMint()) {
                // Mints must pay the per-mint zerocoin fee.
                if (nFees < Params().Zerocoin_MintFee() * tx.GetZerocoinMintCount())
                    return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient fee for zerocoinmint");
            } else if (!tx.IsZerocoinSpend() && GetBoolArg("-relaypriority", true) && nFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(view.GetPriority(tx, chainActive.Height() + 1))) {
                return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority");
            }

            // Continuously rate-limit free (really, very-low-fee) transactions
            // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
            // be annoying or make others' transactions take longer to confirm.
            if (fLimitFree && nFees < ::minRelayTxFee.GetFee(nSize) && !tx.IsZerocoinSpend()) {
                static CCriticalSection csFreeLimiter;
                static double dFreeCount;
                static int64_t nLastTime;
                int64_t nNow = GetTime();

                LOCK(csFreeLimiter);

                // Use an exponentially decaying ~10-minute window:
                dFreeCount *= pow(1.0 - 1.0 / 600.0, (double)(nNow - nLastTime));
                nLastTime = nNow;
                // -limitfreerelay unit is thousand-bytes-per-minute
                // At default rate it would take over a month to fill 1GB
                if (dFreeCount >= GetArg("-limitfreerelay", 30) * 10 * 1000)
                    return state.DoS(0, error("AcceptToMemoryPool : free transaction rejected by rate limiter"),
                        REJECT_INSUFFICIENTFEE, "rate limited free transaction");
                LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount + nSize);
                dFreeCount += nSize;
            }
        }

        if (fRejectInsaneFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000)
            return error("AcceptToMemoryPool: : insane fees %s, %d > %d",
                hash.ToString(), nFees, ::minRelayTxFee.GetFee(nSize) * 10000);

        // Check against previous transactions
        // This is done last to help prevent CPU exhaustion denial-of-service attacks.
        if (!CheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true)) {
            return error("AcceptToMemoryPool: : ConnectInputs failed %s", hash.ToString());
        }

        // Check again against just the consensus-critical mandatory script
        // verification flags, in case of bugs in the standard flags that cause
        // transactions to pass as valid when they're actually invalid. For
        // instance the STRICTENC flag was incorrectly allowing certain
        // CHECKSIG NOT scripts to pass, even though they were invalid.
        //
        // There is a similar check in CreateNewBlock() to prevent creating
        // invalid blocks, however allowing such transactions into the mempool
        // can be exploited as a DoS attack.
        if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true)) {
            return error("AcceptToMemoryPool: : BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s", hash.ToString());
        }

        // Store transaction in memory
        pool.addUnchecked(hash, entry);
    }

    SyncWithWallets(tx, NULL);

    return true;
}

// Dry-run variant of AcceptToMemoryPool used (e.g. by masternode/DSTX paths)
// to test whether a transaction's inputs would be acceptable, WITHOUT adding
// it to the pool — note the commented-out addUnchecked/SyncWithWallets below.
bool AcceptableInputs(CTxMemPool& pool, CValidationState& state, const CTransaction& tx, bool fLimitFree, bool* pfMissingInputs, bool fRejectInsaneFee, bool isDSTX)
{
    AssertLockHeld(cs_main);
    if (pfMissingInputs)
        *pfMissingInputs = false;

    if (!CheckTransaction(tx, chainActive.Height() >= Params().Zerocoin_AccumulatorStartHeight(), true, state))
        return error("AcceptableInputs: : CheckTransaction failed");

    // Coinbase is only valid in a block, not as a loose transaction
    if (tx.IsCoinBase())
        return state.DoS(100, error("AcceptableInputs: : coinbase as individual tx"),
            REJECT_INVALID, "coinbase");

    // Rather not work on nonstandard transactions (unless -testnet/-regtest)
    string reason;
    // for any real tx this will be checked on AcceptToMemoryPool anyway
    //    if (Params().RequireStandard() && !IsStandardTx(tx, reason))
    //        return state.DoS(0,
    //            error("AcceptableInputs : nonstandard transaction: %s", reason),
    //            REJECT_NONSTANDARD, reason);

    // is it already in the memory pool?
    uint256 hash = tx.GetHash();
    if (pool.exists(hash))
        return false;

    // ----------- swiftTX transaction scanning -----------
    // Reject anything that conflicts with an active SwiftTX input lock.
    BOOST_FOREACH (const CTxIn& in, tx.vin) {
        if (mapLockedInputs.count(in.prevout)) {
            if (mapLockedInputs[in.prevout] != tx.GetHash()) {
                return state.DoS(0, error("AcceptableInputs : conflicts with existing transaction lock: %s", reason),
                    REJECT_INVALID, "tx-lock-conflict");
            }
        }
    }

    // Check for conflicts with in-memory transactions
    if (!tx.IsZerocoinSpend()) {
        LOCK(pool.cs); // protect pool.mapNextTx
        for (unsigned int i = 0; i < tx.vin.size(); i++) {
            COutPoint outpoint = tx.vin[i].prevout;
            if (pool.mapNextTx.count(outpoint)) {
                // Disable replacement feature for now
                return false;
            }
        }
    }

    {
        CCoinsView dummy;
        CCoinsViewCache view(&dummy);

        CAmount nValueIn = 0;
        {
            LOCK(pool.cs);
            CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
            view.SetBackend(viewMemPool);

            // do we already have it?
            if (view.HaveCoins(hash))
                return false;

            // do all inputs exist?
            // Note that this does not check for the presence of actual outputs (see the next check for that),
            // only helps filling in pfMissingInputs (to determine missing vs spent).
            for (const CTxIn txin : tx.vin) {
                if (!view.HaveCoins(txin.prevout.hash)) {
                    if (pfMissingInputs)
                        *pfMissingInputs = true;
                    return false;
                }
            }

            // are the actual inputs available?
            if (!view.HaveInputs(tx))
                return state.Invalid(error("AcceptableInputs : inputs already spent"),
                    REJECT_DUPLICATE, "bad-txns-inputs-spent");

            // Bring the best block into scope
            view.GetBestBlock();

            nValueIn = view.GetValueIn(tx);

            // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
            view.SetBackend(dummy);
        }

        // Check for non-standard pay-to-script-hash in inputs
        // for any real tx this will be checked on AcceptToMemoryPool anyway
        //        if (Params().RequireStandard() && !AreInputsStandard(tx, view))
        //            return error("AcceptableInputs: : nonstandard transaction input");

        // Check that the transaction doesn't have an excessive number of
        // sigops, making it impossible to mine. Since the coinbase transaction
        // itself can contain sigops MAX_TX_SIGOPS is less than
        // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
        // merely non-standard transaction.
        unsigned int nSigOps = GetLegacySigOpCount(tx);
        unsigned int nMaxSigOps = MAX_TX_SIGOPS_CURRENT;
        nSigOps += GetP2SHSigOpCount(tx, view);
        if (nSigOps > nMaxSigOps)
            return state.DoS(0, error("AcceptableInputs : too many sigops %s, %d > %d", hash.ToString(), nSigOps, nMaxSigOps),
                REJECT_NONSTANDARD, "bad-txns-too-many-sigops");

        CAmount nValueOut = tx.GetValueOut();
        CAmount nFees = nValueIn - nValueOut;
        double dPriority = view.GetPriority(tx, chainActive.Height());

        CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height());
        unsigned int nSize = entry.GetTxSize();

        // Don't accept it if it can't get into a block
        // but prioritise dstx and don't check fees for it
        if (isDSTX) {
            mempool.PrioritiseTransaction(hash, hash.ToString(), 1000, 0.1 * COIN);
        } else { // same as !ignoreFees for AcceptToMemoryPool
            CAmount txMinFee = GetMinRelayFee(tx, nSize, true);
            if (fLimitFree && nFees < txMinFee && !tx.IsZerocoinSpend())
                return state.DoS(0, error("AcceptableInputs : not enough fees %s, %d < %d", hash.ToString(), nFees, txMinFee),
                    REJECT_INSUFFICIENTFEE, "insufficient fee");

            // Require that free transactions have sufficient priority to be mined in the next block.
            if (GetBoolArg("-relaypriority", true) && nFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(view.GetPriority(tx, chainActive.Height() + 1))) {
                return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority");
            }

            // Continuously rate-limit free (really, very-low-fee) transactions
            // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
            // be annoying or make others' transactions take longer to confirm.
            if (fLimitFree && nFees < ::minRelayTxFee.GetFee(nSize) && !tx.IsZerocoinSpend()) {
                static CCriticalSection csFreeLimiter;
                static double dFreeCount;
                static int64_t nLastTime;
                int64_t nNow = GetTime();

                LOCK(csFreeLimiter);

                // Use an exponentially decaying ~10-minute window:
                dFreeCount *= pow(1.0 - 1.0 / 600.0, (double)(nNow - nLastTime));
                nLastTime = nNow;
                // -limitfreerelay unit is thousand-bytes-per-minute
                // At default rate it would take over a month to fill 1GB
                if (dFreeCount >= GetArg("-limitfreerelay", 30) * 10 * 1000)
                    return state.DoS(0, error("AcceptableInputs : free transaction rejected by rate limiter"),
                        REJECT_INSUFFICIENTFEE, "rate limited free transaction");
                LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount + nSize);
                dFreeCount += nSize;
            }
        }

        if (fRejectInsaneFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000)
            return error("AcceptableInputs: : insane fees %s, %d > %d",
                hash.ToString(), nFees, ::minRelayTxFee.GetFee(nSize) * 10000);

        // Check against previous transactions
        // This is done last to help prevent CPU exhaustion denial-of-service attacks.
        if (!CheckInputs(tx, state, view, false, STANDARD_SCRIPT_VERIFY_FLAGS, true)) {
            return error("AcceptableInputs: : ConnectInputs failed %s", hash.ToString());
        }

        // Check again against just the consensus-critical mandatory script
        // verification flags, in case of bugs in the standard flags that cause
        // transactions to pass as valid when they're actually invalid. For
        // instance the STRICTENC flag was incorrectly allowing certain
        // CHECKSIG NOT scripts to pass, even though they were invalid.
        //
        // There is a similar check in CreateNewBlock() to prevent creating
        // invalid blocks, however allowing such transactions into the mempool
        // can be exploited as a DoS attack.
        // for any real tx this will be checked on AcceptToMemoryPool anyway
        //        if (!CheckInputs(tx, state, view, false, MANDATORY_SCRIPT_VERIFY_FLAGS, true)) {
        //            return error("AcceptableInputs: : BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s", hash.ToString());
        //        }

        // Store transaction in memory
        // pool.addUnchecked(hash, entry);
    }

    // SyncWithWallets(tx, NULL);

    return true;
}

/** Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock */
// Lookup order: mempool, then the tx index (when -txindex), then — when
// fAllowSlow — the coin database to find the block height and a full scan
// of that block.
bool GetTransaction(const uint256& hash, CTransaction& txOut, uint256& hashBlock, bool fAllowSlow)
{
    CBlockIndex* pindexSlow = NULL;
    {
        LOCK(cs_main);
        {
            if (mempool.lookup(hash, txOut)) {
                return true;
            }
        }

        if (fTxIndex) {
            CDiskTxPos postx;
            if (pblocktree->ReadTxIndex(hash, postx)) {
                CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
                if (file.IsNull())
                    return error("%s: OpenBlockFile failed", __func__);
                CBlockHeader header;
                try {
                    // Skip past the header to the tx's recorded offset.
                    file >> header;
                    fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
                    file >> txOut;
                } catch (std::exception& e) {
                    return error("%s : Deserialize or I/O error - %s", __func__, e.what());
                }
                hashBlock = header.GetHash();
                if (txOut.GetHash() != hash)
                    return error("%s : txid mismatch", __func__);
                return true;
            }
        }

        if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
block that contains transaction, and scan it
            int nHeight = -1;
            {
                CCoinsViewCache& view = *pcoinsTip;
                const CCoins* coins = view.AccessCoins(hash);
                if (coins)
                    nHeight = coins->nHeight;
            }
            if (nHeight > 0)
                pindexSlow = chainActive[nHeight];
        }
    }

    if (pindexSlow) {
        // Slow path: read the candidate block from disk and scan every
        // transaction in it for the requested txid.
        CBlock block;
        if (ReadBlockFromDisk(block, pindexSlow)) {
            BOOST_FOREACH (const CTransaction& tx, block.vtx) {
                if (tx.GetHash() == hash) {
                    txOut = tx;
                    hashBlock = pindexSlow->GetBlockHash();
                    return true;
                }
            }
        }
    }

    return false;
}


//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//

// Append a block to the current block file; on success pos.nPos is updated to
// the offset of the serialized block itself (after the magic + size prefix).
bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos)
{
    // Open history file to append
    CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
    if (fileout.IsNull())
        return error("WriteBlockToDisk : OpenBlockFile failed");

    // Write index header (network magic + payload size)
    unsigned int nSize = fileout.GetSerializeSize(block);
    fileout << FLATDATA(Params().MessageStart()) << nSize;

    // Write block
    // ftell is taken after the index header so pos.nPos points at the block
    // data, which is the offset ReadBlockFromDisk expects.
    long fileOutPos = ftell(fileout.Get());
    if (fileOutPos < 0)
        return error("WriteBlockToDisk : ftell failed");
    pos.nPos = (unsigned int)fileOutPos;
    fileout << block;

    return true;
}

bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos)
{
    block.SetNull();

    // Open history file to read
    CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("ReadBlockFromDisk : OpenBlockFile failed");

    // Read block
    try {
        filein >> block;
    } catch (std::exception& e) {
        return error("%s : Deserialize or I/O error - %s", __func__, e.what());
    }

    // Check the header — only PoW blocks carry a proof-of-work commitment;
    // proof-of-stake blocks are validated elsewhere.
    if (block.IsProofOfWork()) {
        if (!CheckProofOfWork(block.GetHash(), block.nBits))
            return error("ReadBlockFromDisk : Errors in block header");
    }

    return true;
}

bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex)
{
    if (!ReadBlockFromDisk(block, pindex->GetBlockPos()))
        return false;
    // Guard against on-disk corruption: the block read back must hash to the
    // entry the index points at.
    if (block.GetHash() != pindex->GetBlockHash()) {
        LogPrintf("%s : block=%s index=%s\n", __func__,
block.GetHash().ToString().c_str(), pindex->GetBlockHash().ToString().c_str());
        return error("ReadBlockFromDisk(CBlock&, CBlockIndex*) : GetHash() doesn't match index");
    }
    return true;
}

// Convert a compact difficulty target (nBits) into a floating-point
// difficulty relative to the minimum target (exponent normalized to 29).
double ConvertBitsToDouble(unsigned int nBits)
{
    int nShift = (nBits >> 24) & 0xff;

    double dDiff =
        (double)0x0000ffff / (double)(nBits & 0x00ffffff);

    while (nShift < 29) {
        dDiff *= 256.0;
        nShift++;
    }
    while (nShift > 29) {
        dDiff /= 256.0;
        nShift--;
    }

    return dDiff;
}

// Block subsidy schedule.
// NOTE(review): every bracket after the genesis/premine pays the identical
// 6.25 * COIN; the height brackets look like placeholders for future tuning.
// 6.25 * COIN is computed in double precision — exact for this constant, but
// take care if the value is ever edited.
int64_t GetBlockValue(int nHeight)
{
    int64_t nSubsidy = 0;

    if (Params().NetworkID() == CBaseChainParams::TESTNET) {
        // Testnet faucet-style reward for the first blocks.
        if (nHeight < 200 && nHeight > 0)
            return 250000 * COIN;
    }

    if (nHeight == 0) {
        // Genesis premine.
        nSubsidy = 20000000 * COIN;
    } else if (nHeight <= 50 && nHeight > 0) {
        nSubsidy = 6.25 * COIN;
    } else if (nHeight <= 100 && nHeight > 50) {
        nSubsidy = 6.25 * COIN;
    } else if (nHeight <= 20000 && nHeight > 100) {
        nSubsidy = 6.25 * COIN;
    } else if (nHeight <= 25000 && nHeight > 20000) {
        nSubsidy = 6.25 * COIN;
    } else if (nHeight <= 28000 && nHeight > 25000) {
        nSubsidy = 6.25 * COIN;
    } else if (nHeight <= 80000 && nHeight > 28000) {
        nSubsidy = 6.25 * COIN;
    } else if (nHeight > 80000) {
        nSubsidy = 6.25 * COIN;
    } else {
        nSubsidy = 6.25 * COIN;
    }

    if (fDebug) {
        LogPrintf("%s %s: nSubsidy=%s\n", __FILE__, __FUNCTION__, FormatMoney(nSubsidy));
    }

    return nSubsidy;
}

// Masternode share of the block reward: 1% of blockValue once past height
// 30000 (0 before that, and 0 for the first 200 testnet blocks).
// NOTE(review): nMasternodeCount is currently unused.
int64_t GetMasternodePayment(int nHeight, int64_t blockValue, int nMasternodeCount)
{
    int64_t ret = 0;

    if (Params().NetworkID() == CBaseChainParams::TESTNET) {
        if (nHeight < 200)
            return 0;
    }

    if(nHeight > 30000) {
        ret = blockValue / 100 * 1;
    }

    return ret;
}

bool IsInitialBlockDownload()
{
    LOCK(cs_main);
    if (fImporting || fReindex || chainActive.Height() < Checkpoints::GetTotalBlocksEstimate())
        return true;
    // Once IBD has completed it is latched off for the lifetime of the
    // process via this static flag.
    static bool lockIBDState = false;
    if (lockIBDState)
        return false;
    bool state = (chainActive.Height() < pindexBestHeader->nHeight - 24 * 6 ||
                  pindexBestHeader->GetBlockTime() < GetTime() - 6 * 60 * 60) &&
                 chainActive.Height() > 3030; // ~144 blocks behind -> 2 x fork detection
time
    if (!state)
        lockIBDState = true;
    return state;
}

bool fLargeWorkForkFound = false;
bool fLargeWorkInvalidChainFound = false;
CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;

// Raise (or clear) the user-facing warning flags when a large-work fork or an
// invalid chain with more work than our tip has been observed. Requires cs_main.
void CheckForkWarningConditions()
{
    AssertLockHeld(cs_main);
    // Before we get past initial download, we cannot reliably alert about forks
    // (we assume we don't get stuck on a fork before the last checkpoint)
    if (IsInitialBlockDownload())
        return;

    // If our best fork is no longer within 72 blocks (+/- 3 hours if no one mines it)
    // of our head, drop it
    if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
        pindexBestForkTip = NULL;

    if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6))) {
        if (!fLargeWorkForkFound && pindexBestForkBase) {
            if (pindexBestForkBase->phashBlock) {
                std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
                                      pindexBestForkBase->phashBlock->ToString() + std::string("'");
                CAlert::Notify(warning, true);
            }
        }
        if (pindexBestForkTip && pindexBestForkBase) {
            if (pindexBestForkBase->phashBlock) {
                LogPrintf("CheckForkWarningConditions: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n",
                    pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
                    pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
                fLargeWorkForkFound = true;
            }
        } else {
            LogPrintf("CheckForkWarningConditions: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n");
            fLargeWorkInvalidChainFound = true;
        }
    } else {
        // No qualifying fork in sight: clear both warning latches.
        fLargeWorkForkFound = false;
        fLargeWorkInvalidChainFound = false;
    }
}

void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
{
    AssertLockHeld(cs_main);
    // If we are on a fork that is sufficiently large, set a warning flag
CBlockIndex* pfork = pindexNewForkTip;
    CBlockIndex* plonger = chainActive.Tip();
    // Walk both chains back until pfork is the last common ancestor of the
    // new fork tip and the active chain.
    while (pfork && pfork != plonger) {
        while (plonger && plonger->nHeight > pfork->nHeight)
            plonger = plonger->pprev;
        if (pfork == plonger)
            break;
        pfork = pfork->pprev;
    }

    // We define a condition which we should warn the user about as a fork of at least 7 blocks
    // who's tip is within 72 blocks (+/- 3 hours if no one mines it) of ours
    // or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
    // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
    // hash rate operating on the fork.
    // We define it this way because it allows us to only store the highest fork tip (+ base) which meets
    // the 7-block condition and from this always have the most-likely-to-cause-warning fork
    if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
        pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
        chainActive.Height() - pindexNewForkTip->nHeight < 72) {
        pindexBestForkTip = pindexNewForkTip;
        pindexBestForkBase = pfork;
    }

    CheckForkWarningConditions();
}

// Requires cs_main.
void Misbehaving(NodeId pnode, int howmuch)
{
    if (howmuch == 0)
        return;

    CNodeState* state = State(pnode);
    if (state == NULL)
        return;

    state->nMisbehavior += howmuch;
    int banscore = GetArg("-banscore", 100);
    // Only flag for banning (and log the loud message) on the call that
    // crosses the threshold, so fShouldBan is raised exactly once.
    if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore) {
        LogPrintf("Misbehaving: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", state->name, state->nMisbehavior - howmuch, state->nMisbehavior);
        state->fShouldBan = true;
    } else
        LogPrintf("Misbehaving: %s (%d -> %d)\n", state->name, state->nMisbehavior - howmuch, state->nMisbehavior);
}

// Remember the most-work invalid chain seen so far and log it against the
// current best chain, then re-evaluate fork warnings.
void static InvalidChainFound(CBlockIndex* pindexNew)
{
    if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
        pindexBestInvalid = pindexNew;

    LogPrintf("InvalidChainFound: invalid block=%s height=%d log2_work=%.8g date=%s\n",
        pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
        log(pindexNew->nChainWork.getdouble()) / log(2.0),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime()));
    LogPrintf("InvalidChainFound: current best=%s height=%d log2_work=%.8g date=%s\n",
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
        log(chainActive.Tip()->nChainWork.getdouble()) / log(2.0),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()));
    CheckForkWarningConditions();
}

// Handle a block that failed validation: send a reject/Misbehaving to the
// peer that sent it, and (unless the failure may be local corruption) mark
// the index entry failed and remove it from the candidate set.
void static InvalidBlockFound(CBlockIndex* pindex, const CValidationState& state)
{
    int nDoS = 0;
    if (state.IsInvalid(nDoS)) {
        std::map<uint256, NodeId>::iterator it = mapBlockSource.find(pindex->GetBlockHash());
        if (it != mapBlockSource.end() && State(it->second)) {
            CBlockReject reject = {state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), pindex->GetBlockHash()};
            State(it->second)->rejects.push_back(reject);
            if (nDoS > 0)
                Misbehaving(it->second, nDoS);
        }
    }
    if (!state.CorruptionPossible()) {
        pindex->nStatus |= BLOCK_FAILED_VALID;
        setDirtyBlockIndex.insert(pindex);
        setBlockIndexCandidates.erase(pindex);
        InvalidChainFound(pindex);
    }
}

void
UpdateCoins(const CTransaction& tx, CValidationState& state, CCoinsViewCache& inputs, CTxUndo& txundo, int nHeight)
{
    // mark inputs spent (coinbases and zerocoin spends have no traditional
    // inputs to spend, so they are skipped)
    if (!tx.IsCoinBase() && !tx.IsZerocoinSpend()) {
        txundo.vprevout.reserve(tx.vin.size());
        BOOST_FOREACH (const CTxIn& txin, tx.vin) {
            txundo.vprevout.push_back(CTxInUndo());
            bool ret = inputs.ModifyCoins(txin.prevout.hash)->Spend(txin.prevout, txundo.vprevout.back());
            assert(ret);
        }
    }

    // add outputs
    inputs.ModifyCoins(tx.GetHash())->FromTx(tx, nHeight);
}

// Functor run (possibly on a script-check worker thread) to verify one input
// signature; returns false and records the script error on failure.
bool CScriptCheck::operator()()
{
    const CScript& scriptSig = ptxTo->vin[nIn].scriptSig;
    if (!VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, cacheStore), &error)) {
        return ::error("CScriptCheck(): %s:%d VerifySignature failed: %s", ptxTo->GetHash().ToString(), nIn, ScriptErrorString(error));
    }
    return true;
}

bool CheckInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, std::vector<CScriptCheck>* pvChecks)
{
    if (!tx.IsCoinBase() && !tx.IsZerocoinSpend()) {
        if (pvChecks)
            pvChecks->reserve(tx.vin.size());

        // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
        // for an attacker to attempt to split the network.
        if (!inputs.HaveInputs(tx))
            return state.Invalid(error("CheckInputs() : %s inputs unavailable", tx.GetHash().ToString()));

        // While checking, GetBestBlock() refers to the parent block.
        // This is also true for mempool checks.
CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
        int nSpendHeight = pindexPrev->nHeight + 1;
        CAmount nValueIn = 0;
        CAmount nFees = 0;
        for (unsigned int i = 0; i < tx.vin.size(); i++) {
            const COutPoint& prevout = tx.vin[i].prevout;
            const CCoins* coins = inputs.AccessCoins(prevout.hash);
            assert(coins);

            // If prev is coinbase, check that it's matured
            if (coins->IsCoinBase() || coins->IsCoinStake()) {
                if (nSpendHeight - coins->nHeight < Params().COINBASE_MATURITY())
                    return state.Invalid(
                        error("CheckInputs() : tried to spend coinbase at depth %d, coinstake=%d", nSpendHeight - coins->nHeight, coins->IsCoinStake()),
                        REJECT_INVALID, "bad-txns-premature-spend-of-coinbase");
            }

            // Check for negative or overflow input values
            nValueIn += coins->vout[prevout.n].nValue;
            if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
                return state.DoS(100, error("CheckInputs() : txin values out of range"),
                    REJECT_INVALID, "bad-txns-inputvalues-outofrange");
        }

        // Coinstakes intentionally pay out more than they take in (the stake
        // reward), so the in >= out rule applies to regular txes only.
        if (!tx.IsCoinStake()) {
            if (nValueIn < tx.GetValueOut())
                return state.DoS(100, error("CheckInputs() : %s value in (%s) < value out (%s)",
                                          tx.GetHash().ToString(), FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())),
                    REJECT_INVALID, "bad-txns-in-belowout");

            // Tally transaction fees
            CAmount nTxFee = nValueIn - tx.GetValueOut();
            if (nTxFee < 0)
                return state.DoS(100, error("CheckInputs() : %s nTxFee < 0", tx.GetHash().ToString()),
                    REJECT_INVALID, "bad-txns-fee-negative");
            nFees += nTxFee;
            if (!MoneyRange(nFees))
                return state.DoS(100, error("CheckInputs() : nFees out of range"),
                    REJECT_INVALID, "bad-txns-fee-outofrange");
        }

        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.

        // Skip ECDSA signature verification when connecting blocks
        // before the last block chain checkpoint.
This is safe because block merkle hashes are
        // still computed and checked, and any change will be caught at the next checkpoint.
        if (fScriptChecks) {
            for (unsigned int i = 0; i < tx.vin.size(); i++) {
                const COutPoint& prevout = tx.vin[i].prevout;
                const CCoins* coins = inputs.AccessCoins(prevout.hash);
                assert(coins);

                // Verify signature
                CScriptCheck check(*coins, tx, i, flags, cacheStore);
                if (pvChecks) {
                    // Caller wants parallel checking: hand the check off via
                    // swap instead of running it here.
                    pvChecks->push_back(CScriptCheck());
                    check.swap(pvChecks->back());
                } else if (!check()) {
                    if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                        // Check whether the failure was caused by a
                        // non-mandatory script verification check, such as
                        // non-standard DER encodings or non-null dummy
                        // arguments; if so, don't trigger DoS protection to
                        // avoid splitting the network between upgraded and
                        // non-upgraded nodes.
                        CScriptCheck check(*coins, tx, i, flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore);
                        if (check())
                            return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
                    }
                    // Failures of other flags indicate a transaction that is
                    // invalid in new blocks, e.g. a invalid P2SH. We DoS ban
                    // such nodes as they are not following the protocol. That
                    // said during an upgrade careful thought should be taken
                    // as to the correct behavior - we may want to continue
                    // peering with non-upgraded nodes even after a soft-fork
                    // super-majority vote has passed.
return state.DoS(100, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
                }
            }
        }
    }

    return true;
}

bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
{
    assert(pindex->GetBlockHash() == view.GetBestBlock());

    if (pfClean)
        *pfClean = false;

    // fClean accumulates whether the undo data was fully consistent; any
    // mismatch is logged (via error()) but disconnection continues.
    bool fClean = true;

    CBlockUndo blockUndo;
    CDiskBlockPos pos = pindex->GetUndoPos();
    if (pos.IsNull())
        return error("DisconnectBlock() : no undo data available");
    if (!blockUndo.ReadFromDisk(pos, pindex->pprev->GetBlockHash()))
        return error("DisconnectBlock() : failure reading undo data");

    // There is one undo record per transaction except the coinbase.
    if (blockUndo.vtxundo.size() + 1 != block.vtx.size())
        return error("DisconnectBlock() : block and undo data inconsistent");

    // undo transactions in reverse order
    for (int i = block.vtx.size() - 1; i >= 0; i--) {
        const CTransaction& tx = block.vtx[i];

        /** UNDO ZEROCOIN DATABASING
         * note we only undo zerocoin databasing in the following statement, value to and from Bitcoinlegend
         * addresses should still be handled by the typical bitcoin based undo code
         * */
        if (tx.ContainsZerocoins()) {
            if (tx.IsZerocoinSpend()) {
                //erase all zerocoinspends in this transaction
                for (const CTxIn txin : tx.vin) {
                    if (txin.scriptSig.IsZerocoinSpend()) {
                        CoinSpend spend = TxInToZerocoinSpend(txin);
                        if (!zerocoinDB->EraseCoinSpend(spend.getCoinSerialNumber()))
                            return error("failed to erase spent zerocoin in block");
                    }
                }
            }
            if (tx.IsZerocoinMint()) {
                //erase all zerocoinmints in this transaction
                for (const CTxOut txout : tx.vout) {
                    if (txout.scriptPubKey.empty() || !txout.scriptPubKey.IsZerocoinMint())
                        continue;

                    PublicCoin pubCoin(Params().Zerocoin_Params());
                    if (!TxOutToPublicCoin(txout, pubCoin, state))
                        return error("DisconnectBlock(): TxOutToPublicCoin() failed");

                    if(!zerocoinDB->EraseCoinMint(pubCoin.getValue()))
                        return error("DisconnectBlock(): Failed to erase coin mint");
                }
            }
        }

        uint256 hash = tx.GetHash();

        // Check that all outputs
are available and match the outputs in the block itself
        // exactly. Note that transactions with only provably unspendable outputs won't
        // have outputs available even in the block itself, so we handle that case
        // specially with outsEmpty.
        {
            CCoins outsEmpty;
            CCoinsModifier outs = view.ModifyCoins(hash);
            outs->ClearUnspendable();

            CCoins outsBlock(tx, pindex->nHeight);
            // The CCoins serialization does not serialize negative numbers.
            // No network rules currently depend on the version here, so an inconsistency is harmless
            // but it must be corrected before txout nversion ever influences a network rule.
            if (outsBlock.nVersion < 0)
                outs->nVersion = outsBlock.nVersion;
            if (*outs != outsBlock)
                fClean = fClean && error("DisconnectBlock() : added transaction mismatch? database corrupted");

            // remove outputs
            outs->Clear();
        }

        // restore inputs
        if (!tx.IsCoinBase() && !tx.IsZerocoinSpend()) { // not coinbases or zerocoinspend because they dont have traditional inputs
            // vtxundo is offset by one because the coinbase has no undo record.
            const CTxUndo& txundo = blockUndo.vtxundo[i - 1];
            if (txundo.vprevout.size() != tx.vin.size())
                return error("DisconnectBlock() : transaction and undo data inconsistent - txundo.vprevout.siz=%d tx.vin.siz=%d", txundo.vprevout.size(), tx.vin.size());
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint& out = tx.vin[j].prevout;
                const CTxInUndo& undo = txundo.vprevout[j];
                CCoinsModifier coins = view.ModifyCoins(out.hash);
                if (undo.nHeight != 0) {
                    // undo data contains height: this is the last output of the prevout tx being spent
                    if (!coins->IsPruned())
                        fClean = fClean && error("DisconnectBlock() : undo data overwriting existing transaction");
                    coins->Clear();
                    coins->fCoinBase = undo.fCoinBase;
                    coins->nHeight = undo.nHeight;
                    coins->nVersion = undo.nVersion;
                } else {
                    if (coins->IsPruned())
                        fClean = fClean && error("DisconnectBlock() : undo data adding output to missing transaction");
                }
                if (coins->IsAvailable(out.n))
                    fClean = fClean && error("DisconnectBlock() : undo data overwriting existing output");
                if
(coins->vout.size() < out.n + 1)
                    coins->vout.resize(out.n + 1);
                coins->vout[out.n] = undo.txout;
            }
        }
    }

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev->GetBlockHash());

    if (!fVerifyingBlocks) {
        //if block is an accumulator checkpoint block, remove checkpoint and checksums from db
        uint256 nCheckpoint = pindex->nAccumulatorCheckpoint;
        if(nCheckpoint != pindex->pprev->nAccumulatorCheckpoint) {
            if(!EraseAccumulatorValues(nCheckpoint, pindex->pprev->nAccumulatorCheckpoint))
                return error("DisconnectBlock(): failed to erase checkpoint");
        }
    }

    if (pfClean) {
        *pfClean = fClean;
        return true;
    } else {
        return fClean;
    }
}

// Flush the current block and undo files to disk, truncating them to their
// recorded sizes first when fFinalize is set (i.e. when the file is full).
void static FlushBlockFile(bool fFinalize = false)
{
    LOCK(cs_LastBlockFile);

    CDiskBlockPos posOld(nLastBlockFile, 0);

    FILE* fileOld = OpenBlockFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }

    fileOld = OpenUndoFile(posOld);
    if (fileOld) {
        if (fFinalize)
            TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
        FileCommit(fileOld);
        fclose(fileOld);
    }
}

bool FindUndoPos(CValidationState& state, int nFile, CDiskBlockPos& pos, unsigned int nAddSize);

static CCheckQueue<CScriptCheck> scriptcheckqueue(128);

// Entry point for the parallel script-verification worker threads.
void ThreadScriptCheck()
{
    RenameThread("bitcoinlegend-scriptch");
    scriptcheckqueue.Thread();
}

// Rebuild every block index entry's list of zerocoin mint denominations by
// re-reading blocks from disk, starting at the accumulator start height.
void RecalculateZBCLMinted()
{
    CBlockIndex *pindex = chainActive[Params().Zerocoin_AccumulatorStartHeight()];
    int nHeightEnd = chainActive.Height();
    while (true) {
        if (pindex->nHeight % 1000 == 0)
            LogPrintf("%s : block %d...\n", __func__, pindex->nHeight);

        //overwrite possibly wrong vMintsInBlock data
        CBlock block;
        assert(ReadBlockFromDisk(block, pindex));

        std::list<CZerocoinMint> listMints;
        BlockToZerocoinMintList(block, listMints);

        vector<libzerocoin::CoinDenomination> vDenomsBefore = pindex->vMintDenominationsInBlock;
        pindex->vMintDenominationsInBlock.clear();
        for (auto mint : listMints)
pindex->vMintDenominationsInBlock.emplace_back(mint.GetDenomination());

        //Record mints to disk
        assert(pblocktree->WriteBlockIndex(CDiskBlockIndex(pindex)));

        if (pindex->nHeight < nHeightEnd)
            pindex = chainActive.Next(pindex);
        else
            break;
    }
    pblocktree->Flush();
}

// Rebuild the running per-denomination zerocoin supply by replaying mints and
// spends from the accumulator start height up to the current tip.
void RecalculateZBCLSpent()
{
    CBlockIndex* pindex = chainActive[Params().Zerocoin_AccumulatorStartHeight()];
    while (true) {
        if (pindex->nHeight % 1000 == 0)
            LogPrintf("%s : block %d...\n", __func__, pindex->nHeight);

        //Rewrite zBCL supply
        CBlock block;
        assert(ReadBlockFromDisk(block, pindex));

        list<libzerocoin::CoinDenomination> listDenomsSpent = ZerocoinSpendListFromBlock(block);

        //Reset the supply to previous block
        pindex->mapZerocoinSupply = pindex->pprev->mapZerocoinSupply;

        //Add mints to zBCL supply
        for (auto denom : libzerocoin::zerocoinDenomList) {
            long nDenomAdded = count(pindex->vMintDenominationsInBlock.begin(), pindex->vMintDenominationsInBlock.end(), denom);
            pindex->mapZerocoinSupply.at(denom) += nDenomAdded;
        }

        //Remove spends from zBCL supply
        for (auto denom : listDenomsSpent)
            pindex->mapZerocoinSupply.at(denom)--;

        //Rewrite money supply
        assert(pblocktree->WriteBlockIndex(CDiskBlockIndex(pindex)));

        if (pindex->nHeight < chainActive.Height())
            pindex = chainActive.Next(pindex);
        else
            break;
    }
    pblocktree->Flush();
}

// Recompute nMoneySupply for every block from nHeightStart to the tip by
// summing outputs minus inputs per block. Expensive: re-reads blocks (and,
// via GetTransaction, their input transactions) from disk.
bool RecalculateBCLSupply(int nHeightStart)
{
    if (nHeightStart > chainActive.Height())
        return false;

    CBlockIndex* pindex = chainActive[nHeightStart];
    CAmount nSupplyPrev = pindex->pprev->nMoneySupply;

    while (true) {
        if (pindex->nHeight % 1000 == 0)
            LogPrintf("%s : block %d...\n", __func__, pindex->nHeight);

        CBlock block;
        assert(ReadBlockFromDisk(block, pindex));

        CAmount nValueIn = 0;
        CAmount nValueOut = 0;
        for (const CTransaction tx : block.vtx) {
            for (unsigned int i = 0; i < tx.vin.size(); i++) {
                if (tx.IsCoinBase())
                    break;

                if (tx.vin[i].scriptSig.IsZerocoinSpend()) {
                    // NOTE(review): nSequence is used here as the spend's
                    // denomination in whole coins — confirm against the
                    // zerocoin spend serialization.
                    nValueIn += tx.vin[i].nSequence * COIN;
                    continue;
                }

                COutPoint prevout = tx.vin[i].prevout;
                CTransaction txPrev;
                uint256
hashBlock;
                assert(GetTransaction(prevout.hash, txPrev, hashBlock, true));
                nValueIn += txPrev.vout[prevout.n].nValue;
            }

            for (unsigned int i = 0; i < tx.vout.size(); i++) {
                // The first output of a coinstake is empty by convention and
                // does not contribute to the output total.
                if (i == 0 && tx.IsCoinStake())
                    continue;

                nValueOut += tx.vout[i].nValue;
            }
        }

        // Rewrite money supply
        pindex->nMoneySupply = nSupplyPrev + nValueOut - nValueIn;
        nSupplyPrev = pindex->nMoneySupply;

        assert(pblocktree->WriteBlockIndex(CDiskBlockIndex(pindex)));

        if (pindex->nHeight < chainActive.Height())
            pindex = chainActive.Next(pindex);
        else
            break;
    }
    pblocktree->Flush();
    return true;
}

// Cumulative benchmark counters for the "bench" log category.
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;

bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck, bool fAlreadyChecked)
{
    AssertLockHeld(cs_main);
    // Check it again in case a previous version let a bad block in
    if (!fAlreadyChecked && !CheckBlock(block, state, !fJustCheck, !fJustCheck))
        return false;

    // verify that the view's current state corresponds to the previous block
    uint256 hashPrevBlock = pindex->pprev == NULL ?
uint256(0) : pindex->pprev->GetBlockHash();
    if (hashPrevBlock != view.GetBestBlock())
        LogPrintf("%s: hashPrev=%s view=%s\n", __func__, hashPrevBlock.ToString().c_str(), view.GetBestBlock().ToString().c_str());
    assert(hashPrevBlock == view.GetBestBlock());

    // Special case for the genesis block, skipping connection of its transactions
    // (its coinbase is unspendable)
    if (block.GetHash() == Params().HashGenesisBlock()) {
        view.SetBestBlock(pindex->GetBlockHash());
        return true;
    }

    // Enforce the PoW->PoS transition: no stake blocks before the switchover
    // height, no work blocks after it.
    if (pindex->nHeight <= Params().LAST_POW_BLOCK() && block.IsProofOfStake())
        return state.DoS(100, error("ConnectBlock() : PoS period not active"),
            REJECT_INVALID, "PoS-early");

    if (pindex->nHeight > Params().LAST_POW_BLOCK() && block.IsProofOfWork())
        return state.DoS(100, error("ConnectBlock() : PoW period ended"),
            REJECT_INVALID, "PoW-ended");

    bool fScriptChecks = pindex->nHeight >= Checkpoints::GetTotalBlocksEstimate();

    // Do not allow blocks that contain transactions which 'overwrite' older transactions,
    // unless those are already completely spent.
    // If such overwrites are allowed, coinbases and transactions depending upon those
    // can be duplicated to remove the ability to spend the first instance -- even after
    // being sent to another address.
    // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
    // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
    // already refuses previously-known transaction ids entirely.
    // This rule was originally applied all blocks whose timestamp was after March 15, 2012, 0:00 UTC.
    // Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
    // two in the chain that violate it. This prevents exploiting the issue against nodes in their
    // initial block download.
    bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
!((pindex->nHeight == 91842 && pindex->GetBlockHash() == uint256("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
                              (pindex->nHeight == 91880 && pindex->GetBlockHash() == uint256("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
    if (fEnforceBIP30) {
        BOOST_FOREACH (const CTransaction& tx, block.vtx) {
            const CCoins* coins = view.AccessCoins(tx.GetHash());
            if (coins && !coins->IsPruned())
                return state.DoS(100, error("ConnectBlock() : tried to overwrite transaction"),
                    REJECT_INVALID, "bad-txns-BIP30");
        }
    }

    // BIP16 didn't become active until Apr 1 2012
    int64_t nBIP16SwitchTime = 1333238400;
    bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);

    unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;

    // Start enforcing the DERSIG (BIP66) rules, for block.nVersion=3 blocks, when 75% of the network has upgraded:
    if (block.nVersion >= 3 && CBlockIndex::IsSuperMajority(3, pindex->pprev, Params().EnforceBlockUpgradeMajority())) {
        flags |= SCRIPT_VERIFY_DERSIG;
    }

    CBlockUndo blockundo;

    // Parallel script verification is only used past the last checkpoint and
    // when worker threads are configured.
    CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ?
&scriptcheckqueue : NULL);

    int64_t nTimeStart = GetTimeMicros();
    CAmount nFees = 0;
    int nInputs = 0;
    unsigned int nSigOps = 0;
    CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos> > vPos;
    vPos.reserve(block.vtx.size());
    blockundo.vtxundo.reserve(block.vtx.size() - 1);
    CAmount nValueOut = 0;
    CAmount nValueIn = 0;
    unsigned int nMaxBlockSigOps = MAX_BLOCK_SIGOPS_CURRENT;
    for (unsigned int i = 0; i < block.vtx.size(); i++) {
        const CTransaction& tx = block.vtx[i];

        nInputs += tx.vin.size();
        nSigOps += GetLegacySigOpCount(tx);
        if (nSigOps > nMaxBlockSigOps)
            return state.DoS(100, error("ConnectBlock() : too many sigops"), REJECT_INVALID, "bad-blk-sigops");

        //Temporarily disable zerocoin transactions for maintenance
        if (block.nTime > GetSporkValue(SPORK_16_ZEROCOIN_MAINTENANCE_MODE) && !IsInitialBlockDownload() && tx.ContainsZerocoins())
            return state.DoS(100, error("ConnectBlock() : zerocoin transactions are currently in maintenance mode"));

        if (tx.IsZerocoinSpend()) {
            int nHeightTx = 0;
            if (IsTransactionInChain(tx.GetHash(), nHeightTx)) {
                //when verifying blocks on init, the blocks are scanned without being disconnected - prevent that from causing an error
                if (!fVerifyingBlocks || (fVerifyingBlocks && pindex->nHeight > nHeightTx))
                    return state.DoS(100, error("%s : txid %s already exists in block %d , trying to include it again in block %d", __func__,
                                              tx.GetHash().GetHex(), nHeightTx, pindex->nHeight),
                        REJECT_INVALID, "bad-txns-inputs-missingorspent");
            }

            //Check for double spending of serial #'s
            for (const CTxIn& txIn : tx.vin) {
                if (!txIn.scriptSig.IsZerocoinSpend())
                    continue;
                CoinSpend spend = TxInToZerocoinSpend(txIn);
                nValueIn += spend.getDenomination() * COIN;

                // Make sure that the serial number is in valid range
                if (!spend.HasValidSerial(Params().Zerocoin_Params())) {
                    string strError = strprintf("%s : txid=%s in block %d contains invalid serial %s\n", __func__, tx.GetHash().GetHex(), pindex->nHeight,
spend.getCoinSerialNumber());
                    // Serial-range validity only became consensus-enforced at
                    // a given height; before that it is log-only.
                    if (pindex->nHeight >= Params().Zerocoin_Block_EnforceSerialRange())
                        return state.DoS(100, error(strError.c_str()));
                    strError = "NOT ENFORCING : " + strError;
                    LogPrintf(strError.c_str());
                }

                //Is the serial already in the blockchain?
                uint256 hashTxFromDB;
                int nHeightTxSpend = 0;
                if (zerocoinDB->ReadCoinSpend(spend.getCoinSerialNumber(), hashTxFromDB)) {
                    if(IsSerialInBlockchain(spend.getCoinSerialNumber(), nHeightTxSpend)) {
                        if(!fVerifyingBlocks || (fVerifyingBlocks && pindex->nHeight > nHeightTxSpend))
                            return state.DoS(100, error("%s : zBcl with serial %s is already in the block %d\n",
                                                      __func__, spend.getCoinSerialNumber().GetHex(), nHeightTxSpend));
                    }
                }

                //record spend to database
                if (!zerocoinDB->WriteCoinSpend(spend.getCoinSerialNumber(), tx.GetHash()))
                    return error("%s : failed to record coin serial to database");
            }
        } else if (!tx.IsCoinBase()) {
            if (!view.HaveInputs(tx))
                return state.DoS(100, error("ConnectBlock() : inputs missing/spent"),
                    REJECT_INVALID, "bad-txns-inputs-missingorspent");

            if (fStrictPayToScriptHash) {
                // Add in sigops done by pay-to-script-hash inputs;
                // this is to prevent a "rogue miner" from creating
                // an incredibly-expensive-to-validate block.
                nSigOps += GetP2SHSigOpCount(tx, view);
                if (nSigOps > nMaxBlockSigOps)
                    return state.DoS(100, error("ConnectBlock() : too many sigops"), REJECT_INVALID, "bad-blk-sigops");
            }

            // Coinstakes pay out more than they take in, so they contribute
            // no fees.
            if (!tx.IsCoinStake())
                nFees += view.GetValueIn(tx) - tx.GetValueOut();
            nValueIn += view.GetValueIn(tx);

            std::vector<CScriptCheck> vChecks;
            if (!CheckInputs(tx, state, view, fScriptChecks, flags, false, nScriptCheckThreads ? &vChecks : NULL))
                return false;
            control.Add(vChecks);
        }
        nValueOut += tx.GetValueOut();

        // The coinbase (i == 0) gets a throwaway undo record; all other
        // transactions append to blockundo.
        CTxUndo undoDummy;
        if (i > 0) {
            blockundo.vtxundo.push_back(CTxUndo());
        }
        UpdateCoins(tx, state, view, i == 0 ?
undoDummy : blockundo.vtxundo.back(), pindex->nHeight);

        vPos.push_back(std::make_pair(tx.GetHash(), pos));
        pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
    }

    std::list<CZerocoinMint> listMints;
    BlockToZerocoinMintList(block, listMints);
    std::list<libzerocoin::CoinDenomination> listSpends = ZerocoinSpendListFromBlock(block);

    // One-shot repair of zerocoin/supply index data at the activation height.
    if (!fVerifyingBlocks && pindex->nHeight == Params().Zerocoin_StartHeight() + 1) {
        RecalculateZBCLMinted();
        RecalculateZBCLSpent();
        RecalculateBCLSupply(1);
    }

    // Initialize zerocoin supply to the supply from previous block
    if (pindex->pprev && pindex->pprev->GetBlockHeader().nVersion > 3) {
        for (auto& denom : zerocoinDenomList) {
            pindex->mapZerocoinSupply.at(denom) = pindex->pprev->mapZerocoinSupply.at(denom);
        }
    }

    // Track zerocoin money supply
    CAmount nAmountZerocoinSpent = 0;
    pindex->vMintDenominationsInBlock.clear();
    if (pindex->pprev) {
        for (auto& m : listMints) {
            libzerocoin::CoinDenomination denom = m.GetDenomination();
            pindex->vMintDenominationsInBlock.push_back(m.GetDenomination());
            pindex->mapZerocoinSupply.at(denom)++;
        }

        for (auto& denom : listSpends) {
            pindex->mapZerocoinSupply.at(denom)--;
            nAmountZerocoinSpent += libzerocoin::ZerocoinDenominationToAmount(denom);

            // zerocoin failsafe
            if (pindex->mapZerocoinSupply.at(denom) < 0)
                return state.DoS(100, error("Block contains zerocoins that spend more than are in the available supply to spend"));
        }
    }

    for (auto& denom : zerocoinDenomList) {
        // NOTE(review): the two string literals concatenate ("zero" + format),
        // so the category argument swallows the format string — likely a
        // missing comma: LogPrint("zero", "%s coins ..."). Confirm intent.
        LogPrint("zero" "%s coins for denomination %d pubcoin %s\n", __func__, pindex->mapZerocoinSupply.at(denom), denom);
    }

    // track money supply and mint amount info
    CAmount nMoneySupplyPrev = pindex->pprev ?
pindex->pprev->nMoneySupply : 0;
    pindex->nMoneySupply = nMoneySupplyPrev + nValueOut - nValueIn;
    pindex->nMint = pindex->nMoneySupply - nMoneySupplyPrev + nFees;

    //    LogPrintf("XX69----------> ConnectBlock(): nValueOut: %s, nValueIn: %s, nFees: %s, nMint: %s zBclSpent: %s\n",
    //    FormatMoney(nValueOut), FormatMoney(nValueIn),
    //    FormatMoney(nFees), FormatMoney(pindex->nMint), FormatMoney(nAmountZerocoinSpent));

    if (!pblocktree->WriteBlockIndex(CDiskBlockIndex(pindex)))
        return error("Connect() : WriteBlockIndex for pindex failed");

    int64_t nTime1 = GetTimeMicros();
    nTimeConnect += nTime1 - nTimeStart;
    LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime1 - nTimeStart) / (nInputs - 1), nTimeConnect * 0.000001);

    //PoW phase redistributed fees to miner. PoS stage destroys fees.
    CAmount nExpectedMint = GetBlockValue(pindex->pprev->nHeight);
    if (block.IsProofOfWork())
        nExpectedMint += nFees;

    if (!IsBlockValueValid(block, nExpectedMint, pindex->nMint)) {
        return state.DoS(100, error("ConnectBlock() : reward pays too much (actual=%s vs limit=%s)",
                                  FormatMoney(pindex->nMint), FormatMoney(nExpectedMint)),
            REJECT_INVALID, "bad-cb-amount");
    }

    // zerocoin accumulator: if a new accumulator checkpoint was generated, check that it is the correct value
    if (!fVerifyingBlocks && pindex->nHeight >= Params().Zerocoin_StartHeight() && pindex->nHeight % 10 == 0) {
        uint256 nCheckpointCalculated = 0;

        if (!CalculateAccumulatorCheckpoint(pindex->nHeight, nCheckpointCalculated))
            return state.DoS(100, error("ConnectBlock() : failed to calculate accumulator checkpoint"));

        if (nCheckpointCalculated != block.nAccumulatorCheckpoint) {
            LogPrintf("%s: block=%d calculated: %s\n block: %s\n", __func__, pindex->nHeight, nCheckpointCalculated.GetHex(), block.nAccumulatorCheckpoint.GetHex());
            return state.DoS(100,
error("ConnectBlock() : accumulator does not match calculated value"));
        }
    } else if (!fVerifyingBlocks) {
        // Non-checkpoint heights must carry the previous checkpoint unchanged.
        if (block.nAccumulatorCheckpoint != pindex->pprev->nAccumulatorCheckpoint) {
            return state.DoS(100, error("ConnectBlock() : new accumulator checkpoint generated on a block that is not multiple of 10"));
        }
    }

    // Wait for all queued parallel script checks to complete.
    if (!control.Wait())
        return state.DoS(100, false);

    int64_t nTime2 = GetTimeMicros();
    nTimeVerify += nTime2 - nTimeStart;
    LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime2 - nTimeStart), nInputs <= 1 ? 0 : 0.001 * (nTime2 - nTimeStart) / (nInputs - 1), nTimeVerify * 0.000001);

    if (fJustCheck)
        return true;

    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
        if (pindex->GetUndoPos().IsNull()) {
            CDiskBlockPos pos;
            if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
                return error("ConnectBlock() : FindUndoPos failed");
            if (!blockundo.WriteToDisk(pos, pindex->pprev->GetBlockHash()))
                return state.Abort("Failed to write undo data");

            // update nUndoPos in block index
            pindex->nUndoPos = pos.nPos;
            pindex->nStatus |= BLOCK_HAVE_UNDO;
        }

        pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
        setDirtyBlockIndex.insert(pindex);
    }

    if (fTxIndex)
        if (!pblocktree->WriteTxIndex(vPos))
            return state.Abort("Failed to write transaction index");

    // add this block to the view's block chain
    view.SetBestBlock(pindex->GetBlockHash());

    int64_t nTime3 = GetTimeMicros();
    nTimeIndex += nTime3 - nTime2;
    LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime3 - nTime2), nTimeIndex * 0.000001);

    // Watch for changes to the previous coinbase transaction.
static uint256 hashPrevBestCoinBase; g_signals.UpdatedTransaction(hashPrevBestCoinBase); hashPrevBestCoinBase = block.vtx[0].GetHash(); int64_t nTime4 = GetTimeMicros(); nTimeCallbacks += nTime4 - nTime3; LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime4 - nTime3), nTimeCallbacks * 0.000001); return true; } enum FlushStateMode { FLUSH_STATE_IF_NEEDED, FLUSH_STATE_PERIODIC, FLUSH_STATE_ALWAYS }; /** * Update the on-disk chain state. * The caches and indexes are flushed if either they're too large, forceWrite is set, or * fast is not set and it's been a while since the last write. */ bool static FlushStateToDisk(CValidationState& state, FlushStateMode mode) { LOCK(cs_main); static int64_t nLastWrite = 0; try { if ((mode == FLUSH_STATE_ALWAYS) || ((mode == FLUSH_STATE_PERIODIC || mode == FLUSH_STATE_IF_NEEDED) && pcoinsTip->GetCacheSize() > nCoinCacheSize) || (mode == FLUSH_STATE_PERIODIC && GetTimeMicros() > nLastWrite + DATABASE_WRITE_INTERVAL * 1000000)) { // Typical CCoins structures on disk are around 100 bytes in size. // Pushing a new one to the database can cause it to be written // twice (once in the log, and once in the tables). This is already // an overestimation, as most will delete an existing entry or // overwrite one. Still, use a conservative safety factor of 2. if (!CheckDiskSpace(100 * 2 * 2 * pcoinsTip->GetCacheSize())) return state.Error("out of disk space"); // First make sure all block and undo data is flushed to disk. FlushBlockFile(); // Then update all block file information (which may refer to block and undo files). 
bool fileschanged = false; for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end();) { if (!pblocktree->WriteBlockFileInfo(*it, vinfoBlockFile[*it])) { return state.Abort("Failed to write to block index"); } fileschanged = true; setDirtyFileInfo.erase(it++); } if (fileschanged && !pblocktree->WriteLastBlockFile(nLastBlockFile)) { return state.Abort("Failed to write to block index"); } for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end();) { if (!pblocktree->WriteBlockIndex(CDiskBlockIndex(*it))) { return state.Abort("Failed to write to block index"); } setDirtyBlockIndex.erase(it++); } pblocktree->Sync(); // Finally flush the chainstate (which may refer to block index entries). if (!pcoinsTip->Flush()) return state.Abort("Failed to write to coin database"); // Update best block in wallet (so we can detect restored wallets). if (mode != FLUSH_STATE_IF_NEEDED) { g_signals.SetBestChain(chainActive.GetLocator()); } nLastWrite = GetTimeMicros(); } } catch (const std::runtime_error& e) { return state.Abort(std::string("System error while flushing: ") + e.what()); } return true; } void FlushStateToDisk() { CValidationState state; FlushStateToDisk(state, FLUSH_STATE_ALWAYS); } /** Update chainActive and related internal data structures. 
*/
void static UpdateTip(CBlockIndex* pindexNew)
{
    chainActive.SetTip(pindexNew);

    // If turned on AutoZeromint will automatically convert BCL to zBCL
    if (pwalletMain->isZeromintEnabled())
        pwalletMain->AutoZeromint();

    // New best block
    nTimeBestReceived = GetTime();
    mempool.AddTransactionsUpdated(1);
    LogPrintf("UpdateTip: new best=%s  height=%d  log2_work=%.8g  tx=%lu  date=%s progress=%f  cache=%u\n",
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble()) / log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
        Checkpoints::GuessVerificationProgress(chainActive.Tip()), (unsigned int)pcoinsTip->GetCacheSize());

    cvBlockChange.notify_all();

    // Check the version of the last 100 blocks to see if we need to upgrade:
    static bool fWarned = false; // warn at most once per process lifetime
    if (!IsInitialBlockDownload() && !fWarned) {
        int nUpgraded = 0;
        const CBlockIndex* pindex = chainActive.Tip();
        for (int i = 0; i < 100 && pindex != NULL; i++) {
            if (pindex->nVersion > CBlock::CURRENT_VERSION)
                ++nUpgraded;
            pindex = pindex->pprev;
        }
        if (nUpgraded > 0)
            LogPrintf("SetBestChain: %d of last 100 blocks above version %d\n", nUpgraded, (int)CBlock::CURRENT_VERSION);
        if (nUpgraded > 100 / 2) {
            // strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
            strMiscWarning = _("Warning: This version is obsolete, upgrade required!");
            CAlert::Notify(strMiscWarning, true);
            fWarned = true;
        }
    }
}

/** Disconnect chainActive's tip. */
bool static DisconnectTip(CValidationState& state)
{
    CBlockIndex* pindexDelete = chainActive.Tip();
    assert(pindexDelete);
    mempool.check(pcoinsTip);
    // Read block from disk.
    CBlock block;
    if (!ReadBlockFromDisk(block, pindexDelete))
        return state.Abort("Failed to read block");
    // Apply the block atomically to the chain state.
    int64_t nStart = GetTimeMicros();
    {
        CCoinsViewCache view(pcoinsTip);
        if (!DisconnectBlock(block, state, pindexDelete, view))
            return error("DisconnectTip() : DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
        assert(view.Flush());
    }
    LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
    // Write the chain state to disk, if necessary.
    if (!FlushStateToDisk(state, FLUSH_STATE_ALWAYS))
        return false;
    // Resurrect mempool transactions from the disconnected block.
    BOOST_FOREACH (const CTransaction& tx, block.vtx) {
        // ignore validation errors in resurrected transactions
        list<CTransaction> removed;
        CValidationState stateDummy;
        if (tx.IsCoinBase() || tx.IsCoinStake() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL))
            mempool.remove(tx, removed, true);
    }
    mempool.removeCoinbaseSpends(pcoinsTip, pindexDelete->nHeight);
    mempool.check(pcoinsTip);
    // Update chainActive and related variables.
    UpdateTip(pindexDelete->pprev);
    // Let wallets know transactions went from 1-confirmed to
    // 0-confirmed or conflicted:
    BOOST_FOREACH (const CTransaction& tx, block.vtx) {
        SyncWithWallets(tx, NULL);
    }
    return true;
}

// Cumulative benchmarking counters for ConnectTip (reported via LogPrint("bench", ...)).
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;

/**
 * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
 * corresponding to pindexNew, to bypass loading it again from disk.
 */
bool static ConnectTip(CValidationState& state, CBlockIndex* pindexNew, CBlock* pblock, bool fAlreadyChecked)
{
    assert(pindexNew->pprev == chainActive.Tip());
    mempool.check(pcoinsTip);
    CCoinsViewCache view(pcoinsTip);

    // A block loaded from disk below was never pre-checked by the caller.
    if (pblock == NULL)
        fAlreadyChecked = false;

    // Read block from disk.
    int64_t nTime1 = GetTimeMicros();
    CBlock block;
    if (!pblock) {
        if (!ReadBlockFromDisk(block, pindexNew))
            return state.Abort("Failed to read block");
        pblock = &block;
    }
    // Apply the block atomically to the chain state.
    int64_t nTime2 = GetTimeMicros();
    nTimeReadFromDisk += nTime2 - nTime1;
    int64_t nTime3;
    LogPrint("bench", "  - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
    {
        CInv inv(MSG_BLOCK, pindexNew->GetBlockHash());
        bool rv = ConnectBlock(*pblock, state, pindexNew, view, false, fAlreadyChecked);
        // BlockChecked fires regardless of result so listeners see rejections too.
        g_signals.BlockChecked(*pblock, state);
        if (!rv) {
            if (state.IsInvalid())
                InvalidBlockFound(pindexNew, state);
            return error("ConnectTip() : ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
        }
        mapBlockSource.erase(inv.hash);
        nTime3 = GetTimeMicros();
        nTimeConnectTotal += nTime3 - nTime2;
        LogPrint("bench", "  - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
        assert(view.Flush());
    }
    int64_t nTime4 = GetTimeMicros();
    nTimeFlush += nTime4 - nTime3;
    LogPrint("bench", "  - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
    // Write the chain state to disk, if necessary. Always write to disk if this is the first of a new file.
    FlushStateMode flushMode = FLUSH_STATE_IF_NEEDED;
    if (pindexNew->pprev && (pindexNew->GetBlockPos().nFile != pindexNew->pprev->GetBlockPos().nFile))
        flushMode = FLUSH_STATE_ALWAYS;
    if (!FlushStateToDisk(state, flushMode))
        return false;
    int64_t nTime5 = GetTimeMicros();
    nTimeChainState += nTime5 - nTime4;
    LogPrint("bench", "  - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
    // Remove conflicting transactions from the mempool.
    list<CTransaction> txConflicted;
    mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted);
    mempool.check(pcoinsTip);
    // Update chainActive & related variables.
    UpdateTip(pindexNew);
    // Tell wallet about transactions that went from mempool
    // to conflicted:
    BOOST_FOREACH (const CTransaction& tx, txConflicted) {
        SyncWithWallets(tx, NULL);
    }
    // ... and about transactions that got confirmed:
    BOOST_FOREACH (const CTransaction& tx, pblock->vtx) {
        SyncWithWallets(tx, pblock);
    }

    int64_t nTime6 = GetTimeMicros();
    nTimePostConnect += nTime6 - nTime5;
    nTimeTotal += nTime6 - nTime1;
    LogPrint("bench", "  - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
    LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
    return true;
}

/// RPC/maintenance helper: disconnect the given number of tip blocks so they
/// can be reprocessed.
/// NOTE(review): the loop runs `i <= blocks`, i.e. blocks+1 disconnects — one
/// more than the log message announces — and DisconnectTip's return value is
/// ignored. Both look like upstream quirks; confirm before changing, since
/// callers may depend on the current behavior.
bool DisconnectBlocksAndReprocess(int blocks)
{
    LOCK(cs_main);

    CValidationState state;

    LogPrintf("DisconnectBlocksAndReprocess: Got command to replay %d blocks\n", blocks);
    for (int i = 0; i <= blocks; i++)
        DisconnectTip(state);

    return true;
}

/*
    DisconnectBlockAndInputs

    Remove conflicting blocks for successful SwiftX transaction locks
    This should be very rare (Probably will never happen)
*/
// ***TODO*** clean up here
bool DisconnectBlockAndInputs(CValidationState& state, CTransaction txLock)
{
    // All modifications to the coin state will be done in this cache.
    // Only when all have succeeded, we push it to pcoinsTip.
    //    CCoinsViewCache view(*pcoinsTip, true);

    CBlockIndex* BlockReading = chainActive.Tip();
    CBlockIndex* pindexNew = NULL;
    bool foundConflictingTx = false;

    //remove anything conflicting in the memory pool
    list<CTransaction> txConflicted;
    mempool.removeConflicts(txLock, txConflicted);

    // List of what to disconnect (typically nothing)
    vector<CBlockIndex*> vDisconnect;

    // Scan at most the last 5 blocks for an input conflicting with txLock.
    for (unsigned int i = 1; BlockReading && BlockReading->nHeight > 0 && !foundConflictingTx && i < 6; i++) {
        vDisconnect.push_back(BlockReading);
        pindexNew = BlockReading->pprev; //new best block

        CBlock block;
        if (!ReadBlockFromDisk(block, BlockReading))
            return state.Abort(_("Failed to read block"));

        // Queue memory transactions to resurrect.
        // We only do this for blocks after the last checkpoint (reorganisation before that
        // point should only happen with -reindex/-loadblock, or a misbehaving peer.
        BOOST_FOREACH (const CTransaction& tx, block.vtx) {
            if (!tx.IsCoinBase()) {
                BOOST_FOREACH (const CTxIn& in1, txLock.vin) {
                    BOOST_FOREACH (const CTxIn& in2, tx.vin) {
                        if (in1.prevout == in2.prevout) foundConflictingTx = true;
                    }
                }
            }
        }

        if (BlockReading->pprev == NULL) {
            assert(BlockReading);
            break;
        }
        BlockReading = BlockReading->pprev;
    }

    if (!foundConflictingTx) {
        LogPrintf("DisconnectBlockAndInputs: Can't find a conflicting transaction to inputs\n");
        return false;
    }

    if (vDisconnect.size() > 0) {
        LogPrintf("REORGANIZE: Disconnect Conflicting Blocks %lli blocks; %s..\n", vDisconnect.size(), pindexNew->GetBlockHash().ToString());
        BOOST_FOREACH (CBlockIndex* pindex, vDisconnect) {
            LogPrintf(" -- disconnect %s\n", pindex->GetBlockHash().ToString());
            DisconnectTip(state);
        }
    }

    return true;
}

/**
 * Return the tip of the chain with the most work in it, that isn't
 * known to be invalid (it's however far from certain to be valid).
 */
static CBlockIndex* FindMostWorkChain()
{
    do {
        CBlockIndex* pindexNew = NULL;

        // Find the best candidate header.
        {
            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
            if (it == setBlockIndexCandidates.rend())
                return NULL;
            pindexNew = *it;
        }

        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
        CBlockIndex* pindexTest = pindexNew;
        bool fInvalidAncestor = false;
        while (pindexTest && !chainActive.Contains(pindexTest)) {
            assert(pindexTest->nChainTx || pindexTest->nHeight == 0);

            // Pruned nodes may have entries in setBlockIndexCandidates for
            // which block files have been deleted. Remove those as candidates
            // for the most work chain if we come across them; we can't switch
            // to a chain unless we have all the non-active-chain parent blocks.
            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
            if (fFailedChain || fMissingData) {
                // Candidate chain is not usable (either invalid or missing data)
                if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                    pindexBestInvalid = pindexNew;
                CBlockIndex* pindexFailed = pindexNew;
                // Remove the entire chain from the set.
                while (pindexTest != pindexFailed) {
                    if (fFailedChain) {
                        pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    } else if (fMissingData) {
                        // If we're missing data, then add back to mapBlocksUnlinked,
                        // so that if the block arrives in the future we can try adding
                        // to setBlockIndexCandidates again.
                        mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
                    }
                    setBlockIndexCandidates.erase(pindexFailed);
                    pindexFailed = pindexFailed->pprev;
                }
                setBlockIndexCandidates.erase(pindexTest);
                fInvalidAncestor = true;
                break;
            }
            pindexTest = pindexTest->pprev;
        }
        if (!fInvalidAncestor)
            return pindexNew;
    } while (true);
}

/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
static void PruneBlockIndexCandidates()
{
    // Note that we can't delete the current block itself, as we may need to return to it later in case a
    // reorganization to a better block fails.
    std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
    while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
        setBlockIndexCandidates.erase(it++);
    }
    // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
    assert(!setBlockIndexCandidates.empty());
}

/**
 * Try to make some progress towards making pindexMostWork the active block.
* pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
 */
static bool ActivateBestChainStep(CValidationState& state, CBlockIndex* pindexMostWork, CBlock* pblock, bool fAlreadyChecked)
{
    AssertLockHeld(cs_main);
    // A NULL pblock means blocks will be loaded from disk and must be re-checked.
    if (pblock == NULL)
        fAlreadyChecked = false;
    bool fInvalidFound = false;
    const CBlockIndex* pindexOldTip = chainActive.Tip();
    const CBlockIndex* pindexFork = chainActive.FindFork(pindexMostWork);

    // Disconnect active blocks which are no longer in the best chain.
    while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
        if (!DisconnectTip(state))
            return false;
    }

    // Build list of new blocks to connect.
    std::vector<CBlockIndex*> vpindexToConnect;
    bool fContinue = true;
    int nHeight = pindexFork ? pindexFork->nHeight : -1;
    while (fContinue && nHeight != pindexMostWork->nHeight) {
        // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
        // a few blocks along the way.
        int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
        vpindexToConnect.clear();
        vpindexToConnect.reserve(nTargetHeight - nHeight);
        CBlockIndex* pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
        while (pindexIter && pindexIter->nHeight != nHeight) {
            vpindexToConnect.push_back(pindexIter);
            pindexIter = pindexIter->pprev;
        }
        nHeight = nTargetHeight;

        // Connect new blocks (vpindexToConnect is tip-first, so iterate in reverse).
        BOOST_REVERSE_FOREACH (CBlockIndex* pindexConnect, vpindexToConnect) {
            if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL, fAlreadyChecked)) {
                if (state.IsInvalid()) {
                    // The block violates a consensus rule.
                    if (!state.CorruptionPossible())
                        InvalidChainFound(vpindexToConnect.back());
                    state = CValidationState();
                    fInvalidFound = true;
                    fContinue = false;
                    break;
                } else {
                    // A system error occurred (disk space, database error, ...).
                    return false;
                }
            } else {
                PruneBlockIndexCandidates();
                if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
                    // We're in a better position than we were. Return temporarily to release the lock.
                    fContinue = false;
                    break;
                }
            }
        }
    }

    // Callbacks/notifications for a new best chain.
    if (fInvalidFound)
        CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
    else
        CheckForkWarningConditions();

    return true;
}

/**
 * Make the best chain active, in multiple steps. The result is either failure
 * or an activated best chain. pblock is either NULL or a pointer to a block
 * that is already loaded (to avoid loading it again from disk).
 */
bool ActivateBestChain(CValidationState& state, CBlock* pblock, bool fAlreadyChecked)
{
    CBlockIndex* pindexNewTip = NULL;
    CBlockIndex* pindexMostWork = NULL;
    do {
        boost::this_thread::interruption_point();

        bool fInitialDownload;
        while (true) {
            // Spin with a short sleep instead of blocking on cs_main.
            TRY_LOCK(cs_main, lockMain);
            if (!lockMain) {
                MilliSleep(50);
                continue;
            }

            pindexMostWork = FindMostWorkChain();

            // Whether we have anything to do at all.
            if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())
                return true;

            if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL, fAlreadyChecked))
                return false;

            pindexNewTip = chainActive.Tip();
            fInitialDownload = IsInitialBlockDownload();
            break;
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        // Notifications/callbacks that can run without cs_main
        if (!fInitialDownload) {
            uint256 hashNewTip = pindexNewTip->GetBlockHash();
            // Relay inventory, but don't relay old inventory during initial block download.
            int nBlockEstimate = Checkpoints::GetTotalBlocksEstimate();
            {
                LOCK(cs_vNodes);
                BOOST_FOREACH (CNode* pnode, vNodes)
                    if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
                        pnode->PushInventory(CInv(MSG_BLOCK, hashNewTip));
            }
            // Notify external listeners about the new tip.
            uiInterface.NotifyBlockTip(hashNewTip);
        }
    } while (pindexMostWork != chainActive.Tip());
    CheckBlockIndex();

    // Write changes periodically to disk, after relay.
    if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
        return false;
    }

    return true;
}

/// Mark a block (and every active-chain descendant) invalid and disconnect
/// the active chain back past it.
bool InvalidateBlock(CValidationState& state, CBlockIndex* pindex)
{
    AssertLockHeld(cs_main);

    // Mark the block itself as invalid.
    pindex->nStatus |= BLOCK_FAILED_VALID;
    setDirtyBlockIndex.insert(pindex);
    setBlockIndexCandidates.erase(pindex);

    while (chainActive.Contains(pindex)) {
        CBlockIndex* pindexWalk = chainActive.Tip();
        pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
        setDirtyBlockIndex.insert(pindexWalk);
        setBlockIndexCandidates.erase(pindexWalk);
        // ActivateBestChain considers blocks already in chainActive
        // unconditionally valid already, so force disconnect away from it.
        if (!DisconnectTip(state)) {
            return false;
        }
    }

    // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
    // add them again.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
            setBlockIndexCandidates.insert(it->second);
        }
        it++;
    }

    InvalidChainFound(pindex);
    return true;
}

/// Undo InvalidateBlock: clear the failure flags on pindex, its descendants,
/// and its ancestors, re-adding eligible blocks as chain candidates.
bool ReconsiderBlock(CValidationState& state, CBlockIndex* pindex)
{
    AssertLockHeld(cs_main);

    int nHeight = pindex->nHeight;

    // Remove the invalidity flag from this block and all its descendants.
    BlockMap::iterator it = mapBlockIndex.begin();
    while (it != mapBlockIndex.end()) {
        if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
            it->second->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(it->second);
            if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
                setBlockIndexCandidates.insert(it->second);
            }
            if (it->second == pindexBestInvalid) {
                // Reset invalid block marker if it was pointing to one of those.
                pindexBestInvalid = NULL;
            }
        }
        it++;
    }

    // Remove the invalidity flag from all ancestors too.
    while (pindex != NULL) {
        if (pindex->nStatus & BLOCK_FAILED_MASK) {
            pindex->nStatus &= ~BLOCK_FAILED_MASK;
            setDirtyBlockIndex.insert(pindex);
        }
        pindex = pindex->pprev;
    }
    return true;
}

/// Create (or return the existing) CBlockIndex entry for a block header,
/// linking it to its parent and computing PoS/PoW chain metadata.
CBlockIndex* AddToBlockIndex(const CBlock& block)
{
    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end())
        return it->second;

    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(block);
    assert(pindexNew);
    // We assign the sequence id to blocks only when the full data is available,
    // to avoid miners withholding blocks but broadcasting headers, to get a
    // competitive advantage.
    pindexNew->nSequenceId = 0;
    BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;

    //mark as PoS seen
    if (pindexNew->IsProofOfStake())
        setStakeSeen.insert(make_pair(pindexNew->prevoutStake, pindexNew->nStakeTime));

    // phashBlock points at the key stored in mapBlockIndex (stable for the map's lifetime).
    pindexNew->phashBlock = &((*mi).first);
    BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
    if (miPrev != mapBlockIndex.end()) {
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
        pindexNew->BuildSkip();

        //update previous block pointer
        pindexNew->pprev->pnext = pindexNew;

        // ppcoin: compute chain trust score
        pindexNew->bnChainTrust = (pindexNew->pprev ? pindexNew->pprev->bnChainTrust : 0) + pindexNew->GetBlockTrust();

        // ppcoin: compute stake entropy bit for stake modifier
        if (!pindexNew->SetStakeEntropyBit(pindexNew->GetStakeEntropyBit()))
            LogPrintf("AddToBlockIndex() : SetStakeEntropyBit() failed \n");

        // ppcoin: record proof-of-stake hash value
        if (pindexNew->IsProofOfStake()) {
            if (!mapProofOfStake.count(hash))
                LogPrintf("AddToBlockIndex() : hashProofOfStake not found in map \n");
            pindexNew->hashProofOfStake = mapProofOfStake[hash];
        }

        // ppcoin: compute stake modifier
        uint64_t nStakeModifier = 0;
        bool fGeneratedStakeModifier = false;
        if (!ComputeNextStakeModifier(pindexNew->pprev, nStakeModifier, fGeneratedStakeModifier))
            LogPrintf("AddToBlockIndex() : ComputeNextStakeModifier() failed \n");
        pindexNew->SetStakeModifier(nStakeModifier, fGeneratedStakeModifier);
        pindexNew->nStakeModifierChecksum = GetStakeModifierChecksum(pindexNew);
        if (!CheckStakeModifierCheckpoints(pindexNew->nHeight, pindexNew->nStakeModifierChecksum))
            LogPrintf("AddToBlockIndex() : Rejected by stake modifier checkpoint height=%d, modifier=%s \n", pindexNew->nHeight, boost::lexical_cast<std::string>(nStakeModifier));
    }
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
    if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
        pindexBestHeader = pindexNew;

    //update previous block pointer
    if (pindexNew->nHeight)
        pindexNew->pprev->pnext = pindexNew;

    setDirtyBlockIndex.insert(pindexNew);
    return pindexNew;
}

/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS).
*/ bool ReceivedBlockTransactions(const CBlock& block, CValidationState& state, CBlockIndex* pindexNew, const CDiskBlockPos& pos) { if (block.IsProofOfStake()) pindexNew->SetProofOfStake(); pindexNew->nTx = block.vtx.size(); pindexNew->nChainTx = 0; pindexNew->nFile = pos.nFile; pindexNew->nDataPos = pos.nPos; pindexNew->nUndoPos = 0; pindexNew->nStatus |= BLOCK_HAVE_DATA; pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS); setDirtyBlockIndex.insert(pindexNew); if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) { // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS. deque<CBlockIndex*> queue; queue.push_back(pindexNew); // Recursively process any descendant blocks that now may be eligible to be connected. while (!queue.empty()) { CBlockIndex* pindex = queue.front(); queue.pop_front(); pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx; { LOCK(cs_nBlockSequenceId); pindex->nSequenceId = nBlockSequenceId++; } if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) { setBlockIndexCandidates.insert(pindex); } std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex); while (range.first != range.second) { std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first; queue.push_back(it->second); range.first++; mapBlocksUnlinked.erase(it); } } } else { if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) { mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew)); } } return true; } bool FindBlockPos(CValidationState& state, CDiskBlockPos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false) { LOCK(cs_LastBlockFile); unsigned int nFile = fKnown ? 
pos.nFile : nLastBlockFile; if (vinfoBlockFile.size() <= nFile) { vinfoBlockFile.resize(nFile + 1); } if (!fKnown) { while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) { LogPrintf("Leaving block file %i: %s\n", nFile, vinfoBlockFile[nFile].ToString()); FlushBlockFile(true); nFile++; if (vinfoBlockFile.size() <= nFile) { vinfoBlockFile.resize(nFile + 1); } } pos.nFile = nFile; pos.nPos = vinfoBlockFile[nFile].nSize; } nLastBlockFile = nFile; vinfoBlockFile[nFile].AddBlock(nHeight, nTime); if (fKnown) vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize); else vinfoBlockFile[nFile].nSize += nAddSize; if (!fKnown) { unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; if (nNewChunks > nOldChunks) { if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) { FILE* file = OpenBlockFile(pos); if (file) { LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile); AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos); fclose(file); } } else return state.Error("out of disk space"); } } setDirtyFileInfo.insert(nFile); return true; } bool FindUndoPos(CValidationState& state, int nFile, CDiskBlockPos& pos, unsigned int nAddSize) { pos.nFile = nFile; LOCK(cs_LastBlockFile); unsigned int nNewSize; pos.nPos = vinfoBlockFile[nFile].nUndoSize; nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize; setDirtyFileInfo.insert(nFile); unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; if (nNewChunks > nOldChunks) { if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) { FILE* file = OpenUndoFile(pos); if (file) { LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", 
nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile); AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos); fclose(file); } } else return state.Error("out of disk space"); } return true; } bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool fCheckPOW) { // Check proof of work matches claimed amount if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits)) return state.DoS(50, error("CheckBlockHeader() : proof of work failed"), REJECT_INVALID, "high-hash"); return true; } bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bool fCheckMerkleRoot, bool fCheckSig) { // These are checks that are independent of context. // Check that the header is valid (particularly PoW). This is mostly // redundant with the call in AcceptBlockHeader. if (!CheckBlockHeader(block, state, fCheckPOW)) return state.DoS(100, error("CheckBlock() : CheckBlockHeader failed"), REJECT_INVALID, "bad-header", true); // Check timestamp LogPrint("debug", "%s: block=%s is proof of stake=%d\n", __func__, block.GetHash().ToString().c_str(), block.IsProofOfStake()); if (block.GetBlockTime() > GetAdjustedTime() + (block.IsProofOfStake() ? 180 : 7200)) // 3 minute future drift for PoS return state.Invalid(error("CheckBlock() : block timestamp too far in the future"), REJECT_INVALID, "time-too-new"); // Check the merkle root. if (fCheckMerkleRoot) { bool mutated; uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated); if (block.hashMerkleRoot != hashMerkleRoot2) return state.DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"), REJECT_INVALID, "bad-txnmrklroot", true); // Check for merkle tree malleability (CVE-2012-2459): repeating sequences // of transactions in a block without affecting the merkle root of a block, // while still invalidating it. 
if (mutated)
    return state.DoS(100, error("CheckBlock() : duplicate transaction"),
        REJECT_INVALID, "bad-txns-duplicate", true);
}

// All potential-corruption validation must be done before we do any
// transaction validation, as otherwise we may mark the header as invalid
// because we receive the wrong transactions for it.

// Size limits
unsigned int nMaxBlockSize = MAX_BLOCK_SIZE_CURRENT;
if (block.vtx.empty() || block.vtx.size() > nMaxBlockSize || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > nMaxBlockSize)
    return state.DoS(100, error("CheckBlock() : size limits failed"),
        REJECT_INVALID, "bad-blk-length");

// First transaction must be coinbase, the rest must not be
if (block.vtx.empty() || !block.vtx[0].IsCoinBase())
    return state.DoS(100, error("CheckBlock() : first tx is not coinbase"),
        REJECT_INVALID, "bad-cb-missing");
for (unsigned int i = 1; i < block.vtx.size(); i++)
    if (block.vtx[i].IsCoinBase())
        return state.DoS(100, error("CheckBlock() : more than one coinbase"),
            REJECT_INVALID, "bad-cb-multiple");

if (block.IsProofOfStake()) {
    // Coinbase output should be empty if proof-of-stake block
    if (block.vtx[0].vout.size() != 1 || !block.vtx[0].vout[0].IsEmpty())
        return state.DoS(100, error("CheckBlock() : coinbase output not empty for proof-of-stake block"));

    // Second transaction must be coinstake, the rest must not be
    // NOTE(review): the guard only tests vtx.empty() before indexing vtx[1];
    // a proof-of-stake block containing exactly one transaction would read out
    // of bounds here. Confirm something upstream guarantees vtx.size() >= 2,
    // otherwise this condition should be `block.vtx.size() < 2`.
    if (block.vtx.empty() || !block.vtx[1].IsCoinStake())
        return state.DoS(100, error("CheckBlock() : second tx is not coinstake"));
    for (unsigned int i = 2; i < block.vtx.size(); i++)
        if (block.vtx[i].IsCoinStake())
            return state.DoS(100, error("CheckBlock() : more than one coinstake"));
}

// ----------- swiftTX transaction scanning -----------
// While the block-filtering spork is active, reject the whole block if any
// non-coinbase input conflicts with a SwiftTX-locked input belonging to a
// different transaction.
if (IsSporkActive(SPORK_3_SWIFTTX_BLOCK_FILTERING)) {
    BOOST_FOREACH (const CTransaction& tx, block.vtx) {
        if (!tx.IsCoinBase()) {
            //only reject blocks when it's based on complete consensus
            BOOST_FOREACH (const CTxIn& in, tx.vin) {
                if (mapLockedInputs.count(in.prevout)) {
                    if (mapLockedInputs[in.prevout] != tx.GetHash()) {
                        // Remember the rejected block so we do not re-process it.
                        mapRejectedBlocks.insert(make_pair(block.GetHash(), GetTime()));
                        LogPrintf("CheckBlock() : found conflicting transaction with transaction lock %s %s\n", mapLockedInputs[in.prevout].ToString(), tx.GetHash().ToString());
                        return state.DoS(0, error("CheckBlock() : found conflicting transaction with transaction lock"),
                            REJECT_INVALID, "conflicting-tx-ix");
                    }
                }
            }
        }
    }
} else {
    LogPrintf("CheckBlock() : skipping transaction locking checks\n");
}

// masternode payments / budgets and zerocoin check
CBlockIndex* pindexPrev = chainActive.Tip();
int nHeight = 0;
if (pindexPrev != NULL) {
    // Derive the height this block would connect at; when the block arrives
    // out of order, fall back to looking its parent up in mapBlockIndex.
    if (pindexPrev->GetBlockHash() == block.hashPrevBlock) {
        nHeight = pindexPrev->nHeight + 1;
    } else { //out of order
        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
        if (mi != mapBlockIndex.end() && (*mi).second)
            nHeight = (*mi).second->nHeight + 1;
    }

    // Version 4 header must be used after Params().Zerocoin_StartHeight(). And never before.
    if (nHeight > Params().Zerocoin_StartHeight()) {
        if (block.nVersion < Params().Zerocoin_HeaderVersion())
            return state.DoS(50, error("CheckBlockHeader() : block version must be above 4 after ZerocoinStartHeight"),
                REJECT_INVALID, "block-version");
    }

    // Bitcoinlegend
    // It is entirely possible that we don't have enough data and this could fail
    // (i.e. the block could indeed be valid). Store the block for later consideration
    // but issue an initial reject message.
    // The case also exists that the sending peer could not have enough data to see
    // that this block is invalid, so don't issue an outright ban.
    if (nHeight != 0 && !IsInitialBlockDownload()) {
        if (!IsBlockPayeeValid(block, nHeight)) {
            mapRejectedBlocks.insert(make_pair(block.GetHash(), GetTime()));
            return state.DoS(0, error("CheckBlock() : Couldn't find masternode/budget payment"),
                REJECT_INVALID, "bad-cb-payee");
        }
    } else {
        if (fDebug)
            LogPrintf("CheckBlock(): Masternode payment check skipped on sync - skipping IsBlockPayeeValid()\n");
    }
}

// Check transactions
bool fZerocoinActive = true;
vector<CBigNum> vBlockSerials;
for (const CTransaction& tx : block.vtx) {
    if (!CheckTransaction(tx, fZerocoinActive, chainActive.Height() + 1 >= Params().Zerocoin_StartHeight(), state))
        return error("CheckBlock() : CheckTransaction failed");

    // double check that there are no double spent zBcl spends in this block
    if (tx.IsZerocoinSpend()) {
        // NOTE(review): txIn is taken by value, copying each input (including
        // its scriptSig) per iteration; a const reference would avoid the copy.
        for (const CTxIn txIn : tx.vin) {
            if (txIn.scriptSig.IsZerocoinSpend()) {
                libzerocoin::CoinSpend spend = TxInToZerocoinSpend(txIn);
                if (count(vBlockSerials.begin(), vBlockSerials.end(), spend.getCoinSerialNumber()))
                    return state.DoS(100, error("%s : Double spending of zBcl serial %s in block\n Block: %s", __func__, spend.getCoinSerialNumber().GetHex(), block.ToString()));
                vBlockSerials.emplace_back(spend.getCoinSerialNumber());
            }
        }
    }
}

// Legacy signature-operation accounting over every transaction in the block.
unsigned int nSigOps = 0;
BOOST_FOREACH (const CTransaction& tx, block.vtx) {
    nSigOps += GetLegacySigOpCount(tx);
}
unsigned int nMaxBlockSigOps = fZerocoinActive ?
MAX_BLOCK_SIGOPS_CURRENT : MAX_BLOCK_SIGOPS_LEGACY;
if (nSigOps > nMaxBlockSigOps)
    return state.DoS(100, error("CheckBlock() : out-of-bounds SigOpCount"),
        REJECT_INVALID, "bad-blk-sigops", true);

return true;
}

// Verify that 'block' satisfies the work/stake requirement for the slot on
// top of pindexPrev. Returns false (after logging) on any mismatch.
// NOTE(review): 'block' is passed by value, copying the entire block on every
// call; a `const CBlock&` would avoid this, but the matching declaration in
// the header would need the same change — flagging rather than fixing here.
bool CheckWork(const CBlock block, CBlockIndex* const pindexPrev)
{
    if (pindexPrev == NULL)
        return error("%s : null pindexPrev for block %s", __func__, block.GetHash().ToString().c_str());

    unsigned int nBitsRequired = GetNextWorkRequired(pindexPrev, &block);

    // Pre-fork proof-of-work blocks (height <= 68589): allow up to 50%
    // deviation from the expected difficulty instead of an exact nBits match.
    if (block.IsProofOfWork() && (pindexPrev->nHeight + 1 <= 68589)) {
        double n1 = ConvertBitsToDouble(block.nBits);
        double n2 = ConvertBitsToDouble(nBitsRequired);
        // NOTE(review): confirm abs() here resolves to the floating-point
        // overload (std::abs(double)); the C ::abs(int) would silently
        // truncate the difference to an integer.
        if (abs(n1 - n2) > n1 * 0.5)
            return error("%s : incorrect proof of work (DGW pre-fork) - %f %f %f at %d", __func__, abs(n1 - n2), n1, n2, pindexPrev->nHeight + 1);

        return true;
    }

    if (block.nBits != nBitsRequired)
        return error("%s : incorrect proof of work at %d", __func__, pindexPrev->nHeight + 1);

    if (block.IsProofOfStake()) {
        uint256 hashProofOfStake;
        uint256 hash = block.GetHash();

        if (!CheckProofOfStake(block, hashProofOfStake)) {
            LogPrintf("WARNING: ProcessBlock(): check proof-of-stake failed for block %s\n", hash.ToString().c_str());
            return false;
        }
        if (!mapProofOfStake.count(hash)) // add to mapProofOfStake
            mapProofOfStake.insert(make_pair(hash, hashProofOfStake));
    }

    return true;
}

// Header checks that need chain context (previous index entry): reorg depth,
// timestamp vs. median-time-past, and checkpoint constraints.
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex* const pindexPrev)
{
    uint256 hash = block.GetHash();

    // The genesis block has no context to check against.
    if (hash == Params().HashGenesisBlock())
        return true;

    assert(pindexPrev);

    int nHeight = pindexPrev->nHeight + 1;

    //If this is a reorg, check that it is not too deep
    int nMaxReorgDepth = GetArg("-maxreorg", Params().MaxReorganizationDepth());
    if (chainActive.Height() - nHeight >= nMaxReorgDepth)
        return state.DoS(1, error("%s: forked chain older than max reorganization depth (height %d)", __func__, nHeight));

    // Check timestamp against prev
    if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
        LogPrintf("Block time = %d , GetMedianTimePast = %d \n", block.GetBlockTime(), pindexPrev->GetMedianTimePast());
        return state.Invalid(error("%s : block's timestamp is too early", __func__),
            REJECT_INVALID, "time-too-old");
    }

    // Check that the block chain matches the known block chain up to a checkpoint
    if (!Checkpoints::CheckBlock(nHeight, hash))
        return state.DoS(100, error("%s : rejected by checkpoint lock-in at %d", __func__, nHeight),
            REJECT_CHECKPOINT, "checkpoint mismatch");

    // Don't accept any forks from the main chain prior to last checkpoint
    CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint();
    if (pcheckpoint && nHeight < pcheckpoint->nHeight)
        return state.DoS(0, error("%s : forked chain older than last checkpoint (height %d)", __func__, nHeight));

    return true;
}

// True when hashBlock names a known block index entry that lies on the
// currently active chain.
bool IsBlockHashInChain(const uint256& hashBlock)
{
    if (hashBlock == 0 || !mapBlockIndex.count(hashBlock))
        return false;

    return chainActive.Contains(mapBlockIndex[hashBlock]);
}

// Locate transaction txId; when its containing block is on the active chain,
// report that block's height through nHeightTx and return true.
// NOTE(review): the GetTransaction() return value is ignored — on lookup
// failure hashBlock stays null and the function falls through to false.
bool IsTransactionInChain(uint256 txId, int& nHeightTx)
{
    uint256 hashBlock;
    CTransaction tx;
    GetTransaction(txId, tx, hashBlock, true);
    if (!IsBlockHashInChain(hashBlock))
        return false;

    nHeightTx = mapBlockIndex.at(hashBlock)->nHeight;
    return true;
}

// Block checks that depend on where the block sits in the chain (here:
// finality of all contained transactions at that height/time).
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIndex* const pindexPrev)
{
    const int nHeight = pindexPrev == NULL ?
0 : pindexPrev->nHeight + 1;

// Check that all transactions are finalized
BOOST_FOREACH (const CTransaction& tx, block.vtx)
    if (!IsFinalTx(tx, nHeight, block.GetBlockTime())) {
        return state.DoS(10, error("%s : contains a non-final transaction", __func__),
            REJECT_INVALID, "bad-txns-nonfinal");
    }

return true;
}

// Validate a block header and register it in mapBlockIndex, returning the
// (possibly pre-existing) index entry through ppindex. Requires cs_main.
bool AcceptBlockHeader(const CBlock& block, CValidationState& state, CBlockIndex** ppindex)
{
    AssertLockHeld(cs_main);

    // Check for duplicate
    uint256 hash = block.GetHash();
    BlockMap::iterator miSelf = mapBlockIndex.find(hash);
    CBlockIndex* pindex = NULL;

    // TODO : ENABLE BLOCK CACHE IN SPECIFIC CASES
    if (miSelf != mapBlockIndex.end()) {
        // Block header is already known.
        pindex = miSelf->second;
        if (ppindex)
            *ppindex = pindex;
        // A header previously marked failed stays rejected.
        if (pindex->nStatus & BLOCK_FAILED_MASK)
            return state.Invalid(error("%s : block is marked invalid", __func__), 0, "duplicate");
        return true;
    }

    if (!CheckBlockHeader(block, state, false)) {
        LogPrintf("AcceptBlockHeader(): CheckBlockHeader failed \n");
        return false;
    }

    // Get prev block index
    CBlockIndex* pindexPrev = NULL;
    if (hash != Params().HashGenesisBlock()) {
        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
        if (mi == mapBlockIndex.end())
            return state.DoS(0, error("%s : prev block %s not found", __func__, block.hashPrevBlock.ToString().c_str()), 0, "bad-prevblk");
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
            return state.DoS(100, error("%s : prev block %s is invalid, unable to add block %s", __func__, block.hashPrevBlock.GetHex(), block.GetHash().GetHex()),
                REJECT_INVALID, "bad-prevblk");
    }

    if (!ContextualCheckBlockHeader(block, state, pindexPrev))
        return false;

    // Only create a new index entry when the header was not already known.
    if (pindex == NULL)
        pindex = AddToBlockIndex(block);

    if (ppindex)
        *ppindex = pindex;

    return true;
}

// Fully validate a block and store it to disk, updating the block index.
// Requires cs_main. fAlreadyCheckedBlock skips the redundant CheckBlock().
bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, CDiskBlockPos* dbp, bool fAlreadyCheckedBlock)
{
    AssertLockHeld(cs_main);

    CBlockIndex*& pindex = *ppindex;

    // Get prev block index
    CBlockIndex* pindexPrev = NULL;
    if (block.GetHash() != Params().HashGenesisBlock()) {
        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
        if (mi == mapBlockIndex.end())
            return state.DoS(0, error("%s : prev block %s not found", __func__, block.hashPrevBlock.ToString().c_str()), 0, "bad-prevblk");
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
            return state.DoS(100, error("%s : prev block %s is invalid, unable to add block %s", __func__, block.hashPrevBlock.GetHex(), block.GetHash().GetHex()),
                REJECT_INVALID, "bad-prevblk");
    }

    // Proof-of-work / proof-of-stake validity against the previous block.
    if (block.GetHash() != Params().HashGenesisBlock() && !CheckWork(block, pindexPrev))
        return false;

    if (!AcceptBlockHeader(block, state, &pindex))
        return false;

    if (pindex->nStatus & BLOCK_HAVE_DATA) {
        // TODO: deal better with duplicate blocks.
        // return state.DoS(20, error("AcceptBlock() : already have block %d %s", pindex->nHeight, pindex->GetBlockHash().ToString()), REJECT_DUPLICATE, "duplicate");
        return true;
    }

    if ((!fAlreadyCheckedBlock && !CheckBlock(block, state)) || !ContextualCheckBlock(block, state, pindex->pprev)) {
        // Mark the index entry failed unless the failure may stem from local
        // corruption rather than an actually invalid block.
        if (state.IsInvalid() && !state.CorruptionPossible()) {
            pindex->nStatus |= BLOCK_FAILED_VALID;
            setDirtyBlockIndex.insert(pindex);
        }
        return false;
    }

    int nHeight = pindex->nHeight;

    // Write block to history file
    try {
        unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
        CDiskBlockPos blockPos;
        if (dbp != NULL)
            blockPos = *dbp;
        // The +8 accounts for the message-start magic and length prefix.
        if (!FindBlockPos(state, blockPos, nBlockSize + 8, nHeight, block.GetBlockTime(), dbp != NULL))
            return error("AcceptBlock() : FindBlockPos failed");
        if (dbp == NULL)
            if (!WriteBlockToDisk(block, blockPos))
                return state.Abort("Failed to write block");
        if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
            return error("AcceptBlock() : ReceivedBlockTransactions failed");
    } catch (std::runtime_error& e) {
        return state.Abort(std::string("System error: ") + e.what());
    }

    return true;
}

bool CBlockIndex::IsSuperMajority(int minVersion, const CBlockIndex* pstart,
unsigned int nRequired)
{
    // Count how many of the last nToCheck ancestors (starting at pstart)
    // already advertise at least minVersion.
    unsigned int nToCheck = Params().ToCheckBlockUpgradeMajority();
    unsigned int nFound = 0;
    for (unsigned int i = 0; i < nToCheck && nFound < nRequired && pstart != NULL; i++) {
        if (pstart->nVersion >= minVersion)
            ++nFound;
        pstart = pstart->pprev;
    }
    return (nFound >= nRequired);
}

/** Turn the lowest '1' bit in the binary representation of a number into a '0'. */
int static inline InvertLowestOne(int n) { return n & (n - 1); }

/** Compute what height to jump back to with the CBlockIndex::pskip pointer. */
int static inline GetSkipHeight(int height)
{
    if (height < 2)
        return 0;

    // Determine which height to jump back to. Any number strictly lower than height is acceptable,
    // but the following expression seems to perform well in simulations (max 110 steps to go back
    // up to 2**18 blocks).
    return (height & 1) ? InvertLowestOne(InvertLowestOne(height - 1)) + 1 : InvertLowestOne(height);
}

// Walk back from this index entry to its ancestor at the requested height,
// following the pskip shortcut pointers whenever they do not overshoot.
CBlockIndex* CBlockIndex::GetAncestor(int height)
{
    // Heights outside [0, nHeight] have no ancestor.
    if (height > nHeight || height < 0)
        return NULL;

    CBlockIndex* pindexWalk = this;
    int heightWalk = nHeight;
    while (heightWalk > height) {
        int heightSkip = GetSkipHeight(heightWalk);
        int heightSkipPrev = GetSkipHeight(heightWalk - 1);
        if (heightSkip == height ||
            (heightSkip > height && !(heightSkipPrev < heightSkip - 2 && heightSkipPrev >= height))) {
            // Only follow pskip if pprev->pskip isn't better than pskip->pprev.
pindexWalk = pindexWalk->pskip; heightWalk = heightSkip; } else { pindexWalk = pindexWalk->pprev; heightWalk--; } } return pindexWalk; } const CBlockIndex* CBlockIndex::GetAncestor(int height) const { return const_cast<CBlockIndex*>(this)->GetAncestor(height); } void CBlockIndex::BuildSkip() { if (pprev) pskip = pprev->GetAncestor(GetSkipHeight(nHeight)); } bool ProcessNewBlock(CValidationState& state, CNode* pfrom, CBlock* pblock, CDiskBlockPos* dbp) { // Preliminary checks int64_t nStartTime = GetTimeMillis(); bool checked = CheckBlock(*pblock, state); int nMints = 0; int nSpends = 0; for (const CTransaction tx : pblock->vtx) { if (tx.ContainsZerocoins()) { for (const CTxIn in : tx.vin) { if (in.scriptSig.IsZerocoinSpend()) nSpends++; } for (const CTxOut out : tx.vout) { if (out.IsZerocoinMint()) nMints++; } } } if (nMints || nSpends) LogPrintf("%s : block contains %d zBcl mints and %d zBcl spends\n", __func__, nMints, nSpends); // ppcoin: check proof-of-stake // Limited duplicity on stake: prevents block flood attack // Duplicate stake allowed only when there is orphan child block //if (pblock->IsProofOfStake() && setStakeSeen.count(pblock->GetProofOfStake())/* && !mapOrphanBlocksByPrev.count(hash)*/) // return error("ProcessNewBlock() : duplicate proof-of-stake (%s, %d) for block %s", pblock->GetProofOfStake().first.ToString().c_str(), pblock->GetProofOfStake().second, pblock->GetHash().ToString().c_str()); // NovaCoin: check proof-of-stake block signature if (!pblock->CheckBlockSignature()) return error("ProcessNewBlock() : bad proof-of-stake block signature"); if (pblock->GetHash() != Params().HashGenesisBlock() && pfrom != NULL) { //if we get this far, check if the prev block is our prev block, if not then request sync and return false BlockMap::iterator mi = mapBlockIndex.find(pblock->hashPrevBlock); if (mi == mapBlockIndex.end()) { pfrom->PushMessage("getblocks", chainActive.GetLocator(), uint256(0)); return false; } } { LOCK(cs_main); // Replaces the 
former TRY_LOCK loop because busy waiting wastes too much resources MarkBlockAsReceived (pblock->GetHash ()); if (!checked) { return error ("%s : CheckBlock FAILED for block %s", __func__, pblock->GetHash().GetHex()); } // Store to disk CBlockIndex* pindex = NULL; bool ret = AcceptBlock (*pblock, state, &pindex, dbp, checked); if (pindex && pfrom) { mapBlockSource[pindex->GetBlockHash ()] = pfrom->GetId (); } CheckBlockIndex (); if (!ret) return error ("%s : AcceptBlock FAILED", __func__); } if (!ActivateBestChain(state, pblock, checked)) return error("%s : ActivateBestChain failed", __func__); if (!fLiteMode) { if (masternodeSync.RequestedMasternodeAssets > MASTERNODE_SYNC_LIST) { obfuScationPool.NewBlock(); masternodePayments.ProcessBlock(GetHeight() + 10); budget.NewBlock(); } } if (pwalletMain) { // If turned on MultiSend will send a transaction (or more) on the after maturity of a stake if (pwalletMain->isMultiSendEnabled()) pwalletMain->MultiSend(); // If turned on Auto Combine will scan wallet for dust to combine if (pwalletMain->fCombineDust) pwalletMain->AutoCombineDust(); } LogPrintf("%s : ACCEPTED in %ld milliseconds with size=%d\n", __func__, GetTimeMillis() - nStartTime, pblock->GetSerializeSize(SER_DISK, CLIENT_VERSION)); return true; } bool TestBlockValidity(CValidationState& state, const CBlock& block, CBlockIndex* const pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot) { AssertLockHeld(cs_main); assert(pindexPrev == chainActive.Tip()); CCoinsViewCache viewNew(pcoinsTip); CBlockIndex indexDummy(block); indexDummy.pprev = pindexPrev; indexDummy.nHeight = pindexPrev->nHeight + 1; // NOTE: CheckBlockHeader is called by CheckBlock if (!ContextualCheckBlockHeader(block, state, pindexPrev)) return false; if (!CheckBlock(block, state, fCheckPOW, fCheckMerkleRoot)) return false; if (!ContextualCheckBlock(block, state, pindexPrev)) return false; if (!ConnectBlock(block, state, &indexDummy, viewNew, true)) return false; assert(state.IsValid()); return true; 
}

// Report a fatal internal error to the log and UI and begin shutdown.
// Always returns false so callers can simply `return AbortNode(...)`.
bool AbortNode(const std::string& strMessage, const std::string& userMessage)
{
    strMiscWarning = strMessage;
    LogPrintf("*** %s\n", strMessage);
    uiInterface.ThreadSafeMessageBox(
        userMessage.empty() ? _("Error: A fatal internal error occured, see debug.log for details") : userMessage,
        "", CClientUIInterface::MSG_ERROR);
    StartShutdown();
    return false;
}

// Abort the node when the data directory lacks room for nAdditionalBytes
// plus the nMinDiskSpace safety margin.
bool CheckDiskSpace(uint64_t nAdditionalBytes)
{
    uint64_t nFreeBytesAvailable = filesystem::space(GetDataDir()).available;

    // Check for nMinDiskSpace bytes (currently 50MB)
    if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
        return AbortNode("Disk space is low!", _("Error: Disk space is low!"));

    return true;
}

// Open (creating when permitted) the flat file backing the given disk
// position and seek to pos.nPos. Returns NULL on any failure.
FILE* OpenDiskFile(const CDiskBlockPos& pos, const char* prefix, bool fReadOnly)
{
    if (pos.IsNull())
        return NULL;
    boost::filesystem::path path = GetBlockPosFilename(pos, prefix);
    boost::filesystem::create_directories(path.parent_path());
    // Try read/update first; fall back to creating the file when writable.
    FILE* file = fopen(path.string().c_str(), "rb+");
    if (!file && !fReadOnly)
        file = fopen(path.string().c_str(), "wb+");
    if (!file) {
        LogPrintf("Unable to open file %s\n", path.string());
        return NULL;
    }
    if (pos.nPos) {
        if (fseek(file, pos.nPos, SEEK_SET)) {
            LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
            fclose(file);
            return NULL;
        }
    }
    return file;
}

// Block data files ("blk*.dat").
FILE* OpenBlockFile(const CDiskBlockPos& pos, bool fReadOnly)
{
    return OpenDiskFile(pos, "blk", fReadOnly);
}

// Undo data files ("rev*.dat").
FILE* OpenUndoFile(const CDiskBlockPos& pos, bool fReadOnly)
{
    return OpenDiskFile(pos, "rev", fReadOnly);
}

boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos& pos, const char* prefix)
{
    return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
}

// Find or create the mapBlockIndex entry for hash (no block data attached).
CBlockIndex* InsertBlockIndex(uint256 hash)
{
    if (hash == 0)
        return NULL;

    // Return existing
    BlockMap::iterator mi = mapBlockIndex.find(hash);
    if (mi != mapBlockIndex.end())
        return (*mi).second;

    // Create new
    CBlockIndex* pindexNew = new CBlockIndex();
    if (!pindexNew)
        throw runtime_error("LoadBlockIndex() : new CBlockIndex failed");
    mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;

    //mark as PoS seen
    if (pindexNew->IsProofOfStake())
        setStakeSeen.insert(make_pair(pindexNew->prevoutStake, pindexNew->nStakeTime));

    // phashBlock points at the key stored in the map (stable address).
    pindexNew->phashBlock = &((*mi).first);

    return pindexNew;
}

// Rebuild all in-memory block index state (chain work, chain tx counts,
// candidates, best header/invalid pointers) from the on-disk index, verify
// block file presence, and repair a known inconsistent-shutdown condition.
bool static LoadBlockIndexDB()
{
    if (!pblocktree->LoadBlockIndexGuts())
        return false;

    boost::this_thread::interruption_point();

    // Calculate nChainWork
    vector<pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(mapBlockIndex.size());
    BOOST_FOREACH (const PAIRTYPE(uint256, CBlockIndex*) & item, mapBlockIndex) {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
    }
    // Processing by ascending height guarantees parents are visited first.
    sort(vSortedByHeight.begin(), vSortedByHeight.end());
    BOOST_FOREACH (const PAIRTYPE(int, CBlockIndex*) & item, vSortedByHeight) {
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            if (pindex->pprev) {
                if (pindex->pprev->nChainTx) {
                    pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                } else {
                    // Parent's chain-tx count unknown: mark unlinked for later.
                    pindex->nChainTx = 0;
                    mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
                }
            } else {
                pindex->nChainTx = pindex->nTx;
            }
        }
        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
            setBlockIndexCandidates.insert(pindex);
        if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
            pindexBestInvalid = pindex;
        if (pindex->pprev)
            pindex->BuildSkip();
        if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
            pindexBestHeader = pindex;
    }

    // Load block file info
    pblocktree->ReadLastBlockFile(nLastBlockFile);
    vinfoBlockFile.resize(nLastBlockFile + 1);
    LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
    for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
        pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
    }
    LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
    // Pick up any additional block files recorded beyond nLastBlockFile.
    for (int nFile = nLastBlockFile + 1; true; nFile++) {
        CBlockFileInfo info;
        if (pblocktree->ReadBlockFileInfo(nFile, info)) {
            vinfoBlockFile.push_back(info);
        } else {
            break;
        }
    }

    // Check presence of blk files
    LogPrintf("Checking all blk files are present...\n");
    set<int> setBlkDataFiles;
    BOOST_FOREACH (const PAIRTYPE(uint256, CBlockIndex*) & item, mapBlockIndex) {
        CBlockIndex* pindex = item.second;
        if (pindex->nStatus & BLOCK_HAVE_DATA) {
            setBlkDataFiles.insert(pindex->nFile);
        }
    }
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
        CDiskBlockPos pos(*it, 0);
        if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
            return false;
        }
    }

    //Check if the shutdown procedure was followed on last client exit
    bool fLastShutdownWasPrepared = true;
    pblocktree->ReadFlag("shutdown", fLastShutdownWasPrepared);
    LogPrintf("%s: Last shutdown was prepared: %s\n", __func__, fLastShutdownWasPrepared);

    //Check for inconsistency with block file info and internal state
    if (!fLastShutdownWasPrepared && !GetBoolArg("-forcestart", false) && !GetBoolArg("-reindex", false) && (vSortedByHeight.size() != vinfoBlockFile[nLastBlockFile].nHeightLast + 1) && (vinfoBlockFile[nLastBlockFile].nHeightLast != 0)) {
        //The database is in a state where a block has been accepted and written to disk, but not
        //all of the block has percolated through the code. The block and the index should both be
        //intact (although assertions are added if they are not), and the block will be reprocessed
        //to ensure all data will be accounted for.
        LogPrintf("%s: Inconsistent State Detected mapBlockIndex.size()=%d blockFileBlocks=%d\n", __func__, vSortedByHeight.size(), vinfoBlockFile[nLastBlockFile].nHeightLast + 1);
        LogPrintf("%s: lastIndexPos=%d blockFileSize=%d\n", __func__, vSortedByHeight[vSortedByHeight.size() - 1].second->GetBlockPos().nPos,
            vinfoBlockFile[nLastBlockFile].nSize);

        //try reading the block from the last index we have
        bool isFixed = true;
        string strError = "";
        LogPrintf("%s: Attempting to re-add last block that was recorded to disk\n", __func__);

        //get the last block that was properly recorded to the block info file
        CBlockIndex* pindexLastMeta = vSortedByHeight[vinfoBlockFile[nLastBlockFile].nHeightLast + 1].second;

        //fix Assertion `hashPrevBlock == view.GetBestBlock()' failed. By adjusting height to the last recorded by coinsview
        CBlockIndex* pindexCoinsView = mapBlockIndex[pcoinsTip->GetBestBlock()];
        for (unsigned int i = vinfoBlockFile[nLastBlockFile].nHeightLast + 1; i < vSortedByHeight.size(); i++) {
            pindexLastMeta = vSortedByHeight[i].second;
            if (pindexLastMeta->nHeight > pindexCoinsView->nHeight)
                break;
        }

        LogPrintf("%s: Last block properly recorded: #%d %s\n", __func__, pindexLastMeta->nHeight, pindexLastMeta->GetBlockHash().ToString().c_str());

        CBlock lastMetaBlock;
        if (!ReadBlockFromDisk(lastMetaBlock, pindexLastMeta)) {
            isFixed = false;
            strError = strprintf("failed to read block %d from disk", pindexLastMeta->nHeight);
        }

        //set the chain to the block before lastMeta so that the meta block will be seen as new
        chainActive.SetTip(pindexLastMeta->pprev);

        //Process the lastMetaBlock again, using the known location on disk
        CDiskBlockPos blockPos = pindexLastMeta->GetBlockPos();
        CValidationState state;
        ProcessNewBlock(state, NULL, &lastMetaBlock, &blockPos);

        //ensure that everything is as it should be
        if (pcoinsTip->GetBestBlock() != vSortedByHeight[vSortedByHeight.size() - 1].second->GetBlockHash()) {
            isFixed = false;
            strError = "pcoinsTip best block is not correct";
        }

        //properly account for all of the blocks that were not in the meta data. If this is not done the file
        //positioning will be wrong and blocks will be overwritten and later cause serialization errors
        CBlockIndex* pindexLast = vSortedByHeight[vSortedByHeight.size() - 1].second;
        CBlock lastBlock;
        if (!ReadBlockFromDisk(lastBlock, pindexLast)) {
            isFixed = false;
            strError = strprintf("failed to read block %d from disk", pindexLast->nHeight);
        }
        vinfoBlockFile[nLastBlockFile].nHeightLast = pindexLast->nHeight;
        vinfoBlockFile[nLastBlockFile].nSize = pindexLast->GetBlockPos().nPos + ::GetSerializeSize(lastBlock, SER_DISK, CLIENT_VERSION);;
        setDirtyFileInfo.insert(nLastBlockFile);
        FlushStateToDisk(state, FLUSH_STATE_ALWAYS);

        //Print out file info again
        pblocktree->ReadLastBlockFile(nLastBlockFile);
        vinfoBlockFile.resize(nLastBlockFile + 1);
        LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
        for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
            pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
        }
        LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());

        if (!isFixed) {
            strError = "Failed reading from database. " + strError + ". The block database is in an inconsistent state and may cause issues in the future."
                                                                     "To force start use -forcestart";
            uiInterface.ThreadSafeMessageBox(strError, "", CClientUIInterface::MSG_ERROR);
            abort();
        }
        LogPrintf("Passed corruption fix\n");
    }

    // Check whether we need to continue reindexing
    bool fReindexing = false;
    pblocktree->ReadReindexing(fReindexing);
    fReindex |= fReindexing;

    // Check whether we have a transaction index
    pblocktree->ReadFlag("txindex", fTxIndex);
    LogPrintf("LoadBlockIndexDB(): transaction index %s\n", fTxIndex ? "enabled" : "disabled");

    // If this is written true before the next client init, then we know the shutdown process failed
    pblocktree->WriteFlag("shutdown", false);

    // Load pointer to end of best chain
    BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
    if (it == mapBlockIndex.end())
        return true;
    chainActive.SetTip(it->second);
    PruneBlockIndexCandidates();

    LogPrintf("LoadBlockIndexDB(): hashBestChain=%s height=%d date=%s progress=%f\n",
        chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
        Checkpoints::GuessVerificationProgress(chainActive.Tip()));

    return true;
}

// RAII progress dialog around database verification.
CVerifyDB::CVerifyDB()
{
    uiInterface.ShowProgress(_("Verifying blocks..."), 0);
}

CVerifyDB::~CVerifyDB()
{
    uiInterface.ShowProgress("", 100);
}

// Verify the last nCheckDepth blocks of the best chain at increasing levels
// of thoroughness (0 = disk read .. 4 = full reconnect).
bool CVerifyDB::VerifyDB(CCoinsView* coinsview, int nCheckLevel, int nCheckDepth)
{
    LOCK(cs_main);
    if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
        return true;

    // Verify blocks in the best chain
    if (nCheckDepth <= 0)
        nCheckDepth = 1000000000; // suffices until the year 19000
    if (nCheckDepth > chainActive.Height())
        nCheckDepth = chainActive.Height();
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    CCoinsViewCache coins(coinsview);
    CBlockIndex* pindexState = chainActive.Tip();
    CBlockIndex* pindexFailure = NULL;
    int nGoodTransactions = 0;
    CValidationState state;
    for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
        boost::this_thread::interruption_point();
        uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ?
50 : 100)))));
        if (pindex->nHeight < chainActive.Height() - nCheckDepth)
            break;
        CBlock block;
        // check level 0: read from disk
        if (!ReadBlockFromDisk(block, pindex))
            return error("VerifyDB() : *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !CheckBlock(block, state))
            return error("VerifyDB() : *** found bad block at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            CDiskBlockPos pos = pindex->GetUndoPos();
            if (!pos.IsNull()) {
                if (!undo.ReadFromDisk(pos, pindex->pprev->GetBlockHash()))
                    return error("VerifyDB() : *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        if (nCheckLevel >= 3 && pindex == pindexState && (coins.GetCacheSize() + pcoinsTip->GetCacheSize()) <= nCoinCacheSize) {
            bool fClean = true;
            if (!DisconnectBlock(block, state, pindex, coins, &fClean))
                return error("VerifyDB() : *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            pindexState = pindex->pprev;
            if (!fClean) {
                // Unclean disconnect: remember the failure point and reset the
                // good-transaction counter.
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else
                nGoodTransactions += block.vtx.size();
        }
        if (ShutdownRequested())
            return true;
    }
    if (pindexFailure)
        return error("VerifyDB() : *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);

    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        CBlockIndex* pindex = pindexState;
        while (pindex != chainActive.Tip()) {
            boost::this_thread::interruption_point();
            uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
            pindex = chainActive.Next(pindex);
            CBlock block;
            if (!ReadBlockFromDisk(block, pindex))
                return error("VerifyDB() : *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
            if (!ConnectBlock(block, state, pindex, coins, false))
                return error("VerifyDB() : *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
        }
    }

    LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);

    return true;
}

// Drop all in-memory block index state (used on shutdown / reindex).
void UnloadBlockIndex()
{
    mapBlockIndex.clear();
    setBlockIndexCandidates.clear();
    chainActive.SetTip(NULL);
    pindexBestInvalid = NULL;
}

bool LoadBlockIndex()
{
    // Load block index from databases
    if (!fReindex && !LoadBlockIndexDB())
        return false;
    return true;
}

// Create the genesis block on disk and activate it for a fresh datadir.
bool InitBlockIndex()
{
    LOCK(cs_main);
    // Check whether we're already initialized
    if (chainActive.Genesis() != NULL)
        return true;

    // Use the provided setting for -txindex in the new database
    fTxIndex = GetBoolArg("-txindex", true);
    pblocktree->WriteFlag("txindex", fTxIndex);
    LogPrintf("Initializing databases...\n");

    // Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
    if (!fReindex) {
        try {
            CBlock& block = const_cast<CBlock&>(Params().GenesisBlock());
            // Start new block file
            unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
            CDiskBlockPos blockPos;
            CValidationState state;
            if (!FindBlockPos(state, blockPos, nBlockSize + 8, 0, block.GetBlockTime()))
                return error("LoadBlockIndex() : FindBlockPos failed");
            if (!WriteBlockToDisk(block, blockPos))
                return error("LoadBlockIndex() : writing genesis block to disk failed");
            CBlockIndex* pindex = AddToBlockIndex(block);
            if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
                return error("LoadBlockIndex() : genesis block not accepted");
            if (!ActivateBestChain(state, &block))
                return error("LoadBlockIndex() : genesis block cannot be activated");
            // Force a chainstate write so that when we VerifyDB in a moment, it doesnt check stale data
            return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
        } catch (std::runtime_error& e) {
            return error("LoadBlockIndex() : failed to initialize block database: %s", e.what());
        }
    }

    return true;
}

// Import blocks from an external blk*.dat file (reindex / -loadblock),
// handling out-of-order blocks by parking them until their parent arrives.
// Returns true when at least one block was loaded.
bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos* dbp)
{
    // Map of disk positions for blocks with unknown parent (only used for reindex)
    static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
    int64_t nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
        CBufferedFile blkdat(fileIn, 2 * MAX_BLOCK_SIZE_CURRENT, MAX_BLOCK_SIZE_CURRENT + 8, SER_DISK, CLIENT_VERSION);
        uint64_t nRewind = blkdat.GetPos();
        while (!blkdat.eof()) {
            boost::this_thread::interruption_point();

            blkdat.SetPos(nRewind);
            nRewind++;         // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header
                unsigned char buf[MESSAGE_START_SIZE];
                blkdat.FindByte(Params().MessageStart()[0]);
                nRewind = blkdat.GetPos() + 1;
                blkdat >> FLATDATA(buf);
                if (memcmp(buf, Params().MessageStart(), MESSAGE_START_SIZE))
                    continue;
                // read size
                blkdat >> nSize;
                if (nSize < 80 || nSize > MAX_BLOCK_SIZE_CURRENT)
                    continue;
            } catch (const std::exception&) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block
                uint64_t nBlockPos = blkdat.GetPos();
                if (dbp)
                    dbp->nPos = nBlockPos;
                blkdat.SetLimit(nBlockPos + nSize);
                blkdat.SetPos(nBlockPos);
                CBlock block;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                // detect out of order blocks, and store them for later
                uint256 hash = block.GetHash();
                if (hash != Params().HashGenesisBlock() && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
                    LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
                        block.hashPrevBlock.ToString());
                    if (dbp)
                        mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
                    continue;
                }

                // process in case the block isn't known yet
                if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
                    CValidationState state;
                    if (ProcessNewBlock(state, NULL, &block, dbp))
                        nLoaded++;
                    if (state.IsError())
                        break;
                } else if (hash != Params().HashGenesisBlock() && mapBlockIndex[hash]->nHeight % 1000 == 0) {
                    LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
                }

                // Recursively process earlier encountered successors of this block
                deque<uint256> queue;
                queue.push_back(hash);
                while (!queue.empty()) {
                    uint256 head = queue.front();
                    queue.pop_front();
                    std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
                    while (range.first != range.second) {
                        std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
                        if (ReadBlockFromDisk(block, it->second)) {
                            LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
                                head.ToString());
                            CValidationState dummy;
                            if (ProcessNewBlock(dummy, NULL, &block, &it->second)) {
                                nLoaded++;
                                queue.push_back(block.GetHash());
                            }
                        }
                        range.first++;
                        mapBlocksUnknownParent.erase(it);
                    }
                }
            } catch (std::exception& e) {
                LogPrintf("%s : Deserialize or I/O error - %s", __func__, e.what());
            }
        }
    } catch (std::runtime_error& e) {
        AbortNode(std::string("System error: ") + e.what());
    }
    if (nLoaded > 0)
        LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
    return nLoaded > 0;
}

// Exhaustive (debug-only, -checkblockindex) consistency audit of the block
// tree; enabled via fCheckBlockIndex.
void static CheckBlockIndex()
{
    if (!fCheckBlockIndex) {
        return;
    }

    LOCK(cs_main);

    // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
    // so we have the genesis block in mapBlockIndex but no active chain.
(A few of the tests when // iterating the block tree require that chainActive has been initialized.) if (chainActive.Height() < 0) { assert(mapBlockIndex.size() <= 1); return; } // Build forward-pointing map of the entire block tree. std::multimap<CBlockIndex*, CBlockIndex*> forward; for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) { forward.insert(std::make_pair(it->second->pprev, it->second)); } assert(forward.size() == mapBlockIndex.size()); std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL); CBlockIndex* pindex = rangeGenesis.first->second; rangeGenesis.first++; assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL. // Iterate over the entire block tree, using depth-first search. // Along the way, remember whether there are blocks on the path from genesis // block being explored which are the first to have certain properties. size_t nNodes = 0; int nHeight = 0; CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid. CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA. CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not). CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not). CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not). 
    // Depth-first traversal: each iteration checks one node, then either descends
    // to a child, moves to a sibling, or backtracks to the parent.
    while (pindex != NULL) {
        nNodes++;
        // Track the oldest ancestor (including pindex itself) with each property.
        if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
        if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
        if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
        if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;

        // Begin: actual consistency checks.
        if (pindex->pprev == NULL) {
            // Genesis block checks.
            assert(pindex->GetBlockHash() == Params().HashGenesisBlock()); // Genesis block's hash must match.
            assert(pindex == chainActive.Genesis());                      // The current active chain's genesis block must be this block.
        }
        // HAVE_DATA is equivalent to VALID_TRANSACTIONS and equivalent to nTx > 0 (we stored the number of transactions in the block)
        assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
        assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0));
        if (pindex->nChainTx == 0) assert(pindex->nSequenceId == 0); // nSequenceId can't be set for blocks that aren't linked
        // All parents having data is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
        assert((pindexFirstMissing != NULL) == (pindex->nChainTx == 0)); // nChainTx == 0 is used to signal that all parent block's transaction data is available.
        assert(pindex->nHeight == nHeight); // nHeight must be consistent.
        assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
        assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
        // NOTE(review): the unconditional assert below makes the conditional TREE-valid
        // check two lines down redundant — intentional per this codebase's comment that
        // all mapBlockIndex entries must be at least TREE valid; verify against upstream.
        assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL);       // TREE valid implies all parents are TREE valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL);     // CHAIN valid implies all parents are CHAIN valid
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid
        if (pindexFirstInvalid == NULL) {
            // Checks for not-invalid blocks.
            assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
        }
        if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstMissing == NULL) {
            if (pindexFirstInvalid == NULL) {
                // If this block sorts at least as good as the current tip and is valid, it must be in setBlockIndexCandidates.
                assert(setBlockIndexCandidates.count(pindex));
            }
        } else {
            // If this block sorts worse than the current tip, it cannot be in setBlockIndexCandidates.
            assert(setBlockIndexCandidates.count(pindex) == 0);
        }
        // Check whether this block is in mapBlocksUnlinked.
        std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
        bool foundInUnlinked = false;
        while (rangeUnlinked.first != rangeUnlinked.second) {
            assert(rangeUnlinked.first->first == pindex->pprev);
            if (rangeUnlinked.first->second == pindex) {
                foundInUnlinked = true;
                break;
            }
            rangeUnlinked.first++;
        }
        if (pindex->pprev && pindex->nStatus & BLOCK_HAVE_DATA && pindexFirstMissing != NULL) {
            if (pindexFirstInvalid == NULL) {
                // If this block has block data available, some parent doesn't, and has no invalid parents, it must be in mapBlocksUnlinked.
                assert(foundInUnlinked);
            }
        } else {
            // If this block does not have block data available, or all parents do, it cannot be in mapBlocksUnlinked.
            assert(!foundInUnlinked);
        }
        // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
        // End: actual consistency checks.

        // Try descending into the first subnode.
        std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = forward.equal_range(pindex);
        if (range.first != range.second) {
            // A subnode was found.
            pindex = range.first->second;
            nHeight++;
            continue;
        }
        // This is a leaf node.
        // Move upwards until we reach a node of which we have not yet visited the last child.
        while (pindex) {
            // We are going to either move to a parent or a sibling of pindex.
            // If pindex was the first with a certain property, unset the corresponding variable.
            if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
            if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
            // Find our parent.
            CBlockIndex* pindexPar = pindex->pprev;
            // Find which child we just visited.
            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
            while (rangePar.first->second != pindex) {
                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
                rangePar.first++;
            }
            // Proceed to the next one.
            rangePar.first++;
            if (rangePar.first != rangePar.second) {
                // Move to the sibling.
                pindex = rangePar.first->second;
                break;
            } else {
                // Move up further.
                pindex = pindexPar;
                nHeight--;
                continue;
            }
        }
    }
    // Check that we actually traversed the entire map.
    assert(nNodes == forward.size());
}


//////////////////////////////////////////////////////////////////////////////
//
// CAlert
//

/**
 * Build the highest-priority warning string for the GUI status bar or RPC.
 *
 * @param strFor  Either "statusbar" or "rpc"; any other value asserts.
 * @return The selected warning text, or an empty string if none applies.
 */
string GetWarnings(string strFor)
{
    int nPriority = 0;
    string strStatusBar;
    string strRPC;

    if (!CLIENT_VERSION_IS_RELEASE)
        strStatusBar = _("This is a pre-release test build - use at your own risk - do not use for staking or merchant applications!");

    if (GetBoolArg("-testsafemode", false))
        strStatusBar = strRPC = "testsafemode enabled";

    // Misc warnings like out of disk space and clock is wrong
    if (strMiscWarning != "") {
        nPriority = 1000;
        strStatusBar = strMiscWarning;
    }

    if (fLargeWorkForkFound) {
        nPriority = 2000;
        strStatusBar = strRPC = _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
    } else if (fLargeWorkInvalidChainFound) {
        nPriority = 2000;
        strStatusBar = strRPC = _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
    }

    // Alerts: the highest-priority applicable alert overrides the status bar text.
    {
        LOCK(cs_mapAlerts);
        BOOST_FOREACH (PAIRTYPE(const uint256, CAlert) & item, mapAlerts) {
            const CAlert& alert = item.second;
            if (alert.AppliesToMe() && alert.nPriority > nPriority) {
                nPriority = alert.nPriority;
                strStatusBar = alert.strStatusBar;
            }
        }
    }

    if (strFor == "statusbar")
        return strStatusBar;
    else if (strFor == "rpc")
        return strRPC;
    assert(!"GetWarnings() : invalid parameter");
    return "error";
}


//////////////////////////////////////////////////////////////////////////////
//
// Messages
//

/**
 * Return true if we already have (or have seen) the inventory item, so we
 * should not request it again. Some masternode/budget cases also notify the
 * sync manager as a side effect. Unknown inventory types report "already have"
 * so they are never requested.
 */
bool static AlreadyHave(const CInv& inv)
{
    switch (inv.type) {
    case MSG_TX: {
        bool txInMap = false;
        txInMap = mempool.exists(inv.hash);
        // Also treat known orphans and already-confirmed (in-UTXO) txes as "have".
        return txInMap || mapOrphanTransactions.count(inv.hash) || pcoinsTip->HaveCoins(inv.hash);
    }
    case MSG_DSTX:
        return mapObfuscationBroadcastTxes.count(inv.hash);
    case MSG_BLOCK:
        return mapBlockIndex.count(inv.hash);
    case MSG_TXLOCK_REQUEST:
        return mapTxLockReq.count(inv.hash) || mapTxLockReqRejected.count(inv.hash);
    case MSG_TXLOCK_VOTE:
        return mapTxLockVote.count(inv.hash);
    case MSG_SPORK:
        return mapSporks.count(inv.hash);
    case MSG_MASTERNODE_WINNER:
        if (masternodePayments.mapMasternodePayeeVotes.count(inv.hash)) {
            masternodeSync.AddedMasternodeWinner(inv.hash);
            return true;
        }
        return false;
    case MSG_BUDGET_VOTE:
        if (budget.mapSeenMasternodeBudgetVotes.count(inv.hash)) {
            masternodeSync.AddedBudgetItem(inv.hash);
            return true;
        }
        return false;
    case MSG_BUDGET_PROPOSAL:
        if (budget.mapSeenMasternodeBudgetProposals.count(inv.hash)) {
            masternodeSync.AddedBudgetItem(inv.hash);
            return true;
        }
        return false;
    case MSG_BUDGET_FINALIZED_VOTE:
        if (budget.mapSeenFinalizedBudgetVotes.count(inv.hash)) {
            masternodeSync.AddedBudgetItem(inv.hash);
            return true;
        }
        return false;
    case MSG_BUDGET_FINALIZED:
        if (budget.mapSeenFinalizedBudgets.count(inv.hash)) {
            masternodeSync.AddedBudgetItem(inv.hash);
            return true;
        }
        return false;
    case MSG_MASTERNODE_ANNOUNCE:
        if
        (mnodeman.mapSeenMasternodeBroadcast.count(inv.hash)) {
            masternodeSync.AddedMasternodeList(inv.hash);
            return true;
        }
        return false;
    case MSG_MASTERNODE_PING:
        return mnodeman.mapSeenMasternodePing.count(inv.hash);
    }
    // Don't know what it is, just say we already got one
    return true;
}

/**
 * Serve queued "getdata" requests for a peer: blocks (full or filtered) from
 * disk, and transactions/masternode/budget items from relay memory. Consumes
 * pfrom->vRecvGetData up to the point processed and replies "notfound" for
 * anything we could not supply. Requires and takes cs_main.
 */
void static ProcessGetData(CNode* pfrom)
{
    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();

    vector<CInv> vNotFound;

    LOCK(cs_main);

    while (it != pfrom->vRecvGetData.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= SendBufferSize())
            break;

        const CInv& inv = *it;
        {
            boost::this_thread::interruption_point();
            it++;

            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK) {
                bool send = false;
                BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
                if (mi != mapBlockIndex.end()) {
                    if (chainActive.Contains(mi->second)) {
                        send = true;
                    } else {
                        // To prevent fingerprinting attacks, only send blocks outside of the active
                        // chain if they are valid, and no more than a max reorg depth than the best header
                        // chain we know about.
                        send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) && (chainActive.Height() - mi->second->nHeight < Params().MaxReorganizationDepth());
                        if (!send) {
                            LogPrintf("ProcessGetData(): ignoring request from peer=%i for old block that isn't in the main chain\n", pfrom->GetId());
                        }
                    }
                }
                // Don't send not-validated blocks
                if (send && (mi->second->nStatus & BLOCK_HAVE_DATA)) {
                    // Send block from disk
                    CBlock block;
                    if (!ReadBlockFromDisk(block, (*mi).second))
                        assert(!"cannot load block from disk");
                    if (inv.type == MSG_BLOCK)
                        pfrom->PushMessage("block", block);
                    else // MSG_FILTERED_BLOCK)
                    {
                        LOCK(pfrom->cs_filter);
                        if (pfrom->pfilter) {
                            CMerkleBlock merkleBlock(block, *pfrom->pfilter);
                            pfrom->PushMessage("merkleblock", merkleBlock);
                            // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                            // This avoids hurting performance by pointlessly requiring a round-trip
                            // Note that there is currently no way for a node to request any single transactions we didnt send here -
                            // they must either disconnect and retry or request the full block.
                            // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                            // however we MUST always provide at least what the remote peer needs
                            typedef std::pair<unsigned int, uint256> PairType;
                            BOOST_FOREACH (PairType& pair, merkleBlock.vMatchedTxn)
                                if (!pfrom->setInventoryKnown.count(CInv(MSG_TX, pair.second)))
                                    pfrom->PushMessage("tx", block.vtx[pair.first]);
                        }
                        // else
                        // no response
                    }

                    // Trigger them to send a getblocks request for the next batch of inventory
                    if (inv.hash == pfrom->hashContinue) {
                        // Bypass PushInventory, this must send even if redundant,
                        // and we want it right after the last block so they don't
                        // wait for other stuff first.
                        vector<CInv> vInv;
                        vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
                        pfrom->PushMessage("inv", vInv);
                        pfrom->hashContinue = 0;
                    }
                }
            } else if (inv.IsKnownType()) {
                // Send stream from relay memory first; fall back to the type-specific
                // stores below. `pushed` short-circuits once a response is sent.
                bool pushed = false;
                {
                    LOCK(cs_mapRelay);
                    map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
                    if (mi != mapRelay.end()) {
                        pfrom->PushMessage(inv.GetCommand(), (*mi).second);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_TX) {
                    CTransaction tx;
                    if (mempool.lookup(inv.hash, tx)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << tx;
                        pfrom->PushMessage("tx", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_TXLOCK_VOTE) {
                    if (mapTxLockVote.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << mapTxLockVote[inv.hash];
                        pfrom->PushMessage("txlvote", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_TXLOCK_REQUEST) {
                    if (mapTxLockReq.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << mapTxLockReq[inv.hash];
                        pfrom->PushMessage("ix", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_SPORK) {
                    if (mapSporks.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << mapSporks[inv.hash];
                        pfrom->PushMessage("spork", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_MASTERNODE_WINNER) {
                    if (masternodePayments.mapMasternodePayeeVotes.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << masternodePayments.mapMasternodePayeeVotes[inv.hash];
                        pfrom->PushMessage("mnw", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_BUDGET_VOTE) {
                    if (budget.mapSeenMasternodeBudgetVotes.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << budget.mapSeenMasternodeBudgetVotes[inv.hash];
                        pfrom->PushMessage("mvote", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_BUDGET_PROPOSAL) {
                    if (budget.mapSeenMasternodeBudgetProposals.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << budget.mapSeenMasternodeBudgetProposals[inv.hash];
                        pfrom->PushMessage("mprop", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_BUDGET_FINALIZED_VOTE) {
                    if (budget.mapSeenFinalizedBudgetVotes.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << budget.mapSeenFinalizedBudgetVotes[inv.hash];
                        pfrom->PushMessage("fbvote", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_BUDGET_FINALIZED) {
                    if (budget.mapSeenFinalizedBudgets.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << budget.mapSeenFinalizedBudgets[inv.hash];
                        pfrom->PushMessage("fbs", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_MASTERNODE_ANNOUNCE) {
                    if (mnodeman.mapSeenMasternodeBroadcast.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << mnodeman.mapSeenMasternodeBroadcast[inv.hash];
                        pfrom->PushMessage("mnb", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_MASTERNODE_PING) {
                    if (mnodeman.mapSeenMasternodePing.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << mnodeman.mapSeenMasternodePing[inv.hash];
                        pfrom->PushMessage("mnp", ss);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_DSTX) {
                    if (mapObfuscationBroadcastTxes.count(inv.hash)) {
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << mapObfuscationBroadcastTxes[inv.hash].tx << mapObfuscationBroadcastTxes[inv.hash].vin << mapObfuscationBroadcastTxes[inv.hash].vchSig << mapObfuscationBroadcastTxes[inv.hash].sigTime;
                        pfrom->PushMessage("dstx", ss);
                        pushed = true;
                    }
                }

                if (!pushed) {
                    vNotFound.push_back(inv);
                }
            }

            // Track requests for our stuff.
            g_signals.Inventory(inv.hash);

            // At most one block (or filtered block) per ProcessGetData pass.
            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
                break;
        }
    }

    pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever. Currently only SPV clients actually care
        // about this message: it's needed when they are recursively walking the
        // dependencies of relevant unconfirmed transactions. SPV clients want to
        // do that because they want to know about (and store and rebroadcast and
        // risk analyze) the dependencies of transactions relevant to them, without
        // having to download the entire memory pool.
        pfrom->PushMessage("notfound", vNotFound);
    }
}

/**
 * Dispatch a single received P2P message for peer pfrom. Returns false to
 * signal misbehavior/rejection; true otherwise. (Continues beyond this chunk.)
 */
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived)
{
    RandAddSeedPerfmon();
    if (fDebug)
        LogPrintf("received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
    if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0) {
        LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
        return true;
    }

    if (strCommand == "version") {
        // Each connection can only send one version message
        if (pfrom->nVersion != 0) {
            pfrom->PushMessage("reject", strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
            Misbehaving(pfrom->GetId(), 1);
            return false;
        }

        // Bitcoinlegend: We use certain sporks during IBD, so check to see if they are
        // available. If not, ask the first peer connected for them.
if (!pSporkDB->SporkExists(SPORK_14_NEW_PROTOCOL_ENFORCEMENT) && !pSporkDB->SporkExists(SPORK_15_NEW_PROTOCOL_ENFORCEMENT_2) && !pSporkDB->SporkExists(SPORK_11_LOCK_INVALID_UTXO) && !pSporkDB->SporkExists(SPORK_16_ZEROCOIN_MAINTENANCE_MODE)) { LogPrintf("Required sporks not found, asking peer to send them\n"); pfrom->PushMessage("getsporks"); } int64_t nTime; CAddress addrMe; CAddress addrFrom; uint64_t nNonce = 1; vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe; if (pfrom->DisconnectOldProtocol(ActiveProtocol(), strCommand)) return false; if (pfrom->nVersion == 10300) pfrom->nVersion = 300; if (!vRecv.empty()) vRecv >> addrFrom >> nNonce; if (!vRecv.empty()) { vRecv >> LIMITED_STRING(pfrom->strSubVer, 256); pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer); } if (!vRecv.empty()) vRecv >> pfrom->nStartingHeight; if (!vRecv.empty()) vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message else pfrom->fRelayTxes = true; // Disconnect if we connected to ourself if (nNonce == nLocalHostNonce && nNonce > 1) { LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString()); pfrom->fDisconnect = true; return true; } pfrom->addrLocal = addrMe; if (pfrom->fInbound && addrMe.IsRoutable()) { SeenLocal(addrMe); } // Be shy and don't send version until we hear if (pfrom->fInbound) pfrom->PushVersion(); pfrom->fClient = !(pfrom->nServices & NODE_NETWORK); // Potentially mark this peer as a preferred download peer. 
UpdatePreferredDownload(pfrom, State(pfrom->GetId())); // Change version pfrom->PushMessage("verack"); pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION)); if (!pfrom->fInbound) { // Advertise our address if (fListen && !IsInitialBlockDownload()) { CAddress addr = GetLocalAddress(&pfrom->addr); if (addr.IsRoutable()) { LogPrintf("ProcessMessages: advertizing address %s\n", addr.ToString()); pfrom->PushAddress(addr); } else if (IsPeerAddrLocalGood(pfrom)) { addr.SetIP(pfrom->addrLocal); LogPrintf("ProcessMessages: advertizing address %s\n", addr.ToString()); pfrom->PushAddress(addr); } } // Get recent addresses if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || addrman.size() < 1000) { pfrom->PushMessage("getaddr"); pfrom->fGetAddr = true; } addrman.Good(pfrom->addr); } else { if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom) { addrman.Add(addrFrom, addrFrom); addrman.Good(addrFrom); } } // Relay alerts { LOCK(cs_mapAlerts); BOOST_FOREACH (PAIRTYPE(const uint256, CAlert) & item, mapAlerts) item.second.RelayTo(pfrom); } pfrom->fSuccessfullyConnected = true; string remoteAddr; if (fLogIPs) remoteAddr = ", peeraddr=" + pfrom->addr.ToString(); LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n", pfrom->cleanSubVer, pfrom->nVersion, pfrom->nStartingHeight, addrMe.ToString(), pfrom->id, remoteAddr); AddTimeData(pfrom->addr, nTime); } else if (pfrom->nVersion == 0) { // Must have a version message before anything else Misbehaving(pfrom->GetId(), 1); return false; } else if (strCommand == "verack") { pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION)); // Mark this node as currently connected, so we update its timestamp later. 
if (pfrom->fNetworkNode) { LOCK(cs_main); State(pfrom->GetId())->fCurrentlyConnected = true; } } else if (strCommand == "addr") { vector<CAddress> vAddr; vRecv >> vAddr; // Don't want addr from older versions unless seeding if (pfrom->nVersion < CADDR_TIME_VERSION && addrman.size() > 1000) return true; if (vAddr.size() > 1000) { Misbehaving(pfrom->GetId(), 20); return error("message addr size() = %u", vAddr.size()); } // Store the new addresses vector<CAddress> vAddrOk; int64_t nNow = GetAdjustedTime(); int64_t nSince = nNow - 10 * 60; BOOST_FOREACH (CAddress& addr, vAddr) { boost::this_thread::interruption_point(); if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) addr.nTime = nNow - 5 * 24 * 60 * 60; pfrom->AddAddressKnown(addr); bool fReachable = IsReachable(addr); if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable()) { // Relay to a limited number of other nodes { LOCK(cs_vNodes); // Use deterministic randomness to send to the same nodes for 24 hours // at a time so the setAddrKnowns of the chosen nodes prevent repeats static uint256 hashSalt; if (hashSalt == 0) hashSalt = GetRandHash(); uint64_t hashAddr = addr.GetHash(); uint256 hashRand = hashSalt ^ (hashAddr << 32) ^ ((GetTime() + hashAddr) / (24 * 60 * 60)); hashRand = Hash(BEGIN(hashRand), END(hashRand)); multimap<uint256, CNode*> mapMix; BOOST_FOREACH (CNode* pnode, vNodes) { if (pnode->nVersion < CADDR_TIME_VERSION) continue; unsigned int nPointer; memcpy(&nPointer, &pnode, sizeof(nPointer)); uint256 hashKey = hashRand ^ nPointer; hashKey = Hash(BEGIN(hashKey), END(hashKey)); mapMix.insert(make_pair(hashKey, pnode)); } int nRelayNodes = fReachable ? 
2 : 1; // limited relaying of addresses outside our network(s) for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi) ((*mi).second)->PushAddress(addr); } } // Do not store addresses outside our network if (fReachable) vAddrOk.push_back(addr); } addrman.Add(vAddrOk, pfrom->addr, 2 * 60 * 60); if (vAddr.size() < 1000) pfrom->fGetAddr = false; if (pfrom->fOneShot) pfrom->fDisconnect = true; } else if (strCommand == "inv") { vector<CInv> vInv; vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { Misbehaving(pfrom->GetId(), 20); return error("message inv size() = %u", vInv.size()); } LOCK(cs_main); std::vector<CInv> vToFetch; for (unsigned int nInv = 0; nInv < vInv.size(); nInv++) { const CInv& inv = vInv[nInv]; boost::this_thread::interruption_point(); pfrom->AddInventoryKnown(inv); bool fAlreadyHave = AlreadyHave(inv); LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id); if (!fAlreadyHave && !fImporting && !fReindex && inv.type != MSG_BLOCK) pfrom->AskFor(inv); if (inv.type == MSG_BLOCK) { UpdateBlockAvailability(pfrom->GetId(), inv.hash); if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) { // Add this to the list of blocks to request vToFetch.push_back(inv); LogPrint("net", "getblocks (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id); } } // Track requests for our stuff g_signals.Inventory(inv.hash); if (pfrom->nSendSize > (SendBufferSize() * 2)) { Misbehaving(pfrom->GetId(), 50); return error("send buffer size() = %u", pfrom->nSendSize); } } if (!vToFetch.empty()) pfrom->PushMessage("getdata", vToFetch); } else if (strCommand == "getdata") { vector<CInv> vInv; vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { Misbehaving(pfrom->GetId(), 20); return error("message getdata size() = %u", vInv.size()); } if (fDebug || (vInv.size() != 1)) LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), 
pfrom->id); if ((fDebug && vInv.size() > 0) || (vInv.size() == 1)) LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id); pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end()); ProcessGetData(pfrom); } else if (strCommand == "getblocks" || strCommand == "getheaders") { CBlockLocator locator; uint256 hashStop; vRecv >> locator >> hashStop; LOCK(cs_main); // Find the last block the caller has in the main chain CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator); // Send the rest of the chain if (pindex) pindex = chainActive.Next(pindex); int nLimit = 500; LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop == uint256(0) ? "end" : hashStop.ToString(), nLimit, pfrom->id); for (; pindex; pindex = chainActive.Next(pindex)) { if (pindex->GetBlockHash() == hashStop) { LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash())); if (--nLimit <= 0) { // When this block is requested, we'll send an inv that'll make them // getblocks the next batch of inventory. 
LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); pfrom->hashContinue = pindex->GetBlockHash(); break; } } } else if (strCommand == "headers" && Params().HeadersFirstSyncingActive()) { CBlockLocator locator; uint256 hashStop; vRecv >> locator >> hashStop; LOCK(cs_main); if (IsInitialBlockDownload()) return true; CBlockIndex* pindex = NULL; if (locator.IsNull()) { // If locator is null, return the hashStop block BlockMap::iterator mi = mapBlockIndex.find(hashStop); if (mi == mapBlockIndex.end()) return true; pindex = (*mi).second; } else { // Find the last block the caller has in the main chain pindex = FindForkInGlobalIndex(chainActive, locator); if (pindex) pindex = chainActive.Next(pindex); } // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end vector<CBlock> vHeaders; int nLimit = MAX_HEADERS_RESULTS; if (fDebug) LogPrintf("getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString(), pfrom->id); for (; pindex; pindex = chainActive.Next(pindex)) { vHeaders.push_back(pindex->GetBlockHeader()); if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) break; } pfrom->PushMessage("headers", vHeaders); } else if (strCommand == "tx" || strCommand == "dstx") { vector<uint256> vWorkQueue; vector<uint256> vEraseQueue; CTransaction tx; //masternode signed transaction bool ignoreFees = false; CTxIn vin; vector<unsigned char> vchSig; int64_t sigTime; if (strCommand == "tx") { vRecv >> tx; } else if (strCommand == "dstx") { //these allow masternodes to publish a limited amount of free transactions vRecv >> tx >> vin >> vchSig >> sigTime; CMasternode* pmn = mnodeman.Find(vin); if (pmn != NULL) { if (!pmn->allowFreeTx) { //multiple peers can send us a valid masternode transaction if (fDebug) LogPrintf("dstx: Masternode sending too many transactions %s\n", tx.GetHash().ToString()); return true; } std::string strMessage = tx.GetHash().ToString() + 
boost::lexical_cast<std::string>(sigTime); std::string errorMessage = ""; if (!obfuScationSigner.VerifyMessage(pmn->pubKeyMasternode, vchSig, strMessage, errorMessage)) { LogPrintf("dstx: Got bad masternode address signature %s \n", vin.ToString()); //pfrom->Misbehaving(20); return false; } LogPrintf("dstx: Got Masternode transaction %s\n", tx.GetHash().ToString()); ignoreFees = true; pmn->allowFreeTx = false; if (!mapObfuscationBroadcastTxes.count(tx.GetHash())) { CObfuscationBroadcastTx dstx; dstx.tx = tx; dstx.vin = vin; dstx.vchSig = vchSig; dstx.sigTime = sigTime; mapObfuscationBroadcastTxes.insert(make_pair(tx.GetHash(), dstx)); } } } CInv inv(MSG_TX, tx.GetHash()); pfrom->AddInventoryKnown(inv); LOCK(cs_main); bool fMissingInputs = false; bool fMissingZerocoinInputs = false; CValidationState state; mapAlreadyAskedFor.erase(inv); if (!tx.IsZerocoinSpend() && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs, false, ignoreFees)) { mempool.check(pcoinsTip); RelayTransaction(tx); vWorkQueue.push_back(inv.hash); LogPrint("mempool", "AcceptToMemoryPool: peer=%d %s : accepted %s (poolsz %u)\n", pfrom->id, pfrom->cleanSubVer, tx.GetHash().ToString(), mempool.mapTx.size()); // Recursively process any orphan transactions that depended on this one set<NodeId> setMisbehaving; for(unsigned int i = 0; i < vWorkQueue.size(); i++) { map<uint256, set<uint256> >::iterator itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue[i]); if(itByPrev == mapOrphanTransactionsByPrev.end()) continue; for(set<uint256>::iterator mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { const uint256 &orphanHash = *mi; const CTransaction &orphanTx = mapOrphanTransactions[orphanHash].tx; NodeId fromPeer = mapOrphanTransactions[orphanHash].fromPeer; bool fMissingInputs2 = false; // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get // 
anyone relaying LegitTxX banned) CValidationState stateDummy; if(setMisbehaving.count(fromPeer)) continue; if(AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) { LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString()); RelayTransaction(orphanTx); vWorkQueue.push_back(orphanHash); vEraseQueue.push_back(orphanHash); } else if(!fMissingInputs2) { int nDos = 0; if(stateDummy.IsInvalid(nDos) && nDos > 0) { // Punish peer that gave us an invalid orphan tx Misbehaving(fromPeer, nDos); setMisbehaving.insert(fromPeer); LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString()); } // Has inputs but not accepted to mempool // Probably non-standard or insufficient fee/priority LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString()); vEraseQueue.push_back(orphanHash); } mempool.check(pcoinsTip); } } BOOST_FOREACH (uint256 hash, vEraseQueue)EraseOrphanTx(hash); } else if (tx.IsZerocoinSpend() && AcceptToMemoryPool(mempool, state, tx, true, &fMissingZerocoinInputs, false, ignoreFees)) { //Presstab: ZCoin has a bunch of code commented out here. Is this something that should have more going on? //Also there is nothing that handles fMissingZerocoinInputs. Does there need to be? 
RelayTransaction(tx); LogPrint("mempool", "AcceptToMemoryPool: Zerocoinspend peer=%d %s : accepted %s (poolsz %u)\n", pfrom->id, pfrom->cleanSubVer, tx.GetHash().ToString(), mempool.mapTx.size()); } else if (fMissingInputs) { AddOrphanTx(tx, pfrom->GetId()); // DoS prevention: do not allow mapOrphanTransactions to grow unbounded unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx); if (nEvicted > 0) LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted); } else if (pfrom->fWhitelisted) { // Always relay transactions received from whitelisted peers, even // if they are already in the mempool (allowing the node to function // as a gateway for nodes hidden behind it). RelayTransaction(tx); } if (strCommand == "dstx") { CInv inv(MSG_DSTX, tx.GetHash()); RelayInv(inv); } int nDoS = 0; if (state.IsInvalid(nDoS)) { LogPrint("mempool", "%s from peer=%d %s was not accepted into the memory pool: %s\n", tx.GetHash().ToString(), pfrom->id, pfrom->cleanSubVer, state.GetRejectReason()); pfrom->PushMessage("reject", strCommand, state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash); if (nDoS > 0) Misbehaving(pfrom->GetId(), nDoS); } } else if (strCommand == "headers" && Params().HeadersFirstSyncingActive() && !fImporting && !fReindex) // Ignore headers received while importing { std::vector<CBlockHeader> headers; // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. unsigned int nCount = ReadCompactSize(vRecv); if (nCount > MAX_HEADERS_RESULTS) { Misbehaving(pfrom->GetId(), 20); return error("headers message size = %u", nCount); } headers.resize(nCount); for (unsigned int n = 0; n < nCount; n++) { vRecv >> headers[n]; ReadCompactSize(vRecv); // ignore tx count; assume it is 0. } LOCK(cs_main); if (nCount == 0) { // Nothing interesting. 
Stop asking this peers for more headers. return true; } CBlockIndex* pindexLast = NULL; BOOST_FOREACH (const CBlockHeader& header, headers) { CValidationState state; if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) { Misbehaving(pfrom->GetId(), 20); return error("non-continuous headers sequence"); } /*TODO: this has a CBlock cast on it so that it will compile. There should be a solution for this * before headers are reimplemented on mainnet */ if (!AcceptBlockHeader((CBlock)header, state, &pindexLast)) { int nDoS; if (state.IsInvalid(nDoS)) { if (nDoS > 0) Misbehaving(pfrom->GetId(), nDoS); std::string strError = "invalid header received " + header.GetHash().ToString(); return error(strError.c_str()); } } } if (pindexLast) UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash()); if (nCount == MAX_HEADERS_RESULTS && pindexLast) { // Headers message had its maximum size; the peer may have more headers. // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue // from there instead. 
LogPrintf("more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight); pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256(0)); } CheckBlockIndex(); } else if (strCommand == "block" && !fImporting && !fReindex) // Ignore blocks received while importing { CBlock block; vRecv >> block; uint256 hashBlock = block.GetHash(); CInv inv(MSG_BLOCK, hashBlock); LogPrint("net", "received block %s peer=%d\n", inv.hash.ToString(), pfrom->id); //sometimes we will be sent their most recent block and its not the one we want, in that case tell where we are if (!mapBlockIndex.count(block.hashPrevBlock)) { if (find(pfrom->vBlockRequested.begin(), pfrom->vBlockRequested.end(), hashBlock) != pfrom->vBlockRequested.end()) { //we already asked for this block, so lets work backwards and ask for the previous block pfrom->PushMessage("getblocks", chainActive.GetLocator(), block.hashPrevBlock); pfrom->vBlockRequested.push_back(block.hashPrevBlock); } else { //ask to sync to this block pfrom->PushMessage("getblocks", chainActive.GetLocator(), hashBlock); pfrom->vBlockRequested.push_back(hashBlock); } } else { pfrom->AddInventoryKnown(inv); CValidationState state; if (!mapBlockIndex.count(block.GetHash())) { ProcessNewBlock(state, pfrom, &block); int nDoS; if(state.IsInvalid(nDoS)) { pfrom->PushMessage("reject", strCommand, state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash); if(nDoS > 0) { TRY_LOCK(cs_main, lockMain); if(lockMain) Misbehaving(pfrom->GetId(), nDoS); } } //disconnect this node if its old protocol version pfrom->DisconnectOldProtocol(ActiveProtocol(), strCommand); } else { LogPrint("net", "%s : Already processed block %s, skipping ProcessNewBlock()\n", __func__, block.GetHash().GetHex()); } } } // This asymmetric behavior for inbound and outbound connections was introduced // to prevent a fingerprinting attack: an attacker can send specific fake addresses 
// to users' AddrMan and later request them by sending getaddr messages. // Making users (which are behind NAT and can only make outgoing connections) ignore // getaddr message mitigates the attack. else if ((strCommand == "getaddr") && (pfrom->fInbound)) { pfrom->vAddrToSend.clear(); vector<CAddress> vAddr = addrman.GetAddr(); BOOST_FOREACH (const CAddress& addr, vAddr) pfrom->PushAddress(addr); } else if (strCommand == "mempool") { LOCK2(cs_main, pfrom->cs_filter); std::vector<uint256> vtxid; mempool.queryHashes(vtxid); vector<CInv> vInv; BOOST_FOREACH (uint256& hash, vtxid) { CInv inv(MSG_TX, hash); CTransaction tx; bool fInMemPool = mempool.lookup(hash, tx); if (!fInMemPool) continue; // another thread removed since queryHashes, maybe... if ((pfrom->pfilter && pfrom->pfilter->IsRelevantAndUpdate(tx)) || (!pfrom->pfilter)) vInv.push_back(inv); if (vInv.size() == MAX_INV_SZ) { pfrom->PushMessage("inv", vInv); vInv.clear(); } } if (vInv.size() > 0) pfrom->PushMessage("inv", vInv); } else if (strCommand == "ping") { if (pfrom->nVersion > BIP0031_VERSION) { uint64_t nonce = 0; vRecv >> nonce; // Echo the message back with the nonce. This allows for two useful features: // // 1) A remote node can quickly check if the connection is operational // 2) Remote nodes can measure the latency of the network thread. If this node // is overloaded it won't respond to pings quickly and the remote node can // avoid sending us more work, like chain download requests. // // The nonce stops the remote getting confused between different pings: without // it, if the remote node sends a ping once per second and this node takes 5 // seconds to respond to each, the 5th ping the remote sends would appear to // return very quickly. 
pfrom->PushMessage("pong", nonce); } } else if (strCommand == "pong") { int64_t pingUsecEnd = nTimeReceived; uint64_t nonce = 0; size_t nAvail = vRecv.in_avail(); bool bPingFinished = false; std::string sProblem; if (nAvail >= sizeof(nonce)) { vRecv >> nonce; // Only process pong message if there is an outstanding ping (old ping without nonce should never pong) if (pfrom->nPingNonceSent != 0) { if (nonce == pfrom->nPingNonceSent) { // Matching pong received, this ping is no longer outstanding bPingFinished = true; int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart; if (pingUsecTime > 0) { // Successful ping time measurement, replace previous pfrom->nPingUsecTime = pingUsecTime; } else { // This should never happen sProblem = "Timing mishap"; } } else { // Nonce mismatches are normal when pings are overlapping sProblem = "Nonce mismatch"; if (nonce == 0) { // This is most likely a bug in another implementation somewhere, cancel this ping bPingFinished = true; sProblem = "Nonce zero"; } } } else { sProblem = "Unsolicited pong without ping"; } } else { // This is most likely a bug in another implementation somewhere, cancel this ping bPingFinished = true; sProblem = "Short payload"; } if (!(sProblem.empty())) { LogPrint("net", "pong peer=%d %s: %s, %x expected, %x received, %u bytes\n", pfrom->id, pfrom->cleanSubVer, sProblem, pfrom->nPingNonceSent, nonce, nAvail); } if (bPingFinished) { pfrom->nPingNonceSent = 0; } } else if (fAlerts && strCommand == "alert") { CAlert alert; vRecv >> alert; uint256 alertHash = alert.GetHash(); if (pfrom->setKnown.count(alertHash) == 0) { if (alert.ProcessAlert()) { // Relay pfrom->setKnown.insert(alertHash); { LOCK(cs_vNodes); BOOST_FOREACH (CNode* pnode, vNodes) alert.RelayTo(pnode); } } else { // Small DoS penalty so peers that send us lots of // duplicate/expired/invalid-signature/whatever alerts // eventually get banned. 
// This isn't a Misbehaving(100) (immediate ban) because the // peer might be an older or different implementation with // a different signature key, etc. Misbehaving(pfrom->GetId(), 10); } } } else if (!(nLocalServices & NODE_BLOOM) && (strCommand == "filterload" || strCommand == "filteradd" || strCommand == "filterclear")) { LogPrintf("bloom message=%s\n", strCommand); Misbehaving(pfrom->GetId(), 100); } else if (strCommand == "filterload") { CBloomFilter filter; vRecv >> filter; if (!filter.IsWithinSizeConstraints()) // There is no excuse for sending a too-large filter Misbehaving(pfrom->GetId(), 100); else { LOCK(pfrom->cs_filter); delete pfrom->pfilter; pfrom->pfilter = new CBloomFilter(filter); pfrom->pfilter->UpdateEmptyFull(); } pfrom->fRelayTxes = true; } else if (strCommand == "filteradd") { vector<unsigned char> vData; vRecv >> vData; // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object, // and thus, the maximum size any matched object can have) in a filteradd message if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { Misbehaving(pfrom->GetId(), 100); } else { LOCK(pfrom->cs_filter); if (pfrom->pfilter) pfrom->pfilter->insert(vData); else Misbehaving(pfrom->GetId(), 100); } } else if (strCommand == "filterclear") { LOCK(pfrom->cs_filter); delete pfrom->pfilter; pfrom->pfilter = new CBloomFilter(); pfrom->fRelayTxes = true; } else if (strCommand == "reject") { if (fDebug) { try { string strMsg; unsigned char ccode; string strReason; vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH); ostringstream ss; ss << strMsg << " code " << itostr(ccode) << ": " << strReason; if (strMsg == "block" || strMsg == "tx") { uint256 hash; vRecv >> hash; ss << ": hash " << hash.ToString(); } LogPrint("net", "Reject %s\n", SanitizeString(ss.str())); } catch (std::ios_base::failure& e) { // Avoid feedback loops by preventing reject messages from triggering a new reject 
message. LogPrint("net", "Unparseable reject message received\n"); } } } else { //probably one the extensions obfuScationPool.ProcessMessageObfuscation(pfrom, strCommand, vRecv); mnodeman.ProcessMessage(pfrom, strCommand, vRecv); budget.ProcessMessage(pfrom, strCommand, vRecv); masternodePayments.ProcessMessageMasternodePayments(pfrom, strCommand, vRecv); ProcessMessageSwiftTX(pfrom, strCommand, vRecv); ProcessSpork(pfrom, strCommand, vRecv); masternodeSync.ProcessMessage(pfrom, strCommand, vRecv); } return true; } // Note: whenever a protocol update is needed toggle between both implementations (comment out the formerly active one) // so we can leave the existing clients untouched (old SPORK will stay on so they don't see even older clients). // Those old clients won't react to the changes of the other (new) SPORK because at the time of their implementation // it was the one which was commented out int ActiveProtocol() { // SPORK_14 was used for 70910. Leave it 'ON' so they don't see > 70910 nodes. They won't react to SPORK_15 // messages because it's not in their code /* if (IsSporkActive(SPORK_14_NEW_PROTOCOL_ENFORCEMENT)) return MIN_PEER_PROTO_VERSION_AFTER_ENFORCEMENT; */ // SPORK_15 is used for 70911. 
Nodes < 70911 don't see it and still get their protocol version via SPORK_14 and their // own ModifierUpgradeBlock() if (IsSporkActive(SPORK_15_NEW_PROTOCOL_ENFORCEMENT_2)) return MIN_PEER_PROTO_VERSION_AFTER_ENFORCEMENT; return MIN_PEER_PROTO_VERSION_BEFORE_ENFORCEMENT; } // requires LOCK(cs_vRecvMsg) bool ProcessMessages(CNode* pfrom) { //if (fDebug) // LogPrintf("ProcessMessages(%u messages)\n", pfrom->vRecvMsg.size()); // // Message format // (4) message start // (12) command // (4) size // (4) checksum // (x) data // bool fOk = true; if (!pfrom->vRecvGetData.empty()) ProcessGetData(pfrom); // this maintains the order of responses if (!pfrom->vRecvGetData.empty()) return fOk; std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin(); while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) { // Don't bother if send buffer is too full to respond anyway if (pfrom->nSendSize >= SendBufferSize()) break; // get next message CNetMessage& msg = *it; //if (fDebug) // LogPrintf("ProcessMessages(message %u msgsz, %u bytes, complete:%s)\n", // msg.hdr.nMessageSize, msg.vRecv.size(), // msg.complete() ? 
"Y" : "N"); // end, if an incomplete message is found if (!msg.complete()) break; // at this point, any failure means we can delete the current message it++; // Scan for message start if (memcmp(msg.hdr.pchMessageStart, Params().MessageStart(), MESSAGE_START_SIZE) != 0) { LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id); fOk = false; break; } // Read header CMessageHeader& hdr = msg.hdr; if (!hdr.IsValid()) { LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id); continue; } string strCommand = hdr.GetCommand(); // Message size unsigned int nMessageSize = hdr.nMessageSize; // Checksum CDataStream& vRecv = msg.vRecv; uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize); unsigned int nChecksum = 0; memcpy(&nChecksum, &hash, sizeof(nChecksum)); if (nChecksum != hdr.nChecksum) { LogPrintf("ProcessMessages(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", SanitizeString(strCommand), nMessageSize, nChecksum, hdr.nChecksum); continue; } // Process message bool fRet = false; try { fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime); boost::this_thread::interruption_point(); } catch (std::ios_base::failure& e) { pfrom->PushMessage("reject", strCommand, REJECT_MALFORMED, string("error parsing message")); if (strstr(e.what(), "end of data")) { // Allow exceptions from under-length message on vRecv LogPrintf("ProcessMessages(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", SanitizeString(strCommand), nMessageSize, e.what()); } else if (strstr(e.what(), "size too large")) { // Allow exceptions from over-long size LogPrintf("ProcessMessages(%s, %u bytes): Exception '%s' caught\n", SanitizeString(strCommand), nMessageSize, e.what()); } else { PrintExceptionContinue(&e, "ProcessMessages()"); } } catch (boost::thread_interrupted) { throw; } catch (std::exception& e) { 
PrintExceptionContinue(&e, "ProcessMessages()"); } catch (...) { PrintExceptionContinue(NULL, "ProcessMessages()"); } if (!fRet) LogPrintf("ProcessMessage(%s, %u bytes) FAILED peer=%d\n", SanitizeString(strCommand), nMessageSize, pfrom->id); break; } // In case the connection got shut down, its receive buffer was wiped if (!pfrom->fDisconnect) pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it); return fOk; } bool SendMessages(CNode* pto, bool fSendTrickle) { { // Don't send anything until we get their version message if (pto->nVersion == 0) return true; // // Message: ping // bool pingSend = false; if (pto->fPingQueued) { // RPC ping request by user pingSend = true; } if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) { // Ping automatically sent as a latency probe & keepalive. pingSend = true; } if (pingSend) { uint64_t nonce = 0; while (nonce == 0) { GetRandBytes((unsigned char*)&nonce, sizeof(nonce)); } pto->fPingQueued = false; pto->nPingUsecStart = GetTimeMicros(); if (pto->nVersion > BIP0031_VERSION) { pto->nPingNonceSent = nonce; pto->PushMessage("ping", nonce); } else { // Peer is too old to support ping command with nonce, pong will never arrive. 
pto->nPingNonceSent = 0; pto->PushMessage("ping"); } } TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState() if (!lockMain) return true; // Address refresh broadcast static int64_t nLastRebroadcast; if (!IsInitialBlockDownload() && (GetTime() - nLastRebroadcast > 24 * 60 * 60)) { LOCK(cs_vNodes); BOOST_FOREACH (CNode* pnode, vNodes) { // Periodically clear setAddrKnown to allow refresh broadcasts if (nLastRebroadcast) pnode->setAddrKnown.clear(); // Rebroadcast our address AdvertizeLocal(pnode); } if (!vNodes.empty()) nLastRebroadcast = GetTime(); } // // Message: addr // if (fSendTrickle) { vector<CAddress> vAddr; vAddr.reserve(pto->vAddrToSend.size()); BOOST_FOREACH (const CAddress& addr, pto->vAddrToSend) { // returns true if wasn't already contained in the set if (pto->setAddrKnown.insert(addr).second) { vAddr.push_back(addr); // receiver rejects addr messages larger than 1000 if (vAddr.size() >= 1000) { pto->PushMessage("addr", vAddr); vAddr.clear(); } } } pto->vAddrToSend.clear(); if (!vAddr.empty()) pto->PushMessage("addr", vAddr); } CNodeState& state = *State(pto->GetId()); if (state.fShouldBan) { if (pto->fWhitelisted) LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString()); else { pto->fDisconnect = true; if (pto->addr.IsLocal()) LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString()); else { CNode::Ban(pto->addr); } } state.fShouldBan = false; } BOOST_FOREACH (const CBlockReject& reject, state.rejects) pto->PushMessage("reject", (string) "block", reject.chRejectCode, reject.strRejectReason, reject.hashBlock); state.rejects.clear(); // Start block sync if (pindexBestHeader == NULL) pindexBestHeader = chainActive.Tip(); bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do. 
if (!state.fSyncStarted && !pto->fClient && fFetch /*&& !fImporting*/ && !fReindex) { // Only actively request headers from a single peer, unless we're close to end of initial download. if (nSyncStarted == 0 || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 6 * 60 * 60) { // NOTE: was "close to today" and 24h in Bitcoin state.fSyncStarted = true; nSyncStarted++; //CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader; //LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight); //pto->PushMessage("getheaders", chainActive.GetLocator(pindexStart), uint256(0)); pto->PushMessage("getblocks", chainActive.GetLocator(chainActive.Tip()), uint256(0)); } } // Resend wallet transactions that haven't gotten in a block yet // Except during reindex, importing and IBD, when old wallet // transactions become unconfirmed and spams other nodes. if (!fReindex /*&& !fImporting && !IsInitialBlockDownload()*/) { g_signals.Broadcast(); } // // Message: inventory // vector<CInv> vInv; vector<CInv> vInvWait; { LOCK(pto->cs_inventory); vInv.reserve(pto->vInventoryToSend.size()); vInvWait.reserve(pto->vInventoryToSend.size()); BOOST_FOREACH (const CInv& inv, pto->vInventoryToSend) { if (pto->setInventoryKnown.count(inv)) continue; // trickle out tx inv to protect privacy if (inv.type == MSG_TX && !fSendTrickle) { // 1/4 of tx invs blast to all immediately static uint256 hashSalt; if (hashSalt == 0) hashSalt = GetRandHash(); uint256 hashRand = inv.hash ^ hashSalt; hashRand = Hash(BEGIN(hashRand), END(hashRand)); bool fTrickleWait = ((hashRand & 3) != 0); if (fTrickleWait) { vInvWait.push_back(inv); continue; } } // returns true if wasn't already contained in the set if (pto->setInventoryKnown.insert(inv).second) { vInv.push_back(inv); if (vInv.size() >= 1000) { pto->PushMessage("inv", vInv); vInv.clear(); } } } pto->vInventoryToSend = vInvWait; } if (!vInv.empty()) 
pto->PushMessage("inv", vInv); // Detect whether we're stalling int64_t nNow = GetTimeMicros(); if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) { // Stalling only triggers when the block download window cannot move. During normal steady state, // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection // should only happen during initial block download. LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id); pto->fDisconnect = true; } // In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval // (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to // timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link // being saturated. We only count validated in-flight blocks so peers can't advertize nonexisting block hashes // to unreasonably increase our timeout. 
if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0 && state.vBlocksInFlight.front().nTime < nNow - 500000 * Params().TargetSpacing() * (4 + state.vBlocksInFlight.front().nValidatedQueuedBefore)) { LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", state.vBlocksInFlight.front().hash.ToString(), pto->id); pto->fDisconnect = true; } // // Message: getdata (blocks) // vector<CInv> vGetData; if (!pto->fDisconnect && !pto->fClient && fFetch && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { vector<CBlockIndex*> vToDownload; NodeId staller = -1; FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller); BOOST_FOREACH (CBlockIndex* pindex, vToDownload) { vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash())); MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex); LogPrintf("Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->id); } if (state.nBlocksInFlight == 0 && staller != -1) { if (State(staller)->nStallingSince == 0) { State(staller)->nStallingSince = nNow; LogPrint("net", "Stall started peer=%d\n", staller); } } } // // Message: getdata (non-blocks) // while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow) { const CInv& inv = (*pto->mapAskFor.begin()).second; if (!AlreadyHave(inv)) { if (fDebug) LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id); vGetData.push_back(inv); if (vGetData.size() >= 1000) { pto->PushMessage("getdata", vGetData); vGetData.clear(); } } pto->mapAskFor.erase(pto->mapAskFor.begin()); } if (!vGetData.empty()) pto->PushMessage("getdata", vGetData); } return true; } bool CBlockUndo::WriteToDisk(CDiskBlockPos& pos, const uint256& hashBlock) { // Open history file to append CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) return error("CBlockUndo::WriteToDisk : OpenUndoFile failed"); // Write index 
header unsigned int nSize = fileout.GetSerializeSize(*this);
fileout << FLATDATA(Params().MessageStart()) << nSize;

// Write undo data
long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0)
    return error("CBlockUndo::WriteToDisk : ftell failed");
pos.nPos = (unsigned int)fileOutPos;
fileout << *this;

// calculate & write checksum
// The checksum commits to both the owning block hash and the undo payload,
// so ReadFromDisk can detect truncation/corruption of either.
CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
hasher << hashBlock;
hasher << *this;
fileout << hasher.GetHash();

return true;
}

// Reads this undo record from the undo file at `pos` and verifies it against
// `hashBlock` (the hash of the block the undo data belongs to).
// Returns false (via error()) on open failure, deserialize/I/O failure, or
// checksum mismatch.
bool CBlockUndo::ReadFromDisk(const CDiskBlockPos& pos, const uint256& hashBlock)
{
    // Open history file to read
    CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
    if (filein.IsNull())
        return error("CBlockUndo::ReadFromDisk : OpenBlockFile failed");

    // Read block
    uint256 hashChecksum;
    try {
        filein >> *this;
        filein >> hashChecksum;
    } catch (std::exception& e) {
        return error("%s : Deserialize or I/O error - %s", __func__, e.what());
    }

    // Verify checksum
    // Must mirror the hashBlock-then-payload order used by WriteToDisk.
    CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
    hasher << hashBlock;
    hasher << *this;
    if (hashChecksum != hasher.GetHash())
        return error("CBlockUndo::ReadFromDisk : Checksum mismatch");

    return true;
}

// Human-readable summary of one block file's contents (counts, byte size,
// height range and date range) for logging/diagnostics.
std::string CBlockFileInfo::ToString() const
{
    return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}

// Static-destructor cleanup helper: the single global instance below frees the
// heap-allocated CBlockIndex entries and clears the orphan-transaction maps at
// process shutdown, so leak checkers don't report them.
class CMainCleanup
{
public:
    CMainCleanup() {}
    ~CMainCleanup()
    {
        // block headers
        BlockMap::iterator it1 = mapBlockIndex.begin();
        for (; it1 != mapBlockIndex.end(); it1++)
            delete (*it1).second;
        mapBlockIndex.clear();

        // orphan transactions
        mapOrphanTransactions.clear();
        mapOrphanTransactionsByPrev.clear();
    }
} instance_of_cmaincleanup;
/*
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <thrift/lib/cpp/async/TAsyncSocket.h>
#include <thrift/lib/cpp/protocol/TBinaryProtocol.h>
#include <thrift/lib/cpp/transport/THttpServer.h>
#include <thrift/lib/cpp/util/ScopedServerThread.h>
#include <thrift/lib/cpp/util/TThreadedServerCreator.h>
#include <thrift/lib/cpp2/test/gen-cpp/TestService.h>
#include <thrift/lib/cpp2/test/gen-cpp2/TestService.h>
#include <thrift/lib/cpp2/async/HeaderClientChannel.h>

#include <boost/lexical_cast.hpp>

using namespace apache::thrift;
using namespace apache::thrift::async;
using namespace apache::thrift::protocol;
using namespace apache::thrift::test;
using namespace apache::thrift::test::cpp2;
using namespace apache::thrift::transport;
using namespace apache::thrift::util;
using std::string;

// Server-side handler used by the tests below. Each method produces a fixed,
// easily-checked result so the client-side EXPECTs can pin exact values.
class TestServiceHandler : public TestServiceIf {
 public:
  // Returns "test<size>", e.g. sendResponse(_, 24) yields "test24".
  void sendResponse(string& _return, int64_t size) override {
    _return = "test" + boost::lexical_cast<std::string>(size);
  }

  // Sleeps for `size` microseconds and produces no response payload.
  void noResponse(int64_t size) override { usleep(size); }

  // Echoes the request with a fixed filler suffix appended.
  void echoRequest(string& _return, const string& req) override {
    _return = req + "ccccccccccccccccccccccccccccccccccccccccccccc";
  }

  // Returns a 4 KiB payload; used to exercise long responses.
  void serializationTest(string& _return, bool /* inEventBase */) override {
    _return = string(4096, 'a');
  }

  void eventBaseAsync(string& _return) override { _return = "hello world"; }

  void notCalledBack() override {}
  void voidResponse() override {}
  int32_t processHeader() override { return 1; }
  void echoIOBuf(string& /*_return*/, const string& /*req*/) override {}
};

// Starts a threaded Thrift server speaking binary protocol over HTTP on an
// ephemeral port (0). The returned ScopedServerThread owns the server; it is
// shut down when the unique_ptr is destroyed.
std::unique_ptr<ScopedServerThread> createHttpServer() {
  auto handler = std::make_shared<TestServiceHandler>();
  auto processor = std::make_shared<TestServiceProcessor>(handler);
  std::shared_ptr<TTransportFactory> transportFactory =
      std::make_shared<THttpServerTransportFactory>();
  std::shared_ptr<TProtocolFactory> protocolFactory =
      std::make_shared<TBinaryProtocolFactoryT<THttpServer>>();
  TThreadedServerCreator serverCreator(processor, 0, transportFactory,
                                       protocolFactory);
  return std::make_unique<ScopedServerThread>(&serverCreator);
}

// Drives a HeaderClientChannel configured as an HTTP client against the
// server above: one call with an argument (sendResponse) and one without
// (eventBaseAsync). Each eb.loop() runs until the pending callback fires.
TEST(HeaderClientChannelHttpTest, SimpleTest) {
  std::unique_ptr<ScopedServerThread> serverThread = createHttpServer();
  folly::EventBase eb;
  const folly::SocketAddress* addr = serverThread->getAddress();
  std::shared_ptr<TAsyncSocket> socket = TAsyncSocket::newSocket(&eb, *addr);
  auto channel = HeaderClientChannel::newChannel(socket);
  channel->useAsHttpClient("127.0.0.1", "meh");
  channel->setProtocolId(T_BINARY_PROTOCOL);

  TestServiceAsyncClient client(std::move(channel));
  client.sendResponse(
      [] (apache::thrift::ClientReceiveState&& state) {
        // Log the server-side exception (if any) before failing the EXPECT,
        // so the test output shows the root cause.
        if (state.exception()) {
          try {
            std::rethrow_exception(state.exception());
          } catch (const std::exception& e) {
            LOG(INFO) << e.what();
          }
        }
        EXPECT_TRUE(state.exception() == nullptr);
        std::string res;
        TestServiceAsyncClient::recv_sendResponse(res, state);
        EXPECT_EQ(res, "test24");
      },
      24);
  eb.loop();

  client.eventBaseAsync(
      [] (apache::thrift::ClientReceiveState&& state) {
        EXPECT_TRUE(state.exception() == nullptr);
        std::string res;
        TestServiceAsyncClient::recv_eventBaseAsync(res, state);
        EXPECT_EQ(res, "hello world");
      });
  eb.loop();
}

// Verifies a large (4 KiB) response round-trips intact over the HTTP channel.
TEST(HeaderClientChannel, LongResponse) {
  std::unique_ptr<ScopedServerThread> serverThread = createHttpServer();
  folly::EventBase eb;
  const folly::SocketAddress* addr = serverThread->getAddress();
  std::shared_ptr<TAsyncSocket> socket = TAsyncSocket::newSocket(&eb, *addr);
  auto channel = HeaderClientChannel::newChannel(socket);
  channel->useAsHttpClient("127.0.0.1", "meh");
  channel->setProtocolId(T_BINARY_PROTOCOL);

  TestServiceAsyncClient client(std::move(channel));
  client.serializationTest(
      [] (apache::thrift::ClientReceiveState&& state) {
        EXPECT_TRUE(state.exception() == nullptr);
        std::string res;
        TestServiceAsyncClient::recv_serializationTest(res, state);
        EXPECT_EQ(res, string(4096, 'a'));
      },
      true);
  eb.loop();
}
/*ckwg +29 * Copyright 2018 by Kitware, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither name of Kitware, Inc. nor the names of any contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/**
 * \file
 * \brief Test reading KPF activities (and tracks)
 */

#include <gtest/gtest.h>
#include <test_gtest.h>

#include <track_oracle/core/track_oracle_core.h>
#include <track_oracle/core/track_field.h>
#include <track_oracle/data_terms/data_terms.h>
#include <track_oracle/file_formats/file_format_manager.h>
#include <track_oracle/file_formats/track_filter_kpf_activity/track_filter_kpf_activity.h>

#include <map>
#include <utility>

namespace to = ::kwiver::track_oracle;
namespace dt = ::kwiver::track_oracle::dt;

using std::string;

// Directory holding the test data files; filled in from argv[1] in main().
string g_data_dir;

// ----------------------------------------------------------------------------
// Test entry point: initializes gtest and captures the data directory from
// the first command-line argument before running the tests.
int main( int argc, char** argv )
{
  ::testing::InitGoogleTest( &argc, argv );
  GET_ARG( 1, g_data_dir );
  return RUN_ALL_TESTS();
}

// ------------------------------------------------------------------
// Loads the geometry (tracks) file, then loads the matching activities file
// through the KPF activity filter, checking both reads succeed and exactly
// one track is read from the geometry file.
TEST( track_oracle, kpf_activities )
{
  to::track_handle_list_type kpf_tracks, kpf_activities;
  {
    string fn = g_data_dir+"/test-large-IDs.geom.yml";
    bool rc = to::file_format_manager::read( fn, kpf_tracks );
    EXPECT_TRUE( rc ) << " reading tracks from '" << fn << "'";
    size_t n_read = kpf_tracks.size();
    EXPECT_EQ( n_read, 1 ) << " number of tracks read";
  }
  {
    // domain 2 — presumably the KPF activity domain used by the test data;
    // TODO confirm against the .activities.yml fixture.
    const int domain=2;
    string fn = g_data_dir+"/test-large-IDs.activities.yml";
    bool rc = to::track_filter_kpf_activity::read( fn, kpf_tracks, domain, kpf_activities );
    EXPECT_TRUE( rc ) << " reading activities from '" << fn << "'";
  }
}
#include "InputChecker.h"

namespace HuobiSwap {

// Backing storage for the singleton; stays null until the first checker() call.
InputChecker* InputChecker::instance = nullptr;

// Returns the process-wide InputChecker, creating it lazily on first use.
// The instance is intentionally never deleted (lives for the whole process).
// NOTE(review): lazy initialization is not thread-safe — confirm this is only
// reached from a single thread.
InputChecker* InputChecker::checker() {
    if (!instance) {
        instance = new InputChecker();
    }
    return instance;
}

}  // namespace HuobiSwap
#pragma once #include "custom-types/shared/macros.hpp" #include "custom-types/shared/coroutine.hpp" #include "TestPathParser.hpp" DECLARE_CLASS(Tester, ParserCustomType, "UnityEngine", "MonoBehaviour", sizeof(Il2CppObject) + sizeof(void*), TestPathParser* parser; constexpr void Init(TestPathParser& p) { parser = &p; } DECLARE_INSTANCE_METHOD(void, Start); )
//===- AMDGPURegisterBankInfo.cpp -------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the RegisterBankInfo class for
/// AMDGPU.
///
/// \par
///
/// AMDGPU has unique register bank constraints that require special high level
/// strategies to deal with. There are two main true physical register banks
/// VGPR (vector), and SGPR (scalar). Additionally the VCC register bank is a
/// sort of pseudo-register bank needed to represent SGPRs used in a vector
/// boolean context. There is also the AGPR bank, which is a special purpose
/// physical register bank present on some subtargets.
///
/// Copying from VGPR to SGPR is generally illegal, unless the value is known to
/// be uniform. It is generally not valid to legalize operands by inserting
/// copies as on other targets. Operations which require uniform, SGPR operands
/// generally require scalarization by repeatedly executing the instruction,
/// activating each set of lanes using a unique set of input values. This is
/// referred to as a waterfall loop.
///
/// \par Booleans
///
/// Booleans (s1 values) require special consideration. A vector compare result
/// is naturally a bitmask with one bit per lane, in a 32 or 64-bit
/// register. These are represented with the VCC bank. During selection, we need
/// to be able to unambiguously go back from a register class to a register
/// bank. To distinguish whether an SGPR should use the SGPR or VCC register
/// bank, we need to know the use context type. An SGPR s1 value always means a
/// VCC bank value, otherwise it will be the SGPR bank. A scalar compare sets
/// SCC, which is a 1-bit unaddressable register.
This will need to be copied to
/// a 32-bit virtual register. Taken together, this means we need to adjust the
/// type of boolean operations to be regbank legal. All SALU booleans need to be
/// widened to 32-bits, and all VALU booleans need to be s1 values.
///
/// A noteworthy exception to the s1-means-vcc rule is for legalization artifact
/// casts. G_TRUNC s1 results, and G_SEXT/G_ZEXT/G_ANYEXT sources are never vcc
/// bank. A non-boolean source (such as a truncate from a 1-bit load from
/// memory) will require a copy to the VCC bank which will require clearing the
/// high bits and inserting a compare.
///
/// \par Constant bus restriction
///
/// VALU instructions have a limitation known as the constant bus
/// restriction. Most VALU instructions can use SGPR operands, but may read at
/// most 1 SGPR or constant literal value (this is relaxed to 2 in gfx10 for
/// most instructions). This is one unique SGPR, so the same SGPR may be used
/// for multiple operands. From a register bank perspective, any combination of
/// operands should be legal as an SGPR, but this is contextually dependent on
/// the SGPR operands all being the same register. It is therefore optimal to
/// choose the SGPR with the most uses to minimize the number of copies.
///
/// We avoid trying to solve this problem in RegBankSelect. Any VALU G_*
/// operation should have its source operands all mapped to VGPRs (except for
/// VCC), inserting copies from any SGPR operands. This is the most trivial
/// legal mapping. Anything beyond the simplest 1:1 instruction selection would
/// be too complicated to solve here. Every optimization pattern or instruction
/// selected to multiple outputs would have to enforce this rule, and there
/// would be additional complexity in tracking this rule for every G_*
/// operation. By forcing all inputs to VGPRs, it also simplifies the task of
/// picking the optimal operand combination from a post-isel optimization pass.
///
//===----------------------------------------------------------------------===//

#include "AMDGPURegisterBankInfo.h"

#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Constants.h"

#define GET_TARGET_REGBANK_IMPL
#include "AMDGPUGenRegisterBank.inc"

// This file will be TableGen'ed at some point.
#include "AMDGPUGenRegisterBankInfo.def"

using namespace llvm;
using namespace MIPatternMatch;

namespace {

// Observer to apply a register bank to new registers created by
// LegalizerHelper. Queues created instructions and assigns banks to their
// bank-less virtual registers when the observer is destroyed.
class ApplyRegBankMapping final : public GISelChangeObserver {
private:
  const AMDGPURegisterBankInfo &RBI;
  MachineRegisterInfo &MRI;
  const RegisterBank *NewBank;       // Bank to assign to unassigned vregs.
  SmallVector<MachineInstr *, 4> NewInsts; // Instructions created since attach.

public:
  ApplyRegBankMapping(const AMDGPURegisterBankInfo &RBI_,
                      MachineRegisterInfo &MRI_, const RegisterBank *RB)
    : RBI(RBI_), MRI(MRI_), NewBank(RB) {}

  // Deferred application: banks are assigned only after all new instructions
  // have been built, so operands are fully in place.
  ~ApplyRegBankMapping() {
    for (MachineInstr *MI : NewInsts)
      applyBank(*MI);
  }

  /// Set any registers that don't have a set register class or bank to SALU.
  void applyBank(MachineInstr &MI) {
    const unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::G_ANYEXT || Opc == AMDGPU::G_ZEXT ||
        Opc == AMDGPU::G_SEXT) {
      // LegalizerHelper wants to use the basic legalization artifacts when
      // widening etc. We don't handle selection with vcc in artifact sources,
      // so we need to use a select instead to handle these properly.
      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();
      const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, MRI, *RBI.TRI);
      if (SrcBank == &AMDGPU::VCCRegBank) {
        const LLT S32 = LLT::scalar(32);
        assert(MRI.getType(SrcReg) == LLT::scalar(1));
        assert(MRI.getType(DstReg) == S32);
        assert(NewBank == &AMDGPU::VGPRRegBank);

        // Replace the extension with a select, which really uses the boolean
        // source.
        MachineIRBuilder B(MI);
        // G_SEXT of a true bool yields -1 (all ones); zext/anyext yield 1.
        auto True = B.buildConstant(S32, Opc == AMDGPU::G_SEXT ? -1 : 1);
        auto False = B.buildConstant(S32, 0);
        B.buildSelect(DstReg, SrcReg, True, False);
        MRI.setRegBank(True.getReg(0), *NewBank);
        MRI.setRegBank(False.getReg(0), *NewBank);
        MI.eraseFromParent();
      }

      assert(!MRI.getRegClassOrRegBank(DstReg));
      MRI.setRegBank(DstReg, *NewBank);
      return;
    }

#ifndef NDEBUG
    // Sanity check: truncate results must never land in the VCC bank
    // (see the s1-means-vcc exception for legalization artifacts).
    if (Opc == AMDGPU::G_TRUNC) {
      Register DstReg = MI.getOperand(0).getReg();
      const RegisterBank *DstBank = RBI.getRegBank(DstReg, MRI, *RBI.TRI);
      assert(DstBank != &AMDGPU::VCCRegBank);
    }
#endif

    for (MachineOperand &Op : MI.operands()) {
      if (!Op.isReg())
        continue;

      // We may see physical registers if building a real MI
      Register Reg = Op.getReg();
      if (Reg.isPhysical() || MRI.getRegClassOrRegBank(Reg))
        continue;

      const RegisterBank *RB = NewBank;
      // s1 values here are vector booleans, which live in the VCC bank.
      if (MRI.getType(Reg) == LLT::scalar(1)) {
        assert(NewBank == &AMDGPU::VGPRRegBank &&
               "s1 operands should only be used for vector bools");
        assert((MI.getOpcode() != AMDGPU::G_TRUNC &&
                MI.getOpcode() != AMDGPU::G_ANYEXT) &&
               "not expecting legalization artifacts here");
        RB = &AMDGPU::VCCRegBank;
      }

      MRI.setRegBank(Reg, *RB);
    }
  }

  void erasingInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    // At this point, the instruction was just inserted and has no operands.
    NewInsts.push_back(&MI);
  }

  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}
};

} // end anonymous namespace

AMDGPURegisterBankInfo::AMDGPURegisterBankInfo(const GCNSubtarget &ST)
    : AMDGPUGenRegisterBankInfo(),
      Subtarget(ST),
      TRI(Subtarget.getRegisterInfo()),
      TII(Subtarget.getInstrInfo()) {

  // HACK: Until this is fully tablegen'd.
  static llvm::once_flag InitializeRegisterBankFlag;

  // One-time consistency check that the generated bank IDs line up with the
  // bank singletons.
  static auto InitializeRegisterBankOnce = [this]() {
    assert(&getRegBank(AMDGPU::SGPRRegBankID) == &AMDGPU::SGPRRegBank &&
           &getRegBank(AMDGPU::VGPRRegBankID) == &AMDGPU::VGPRRegBank &&
           &getRegBank(AMDGPU::AGPRRegBankID) == &AMDGPU::AGPRRegBank);
    (void)this;
  };

  llvm::call_once(InitializeRegisterBankFlag, InitializeRegisterBankOnce);
}

/// Return true if \p Bank is one of the vector banks (VGPR or AGPR).
static bool isVectorRegisterBank(const RegisterBank &Bank) {
  unsigned BankID = Bank.getID();
  return BankID == AMDGPU::VGPRRegBankID || BankID == AMDGPU::AGPRRegBankID;
}

/// Cost of copying \p Size bits from \p Src to \p Dst. Copies that would be
/// illegal (vector/VCC -> SGPR) report unsigned max so they are never chosen.
unsigned AMDGPURegisterBankInfo::copyCost(const RegisterBank &Dst,
                                          const RegisterBank &Src,
                                          unsigned Size) const {
  // TODO: Should there be a UniformVGPRRegBank which can use readfirstlane?
  if (Dst.getID() == AMDGPU::SGPRRegBankID &&
      (isVectorRegisterBank(Src) || Src.getID() == AMDGPU::VCCRegBankID)) {
    return std::numeric_limits<unsigned>::max();
  }

  // Bool values are tricky, because the meaning is based on context. The SCC
  // and VCC banks are for the natural scalar and vector conditions produced by
  // a compare.
  //
  // Legalization doesn't know about the necessary context, so an s1 use may
  // have been a truncate from an arbitrary value, in which case a copy (lowered
  // as a compare with 0) needs to be inserted.
  if (Size == 1 &&
      (Dst.getID() == AMDGPU::SGPRRegBankID) &&
      (isVectorRegisterBank(Src) ||
       Src.getID() == AMDGPU::SGPRRegBankID ||
       Src.getID() == AMDGPU::VCCRegBankID))
    return std::numeric_limits<unsigned>::max();

  // There is no direct copy between AGPRs.
  if (Dst.getID() == AMDGPU::AGPRRegBankID &&
      Src.getID() == AMDGPU::AGPRRegBankID)
    return 4;

  return RegisterBankInfo::copyCost(Dst, Src, Size);
}

unsigned AMDGPURegisterBankInfo::getBreakDownCost(
  const ValueMapping &ValMapping,
  const RegisterBank *CurBank) const {
  // Check if this is a breakdown for G_LOAD to move the pointer from SGPR to
  // VGPR.
  // FIXME: Is there a better way to do this?
  if (ValMapping.NumBreakDowns >= 2 || ValMapping.BreakDown[0].Length >= 64)
    return 10; // This is expensive.

  assert(ValMapping.NumBreakDowns == 2 &&
         ValMapping.BreakDown[0].Length == 32 &&
         ValMapping.BreakDown[0].StartIdx == 0 &&
         ValMapping.BreakDown[1].Length == 32 &&
         ValMapping.BreakDown[1].StartIdx == 32 &&
         ValMapping.BreakDown[0].RegBank == ValMapping.BreakDown[1].RegBank);

  // 32-bit extract of a 64-bit value is just access of a subregister, so free.
  // TODO: Cost of 0 hits assert, though it's not clear it's what we really
  // want.

  // TODO: 32-bit insert to a 64-bit SGPR may incur a non-free copy due to SGPR
  // alignment restrictions, but this probably isn't important.
  return 1;
}

/// Map a register class back to a register bank. s1 SGPR values mean the VCC
/// bank; real scalar booleans were promoted to SReg_32 (see file header).
const RegisterBank &
AMDGPURegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
                                               LLT Ty) const {
  if (&RC == &AMDGPU::SReg_1RegClass)
    return AMDGPU::VCCRegBank;

  // We promote real scalar booleans to SReg_32. Any SGPR using s1 is really a
  // VCC-like use.
  if (TRI->isSGPRClass(&RC)) {
    // FIXME: This probably came from a copy from a physical register, which
    // should be inferable from the copied to-type. We don't have many boolean
    // physical register constraints so just assume a normal SGPR for now.
    if (!Ty.isValid())
      return AMDGPU::SGPRRegBank;

    return Ty == LLT::scalar(1) ? AMDGPU::VCCRegBank : AMDGPU::SGPRRegBank;
  }

  return TRI->isAGPRClass(&RC) ?
    AMDGPU::AGPRRegBank : AMDGPU::VGPRRegBank;
}

/// Build one alternative InstructionMapping per \p Table entry. \p RegSrcOpIdx
/// gives the operand index each table column applies to; explicit defs are
/// always mapped to VGPRs of their size.
template <unsigned NumOps>
RegisterBankInfo::InstructionMappings
AMDGPURegisterBankInfo::addMappingFromTable(
  const MachineInstr &MI, const MachineRegisterInfo &MRI,
  const std::array<unsigned, NumOps> RegSrcOpIdx,
  ArrayRef<OpRegBankEntry<NumOps>> Table) const {

  InstructionMappings AltMappings;

  SmallVector<const ValueMapping *, 10> Operands(MI.getNumOperands());

  // Cache the size of each operand the table controls.
  unsigned Sizes[NumOps];
  for (unsigned I = 0; I < NumOps; ++I) {
    Register Reg = MI.getOperand(RegSrcOpIdx[I]).getReg();
    Sizes[I] = getSizeInBits(Reg, MRI, *TRI);
  }

  for (unsigned I = 0, E = MI.getNumExplicitDefs(); I != E; ++I) {
    unsigned SizeI = getSizeInBits(MI.getOperand(I).getReg(), MRI, *TRI);
    Operands[I] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, SizeI);
  }

  // getInstrMapping's default mapping uses ID 1, so start at 2.
  unsigned MappingID = 2;
  for (const auto &Entry : Table) {
    for (unsigned I = 0; I < NumOps; ++I) {
      int OpIdx = RegSrcOpIdx[I];
      Operands[OpIdx] = AMDGPU::getValueMapping(Entry.RegBanks[I], Sizes[I]);
    }

    AltMappings.push_back(&getInstructionMapping(MappingID++, Entry.Cost,
                                                 getOperandsMapping(Operands),
                                                 Operands.size()));
  }

  return AltMappings;
}

RegisterBankInfo::InstructionMappings
AMDGPURegisterBankInfo::getInstrAlternativeMappingsIntrinsic(
  const MachineInstr &MI, const MachineRegisterInfo &MRI) const {
  switch (MI.getIntrinsicID()) {
  case Intrinsic::amdgcn_readlane: {
    static const OpRegBankEntry<3> Table[2] = {
      // Perfectly legal.
      { { AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID }, 1 },

      // Need a readfirstlane for the index.
      { { AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 2 }
    };

    const std::array<unsigned, 3> RegSrcOpIdx = { { 0, 2, 3 } };
    return addMappingFromTable<3>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  case Intrinsic::amdgcn_writelane: {
    static const OpRegBankEntry<4> Table[4] = {
      // Perfectly legal.
      { { AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID }, 1 },

      // Need readfirstlane of first op
      { { AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID }, 2 },

      // Need readfirstlane of second op
      { { AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 2 },

      // Need readfirstlane of both ops
      { { AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 3 }
    };

    // Operand indices: dst, value, lane select, old value.
    // NOTE(review): the original comment here read "rsrc, voffset, offset",
    // which appears copied from a buffer intrinsic — confirm against the
    // amdgcn.writelane operand order.
    const std::array<unsigned, 4> RegSrcOpIdx = { { 0, 2, 3, 4 } };
    return addMappingFromTable<4>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  default:
    return RegisterBankInfo::getInstrAlternativeMappings(MI);
  }
}

RegisterBankInfo::InstructionMappings
AMDGPURegisterBankInfo::getInstrAlternativeMappingsIntrinsicWSideEffects(
  const MachineInstr &MI, const MachineRegisterInfo &MRI) const {

  switch (MI.getIntrinsicID()) {
  case Intrinsic::amdgcn_s_buffer_load: {
    static const OpRegBankEntry<2> Table[4] = {
      // Perfectly legal.
      { { AMDGPU::SGPRRegBankID, AMDGPU::SGPRRegBankID }, 1 },

      // Only need 1 register in loop
      { { AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID }, 300 },

      // Have to waterfall the resource.
      { { AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID }, 1000 },

      // Have to waterfall the resource, and the offset.
      { { AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 1500 }
    };

    // rsrc, offset
    const std::array<unsigned, 2> RegSrcOpIdx = { { 2, 3 } };
    return addMappingFromTable<2>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap: {
    // VGPR = M0, VGPR
    static const OpRegBankEntry<3> Table[2] = {
      // Perfectly legal.
      { { AMDGPU::VGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::VGPRRegBankID }, 1 },

      // Need a readfirstlane for m0
      { { AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 2 }
    };

    const std::array<unsigned, 3> RegSrcOpIdx = { { 0, 2, 3 } };
    return addMappingFromTable<3>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  case Intrinsic::amdgcn_s_sendmsg:
  case Intrinsic::amdgcn_s_sendmsghalt: {
    // FIXME: Should have no register for immediate
    static const OpRegBankEntry<1> Table[2] = {
      // Perfectly legal.
      { { AMDGPU::SGPRRegBankID }, 1 },

      // Need readlane
      { { AMDGPU::VGPRRegBankID }, 3 }
    };

    const std::array<unsigned, 1> RegSrcOpIdx = { { 2 } };
    return addMappingFromTable<1>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  default:
    return RegisterBankInfo::getInstrAlternativeMappings(MI);
  }
}

// True if the load's underlying IR value carries !amdgpu.noclobber metadata.
static bool memOpHasNoClobbered(const MachineMemOperand *MMO) {
  const Instruction *I = dyn_cast_or_null<Instruction>(MMO->getValue());
  return I && I->getMetadata("amdgpu.noclobber");
}

// FIXME: Returns uniform if there's no source value information. This is
// probably wrong.
static bool isScalarLoadLegal(const MachineInstr &MI) {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const unsigned AS = MMO->getAddrSpace();
  const bool IsConst = AS == AMDGPUAS::CONSTANT_ADDRESS ||
                       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;

  // There are no extending SMRD/SMEM loads, and they require 4-byte alignment.
  return MMO->getSize() >= 4 && MMO->getAlign() >= Align(4) &&
    // Can't do a scalar atomic load.
    !MMO->isAtomic() &&
    // Don't use scalar loads for volatile accesses to non-constant address
    // spaces.
    (IsConst || !MMO->isVolatile()) &&
    // Memory must be known constant, or not written before this load.
    (IsConst || MMO->isInvariant() || memOpHasNoClobbered(MMO)) &&
    AMDGPUInstrInfo::isUniformMMO(MMO);
}

RegisterBankInfo::InstructionMappings
AMDGPURegisterBankInfo::getInstrAlternativeMappings(
    const MachineInstr &MI) const {

  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  InstructionMappings AltMappings;
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
    if (Size == 1) {
      // A boolean constant can materialize in any of the three banks.
      static const OpRegBankEntry<1> Table[3] = {
        { { AMDGPU::VGPRRegBankID }, 1 },
        { { AMDGPU::SGPRRegBankID }, 1 },
        { { AMDGPU::VCCRegBankID }, 1 }
      };

      return addMappingFromTable<1>(MI, MRI, {{ 0 }}, Table);
    }

    LLVM_FALLTHROUGH;
  }
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_GLOBAL_VALUE: {
    static const OpRegBankEntry<1> Table[2] = {
      { { AMDGPU::VGPRRegBankID }, 1 },
      { { AMDGPU::SGPRRegBankID }, 1 }
    };

    return addMappingFromTable<1>(MI, MRI, {{ 0 }}, Table);
  }
  case TargetOpcode::G_AND:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_XOR: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);

    if (Size == 1) {
      // s_{and|or|xor}_b32 set scc when the result of the 32-bit op is not 0.
      const InstructionMapping &SCCMapping = getInstructionMapping(
        1, 1, getOperandsMapping(
          {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32),
           AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32),
           AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32)}),
        3); // Num Operands
      AltMappings.push_back(&SCCMapping);

      const InstructionMapping &VCCMapping0 = getInstructionMapping(
        2, 1, getOperandsMapping(
          {AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, Size),
           AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, Size),
           AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, Size)}),
        3); // Num Operands
      AltMappings.push_back(&VCCMapping0);
      return AltMappings;
    }

    if (Size != 64)
      break;

    const InstructionMapping &SSMapping = getInstructionMapping(
      1, 1, getOperandsMapping(
        {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size)}),
      3); // Num Operands
    AltMappings.push_back(&SSMapping);

    const InstructionMapping &VVMapping = getInstructionMapping(
      2, 2, getOperandsMapping(
        {AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size),
         AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size),
         AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size)}),
      3); // Num Operands
    AltMappings.push_back(&VVMapping);
    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
    LLT PtrTy = MRI.getType(MI.getOperand(1).getReg());
    unsigned PtrSize = PtrTy.getSizeInBits();
    unsigned AS = PtrTy.getAddressSpace();

    // Scalar (SMEM) loads are only offered for address spaces where they are
    // legal and when the memory operand qualifies (see isScalarLoadLegal).
    if ((AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::REGION_ADDRESS &&
         AS != AMDGPUAS::PRIVATE_ADDRESS) &&
        isScalarLoadLegal(MI)) {
      const InstructionMapping &SSMapping = getInstructionMapping(
          1, 1, getOperandsMapping(
                    {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
                     AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, PtrSize)}),
          2); // Num Operands
      AltMappings.push_back(&SSMapping);
    }

    const InstructionMapping &VVMapping = getInstructionMapping(
        2, 1,
        getOperandsMapping(
            {AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
             AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, PtrSize)}),
        2); // Num Operands
    AltMappings.push_back(&VVMapping);

    // It may be possible to have a vgpr = load sgpr mapping here, because
    // the mubuf instructions support this kind of load, but probably for only
    // gfx7 and older.  However, the addressing mode matching in the instruction
    // selector should be able to do a better job of detecting and selecting
    // these kinds of loads from the vgpr = load vgpr mapping.

    return AltMappings;
  }
  case TargetOpcode::G_SELECT: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
    const InstructionMapping &SSMapping = getInstructionMapping(1, 1,
      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1),
                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size)}),
      4); // Num Operands
    AltMappings.push_back(&SSMapping);

    const InstructionMapping &VVMapping = getInstructionMapping(2, 1,
      getOperandsMapping({AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1),
                          AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size),
                          AMDGPU::getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size)}),
      4); // Num Operands
    AltMappings.push_back(&VVMapping);

    return AltMappings;
  }
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX: {
    static const OpRegBankEntry<3> Table[2] = {
      { { AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID, AMDGPU::VGPRRegBankID }, 1 },

      // Scalar requires cmp+select, and extends if 16-bit.
      // FIXME: Should there be separate costs for 32 and 16-bit
      { { AMDGPU::SGPRRegBankID, AMDGPU::SGPRRegBankID, AMDGPU::SGPRRegBankID }, 3 }
    };

    const std::array<unsigned, 3> RegSrcOpIdx = { { 0, 1, 2 } };
    return addMappingFromTable<3>(MI, MRI, RegSrcOpIdx, makeArrayRef(Table));
  }
  case TargetOpcode::G_UADDE:
  case TargetOpcode::G_USUBE:
  case TargetOpcode::G_SADDE:
  case TargetOpcode::G_SSUBE: {
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
    // Carry in/out are s1: SCC-style on the scalar path, VCC on the vector
    // path.
    const InstructionMapping &SSMapping = getInstructionMapping(1, 1,
      getOperandsMapping(
        {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size),
         AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1)}),
      5); // Num Operands
    AltMappings.push_back(&SSMapping);

    const InstructionMapping &VVMapping = getInstructionMapping(2, 1,
      getOperandsMapping({AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1),
                          AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size),
                          AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1)}),
      5); // Num Operands
    AltMappings.push_back(&VVMapping);
    return AltMappings;
  }
  case AMDGPU::G_BRCOND: {
    assert(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() == 1);

    // TODO: Change type to 32 for scalar
    const InstructionMapping &SMapping = getInstructionMapping(
      1, 1, getOperandsMapping(
        {AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 1), nullptr}),
      2); // Num Operands
    AltMappings.push_back(&SMapping);

    const InstructionMapping &VMapping = getInstructionMapping(
      1, 1, getOperandsMapping(
        {AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1), nullptr }),
      2); // Num Operands
    AltMappings.push_back(&VMapping);
    return AltMappings;
  }
  case AMDGPU::G_INTRINSIC:
    return getInstrAlternativeMappingsIntrinsic(MI, MRI);
  case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS:
    return getInstrAlternativeMappingsIntrinsicWSideEffects(MI, MRI);
  default:
    break;
  }
  return RegisterBankInfo::getInstrAlternativeMappings(MI);
}

/// Split the 64-bit value in \p Reg into two 32-bit halves (via
/// G_UNMERGE_VALUES), appending the new registers to \p Regs. The halves
/// inherit \p Reg's register bank.
void AMDGPURegisterBankInfo::split64BitValueForMapping(
  MachineIRBuilder &B,
  SmallVector<Register, 2> &Regs,
  LLT HalfTy,
  Register Reg) const {
  assert(HalfTy.getSizeInBits() == 32);
  MachineRegisterInfo *MRI = B.getMRI();
  Register LoLHS = MRI->createGenericVirtualRegister(HalfTy);
  Register HiLHS = MRI->createGenericVirtualRegister(HalfTy);
  const RegisterBank *Bank = getRegBank(Reg, *MRI, *TRI);
  MRI->setRegBank(LoLHS, *Bank);
  MRI->setRegBank(HiLHS, *Bank);

  Regs.push_back(LoLHS);
  Regs.push_back(HiLHS);

  B.buildInstr(AMDGPU::G_UNMERGE_VALUES)
    .addDef(LoLHS)
    .addDef(HiLHS)
    .addUse(Reg);
}

/// Replace the current type each register in \p Regs has with \p NewTy
static void setRegsToType(MachineRegisterInfo &MRI, ArrayRef<Register> Regs,
                          LLT NewTy) {
  for (Register Reg : Regs) {
    assert(MRI.getType(Reg).getSizeInBits() == NewTy.getSizeInBits());
    MRI.setType(Reg, NewTy);
  }
}

/// Return \p Ty with half as many bits: half the element count for vectors,
/// half the scalar width otherwise. Requires an even count/width.
static LLT getHalfSizedType(LLT Ty) {
  if (Ty.isVector()) {
    assert(Ty.getNumElements() % 2 == 0);
    return LLT::scalarOrVector(Ty.getNumElements() / 2, Ty.getElementType());
  }

  assert(Ty.getSizeInBits() % 2 == 0);
  return LLT::scalar(Ty.getSizeInBits() / 2);
}

/// Legalize instruction \p MI where operands in \p OpIndices must be SGPRs. If
/// any of the required SGPR operands are VGPRs, perform a waterfall loop to
/// execute the instruction for each unique combination of values in all lanes
/// in the wave. The block will be split such that rest of the instructions are
/// moved to a new block.
///
/// Essentially performs this loop:
//
/// Save Execution Mask
/// For (Lane : Wavefront) {
///   Enable Lane, Disable all other lanes
///   SGPR = read SGPR value for current lane from VGPR
///   VGPRResult[Lane] = use_op SGPR
/// }
/// Restore Execution Mask
///
/// There is additional complexity to try for compare values to identify the
/// unique values used.
bool AMDGPURegisterBankInfo::executeInWaterfallLoop( MachineIRBuilder &B, iterator_range<MachineBasicBlock::iterator> Range, SmallSet<Register, 4> &SGPROperandRegs, MachineRegisterInfo &MRI) const { SmallVector<Register, 4> ResultRegs; SmallVector<Register, 4> InitResultRegs; SmallVector<Register, 4> PhiRegs; // Track use registers which have already been expanded with a readfirstlane // sequence. This may have multiple uses if moving a sequence. DenseMap<Register, Register> WaterfalledRegMap; MachineBasicBlock &MBB = B.getMBB(); MachineFunction *MF = &B.getMF(); const TargetRegisterClass *WaveRC = TRI->getWaveMaskRegClass(); const unsigned WaveAndOpc = Subtarget.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; const unsigned MovTermOpc = Subtarget.isWave32() ? AMDGPU::S_MOV_B32_term : AMDGPU::S_MOV_B64_term; const unsigned XorTermOpc = Subtarget.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; const unsigned AndSaveExecOpc = Subtarget.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; const unsigned ExecReg = Subtarget.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; #ifndef NDEBUG const int OrigRangeSize = std::distance(Range.begin(), Range.end()); #endif for (MachineInstr &MI : Range) { for (MachineOperand &Def : MI.defs()) { LLT ResTy = MRI.getType(Def.getReg()); const RegisterBank *DefBank = getRegBank(Def.getReg(), MRI, *TRI); ResultRegs.push_back(Def.getReg()); Register InitReg = B.buildUndef(ResTy).getReg(0); Register PhiReg = MRI.createGenericVirtualRegister(ResTy); InitResultRegs.push_back(InitReg); PhiRegs.push_back(PhiReg); MRI.setRegBank(PhiReg, *DefBank); MRI.setRegBank(InitReg, *DefBank); } } Register SaveExecReg = MRI.createVirtualRegister(WaveRC); Register InitSaveExecReg = MRI.createVirtualRegister(WaveRC); // Don't bother using generic instructions/registers for the exec mask. 
B.buildInstr(TargetOpcode::IMPLICIT_DEF) .addDef(InitSaveExecReg); Register PhiExec = MRI.createVirtualRegister(WaveRC); Register NewExec = MRI.createVirtualRegister(WaveRC); // To insert the loop we need to split the block. Move everything before this // point to a new block, and insert a new empty block before this instruction. MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); MachineBasicBlock *RestoreExecBB = MF->CreateMachineBasicBlock(); MachineFunction::iterator MBBI(MBB); ++MBBI; MF->insert(MBBI, LoopBB); MF->insert(MBBI, RestoreExecBB); MF->insert(MBBI, RemainderBB); LoopBB->addSuccessor(RestoreExecBB); LoopBB->addSuccessor(LoopBB); // Move the rest of the block into a new block. RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); RemainderBB->splice(RemainderBB->begin(), &MBB, Range.end(), MBB.end()); MBB.addSuccessor(LoopBB); RestoreExecBB->addSuccessor(RemainderBB); B.setInsertPt(*LoopBB, LoopBB->end()); B.buildInstr(TargetOpcode::PHI) .addDef(PhiExec) .addReg(InitSaveExecReg) .addMBB(&MBB) .addReg(NewExec) .addMBB(LoopBB); for (auto Result : zip(InitResultRegs, ResultRegs, PhiRegs)) { B.buildInstr(TargetOpcode::G_PHI) .addDef(std::get<2>(Result)) .addReg(std::get<0>(Result)) // Initial value / implicit_def .addMBB(&MBB) .addReg(std::get<1>(Result)) // Mid-loop value. .addMBB(LoopBB); } const DebugLoc &DL = B.getDL(); MachineInstr &FirstInst = *Range.begin(); // Move the instruction into the loop. Note we moved everything after // Range.end() already into a new block, so Range.end() is no longer valid. LoopBB->splice(LoopBB->end(), &MBB, Range.begin(), MBB.end()); // Figure out the iterator range after splicing the instructions. 
MachineBasicBlock::iterator NewBegin = FirstInst.getIterator(); auto NewEnd = LoopBB->end(); MachineBasicBlock::iterator I = Range.begin(); B.setInsertPt(*LoopBB, I); Register CondReg; assert(std::distance(NewBegin, NewEnd) == OrigRangeSize); for (MachineInstr &MI : make_range(NewBegin, NewEnd)) { for (MachineOperand &Op : MI.uses()) { if (!Op.isReg() || Op.isDef()) continue; Register OldReg = Op.getReg(); if (!SGPROperandRegs.count(OldReg)) continue; // See if we already processed this register in another instruction in the // sequence. auto OldVal = WaterfalledRegMap.find(OldReg); if (OldVal != WaterfalledRegMap.end()) { Op.setReg(OldVal->second); continue; } LLT OpTy = MRI.getType(Op.getReg()); unsigned OpSize = OpTy.getSizeInBits(); // Can only do a readlane of 32-bit pieces. if (OpSize == 32) { // Avoid extra copies in the simple case of one 32-bit register. Register CurrentLaneOpReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); MRI.setType(CurrentLaneOpReg, OpTy); constrainGenericRegister(Op.getReg(), AMDGPU::VGPR_32RegClass, MRI); // Read the next variant <- also loop target. BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentLaneOpReg) .addReg(Op.getReg()); Register NewCondReg = MRI.createVirtualRegister(WaveRC); bool First = CondReg == AMDGPU::NoRegister; if (First) CondReg = NewCondReg; // Compare the just read M0 value to all possible Idx values. B.buildInstr(AMDGPU::V_CMP_EQ_U32_e64) .addDef(NewCondReg) .addReg(CurrentLaneOpReg) .addReg(Op.getReg()); Op.setReg(CurrentLaneOpReg); if (!First) { Register AndReg = MRI.createVirtualRegister(WaveRC); // If there are multiple operands to consider, and the conditions. B.buildInstr(WaveAndOpc) .addDef(AndReg) .addReg(NewCondReg) .addReg(CondReg); CondReg = AndReg; } } else { LLT S32 = LLT::scalar(32); SmallVector<Register, 8> ReadlanePieces; // The compares can be done as 64-bit, but the extract needs to be done // in 32-bit pieces. 
bool Is64 = OpSize % 64 == 0; LLT UnmergeTy = OpSize % 64 == 0 ? LLT::scalar(64) : LLT::scalar(32); unsigned CmpOp = OpSize % 64 == 0 ? AMDGPU::V_CMP_EQ_U64_e64 : AMDGPU::V_CMP_EQ_U32_e64; // The compares can be done as 64-bit, but the extract needs to be done // in 32-bit pieces. // Insert the unmerge before the loop. B.setMBB(MBB); auto Unmerge = B.buildUnmerge(UnmergeTy, Op.getReg()); B.setInstr(*I); unsigned NumPieces = Unmerge->getNumOperands() - 1; for (unsigned PieceIdx = 0; PieceIdx != NumPieces; ++PieceIdx) { Register UnmergePiece = Unmerge.getReg(PieceIdx); Register CurrentLaneOpReg; if (Is64) { Register CurrentLaneOpRegLo = MRI.createGenericVirtualRegister(S32); Register CurrentLaneOpRegHi = MRI.createGenericVirtualRegister(S32); MRI.setRegClass(UnmergePiece, &AMDGPU::VReg_64RegClass); MRI.setRegClass(CurrentLaneOpRegLo, &AMDGPU::SReg_32_XM0RegClass); MRI.setRegClass(CurrentLaneOpRegHi, &AMDGPU::SReg_32_XM0RegClass); // Read the next variant <- also loop target. BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentLaneOpRegLo) .addReg(UnmergePiece, 0, AMDGPU::sub0); // Read the next variant <- also loop target. BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentLaneOpRegHi) .addReg(UnmergePiece, 0, AMDGPU::sub1); CurrentLaneOpReg = B.buildMerge(LLT::scalar(64), {CurrentLaneOpRegLo, CurrentLaneOpRegHi}) .getReg(0); MRI.setRegClass(CurrentLaneOpReg, &AMDGPU::SReg_64_XEXECRegClass); if (OpTy.getScalarSizeInBits() == 64) { // If we need to produce a 64-bit element vector, so use the // merged pieces ReadlanePieces.push_back(CurrentLaneOpReg); } else { // 32-bit element type. ReadlanePieces.push_back(CurrentLaneOpRegLo); ReadlanePieces.push_back(CurrentLaneOpRegHi); } } else { CurrentLaneOpReg = MRI.createGenericVirtualRegister(S32); MRI.setRegClass(UnmergePiece, &AMDGPU::VGPR_32RegClass); MRI.setRegClass(CurrentLaneOpReg, &AMDGPU::SReg_32_XM0RegClass); // Read the next variant <- also loop target. 
BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentLaneOpReg) .addReg(UnmergePiece); ReadlanePieces.push_back(CurrentLaneOpReg); } Register NewCondReg = MRI.createVirtualRegister(WaveRC); bool First = CondReg == AMDGPU::NoRegister; if (First) CondReg = NewCondReg; B.buildInstr(CmpOp) .addDef(NewCondReg) .addReg(CurrentLaneOpReg) .addReg(UnmergePiece); if (!First) { Register AndReg = MRI.createVirtualRegister(WaveRC); // If there are multiple operands to consider, and the conditions. B.buildInstr(WaveAndOpc) .addDef(AndReg) .addReg(NewCondReg) .addReg(CondReg); CondReg = AndReg; } } // FIXME: Build merge seems to switch to CONCAT_VECTORS but not // BUILD_VECTOR if (OpTy.isVector()) { auto Merge = B.buildBuildVector(OpTy, ReadlanePieces); Op.setReg(Merge.getReg(0)); } else { auto Merge = B.buildMerge(OpTy, ReadlanePieces); Op.setReg(Merge.getReg(0)); } MRI.setRegBank(Op.getReg(), AMDGPU::SGPRRegBank); } // Make sure we don't re-process this register again. WaterfalledRegMap.insert(std::make_pair(OldReg, Op.getReg())); } } B.setInsertPt(*LoopBB, LoopBB->end()); // Update EXEC, save the original EXEC value to VCC. B.buildInstr(AndSaveExecOpc) .addDef(NewExec) .addReg(CondReg, RegState::Kill); MRI.setSimpleHint(NewExec, CondReg); // Update EXEC, switch all done bits to 0 and all todo bits to 1. B.buildInstr(XorTermOpc) .addDef(ExecReg) .addReg(ExecReg) .addReg(NewExec); // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use // s_cbranch_scc0? // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. B.buildInstr(AMDGPU::S_CBRANCH_EXECNZ) .addMBB(LoopBB); // Save the EXEC mask before the loop. BuildMI(MBB, MBB.end(), DL, TII->get(MovTermOpc), SaveExecReg) .addReg(ExecReg); // Restore the EXEC mask after the loop. B.setMBB(*RestoreExecBB); B.buildInstr(MovTermOpc) .addDef(ExecReg) .addReg(SaveExecReg); // Set the insert point after the original instruction, so any new // instructions will be in the remainder. 
B.setInsertPt(*RemainderBB, RemainderBB->begin());

  return true;
}

// Return any unique registers used by \p MI at \p OpIndices that need to be
// handled in a waterfall loop. Returns these registers in \p
// SGPROperandRegs. Returns true if there are any operands to handle and a
// waterfall loop is necessary.
bool AMDGPURegisterBankInfo::collectWaterfallOperands(
  SmallSet<Register, 4> &SGPROperandRegs, MachineInstr &MI,
  MachineRegisterInfo &MRI, ArrayRef<unsigned> OpIndices) const {
  for (unsigned Op : OpIndices) {
    assert(MI.getOperand(Op).isUse());
    Register Reg = MI.getOperand(Op).getReg();
    const RegisterBank *OpBank = getRegBank(Reg, MRI, *TRI);
    // An operand currently assigned to the VGPR bank needs to be made
    // uniform (per-lane readfirstlane in the waterfall loop).
    if (OpBank->getID() == AMDGPU::VGPRRegBankID)
      SGPROperandRegs.insert(Reg);
  }

  // No operands need to be replaced, so no need to loop.
  return !SGPROperandRegs.empty();
}

// Convenience overload: run the waterfall loop over a single instruction.
// Returns false (and emits nothing) if none of \p OpIndices are in the VGPR
// bank.
bool AMDGPURegisterBankInfo::executeInWaterfallLoop(
  MachineIRBuilder &B, MachineInstr &MI, MachineRegisterInfo &MRI,
  ArrayRef<unsigned> OpIndices) const {
  // Use a set to avoid extra readfirstlanes in the case where multiple operands
  // are the same register.
  SmallSet<Register, 4> SGPROperandRegs;

  if (!collectWaterfallOperands(SGPROperandRegs, MI, MRI, OpIndices))
    return false;

  // Wrap the single instruction in a one-element range for the range-based
  // implementation.
  MachineBasicBlock::iterator I = MI.getIterator();
  return executeInWaterfallLoop(B, make_range(I, std::next(I)),
                                SGPROperandRegs, MRI);
}

// Convenience overload that constructs the MachineIRBuilder at \p MI.
bool AMDGPURegisterBankInfo::executeInWaterfallLoop(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  ArrayRef<unsigned> OpIndices) const {
  MachineIRBuilder B(MI);
  return executeInWaterfallLoop(B, MI, MRI, OpIndices);
}

// Legalize an operand that must be an SGPR by inserting a readfirstlane.
void AMDGPURegisterBankInfo::constrainOpWithReadfirstlane(
    MachineInstr &MI, MachineRegisterInfo &MRI, unsigned OpIdx) const {
  Register Reg = MI.getOperand(OpIdx).getReg();
  const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI);
  // Already uniform (not in the VGPR bank) — nothing to do.
  if (Bank != &AMDGPU::VGPRRegBank)
    return;

  // Insert v_readfirstlane_b32 to produce an SGPR copy of the value, and
  // rewrite the operand to use it.
  MachineIRBuilder B(MI);
  Register SGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  B.buildInstr(AMDGPU::V_READFIRSTLANE_B32)
    .addDef(SGPR)
    .addReg(Reg);

  MRI.setType(SGPR, MRI.getType(Reg));

  const TargetRegisterClass *Constrained =
      constrainGenericRegister(Reg, AMDGPU::VGPR_32RegClass, MRI);
  (void)Constrained;
  assert(Constrained && "Failed to constrain readfirstlane src reg");

  MI.getOperand(OpIdx).setReg(SGPR);
}

/// Split \p Ty into 2 pieces. The first will have \p FirstSize bits, and the
/// rest will be in the remainder.
static std::pair<LLT, LLT> splitUnequalType(LLT Ty, unsigned FirstSize) {
  unsigned TotalSize = Ty.getSizeInBits();
  if (!Ty.isVector())
    return {LLT::scalar(FirstSize), LLT::scalar(TotalSize - FirstSize)};

  LLT EltTy = Ty.getElementType();
  unsigned EltSize = EltTy.getSizeInBits();
  // The split point must fall on an element boundary.
  assert(FirstSize % EltSize == 0);

  unsigned FirstPartNumElts = FirstSize / EltSize;
  unsigned RemainderElts = (TotalSize - FirstSize) / EltSize;

  return {LLT::scalarOrVector(FirstPartNumElts, EltTy),
          LLT::scalarOrVector(RemainderElts, EltTy)};
}

/// Widen a (presumably 96-bit) type to 128 bits, preserving the element type
/// for vectors. The element size must evenly divide 128.
static LLT widen96To128(LLT Ty) {
  if (!Ty.isVector())
    return LLT::scalar(128);

  LLT EltTy = Ty.getElementType();
  assert(128 % EltTy.getSizeInBits() == 0);
  return LLT::vector(128 / EltTy.getSizeInBits(), EltTy);
}

// Lower a G_LOAD according to its selected register-bank mapping: SGPR-bank
// 96-bit loads are split or widened (below), and VGPR-bank loads wider than
// 128 bits are broken into legal pieces.
bool AMDGPURegisterBankInfo::applyMappingLoad(MachineInstr &MI,
                        const AMDGPURegisterBankInfo::OperandsMapper &OpdMapper,
                                              MachineRegisterInfo &MRI) const {
  Register DstReg = MI.getOperand(0).getReg();
  const LLT LoadTy = MRI.getType(DstReg);
  unsigned LoadSize = LoadTy.getSizeInBits();
  // Largest load handled without scalar-memory (SMRD) instructions.
  const unsigned MaxNonSmrdLoadSize = 128;

  const RegisterBank *PtrBank =
    OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
  if (PtrBank ==
&AMDGPU::SGPRRegBank) {
    // If the pointer is an SGPR, we ordinarily have nothing to do.
    if (LoadSize != 96)
      return false;

    MachineMemOperand *MMO = *MI.memoperands_begin();
    Register PtrReg = MI.getOperand(1).getReg();
    // 96-bit loads are only available for vector loads. We need to split this
    // into a 64-bit part, and 32 (unless we can widen to a 128-bit load).

    MachineIRBuilder B(MI);
    ApplyRegBankMapping O(*this, MRI, &AMDGPU::SGPRRegBank);
    GISelObserverWrapper Observer(&O);
    B.setChangeObserver(Observer);

    if (MMO->getAlign() < Align(16)) {
      // Under-aligned: split into a 64-bit load at offset 0 and a 32-bit load
      // at byte offset 8, then reassemble into the original 96-bit value.
      LLT Part64, Part32;
      std::tie(Part64, Part32) = splitUnequalType(LoadTy, 64);
      auto Load0 = B.buildLoadFromOffset(Part64, PtrReg, *MMO, 0);
      auto Load1 = B.buildLoadFromOffset(Part32, PtrReg, *MMO, 8);

      auto Undef = B.buildUndef(LoadTy);
      auto Ins0 = B.buildInsert(LoadTy, Undef, Load0, 0);
      B.buildInsert(MI.getOperand(0), Ins0, Load1, 64);
    } else {
      // Sufficiently aligned: do a single 128-bit load and extract the low
      // 96 bits.
      LLT WiderTy = widen96To128(LoadTy);
      auto WideLoad = B.buildLoadFromOffset(WiderTy, PtrReg, *MMO, 0);
      B.buildExtract(MI.getOperand(0), WideLoad, 0);
    }

    MI.eraseFromParent();
    return true;
  }

  // 128-bit loads are supported for all instruction types.
  if (LoadSize <= MaxNonSmrdLoadSize)
    return false;

  SmallVector<Register, 16> DefRegs(OpdMapper.getVRegs(0));
  SmallVector<Register, 1> SrcRegs(OpdMapper.getVRegs(1));

  if (SrcRegs.empty())
    SrcRegs.push_back(MI.getOperand(1).getReg());

  assert(LoadSize % MaxNonSmrdLoadSize == 0);

  // RegBankSelect only emits scalar types, so we need to reset the pointer
  // operand to a pointer type.
  Register BasePtrReg = SrcRegs[0];
  LLT PtrTy = MRI.getType(MI.getOperand(1).getReg());
  MRI.setType(BasePtrReg, PtrTy);

  MachineIRBuilder B(MI);

  // Split the oversized load into MaxNonSmrdLoadSize-sized pieces via the
  // generic legalizer helpers.
  unsigned NumSplitParts = LoadTy.getSizeInBits() / MaxNonSmrdLoadSize;
  const LLT LoadSplitTy = LoadTy.divide(NumSplitParts);
  ApplyRegBankMapping O(*this, MRI, &AMDGPU::VGPRRegBank);
  GISelObserverWrapper Observer(&O);
  B.setChangeObserver(Observer);
  LegalizerHelper Helper(B.getMF(), Observer, B);

  if (LoadTy.isVector()) {
    if (Helper.fewerElementsVector(MI, 0, LoadSplitTy) != LegalizerHelper::Legalized)
      return false;
  } else {
    if (Helper.narrowScalar(MI, 0, LoadSplitTy) != LegalizerHelper::Legalized)
      return false;
  }

  MRI.setRegBank(DstReg, AMDGPU::VGPRRegBank);
  return true;
}

// Lower G_DYN_STACKALLOC for a uniform (SGPR) size by bumping the stack
// pointer directly. Divergent sizes are left alone (returns false).
bool AMDGPURegisterBankInfo::applyMappingDynStackAlloc(
  MachineInstr &MI,
  const AMDGPURegisterBankInfo::OperandsMapper &OpdMapper,
  MachineRegisterInfo &MRI) const {
  const MachineFunction &MF = *MI.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const auto &TFI = *ST.getFrameLowering();

  // Guard in case the stack growth direction ever changes with scratch
  // instructions.
  if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown)
    return false;

  Register Dst = MI.getOperand(0).getReg();
  Register AllocSize = MI.getOperand(1).getReg();
  Align Alignment = assumeAligned(MI.getOperand(2).getImm());

  const RegisterBank *SizeBank = getRegBank(AllocSize, MRI, *TRI);

  // TODO: Need to emit a wave reduction to get the maximum size.
if (SizeBank != &AMDGPU::SGPRRegBank)
    return false;

  LLT PtrTy = MRI.getType(Dst);
  LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());

  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  Register SPReg = Info->getStackPtrOffsetReg();
  ApplyRegBankMapping ApplyBank(*this, MRI, &AMDGPU::SGPRRegBank);
  GISelObserverWrapper Observer(&ApplyBank);

  MachineIRBuilder B(MI);
  B.setChangeObserver(Observer);

  // Scale the byte size by the wavefront size (shift by log2), since the
  // stack pointer here is a per-wave value.
  auto WaveSize = B.buildConstant(LLT::scalar(32), ST.getWavefrontSizeLog2());
  auto ScaledSize = B.buildShl(IntPtrTy, AllocSize, WaveSize);

  auto SPCopy = B.buildCopy(PtrTy, SPReg);
  if (Alignment > TFI.getStackAlign()) {
    // Over-aligned alloca: bump SP then mask the low bits down to the
    // requested (wave-scaled) alignment.
    auto PtrAdd = B.buildPtrAdd(PtrTy, SPCopy, ScaledSize);
    B.buildMaskLowPtrBits(Dst, PtrAdd,
                          Log2(Alignment) + ST.getWavefrontSizeLog2());
  } else {
    B.buildPtrAdd(Dst, SPCopy, ScaledSize);
  }

  MI.eraseFromParent();
  return true;
}

// Apply the default (copy-inserting) mapping to an image intrinsic, then run
// a waterfall loop over the rsrc (and sampler, if present) operands, which
// must be uniform.
bool AMDGPURegisterBankInfo::applyMappingImage(
    MachineInstr &MI, const AMDGPURegisterBankInfo::OperandsMapper &OpdMapper,
    MachineRegisterInfo &MRI, int RsrcIdx) const {
  const int NumDefs = MI.getNumExplicitDefs();

  // The reported argument index is relative to the IR intrinsic call arguments,
  // so we need to shift by the number of defs and the intrinsic ID.
  RsrcIdx += NumDefs + 1;

  // Insert copies to VGPR arguments.
  applyDefaultMapping(OpdMapper);

  // Fixup any SGPR arguments.
  SmallVector<unsigned, 4> SGPRIndexes;
  for (int I = NumDefs, NumOps = MI.getNumOperands(); I != NumOps; ++I) {
    if (!MI.getOperand(I).isReg())
      continue;

    // If this intrinsic has a sampler, it immediately follows rsrc.
if (I == RsrcIdx || I == RsrcIdx + 1)
      SGPRIndexes.push_back(I);
  }

  executeInWaterfallLoop(MI, MRI, SGPRIndexes);
  return true;
}

// Look through copies to find the underlying defining register.
static Register getSrcRegIgnoringCopies(const MachineRegisterInfo &MRI,
                                        Register Reg) {
  MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (!Def)
    return Reg;

  // TODO: Guard against this being an implicit def
  return Def->getOperand(0).getReg();
}

// Analyze a combined offset from an llvm.amdgcn.s.buffer intrinsic and store
// the three offsets (voffset, soffset and instoffset)
static unsigned setBufferOffsets(MachineIRBuilder &B,
                                 const AMDGPURegisterBankInfo &RBI,
                                 Register CombinedOffset, Register &VOffsetReg,
                                 Register &SOffsetReg, int64_t &InstOffsetVal,
                                 Align Alignment) {
  const LLT S32 = LLT::scalar(32);
  MachineRegisterInfo *MRI = B.getMRI();

  // Case 1: the whole offset is a known constant — split it into the soffset
  // register and the instruction immediate. Returns the total constant
  // offset (used by the caller to offset the MMO).
  if (Optional<int64_t> Imm = getConstantVRegVal(CombinedOffset, *MRI)) {
    uint32_t SOffset, ImmOffset;
    if (AMDGPU::splitMUBUFOffset(*Imm, SOffset, ImmOffset, &RBI.Subtarget,
                                 Alignment)) {
      VOffsetReg = B.buildConstant(S32, 0).getReg(0);
      SOffsetReg = B.buildConstant(S32, SOffset).getReg(0);
      InstOffsetVal = ImmOffset;

      B.getMRI()->setRegBank(VOffsetReg, AMDGPU::VGPRRegBank);
      B.getMRI()->setRegBank(SOffsetReg, AMDGPU::SGPRRegBank);
      return SOffset + ImmOffset;
    }
  }

  // Case 2: base register plus a splittable constant offset.
  Register Base;
  unsigned Offset;
  MachineInstr *Unused;

  std::tie(Base, Offset, Unused)
    = AMDGPU::getBaseWithConstantOffset(*MRI, CombinedOffset);

  uint32_t SOffset, ImmOffset;
  if (Offset > 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
                                             &RBI.Subtarget, Alignment)) {
    if (RBI.getRegBank(Base, *MRI, *RBI.TRI) == &AMDGPU::VGPRRegBank) {
      VOffsetReg = Base;
      SOffsetReg = B.buildConstant(S32, SOffset).getReg(0);
      B.getMRI()->setRegBank(SOffsetReg, AMDGPU::SGPRRegBank);
      InstOffsetVal = ImmOffset;
      return 0; // XXX - Why is this 0?
    }

    // If we have SGPR base, we can use it for soffset.
if (SOffset == 0) {
      VOffsetReg = B.buildConstant(S32, 0).getReg(0);
      B.getMRI()->setRegBank(VOffsetReg, AMDGPU::VGPRRegBank);
      SOffsetReg = Base;
      InstOffsetVal = ImmOffset;
      return 0; // XXX - Why is this 0?
    }
  }

  // Handle the variable sgpr + vgpr case.
  if (MachineInstr *Add = getOpcodeDef(AMDGPU::G_ADD, CombinedOffset, *MRI)) {
    Register Src0 = getSrcRegIgnoringCopies(*MRI, Add->getOperand(1).getReg());
    Register Src1 = getSrcRegIgnoringCopies(*MRI, Add->getOperand(2).getReg());

    const RegisterBank *Src0Bank = RBI.getRegBank(Src0, *MRI, *RBI.TRI);
    const RegisterBank *Src1Bank = RBI.getRegBank(Src1, *MRI, *RBI.TRI);

    // Route the VGPR-bank addend to voffset and the SGPR-bank addend to
    // soffset, whichever order they appear in.
    if (Src0Bank == &AMDGPU::VGPRRegBank && Src1Bank == &AMDGPU::SGPRRegBank) {
      VOffsetReg = Src0;
      SOffsetReg = Src1;
      return 0;
    }

    if (Src0Bank == &AMDGPU::SGPRRegBank && Src1Bank == &AMDGPU::VGPRRegBank) {
      VOffsetReg = Src1;
      SOffsetReg = Src0;
      return 0;
    }
  }

  // Ensure we have a VGPR for the combined offset. This could be an issue if we
  // have an SGPR offset and a VGPR resource.
  if (RBI.getRegBank(CombinedOffset, *MRI, *RBI.TRI) == &AMDGPU::VGPRRegBank) {
    VOffsetReg = CombinedOffset;
  } else {
    VOffsetReg = B.buildCopy(S32, CombinedOffset).getReg(0);
    B.getMRI()->setRegBank(VOffsetReg, AMDGPU::VGPRRegBank);
  }

  SOffsetReg = B.buildConstant(S32, 0).getReg(0);
  B.getMRI()->setRegBank(SOffsetReg, AMDGPU::SGPRRegBank);
  return 0;
}

// Lower G_AMDGPU_S_BUFFER_LOAD with a divergent (VGPR) offset or resource
// into one or more MUBUF buffer loads, wrapping in a waterfall loop if the
// resource itself is divergent.
bool AMDGPURegisterBankInfo::applyMappingSBufferLoad(
  const OperandsMapper &OpdMapper) const {
  MachineInstr &MI = OpdMapper.getMI();
  MachineRegisterInfo &MRI = OpdMapper.getMRI();

  const LLT S32 = LLT::scalar(32);
  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);

  const RegisterBank *RSrcBank =
    OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
  const RegisterBank *OffsetBank =
    OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank;
  if (RSrcBank == &AMDGPU::SGPRRegBank &&
      OffsetBank == &AMDGPU::SGPRRegBank)
    return true; // Legal mapping

  // FIXME: 96-bit case was widened during legalize. We need to narrow it back
  // here but don't have an MMO.

  // Results wider than 128 bits are emitted as multiple 128-bit loads.
  unsigned LoadSize = Ty.getSizeInBits();
  int NumLoads = 1;
  if (LoadSize == 256 || LoadSize == 512) {
    NumLoads = LoadSize / 128;
    Ty = Ty.divide(NumLoads);
  }

  // Use the alignment to ensure that the required offsets will fit into the
  // immediate offsets.
  const Align Alignment = NumLoads > 1 ? Align(16 * NumLoads) : Align(1);

  MachineIRBuilder B(MI);
  MachineFunction &MF = B.getMF();

  Register SOffset;
  Register VOffset;
  int64_t ImmOffset = 0;

  unsigned MMOOffset = setBufferOffsets(B, *this, MI.getOperand(2).getReg(),
                                        VOffset, SOffset, ImmOffset, Alignment);

  // TODO: 96-bit loads were widened to 128-bit results. Shrink the result if we
  // can, but we need to track an MMO for that.
  const unsigned MemSize = (Ty.getSizeInBits() + 7) / 8;
  const Align MemAlign(4); // FIXME: ABI type alignment?
  MachineMemOperand *BaseMMO = MF.getMachineMemOperand(
    MachinePointerInfo(),
    MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
    MachineMemOperand::MOInvariant,
    MemSize, MemAlign);
  if (MMOOffset != 0)
    BaseMMO = MF.getMachineMemOperand(BaseMMO, MMOOffset, MemSize);

  // If only the offset is divergent, emit a MUBUF buffer load instead. We can
  // assume that the buffer is unswizzled.
  Register RSrc = MI.getOperand(1).getReg();
  Register VIndex = B.buildConstant(S32, 0).getReg(0);
  B.getMRI()->setRegBank(VIndex, AMDGPU::VGPRRegBank);

  SmallVector<Register, 4> LoadParts(NumLoads);

  // Remember the span of instructions we emit so a waterfall loop can be
  // wrapped around all of them if needed.
  MachineBasicBlock::iterator MII = MI.getIterator();
  MachineInstrSpan Span(MII, &B.getMBB());

  for (int i = 0; i < NumLoads; ++i) {
    if (NumLoads == 1) {
      LoadParts[i] = Dst;
    } else {
      LoadParts[i] = MRI.createGenericVirtualRegister(Ty);
      MRI.setRegBank(LoadParts[i], AMDGPU::VGPRRegBank);
    }

    MachineMemOperand *MMO = BaseMMO;
    if (i != 0)
      BaseMMO = MF.getMachineMemOperand(BaseMMO, MMOOffset + 16 * i, MemSize);

    B.buildInstr(AMDGPU::G_AMDGPU_BUFFER_LOAD)
      .addDef(LoadParts[i])       // vdata
      .addUse(RSrc)               // rsrc
      .addUse(VIndex)             // vindex
      .addUse(VOffset)            // voffset
      .addUse(SOffset)            // soffset
      .addImm(ImmOffset + 16 * i) // offset(imm)
      .addImm(0)                  // cachepolicy, swizzled buffer(imm)
      .addImm(0)                  // idxen(imm)
      .addMemOperand(MMO);
  }

  // TODO: If only the resource is a VGPR, it may be better to execute the
  // scalar load in the waterfall loop if the resource is expected to frequently
  // be dynamically uniform.
  if (RSrcBank != &AMDGPU::SGPRRegBank) {
    // Remove the original instruction to avoid potentially confusing the
    // waterfall loop logic.
    B.setInstr(*Span.begin());
    MI.eraseFromParent();

    SmallSet<Register, 4> OpsToWaterfall;

    OpsToWaterfall.insert(RSrc);
    executeInWaterfallLoop(B, make_range(Span.begin(), Span.end()),
                           OpsToWaterfall, MRI);
  }

  if (NumLoads != 1) {
    if (Ty.isVector())
      B.buildConcatVectors(Dst, LoadParts);
    else
      B.buildMerge(Dst, LoadParts);
  }

  // We removed the instruction earlier with a waterfall loop.
if (RSrcBank == &AMDGPU::SGPRRegBank)
    MI.eraseFromParent();

  return true;
}

// Lower the sbfe/ubfe intrinsics according to the selected register banks.
// The VGPR 32-bit form maps directly; the SGPR form packs offset and width
// into the single second operand of S_BFE_{I|U}{32|64}.
bool AMDGPURegisterBankInfo::applyMappingBFEIntrinsic(
  const OperandsMapper &OpdMapper, bool Signed) const {
  MachineInstr &MI = OpdMapper.getMI();
  MachineRegisterInfo &MRI = OpdMapper.getMRI();

  // Insert basic copies
  applyDefaultMapping(OpdMapper);

  Register DstReg = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(DstReg);

  const LLT S32 = LLT::scalar(32);

  const RegisterBank *DstBank =
    OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
  if (DstBank == &AMDGPU::VGPRRegBank) {
    if (Ty == S32)
      return true;

    // TODO: 64-bit version is scalar only, so we need to expand this.
    return false;
  }

  Register SrcReg = MI.getOperand(2).getReg();
  Register OffsetReg = MI.getOperand(3).getReg();
  Register WidthReg = MI.getOperand(4).getReg();

  // The scalar form packs the offset and width in a single operand.

  ApplyRegBankMapping ApplyBank(*this, MRI, &AMDGPU::SGPRRegBank);
  GISelObserverWrapper Observer(&ApplyBank);
  MachineIRBuilder B(MI);
  B.setChangeObserver(Observer);

  // Ensure the high bits are clear to insert the offset.
  auto OffsetMask = B.buildConstant(S32, maskTrailingOnes<unsigned>(6));
  auto ClampOffset = B.buildAnd(S32, OffsetReg, OffsetMask);

  // Zeros out the low bits, so don't bother clamping the input value.
  auto ShiftWidth = B.buildShl(S32, WidthReg, B.buildConstant(S32, 16));

  // Transformation function, pack the offset and width of a BFE into
  // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
  // source, bits [5:0] contain the offset and bits [22:16] the width.
  auto MergedInputs = B.buildOr(S32, ClampOffset, ShiftWidth);

  // TODO: It might be worth using a pseudo here to avoid scc clobber and
  // register class constraints.
  unsigned Opc = Ty == S32 ? (Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32) :
                             (Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64);
  auto MIB = B.buildInstr(Opc, {DstReg}, {SrcReg, MergedInputs});
  if (!constrainSelectedInstRegOperands(*MIB, *TII, *TRI, *this))
    llvm_unreachable("failed to constrain BFE");

  MI.eraseFromParent();
  return true;
}

// FIXME: Duplicated from LegalizerHelper
// Map a min/max opcode to the icmp predicate implementing it.
static CmpInst::Predicate minMaxToCompare(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::G_SMIN:
    return CmpInst::ICMP_SLT;
  case TargetOpcode::G_SMAX:
    return CmpInst::ICMP_SGT;
  case TargetOpcode::G_UMIN:
    return CmpInst::ICMP_ULT;
  case TargetOpcode::G_UMAX:
    return CmpInst::ICMP_UGT;
  default:
    llvm_unreachable("not in integer min/max");
  }
}

// Map a min/max opcode to the extension (sext/zext) matching its signedness.
static unsigned minMaxToExtend(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
    return TargetOpcode::G_SEXT;
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
    return TargetOpcode::G_ZEXT;
  default:
    llvm_unreachable("not in integer min/max");
  }
}

// Emit a legalized extension from <2 x s16> to 2 32-bit components, avoiding
// any illegal vector extend or unmerge operations.
static std::pair<Register, Register> unpackV2S16ToS32(MachineIRBuilder &B, Register Src, unsigned ExtOpcode) { const LLT S32 = LLT::scalar(32); auto Bitcast = B.buildBitcast(S32, Src); if (ExtOpcode == TargetOpcode::G_SEXT) { auto ExtLo = B.buildSExtInReg(S32, Bitcast, 16); auto ShiftHi = B.buildAShr(S32, Bitcast, B.buildConstant(S32, 16)); return std::make_pair(ExtLo.getReg(0), ShiftHi.getReg(0)); } auto ShiftHi = B.buildLShr(S32, Bitcast, B.buildConstant(S32, 16)); if (ExtOpcode == TargetOpcode::G_ZEXT) { auto ExtLo = B.buildAnd(S32, Bitcast, B.buildConstant(S32, 0xffff)); return std::make_pair(ExtLo.getReg(0), ShiftHi.getReg(0)); } assert(ExtOpcode == TargetOpcode::G_ANYEXT); return std::make_pair(Bitcast.getReg(0), ShiftHi.getReg(0)); } static MachineInstr *buildExpandedScalarMinMax(MachineIRBuilder &B, CmpInst::Predicate Pred, Register Dst, Register Src0, Register Src1) { const LLT CmpType = LLT::scalar(32); auto Cmp = B.buildICmp(Pred, CmpType, Src0, Src1); return B.buildSelect(Dst, Cmp, Src0, Src1); } // FIXME: Duplicated from LegalizerHelper, except changing the boolean type. void AMDGPURegisterBankInfo::lowerScalarMinMax(MachineIRBuilder &B, MachineInstr &MI) const { Register Dst = MI.getOperand(0).getReg(); Register Src0 = MI.getOperand(1).getReg(); Register Src1 = MI.getOperand(2).getReg(); const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode()); MachineInstr *Sel = buildExpandedScalarMinMax(B, Pred, Dst, Src0, Src1); Register CmpReg = Sel->getOperand(1).getReg(); B.getMRI()->setRegBank(CmpReg, AMDGPU::SGPRRegBank); MI.eraseFromParent(); } // For cases where only a single copy is inserted for matching register banks. 
// Replace the register in the instruction operand
static bool substituteSimpleCopyRegs(
  const AMDGPURegisterBankInfo::OperandsMapper &OpdMapper, unsigned OpIdx) {
  SmallVector<unsigned, 1> SrcReg(OpdMapper.getVRegs(OpIdx));
  if (!SrcReg.empty()) {
    assert(SrcReg.size() == 1);
    OpdMapper.getMI().getOperand(OpIdx).setReg(SrcReg[0]);
    return true;
  }

  return false;
}

/// Handle register layout difference for f16 images for some subtargets.
Register AMDGPURegisterBankInfo::handleD16VData(MachineIRBuilder &B,
                                                MachineRegisterInfo &MRI,
                                                Register Reg) const {
  // Only subtargets with unpacked d16 VMEM need the repacking below.
  if (!Subtarget.hasUnpackedD16VMem())
    return Reg;

  const LLT S16 = LLT::scalar(16);
  LLT StoreVT = MRI.getType(Reg);
  if (!StoreVT.isVector() || StoreVT.getElementType() != S16)
    return Reg;

  // Unpack <N x s16> and rebuild as <N x s32> (one element per 32-bit lane).
  auto Unmerge = B.buildUnmerge(S16, Reg);

  SmallVector<Register, 4> WideRegs;
  for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
    WideRegs.push_back(Unmerge.getReg(I));

  const LLT S32 = LLT::scalar(32);
  int NumElts = StoreVT.getNumElements();

  return B.buildMerge(LLT::vector(NumElts, S32), WideRegs).getReg(0);
}

// Decompose \p Reg into a base register plus constant offset. A pure constant
// yields an invalid (null) base register.
static std::pair<Register, unsigned>
getBaseWithConstantOffset(MachineRegisterInfo &MRI, Register Reg) {
  int64_t Const;
  if (mi_match(Reg, MRI, m_ICst(Const)))
    return std::make_pair(Register(), Const);

  Register Base;
  if (mi_match(Reg, MRI, m_GAdd(m_Reg(Base), m_ICst(Const))))
    return std::make_pair(Base, Const);

  // TODO: Handle G_OR used for add case
  return std::make_pair(Reg, 0);
}

std::pair<Register, unsigned>
AMDGPURegisterBankInfo::splitBufferOffsets(MachineIRBuilder &B,
                                           Register OrigOffset) const {
  // Maximum value of the MUBUF 12-bit immediate offset field.
  const unsigned MaxImm = 4095;
  Register BaseReg;
  unsigned ImmOffset;
  const LLT S32 = LLT::scalar(32);

  std::tie(BaseReg, ImmOffset) = getBaseWithConstantOffset(*B.getMRI(),
                                                           OrigOffset);

  unsigned C1 = 0;
  if (ImmOffset != 0) {
    // If the immediate value is too big for the immoffset field, put the value
    // and -4096 into the immoffset field so that the value that is copied/added
    // for the voffset field is a multiple of 4096, and it stands more chance
    // of being CSEd with the copy/add for another similar load/store.
    // However, do not do that rounding down to a multiple of 4096 if that is a
    // negative number, as it appears to be illegal to have a negative offset
    // in the vgpr, even if adding the immediate offset makes it positive.
    unsigned Overflow = ImmOffset & ~MaxImm;
    ImmOffset -= Overflow;
    if ((int32_t)Overflow < 0) {
      Overflow += ImmOffset;
      ImmOffset = 0;
    }

    C1 = ImmOffset;
    if (Overflow != 0) {
      // Fold the overflow into the register part of the offset.
      if (!BaseReg)
        BaseReg = B.buildConstant(S32, Overflow).getReg(0);
      else {
        auto OverflowVal = B.buildConstant(S32, Overflow);
        BaseReg = B.buildAdd(S32, BaseReg, OverflowVal).getReg(0);
      }
    }
  }

  if (!BaseReg)
    BaseReg = B.buildConstant(S32, 0).getReg(0);

  return {BaseReg, C1};
}

// True if \p Reg is defined by the constant integer 0.
static bool isZero(Register Reg, MachineRegisterInfo &MRI) {
  int64_t C;
  return mi_match(Reg, MRI, m_ICst(C)) && C == 0;
}

// Cache-policy bit accessors: glc is bit 0, slc bit 1, dlc bit 2.
static unsigned extractGLC(unsigned CachePolicy) {
  return CachePolicy & 1;
}

static unsigned extractSLC(unsigned CachePolicy) {
  return (CachePolicy >> 1) & 1;
}

static unsigned extractDLC(unsigned CachePolicy) {
  return (CachePolicy >> 2) & 1;
}

// Select a raw buffer store intrinsic directly to a MUBUF store instruction,
// running a waterfall loop over the rsrc/soffset operands first.
MachineInstr *
AMDGPURegisterBankInfo::selectStoreIntrinsic(MachineIRBuilder &B,
                                             MachineInstr &MI) const {
  MachineRegisterInfo &MRI = *B.getMRI();
  executeInWaterfallLoop(B, MI, MRI, {2, 4});

  // FIXME: DAG lowering brokenly changes opcode based on FP vs. integer.

  Register VData = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(VData);

  int EltSize = Ty.getScalarSizeInBits();
  int Size = Ty.getSizeInBits();

  // FIXME: Broken integer truncstore.
  if (EltSize != 32)
    report_fatal_error("unhandled intrinsic store");

  // FIXME: Verifier should enforce 1 MMO for these intrinsics.
const int MemSize = (*MI.memoperands_begin())->getSize();

  Register RSrc = MI.getOperand(2).getReg();
  Register VOffset = MI.getOperand(3).getReg();
  Register SOffset = MI.getOperand(4).getReg();
  unsigned CachePolicy = MI.getOperand(5).getImm();

  unsigned ImmOffset;
  std::tie(VOffset, ImmOffset) = splitBufferOffsets(B, VOffset);

  // Use the OFFEN (register-offset) form only when voffset is nonzero.
  const bool Offen = !isZero(VOffset, MRI);

  // Pick the MUBUF store opcode by store size and addressing form.
  unsigned Opc = AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact;
  switch (8 * MemSize) {
  case 8:
    Opc = Offen ? AMDGPU::BUFFER_STORE_BYTE_OFFEN_exact :
                  AMDGPU::BUFFER_STORE_BYTE_OFFSET_exact;
    break;
  case 16:
    Opc = Offen ? AMDGPU::BUFFER_STORE_SHORT_OFFEN_exact :
                  AMDGPU::BUFFER_STORE_SHORT_OFFSET_exact;
    break;
  default:
    Opc = Offen ? AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact :
                  AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact;
    if (Size > 32)
      Opc = AMDGPU::getMUBUFOpcode(Opc, Size / 32);
    break;
  }

  // Set the insertion point back to the instruction in case it was moved into a
  // loop.
  B.setInstr(MI);

  MachineInstrBuilder MIB = B.buildInstr(Opc)
    .addUse(VData);

  if (Offen)
    MIB.addUse(VOffset);

  MIB.addUse(RSrc)
     .addUse(SOffset)
     .addImm(ImmOffset)
     .addImm(extractGLC(CachePolicy))
     .addImm(extractSLC(CachePolicy))
     .addImm(0) // tfe: FIXME: Remove from inst
     .addImm(extractDLC(CachePolicy))
     .cloneMemRefs(MI);

  // FIXME: We need a way to report failure from applyMappingImpl.
  // Insert constrain copies before inserting the loop.
  if (!constrainSelectedInstRegOperands(*MIB, *TII, *TRI, *this))
    report_fatal_error("failed to constrain selected store intrinsic");

  return MIB;
}

// Copy \p SrcReg (32 or 64 bits) into VGPR \p DstReg using v_mov_b32 so the
// EXEC dependency of the copy is explicit.
bool AMDGPURegisterBankInfo::buildVCopy(MachineIRBuilder &B, Register DstReg,
                                        Register SrcReg) const {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT SrcTy = MRI.getType(SrcReg);
  if (SrcTy.getSizeInBits() == 32) {
    // Use a v_mov_b32 here to make the exec dependency explicit.
    B.buildInstr(AMDGPU::V_MOV_B32_e32)
      .addDef(DstReg)
      .addUse(SrcReg);
    return constrainGenericRegister(DstReg, AMDGPU::VGPR_32RegClass, MRI) &&
           constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, MRI);
  }

  // 64-bit case: move each 32-bit half separately, then reassemble with a
  // REG_SEQUENCE.
  Register TmpReg0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register TmpReg1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  B.buildInstr(AMDGPU::V_MOV_B32_e32)
    .addDef(TmpReg0)
    .addUse(SrcReg, 0, AMDGPU::sub0);
  B.buildInstr(AMDGPU::V_MOV_B32_e32)
    .addDef(TmpReg1)
    .addUse(SrcReg, 0, AMDGPU::sub1);
  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(DstReg)
    .addUse(TmpReg0)
    .addImm(AMDGPU::sub0)
    .addUse(TmpReg1)
    .addImm(AMDGPU::sub1);

  return constrainGenericRegister(SrcReg, AMDGPU::SReg_64RegClass, MRI) &&
         constrainGenericRegister(DstReg, AMDGPU::VReg_64RegClass, MRI);
}

/// Utility function for pushing dynamic vector indexes with a constant offset
/// into waterfall loops.
static void reinsertVectorIndexAdd(MachineIRBuilder &B,
                                   MachineInstr &IdxUseInstr,
                                   unsigned OpIdx,
                                   unsigned ConstOffset) {
  MachineRegisterInfo &MRI = *B.getMRI();
  const LLT S32 = LLT::scalar(32);

  Register WaterfallIdx = IdxUseInstr.getOperand(OpIdx).getReg();
  // Re-materialize the index + offset add right before the use, inside the
  // loop body.
  B.setInsertPt(*IdxUseInstr.getParent(), IdxUseInstr.getIterator());

  auto MaterializedOffset = B.buildConstant(S32, ConstOffset);

  auto Add = B.buildAdd(S32, WaterfallIdx, MaterializedOffset);
  MRI.setRegBank(MaterializedOffset.getReg(0), AMDGPU::SGPRRegBank);
  MRI.setRegBank(Add.getReg(0), AMDGPU::SGPRRegBank);
  IdxUseInstr.getOperand(OpIdx).setReg(Add.getReg(0));
}

/// Implement extending a 32-bit value to a 64-bit value. \p Lo32Reg is the
/// original 32-bit source value (to be inserted in the low part of the combined
/// 64-bit result), and \p Hi32Reg is the high half of the combined 64-bit
/// value.
static void extendLow32IntoHigh32(MachineIRBuilder &B,
                                  Register Hi32Reg, Register Lo32Reg,
                                  unsigned ExtOpc,
                                  const RegisterBank &RegBank,
                                  bool IsBooleanSrc = false) {
  if (ExtOpc == AMDGPU::G_ZEXT) {
    // Zero extension: the high half is simply 0.
    B.buildConstant(Hi32Reg, 0);
  } else if (ExtOpc == AMDGPU::G_SEXT) {
    if (IsBooleanSrc) {
      // If we know the original source was an s1, the high half is the same as
      // the low.
      B.buildCopy(Hi32Reg, Lo32Reg);
    } else {
      // Replicate sign bit from 32-bit extended part.
      auto ShiftAmt = B.buildConstant(LLT::scalar(32), 31);
      B.getMRI()->setRegBank(ShiftAmt.getReg(0), RegBank);
      B.buildAShr(Hi32Reg, Lo32Reg, ShiftAmt);
    }
  } else {
    assert(ExtOpc == AMDGPU::G_ANYEXT && "not an integer extension");
    // Any-extension leaves the high bits unspecified.
    B.buildUndef(Hi32Reg);
  }
}

/// Try to expand a dynamic G_EXTRACT_VECTOR_ELT into a chain of
/// compare-against-constant-index + select operations. Returns true (and
/// erases \p MI) if the expansion was performed, false if the target
/// heuristic deems it unprofitable.
bool AMDGPURegisterBankInfo::foldExtractEltToCmpSelect(
  MachineInstr &MI,
  MachineRegisterInfo &MRI,
  const OperandsMapper &OpdMapper) const {

  Register VecReg = MI.getOperand(1).getReg();
  Register Idx = MI.getOperand(2).getReg();

  const RegisterBank &IdxBank =
    *OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank;

  bool IsDivergentIdx = IdxBank == AMDGPU::VGPRRegBank;

  LLT VecTy = MRI.getType(VecReg);
  unsigned EltSize = VecTy.getScalarSizeInBits();
  unsigned NumElem = VecTy.getNumElements();

  // Defer to the target's profitability heuristic for this expansion.
  if (!SITargetLowering::shouldExpandVectorDynExt(EltSize, NumElem,
                                                  IsDivergentIdx))
    return false;

  MachineIRBuilder B(MI);
  LLT S32 = LLT::scalar(32);

  const RegisterBank &DstBank =
    *OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
  const RegisterBank &SrcBank =
    *OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;

  // The compare conditions can stay on the scalar bank only when every input
  // is scalar; otherwise they live in VCC.
  const RegisterBank &CCBank =
    (DstBank == AMDGPU::SGPRRegBank &&
     SrcBank == AMDGPU::SGPRRegBank &&
     IdxBank == AMDGPU::SGPRRegBank) ? AMDGPU::SGPRRegBank : AMDGPU::VCCRegBank;
  LLT CCTy = (CCBank == AMDGPU::SGPRRegBank) ?
    S32 : LLT::scalar(1);

  // A VCC-banked compare needs a vector index operand; copy the SGPR index
  // into a VGPR first.
  if (CCBank == AMDGPU::VCCRegBank && IdxBank == AMDGPU::SGPRRegBank) {
    Idx = B.buildCopy(S32, Idx)->getOperand(0).getReg();
    MRI.setRegBank(Idx, AMDGPU::VGPRRegBank);
  }

  LLT EltTy = VecTy.getScalarType();
  // If the mapping split the result into multiple registers, each element is
  // handled lane-by-lane; otherwise treat it as a single lane.
  SmallVector<Register, 2> DstRegs(OpdMapper.getVRegs(0));
  unsigned NumLanes = DstRegs.size();
  if (!NumLanes)
    NumLanes = 1;
  else
    EltTy = MRI.getType(DstRegs[0]);

  auto UnmergeToEltTy = B.buildUnmerge(EltTy, VecReg);
  SmallVector<Register, 2> Res(NumLanes);
  // Seed the running result with element 0 ...
  for (unsigned L = 0; L < NumLanes; ++L)
    Res[L] = UnmergeToEltTy.getReg(L);

  // ... then, for each remaining element, select it in when Idx matches its
  // constant index.
  for (unsigned I = 1; I < NumElem; ++I) {
    auto IC = B.buildConstant(S32, I);
    MRI.setRegBank(IC->getOperand(0).getReg(), AMDGPU::SGPRRegBank);
    auto Cmp = B.buildICmp(CmpInst::ICMP_EQ, CCTy, Idx, IC);
    MRI.setRegBank(Cmp->getOperand(0).getReg(), CCBank);

    for (unsigned L = 0; L < NumLanes; ++L) {
      auto S = B.buildSelect(EltTy, Cmp,
                             UnmergeToEltTy.getReg(I * NumLanes + L), Res[L]);

      // Operands 0 (def), 2 and 3 (the selected values) take the result bank.
      for (unsigned N : { 0, 2, 3 })
        MRI.setRegBank(S->getOperand(N).getReg(), DstBank);

      Res[L] = S->getOperand(0).getReg();
    }
  }

  // Copy the accumulated selects into the final destination register(s).
  for (unsigned L = 0; L < NumLanes; ++L) {
    Register DstReg = (NumLanes == 1) ?
      MI.getOperand(0).getReg() : DstRegs[L];
    B.buildCopy(DstReg, Res[L]);
    MRI.setRegBank(DstReg, DstBank);
  }

  MRI.setRegBank(MI.getOperand(0).getReg(), DstBank);
  MI.eraseFromParent();

  return true;
}

/// Try to expand a dynamic G_INSERT_VECTOR_ELT into a per-element chain of
/// compare-against-constant-index + select operations, then rebuild the
/// result vector. Returns true (and erases \p MI) if the expansion was
/// performed, false if the target heuristic deems it unprofitable.
bool AMDGPURegisterBankInfo::foldInsertEltToCmpSelect(
  MachineInstr &MI,
  MachineRegisterInfo &MRI,
  const OperandsMapper &OpdMapper) const {

  Register VecReg = MI.getOperand(1).getReg();
  Register Idx = MI.getOperand(3).getReg();

  const RegisterBank &IdxBank =
    *OpdMapper.getInstrMapping().getOperandMapping(3).BreakDown[0].RegBank;

  bool IsDivergentIdx = IdxBank == AMDGPU::VGPRRegBank;

  LLT VecTy = MRI.getType(VecReg);
  unsigned EltSize = VecTy.getScalarSizeInBits();
  unsigned NumElem = VecTy.getNumElements();

  // Defer to the target's profitability heuristic for this expansion.
  if (!SITargetLowering::shouldExpandVectorDynExt(EltSize, NumElem,
                                                  IsDivergentIdx))
    return false;

  MachineIRBuilder B(MI);
  LLT S32 = LLT::scalar(32);

  const RegisterBank &DstBank =
    *OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
  const RegisterBank &SrcBank =
    *OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank;
  const RegisterBank &InsBank =
    *OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank;

  // The compare conditions can stay on the scalar bank only when every input
  // is scalar; otherwise they live in VCC.
  const RegisterBank &CCBank =
    (DstBank == AMDGPU::SGPRRegBank &&
     SrcBank == AMDGPU::SGPRRegBank &&
     InsBank == AMDGPU::SGPRRegBank &&
     IdxBank == AMDGPU::SGPRRegBank) ? AMDGPU::SGPRRegBank : AMDGPU::VCCRegBank;
  LLT CCTy = (CCBank == AMDGPU::SGPRRegBank) ?
    S32 : LLT::scalar(1);

  // A VCC-banked compare needs a vector index operand; copy the SGPR index
  // into a VGPR first.
  if (CCBank == AMDGPU::VCCRegBank && IdxBank == AMDGPU::SGPRRegBank) {
    Idx = B.buildCopy(S32, Idx)->getOperand(0).getReg();
    MRI.setRegBank(Idx, AMDGPU::VGPRRegBank);
  }

  LLT EltTy = VecTy.getScalarType();
  // If the mapping split the inserted value, handle each lane register
  // separately; otherwise treat the single operand as one lane.
  SmallVector<Register, 2> InsRegs(OpdMapper.getVRegs(2));
  unsigned NumLanes = InsRegs.size();
  if (!NumLanes) {
    NumLanes = 1;
    InsRegs.push_back(MI.getOperand(2).getReg());
  } else {
    EltTy = MRI.getType(InsRegs[0]);
  }

  auto UnmergeToEltTy = B.buildUnmerge(EltTy, VecReg);

  SmallVector<Register, 16> Ops(NumElem * NumLanes);

  // For each element position, select either the inserted value (when Idx
  // matches the position) or the original vector element.
  for (unsigned I = 0; I < NumElem; ++I) {
    auto IC = B.buildConstant(S32, I);
    MRI.setRegBank(IC->getOperand(0).getReg(), AMDGPU::SGPRRegBank);
    auto Cmp = B.buildICmp(CmpInst::ICMP_EQ, CCTy, Idx, IC);
    MRI.setRegBank(Cmp->getOperand(0).getReg(), CCBank);

    for (unsigned L = 0; L < NumLanes; ++L) {
      auto S = B.buildSelect(EltTy, Cmp, InsRegs[L],
                             UnmergeToEltTy.getReg(I * NumLanes + L));

      // Operands 0 (def), 2 and 3 (the selected values) take the result bank.
      for (unsigned N : { 0, 2, 3 })
        MRI.setRegBank(S->getOperand(N).getReg(), DstBank);

      Ops[I * NumLanes + L] = S->getOperand(0).getReg();
    }
  }

  // Rebuild the result vector, inserting a bitcast when the per-lane merge
  // type differs from the original destination vector type.
  LLT MergeTy = LLT::vector(Ops.size(), EltTy);
  if (MergeTy == MRI.getType(MI.getOperand(0).getReg())) {
    B.buildBuildVector(MI.getOperand(0), Ops);
  } else {
    auto Vec = B.buildBuildVector(MergeTy, Ops);
    MRI.setRegBank(Vec->getOperand(0).getReg(), DstBank);
    B.buildBitcast(MI.getOperand(0).getReg(), Vec);
  }

  MRI.setRegBank(MI.getOperand(0).getReg(), DstBank);
  MI.eraseFromParent();

  return true;
}

void AMDGPURegisterBankInfo::applyMappingImpl(
    const OperandsMapper &OpdMapper) const {
  MachineInstr &MI = OpdMapper.getMI();
  unsigned Opc = MI.getOpcode();
  MachineRegisterInfo &MRI = OpdMapper.getMRI();
  switch (Opc) {
  case AMDGPU::G_PHI: {
    Register DstReg = MI.getOperand(0).getReg();
    LLT DstTy = MRI.getType(DstReg);
    // Only s1 phis need special handling in this case.
    if (DstTy != LLT::scalar(1))
      break;

    const LLT S32 = LLT::scalar(32);
    const RegisterBank *DstBank =
      OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
    if (DstBank == &AMDGPU::VCCRegBank) {
applyDefaultMapping(OpdMapper); // The standard handling only considers the result register bank for // phis. For VCC, blindly inserting a copy when the phi is lowered will // produce an invalid copy. We can only copy with some kind of compare to // get a vector boolean result. Insert a regitser bank copy that will be // correctly lowered to a compare. MachineIRBuilder B(*MI.getParent()->getParent()); for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { Register SrcReg = MI.getOperand(I).getReg(); const RegisterBank *SrcBank = getRegBank(SrcReg, MRI, *TRI); if (SrcBank != &AMDGPU::VCCRegBank) { MachineBasicBlock *SrcMBB = MI.getOperand(I + 1).getMBB(); B.setInsertPt(*SrcMBB, SrcMBB->getFirstTerminator()); auto Copy = B.buildCopy(LLT::scalar(1), SrcReg); MRI.setRegBank(Copy.getReg(0), AMDGPU::VCCRegBank); MI.getOperand(I).setReg(Copy.getReg(0)); } } return; } // Phi handling is strange and only considers the bank of the destination. substituteSimpleCopyRegs(OpdMapper, 0); // Promote SGPR/VGPR booleans to s32 MachineFunction *MF = MI.getParent()->getParent(); ApplyRegBankMapping ApplyBank(*this, MRI, DstBank); GISelObserverWrapper Observer(&ApplyBank); MachineIRBuilder B(MI); LegalizerHelper Helper(*MF, Observer, B); if (Helper.widenScalar(MI, 0, S32) != LegalizerHelper::Legalized) llvm_unreachable("widen scalar should have succeeded"); return; } case AMDGPU::G_ICMP: case AMDGPU::G_UADDO: case AMDGPU::G_USUBO: case AMDGPU::G_UADDE: case AMDGPU::G_SADDE: case AMDGPU::G_USUBE: case AMDGPU::G_SSUBE: { unsigned BoolDstOp = Opc == AMDGPU::G_ICMP ? 0 : 1; Register DstReg = MI.getOperand(BoolDstOp).getReg(); const RegisterBank *DstBank = OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank; if (DstBank != &AMDGPU::SGPRRegBank) break; const bool HasCarryIn = MI.getNumOperands() == 5; // If this is a scalar compare, promote the result to s32, as the selection // will end up using a copy to a 32-bit vreg. 
const LLT S32 = LLT::scalar(32); Register NewDstReg = MRI.createGenericVirtualRegister(S32); MRI.setRegBank(NewDstReg, AMDGPU::SGPRRegBank); MI.getOperand(BoolDstOp).setReg(NewDstReg); MachineIRBuilder B(MI); if (HasCarryIn) { Register NewSrcReg = MRI.createGenericVirtualRegister(S32); MRI.setRegBank(NewSrcReg, AMDGPU::SGPRRegBank); B.buildZExt(NewSrcReg, MI.getOperand(4).getReg()); MI.getOperand(4).setReg(NewSrcReg); } MachineBasicBlock *MBB = MI.getParent(); B.setInsertPt(*MBB, std::next(MI.getIterator())); // If we had a constrained VCC result register, a copy was inserted to VCC // from SGPR. SmallVector<Register, 1> DefRegs(OpdMapper.getVRegs(0)); if (DefRegs.empty()) DefRegs.push_back(DstReg); B.buildTrunc(DefRegs[0], NewDstReg); return; } case AMDGPU::G_SELECT: { Register DstReg = MI.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); SmallVector<Register, 1> CondRegs(OpdMapper.getVRegs(1)); if (CondRegs.empty()) CondRegs.push_back(MI.getOperand(1).getReg()); else { assert(CondRegs.size() == 1); } const RegisterBank *CondBank = getRegBank(CondRegs[0], MRI, *TRI); if (CondBank == &AMDGPU::SGPRRegBank) { MachineIRBuilder B(MI); const LLT S32 = LLT::scalar(32); Register NewCondReg = MRI.createGenericVirtualRegister(S32); MRI.setRegBank(NewCondReg, AMDGPU::SGPRRegBank); MI.getOperand(1).setReg(NewCondReg); B.buildZExt(NewCondReg, CondRegs[0]); } if (DstTy.getSizeInBits() != 64) break; MachineIRBuilder B(MI); LLT HalfTy = getHalfSizedType(DstTy); SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0)); SmallVector<Register, 2> Src1Regs(OpdMapper.getVRegs(2)); SmallVector<Register, 2> Src2Regs(OpdMapper.getVRegs(3)); // All inputs are SGPRs, nothing special to do. 
if (DefRegs.empty()) { assert(Src1Regs.empty() && Src2Regs.empty()); break; } if (Src1Regs.empty()) split64BitValueForMapping(B, Src1Regs, HalfTy, MI.getOperand(2).getReg()); else { setRegsToType(MRI, Src1Regs, HalfTy); } if (Src2Regs.empty()) split64BitValueForMapping(B, Src2Regs, HalfTy, MI.getOperand(3).getReg()); else setRegsToType(MRI, Src2Regs, HalfTy); setRegsToType(MRI, DefRegs, HalfTy); B.buildSelect(DefRegs[0], CondRegs[0], Src1Regs[0], Src2Regs[0]); B.buildSelect(DefRegs[1], CondRegs[0], Src1Regs[1], Src2Regs[1]); MRI.setRegBank(DstReg, AMDGPU::VGPRRegBank); MI.eraseFromParent(); return; } case AMDGPU::G_BRCOND: { Register CondReg = MI.getOperand(0).getReg(); // FIXME: Should use legalizer helper, but should change bool ext type. const RegisterBank *CondBank = OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank; if (CondBank == &AMDGPU::SGPRRegBank) { MachineIRBuilder B(MI); const LLT S32 = LLT::scalar(32); Register NewCondReg = MRI.createGenericVirtualRegister(S32); MRI.setRegBank(NewCondReg, AMDGPU::SGPRRegBank); MI.getOperand(0).setReg(NewCondReg); B.buildZExt(NewCondReg, CondReg); return; } break; } case AMDGPU::G_AND: case AMDGPU::G_OR: case AMDGPU::G_XOR: { // 64-bit and is only available on the SALU, so split into 2 32-bit ops if // there is a VGPR input. 
Register DstReg = MI.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); if (DstTy.getSizeInBits() == 1) { const RegisterBank *DstBank = OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank; if (DstBank == &AMDGPU::VCCRegBank) break; MachineFunction *MF = MI.getParent()->getParent(); ApplyRegBankMapping ApplyBank(*this, MRI, DstBank); GISelObserverWrapper Observer(&ApplyBank); MachineIRBuilder B(MI); LegalizerHelper Helper(*MF, Observer, B); if (Helper.widenScalar(MI, 0, LLT::scalar(32)) != LegalizerHelper::Legalized) llvm_unreachable("widen scalar should have succeeded"); return; } if (DstTy.getSizeInBits() != 64) break; LLT HalfTy = getHalfSizedType(DstTy); SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0)); SmallVector<Register, 2> Src0Regs(OpdMapper.getVRegs(1)); SmallVector<Register, 2> Src1Regs(OpdMapper.getVRegs(2)); // All inputs are SGPRs, nothing special to do. if (DefRegs.empty()) { assert(Src0Regs.empty() && Src1Regs.empty()); break; } assert(DefRegs.size() == 2); assert(Src0Regs.size() == Src1Regs.size() && (Src0Regs.empty() || Src0Regs.size() == 2)); // Depending on where the source registers came from, the generic code may // have decided to split the inputs already or not. If not, we still need to // extract the values. 
MachineIRBuilder B(MI); if (Src0Regs.empty()) split64BitValueForMapping(B, Src0Regs, HalfTy, MI.getOperand(1).getReg()); else setRegsToType(MRI, Src0Regs, HalfTy); if (Src1Regs.empty()) split64BitValueForMapping(B, Src1Regs, HalfTy, MI.getOperand(2).getReg()); else setRegsToType(MRI, Src1Regs, HalfTy); setRegsToType(MRI, DefRegs, HalfTy); B.buildInstr(Opc) .addDef(DefRegs[0]) .addUse(Src0Regs[0]) .addUse(Src1Regs[0]); B.buildInstr(Opc) .addDef(DefRegs[1]) .addUse(Src0Regs[1]) .addUse(Src1Regs[1]); MRI.setRegBank(DstReg, AMDGPU::VGPRRegBank); MI.eraseFromParent(); return; } case AMDGPU::G_ADD: case AMDGPU::G_SUB: case AMDGPU::G_MUL: case AMDGPU::G_SHL: case AMDGPU::G_LSHR: case AMDGPU::G_ASHR: { Register DstReg = MI.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); // 16-bit operations are VALU only, but can be promoted to 32-bit SALU. // Packed 16-bit operations need to be scalarized and promoted. if (DstTy != LLT::scalar(16) && DstTy != LLT::vector(2, 16)) break; const RegisterBank *DstBank = OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank; if (DstBank == &AMDGPU::VGPRRegBank) break; const LLT S32 = LLT::scalar(32); MachineBasicBlock *MBB = MI.getParent(); MachineFunction *MF = MBB->getParent(); MachineIRBuilder B(MI); ApplyRegBankMapping ApplySALU(*this, MRI, &AMDGPU::SGPRRegBank); GISelObserverWrapper Observer(&ApplySALU); if (DstTy.isVector()) { B.setChangeObserver(Observer); Register WideSrc0Lo, WideSrc0Hi; Register WideSrc1Lo, WideSrc1Hi; std::tie(WideSrc0Lo, WideSrc0Hi) = unpackV2S16ToS32(B, MI.getOperand(1).getReg(), AMDGPU::G_ANYEXT); std::tie(WideSrc1Lo, WideSrc1Hi) = unpackV2S16ToS32(B, MI.getOperand(2).getReg(), AMDGPU::G_ANYEXT); auto Lo = B.buildInstr(MI.getOpcode(), {S32}, {WideSrc0Lo, WideSrc1Lo}); auto Hi = B.buildInstr(MI.getOpcode(), {S32}, {WideSrc0Hi, WideSrc1Hi}); B.buildBuildVectorTrunc(DstReg, {Lo.getReg(0), Hi.getReg(0)}); MI.eraseFromParent(); } else { LegalizerHelper Helper(*MF, Observer, B); if 
(Helper.widenScalar(MI, 0, S32) != LegalizerHelper::Legalized) llvm_unreachable("widen scalar should have succeeded"); // FIXME: s16 shift amounts should be legal. if (Opc == AMDGPU::G_SHL || Opc == AMDGPU::G_LSHR || Opc == AMDGPU::G_ASHR) { B.setInsertPt(*MBB, MI.getIterator()); if (Helper.widenScalar(MI, 1, S32) != LegalizerHelper::Legalized) llvm_unreachable("widen scalar should have succeeded"); } } return; } case AMDGPU::G_SMIN: case AMDGPU::G_SMAX: case AMDGPU::G_UMIN: case AMDGPU::G_UMAX: { Register DstReg = MI.getOperand(0).getReg(); const RegisterBank *DstBank = OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank; if (DstBank == &AMDGPU::VGPRRegBank) break; MachineFunction *MF = MI.getParent()->getParent(); MachineIRBuilder B(MI); // Turn scalar min/max into a compare and select. LLT Ty = MRI.getType(DstReg); const LLT S32 = LLT::scalar(32); const LLT S16 = LLT::scalar(16); const LLT V2S16 = LLT::vector(2, 16); if (Ty == V2S16) { ApplyRegBankMapping ApplySALU(*this, MRI, &AMDGPU::SGPRRegBank); GISelObserverWrapper Observer(&ApplySALU); B.setChangeObserver(Observer); // Need to widen to s32, and expand as cmp + select, and avoid producing // illegal vector extends or unmerges that would need further // legalization. // // TODO: Should we just readfirstlane? That should probably be handled // with a UniformVGPR register bank that wouldn't need special // consideration here. 
Register Dst = MI.getOperand(0).getReg(); Register Src0 = MI.getOperand(1).getReg(); Register Src1 = MI.getOperand(2).getReg(); Register WideSrc0Lo, WideSrc0Hi; Register WideSrc1Lo, WideSrc1Hi; unsigned ExtendOp = minMaxToExtend(MI.getOpcode()); std::tie(WideSrc0Lo, WideSrc0Hi) = unpackV2S16ToS32(B, Src0, ExtendOp); std::tie(WideSrc1Lo, WideSrc1Hi) = unpackV2S16ToS32(B, Src1, ExtendOp); Register Lo = MRI.createGenericVirtualRegister(S32); Register Hi = MRI.createGenericVirtualRegister(S32); const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode()); buildExpandedScalarMinMax(B, Pred, Lo, WideSrc0Lo, WideSrc1Lo); buildExpandedScalarMinMax(B, Pred, Hi, WideSrc0Hi, WideSrc1Hi); B.buildBuildVectorTrunc(Dst, {Lo, Hi}); MI.eraseFromParent(); } else if (Ty == S16) { ApplyRegBankMapping ApplySALU(*this, MRI, &AMDGPU::SGPRRegBank); GISelObserverWrapper Observer(&ApplySALU); LegalizerHelper Helper(*MF, Observer, B); // Need to widen to s32, and expand as cmp + select. if (Helper.widenScalar(MI, 0, S32) != LegalizerHelper::Legalized) llvm_unreachable("widenScalar should have succeeded"); // FIXME: This is relying on widenScalar leaving MI in place. lowerScalarMinMax(B, MI); } else lowerScalarMinMax(B, MI); return; } case AMDGPU::G_SEXT_INREG: { SmallVector<Register, 2> SrcRegs(OpdMapper.getVRegs(1)); if (SrcRegs.empty()) break; // Nothing to repair const LLT S32 = LLT::scalar(32); MachineIRBuilder B(MI); ApplyRegBankMapping O(*this, MRI, &AMDGPU::VGPRRegBank); GISelObserverWrapper Observer(&O); B.setChangeObserver(Observer); // Don't use LegalizerHelper's narrowScalar. It produces unwanted G_SEXTs // we would need to further expand, and doesn't let us directly set the // result registers. SmallVector<Register, 2> DstRegs(OpdMapper.getVRegs(0)); int Amt = MI.getOperand(2).getImm(); if (Amt <= 32) { if (Amt == 32) { // The low bits are unchanged. B.buildCopy(DstRegs[0], SrcRegs[0]); } else { // Extend in the low bits and propagate the sign bit to the high half. 
B.buildSExtInReg(DstRegs[0], SrcRegs[0], Amt); } B.buildAShr(DstRegs[1], DstRegs[0], B.buildConstant(S32, 31)); } else { // The low bits are unchanged, and extend in the high bits. B.buildCopy(DstRegs[0], SrcRegs[0]); B.buildSExtInReg(DstRegs[1], DstRegs[0], Amt - 32); } Register DstReg = MI.getOperand(0).getReg(); MRI.setRegBank(DstReg, AMDGPU::VGPRRegBank); MI.eraseFromParent(); return; } case AMDGPU::G_CTPOP: case AMDGPU::G_CTLZ_ZERO_UNDEF: case AMDGPU::G_CTTZ_ZERO_UNDEF: { MachineIRBuilder B(MI); MachineFunction &MF = B.getMF(); const RegisterBank *DstBank = OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank; if (DstBank == &AMDGPU::SGPRRegBank) break; Register SrcReg = MI.getOperand(1).getReg(); const LLT S32 = LLT::scalar(32); LLT Ty = MRI.getType(SrcReg); if (Ty == S32) break; ApplyRegBankMapping ApplyVALU(*this, MRI, &AMDGPU::VGPRRegBank); GISelObserverWrapper Observer(&ApplyVALU); LegalizerHelper Helper(MF, Observer, B); if (Helper.narrowScalar(MI, 1, S32) != LegalizerHelper::Legalized) llvm_unreachable("narrowScalar should have succeeded"); return; } case AMDGPU::G_SEXT: case AMDGPU::G_ZEXT: case AMDGPU::G_ANYEXT: { Register SrcReg = MI.getOperand(1).getReg(); LLT SrcTy = MRI.getType(SrcReg); const bool Signed = Opc == AMDGPU::G_SEXT; assert(empty(OpdMapper.getVRegs(1))); MachineIRBuilder B(MI); const RegisterBank *SrcBank = OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank; Register DstReg = MI.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); if (DstTy.isScalar() && SrcBank != &AMDGPU::SGPRRegBank && SrcBank != &AMDGPU::VCCRegBank && // FIXME: Should handle any type that round to s64 when irregular // breakdowns supported. DstTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() <= 32) { SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0)); // Extend to 32-bit, and then extend the low half. 
if (Signed) { // TODO: Should really be buildSExtOrCopy B.buildSExtOrTrunc(DefRegs[0], SrcReg); } else if (Opc == AMDGPU::G_ZEXT) { B.buildZExtOrTrunc(DefRegs[0], SrcReg); } else { B.buildAnyExtOrTrunc(DefRegs[0], SrcReg); } extendLow32IntoHigh32(B, DefRegs[1], DefRegs[0], Opc, *SrcBank); MRI.setRegBank(DstReg, *SrcBank); MI.eraseFromParent(); return; } if (SrcTy != LLT::scalar(1)) return; // It is not legal to have a legalization artifact with a VCC source. Rather // than introducing a copy, insert the select we would have to select the // copy to. if (SrcBank == &AMDGPU::VCCRegBank) { SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0)); const RegisterBank *DstBank = &AMDGPU::VGPRRegBank; unsigned DstSize = DstTy.getSizeInBits(); // 64-bit select is SGPR only const bool UseSel64 = DstSize > 32 && SrcBank->getID() == AMDGPU::SGPRRegBankID; // TODO: Should s16 select be legal? LLT SelType = UseSel64 ? LLT::scalar(64) : LLT::scalar(32); auto True = B.buildConstant(SelType, Signed ? -1 : 1); auto False = B.buildConstant(SelType, 0); MRI.setRegBank(True.getReg(0), *DstBank); MRI.setRegBank(False.getReg(0), *DstBank); MRI.setRegBank(DstReg, *DstBank); if (DstSize > 32) { B.buildSelect(DefRegs[0], SrcReg, True, False); extendLow32IntoHigh32(B, DefRegs[1], DefRegs[0], Opc, *SrcBank, true); } else if (DstSize < 32) { auto Sel = B.buildSelect(SelType, SrcReg, True, False); MRI.setRegBank(Sel.getReg(0), *DstBank); B.buildTrunc(DstReg, Sel); } else { B.buildSelect(DstReg, SrcReg, True, False); } MI.eraseFromParent(); return; } break; } case AMDGPU::G_BUILD_VECTOR: case AMDGPU::G_BUILD_VECTOR_TRUNC: { Register DstReg = MI.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); if (DstTy != LLT::vector(2, 16)) break; assert(MI.getNumOperands() == 3 && OpdMapper.getVRegs(0).empty()); substituteSimpleCopyRegs(OpdMapper, 1); substituteSimpleCopyRegs(OpdMapper, 2); const RegisterBank *DstBank = OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank; if (DstBank 
== &AMDGPU::SGPRRegBank) break; // Can use S_PACK_* instructions. MachineIRBuilder B(MI); Register Lo = MI.getOperand(1).getReg(); Register Hi = MI.getOperand(2).getReg(); const LLT S32 = LLT::scalar(32); const RegisterBank *BankLo = OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank; const RegisterBank *BankHi = OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank; Register ZextLo; Register ShiftHi; if (Opc == AMDGPU::G_BUILD_VECTOR) { ZextLo = B.buildZExt(S32, Lo).getReg(0); MRI.setRegBank(ZextLo, *BankLo); Register ZextHi = B.buildZExt(S32, Hi).getReg(0); MRI.setRegBank(ZextHi, *BankHi); auto ShiftAmt = B.buildConstant(S32, 16); MRI.setRegBank(ShiftAmt.getReg(0), *BankHi); ShiftHi = B.buildShl(S32, ZextHi, ShiftAmt).getReg(0); MRI.setRegBank(ShiftHi, *BankHi); } else { Register MaskLo = B.buildConstant(S32, 0xffff).getReg(0); MRI.setRegBank(MaskLo, *BankLo); auto ShiftAmt = B.buildConstant(S32, 16); MRI.setRegBank(ShiftAmt.getReg(0), *BankHi); ShiftHi = B.buildShl(S32, Hi, ShiftAmt).getReg(0); MRI.setRegBank(ShiftHi, *BankHi); ZextLo = B.buildAnd(S32, Lo, MaskLo).getReg(0); MRI.setRegBank(ZextLo, *BankLo); } auto Or = B.buildOr(S32, ZextLo, ShiftHi); MRI.setRegBank(Or.getReg(0), *DstBank); B.buildBitcast(DstReg, Or); MI.eraseFromParent(); return; } case AMDGPU::G_EXTRACT_VECTOR_ELT: { SmallVector<Register, 2> DstRegs(OpdMapper.getVRegs(0)); assert(OpdMapper.getVRegs(1).empty() && OpdMapper.getVRegs(2).empty()); Register DstReg = MI.getOperand(0).getReg(); Register SrcReg = MI.getOperand(1).getReg(); const LLT S32 = LLT::scalar(32); LLT DstTy = MRI.getType(DstReg); LLT SrcTy = MRI.getType(SrcReg); if (foldExtractEltToCmpSelect(MI, MRI, OpdMapper)) return; MachineIRBuilder B(MI); const ValueMapping &DstMapping = OpdMapper.getInstrMapping().getOperandMapping(0); const RegisterBank *DstBank = DstMapping.BreakDown[0].RegBank; const RegisterBank *SrcBank = OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank; const 
RegisterBank *IdxBank = OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank; Register BaseIdxReg; unsigned ConstOffset; MachineInstr *OffsetDef; std::tie(BaseIdxReg, ConstOffset, OffsetDef) = AMDGPU::getBaseWithConstantOffset(MRI, MI.getOperand(2).getReg()); // See if the index is an add of a constant which will be foldable by moving // the base register of the index later if this is going to be executed in a // waterfall loop. This is essentially to reassociate the add of a constant // with the readfirstlane. bool ShouldMoveIndexIntoLoop = IdxBank != &AMDGPU::SGPRRegBank && ConstOffset > 0 && ConstOffset < SrcTy.getNumElements(); // Move the base register. We'll re-insert the add later. if (ShouldMoveIndexIntoLoop) MI.getOperand(2).setReg(BaseIdxReg); // If this is a VGPR result only because the index was a VGPR result, the // actual indexing will be done on the SGPR source vector, which will // produce a scalar result. We need to copy to the VGPR result inside the // waterfall loop. const bool NeedCopyToVGPR = DstBank == &AMDGPU::VGPRRegBank && SrcBank == &AMDGPU::SGPRRegBank; if (DstRegs.empty()) { applyDefaultMapping(OpdMapper); executeInWaterfallLoop(MI, MRI, { 2 }); if (NeedCopyToVGPR) { // We don't want a phi for this temporary reg. Register TmpReg = MRI.createGenericVirtualRegister(DstTy); MRI.setRegBank(TmpReg, AMDGPU::SGPRRegBank); MI.getOperand(0).setReg(TmpReg); B.setInsertPt(*MI.getParent(), ++MI.getIterator()); // Use a v_mov_b32 here to make the exec dependency explicit. buildVCopy(B, DstReg, TmpReg); } // Re-insert the constant offset add inside the waterfall loop. if (ShouldMoveIndexIntoLoop) reinsertVectorIndexAdd(B, MI, 2, ConstOffset); return; } assert(DstTy.getSizeInBits() == 64); LLT Vec32 = LLT::vector(2 * SrcTy.getNumElements(), 32); auto CastSrc = B.buildBitcast(Vec32, SrcReg); auto One = B.buildConstant(S32, 1); MachineBasicBlock::iterator MII = MI.getIterator(); // Split the vector index into 32-bit pieces. 
Prepare to move all of the // new instructions into a waterfall loop if necessary. // // Don't put the bitcast or constant in the loop. MachineInstrSpan Span(MII, &B.getMBB()); // Compute 32-bit element indices, (2 * OrigIdx, 2 * OrigIdx + 1). auto IdxLo = B.buildShl(S32, BaseIdxReg, One); auto IdxHi = B.buildAdd(S32, IdxLo, One); auto Extract0 = B.buildExtractVectorElement(DstRegs[0], CastSrc, IdxLo); auto Extract1 = B.buildExtractVectorElement(DstRegs[1], CastSrc, IdxHi); MRI.setRegBank(DstReg, *DstBank); MRI.setRegBank(CastSrc.getReg(0), *SrcBank); MRI.setRegBank(One.getReg(0), AMDGPU::SGPRRegBank); MRI.setRegBank(IdxLo.getReg(0), AMDGPU::SGPRRegBank); MRI.setRegBank(IdxHi.getReg(0), AMDGPU::SGPRRegBank); SmallSet<Register, 4> OpsToWaterfall; if (!collectWaterfallOperands(OpsToWaterfall, MI, MRI, { 2 })) { MI.eraseFromParent(); return; } // Remove the original instruction to avoid potentially confusing the // waterfall loop logic. B.setInstr(*Span.begin()); MI.eraseFromParent(); executeInWaterfallLoop(B, make_range(Span.begin(), Span.end()), OpsToWaterfall, MRI); if (NeedCopyToVGPR) { MachineBasicBlock *LoopBB = Extract1->getParent(); Register TmpReg0 = MRI.createGenericVirtualRegister(S32); Register TmpReg1 = MRI.createGenericVirtualRegister(S32); MRI.setRegBank(TmpReg0, AMDGPU::SGPRRegBank); MRI.setRegBank(TmpReg1, AMDGPU::SGPRRegBank); Extract0->getOperand(0).setReg(TmpReg0); Extract1->getOperand(0).setReg(TmpReg1); B.setInsertPt(*LoopBB, ++Extract1->getIterator()); buildVCopy(B, DstRegs[0], TmpReg0); buildVCopy(B, DstRegs[1], TmpReg1); } if (ShouldMoveIndexIntoLoop) reinsertVectorIndexAdd(B, *IdxLo, 1, ConstOffset); return; } case AMDGPU::G_INSERT_VECTOR_ELT: { SmallVector<Register, 2> InsRegs(OpdMapper.getVRegs(2)); Register DstReg = MI.getOperand(0).getReg(); LLT VecTy = MRI.getType(DstReg); assert(OpdMapper.getVRegs(0).empty()); assert(OpdMapper.getVRegs(3).empty()); if (substituteSimpleCopyRegs(OpdMapper, 1)) MRI.setType(MI.getOperand(1).getReg(), 
VecTy); if (foldInsertEltToCmpSelect(MI, MRI, OpdMapper)) return; const RegisterBank *IdxBank = OpdMapper.getInstrMapping().getOperandMapping(3).BreakDown[0].RegBank; Register SrcReg = MI.getOperand(1).getReg(); Register InsReg = MI.getOperand(2).getReg(); LLT InsTy = MRI.getType(InsReg); (void)InsTy; Register BaseIdxReg; unsigned ConstOffset; MachineInstr *OffsetDef; std::tie(BaseIdxReg, ConstOffset, OffsetDef) = AMDGPU::getBaseWithConstantOffset(MRI, MI.getOperand(3).getReg()); // See if the index is an add of a constant which will be foldable by moving // the base register of the index later if this is going to be executed in a // waterfall loop. This is essentially to reassociate the add of a constant // with the readfirstlane. bool ShouldMoveIndexIntoLoop = IdxBank != &AMDGPU::SGPRRegBank && ConstOffset > 0 && ConstOffset < VecTy.getNumElements(); // Move the base register. We'll re-insert the add later. if (ShouldMoveIndexIntoLoop) MI.getOperand(3).setReg(BaseIdxReg); if (InsRegs.empty()) { executeInWaterfallLoop(MI, MRI, { 3 }); // Re-insert the constant offset add inside the waterfall loop. if (ShouldMoveIndexIntoLoop) { MachineIRBuilder B(MI); reinsertVectorIndexAdd(B, MI, 3, ConstOffset); } return; } assert(InsTy.getSizeInBits() == 64); const LLT S32 = LLT::scalar(32); LLT Vec32 = LLT::vector(2 * VecTy.getNumElements(), 32); MachineIRBuilder B(MI); auto CastSrc = B.buildBitcast(Vec32, SrcReg); auto One = B.buildConstant(S32, 1); // Split the vector index into 32-bit pieces. Prepare to move all of the // new instructions into a waterfall loop if necessary. // // Don't put the bitcast or constant in the loop. MachineInstrSpan Span(MachineBasicBlock::iterator(&MI), &B.getMBB()); // Compute 32-bit element indices, (2 * OrigIdx, 2 * OrigIdx + 1). 
auto IdxLo = B.buildShl(S32, BaseIdxReg, One); auto IdxHi = B.buildAdd(S32, IdxLo, One); auto InsLo = B.buildInsertVectorElement(Vec32, CastSrc, InsRegs[0], IdxLo); auto InsHi = B.buildInsertVectorElement(Vec32, InsLo, InsRegs[1], IdxHi); const RegisterBank *DstBank = OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank; const RegisterBank *SrcBank = OpdMapper.getInstrMapping().getOperandMapping(1).BreakDown[0].RegBank; const RegisterBank *InsSrcBank = OpdMapper.getInstrMapping().getOperandMapping(2).BreakDown[0].RegBank; MRI.setRegBank(InsReg, *InsSrcBank); MRI.setRegBank(CastSrc.getReg(0), *SrcBank); MRI.setRegBank(InsLo.getReg(0), *DstBank); MRI.setRegBank(InsHi.getReg(0), *DstBank); MRI.setRegBank(One.getReg(0), AMDGPU::SGPRRegBank); MRI.setRegBank(IdxLo.getReg(0), AMDGPU::SGPRRegBank); MRI.setRegBank(IdxHi.getReg(0), AMDGPU::SGPRRegBank); SmallSet<Register, 4> OpsToWaterfall; if (!collectWaterfallOperands(OpsToWaterfall, MI, MRI, { 3 })) { B.setInsertPt(B.getMBB(), MI); B.buildBitcast(DstReg, InsHi); MI.eraseFromParent(); return; } B.setInstr(*Span.begin()); MI.eraseFromParent(); // Figure out the point after the waterfall loop before mangling the control // flow. executeInWaterfallLoop(B, make_range(Span.begin(), Span.end()), OpsToWaterfall, MRI); // The insertion point is now right after the original instruction. // // Keep the bitcast to the original vector type out of the loop. Doing this // saved an extra phi we don't need inside the loop. B.buildBitcast(DstReg, InsHi); // Re-insert the constant offset add inside the waterfall loop. 
if (ShouldMoveIndexIntoLoop) reinsertVectorIndexAdd(B, *IdxLo, 1, ConstOffset); return; } case AMDGPU::G_AMDGPU_BUFFER_LOAD: case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT: case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT: case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE: case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE: case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT: case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT_D16: case AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT: case AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT_D16: case AMDGPU::G_AMDGPU_BUFFER_STORE: case AMDGPU::G_AMDGPU_BUFFER_STORE_BYTE: case AMDGPU::G_AMDGPU_BUFFER_STORE_SHORT: case AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT: case AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT_D16: case AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT: case AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT_D16: { applyDefaultMapping(OpdMapper); executeInWaterfallLoop(MI, MRI, {1, 4}); return; } case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC: { applyDefaultMapping(OpdMapper); executeInWaterfallLoop(MI, MRI, {2, 5}); return; } case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP: { applyDefaultMapping(OpdMapper); executeInWaterfallLoop(MI, MRI, {3, 6}); return; } case AMDGPU::G_AMDGPU_S_BUFFER_LOAD: { applyMappingSBufferLoad(OpdMapper); return; } case AMDGPU::G_INTRINSIC: { switch (MI.getIntrinsicID()) { case Intrinsic::amdgcn_readlane: { substituteSimpleCopyRegs(OpdMapper, 2); assert(OpdMapper.getVRegs(0).empty()); assert(OpdMapper.getVRegs(3).empty()); // Make sure the index is an SGPR. It doesn't make sense to run this in a // waterfall loop, so assume it's a uniform value. 
constrainOpWithReadfirstlane(MI, MRI, 3); // Index return; } case Intrinsic::amdgcn_writelane: { assert(OpdMapper.getVRegs(0).empty()); assert(OpdMapper.getVRegs(2).empty()); assert(OpdMapper.getVRegs(3).empty()); substituteSimpleCopyRegs(OpdMapper, 4); // VGPR input val constrainOpWithReadfirstlane(MI, MRI, 2); // Source value constrainOpWithReadfirstlane(MI, MRI, 3); // Index return; } case Intrinsic::amdgcn_ballot: case Intrinsic::amdgcn_interp_p1: case Intrinsic::amdgcn_interp_p2: case Intrinsic::amdgcn_interp_mov: case Intrinsic::amdgcn_interp_p1_f16: case Intrinsic::amdgcn_interp_p2_f16: { applyDefaultMapping(OpdMapper); // Readlane for m0 value, which is always the last operand. // FIXME: Should this be a waterfall loop instead? constrainOpWithReadfirstlane(MI, MRI, MI.getNumOperands() - 1); // Index return; } case Intrinsic::amdgcn_permlane16: case Intrinsic::amdgcn_permlanex16: { // Doing a waterfall loop over these wouldn't make any sense. substituteSimpleCopyRegs(OpdMapper, 2); substituteSimpleCopyRegs(OpdMapper, 3); constrainOpWithReadfirstlane(MI, MRI, 4); constrainOpWithReadfirstlane(MI, MRI, 5); return; } case Intrinsic::amdgcn_sbfe: applyMappingBFEIntrinsic(OpdMapper, true); return; case Intrinsic::amdgcn_ubfe: applyMappingBFEIntrinsic(OpdMapper, false); return; } break; } case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD: case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: { const AMDGPU::RsrcIntrinsic *RSrcIntrin = AMDGPU::lookupRsrcIntrinsic(MI.getIntrinsicID()); assert(RSrcIntrin && RSrcIntrin->IsImage); // Non-images can have complications from operands that allow both SGPR // and VGPR. For now it's too complicated to figure out the final opcode // to derive the register bank from the MCInstrDesc. 
applyMappingImage(MI, OpdMapper, MRI, RSrcIntrin->RsrcArg); return; } case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: { auto IntrID = MI.getIntrinsicID(); switch (IntrID) { case Intrinsic::amdgcn_ds_ordered_add: case Intrinsic::amdgcn_ds_ordered_swap: { // This is only allowed to execute with 1 lane, so readfirstlane is safe. assert(OpdMapper.getVRegs(0).empty()); substituteSimpleCopyRegs(OpdMapper, 3); constrainOpWithReadfirstlane(MI, MRI, 2); // M0 return; } case Intrinsic::amdgcn_ds_gws_init: case Intrinsic::amdgcn_ds_gws_barrier: case Intrinsic::amdgcn_ds_gws_sema_br: { // Only the first lane is executes, so readfirstlane is safe. substituteSimpleCopyRegs(OpdMapper, 1); constrainOpWithReadfirstlane(MI, MRI, 2); // M0 return; } case Intrinsic::amdgcn_ds_gws_sema_v: case Intrinsic::amdgcn_ds_gws_sema_p: case Intrinsic::amdgcn_ds_gws_sema_release_all: { // Only the first lane is executes, so readfirstlane is safe. constrainOpWithReadfirstlane(MI, MRI, 1); // M0 return; } case Intrinsic::amdgcn_ds_append: case Intrinsic::amdgcn_ds_consume: { constrainOpWithReadfirstlane(MI, MRI, 2); // M0 return; } case Intrinsic::amdgcn_s_sendmsg: case Intrinsic::amdgcn_s_sendmsghalt: { // FIXME: Should this use a waterfall loop? constrainOpWithReadfirstlane(MI, MRI, 2); // M0 return; } case Intrinsic::amdgcn_s_setreg: { constrainOpWithReadfirstlane(MI, MRI, 2); return; } default: { if (const AMDGPU::RsrcIntrinsic *RSrcIntrin = AMDGPU::lookupRsrcIntrinsic(IntrID)) { // Non-images can have complications from operands that allow both SGPR // and VGPR. For now it's too complicated to figure out the final opcode // to derive the register bank from the MCInstrDesc. 
      if (RSrcIntrin->IsImage) {
          applyMappingImage(MI, OpdMapper, MRI, RSrcIntrin->RsrcArg);
          return;
        }
      }
      break;
    }
    }
    break;
  }
  case AMDGPU::G_LOAD:
  case AMDGPU::G_ZEXTLOAD:
  case AMDGPU::G_SEXTLOAD: {
    // applyMappingLoad returns true when it fully handled the load; otherwise
    // fall through to the default mapping below.
    if (applyMappingLoad(MI, OpdMapper, MRI))
      return;
    break;
  }
  case AMDGPU::G_DYN_STACKALLOC:
    applyMappingDynStackAlloc(MI, OpdMapper, MRI);
    return;
  default:
    break;
  }

  return applyDefaultMapping(OpdMapper);
}

/// Return true if every register operand of \p MI that already has an assigned
/// register bank is in the SGPR bank. Operands with no bank assigned yet are
/// ignored (treated as compatible with a scalar mapping).
bool AMDGPURegisterBankInfo::isSALUMapping(const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg())
      continue;

    Register Reg = MI.getOperand(i).getReg();
    if (const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI)) {
      if (Bank->getID() != AMDGPU::SGPRRegBankID)
        return false;
    }
  }
  return true;
}

/// Produce a mapping where every register operand is assigned to the SGPR
/// bank (a fully scalar / SALU instruction mapping). Non-register operands
/// keep a null mapping.
const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getDefaultMappingSOP(const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &SrcOp = MI.getOperand(i);
    if (!SrcOp.isReg())
      continue;

    unsigned Size = getSizeInBits(SrcOp.getReg(), MRI, *TRI);
    OpdsMapping[i] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
  }
  return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping),
                               MI.getNumOperands());
}

/// Produce a mapping where every register operand is assigned to the VGPR
/// bank, except 1-bit (condition) values which go to VCC.
const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getDefaultMappingVOP(const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands());

  // Even though we technically could use SGPRs, this would require knowledge of
  // the constant bus restriction. Force all sources to VGPR (except for VCC).
// // TODO: Unary ops are trivially OK, so accept SGPRs? for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { const MachineOperand &Src = MI.getOperand(i); if (!Src.isReg()) continue; unsigned Size = getSizeInBits(Src.getReg(), MRI, *TRI); unsigned BankID = Size == 1 ? AMDGPU::VCCRegBankID : AMDGPU::VGPRRegBankID; OpdsMapping[i] = AMDGPU::getValueMapping(BankID, Size); } return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping), MI.getNumOperands()); } const RegisterBankInfo::InstructionMapping & AMDGPURegisterBankInfo::getDefaultMappingAllVGPR(const MachineInstr &MI) const { const MachineFunction &MF = *MI.getParent()->getParent(); const MachineRegisterInfo &MRI = MF.getRegInfo(); SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands()); for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { const MachineOperand &Op = MI.getOperand(I); if (!Op.isReg()) continue; unsigned Size = getSizeInBits(Op.getReg(), MRI, *TRI); OpdsMapping[I] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size); } return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping), MI.getNumOperands()); } const RegisterBankInfo::InstructionMapping & AMDGPURegisterBankInfo::getImageMapping(const MachineRegisterInfo &MRI, const MachineInstr &MI, int RsrcIdx) const { // The reported argument index is relative to the IR intrinsic call arguments, // so we need to shift by the number of defs and the intrinsic ID. RsrcIdx += MI.getNumExplicitDefs() + 1; const int NumOps = MI.getNumOperands(); SmallVector<const ValueMapping *, 8> OpdsMapping(NumOps); // TODO: Should packed/unpacked D16 difference be reported here as part of // the value mapping? 
  for (int I = 0; I != NumOps; ++I) {
    if (!MI.getOperand(I).isReg())
      continue;

    Register OpReg = MI.getOperand(I).getReg();

    // We replace some dead address operands with $noreg
    if (!OpReg)
      continue;

    unsigned Size = getSizeInBits(OpReg, MRI, *TRI);

    // FIXME: Probably need a new intrinsic register bank searchable table to
    // handle arbitrary intrinsics easily.
    //
    // If this has a sampler, it immediately follows rsrc.
    const bool MustBeSGPR = I == RsrcIdx || I == RsrcIdx + 1;

    if (MustBeSGPR) {
      // This must be an SGPR, but report whatever bank it currently has as
      // legal; applyMapping will repair a divergent value with a waterfall
      // loop.
      unsigned NewBank = getRegBankID(OpReg, MRI, AMDGPU::SGPRRegBankID);
      OpdsMapping[I] = AMDGPU::getValueMapping(NewBank, Size);
    } else {
      // Some operands must be VGPR, and these are easy to copy to.
      OpdsMapping[I] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
    }
  }

  return getInstructionMapping(1, 1, getOperandsMapping(OpdsMapping), NumOps);
}

/// Return the mapping for a pointer argument: VGPR unless MUBUF addressing of
/// a flat/global address space makes an SGPR base possible.
const RegisterBankInfo::ValueMapping *
AMDGPURegisterBankInfo::getValueMappingForPtr(const MachineRegisterInfo &MRI,
                                              Register PtrReg) const {
  LLT PtrTy = MRI.getType(PtrReg);
  unsigned Size = PtrTy.getSizeInBits();
  if (Subtarget.useFlatForGlobal() ||
      !AMDGPU::isFlatGlobalAddrSpace(PtrTy.getAddressSpace()))
    return AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);

  // If we're using MUBUF instructions for global memory, an SGPR base register
  // is possible. Otherwise this needs to be a VGPR.
  // Keep whatever bank the pointer already has (SGPR base is acceptable for
  // MUBUF global access).
  const RegisterBank *PtrBank = getRegBank(PtrReg, MRI, *TRI);
  return AMDGPU::getValueMapping(PtrBank->getID(), Size);
}

/// Compute the mapping for a load: a scalar (SMRD) mapping when the pointer is
/// uniform (SGPR bank) and a scalar load is legal for this MI; otherwise a
/// VGPR result with the pointer bank chosen by the addressing mode.
const RegisterBankInfo::InstructionMapping &
AMDGPURegisterBankInfo::getInstrMappingForLoad(const MachineInstr &MI) const {

  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  SmallVector<const ValueMapping*, 2> OpdsMapping(2);
  unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
  Register PtrReg = MI.getOperand(1).getReg();
  LLT PtrTy = MRI.getType(PtrReg);
  unsigned AS = PtrTy.getAddressSpace();
  unsigned PtrSize = PtrTy.getSizeInBits();

  const ValueMapping *ValMapping;
  const ValueMapping *PtrMapping;

  const RegisterBank *PtrBank = getRegBank(PtrReg, MRI, *TRI);

  if (PtrBank == &AMDGPU::SGPRRegBank && AMDGPU::isFlatGlobalAddrSpace(AS)) {
    if (isScalarLoadLegal(MI)) {
      // We have a uniform instruction so we want to use an SMRD load
      ValMapping = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size);
      PtrMapping = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, PtrSize);
    } else {
      ValMapping = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);

      // If we're using MUBUF instructions for global memory, an SGPR base
      // register is possible. Otherwise this needs to be a VGPR.
      unsigned PtrBankID = Subtarget.useFlatForGlobal() ?
        AMDGPU::VGPRRegBankID : AMDGPU::SGPRRegBankID;

      PtrMapping = AMDGPU::getValueMapping(PtrBankID, PtrSize);
    }
  } else {
    ValMapping = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
    PtrMapping = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, PtrSize);
  }

  OpdsMapping[0] = ValMapping;
  OpdsMapping[1] = PtrMapping;
  const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
      1, 1, getOperandsMapping(OpdsMapping), MI.getNumOperands());
  return Mapping; // FIXME: Do we want to add a mapping for FLAT load, or should
                  // we just // handle that during instruction selection?
}

/// Return the ID of the bank already assigned to \p Reg, or \p Default when no
/// bank has been assigned yet.
unsigned
AMDGPURegisterBankInfo::getRegBankID(Register Reg,
                                     const MachineRegisterInfo &MRI,
                                     unsigned Default) const {
  const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI);
  return Bank ? Bank->getID() : Default;
}

/// Combine two bank IDs: SGPR only if both inputs are SGPR, otherwise VGPR.
static unsigned regBankUnion(unsigned RB0, unsigned RB1) {
  return (RB0 == AMDGPU::SGPRRegBankID && RB1 == AMDGPU::SGPRRegBankID) ?
    AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID;
}

/// Like regBankUnion, but for boolean values: -1 means "no bank decided yet",
/// and VCC is sticky (any VCC input forces a VCC result).
static int regBankBoolUnion(int RB0, int RB1) {
  if (RB0 == -1)
    return RB1;
  if (RB1 == -1)
    return RB0;

  // vcc, vcc -> vcc
  // vcc, sgpr -> vcc
  // vcc, vgpr -> vcc
  if (RB0 == AMDGPU::VCCRegBankID || RB1 == AMDGPU::VCCRegBankID)
    return AMDGPU::VCCRegBankID;

  // sgpr, sgpr -> sgpr; anything involving vgpr -> vgpr
  return regBankUnion(RB0, RB1);
}

/// Mapping for an operand that must ultimately be an SGPR. The current bank is
/// reported as legal even if it is VGPR; applyMapping repairs that case with a
/// waterfall loop.
const RegisterBankInfo::ValueMapping *
AMDGPURegisterBankInfo::getSGPROpMapping(Register Reg,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI) const {
  // Lie and claim anything is legal, even though this needs to be an SGPR
  // applyMapping will have to deal with it as a waterfall loop.
  unsigned Bank = getRegBankID(Reg, MRI, AMDGPU::SGPRRegBankID);
  unsigned Size = getSizeInBits(Reg, MRI, TRI);
  return AMDGPU::getValueMapping(Bank, Size);
}

/// Mapping that unconditionally places \p Reg in the VGPR bank.
const RegisterBankInfo::ValueMapping *
AMDGPURegisterBankInfo::getVGPROpMapping(Register Reg,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI) const {
  unsigned Size = getSizeInBits(Reg, MRI, TRI);
  return AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
}

/// Mapping that unconditionally places \p Reg in the AGPR bank.
const RegisterBankInfo::ValueMapping *
AMDGPURegisterBankInfo::getAGPROpMapping(Register Reg,
                                         const MachineRegisterInfo &MRI,
                                         const TargetRegisterInfo &TRI) const {
  unsigned Size = getSizeInBits(Reg, MRI, TRI);
  return AMDGPU::getValueMapping(AMDGPU::AGPRRegBankID, Size);
}

///
/// This function must return a legal mapping, because
/// AMDGPURegisterBankInfo::getInstrAlternativeMappings() is not called
/// in RegBankSelect::Mode::Fast. Any mapping that would cause a
/// VGPR-to-SGPR copy to be generated is illegal.
/// // Operands that must be SGPRs must accept potentially divergent VGPRs as // legal. These will be dealt with in applyMappingImpl. // const RegisterBankInfo::InstructionMapping & AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { const MachineFunction &MF = *MI.getParent()->getParent(); const MachineRegisterInfo &MRI = MF.getRegInfo(); if (MI.isCopy() || MI.getOpcode() == AMDGPU::G_FREEZE) { // The default logic bothers to analyze impossible alternative mappings. We // want the most straightforward mapping, so just directly handle this. const RegisterBank *DstBank = getRegBank(MI.getOperand(0).getReg(), MRI, *TRI); const RegisterBank *SrcBank = getRegBank(MI.getOperand(1).getReg(), MRI, *TRI); assert(SrcBank && "src bank should have been assigned already"); if (!DstBank) DstBank = SrcBank; unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI); if (cannotCopy(*DstBank, *SrcBank, Size)) return getInvalidInstructionMapping(); const ValueMapping &ValMap = getValueMapping(0, Size, *DstBank); unsigned OpdsMappingSize = MI.isCopy() ? 1 : 2; SmallVector<const ValueMapping *, 1> OpdsMapping(OpdsMappingSize); OpdsMapping[0] = &ValMap; if (MI.getOpcode() == AMDGPU::G_FREEZE) OpdsMapping[1] = &ValMap; return getInstructionMapping( 1, /*Cost*/ 1, /*OperandsMapping*/ getOperandsMapping(OpdsMapping), OpdsMappingSize); } if (MI.isRegSequence()) { // If any input is a VGPR, the result must be a VGPR. The default handling // assumes any copy between banks is legal. unsigned BankID = AMDGPU::SGPRRegBankID; for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { auto OpBank = getRegBankID(MI.getOperand(I).getReg(), MRI); // It doesn't make sense to use vcc or scc banks here, so just ignore // them. 
if (OpBank != AMDGPU::SGPRRegBankID) { BankID = AMDGPU::VGPRRegBankID; break; } } unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI); const ValueMapping &ValMap = getValueMapping(0, Size, getRegBank(BankID)); return getInstructionMapping( 1, /*Cost*/ 1, /*OperandsMapping*/ getOperandsMapping({&ValMap}), 1); } // The default handling is broken and doesn't handle illegal SGPR->VGPR copies // properly. // // TODO: There are additional exec masking dependencies to analyze. if (MI.getOpcode() == TargetOpcode::G_PHI) { // TODO: Generate proper invalid bank enum. int ResultBank = -1; Register DstReg = MI.getOperand(0).getReg(); // Sometimes the result may have already been assigned a bank. if (const RegisterBank *DstBank = getRegBank(DstReg, MRI, *TRI)) ResultBank = DstBank->getID(); for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { Register Reg = MI.getOperand(I).getReg(); const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI); // FIXME: Assuming VGPR for any undetermined inputs. 
if (!Bank || Bank->getID() == AMDGPU::VGPRRegBankID) { ResultBank = AMDGPU::VGPRRegBankID; break; } // FIXME: Need to promote SGPR case to s32 unsigned OpBank = Bank->getID(); ResultBank = regBankBoolUnion(ResultBank, OpBank); } assert(ResultBank != -1); unsigned Size = MRI.getType(DstReg).getSizeInBits(); const ValueMapping &ValMap = getValueMapping(0, Size, getRegBank(ResultBank)); return getInstructionMapping( 1, /*Cost*/ 1, /*OperandsMapping*/ getOperandsMapping({&ValMap}), 1); } const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI); if (Mapping.isValid()) return Mapping; SmallVector<const ValueMapping*, 8> OpdsMapping(MI.getNumOperands()); switch (MI.getOpcode()) { default: return getInvalidInstructionMapping(); case AMDGPU::G_AND: case AMDGPU::G_OR: case AMDGPU::G_XOR: { unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); if (Size == 1) { const RegisterBank *DstBank = getRegBank(MI.getOperand(0).getReg(), MRI, *TRI); unsigned TargetBankID = -1; unsigned BankLHS = -1; unsigned BankRHS = -1; if (DstBank) { TargetBankID = DstBank->getID(); if (DstBank == &AMDGPU::VCCRegBank) { TargetBankID = AMDGPU::VCCRegBankID; BankLHS = AMDGPU::VCCRegBankID; BankRHS = AMDGPU::VCCRegBankID; } else { BankLHS = getRegBankID(MI.getOperand(1).getReg(), MRI, AMDGPU::SGPRRegBankID); BankRHS = getRegBankID(MI.getOperand(2).getReg(), MRI, AMDGPU::SGPRRegBankID); } } else { BankLHS = getRegBankID(MI.getOperand(1).getReg(), MRI, AMDGPU::VCCRegBankID); BankRHS = getRegBankID(MI.getOperand(2).getReg(), MRI, AMDGPU::VCCRegBankID); // Both inputs should be true booleans to produce a boolean result. 
if (BankLHS == AMDGPU::VGPRRegBankID || BankRHS == AMDGPU::VGPRRegBankID) { TargetBankID = AMDGPU::VGPRRegBankID; } else if (BankLHS == AMDGPU::VCCRegBankID || BankRHS == AMDGPU::VCCRegBankID) { TargetBankID = AMDGPU::VCCRegBankID; BankLHS = AMDGPU::VCCRegBankID; BankRHS = AMDGPU::VCCRegBankID; } else if (BankLHS == AMDGPU::SGPRRegBankID && BankRHS == AMDGPU::SGPRRegBankID) { TargetBankID = AMDGPU::SGPRRegBankID; } } OpdsMapping[0] = AMDGPU::getValueMapping(TargetBankID, Size); OpdsMapping[1] = AMDGPU::getValueMapping(BankLHS, Size); OpdsMapping[2] = AMDGPU::getValueMapping(BankRHS, Size); break; } if (Size == 64) { if (isSALUMapping(MI)) { OpdsMapping[0] = getValueMappingSGPR64Only(AMDGPU::SGPRRegBankID, Size); OpdsMapping[1] = OpdsMapping[2] = OpdsMapping[0]; } else { OpdsMapping[0] = getValueMappingSGPR64Only(AMDGPU::VGPRRegBankID, Size); unsigned Bank1 = getRegBankID(MI.getOperand(1).getReg(), MRI /*, DefaultBankID*/); OpdsMapping[1] = AMDGPU::getValueMapping(Bank1, Size); unsigned Bank2 = getRegBankID(MI.getOperand(2).getReg(), MRI /*, DefaultBankID*/); OpdsMapping[2] = AMDGPU::getValueMapping(Bank2, Size); } break; } LLVM_FALLTHROUGH; } case AMDGPU::G_PTR_ADD: case AMDGPU::G_PTRMASK: case AMDGPU::G_ADD: case AMDGPU::G_SUB: case AMDGPU::G_MUL: case AMDGPU::G_SHL: case AMDGPU::G_LSHR: case AMDGPU::G_ASHR: case AMDGPU::G_UADDO: case AMDGPU::G_USUBO: case AMDGPU::G_UADDE: case AMDGPU::G_SADDE: case AMDGPU::G_USUBE: case AMDGPU::G_SSUBE: case AMDGPU::G_SMIN: case AMDGPU::G_SMAX: case AMDGPU::G_UMIN: case AMDGPU::G_UMAX: case AMDGPU::G_SHUFFLE_VECTOR: if (isSALUMapping(MI)) return getDefaultMappingSOP(MI); LLVM_FALLTHROUGH; case AMDGPU::G_SADDSAT: // FIXME: Could lower sat ops for SALU case AMDGPU::G_SSUBSAT: case AMDGPU::G_UADDSAT: case AMDGPU::G_USUBSAT: case AMDGPU::G_FADD: case AMDGPU::G_FSUB: case AMDGPU::G_FPTOSI: case AMDGPU::G_FPTOUI: case AMDGPU::G_FMUL: case AMDGPU::G_FMA: case AMDGPU::G_FMAD: case AMDGPU::G_FSQRT: case AMDGPU::G_FFLOOR: case 
AMDGPU::G_FCEIL: case AMDGPU::G_FRINT: case AMDGPU::G_SITOFP: case AMDGPU::G_UITOFP: case AMDGPU::G_FPTRUNC: case AMDGPU::G_FPEXT: case AMDGPU::G_FEXP2: case AMDGPU::G_FLOG2: case AMDGPU::G_FMINNUM: case AMDGPU::G_FMAXNUM: case AMDGPU::G_FMINNUM_IEEE: case AMDGPU::G_FMAXNUM_IEEE: case AMDGPU::G_FCANONICALIZE: case AMDGPU::G_INTRINSIC_TRUNC: case AMDGPU::G_BSWAP: // TODO: Somehow expand for scalar? case AMDGPU::G_FSHR: // TODO: Expand for scalar case AMDGPU::G_AMDGPU_FFBH_U32: case AMDGPU::G_AMDGPU_FMIN_LEGACY: case AMDGPU::G_AMDGPU_FMAX_LEGACY: case AMDGPU::G_AMDGPU_RCP_IFLAG: case AMDGPU::G_AMDGPU_CVT_F32_UBYTE0: case AMDGPU::G_AMDGPU_CVT_F32_UBYTE1: case AMDGPU::G_AMDGPU_CVT_F32_UBYTE2: case AMDGPU::G_AMDGPU_CVT_F32_UBYTE3: return getDefaultMappingVOP(MI); case AMDGPU::G_UMULH: case AMDGPU::G_SMULH: { if (Subtarget.hasScalarMulHiInsts() && isSALUMapping(MI)) return getDefaultMappingSOP(MI); return getDefaultMappingVOP(MI); } case AMDGPU::G_IMPLICIT_DEF: { unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size); break; } case AMDGPU::G_FCONSTANT: case AMDGPU::G_CONSTANT: case AMDGPU::G_GLOBAL_VALUE: case AMDGPU::G_BLOCK_ADDR: case AMDGPU::G_READCYCLECOUNTER: { unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size); break; } case AMDGPU::G_FRAME_INDEX: { // TODO: This should be the same as other constants, but eliminateFrameIndex // currently assumes VALU uses. unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size); break; } case AMDGPU::G_DYN_STACKALLOC: { // Result is always uniform, and a wave reduction is needed for the source. 
OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, 32); unsigned SrcBankID = getRegBankID(MI.getOperand(1).getReg(), MRI); OpdsMapping[1] = AMDGPU::getValueMapping(SrcBankID, 32); break; } case AMDGPU::G_INSERT: { unsigned BankID = isSALUMapping(MI) ? AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID; unsigned DstSize = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI); unsigned SrcSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI); unsigned EltSize = getSizeInBits(MI.getOperand(2).getReg(), MRI, *TRI); OpdsMapping[0] = AMDGPU::getValueMapping(BankID, DstSize); OpdsMapping[1] = AMDGPU::getValueMapping(BankID, SrcSize); OpdsMapping[2] = AMDGPU::getValueMapping(BankID, EltSize); OpdsMapping[3] = nullptr; break; } case AMDGPU::G_EXTRACT: { unsigned BankID = getRegBankID(MI.getOperand(1).getReg(), MRI); unsigned DstSize = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI); unsigned SrcSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI); OpdsMapping[0] = AMDGPU::getValueMapping(BankID, DstSize); OpdsMapping[1] = AMDGPU::getValueMapping(BankID, SrcSize); OpdsMapping[2] = nullptr; break; } case AMDGPU::G_BUILD_VECTOR: case AMDGPU::G_BUILD_VECTOR_TRUNC: { LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); if (DstTy == LLT::vector(2, 16)) { unsigned DstSize = DstTy.getSizeInBits(); unsigned SrcSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); unsigned Src0BankID = getRegBankID(MI.getOperand(1).getReg(), MRI); unsigned Src1BankID = getRegBankID(MI.getOperand(2).getReg(), MRI); unsigned DstBankID = regBankUnion(Src0BankID, Src1BankID); OpdsMapping[0] = AMDGPU::getValueMapping(DstBankID, DstSize); OpdsMapping[1] = AMDGPU::getValueMapping(Src0BankID, SrcSize); OpdsMapping[2] = AMDGPU::getValueMapping(Src1BankID, SrcSize); break; } LLVM_FALLTHROUGH; } case AMDGPU::G_MERGE_VALUES: case AMDGPU::G_CONCAT_VECTORS: { unsigned Bank = isSALUMapping(MI) ? 
AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID; unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); unsigned SrcSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(Bank, DstSize); // Op1 and Dst should use the same register bank. for (unsigned i = 1, e = MI.getNumOperands(); i != e; ++i) OpdsMapping[i] = AMDGPU::getValueMapping(Bank, SrcSize); break; } case AMDGPU::G_BITCAST: case AMDGPU::G_INTTOPTR: case AMDGPU::G_PTRTOINT: case AMDGPU::G_BITREVERSE: case AMDGPU::G_FABS: case AMDGPU::G_FNEG: { unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); unsigned BankID = getRegBankID(MI.getOperand(1).getReg(), MRI); OpdsMapping[0] = OpdsMapping[1] = AMDGPU::getValueMapping(BankID, Size); break; } case AMDGPU::G_CTLZ_ZERO_UNDEF: case AMDGPU::G_CTTZ_ZERO_UNDEF: case AMDGPU::G_CTPOP: { unsigned Size = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); unsigned BankID = getRegBankID(MI.getOperand(1).getReg(), MRI); OpdsMapping[0] = AMDGPU::getValueMapping(BankID, 32); // This should really be getValueMappingSGPR64Only, but allowing the generic // code to handle the register split just makes using LegalizerHelper more // difficult. 
OpdsMapping[1] = AMDGPU::getValueMapping(BankID, Size); break; } case AMDGPU::G_TRUNC: { Register Dst = MI.getOperand(0).getReg(); Register Src = MI.getOperand(1).getReg(); unsigned Bank = getRegBankID(Src, MRI); unsigned DstSize = getSizeInBits(Dst, MRI, *TRI); unsigned SrcSize = getSizeInBits(Src, MRI, *TRI); OpdsMapping[0] = AMDGPU::getValueMapping(Bank, DstSize); OpdsMapping[1] = AMDGPU::getValueMapping(Bank, SrcSize); break; } case AMDGPU::G_ZEXT: case AMDGPU::G_SEXT: case AMDGPU::G_ANYEXT: case AMDGPU::G_SEXT_INREG: { Register Dst = MI.getOperand(0).getReg(); Register Src = MI.getOperand(1).getReg(); unsigned DstSize = getSizeInBits(Dst, MRI, *TRI); unsigned SrcSize = getSizeInBits(Src, MRI, *TRI); unsigned DstBank; const RegisterBank *SrcBank = getRegBank(Src, MRI, *TRI); assert(SrcBank); switch (SrcBank->getID()) { case AMDGPU::SGPRRegBankID: DstBank = AMDGPU::SGPRRegBankID; break; default: DstBank = AMDGPU::VGPRRegBankID; break; } // Scalar extend can use 64-bit BFE, but VGPRs require extending to // 32-bits, and then to 64. OpdsMapping[0] = AMDGPU::getValueMappingSGPR64Only(DstBank, DstSize); OpdsMapping[1] = AMDGPU::getValueMappingSGPR64Only(SrcBank->getID(), SrcSize); break; } case AMDGPU::G_FCMP: { unsigned Size = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); unsigned Op2Bank = getRegBankID(MI.getOperand(2).getReg(), MRI); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1); OpdsMapping[1] = nullptr; // Predicate Operand. OpdsMapping[2] = AMDGPU::getValueMapping(Op2Bank, Size); OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size); break; } case AMDGPU::G_STORE: { assert(MI.getOperand(0).isReg()); unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); // FIXME: We need to specify a different reg bank once scalar stores are // supported. 
const ValueMapping *ValMapping = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size); OpdsMapping[0] = ValMapping; OpdsMapping[1] = getValueMappingForPtr(MRI, MI.getOperand(1).getReg()); break; } case AMDGPU::G_ICMP: { auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); unsigned Size = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); // See if the result register has already been constrained to vcc, which may // happen due to control flow intrinsic lowering. unsigned DstBank = getRegBankID(MI.getOperand(0).getReg(), MRI, AMDGPU::SGPRRegBankID); unsigned Op2Bank = getRegBankID(MI.getOperand(2).getReg(), MRI); unsigned Op3Bank = getRegBankID(MI.getOperand(3).getReg(), MRI); bool CanUseSCC = DstBank == AMDGPU::SGPRRegBankID && Op2Bank == AMDGPU::SGPRRegBankID && Op3Bank == AMDGPU::SGPRRegBankID && (Size == 32 || (Size == 64 && (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) && Subtarget.hasScalarCompareEq64())); DstBank = CanUseSCC ? AMDGPU::SGPRRegBankID : AMDGPU::VCCRegBankID; unsigned SrcBank = CanUseSCC ? AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID; // TODO: Use 32-bit for scalar output size. // SCC results will need to be copied to a 32-bit SGPR virtual register. const unsigned ResultSize = 1; OpdsMapping[0] = AMDGPU::getValueMapping(DstBank, ResultSize); OpdsMapping[2] = AMDGPU::getValueMapping(SrcBank, Size); OpdsMapping[3] = AMDGPU::getValueMapping(SrcBank, Size); break; } case AMDGPU::G_EXTRACT_VECTOR_ELT: { // VGPR index can be used for waterfall when indexing a SGPR vector. 
unsigned SrcBankID = getRegBankID(MI.getOperand(1).getReg(), MRI); unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); unsigned SrcSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); unsigned IdxSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); unsigned IdxBank = getRegBankID(MI.getOperand(2).getReg(), MRI); unsigned OutputBankID = regBankUnion(SrcBankID, IdxBank); OpdsMapping[0] = AMDGPU::getValueMappingSGPR64Only(OutputBankID, DstSize); OpdsMapping[1] = AMDGPU::getValueMapping(SrcBankID, SrcSize); // The index can be either if the source vector is VGPR. OpdsMapping[2] = AMDGPU::getValueMapping(IdxBank, IdxSize); break; } case AMDGPU::G_INSERT_VECTOR_ELT: { unsigned OutputBankID = isSALUMapping(MI) ? AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID; unsigned VecSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); unsigned InsertSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); unsigned IdxSize = MRI.getType(MI.getOperand(3).getReg()).getSizeInBits(); unsigned InsertEltBankID = getRegBankID(MI.getOperand(2).getReg(), MRI); unsigned IdxBankID = getRegBankID(MI.getOperand(3).getReg(), MRI); OpdsMapping[0] = AMDGPU::getValueMapping(OutputBankID, VecSize); OpdsMapping[1] = AMDGPU::getValueMapping(OutputBankID, VecSize); // This is a weird case, because we need to break down the mapping based on // the register bank of a different operand. if (InsertSize == 64 && OutputBankID == AMDGPU::VGPRRegBankID) { OpdsMapping[2] = AMDGPU::getValueMappingSplit64(InsertEltBankID, InsertSize); } else { assert(InsertSize == 32 || InsertSize == 64); OpdsMapping[2] = AMDGPU::getValueMapping(InsertEltBankID, InsertSize); } // The index can be either if the source vector is VGPR. OpdsMapping[3] = AMDGPU::getValueMapping(IdxBankID, IdxSize); break; } case AMDGPU::G_UNMERGE_VALUES: { unsigned Bank = isSALUMapping(MI) ? AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID; // Op1 and Dst should use the same register bank. 
// FIXME: Shouldn't this be the default? Why do we need to handle this? for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { unsigned Size = getSizeInBits(MI.getOperand(i).getReg(), MRI, *TRI); OpdsMapping[i] = AMDGPU::getValueMapping(Bank, Size); } break; } case AMDGPU::G_AMDGPU_BUFFER_LOAD: case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE: case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE: case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT: case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT: case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT: case AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT_D16: case AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT: case AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT_D16: case AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT: case AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT_D16: case AMDGPU::G_AMDGPU_BUFFER_STORE: case AMDGPU::G_AMDGPU_BUFFER_STORE_BYTE: case AMDGPU::G_AMDGPU_BUFFER_STORE_SHORT: case AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT: case AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT_D16: { OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); // rsrc OpdsMapping[1] = getSGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI); // vindex OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); // voffset OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); // soffset OpdsMapping[4] = getSGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); // Any remaining operands are immediates and were correctly null // initialized. 
break; } case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC: case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC: { // vdata_out OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); // vdata_in OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI); // rsrc OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); // vindex OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); // voffset OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); // soffset OpdsMapping[5] = getSGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI); // Any remaining operands are immediates and were correctly null // initialized. break; } case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP: { // vdata_out OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); // vdata_in OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI); // cmp OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); // rsrc OpdsMapping[3] = getSGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); // vindex OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); // voffset OpdsMapping[5] = getVGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI); // soffset OpdsMapping[6] = getSGPROpMapping(MI.getOperand(6).getReg(), MRI, *TRI); // Any remaining operands are immediates and were correctly null // initialized. break; } case AMDGPU::G_AMDGPU_S_BUFFER_LOAD: { // Lie and claim everything is legal, even though some need to be // SGPRs. applyMapping will have to deal with it as a waterfall loop. 
OpdsMapping[1] = getSGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI); OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); // We need to convert this to a MUBUF if either the resource of offset is // VGPR. unsigned RSrcBank = OpdsMapping[1]->BreakDown[0].RegBank->getID(); unsigned OffsetBank = OpdsMapping[2]->BreakDown[0].RegBank->getID(); unsigned ResultBank = regBankUnion(RSrcBank, OffsetBank); unsigned Size0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(ResultBank, Size0); break; } case AMDGPU::G_INTRINSIC: { switch (MI.getIntrinsicID()) { default: return getInvalidInstructionMapping(); case Intrinsic::amdgcn_div_fmas: case Intrinsic::amdgcn_div_fixup: case Intrinsic::amdgcn_trig_preop: case Intrinsic::amdgcn_sin: case Intrinsic::amdgcn_cos: case Intrinsic::amdgcn_log_clamp: case Intrinsic::amdgcn_rcp: case Intrinsic::amdgcn_rcp_legacy: case Intrinsic::amdgcn_sqrt: case Intrinsic::amdgcn_rsq: case Intrinsic::amdgcn_rsq_legacy: case Intrinsic::amdgcn_rsq_clamp: case Intrinsic::amdgcn_fmul_legacy: case Intrinsic::amdgcn_ldexp: case Intrinsic::amdgcn_frexp_mant: case Intrinsic::amdgcn_frexp_exp: case Intrinsic::amdgcn_fract: case Intrinsic::amdgcn_cvt_pkrtz: case Intrinsic::amdgcn_cvt_pknorm_i16: case Intrinsic::amdgcn_cvt_pknorm_u16: case Intrinsic::amdgcn_cvt_pk_i16: case Intrinsic::amdgcn_cvt_pk_u16: case Intrinsic::amdgcn_fmed3: case Intrinsic::amdgcn_cubeid: case Intrinsic::amdgcn_cubema: case Intrinsic::amdgcn_cubesc: case Intrinsic::amdgcn_cubetc: case Intrinsic::amdgcn_sffbh: case Intrinsic::amdgcn_fmad_ftz: case Intrinsic::amdgcn_mbcnt_lo: case Intrinsic::amdgcn_mbcnt_hi: case Intrinsic::amdgcn_mul_u24: case Intrinsic::amdgcn_mul_i24: case Intrinsic::amdgcn_lerp: case Intrinsic::amdgcn_sad_u8: case Intrinsic::amdgcn_msad_u8: case Intrinsic::amdgcn_sad_hi_u8: case Intrinsic::amdgcn_sad_u16: case Intrinsic::amdgcn_qsad_pk_u16_u8: case Intrinsic::amdgcn_mqsad_pk_u16_u8: case 
Intrinsic::amdgcn_mqsad_u32_u8: case Intrinsic::amdgcn_cvt_pk_u8_f32: case Intrinsic::amdgcn_alignbit: case Intrinsic::amdgcn_alignbyte: case Intrinsic::amdgcn_fdot2: case Intrinsic::amdgcn_sdot2: case Intrinsic::amdgcn_udot2: case Intrinsic::amdgcn_sdot4: case Intrinsic::amdgcn_udot4: case Intrinsic::amdgcn_sdot8: case Intrinsic::amdgcn_udot8: return getDefaultMappingVOP(MI); case Intrinsic::amdgcn_sbfe: case Intrinsic::amdgcn_ubfe: if (isSALUMapping(MI)) return getDefaultMappingSOP(MI); return getDefaultMappingVOP(MI); case Intrinsic::amdgcn_ds_swizzle: case Intrinsic::amdgcn_ds_permute: case Intrinsic::amdgcn_ds_bpermute: case Intrinsic::amdgcn_update_dpp: case Intrinsic::amdgcn_mov_dpp8: case Intrinsic::amdgcn_mov_dpp: case Intrinsic::amdgcn_wwm: case Intrinsic::amdgcn_wqm: case Intrinsic::amdgcn_softwqm: case Intrinsic::amdgcn_set_inactive: return getDefaultMappingAllVGPR(MI); case Intrinsic::amdgcn_kernarg_segment_ptr: case Intrinsic::amdgcn_s_getpc: case Intrinsic::amdgcn_groupstaticsize: case Intrinsic::amdgcn_reloc_constant: case Intrinsic::returnaddress: { unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size); break; } case Intrinsic::amdgcn_wqm_vote: { unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, Size); break; } case Intrinsic::amdgcn_ps_live: { OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1); break; } case Intrinsic::amdgcn_div_scale: { unsigned Dst0Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); unsigned Dst1Size = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Dst0Size); OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, Dst1Size); unsigned SrcSize = MRI.getType(MI.getOperand(3).getReg()).getSizeInBits(); OpdsMapping[3] = 
AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, SrcSize); OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, SrcSize); break; } case Intrinsic::amdgcn_class: { Register Src0Reg = MI.getOperand(2).getReg(); Register Src1Reg = MI.getOperand(3).getReg(); unsigned Src0Size = MRI.getType(Src0Reg).getSizeInBits(); unsigned Src1Size = MRI.getType(Src1Reg).getSizeInBits(); unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, DstSize); OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Src0Size); OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Src1Size); break; } case Intrinsic::amdgcn_icmp: case Intrinsic::amdgcn_fcmp: { unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); // This is not VCCRegBank because this is not used in boolean contexts. OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, DstSize); unsigned OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, OpSize); OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, OpSize); break; } case Intrinsic::amdgcn_readlane: { // This must be an SGPR, but accept a VGPR. 
Register IdxReg = MI.getOperand(3).getReg(); unsigned IdxSize = MRI.getType(IdxReg).getSizeInBits(); unsigned IdxBank = getRegBankID(IdxReg, MRI, AMDGPU::SGPRRegBankID); OpdsMapping[3] = AMDGPU::getValueMapping(IdxBank, IdxSize); LLVM_FALLTHROUGH; } case Intrinsic::amdgcn_readfirstlane: { unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); unsigned SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, DstSize); OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, SrcSize); break; } case Intrinsic::amdgcn_writelane: { unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); Register SrcReg = MI.getOperand(2).getReg(); unsigned SrcSize = MRI.getType(SrcReg).getSizeInBits(); unsigned SrcBank = getRegBankID(SrcReg, MRI, AMDGPU::SGPRRegBankID); Register IdxReg = MI.getOperand(3).getReg(); unsigned IdxSize = MRI.getType(IdxReg).getSizeInBits(); unsigned IdxBank = getRegBankID(IdxReg, MRI, AMDGPU::SGPRRegBankID); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize); // These 2 must be SGPRs, but accept VGPRs. Readfirstlane will be inserted // to legalize. 
OpdsMapping[2] = AMDGPU::getValueMapping(SrcBank, SrcSize); OpdsMapping[3] = AMDGPU::getValueMapping(IdxBank, IdxSize); OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, SrcSize); break; } case Intrinsic::amdgcn_if_break: { unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size); OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1); OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size); break; } case Intrinsic::amdgcn_permlane16: case Intrinsic::amdgcn_permlanex16: { unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size); OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size); OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size); OpdsMapping[4] = getSGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); OpdsMapping[5] = getSGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); break; } case Intrinsic::amdgcn_mfma_f32_4x4x1f32: case Intrinsic::amdgcn_mfma_f32_4x4x4f16: case Intrinsic::amdgcn_mfma_i32_4x4x4i8: case Intrinsic::amdgcn_mfma_f32_4x4x2bf16: case Intrinsic::amdgcn_mfma_f32_16x16x1f32: case Intrinsic::amdgcn_mfma_f32_16x16x4f32: case Intrinsic::amdgcn_mfma_f32_16x16x4f16: case Intrinsic::amdgcn_mfma_f32_16x16x16f16: case Intrinsic::amdgcn_mfma_i32_16x16x4i8: case Intrinsic::amdgcn_mfma_i32_16x16x16i8: case Intrinsic::amdgcn_mfma_f32_16x16x2bf16: case Intrinsic::amdgcn_mfma_f32_16x16x8bf16: case Intrinsic::amdgcn_mfma_f32_32x32x1f32: case Intrinsic::amdgcn_mfma_f32_32x32x2f32: case Intrinsic::amdgcn_mfma_f32_32x32x4f16: case Intrinsic::amdgcn_mfma_f32_32x32x8f16: case Intrinsic::amdgcn_mfma_i32_32x32x4i8: case Intrinsic::amdgcn_mfma_i32_32x32x8i8: case Intrinsic::amdgcn_mfma_f32_32x32x2bf16: case Intrinsic::amdgcn_mfma_f32_32x32x4bf16: { // Default for MAI intrinsics. 
// srcC can also be an immediate which can be folded later. // FIXME: Should we eventually add an alternative mapping with AGPR src // for srcA/srcB? // // vdst, srcA, srcB, srcC OpdsMapping[0] = getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); OpdsMapping[4] = getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); break; } case Intrinsic::amdgcn_interp_p1: case Intrinsic::amdgcn_interp_p2: case Intrinsic::amdgcn_interp_mov: case Intrinsic::amdgcn_interp_p1_f16: case Intrinsic::amdgcn_interp_p2_f16: { const int M0Idx = MI.getNumOperands() - 1; Register M0Reg = MI.getOperand(M0Idx).getReg(); unsigned M0Bank = getRegBankID(M0Reg, MRI, AMDGPU::SGPRRegBankID); unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize); for (int I = 2; I != M0Idx && MI.getOperand(I).isReg(); ++I) OpdsMapping[I] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32); // Must be SGPR, but we must take whatever the original bank is and fix it // later. OpdsMapping[M0Idx] = AMDGPU::getValueMapping(M0Bank, 32); break; } case Intrinsic::amdgcn_ballot: { unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); unsigned SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, DstSize); OpdsMapping[2] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, SrcSize); break; } } break; } case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD: case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: { auto IntrID = MI.getIntrinsicID(); const AMDGPU::RsrcIntrinsic *RSrcIntrin = AMDGPU::lookupRsrcIntrinsic(IntrID); assert(RSrcIntrin && "missing RsrcIntrinsic for image intrinsic"); // Non-images can have complications from operands that allow both SGPR // and VGPR. 
For now it's too complicated to figure out the final opcode // to derive the register bank from the MCInstrDesc. assert(RSrcIntrin->IsImage); return getImageMapping(MRI, MI, RSrcIntrin->RsrcArg); } case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: { auto IntrID = MI.getIntrinsicID(); switch (IntrID) { case Intrinsic::amdgcn_s_getreg: case Intrinsic::amdgcn_s_memtime: case Intrinsic::amdgcn_s_memrealtime: case Intrinsic::amdgcn_s_get_waveid_in_workgroup: { unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size); break; } case Intrinsic::amdgcn_ds_fadd: case Intrinsic::amdgcn_ds_fmin: case Intrinsic::amdgcn_ds_fmax: case Intrinsic::amdgcn_global_atomic_csub: return getDefaultMappingAllVGPR(MI); case Intrinsic::amdgcn_ds_ordered_add: case Intrinsic::amdgcn_ds_ordered_swap: { unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize); unsigned M0Bank = getRegBankID(MI.getOperand(2).getReg(), MRI, AMDGPU::SGPRRegBankID); OpdsMapping[2] = AMDGPU::getValueMapping(M0Bank, 32); OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32); break; } case Intrinsic::amdgcn_ds_append: case Intrinsic::amdgcn_ds_consume: { unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, DstSize); OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); break; } case Intrinsic::amdgcn_exp_compr: OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32); OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32); break; case Intrinsic::amdgcn_exp: // FIXME: Could we support packed types here? 
OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32); OpdsMapping[4] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32); OpdsMapping[5] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32); OpdsMapping[6] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32); break; case Intrinsic::amdgcn_s_sendmsg: case Intrinsic::amdgcn_s_sendmsghalt: { // This must be an SGPR, but accept a VGPR. unsigned Bank = getRegBankID(MI.getOperand(2).getReg(), MRI, AMDGPU::SGPRRegBankID); OpdsMapping[2] = AMDGPU::getValueMapping(Bank, 32); break; } case Intrinsic::amdgcn_s_setreg: { // This must be an SGPR, but accept a VGPR. unsigned Bank = getRegBankID(MI.getOperand(2).getReg(), MRI, AMDGPU::SGPRRegBankID); OpdsMapping[2] = AMDGPU::getValueMapping(Bank, 32); break; } case Intrinsic::amdgcn_end_cf: { unsigned Size = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI); OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size); break; } case Intrinsic::amdgcn_else: { unsigned WaveSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI); OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1); OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, WaveSize); OpdsMapping[3] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, WaveSize); break; } case Intrinsic::amdgcn_kill: { OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1); break; } case Intrinsic::amdgcn_raw_buffer_load: case Intrinsic::amdgcn_raw_tbuffer_load: { // FIXME: Should make intrinsic ID the last operand of the instruction, // then this would be the same as store OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); OpdsMapping[4] = getSGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); break; } case Intrinsic::amdgcn_raw_buffer_store: case Intrinsic::amdgcn_raw_buffer_store_format: case 
Intrinsic::amdgcn_raw_tbuffer_store: { OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI); OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); OpdsMapping[4] = getSGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); break; } case Intrinsic::amdgcn_struct_buffer_load: case Intrinsic::amdgcn_struct_tbuffer_load: { OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); OpdsMapping[5] = getSGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI); break; } case Intrinsic::amdgcn_struct_buffer_store: case Intrinsic::amdgcn_struct_tbuffer_store: { OpdsMapping[1] = getVGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI); OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); OpdsMapping[5] = getSGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI); break; } case Intrinsic::amdgcn_init_exec_from_input: { unsigned Size = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI); OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, Size); break; } case Intrinsic::amdgcn_ds_gws_init: case Intrinsic::amdgcn_ds_gws_barrier: case Intrinsic::amdgcn_ds_gws_sema_br: { OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, 32); // This must be an SGPR, but accept a VGPR. 
unsigned Bank = getRegBankID(MI.getOperand(2).getReg(), MRI, AMDGPU::SGPRRegBankID); OpdsMapping[2] = AMDGPU::getValueMapping(Bank, 32); break; } case Intrinsic::amdgcn_ds_gws_sema_v: case Intrinsic::amdgcn_ds_gws_sema_p: case Intrinsic::amdgcn_ds_gws_sema_release_all: { // This must be an SGPR, but accept a VGPR. unsigned Bank = getRegBankID(MI.getOperand(1).getReg(), MRI, AMDGPU::SGPRRegBankID); OpdsMapping[1] = AMDGPU::getValueMapping(Bank, 32); break; } default: return getInvalidInstructionMapping(); } break; } case AMDGPU::G_SELECT: { unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); unsigned Op2Bank = getRegBankID(MI.getOperand(2).getReg(), MRI, AMDGPU::SGPRRegBankID); unsigned Op3Bank = getRegBankID(MI.getOperand(3).getReg(), MRI, AMDGPU::SGPRRegBankID); bool SGPRSrcs = Op2Bank == AMDGPU::SGPRRegBankID && Op3Bank == AMDGPU::SGPRRegBankID; unsigned CondBankDefault = SGPRSrcs ? AMDGPU::SGPRRegBankID : AMDGPU::VCCRegBankID; unsigned CondBank = getRegBankID(MI.getOperand(1).getReg(), MRI, CondBankDefault); if (CondBank == AMDGPU::SGPRRegBankID) CondBank = SGPRSrcs ? AMDGPU::SGPRRegBankID : AMDGPU::VCCRegBankID; else if (CondBank == AMDGPU::VGPRRegBankID) CondBank = AMDGPU::VCCRegBankID; unsigned Bank = SGPRSrcs && CondBank == AMDGPU::SGPRRegBankID ? AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID; assert(CondBank == AMDGPU::VCCRegBankID || CondBank == AMDGPU::SGPRRegBankID); // TODO: Should report 32-bit for scalar condition type. 
if (Size == 64) { OpdsMapping[0] = AMDGPU::getValueMappingSGPR64Only(Bank, Size); OpdsMapping[1] = AMDGPU::getValueMapping(CondBank, 1); OpdsMapping[2] = AMDGPU::getValueMappingSGPR64Only(Bank, Size); OpdsMapping[3] = AMDGPU::getValueMappingSGPR64Only(Bank, Size); } else { OpdsMapping[0] = AMDGPU::getValueMapping(Bank, Size); OpdsMapping[1] = AMDGPU::getValueMapping(CondBank, 1); OpdsMapping[2] = AMDGPU::getValueMapping(Bank, Size); OpdsMapping[3] = AMDGPU::getValueMapping(Bank, Size); } break; } case AMDGPU::G_LOAD: case AMDGPU::G_ZEXTLOAD: case AMDGPU::G_SEXTLOAD: return getInstrMappingForLoad(MI); case AMDGPU::G_ATOMICRMW_XCHG: case AMDGPU::G_ATOMICRMW_ADD: case AMDGPU::G_ATOMICRMW_SUB: case AMDGPU::G_ATOMICRMW_AND: case AMDGPU::G_ATOMICRMW_OR: case AMDGPU::G_ATOMICRMW_XOR: case AMDGPU::G_ATOMICRMW_MAX: case AMDGPU::G_ATOMICRMW_MIN: case AMDGPU::G_ATOMICRMW_UMAX: case AMDGPU::G_ATOMICRMW_UMIN: case AMDGPU::G_ATOMICRMW_FADD: case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG: case AMDGPU::G_AMDGPU_ATOMIC_INC: case AMDGPU::G_AMDGPU_ATOMIC_DEC: { OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); OpdsMapping[1] = getValueMappingForPtr(MRI, MI.getOperand(1).getReg()); OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); break; } case AMDGPU::G_ATOMIC_CMPXCHG: { OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); OpdsMapping[1] = getValueMappingForPtr(MRI, MI.getOperand(1).getReg()); OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); break; } case AMDGPU::G_BRCOND: { unsigned Bank = getRegBankID(MI.getOperand(0).getReg(), MRI, AMDGPU::SGPRRegBankID); assert(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() == 1); if (Bank != AMDGPU::SGPRRegBankID) Bank = AMDGPU::VCCRegBankID; OpdsMapping[0] = AMDGPU::getValueMapping(Bank, 1); break; } } return getInstructionMapping(/*ID*/1, /*Cost*/1, getOperandsMapping(OpdsMapping), 
MI.getNumOperands()); }
#include "usb_pipe.h"
#include <cstdlib>
#include <cstring>
#include <cassert>   // assert() used in queue_finished_request (was relied on transitively)
#include <cerrno>    // errno read in submit_request (was relied on transitively)
#include <memory>
#include <mutex>
#include <zconf.h>
#include "usb_host.h"
#include "usb_endpoint.h"
#include <linux/usbdevice_fs.h>
#include "android_debug.h"
#include <chrono>

using namespace librealsense::usb_host;

// Wraps a single USB endpoint and the one usb_request object used to move
// data through it. Reads are synchronous from the caller's point of view:
// read_pipe() queues the request and blocks on a condition variable until
// queue_finished_request() (invoked from the host's completion path) signals
// completion, or until the timeout expires.
//
// NOTE(review): buffer_size is currently unused — presumably intended for a
// pre-allocated internal buffer; confirm against the header/caller.
usb_pipe::usb_pipe(usb_device_handle *usb_device, usb_endpoint endpoint, int buffer_size)
    : _endpoint(endpoint),
      _device(usb_device)
{
    // The request is owned by this pipe; a custom deleter releases it via the
    // C API when the last reference goes away.
    _request = std::shared_ptr<usb_request>(
        usb_request_new(_device, _endpoint.get_descriptor()),
        [](usb_request* req) { usb_request_free(req); });
}

usb_pipe::~usb_pipe()
{
    // Abort any in-flight transfer so the completion thread does not touch
    // a request whose pipe is going away.
    usb_request_cancel(_request.get());
}

// Clears a halt/stall condition on the endpoint. Returns true on success.
bool usb_pipe::reset()
{
    return usb_endpoint_reset(_device, _endpoint.get_endpoint_address()) == 0;
}

// Queues a read into `buffer` and waits up to `timeout_ms` for completion.
// Returns the number of bytes actually transferred.
//
// NOTE(review): on timeout this returns int(-1) implicitly converted to
// size_t (i.e. SIZE_MAX) because the local is an int — callers that compare
// the result against buffer_len or cast back to a signed type should be
// audited; confirm intended contract before changing.
size_t usb_pipe::read_pipe(uint8_t *buffer, size_t buffer_len, unsigned int timeout_ms)
{
    using namespace std::chrono;
    int bytes_copied = -1;

    // Hold the lock across submit + wait so a completion signaled between the
    // two cannot be missed (wait_for re-checks the _received predicate).
    std::unique_lock<std::mutex> lk(_mutex);
    submit_request(buffer, buffer_len);
    auto res = _cv.wait_for(lk, std::chrono::milliseconds(timeout_ms),
                            [&] { return _received; });
    _received = false;  // consume the completion flag for the next read
    if (res == true) {
        bytes_copied = _request->actual_length;
    } else {
        LOGE("Timeout reached waiting for response!");
        // Best effort: abort the stale transfer so it cannot complete into a
        // buffer the caller may no longer own.
        usb_request_cancel(_request.get());
    }
    lk.unlock();
    return bytes_copied;
}

// Completion callback: called (from the USB host's event thread) when a
// previously queued request for this endpoint finishes. Wakes read_pipe().
void usb_pipe::queue_finished_request(usb_request* response)
{
    assert(response->endpoint == _endpoint.get_endpoint_address());
    std::unique_lock<std::mutex> lk(_mutex);
    _received = true;
    lk.unlock();           // unlock before notify so the waiter can run immediately
    _cv.notify_all();
}

// Points the shared request at the caller's buffer and queues it with the
// kernel. Failures are logged but not propagated; read_pipe() will then
// simply time out. Caller must hold _mutex.
void usb_pipe::submit_request(uint8_t *buffer, size_t buffer_len)
{
    _request->buffer = buffer;
    _request->buffer_length = buffer_len;
    int res = usb_request_queue(_request.get());
    if (res < 0) {
        LOGE("Cannot queue request: %s", strerror(errno));
    }
}
#include "signverifymessagedialog.h"
#include "ui_signverifymessagedialog.h"

#include "addressbookpage.h"
#include "base58.h"
#include "guiutil.h"
#include "init.h"
#include "main.h"
#include "optionsmodel.h"
#include "walletmodel.h"
#include "wallet.h"

#include <QClipboard>

#include <string>
#include <vector>

// Dialog with two tabs: tab 0 signs a message with a wallet key (_SM
// widgets), tab 1 verifies a message signature (_VM widgets). The wallet
// model is injected later via setModel().
SignVerifyMessageDialog::SignVerifyMessageDialog(QWidget *parent) :
    QDialog(parent),
    ui(new Ui::SignVerifyMessageDialog),
    model(0)
{
    ui->setupUi(this);

#if (QT_VERSION >= 0x040700)
    /* Do not move this to the XML file, Qt before 4.7 will choke on it */
    ui->addressIn_SM->setPlaceholderText(tr("Enter a GenesysCoin address (e.g. GPjCkWLCRnwDU9USRKh7p5a8NQc5LS695K)"));
    ui->signatureOut_SM->setPlaceholderText(tr("Click \"Sign Message\" to generate signature"));
    ui->addressIn_VM->setPlaceholderText(tr("Enter a GenesysCoin address (e.g. GPjCkWLCRnwDU9USRKh7p5a8NQc5LS695K)"));
    ui->signatureIn_VM->setPlaceholderText(tr("Enter GenesysCoin signature"));
#endif

    // Attach the standard address-entry helpers (validator/font) to both
    // address fields.
    GUIUtil::setupAddressWidget(ui->addressIn_SM, this);
    GUIUtil::setupAddressWidget(ui->addressIn_VM, this);

    // The dialog itself filters focus/click events on its inputs
    // (see eventFilter) to clear stale status text.
    ui->addressIn_SM->installEventFilter(this);
    ui->messageIn_SM->installEventFilter(this);
    ui->signatureOut_SM->installEventFilter(this);
    ui->addressIn_VM->installEventFilter(this);
    ui->messageIn_VM->installEventFilter(this);
    ui->signatureIn_VM->installEventFilter(this);

    // Signatures are base64 blobs; render them in the monospace address font.
    ui->signatureOut_SM->setFont(GUIUtil::bitcoinAddressFont());
    ui->signatureIn_VM->setFont(GUIUtil::bitcoinAddressFont());
}

SignVerifyMessageDialog::~SignVerifyMessageDialog()
{
    delete ui;
}

// Injects the wallet model used for address-book lookup and unlocking.
void SignVerifyMessageDialog::setModel(WalletModel *model)
{
    this->model = model;
}

// Pre-fills the sign-tab address field and moves focus to the message box.
void SignVerifyMessageDialog::setAddress_SM(QString address)
{
    ui->addressIn_SM->setText(address);
    ui->messageIn_SM->setFocus();
}

// Pre-fills the verify-tab address field and moves focus to the message box.
void SignVerifyMessageDialog::setAddress_VM(QString address)
{
    ui->addressIn_VM->setText(address);
    ui->messageIn_VM->setFocus();
}

// Switches to the sign tab (index 0), optionally showing the dialog.
void SignVerifyMessageDialog::showTab_SM(bool fShow)
{
    ui->tabWidget->setCurrentIndex(0);
    if (fShow)
        this->show();
}
// Switches to the verify tab (index 1), optionally showing the dialog.
void SignVerifyMessageDialog::showTab_VM(bool fShow)
{
    ui->tabWidget->setCurrentIndex(1);
    if (fShow)
        this->show();
}

// Sign tab: pick one of our own (receiving) addresses from the address book.
void SignVerifyMessageDialog::on_addressBookButton_SM_clicked()
{
    if (model && model->getAddressTableModel())
    {
        AddressBookPage dlg(AddressBookPage::ForSending, AddressBookPage::ReceivingTab, this);
        dlg.setModel(model->getAddressTableModel());
        if (dlg.exec())
        {
            setAddress_SM(dlg.getReturnValue());
        }
    }
}

// Sign tab: paste an address from the clipboard.
void SignVerifyMessageDialog::on_pasteButton_SM_clicked()
{
    setAddress_SM(QApplication::clipboard()->text());
}

// Sign tab: sign the entered message with the private key of the entered
// address and show the base64-encoded compact signature. Every failure path
// reports through statusLabel_SM (red) and returns early.
void SignVerifyMessageDialog::on_signMessageButton_SM_clicked()
{
    if (!model)
        return;

    /* Clear old signature to ensure users don't get confused on error with an old signature displayed */
    ui->signatureOut_SM->clear();

    // The address must parse and must correspond to a key (not a script).
    CBitcoinAddress addr(ui->addressIn_SM->text().toStdString());
    if (!addr.IsValid())
    {
        ui->addressIn_SM->setValid(false);
        ui->statusLabel_SM->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel_SM->setText(tr("The entered address is invalid.") + QString(" ") + tr("Please check the address and try again."));
        return;
    }
    CKeyID keyID;
    if (!addr.GetKeyID(keyID))
    {
        ui->addressIn_SM->setValid(false);
        ui->statusLabel_SM->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel_SM->setText(tr("The entered address does not refer to a key.") + QString(" ") + tr("Please check the address and try again."));
        return;
    }

    // May prompt for the passphrase; ctx relocks the wallet on destruction.
    WalletModel::UnlockContext ctx(model->requestUnlock());
    if (!ctx.isValid())
    {
        ui->statusLabel_SM->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel_SM->setText(tr("Wallet unlock was cancelled."));
        return;
    }

    CKey key;
    if (!pwalletMain->GetKey(keyID, key))
    {
        ui->statusLabel_SM->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel_SM->setText(tr("Private key for the entered address is not available."));
        return;
    }

    // Hash the network magic prefix together with the message text; signing
    // the prefixed digest prevents signed messages doubling as transactions.
    CDataStream ss(SER_GETHASH, 0);
    ss << strMessageMagic;
    ss << ui->messageIn_SM->document()->toPlainText().toStdString();

    std::vector<unsigned char> vchSig;
    if (!key.SignCompact(Hash(ss.begin(), ss.end()), vchSig))
    {
        ui->statusLabel_SM->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel_SM->setText(QString("<nobr>") + tr("Message signing failed.") + QString("</nobr>"));
        return;
    }

    ui->statusLabel_SM->setStyleSheet("QLabel { color: green; }");
    ui->statusLabel_SM->setText(QString("<nobr>") + tr("Message signed.") + QString("</nobr>"));

    // Compact signatures are transported as base64 text.
    ui->signatureOut_SM->setText(QString::fromStdString(EncodeBase64(&vchSig[0], vchSig.size())));
}

// Sign tab: copy the generated signature to the clipboard.
void SignVerifyMessageDialog::on_copySignatureButton_SM_clicked()
{
    QApplication::clipboard()->setText(ui->signatureOut_SM->text());
}

// Sign tab: reset all fields and return focus to the address input.
void SignVerifyMessageDialog::on_clearButton_SM_clicked()
{
    ui->addressIn_SM->clear();
    ui->messageIn_SM->clear();
    ui->signatureOut_SM->clear();
    ui->statusLabel_SM->clear();

    ui->addressIn_SM->setFocus();
}

// Verify tab: pick any known (sending) address from the address book.
void SignVerifyMessageDialog::on_addressBookButton_VM_clicked()
{
    if (model && model->getAddressTableModel())
    {
        AddressBookPage dlg(AddressBookPage::ForSending, AddressBookPage::SendingTab, this);
        dlg.setModel(model->getAddressTableModel());
        if (dlg.exec())
        {
            setAddress_VM(dlg.getReturnValue());
        }
    }
}

// Verify tab: recover the public key from the compact signature over the
// prefixed message digest and check that it matches the entered address.
// Every failure path reports through statusLabel_VM (red) and returns early.
void SignVerifyMessageDialog::on_verifyMessageButton_VM_clicked()
{
    CBitcoinAddress addr(ui->addressIn_VM->text().toStdString());
    if (!addr.IsValid())
    {
        ui->addressIn_VM->setValid(false);
        ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel_VM->setText(tr("The entered address is invalid.") + QString(" ") + tr("Please check the address and try again."));
        return;
    }
    CKeyID keyID;
    if (!addr.GetKeyID(keyID))
    {
        ui->addressIn_VM->setValid(false);
        ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel_VM->setText(tr("The entered address does not refer to a key.") + QString(" ") + tr("Please check the address and try again."));
        return;
    }

    // The signature field is base64; fInvalid reports decode failure.
    bool fInvalid = false;
    std::vector<unsigned char> vchSig = DecodeBase64(ui->signatureIn_VM->text().toStdString().c_str(), &fInvalid);

    if (fInvalid)
    {
        ui->signatureIn_VM->setValid(false);
        ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel_VM->setText(tr("The signature could not be decoded.") + QString(" ") + tr("Please check the signature and try again."));
        return;
    }

    // Rebuild the exact digest that signing used (magic prefix + message).
    CDataStream ss(SER_GETHASH, 0);
    ss << strMessageMagic;
    ss << ui->messageIn_VM->document()->toPlainText().toStdString();

    CPubKey pubkey;
    if (!pubkey.RecoverCompact(Hash(ss.begin(), ss.end()), vchSig))
    {
        ui->signatureIn_VM->setValid(false);
        ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel_VM->setText(tr("The signature did not match the message digest.") + QString(" ") + tr("Please check the signature and try again."));
        return;
    }

    // The recovered key must hash to the entered address.
    if (!(CBitcoinAddress(pubkey.GetID()) == addr))
    {
        ui->statusLabel_VM->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel_VM->setText(QString("<nobr>") + tr("Message verification failed.") + QString("</nobr>"));
        return;
    }

    ui->statusLabel_VM->setStyleSheet("QLabel { color: green; }");
    ui->statusLabel_VM->setText(QString("<nobr>") + tr("Message verified.") + QString("</nobr>"));
}

// Verify tab: reset all fields and return focus to the address input.
void SignVerifyMessageDialog::on_clearButton_VM_clicked()
{
    ui->addressIn_VM->clear();
    ui->signatureIn_VM->clear();
    ui->messageIn_VM->clear();
    ui->statusLabel_VM->clear();

    ui->addressIn_VM->setFocus();
}

// Clears stale status text whenever an input widget is clicked or focused;
// also auto-selects the generated signature for easy copying.
bool SignVerifyMessageDialog::eventFilter(QObject *object, QEvent *event)
{
    if (event->type() == QEvent::MouseButtonPress || event->type() == QEvent::FocusIn)
    {
        if (ui->tabWidget->currentIndex() == 0)
        {
            /* Clear status message on focus change */
            ui->statusLabel_SM->clear();

            /* Select generated signature */
            if (object == ui->signatureOut_SM)
            {
                ui->signatureOut_SM->selectAll();
                return true;
            }
        }
        else if (ui->tabWidget->currentIndex() == 1)
        {
            /* Clear status message on focus change */
            ui->statusLabel_VM->clear();
        }
    }
    return QDialog::eventFilter(object, event);
}
#include "../Include/AccountUI.h"
#include<iostream>
#include<iomanip>
#include<ctime>
#include<cstdlib>
#include <vector>
using namespace std;
#include<string.h>
#include<unistd.h>

// NOTE(review): the large commented-out prototype implementations of
// AccountUI/CustomerUI that previously sat here duplicated the live code
// below and have been removed; see version control for the history.

// Read a 6-digit account number from stdin.
// Returns the number on success, 2 if the user types "exit",
// and 0 after three invalid attempts.
long int AccountUI::getAccountNo(){
    string c_accountno;
    cout<<"\nnote:Maximum number of attempts is 3\n"<<endl;
    cin.ignore(1000,'\n');
    for(int attempt=0;attempt<3;attempt++)
    {
        cout<<"\n\t\t\tEnter your 6 digit Account Number:";
        getline(cin,c_accountno);
        if(c_accountno=="exit")
        {
            return 2;  // sentinel: caller treats 2 as "user aborted"
        }
        if(Validation::validateNumber(c_accountno))
        {
            return Utility::convertToLong(c_accountno);
        }
        cout<<"Invalid input"<<endl;
    }
    return 0;  // sentinel: all attempts exhausted
}

// Prompt until a valid (non-empty, alphabetic) account type is entered.
string AccountUI::getAccounttype(){
    string l_accounttype;
    for(;;){
        cin.ignore();
        cout<<"\n Account Type:";
        getline(cin,l_accounttype);
        if(Validation::ValidateString(l_accounttype)==1)
            return l_accounttype;
        cout<<"\n Enter valid type";
    }
}

// Prompt until a valid balance is entered; returns it as double.
double AccountUI::getAccountBalance()
{
    string l_accountBalance;
    for(;;){
        cout<<"Balance :";
        cin>>l_accountBalance;
        if(Validation::ValidateBalance(l_accountBalance)==1)
            return Utility::convertToDouble( l_accountBalance);
        cout<<"\n Enter valid input";
    }
}

// Prompt until a valid fixed-deposit amount is entered.
double AccountUI :: getDepositAmount(){
    string l_depositAmount;
    for(;;){
        cout<<"\n Enter FD Amount:";
        cin>>l_depositAmount;
        if(Validation::ValidateDepositAmount(l_depositAmount)==1)
            return Utility::convertToDouble(l_depositAmount);
        cout<<"\n Enter valid amount";
    }
}

// Prompt until a supported FD period (1, 3 or 5 years) is entered.
int AccountUI :: getDepositYears(){
    string l_depositYears;
    for(;;){
        cout<<"\n Enter period(in years):";
        cin>>l_depositYears;
        if(Validation::ValidateYear(l_depositYears)==1)
            return Utility::convertToInt(l_depositYears);
        cout<<"\n Invalid year.. FD years available are 1,3 and 5 only..\n Enter again...";
    }
}

// Prompt until a valid balance is entered.
double AccountUI :: getBalance(){
    string l_balance;
    for(;;){
        cout<<"\n Enter Balance:";
        cin>>l_balance;
        if(Validation::ValidateBalance(l_balance)==1)
            return Utility::convertToDouble(l_balance);
        cout<<"\n Enter valid amount";
    }
}

// Prompt until a valid minimum balance for a salaried account is entered.
double AccountUI :: getSalariedBalance(){
    string l_salminBalance;
    for(;;){
        cout<<"\n Enter Minimum Balance:";
        cin>>l_salminBalance;
        if(Validation::ValidateBalance(l_salminBalance)==1)
            return Utility::convertToDouble( l_salminBalance);
        cout<<"\n Enter valid amount";
    }
}

// Prompt until a valid minimum balance (5000 or 10000) for a
// non-salaried account is entered.
double AccountUI :: getNonSalBalance(){
    string l_nonsalminBalance;
    for(;;){
        cout<<"\n Enter Minimum Balance:";
        cin>>l_nonsalminBalance;
        if(Validation::ValidateNonSalBalance(l_nonsalminBalance)==1)
            return Utility :: convertToDouble( l_nonsalminBalance);
        cout<<"\n Enter valid amount..\n Amount should be minimum of 5000 or 10000";
    }
}

// Prompt until a valid account-status string is entered.
string AccountUI :: getAccountStatus(){
    string l_status;
    for(;;){
        cin.ignore();
        cout<<"\n Enter Status to update:";
        getline(cin,l_status);
        if(Validation::ValidateString(l_status)==1)
            return l_status;
        cout<<"\n Enter valid input";
    }
}

// Show the savings sub-menu and return the raw (unvalidated) choice.
int AccountUI :: displaySavingsMenu(){
    int choice;
    cout<<"\n \t \t \t ------------- SAVINGS MENU --------------"<<endl;
    cout<<" \t \t \t 1. Salaried Account"<<endl<<"\t \t \t 2. Non Salaried Account"<<endl<<"\t \t \t 3. Exit"<<endl;
    cout<<"\n Enter your choice:";
    cin>>choice;
    return choice;
}

// Show the account-type menu and return the raw (unvalidated) choice.
int AccountUI :: displayAccountTypeMenu()
{
    int mchoice;
    system("clear");
    cout<<"\n \t \t \t-------------- Account Type ------------- \n \n";
    cout<<" \t \t \t 1.savings Account"<<endl;
    cout<<" \t \t \t 2.current Account"<<endl;
    cout<<" \t \t \t 3.Fixed Deposit Account"<<endl;
    cout<<" \t \t \t 4.Exit"<<endl;
    cout<<" \t \t Enter choice:";
    cin>>mchoice;
    return mchoice;
}

// Print a tabular listing of all accounts in the given vector.
void AccountUI::displayAccountlist(vector<Account> accountlist)
{
    system("clear");
    cout<<"\n \t \t \t \t ACCOUNT DETAILS \n \n \n";
    cout<<"\n\tAccountNumber"<<setw(15)<<"AccountType"<<setw(15)<<"CustomerID"<<setw(20)<<"AccountBalance"<<setw(20)<<"status"<<endl;
    cout<<"\n\t________________________________________________________________________________________\n";
    // size_t index avoids the signed/unsigned comparison warning
    for (size_t ViewList=0; ViewList<accountlist.size(); ViewList++)
    {
        cout<<"\t"<<accountlist[ViewList].m_getAccountNo()<<"\t \t "<<accountlist[ViewList].m_getAccountType()<<"\t"<<accountlist[ViewList].m_getCustomerID()<<"\t \t"<< accountlist[ViewList].m_getBalance()<<"\t \t"<<accountlist[ViewList].m_getAccountStatus()<<endl;
        cout<<"\t_______________________________________________________________________________________\n";
    }
}

// Print one account record and ask whether the user wants to update
// its status; returns the typed y/n character.
// FIX: the previous version strcpy'd p_acctype into an *uninitialized*
// char* (undefined behaviour); the parameters are now printed directly.
char AccountUI::displayRecord(long int p_accno,char *p_acctype,long int p_custid,double p_accbalance,int p_status)
{
    cout<<"\n ACCOUNTNUMBER"<<setw(20)<<"ACCOUNTTYPE"<<setw(20)<<"CUSTOMERID"<<setw(20)<<"BALANCE"<<setw(20)<<"STATUS \n";
    cout<<"\n ------------------------------------------------------------------------------------------------------------ \n \n";
    cout<<p_accno<<setw(25)<< p_custid<<setw(20)<<p_accbalance<<setw(20)<<p_status;
    cout<<setw(20)<<p_acctype;
    cout<<"\n Do you want to update status(y/n)?:";
    char choice;
    cin>>choice;
    return choice;
}

// Simple status messages.
void AccountUI :: displayUpdate()
{
    cout<<"\n Updated Successfully...";
}

void AccountUI :: displayDeactivate()
{
    cout<<"\n Deactivated Successfully...";
}

void AccountUI :: displayAction()
{
    cout<<"\n Action Successfull...";
}

void AccountUI :: displayExists()
{
    cout<<"\n Account already exists...";
}

void AccountUI :: displayInvalid()
{
    cout<<"\n Invalid input...";
}

// Prompt until a valid amount is entered (previously used a goto loop).
double AccountUI::getAmount()
{
    string l_amount;
    for(;;){
        cin.ignore(1000,'\n');
        cout<<"\n Enter Amount:";
        getline(cin,l_amount);
        if(Validation::validateDouble(l_amount)==1)
            return Utility::convertToDouble(l_amount);
    }
}

// Ask the user to confirm an action; returns the typed character.
char AccountUI::displayConfirmAction()
{
    char l_val;
    cout<<"Confirm your Action:";
    cin>>l_val;
    return l_val;
}

// Print an arbitrary message verbatim.
void AccountUI::displayMessage(string p_str)
{
    cout<<p_str;
}

// Show the maturity-period menu and return the raw choice (1-3).
int AccountUI :: getMaturityPeriod()
{
    int ch;
    cout<<"select a choice from the available macturity period list:";
    cout<<"\n 1. 1year";
    cout<<"\n 2. 3year";
    cout<<"\n 3. 5year \n";
    cin>>ch;
    return ch;
}

// Print a one-row customer/account summary table.
// (Parameters are printed directly; the old local copies served no purpose.)
void AccountUI::displayfind(long int CustomerID,char Name[21],char DOB[12],char Address[31],char Accounttype[11],double balance)
{
    cout<<"\n\t\t\t Customer Account Details\n";
    cout<<" _____________________________________________________________________________________________________________________";
    cout<<"\n| CUSTOMER ID | NAME | DATE OF BIRTH | ADDRESS | ACCOUNT TYPE |BALANCE |";
    cout<<"\n ___________________________________________________________________________________________________________________";
    cout<<"\n|"<<CustomerID<<" |"<<Name<<" |"<<DOB<<" |"<<Address<<" |"<<Accounttype<<" |"<<balance<<" |";
}

// Print a one-row transaction summary, then pause briefly.
void AccountUI::displayAccount(char Accounttype[11],char type[10],double Balance,char date[20])
{
    cout<<"\n\t\t\t Transaction Details\n";
    cout<<" ________________________________________________________________________________ ";
    cout<<"\n| ACCOUNT TYPE | TRANSACTION TYPE | TRANSACTION DATE | BALANCE |";
    cout<<"\n ______________________________________________________________________________";
    cout<<"\n|"<<Accounttype<<" |"<<type<<" |"<<date<<" |"<<Balance<<" |";
    sleep(1);  // let the user read the row before the screen moves on
}

// Print a one-row fixed-deposit transaction summary.
void AccountUI::displayfixed(char type[10],int period,char date[20],double balance,char tdate[20])
{
    cout<<"\n\t\t\t Transaction Details\n";
    cout<<" _____________________________________________________________________________________";
    cout<<"\n| TRANSACTION TYPE | MACTURITY PERIOD | FD OPENING DATE | BALANCE | TRANACTION DATE |";
    cout<<"\n ___________________________________________________________________________________";
    cout<<"\n|"<<type<<" |"<<period<<" |"<<date<<" |"<<balance<<" |"<<tdate<<"|";
}

// Header row for the transaction listing printed by displayTransaction().
void AccountUI::displayTransactionHeader()
{
    cout<<" ________________________________________________________________________";
    cout<<"\n| TRANSACTION ID | ACCOUNT NO | TRANSACTION DATE | ACCOUNT TYPE | AMOUNT |";
    cout<<"\n ________________________________________________________________________";
}

// One row of the transaction listing (pairs with displayTransactionHeader).
void AccountUI::displayTransaction(long int l_transactionID,long int l_accountno, string l_transactiondate,string l_type,double l_amount)
{
    cout<<"\n|"<<l_transactionID<<" |"<<l_accountno<<" |"<<l_transactiondate<<"|"<<l_type<<" |"<<l_amount<<"|";
}

// Print the cheque-book request summary/receipt.
void AccountUI::displayDetails(int l_id,long int l_accountnum, char l_type[30], char l_dates[30],char l_type1[11], int l_Amount)
{
    cout<<"\t\t\tCHEQUE BOOK REQUEST-Summary"<<endl;
    cout<<"\t\t\t___________________________"<<endl;
    cout<<"\t\t\tRequest ID:"<<l_id<<endl;
    cout<<"\t\t\tAccount Number:"<<l_accountnum<<endl;
    cout<<"\t\t\tRequest Type:"<<l_type<<endl;
    cout<<"\t\t\tRequest raised on "<<l_dates<<endl;
    cout<<"\t\t\tRs."<<l_Amount<<" deducted from your "<<l_type1<<"account bearing number "<<l_accountnum<<endl;
}

// Print the account-balance summary (account number line intentionally
// disabled in the original; kept commented out).
void AccountUI::displayRequest(long h_acc,long h_bal,char h_type[20],long h_customerid)
{
    cout<<"\n\t\t\tACCOUNT BALANCE-Summary"<<endl;
    cout<<"\t\t\t___________________________"<<endl;
    //cout<<"Account Number:"<<h_acc<<endl;
    cout<<"\t\t\tAccount Balance:"<<h_bal<<endl;
    cout<<"\t\t\tAccount Type:"<<h_type<<endl;
    cout<<"\t\t\tCustomer ID "<<h_customerid<<endl;
}

// Ask which cheque book to order; returns the page count (50 or 100).
int AccountUI::getNumberOfCheques()
{
    int l_Choice;
    int l_Cheques;
    cout<<"\t\t\tCHEQUE BOOK TYPES"<<endl;
    cout<<"\t\t\t1. 50 page cheque book - Rs.50"<<endl;
    cout<<"\t\t\t2. 100 page cheque book - Rs.100"<<endl;
    cout<<"\t\t\tSelect your choice: ";
    while(true)
    {
        cin>>l_Choice;
        switch(l_Choice)
        {
            case 1: l_Cheques=50; break;
            case 2: l_Cheques=100; break;
            default:
                cout<<"\t\t\tInvalid Choice! Try Again: ";
                continue;
        }
        break;
    }
    return l_Cheques;
}

// List the accounts of one customer.
// Returns 0 when the customer has no accounts, otherwise the number of
// accounts listed.
// FIX: the previous version fell off the end of this non-void function
// (undefined behaviour) when records existed.
int AccountUI::displayAccountlist2(long int l_customerID)
{
    vector<Account> accountlist;
    Bank obj;
    accountlist=obj.dbViewAccount(l_customerID);
    if(accountlist.size()==0)
    {
        cout<<"no records"<<endl;
        return 0;
    }
    cout<<"\n\t\t\tAccount Number"<<setw(20)<<"AccountType"<<endl;
    for(size_t i=0;i<accountlist.size();i++)
    {
        cout<<"\t\t\t______________________________"<<endl;
        cout<<"\t\t\t"<<accountlist[i].m_getAccountNo()<<"\t\t\t"<<setw(20)<<accountlist[i].m_getAccountType()<<endl;
    }
    return (int)accountlist.size();
}

void AccountUI::displayNoRecords()
{
    cout<<"\n\t\t\tNo records Found"<<endl;
}

void AccountUI::displayRequestGenerated()
{
    cout<<"\n\t\t\tRequest Already Generated"<<endl;
}

// Print the bank banner plus the current local date/time.
void AccountUI::displayheader()
{
    cout<<"\t\t\t\t\t\t\t\t___________________________________"<<endl;
    cout<<"\t\t\t\t\t\t\t\t SAHARA BANK"<<endl;
    time_t now = time(0);
    char* dt = ctime(&now);
    cout<<"\t\t\t\t\t\t\t\t\t"<<dt<<endl;
}

// Prompt until a valid 4-digit bill id is entered (previously goto-based).
int AccountUI :: getBillId()
{
    string BillId;
    for(;;){
        cout<<"\nEnter Bill Id:";
        cin>>BillId;
        if(Validation::validateNumber(BillId)==true)
        {
            if(Validation::validateSize(BillId,4)==true)
                return (Utility::convertToInt(BillId));
            cout<<"\nPlease Enter an four digit bill ID(for example:1111)\n";
        }
        else
        {
            cout<<"\nPlease Enter an existing four digit bill ID(for example:1111)\n";
        }
    }
}

// Prompt until a valid (numeric) bill amount is entered.
float AccountUI::getBillAmount()
{
    string BillAmount;
    for(;;){
        cout<<"\nEnter the Bill Amount:";
        cin>>BillAmount;
        if(Validation :: validateNumber(BillAmount)==true)
            return Utility :: convertToFloat(BillAmount);
        cout<<"Enter the proper bill amount(for example:1000)";
    }
}
int AccountUI::getBillScreen() { string l_choice; bool l_val; lab: system ("clear"); cout<<"\n\t\t\t\t\t\t SAHARA BANK "; cout<<"\n\t\t\t\t\t\t____________________________"<<endl; cout<<"\n\t\t\t\t\t\t BILL PAYMENT"; cout<<"\n\t\t\t\t\t\t____________________________"<<endl; cout<<"\n\t\t\t\t\t\t1.Pay Water Bill"; cout<<"\n\t\t\t\t\t\t2.Pay Electricity Bill"; cout<<"\n\t\t\t\t\t\t3.Pay Phone Bill"; cout<<"\n\t\t\t\t\t\t4.Exit"; cout<<"\n\t\t\tEnter a Choice(1-4) : "; cin>>l_choice; l_val=Validation::validateNumber(l_choice); if(l_val==true) { return (Utility::convertToInt(l_choice)); } else { cout<<"\nEnter a valid choice"; goto lab; } } void AccountUI::displayBillWater(long int p_Accountno,int p_billId,double p_Amount,long int p_refId) { cout<<"\n\n\t SAHARA BANK"; cout<<"\n\t___________________________________"; cout<<"\n\n\t BILL RECEIPT"; cout<<"\n\t------------------------------------"; cout<<"\n\n\tAccountNo : "<<p_Accountno; cout<<"\n\n\tBillID : "<<p_billId; cout<<"\n\n\tAmount paid(Rs) : "<<p_Amount; cout<<"\n\n\tBill Type : Electricity Bill"; cout<<"\n\n\tBill Reference ID : "<<p_refId; cout<<"\n\t------------------------------------"; } void AccountUI::displayBillElec(long int p_Accountno,int p_billId,double p_Amount,long int p_refId) { cout<<"\n\n\tSAHARA BANK"; cout<<"\n\t_________________________________"; cout<<"\n\n\tBILL RECEIPT"; cout<<"\n\t--------------------------------"; cout<<"\n\n\tAccountNo : "<<p_Accountno; cout<<"\n\n\tBillID : "<<p_billId; cout<<"\n\n\tAmount paid(Rs) : "<<p_Amount; cout<<"\n\n\tBill Type : Water Bill"; cout<<"\n\n\tBill Reference ID : "<<p_refId; cout<<"\n\t---------------------------------"; } void AccountUI::displayBillPhone(long int p_Accountno,int p_billId,double p_Amount,long int p_refId) { cout<<"\n\n SAHARA BANK"; cout<<"\n___________________________________"; cout<<"\n\n BILL RECEIPT"; cout<<"\n\t------------------------------------"; cout<<"\n\n\tAccountNo : "<<p_Accountno; cout<<"\n\n\tBillID : "<<p_billId; 
cout<<"\n\n\tAmount paid(Rs) : "<<p_Amount; cout<<"\n\n\tBill Type : Phone Bill"; cout<<"\n\n\tBill Reference ID : "<<p_refId; cout<<"\n\t------------------------------------"; }
//============================================================================ // MCKL/include/mckl/random/rng_set.hpp //---------------------------------------------------------------------------- // MCKL: Monte Carlo Kernel Library //---------------------------------------------------------------------------- // Copyright (c) 2013-2018, Yan Zhou // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
//============================================================================ #ifndef MCKL_RANDOM_RNG_SET_HPP #define MCKL_RANDOM_RNG_SET_HPP #include <mckl/random/internal/common.hpp> #include <mckl/random/rng.hpp> #include <mckl/random/seed.hpp> #if MCKL_HAS_TBB #include <tbb/enumerable_thread_specific.h> #endif /// \brief Default RNG set type /// \ingroup Config #ifndef MCKL_RNG_SET_TYPE #if MCKL_USE_TBB_TLS #define MCKL_RNG_SET_TYPE ::mckl::RNGSetTBB #else #define MCKL_RNG_SET_TYPE ::mckl::RNGSetVector #endif #endif namespace mckl { /// \brief Scalar RNG set /// \ingroup Random template <typename RNGType = RNG> class RNGSetScalar { public: using rng_type = RNGType; using size_type = std::size_t; explicit RNGSetScalar(size_type = 0) { reset(); } size_type size() const { return 1; } void resize(std::size_t) {} void reset() { rng_.seed(Seed<rng_type>::instance().get()); } rng_type &operator[](size_type) { return rng_; } private: std::size_t size_; rng_type rng_; }; // class RNGSetScalar /// \brief Vector RNG set /// \ingroup Random template <typename RNGType = RNG> class RNGSetVector { public: using rng_type = RNGType; using size_type = typename Vector<rng_type>::size_type; explicit RNGSetVector(size_type N = 0) : rng_(N, rng_type()) { reset(); } size_type size() const { return rng_.size(); } void resize(std::size_t n) { if (n == rng_.size()) { return; } if (n < rng_.size()) { rng_.resize(n); } size_type m = rng_.size(); rng_.resize(n); for (std::size_t i = m; i != n; ++i) { rng_[i].seed(Seed<rng_type>::instance().get()); } } void reset() { for (auto &rng : rng_) { rng.seed(Seed<rng_type>::instance().get()); } } rng_type &operator[](size_type id) { return rng_[id % size()]; } private: Vector<rng_type> rng_; }; // class RNGSetVector #if MCKL_HAS_TBB /// \brief Thread-local storage RNG set using tbb::enumerable_thread_specific /// \ingroup Random template <typename RNGType = RNG, typename Alloc = ::tbb::cache_aligned_allocator<RNGType>, ::tbb::ets_key_usage_type 
ETSKeyType = ::tbb::ets_no_key> class RNGSetTBBEnumerable { public: using rng_type = RNGType; using size_type = std::size_t; explicit RNGSetTBBEnumerable(size_type = 0) : rng_([]() { return rng_type(Seed<rng_type>::instance().get()); }) { reset(); } size_type size() const { return rng_.size(); } void resize(std::size_t) {} void reset() { rng_.clear(); } rng_type &operator[](size_type) { return rng_.local(); } private: std::size_t size_; ::tbb::enumerable_thread_specific<rng_type, Alloc, ETSKeyType> rng_; }; // class RNGSetTBBEnumerable /// \brief Thread-local storage RNG set using tbb::enumerable_thread_specific /// without native TLS keys /// \ingroup Random template <typename RNGType = RNG> using RNGSetTBB = RNGSetTBBEnumerable<RNGType, ::tbb::cache_aligned_allocator<RNGType>, ::tbb::ets_no_key>; /// \brief Thread-local storage RNG set using tbb::enumerable_thread_specific /// with native TLS keys /// \ingroup Random template <typename RNGType = RNG> using RNGSetTBBKPI = RNGSetTBBEnumerable<RNGType, ::tbb::cache_aligned_allocator<RNGType>, ::tbb::ets_key_per_instance>; #endif // MCKL_HAS_TBB /// \brief Default RNG set /// \ingroup Random template <typename RNGType = typename MCKL_RNG_SET_TYPE<>::rng_type> using RNGSet = MCKL_RNG_SET_TYPE<RNGType>; /// \brief Particle::rng_set_type trait /// \ingroup Traits MCKL_DEFINE_TYPE_DISPATCH_TRAIT(RNGSetType, rng_set_type, RNGSet<>) } // namespace mckl #endif // MCKL_RANDOM_RNG_SET_HPP
/*
** $Id: ldblib.c,v 1.132.1.1 2013/04/12 18:48:47 roberto Exp roberto $
** Interface from Lua to its debug API
** See Copyright Notice in lua.h
*/


#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ldblib_c
#define LUA_LIB

#include "lua.h"

#include "lauxlib.h"
#include "lualib.h"


/* registry field under which the hook table (thread -> hook function)
** is stored */
#define HOOKKEY "_HKEY"


/* Ensure there is room for n extra slots on L1's stack.  When L1 is a
** different thread its stack space gives no guarantees, so it must be
** checked before any cross-thread push. */
static void checkstack (lua_State *L, lua_State *L1, int n) {
  if (L != L1 && !lua_checkstack(L1, n))
    luaL_error(L, "stack overflow");
}


/* debug.getregistry(): expose the C registry table to Lua */
static int db_getregistry (lua_State *L) {
  lua_pushvalue(L, LUA_REGISTRYINDEX);
  return 1;
}


/* debug.getmetatable(v): metatable of any value, or nil */
static int db_getmetatable (lua_State *L) {
  luaL_checkany(L, 1);
  if (!lua_getmetatable(L, 1)) {
    lua_pushnil(L);  /* no metatable */
  }
  return 1;
}


/* debug.setmetatable(v, mt): set metatable (nil or table) on any value */
static int db_setmetatable (lua_State *L) {
  int t = lua_type(L, 2);
  luaL_argcheck(L, t == LUA_TNIL || t == LUA_TTABLE, 2,
                    "nil or table expected");
  lua_settop(L, 2);
  lua_setmetatable(L, 1);
  return 1;  /* return 1st argument */
}


/* debug.getuservalue(u): uservalue of a full userdata, or nil */
static int db_getuservalue (lua_State *L) {
  if (lua_type(L, 1) != LUA_TUSERDATA)
    lua_pushnil(L);
  else
    lua_getuservalue(L, 1);
  return 1;
}


/* debug.setuservalue(u, v): set uservalue (nil or table) of full userdata */
static int db_setuservalue (lua_State *L) {
  if (lua_type(L, 1) == LUA_TLIGHTUSERDATA)
    luaL_argerror(L, 1, "full userdata expected, got light userdata");
  luaL_checktype(L, 1, LUA_TUSERDATA);
  if (!lua_isnoneornil(L, 2))
    luaL_checktype(L, 2, LUA_TTABLE);
  lua_settop(L, 2);
  lua_setuservalue(L, 1);
  return 1;
}


/* helpers to set string/int/boolean fields in the table at the stack top */
static void settabss (lua_State *L, const char *i, const char *v) {
  lua_pushstring(L, v);
  lua_setfield(L, -2, i);
}


static void settabsi (lua_State *L, const char *i, int v) {
  lua_pushinteger(L, v);
  lua_setfield(L, -2, i);
}


static void settabsb (lua_State *L, const char *i, int v) {
  lua_pushboolean(L, v);
  lua_setfield(L, -2, i);
}


/* If the first argument is a thread, return it and set *arg = 1 so that
** callers offset their remaining argument indices; otherwise operate on
** the current thread (*arg = 0). */
static lua_State *getthread (lua_State *L, int *arg) {
  if (lua_isthread(L, 1)) {
    *arg = 1;
    return lua_tothread(L, 1);
  }
  else {
    *arg = 0;
    return L;
  }
}


/* Move the value produced by lua_getinfo options 'f'/'L' (left on the
** stack of L1) into the result table sitting below it on L's stack. */
static void treatstackoption (lua_State *L, lua_State *L1, const char *fname) {
  if (L == L1) {
    lua_pushvalue(L, -2);
    lua_remove(L, -3);
  }
  else
    lua_xmove(L1, L, 1);
  lua_setfield(L, -2, fname);
}


/* debug.getinfo([thread,] f [, what]): build a table describing a
** function or an activation record at a given stack level */
static int db_getinfo (lua_State *L) {
  lua_Debug ar;
  int arg;
  lua_State *L1 = getthread(L, &arg);
  const char *options = luaL_optstring(L, arg+2, "flnStu");
  checkstack(L, L1, 3);
  if (lua_isnumber(L, arg+1)) {  /* stack-level query */
    if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), &ar)) {
      lua_pushnil(L);  /* level out of range */
      return 1;
    }
  }
  else if (lua_isfunction(L, arg+1)) {  /* function query: use '>' form */
    lua_pushfstring(L, ">%s", options);
    options = lua_tostring(L, -1);
    lua_pushvalue(L, arg+1);
    lua_xmove(L, L1, 1);
  }
  else
    return luaL_argerror(L, arg+1, "function or level expected");
  if (!lua_getinfo(L1, options, &ar))
    return luaL_argerror(L, arg+2, "invalid option");
  lua_createtable(L, 0, 2);  /* result table */
  if (strchr(options, 'S')) {
    settabss(L, "source", ar.source);
    settabss(L, "short_src", ar.short_src);
    settabsi(L, "linedefined", ar.linedefined);
    settabsi(L, "lastlinedefined", ar.lastlinedefined);
    settabss(L, "what", ar.what);
  }
  if (strchr(options, 'l'))
    settabsi(L, "currentline", ar.currentline);
  if (strchr(options, 'u')) {
    settabsi(L, "nups", ar.nups);
    settabsi(L, "nparams", ar.nparams);
    settabsb(L, "isvararg", ar.isvararg);
  }
  if (strchr(options, 'n')) {
    settabss(L, "name", ar.name);
    settabss(L, "namewhat", ar.namewhat);
  }
  if (strchr(options, 't'))
    settabsb(L, "istailcall", ar.istailcall);
  if (strchr(options, 'L'))
    treatstackoption(L, L1, "activelines");
  if (strchr(options, 'f'))
    treatstackoption(L, L1, "func");
  return 1;  /* return table */
}


/* debug.getlocal([thread,] f, local): name (and value, for stack levels)
** of a local variable */
static int db_getlocal (lua_State *L) {
  int arg;
  lua_State *L1 = getthread(L, &arg);
  lua_Debug ar;
  const char *name;
  int nvar = luaL_checkint(L, arg+2);  /* local-variable index */
  if (lua_isfunction(L, arg + 1)) {  /* function argument? */
    lua_pushvalue(L, arg + 1);  /* push function */
    lua_pushstring(L, lua_getlocal(L, NULL, nvar));  /* push local name */
    return 1;
  }
  else {  /* stack-level argument */
    if (!lua_getstack(L1, luaL_checkint(L, arg+1), &ar))  /* out of range? */
      return luaL_argerror(L, arg+1, "level out of range");
    checkstack(L, L1, 1);
    name = lua_getlocal(L1, &ar, nvar);
    if (name) {
      lua_xmove(L1, L, 1);  /* push local value */
      lua_pushstring(L, name);  /* push name */
      lua_pushvalue(L, -2);  /* re-order */
      return 2;
    }
    else {
      lua_pushnil(L);  /* no name (nor value) */
      return 1;
    }
  }
}


/* debug.setlocal([thread,] level, local, value) */
static int db_setlocal (lua_State *L) {
  int arg;
  lua_State *L1 = getthread(L, &arg);
  lua_Debug ar;
  if (!lua_getstack(L1, luaL_checkint(L, arg+1), &ar))  /* out of range? */
    return luaL_argerror(L, arg+1, "level out of range");
  luaL_checkany(L, arg+3);
  lua_settop(L, arg+3);
  checkstack(L, L1, 1);
  lua_xmove(L, L1, 1);
  lua_pushstring(L, lua_setlocal(L1, &ar, luaL_checkint(L, arg+2)));
  return 1;
}


/* shared implementation of debug.getupvalue (get != 0) and
** debug.setupvalue (get == 0) */
static int auxupvalue (lua_State *L, int get) {
  const char *name;
  int n = luaL_checkint(L, 2);
  luaL_checktype(L, 1, LUA_TFUNCTION);
  name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n);
  if (name == NULL) return 0;
  lua_pushstring(L, name);
  lua_insert(L, -(get+1));  /* place name below the value, if any */
  return get + 1;
}


static int db_getupvalue (lua_State *L) {
  return auxupvalue(L, 1);
}


static int db_setupvalue (lua_State *L) {
  luaL_checkany(L, 3);
  return auxupvalue(L, 0);
}


/* Validate that argument argnup is a valid upvalue index for the
** function at argf; returns the index. */
static int checkupval (lua_State *L, int argf, int argnup) {
  lua_Debug ar;
  int nup = luaL_checkint(L, argnup);
  luaL_checktype(L, argf, LUA_TFUNCTION);
  lua_pushvalue(L, argf);
  lua_getinfo(L, ">u", &ar);  /* fills ar.nups */
  luaL_argcheck(L, 1 <= nup && nup <= ar.nups, argnup, "invalid upvalue index");
  return nup;
}


/* debug.upvalueid(f, n): unique identifier (light userdata) of an upvalue */
static int db_upvalueid (lua_State *L) {
  int n = checkupval(L, 1, 2);
  lua_pushlightuserdata(L, lua_upvalueid(L, 1, n));
  return 1;
}


/* debug.upvaluejoin(f1, n1, f2, n2): make f1's upvalue n1 share f2's n2 */
static int db_upvaluejoin (lua_State *L) {
  int n1 = checkupval(L, 1, 2);
  int n2 = checkupval(L, 3, 4);
  luaL_argcheck(L, !lua_iscfunction(L, 1), 1, "Lua function expected");
  luaL_argcheck(L, !lua_iscfunction(L, 3), 3, "Lua function expected");
  lua_upvaluejoin(L, 1, n1, 3, n2);
  return 0;
}


/* push the hook table (creating it if needed); returns 0 on creation */
#define gethooktable(L) luaL_getsubtable(L, LUA_REGISTRYINDEX, HOOKKEY)


/* C hook installed by debug.sethook: looks up the Lua hook function for
** the current thread and calls it with the event name and line number */
static void hookf (lua_State *L, lua_Debug *ar) {
  static const char *const hooknames[] =
    {"call", "return", "line", "count", "tail call"};
  gethooktable(L);
  lua_pushthread(L);
  lua_rawget(L, -2);
  if (lua_isfunction(L, -1)) {
    lua_pushstring(L, hooknames[(int)ar->event]);
    if (ar->currentline >= 0)
      lua_pushinteger(L, ar->currentline);
    else lua_pushnil(L);
    lua_assert(lua_getinfo(L, "lS", ar));
    lua_call(L, 2, 0);
  }
}


/* convert the "crl" mask string (+ count) into a LUA_MASK* bitmask */
static int makemask (const char *smask, int count) {
  int mask = 0;
  if (strchr(smask, 'c')) mask |= LUA_MASKCALL;
  if (strchr(smask, 'r')) mask |= LUA_MASKRET;
  if (strchr(smask, 'l')) mask |= LUA_MASKLINE;
  if (count > 0) mask |= LUA_MASKCOUNT;
  return mask;
}


/* inverse of makemask: render a bitmask back into "crl" form */
static char *unmakemask (int mask, char *smask) {
  int i = 0;
  if (mask & LUA_MASKCALL) smask[i++] = 'c';
  if (mask & LUA_MASKRET) smask[i++] = 'r';
  if (mask & LUA_MASKLINE) smask[i++] = 'l';
  smask[i] = '\0';
  return smask;
}


/* debug.sethook([thread,] hook, mask [, count]) / debug.sethook() */
static int db_sethook (lua_State *L) {
  int arg, mask, count;
  lua_Hook func;
  lua_State *L1 = getthread(L, &arg);
  if (lua_isnoneornil(L, arg+1)) {  /* no hook function given? */
    lua_settop(L, arg+1);
    func = NULL; mask = 0; count = 0;  /* turn off hooks */
  }
  else {
    const char *smask = luaL_checkstring(L, arg+2);
    luaL_checktype(L, arg+1, LUA_TFUNCTION);
    count = luaL_optint(L, arg+3, 0);
    func = hookf; mask = makemask(smask, count);
  }
  if (gethooktable(L) == 0) {  /* creating hook table? */
    lua_pushstring(L, "k");
    lua_setfield(L, -2, "__mode");  /** hooktable.__mode = "k" */
    lua_pushvalue(L, -1);
    lua_setmetatable(L, -2);  /* setmetatable(hooktable) = hooktable */
  }
  checkstack(L, L1, 1);
  lua_pushthread(L1); lua_xmove(L1, L, 1);  /* key: the target thread */
  lua_pushvalue(L, arg+1);
  lua_rawset(L, -3);  /* set new hook */
  lua_sethook(L1, func, mask, count);  /* set hooks */
  return 0;
}


/* debug.gethook([thread]): hook function (or "external hook"), mask, count */
static int db_gethook (lua_State *L) {
  int arg;
  lua_State *L1 = getthread(L, &arg);
  char buff[5];
  int mask = lua_gethookmask(L1);
  lua_Hook hook = lua_gethook(L1);
  if (hook != NULL && hook != hookf)  /* external hook? */
    lua_pushliteral(L, "external hook");
  else {
    gethooktable(L);
    checkstack(L, L1, 1);
    lua_pushthread(L1); lua_xmove(L1, L, 1);
    lua_rawget(L, -2);   /* get hook */
    lua_remove(L, -2);  /* remove hook table */
  }
  lua_pushstring(L, unmakemask(mask, buff));
  lua_pushinteger(L, lua_gethookcount(L1));
  return 3;
}


/* debug.debug(): interactive prompt reading and running lines from stdin
** until EOF or "cont" */
static int db_debug (lua_State *L) {
  for (;;) {
    char buffer[250];
    luai_writestringerror("%s", "lua_debug> ");
    if (fgets(buffer, sizeof(buffer), stdin) == 0 ||
        strcmp(buffer, "cont\n") == 0)
      return 0;
    if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") ||
        lua_pcall(L, 0, 0, 0))
      luai_writestringerror("%s\n", lua_tostring(L, -1));
    lua_settop(L, 0);  /* remove eventual returns */
  }
}


/* debug.traceback([thread,] [msg [, level]]) */
static int db_traceback (lua_State *L) {
  int arg;
  lua_State *L1 = getthread(L, &arg);
  const char *msg = lua_tostring(L, arg + 1);
  if (msg == NULL && !lua_isnoneornil(L, arg + 1))  /* non-string 'msg'? */
    lua_pushvalue(L, arg + 1);  /* return it untouched */
  else {
    int level = luaL_optint(L, arg + 2, (L == L1) ? 1 : 0);
    luaL_traceback(L, L1, msg, level);
  }
  return 1;
}


static const luaL_Reg dblib[] = {
  {"debug", db_debug},
  {"getuservalue", db_getuservalue},
  {"gethook", db_gethook},
  {"getinfo", db_getinfo},
  {"getlocal", db_getlocal},
  {"getregistry", db_getregistry},
  {"getmetatable", db_getmetatable},
  {"getupvalue", db_getupvalue},
  {"upvaluejoin", db_upvaluejoin},
  {"upvalueid", db_upvalueid},
  {"setuservalue", db_setuservalue},
  {"sethook", db_sethook},
  {"setlocal", db_setlocal},
  {"setmetatable", db_setmetatable},
  {"setupvalue", db_setupvalue},
  {"traceback", db_traceback},
  {NULL, NULL}
};


/* library entry point */
LUAMOD_API int luaopen_debug (lua_State *L) {
  luaL_newlib(L, dblib);
  return 1;
}
/*
 * This software is distributed under BSD 3-clause license (see LICENSE file).
 *
 * Authors: Soeren Sonnenburg, Heiko Strathmann, Jacob Walker, Viktor Gal,
 *          Evgeniy Andreev, Soumyajit De, Sergey Lisitsyn
 */

#include <shogun/base/ShogunEnv.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/MulticlassLabels.h>
#include <shogun/multiclass/MulticlassLibLinear.h>
#include <shogun/io/SGIO.h>
#include <shogun/io/CSVFile.h>
#include <shogun/evaluation/CrossValidation.h>
#include <shogun/evaluation/StratifiedCrossValidationSplitting.h>
#include <shogun/evaluation/MulticlassAccuracy.h>

using namespace shogun;

// Prepare to read a file for the training data
const char fname_feats[] = "../data/fm_train_real.dat";
const char fname_labels[] = "../data/label_train_multiclass.dat";

// Trains a multiclass liblinear SVM on the data files above, reports the
// training accuracy, then runs stratified 5-fold cross-validation.
void test_cross_validation()
{
	/* dense features from matrix */
	auto feature_file = std::make_shared<CSVFile>(fname_feats);
	SGMatrix<float64_t> mat=SGMatrix<float64_t>();
	mat.load(feature_file);

	auto features=std::make_shared<DenseFeatures<float64_t>>(mat);

	/* labels from vector */
	auto label_file = std::make_shared<CSVFile>(fname_labels);
	SGVector<float64_t> label_vec;
	label_vec.load(label_file);

	auto labels=std::make_shared<MulticlassLabels>(label_vec);

	/* create multiclass svm via liblinear (not libsvm) */
	float64_t svm_C=10;
	float64_t svm_eps=0.0001;
	auto svm=std::make_shared<MulticlassLibLinear>(svm_C, features, labels);
	svm->set_epsilon(svm_eps);

	/* train and output */
	svm->train(features);
	auto output = svm->apply(features)->as<MulticlassLabels>();
	for (index_t i=0; i<features->get_num_vectors(); ++i)
		SG_SPRINT("i=%d, class=%f,\n", i, output->get_label(i));

	/* evaluation criterion */
	auto eval_crit = std::make_shared<MulticlassAccuracy>();

	/* evaluate training error */
	float64_t eval_result=eval_crit->evaluate(output, labels);
	SG_SPRINT("training accuracy: %f\n", eval_result);

	/* assert that regression "works". this is not guaranteed to always work
	 * but should be a really coarse check to see if everything is going
	 * approx. right */
	ASSERT(eval_result<2);

	/* splitting strategy */
	index_t n_folds=5;
	auto splitting=
	    std::make_shared<StratifiedCrossValidationSplitting>(labels, n_folds);

	/* cross validation instance; a single run is performed here
	 * (confidence-interval computation is disabled below) */
	auto cross=std::make_shared<CrossValidation>(svm, features, labels,
	    splitting, eval_crit);
	cross->set_num_runs(1);
	// cross->set_conf_int_alpha(0.05);

	/* actual evaluation */
	auto result=cross->evaluate()->as<CrossValidationResult>();

	if (result->get_result_type() != CROSSVALIDATION_RESULT)
		SG_SERROR("Evaluation result is not of type CrossValidationResult!");

	result->print_result();

	/* clean up */
}

// Entry point: enable verbose logging, then run the demo above.
int main(int argc, char **argv)
{
	env()->io()->set_loglevel(MSG_DEBUG);

	test_cross_validation();

	return 0;
}
#pragma once // ARKSurvivalEvolved (329.9) SDK #ifdef _MSC_VER #pragma pack(push, 0x8) #endif #include "ARKSurvivalEvolved_Paracer_Character_BP_classes.hpp" namespace sdk { //--------------------------------------------------------------------------- //Parameters //--------------------------------------------------------------------------- // Function Paracer_Character_BP.Paracer_Character_BP_C.UserConstructionScript struct AParacer_Character_BP_C_UserConstructionScript_Params { }; // Function Paracer_Character_BP.Paracer_Character_BP_C.ExecuteUbergraph_Paracer_Character_BP struct AParacer_Character_BP_C_ExecuteUbergraph_Paracer_Character_BP_Params { int EntryPoint; // (Parm, ZeroConstructor, IsPlainOldData) }; } #ifdef _MSC_VER #pragma pack(pop) #endif
#include <iostream> #include "tinysplinecxx.h" int main(int argc, char **argv) { std::vector<tinyspline::real> points; // P0 points.push_back(1600); // time points.push_back(10); points.push_back(100); points.push_back(1); // P1 points.push_back(1650); // time points.push_back(20); points.push_back(200); points.push_back(2); // P2 points.push_back(1700); // time points.push_back(30); points.push_back(300); points.push_back(3); // P3 points.push_back(1800); // time points.push_back(40); points.push_back(400); points.push_back(4); // P4 points.push_back(1900); // time points.push_back(80); points.push_back(600); points.push_back(10); // P5 points.push_back(2000); // time points.push_back(40); points.push_back(400); points.push_back(4); tinyspline::BSpline spline = tinyspline::BSpline:: interpolateCubicNatural(points, 4); tinyspline::DeBoorNet net = spline.bisect(1850); std::vector<tinyspline::real> result = net.result(); std::cout << "t = " << result[0] << ", p = (" << result[1] << ", " << result[2] << ", " << result[3] << ")" << std::endl; }
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/lexv2-models/model/ImportSummary.h>
#include <aws/core/utils/json/JsonSerializer.h>

#include <utility>

using namespace Aws::Utils::Json;
using namespace Aws::Utils;

namespace Aws
{
namespace LexModelsV2
{
namespace Model
{

// Default-construct with all "has been set" flags cleared and enum members
// in their NOT_SET sentinel state.
ImportSummary::ImportSummary() :
    m_importIdHasBeenSet(false),
    m_importedResourceIdHasBeenSet(false),
    m_importedResourceNameHasBeenSet(false),
    m_importStatus(ImportStatus::NOT_SET),
    m_importStatusHasBeenSet(false),
    m_mergeStrategy(MergeStrategy::NOT_SET),
    m_mergeStrategyHasBeenSet(false),
    m_creationDateTimeHasBeenSet(false),
    m_lastUpdatedDateTimeHasBeenSet(false),
    m_importedResourceType(ImportResourceType::NOT_SET),
    m_importedResourceTypeHasBeenSet(false)
{
}

// Delegate to the default constructor instead of duplicating the entire
// member-initializer list, then populate from JSON via operator=.
ImportSummary::ImportSummary(JsonView jsonValue) :
    ImportSummary()
{
  *this = jsonValue;
}

// Populate each member only if the corresponding key exists in the JSON,
// flipping its "has been set" flag so Jsonize() round-trips exactly the
// fields that were present.
ImportSummary& ImportSummary::operator =(JsonView jsonValue)
{
  if(jsonValue.ValueExists("importId"))
  {
    m_importId = jsonValue.GetString("importId");

    m_importIdHasBeenSet = true;
  }

  if(jsonValue.ValueExists("importedResourceId"))
  {
    m_importedResourceId = jsonValue.GetString("importedResourceId");

    m_importedResourceIdHasBeenSet = true;
  }

  if(jsonValue.ValueExists("importedResourceName"))
  {
    m_importedResourceName = jsonValue.GetString("importedResourceName");

    m_importedResourceNameHasBeenSet = true;
  }

  if(jsonValue.ValueExists("importStatus"))
  {
    m_importStatus = ImportStatusMapper::GetImportStatusForName(jsonValue.GetString("importStatus"));

    m_importStatusHasBeenSet = true;
  }

  if(jsonValue.ValueExists("mergeStrategy"))
  {
    m_mergeStrategy = MergeStrategyMapper::GetMergeStrategyForName(jsonValue.GetString("mergeStrategy"));

    m_mergeStrategyHasBeenSet = true;
  }

  if(jsonValue.ValueExists("creationDateTime"))
  {
    m_creationDateTime = jsonValue.GetDouble("creationDateTime");

    m_creationDateTimeHasBeenSet = true;
  }

  if(jsonValue.ValueExists("lastUpdatedDateTime"))
  {
    m_lastUpdatedDateTime = jsonValue.GetDouble("lastUpdatedDateTime");

    m_lastUpdatedDateTimeHasBeenSet = true;
  }

  if(jsonValue.ValueExists("importedResourceType"))
  {
    m_importedResourceType = ImportResourceTypeMapper::GetImportResourceTypeForName(jsonValue.GetString("importedResourceType"));

    m_importedResourceTypeHasBeenSet = true;
  }

  return *this;
}

// Serialize only the fields that have been set; enums go out as their string
// names and timestamps as seconds with millisecond precision.
JsonValue ImportSummary::Jsonize() const
{
  JsonValue payload;

  if(m_importIdHasBeenSet)
  {
   payload.WithString("importId", m_importId);

  }

  if(m_importedResourceIdHasBeenSet)
  {
   payload.WithString("importedResourceId", m_importedResourceId);

  }

  if(m_importedResourceNameHasBeenSet)
  {
   payload.WithString("importedResourceName", m_importedResourceName);

  }

  if(m_importStatusHasBeenSet)
  {
   payload.WithString("importStatus", ImportStatusMapper::GetNameForImportStatus(m_importStatus));
  }

  if(m_mergeStrategyHasBeenSet)
  {
   payload.WithString("mergeStrategy", MergeStrategyMapper::GetNameForMergeStrategy(m_mergeStrategy));
  }

  if(m_creationDateTimeHasBeenSet)
  {
   payload.WithDouble("creationDateTime", m_creationDateTime.SecondsWithMSPrecision());
  }

  if(m_lastUpdatedDateTimeHasBeenSet)
  {
   payload.WithDouble("lastUpdatedDateTime", m_lastUpdatedDateTime.SecondsWithMSPrecision());
  }

  if(m_importedResourceTypeHasBeenSet)
  {
   payload.WithString("importedResourceType", ImportResourceTypeMapper::GetNameForImportResourceType(m_importedResourceType));
  }

  return payload;
}

} // namespace Model
} // namespace LexModelsV2
} // namespace Aws
#include "PrettyPrinter.h"
#include <iostream>
#include <sstream>
#include <cstddef>

// Entry point: pretty-print the program to `os` using `c` as the indent
// character repeated `n` times per level.
void PrettyPrinter::print(ASTProgram *p, std::ostream &os, char c, int n) {
  PrettyPrinter visitor(os, c, n);
  p->accept(&visitor);
}

/**
 * join last sz items from the visitedResults with delimiter delim. Then, remove these items from the stack
 *
 * the delimiter before the last skip items are not appended (e.g. to not add trailing commas)
 *
 * returns the joined string
 */
std::string joinWithDelim(std::vector<std::string>& visitResults, std::string delim, int sz, int skip) {
  std::string out;

  // Fix: guard sz <= 0. Previously `visitResults.size() - skip` was computed
  // in unsigned arithmetic; with sz == 0, skip == 1 and an empty stack it
  // wrapped to SIZE_MAX and the loop read out of bounds (UB for an empty
  // program/formals list). Also avoids signed/unsigned comparison by doing
  // all index math in std::size_t.
  if (sz <= 0)
    return out;

  const std::size_t total = visitResults.size();
  const std::size_t effSkip = (skip < 0) ? 0 : (skip > sz ? static_cast<std::size_t>(sz)
                                                          : static_cast<std::size_t>(skip));
  std::size_t i = total - static_cast<std::size_t>(sz);

  // append delimiter after every joined item except the last `skip` items
  for (; i < total - effSkip; i++) {
    out += visitResults[i];
    out += delim;
  }

  // skip the delimiter for the last several tokens
  for (; i < total; i++)
    out += visitResults[i];

  visitResults.erase(visitResults.end() - sz, visitResults.end());
  return out;
}

void PrettyPrinter::endVisit(ASTProgram * element) {
  os << joinWithDelim(visitResults, "\n", element->getFunctions().size(), 1);
  os.flush();
}

/*
 * General approach taken by visit methods.
 * - visit() is used to increase indentation (decrease should happen in endVisit).
 * - endVisit() should expect a string for all of its AST nodes in reverse order in visitResults.
 *   Communicate the single string for the visited node by pushing to the back of visitedResults.
 */

/*
 * Before visiting function, record string for signature and setup indentation for body.
 * This visit method pushes a string result, that the endVisit method should extend.
 */
bool PrettyPrinter::visit(ASTFunction * element) {
  indentLevel++;
  return true;
}

/*
 * After visiting function, collect the string representations for the:
 *    statements, declarations, formals, and then function name
 * they are on the visit stack in that order.
 */
void PrettyPrinter::endVisit(ASTFunction * element) {
  auto bodyString = joinWithDelim(visitResults, "\n", element->getStmts().size(), 0);
  auto declString = joinWithDelim(visitResults, "\n", element->getDeclarations().size(), 0);
  auto formalsString = joinWithDelim(visitResults, ", ", element->getFormals().size(), 1);

  // function name is last element on stack; we modify it in place
  visitResults.back() += "(" + formalsString + ") \n{\n" + declString + bodyString + "}\n";

  indentLevel--;
}

void PrettyPrinter::endVisit(ASTNumberExpr * element) {
  visitResults.push_back(std::to_string(element->getValue()));
}

void PrettyPrinter::endVisit(ASTVariableExpr * element) {
  visitResults.push_back(element->getName());
}

void PrettyPrinter::endVisit(ASTBinaryExpr * element) {
  std::string rightString = visitResults.back();
  visitResults.pop_back();
  std::string leftString = visitResults.back();
  visitResults.pop_back();
  visitResults.push_back("(" + leftString + " " + element->getOp() + " " + rightString + ")");
}

void PrettyPrinter::endVisit(ASTInputExpr * element) {
  visitResults.push_back("input");
}

void PrettyPrinter::endVisit(ASTFunAppExpr * element) {
  auto actualsString = joinWithDelim(visitResults, ", ", element->getActuals().size(), 1);
  // function expression is last on the stack; extend it in place
  visitResults.back() += "(" + actualsString + ")";
}

void PrettyPrinter::endVisit(ASTAllocExpr * element) {
  std::string init = visitResults.back();
  visitResults.pop_back();
  visitResults.push_back("alloc " + init);
}

void PrettyPrinter::endVisit(ASTRefExpr * element) {
  std::string var = visitResults.back();
  visitResults.pop_back();
  visitResults.push_back("&" + var);
}

void PrettyPrinter::endVisit(ASTDeRefExpr * element) {
  std::string base = visitResults.back();
  visitResults.pop_back();
  visitResults.push_back("*" + base);
}

void PrettyPrinter::endVisit(ASTNullExpr * element) {
  visitResults.push_back("null");
}

void PrettyPrinter::endVisit(ASTFieldExpr * element) {
  std::string init = visitResults.back();
  visitResults.pop_back();
  visitResults.push_back(element->getField() + ":" + init);
}

void PrettyPrinter::endVisit(ASTRecordExpr * element) {
  // hoisted out of push_back's argument expression: joinWithDelim mutates
  // visitResults, so keep the mutation clearly sequenced before the push
  auto fieldsString = joinWithDelim(visitResults, ", ", element->getFields().size(), 1);
  visitResults.push_back("{" + fieldsString + "}");
}

void PrettyPrinter::endVisit(ASTAccessExpr * element) {
  std::string accessString = visitResults.back();
  visitResults.pop_back();
  visitResults.push_back(accessString + '.' + element->getField());
}

void PrettyPrinter::endVisit(ASTDeclNode * element) {
  visitResults.push_back(element->getName());
}

void PrettyPrinter::endVisit(ASTDeclStmt * element) {
  auto varsString = joinWithDelim(visitResults, ", ", element->getVars().size(), 1);
  visitResults.push_back(indent() + "var " + varsString + ";");
}

void PrettyPrinter::endVisit(ASTAssignStmt * element) {
  std::string rhsString = visitResults.back();
  visitResults.pop_back();
  std::string lhsString = visitResults.back();
  visitResults.pop_back();
  visitResults.push_back(indent() + lhsString + " = " + rhsString + ";");
}

bool PrettyPrinter::visit(ASTBlockStmt * element) {
  indentLevel++;
  return true;
}

void PrettyPrinter::endVisit(ASTBlockStmt * element) {
  indentLevel--;
  auto stmtsString = joinWithDelim(visitResults, "\n", element->getStmts().size(), 0);
  visitResults.push_back(indent() + "{\n" + stmtsString + indent() + "}");
}

/*
 * For a while the body should be indented, but not the condition.
 * Since conditions are expressions and their visit methods never indent
 * incrementing here works.
 */
bool PrettyPrinter::visit(ASTWhileStmt * element) {
  indentLevel++;
  return true;
}

void PrettyPrinter::endVisit(ASTWhileStmt * element) {
  std::string bodyString = visitResults.back();
  visitResults.pop_back();
  std::string condString = visitResults.back();
  visitResults.pop_back();
  indentLevel--;
  std::string whileString = indent() + "while (" + condString + ") \n" + bodyString;
  visitResults.push_back(whileString);
}

bool PrettyPrinter::visit(ASTIfStmt * element) {
  indentLevel++;
  return true;
}

void PrettyPrinter::endVisit(ASTIfStmt * element) {
  std::string elseString;
  // else branch is only on the stack if it exists in the AST
  if (element->getElse() != nullptr) {
    elseString = visitResults.back();
    visitResults.pop_back();
  }
  std::string thenString = visitResults.back();
  visitResults.pop_back();
  std::string condString = visitResults.back();
  visitResults.pop_back();
  indentLevel--;
  std::string ifString = indent() + "if (" + condString + ") \n" + thenString;
  if (element->getElse() != nullptr) {
    ifString += "\n" + indent() + "else\n" + elseString;
  }
  visitResults.push_back(ifString);
}

void PrettyPrinter::endVisit(ASTOutputStmt * element) {
  std::string argString = visitResults.back();
  visitResults.pop_back();
  visitResults.push_back(indent() + "output " + argString + ";");
}

void PrettyPrinter::endVisit(ASTErrorStmt * element) {
  std::string argString = visitResults.back();
  visitResults.pop_back();
  visitResults.push_back(indent() + "error " + argString + ";");
}

void PrettyPrinter::endVisit(ASTReturnStmt * element) {
  std::string argString = visitResults.back();
  visitResults.pop_back();
  visitResults.push_back(indent() + "return " + argString + ";");
}

// one indent unit is indentSize copies of indentChar, repeated per level
std::string PrettyPrinter::indent() const {
  return std::string(indentLevel*indentSize, indentChar);
}
// // Utility class to print CaloHitMC // #ifndef Print_inc_CaloHitMCPrinter_hh #define Print_inc_CaloHitMCPrinter_hh #include <cstring> #include <iostream> #include "Offline/Print/inc/ProductPrinter.hh" #include "Offline/MCDataProducts/inc/CaloHitMC.hh" #include "art/Framework/Principal/Handle.h" #include "canvas/Persistency/Common/Ptr.h" namespace mu2e { class CaloHitMCPrinter : public ProductPrinter { public: CaloHitMCPrinter() { } CaloHitMCPrinter(const ConfigE& conf):ProductPrinter(conf) { _eCut = conf.eCut(); } // do not print if p is below this cut void setECut(double e) { _eCut = e; } double eCut() const { return _eCut; } // all the ways to request a printout void Print(art::Event const& event, std::ostream& os = std::cout) override; void Print(const art::Handle<CaloHitMCCollection>& handle, std::ostream& os = std::cout); void Print(const art::ValidHandle<CaloHitMCCollection>& handle, std::ostream& os = std::cout); void Print(const CaloHitMCCollection& coll, std::ostream& os = std::cout); void Print(const art::Ptr<CaloHitMC>& ptr, int ind = -1, std::ostream& os = std::cout); void Print(const mu2e::CaloHitMC& obj, int ind = -1, std::ostream& os = std::cout); void PrintHeader(const std::string& tag, std::ostream& os = std::cout); void PrintListHeader(std::ostream& os = std::cout); private: double _eCut; }; } #endif
#include "codec.h" #include <muduo/base/Logging.h> #include <muduo/base/Mutex.h> #include <muduo/net/EventLoop.h> #include <muduo/net/SocketsOps.h> #include <muduo/net/TcpServer.h> #include <boost/bind.hpp> #include <set> #include <stdio.h> using namespace muduo; using namespace muduo::net; class ChatServer : boost::noncopyable { public: ChatServer(EventLoop* loop, const InetAddress& listenAddr) : loop_(loop), server_(loop, listenAddr, "ChatServer"), codec_(boost::bind(&ChatServer::onStringMessage, this, _1, _2, _3)) { server_.setConnectionCallback( boost::bind(&ChatServer::onConnection, this, _1)); server_.setMessageCallback( boost::bind(&LengthHeaderCodec::onMessage, &codec_, _1, _2, _3)); } void start() { server_.start(); } private: void onConnection(const TcpConnectionPtr& conn) { LOG_INFO << conn->localAddress().toHostPort() << " -> " << conn->peerAddress().toHostPort() << " is " << (conn->connected() ? "UP" : "DOWN"); MutexLockGuard lock(mutex_); if (conn->connected()) { conn->setContext(Timestamp()); connections_.insert(conn); } else { connections_.erase(conn); } } void onStringMessage(const TcpConnectionPtr&, const string& message, Timestamp) { MutexLockGuard lock(mutex_); for (ConnectionList::iterator it = connections_.begin(); it != connections_.end(); ++it) { codec_.send(get_pointer(*it), message); } } typedef std::set<TcpConnectionPtr> ConnectionList; EventLoop* loop_; TcpServer server_; LengthHeaderCodec codec_; MutexLock mutex_; ConnectionList connections_; }; int main(int argc, char* argv[]) { LOG_INFO << "pid = " << getpid(); if (argc > 1) { EventLoop loop; uint16_t port = static_cast<uint16_t>(atoi(argv[1])); InetAddress serverAddr(port); ChatServer server(&loop, serverAddr); server.start(); loop.loop(); } else { printf("Usage: %s port\n", argv[0]); } }
/***********************************************************************************************************************
*  OpenStudio(R), Copyright (c) 2008-2018, Alliance for Sustainable Energy, LLC. All rights reserved.
*
*  Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
*  following conditions are met:
*
*  (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following
*  disclaimer.
*
*  (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
*  following disclaimer in the documentation and/or other materials provided with the distribution.
*
*  (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote
*  products derived from this software without specific prior written permission from the respective party.
*
*  (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative
*  works may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without
*  specific prior written permission from Alliance for Sustainable Energy, LLC.
*
*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
*  INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*  DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER, THE UNITED STATES GOVERNMENT, OR ANY CONTRIBUTORS BE LIABLE FOR
*  ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
*  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
*  AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
*  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************************************************************/

#include <gtest/gtest.h>
#include "ModelFixture.hpp"
#include "../EvaporativeCoolerDirectResearchSpecial.hpp"
#include "../EvaporativeCoolerDirectResearchSpecial_Impl.hpp"
#include "../AirLoopHVAC.hpp"
#include "../PlantLoop.hpp"
#include "../Schedule.hpp"
#include "../Node.hpp"
#include "../Node_Impl.hpp"
#include "../AirLoopHVACZoneSplitter.hpp"

using namespace openstudio::model;

// Construction smoke test: building the object must not abort, so run it in
// a forked process and require a clean exit(0).
TEST_F(ModelFixture,EvaporativeCoolerDirectResearchSpecial_EvaporativeCoolerDirectResearchSpecial)
{
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";

  ASSERT_EXIT (
  {
     Model m;
     Schedule s = m.alwaysOnDiscreteSchedule();
     EvaporativeCoolerDirectResearchSpecial testObject(m,s);

     exit(0);
  } ,
    ::testing::ExitedWithCode(0), "" );
}

// addToNode: the cooler may only be placed on an air-loop supply side.
// Demand-side nodes and plant-loop nodes must be rejected, and a clone must
// be addable independently of the original.
TEST_F(ModelFixture,EvaporativeCoolerDirectResearchSpecial_addToNode)
{
  Model m;
  Schedule s = m.alwaysOnDiscreteSchedule();
  EvaporativeCoolerDirectResearchSpecial testObject(m,s);

  AirLoopHVAC airLoop(m);

  Node supplyOutletNode = airLoop.supplyOutletNode();

  // accepted on supply side: loop grows from 2 to 3 components
  EXPECT_TRUE(testObject.addToNode(supplyOutletNode));
  EXPECT_EQ( (unsigned)3, airLoop.supplyComponents().size() );

  Node inletNode = airLoop.zoneSplitter().lastOutletModelObject()->cast<Node>();

  // rejected on the demand side; component count unchanged
  EXPECT_FALSE(testObject.addToNode(inletNode));
  EXPECT_EQ((unsigned)5, airLoop.demandComponents().size());

  PlantLoop plantLoop(m);

  // rejected on both sides of a plant loop
  supplyOutletNode = plantLoop.supplyOutletNode();
  EXPECT_FALSE(testObject.addToNode(supplyOutletNode));
  EXPECT_EQ( (unsigned)5, plantLoop.supplyComponents().size() );

  Node demandOutletNode = plantLoop.demandOutletNode();
  EXPECT_FALSE(testObject.addToNode(demandOutletNode));
  EXPECT_EQ( (unsigned)5, plantLoop.demandComponents().size() );

  // a clone is a distinct component and can be added to the same loop
  EvaporativeCoolerDirectResearchSpecial testObjectClone = testObject.clone(m).cast<EvaporativeCoolerDirectResearchSpecial>();
  supplyOutletNode = airLoop.supplyOutletNode();

  EXPECT_TRUE(testObjectClone.addToNode(supplyOutletNode));
  EXPECT_EQ( (unsigned)5, airLoop.supplyComponents().size() );
}
//
// Layer List Widget
//

//
// Copyright (C) 2016 Peter Niekamp
//

#include "layerlistwidget.h"
#include <QDragMoveEvent>
#include <QMimeData>
#include <QFileInfo>
#include <QMessageBox>
#include <QDebug>

using namespace std;

//|---------------------- LayerListWidget -----------------------------------
//|--------------------------------------------------------------------------

///////////////////////// LayerListWidget::Constructor //////////////////////
LayerListWidget::LayerListWidget(QWidget *parent)
  : QListWidget(parent)
{
  setContextMenuPolicy(Qt::CustomContextMenu);

  // Delete action is reachable both via the context menu and the Del key
  m_deleteaction = new QAction("Delete", this);
  m_deleteaction->setShortcut(QKeySequence(Qt::Key_Delete));
  m_deleteaction->setShortcutContext(Qt::WidgetWithChildrenShortcut);

  addAction(m_deleteaction);

  connect(m_deleteaction, &QAction::triggered, this, &LayerListWidget::on_Delete_triggered);
  connect(this, &QListWidget::customContextMenuRequested, this, &LayerListWidget::on_contextmenu_requested);
}

///////////////////////// LayerListWidget::edit /////////////////////////////
// Bind this widget to a document and rebuild the list whenever the document
// (or anything it depends on) changes.
void LayerListWidget::edit(Studio::Document *document)
{
  m_document = document;

  connect(&m_document, &SpriteSheetDocument::document_changed, this, &LayerListWidget::refresh);
  connect(&m_document, &SpriteSheetDocument::dependant_changed, this, &LayerListWidget::refresh);

  refresh();
}

///////////////////////// LayerListWidget::refresh //////////////////////////
// Rebuild the item list from the document's layers, preserving the current
// selection row across the rebuild.
void LayerListWidget::refresh()
{
  auto currentrow = currentRow();

  clear();

  for(int i = 0; i < m_document.layers(); ++i)
  {
    // fall back to a generic name when the layer has no backing document
    auto name = QString("Layer %1").arg(i);

    if (m_document.layer(i))
    {
      name = QFileInfo(Studio::Core::instance()->find_object<Studio::DocumentManager>()->path(m_document.layer(i))).completeBaseName();
    }

    QListWidgetItem *item = new QListWidgetItem(name, this);

    item->setFlags(Qt::ItemIsEnabled | Qt::ItemIsSelectable | Qt::ItemIsDragEnabled);

    item->setIcon(m_document.layer(i) ? m_document.layer(i)->icon() : QIcon(":/spriteplugin/blank.png"));
  }

  setCurrentRow(currentrow);
}

///////////////////////// LayerListWidget::mimeTypes ////////////////////////
QStringList LayerListWidget::mimeTypes() const
{
  return { "datumstudio/spritelayermodelitem", "text/uri-list" };
}

///////////////////////// LayerListWidget::mimeData /////////////////////////
// Encode the dragged items as their row indices in our custom mime format.
QMimeData *LayerListWidget::mimeData(QList<QListWidgetItem *> const items) const
{
  QMimeData *mimedata = new QMimeData();

  QByteArray encoded;
  QDataStream stream(&encoded, QIODevice::WriteOnly);

  for(auto &item : items)
  {
    stream << indexFromItem(item).row();
  }

  mimedata->setData("datumstudio/spritelayermodelitem", encoded);

  return mimedata;
}

///////////////////////// LayerListWidget::supportedDropActions /////////////
Qt::DropActions LayerListWidget::supportedDropActions() const
{
  return Qt::CopyAction | Qt::MoveAction;
}

////////////////////// LayerListWidget::dragEnterEvent //////////////////////
// Accept internal layer reorders, or a single dragged file that opens as an
// "Image" document; anything else is ignored.
void LayerListWidget::dragEnterEvent(QDragEnterEvent *event)
{
  if (event->mimeData()->hasFormat("datumstudio/spritelayermodelitem") && event->source() == this)
  {
    event->accept();
  }

  if (event->mimeData()->urls().size() == 1 && event->source())
  {
    auto documentmanager = Studio::Core::instance()->find_object<Studio::DocumentManager>();

    // open the candidate just to check its type, then close it again
    if (auto document = documentmanager->open(event->mimeData()->urls().at(0).toLocalFile()))
    {
      if (document->type() == "Image")
      {
        event->accept();
      }

      documentmanager->close(document);
    }
  }

  if (!event->isAccepted())
    return;

  QListWidget::dragEnterEvent(event);
}

///////////////////////// LayerListWidget::dropEvent ////////////////////////
// Translate the drop indicator into an insert position, then either move
// existing layers (internal drag) or add new layers from dropped files.
void LayerListWidget::dropEvent(QDropEvent *event)
{
  int position = 0;

  switch(dropIndicatorPosition())
  {
    case QAbstractItemView::OnItem:
      break;

    case QAbstractItemView::AboveItem:
      position = indexAt(event->pos()).row();
      break;

    case QAbstractItemView::BelowItem:
      position = indexAt(event->pos()).row() + 1;
      break;

    case QAbstractItemView::OnViewport:
      position = count();
      break;
  }

  if (event->mimeData()->hasFormat("datumstudio/spritelayermodelitem"))
  {
    QByteArray encoded = event->mimeData()->data("datumstudio/spritelayermodelitem");
    QDataStream stream(&encoded, QIODevice::ReadOnly);

    while (!stream.atEnd())
    {
      int index;
      stream >> index;

      m_document.move_layer(index, position);

      // a move from above the insert point shifts subsequent targets down one
      position = (position <= index) ? position + 1 : position;
    }
  }

  if (event->mimeData()->hasUrls())
  {
    for(auto &url : event->mimeData()->urls())
    {
      QString src = url.toLocalFile();

      m_document.add_layer(position, src);

      position += 1;
    }
  }

  // the document-change signal triggers refresh(); suppress the default
  // item-move handling so rows are not moved twice
  event->setDropAction(Qt::IgnoreAction);
  event->accept();

  QListWidget::dropEvent(event);
}

///////////////////////// LayerListWidget::contextmenu //////////////////////
void LayerListWidget::on_contextmenu_requested(QPoint pos)
{
  if (currentRow() != -1)
  {
    QMenu menu;
    menu.addAction(m_deleteaction);

    menu.exec(QCursor::pos());
  }
}

///////////////////////// LayerListWidget::Delete ///////////////////////////
// Confirm with the user before erasing the selected layer from the document.
void LayerListWidget::on_Delete_triggered()
{
  if (currentRow() != -1)
  {
    if (QMessageBox::question(this, "Remove Layer", "Remove Selected Layer\n\nSure ?", QMessageBox::Ok | QMessageBox::Cancel) == QMessageBox::Ok)
    {
      m_document.erase_layer(currentRow());
    }
  }
}
//===--- driver.cpp - Swift Compiler Driver -------------------------------===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// // // This is the entry point to the swift compiler driver. // //===----------------------------------------------------------------------===// #include "swift/AST/DiagnosticEngine.h" #include "swift/AST/DiagnosticsDriver.h" #include "swift/Basic/LLVMInitialize.h" #include "swift/Basic/InitializeSwiftModules.h" #include "swift/Basic/PrettyStackTrace.h" #include "swift/Basic/Program.h" #include "swift/Basic/TaskQueue.h" #include "swift/Basic/SourceManager.h" #include "swift/Driver/Compilation.h" #include "swift/Driver/Driver.h" #include "swift/Driver/FrontendUtil.h" #include "swift/Driver/Job.h" #include "swift/Driver/ToolChain.h" #include "swift/Frontend/Frontend.h" #include "swift/Frontend/PrintingDiagnosticConsumer.h" #include "swift/FrontendTool/FrontendTool.h" #include "swift/DriverTool/DriverTool.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ConvertUTF.h" #include "llvm/Support/Errno.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Host.h" #include "llvm/Support/ManagedStatic.h" #include "llvm/Support/Path.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/Process.h" #include "llvm/Support/Program.h" #include "llvm/Support/Signals.h" #include "llvm/Support/StringSaver.h" #include "llvm/Support/TargetSelect.h" #include "llvm/Support/raw_ostream.h" #include <memory> #include <stdlib.h> #if defined(_WIN32) #include <windows.h> #endif using namespace swift; using namespace 
swift::driver; std::string getExecutablePath(const char *FirstArg) { void *P = (void *)(intptr_t)getExecutablePath; return llvm::sys::fs::getMainExecutable(FirstArg, P); } /// Run 'swift-autolink-extract'. extern int autolink_extract_main(ArrayRef<const char *> Args, const char *Argv0, void *MainAddr); extern int modulewrap_main(ArrayRef<const char *> Args, const char *Argv0, void *MainAddr); /// Run 'swift-indent' extern int swift_indent_main(ArrayRef<const char *> Args, const char *Argv0, void *MainAddr); /// Run 'swift-symbolgraph-extract' extern int swift_symbolgraph_extract_main(ArrayRef<const char *> Args, const char *Argv0, void *MainAddr); /// Run 'swift-api-digester' extern int swift_api_digester_main(ArrayRef<const char *> Args, const char *Argv0, void *MainAddr); /// Run 'swift-api-extract' extern int swift_api_extract_main(ArrayRef<const char *> Args, const char *Argv0, void *MainAddr); /// Determine if the given invocation should run as a "subcommand". /// /// Examples of "subcommands" are 'swift build' or 'swift test', which are /// usually used to invoke the Swift package manager executables 'swift-build' /// and 'swift-test', respectively. /// /// \param ExecName The name of the argv[0] we were invoked as. /// \param SubcommandName On success, the full name of the subcommand to invoke. /// \param Args On return, the adjusted program arguments to use. /// \returns True if running as a subcommand. static bool shouldRunAsSubcommand(StringRef ExecName, SmallString<256> &SubcommandName, const ArrayRef<const char *> Args) { assert(!Args.empty()); // If we are not run as 'swift', don't do anything special. This doesn't work // with symlinks with alternate names, but we can't detect 'swift' vs 'swiftc' // if we try and resolve using the actual executable path. if (ExecName != "swift") return false; // If there are no program arguments, always invoke as normal. if (Args.size() == 1) return false; // Otherwise, we have a program argument. 
If it looks like an option or a // path, then invoke in interactive mode with the arguments as given. StringRef FirstArg(Args[1]); if (FirstArg.startswith("-") || FirstArg.contains('.') || FirstArg.contains('/')) return false; // Otherwise, we should have some sort of subcommand. Get the subcommand name // and remove it from the program arguments. StringRef Subcommand = Args[1]; // If the subcommand is the "built-in" 'repl', then use the // normal driver. if (Subcommand == "repl") { return false; } // Form the subcommand name. SubcommandName.assign("swift-"); SubcommandName.append(Subcommand); return true; } static bool shouldDisallowNewDriver(DiagnosticEngine &diags, StringRef ExecName, const ArrayRef<const char *> argv) { // We are not invoking the driver, so don't forward. if (ExecName != "swift" && ExecName != "swiftc") { return true; } StringRef disableArg = "-disallow-use-new-driver"; StringRef disableEnv = "SWIFT_USE_OLD_DRIVER"; auto shouldWarn = !llvm::sys::Process:: GetEnv("SWIFT_AVOID_WARNING_USING_OLD_DRIVER").hasValue(); // If user specified using the old driver, don't forward. 
// Tail of a predicate whose head is outside this chunk: returns true when the
// legacy C++ driver must NOT forward to the new swift-driver executable.
// `disableArg` / `disableEnv` are the command-line and environment opt-outs.
if (llvm::find_if(argv, [&](const char* arg) { return StringRef(arg) == disableArg; }) != argv.end()) {
  if (shouldWarn)
    diags.diagnose(SourceLoc(), diag::old_driver_deprecated, disableArg);
  return true;
}
// Environment-variable form of the same opt-out.
if (llvm::sys::Process::GetEnv(disableEnv).hasValue()) {
  if (shouldWarn)
    diags.diagnose(SourceLoc(), diag::old_driver_deprecated, disableEnv);
  return true;
}
return false;
}

/// Appends the name of the new swift-driver executable to \p buffer, which on
/// entry holds an existing directory path.
/// Lookup order: the name given by SWIFT_USE_NEW_DRIVER (appended without an
/// existence check), then "swift-driver", then "swift-driver-new".
/// \return true if a candidate was appended (and, for the fixed names, exists).
static bool appendSwiftDriverName(SmallString<256> &buffer) {
  assert(llvm::sys::fs::exists(buffer));
  if (auto driverNameOp = llvm::sys::Process::GetEnv("SWIFT_USE_NEW_DRIVER")) {
    // Env var wins unconditionally; existence is checked by the caller.
    llvm::sys::path::append(buffer, *driverNameOp);
    return true;
  }

  llvm::sys::path::append(buffer, "swift-driver");
  if (llvm::sys::fs::exists(buffer)) {
    return true;
  }
  llvm::sys::path::remove_filename(buffer);
  llvm::sys::path::append(buffer, "swift-driver-new");
  if (llvm::sys::fs::exists(buffer)) {
    return true;
  }
  return false;
}

/// Main dispatch for the legacy C++ driver: handles the integrated tools
/// (-frontend / -modulewrap), optionally execs the new swift-driver, routes
/// the "kind"-specific tools, and otherwise builds and runs a Compilation.
/// \param ExecName      stem of argv[0] (e.g. "swift", "swiftc").
/// \param argv          possibly-modified argument vector (repl dropped).
/// \param originalArgv  untouched process arguments, used when re-execing.
static int run_driver(StringRef ExecName,
                      ArrayRef<const char *> argv,
                      const ArrayRef<const char *> originalArgv) {
  // This is done here and not done in FrontendTool.cpp, because
  // FrontendTool.cpp is linked to tools, which don't use swift modules.
  initializeSwiftModules();

  bool isRepl = false;

  // Handle integrated tools.
  if (argv.size() > 1) {
    StringRef FirstArg(argv[1]);

    if (FirstArg == "-frontend") {
      return performFrontend(llvm::makeArrayRef(argv.data()+2,
                                                argv.data()+argv.size()),
                             argv[0], (void *)(intptr_t)getExecutablePath);
    }

    if (FirstArg == "-modulewrap") {
      return modulewrap_main(llvm::makeArrayRef(argv.data()+2,
                                                argv.data()+argv.size()),
                             argv[0], (void *)(intptr_t)getExecutablePath);
    }

    // Run the integrated Swift frontend when called as "swift-frontend" but
    // without a leading "-frontend".
    if (!FirstArg.startswith("--driver-mode=") &&
        ExecName == "swift-frontend") {
      return performFrontend(llvm::makeArrayRef(argv.data()+1,
                                                argv.data()+argv.size()),
                             argv[0], (void *)(intptr_t)getExecutablePath);
    }

    if (FirstArg == "repl") {
      // The "repl" subcommand is consumed here; the flag re-adds "-repl"
      // when forwarding to the new driver below.
      isRepl = true;
      argv = argv.drop_front();
    }
  }

  std::string Path = getExecutablePath(argv[0]);

  PrintingDiagnosticConsumer PDC;
  SourceManager SM;
  DiagnosticEngine Diags(SM);
  Diags.addConsumer(PDC);

  // Forwarding calls to the swift driver if the C++ driver is invoked as `swift`
  // or `swiftc`, and an environment variable SWIFT_USE_NEW_DRIVER is defined.
  if (!shouldDisallowNewDriver(Diags, ExecName, argv)) {
    SmallString<256> NewDriverPath(llvm::sys::path::parent_path(Path));
    if (appendSwiftDriverName(NewDriverPath) &&
        llvm::sys::fs::exists(NewDriverPath)) {
      std::vector<const char *> subCommandArgs;
      // Rewrite the program argument.
      subCommandArgs.push_back(NewDriverPath.c_str());
      if (ExecName == "swiftc") {
        subCommandArgs.push_back("--driver-mode=swiftc");
      } else {
        assert(ExecName == "swift");
        subCommandArgs.push_back("--driver-mode=swift");
      }
      // Push these non-op frontend arguments so the build log can indicate
      // the new driver is used.
      subCommandArgs.push_back("-Xfrontend");
      subCommandArgs.push_back("-new-driver-path");
      subCommandArgs.push_back("-Xfrontend");
      subCommandArgs.push_back(NewDriverPath.c_str());

      // Push on the source program arguments
      if (isRepl) {
        // Skip original arguments "swift repl" (argv[0] and the subcommand).
        subCommandArgs.push_back("-repl");
        subCommandArgs.insert(subCommandArgs.end(),
                              originalArgv.begin() + 2, originalArgv.end());
      } else {
        subCommandArgs.insert(subCommandArgs.end(),
                              originalArgv.begin() + 1, originalArgv.end());
      }

      // Execute the subcommand. ExecuteInPlace only returns on failure
      // (execv-style replacement of the current process).
      subCommandArgs.push_back(nullptr);
      ExecuteInPlace(NewDriverPath.c_str(), subCommandArgs.data());

      // If we reach here then an error occurred (typically a missing path).
      std::string ErrorString = llvm::sys::StrError();
      llvm::errs() << "error: unable to invoke subcommand: "
                   << subCommandArgs[0] << " (" << ErrorString << ")\n";
      return 2;
    }
  }

  Driver TheDriver(Path, ExecName, argv, Diags);
  // Tool-style driver kinds are dispatched directly, bypassing Compilation.
  switch (TheDriver.getDriverKind()) {
  case Driver::DriverKind::AutolinkExtract:
    return autolink_extract_main(
        TheDriver.getArgsWithoutProgramNameAndDriverMode(argv),
        argv[0], (void *)(intptr_t)getExecutablePath);
  case Driver::DriverKind::SwiftIndent:
    return swift_indent_main(
        TheDriver.getArgsWithoutProgramNameAndDriverMode(argv),
        argv[0], (void *)(intptr_t)getExecutablePath);
  case Driver::DriverKind::SymbolGraph:
    return swift_symbolgraph_extract_main(
        TheDriver.getArgsWithoutProgramNameAndDriverMode(argv),
        argv[0], (void *)(intptr_t)getExecutablePath);
  case Driver::DriverKind::APIExtract:
    return swift_api_extract_main(
        TheDriver.getArgsWithoutProgramNameAndDriverMode(argv),
        argv[0], (void *)(intptr_t)getExecutablePath);
  case Driver::DriverKind::APIDigester:
    return swift_api_digester_main(
        TheDriver.getArgsWithoutProgramNameAndDriverMode(argv),
        argv[0], (void *)(intptr_t)getExecutablePath);
  default:
    break;
  }

  std::unique_ptr<llvm::opt::InputArgList> ArgList =
      TheDriver.parseArgStrings(ArrayRef<const char*>(argv).slice(1));
  if (Diags.hadAnyError())
    return 1;

  std::unique_ptr<ToolChain> TC = TheDriver.buildToolChain(*ArgList);
  if (Diags.hadAnyError())
    return 1;

  // Options only understood by the new driver are accepted but warned about.
  for (auto arg: ArgList->getArgs()) {
    if (arg->getOption().hasFlag(options::NewDriverOnlyOption)) {
      Diags.diagnose(SourceLoc(), diag::warning_unsupported_driver_option,
                     arg->getSpelling());
    }
  }

  std::unique_ptr<Compilation> C =
      TheDriver.buildCompilation(*TC, std::move(ArgList));
  if (Diags.hadAnyError())
    return 1;

  if (C) {
    std::unique_ptr<sys::TaskQueue> TQ = TheDriver.buildTaskQueue(*C);
    if (!TQ)
      return 1;
    return C->performJobs(std::move(TQ)).exitCode;
  }

  return 0;
}

/// Process entry point: normalizes arguments (UTF-8 on Windows, response-file
/// expansion), installs crash handlers, optionally execs an adjacent
/// "swift-<subcommand>" tool, and otherwise hands off to run_driver().
int swift::mainEntry(int argc_, const char **argv_) {
#if defined(_WIN32)
  LPWSTR *wargv_ = CommandLineToArgvW(GetCommandLineW(), &argc_);
  std::vector<std::string> utf8Args;
  // We use UTF-8 as the internal character encoding. On Windows,
  // arguments passed to wmain are encoded in UTF-16
  for (int i = 0; i < argc_; i++) {
    const wchar_t *wideArg = wargv_[i];
    int wideArgLen = std::wcslen(wideArg);
    utf8Args.push_back("");
    // Reinterpret the UTF-16 buffer as raw bytes for the converter.
    llvm::ArrayRef<char> uRef((const char *)wideArg,
                              (const char *)(wideArg + wideArgLen));
    llvm::convertUTF16ToUTF8String(uRef, utf8Args[i]);
  }

  std::vector<const char *> utf8CStrs;
  llvm::transform(utf8Args, std::back_inserter(utf8CStrs),
                  std::mem_fn(&std::string::c_str));
  argv_ = utf8CStrs.data();
#endif
  // Expand any response files in the command line argument vector - arguments
  // may be passed through response files in the event of command line length
  // restrictions.
  SmallVector<const char *, 256> ExpandedArgs(&argv_[0], &argv_[argc_]);
  llvm::BumpPtrAllocator Allocator;
  llvm::StringSaver Saver(Allocator);
  swift::driver::ExpandResponseFilesWithRetry(Saver, ExpandedArgs);

  // Initialize the stack trace using the parsed argument vector with expanded
  // response files.

  // PROGRAM_START/InitLLVM overwrites the passed in arguments with UTF-8
  // versions of them on Windows. This also has the effect of overwriting the
  // response file expansion. Since we handle the UTF-8 conversion above, we
  // pass in a copy and throw away the modifications.
  int ThrowawayExpandedArgc = ExpandedArgs.size();
  const char **ThrowawayExpandedArgv = ExpandedArgs.data();
  PROGRAM_START(ThrowawayExpandedArgc, ThrowawayExpandedArgv);
  ArrayRef<const char *> argv(ExpandedArgs);

  PrettyStackTraceSwiftVersion versionStackTrace;

  // Check if this invocation should execute a subcommand.
  StringRef ExecName = llvm::sys::path::stem(argv[0]);
  SmallString<256> SubcommandName;
  if (shouldRunAsSubcommand(ExecName, SubcommandName, argv)) {
    // Preserve argv for the stack trace.
    SmallVector<const char *, 256> subCommandArgs(argv.begin(), argv.end());
    // Drop the subcommand word itself (e.g. "swift format" -> "swift-format").
    subCommandArgs.erase(&subCommandArgs[1]);

    // We are running as a subcommand, try to find the subcommand adjacent to
    // the executable we are running as.
    SmallString<256> SubcommandPath(
        llvm::sys::path::parent_path(getExecutablePath(argv[0])));
    llvm::sys::path::append(SubcommandPath, SubcommandName);

    // If we didn't find the tool there, let the OS search for it.
    if (!llvm::sys::fs::exists(SubcommandPath)) {
      // Search for the program and use the path if found. If there was an
      // error, ignore it and just let the exec fail.
      auto result = llvm::sys::findProgramByName(SubcommandName);
      if (!result.getError())
        SubcommandPath = *result;
    }

    // Rewrite the program argument.
    subCommandArgs[0] = SubcommandPath.c_str();

    // Execute the subcommand.
    subCommandArgs.push_back(nullptr);
    ExecuteInPlace(SubcommandPath.c_str(), subCommandArgs.data());

    // If we reach here then an error occurred (typically a missing path).
    std::string ErrorString = llvm::sys::StrError();
    llvm::errs() << "error: unable to invoke subcommand: "
                 << subCommandArgs[0] << " (" << ErrorString << ")\n";
    return 2;
  }

  // Pre-expansion arguments, forwarded verbatim when re-execing swift-driver.
  ArrayRef<const char *> originalArgv(argv_, &argv_[argc_]);
  return run_driver(ExecName, argv, originalArgv);
}
/*****************************************************************************
*   Non-Rigid Face Tracking
******************************************************************************
*   by Jason Saragih, 5th Dec 2012
*   http://jsaragih.org/
******************************************************************************
*   Ch6 of the book "Mastering OpenCV with Practical Computer Vision Projects"
*   Copyright Packt Publishing 2012.
*   http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
/*
  face_tracker: face tracking classes
  Jason Saragih (2012)
*/
#include "opencv_hotshots/ft/face_tracker.hpp"
#include "opencv_hotshots/ft/ft.hpp"
#include <iostream>
#include "stdio.h" // For 'snprintf()'
#define fl at<float>
#include "opencv_hotshots/ft/fps_timer.hpp"
//==============================================================================
//==============================================================================
//==============================================================================
//========================== face_tracker_params ===============================
//==============================================================================
//==============================================================================
//==============================================================================
// Default parameters: a 3-level coarse-to-fine search window schedule, no
// robust fitting, and OpenCV cascade-detector settings for the initial
// face detection.
face_tracker_params::
face_tracker_params()
{
    ssize.resize(3);
    ssize[0] = Size(21,21); // coarsest search window
    ssize[1] = Size(11,11);
    ssize[2] = Size(5,5);   // finest search window
    robust = false;         // disable robust (outlier-weighted) fitting
    itol = 20;              // max robust-fit iterations
    ftol = 1e-3;            // robust-fit convergence tolerance
    scaleFactor = 1.15;     // cascade detector pyramid scale step
    minNeighbours = 2;      // cascade detector neighbour threshold
    minSize = Size(100,150);// smallest face to detect
}
//==============================================================================
// Serialise parameters into an opened FileStorage as a map node.
void
face_tracker_params::
write(FileStorage &fs) const
{
    assert(fs.isOpened());
    fs << "{";
    fs << "nlevels" << int(ssize.size());
    for(int i = 0; i < int(ssize.size()); i++){
        char str[256]; const char* ss;
        // FIX: keys were formatted as "w %d"/"h %d". FileStorage node names
        // must not contain spaces (only alphanumerics, '-' and '_'), so
        // writing such keys fails. Use "w<i>"/"h<i>" instead, and bounded
        // snprintf rather than sprintf.
        snprintf(str,sizeof(str),"w%d",i); ss = str;
        fs << ss << ssize[i].width;
        snprintf(str,sizeof(str),"h%d",i); ss = str;
        fs << ss << ssize[i].height;
    }
    fs << "robust"        << robust
       << "itol"          << itol
       << "ftol"          << ftol
       << "scaleFactor"   << scaleFactor
       << "minNeighbours" << minNeighbours
       << "minWidth"      << minSize.width
       << "minHeight"     << minSize.height
       << "}";
}
//==============================================================================
// Deserialise parameters from a map node written by write() above.
void
face_tracker_params::
read(const FileNode& node)
{
    assert(node.type() == FileNode::MAP);
    int n; node["nlevels"] >> n; ssize.resize(n);
    for(int i = 0; i < n; i++){
        char str[256]; const char* ss;
        // Keys must match write(): "w<i>"/"h<i>" (no space — see write()).
        snprintf(str,sizeof(str),"w%d",i); ss = str;
        node[ss] >> ssize[i].width;
        snprintf(str,sizeof(str),"h%d",i); ss = str;
        node[ss] >> ssize[i].height;
    }
    node["robust"]        >> robust;
    node["itol"]          >> itol;
    node["ftol"]          >> ftol;
    node["scaleFactor"]   >> scaleFactor;
    node["minNeighbours"] >> minNeighbours;
    node["minWidth"]      >> minSize.width;
    node["minHeight"]     >> minSize.height;
}
//==============================================================================
// Free-function hooks required by OpenCV's FileStorage serialisation.
void
write(FileStorage& fs,
      const string&,
      const face_tracker_params& x)
{
    x.write(fs);
}
//==============================================================================
// If the node is absent, fall back to the supplied default object.
void
read(const FileNode& node,
     face_tracker_params& x,
     const face_tracker_params& d)
{
    if(node.empty())x = d; else x.read(node);
}
//==============================================================================
// Load parameters stored under the "face_tracker_params" key of a file.
face_tracker_params
load_face_tracker_params(const char* fname)
{
    face_tracker_params x;
    FileStorage f(fname,FileStorage::READ);
    f["face_tracker_params"] >> x;
    f.release();
    return x;
}
//==============================================================================
// Save parameters under the "face_tracker_params" key of a file.
void
save_face_tracker_params(const char* fname,
                         const face_tracker_params& x)
{
    FileStorage f(fname,FileStorage::WRITE);
    f << "face_tracker_params" << x;
    f.release();
}
//==============================================================================
//==============================================================================
//==============================================================================
//============================== face_tracker ==================================
//==============================================================================
//==============================================================================
//==============================================================================
// Track the face in a new frame.
// Returns 1 on success (points/timer updated, tracking flag set), 0 when the
// detector did not yield a full landmark set.
int face_tracker::
track(const Mat &im,const face_tracker_params &p)
{
  //convert image to greyscale
  Mat gray;
  if(im.channels()==1)gray = im;
  else cvtColor(im,gray,COLOR_RGB2GRAY);

  //initialise — run the cascade detector only when not already tracking;
  //otherwise the previous frame's points seed the fit.
  if(!tracking)
    points = detector.detect(gray,p.scaleFactor,p.minNeighbours,p.minSize);
  if((int)points.size() != smodel.npts())return 0;

  //fit — coarse-to-fine over the configured search-window sizes
  for(int level = 0; level < int(p.ssize.size()); level++)
    points = this->fit(gray,points,p.ssize[level],p.robust,p.itol,p.ftol);

  //set tracking flag and increment timer
  tracking = true; timer.increment();  return 1;
}
//==============================================================================
// Draw the current landmark points and the shape-model connectivity edges
// (rows of smodel.C are point-index pairs) onto the image.
void face_tracker::
draw(Mat &im,
     const Scalar pts_color,
     const Scalar con_color)
{
  int n = points.size(); if(n == 0)return;
  for(int i = 0; i < smodel.C.rows; i++){
    int j = smodel.C.at<int>(i,0),k = smodel.C.at<int>(i,1);
    line(im,points[j],points[k],con_color,1);
  }
  for(int i = 0; i < n; i++)circle(im,points[i],1,pts_color,2,LINE_AA);
}
//==============================================================================
// One fitting pass: project the initial points onto the shape model, search
// for feature peaks around them with the patch model, then re-fit the shape.
// When `robust` is set, peaks are iteratively re-weighted (Gaussian weights
// from a MAD-based scale estimate) until the shape moves less than `ftol`
// or `itol` iterations elapse.
vector<Point2f> face_tracker::
fit(const Mat &image,
    const vector<Point2f> &init,
    const Size ssize,
    const bool robust,
    const int itol,
    const float ftol)
{
  int n = smodel.npts();
  assert((int(init.size())==n) && (pmodel.n_patches()==n));
  smodel.calc_params(init); vector<Point2f> pts = smodel.calc_shape();

  //find facial features in image around current estimates
  vector<Point2f> peaks = pmodel.calc_peaks(image,pts,ssize);

  //optimise
  if(!robust){
    smodel.calc_params(peaks); //compute shape model parameters
    pts = smodel.calc_shape(); //update shape
  }else{
    Mat weight(n,1,CV_32F),weight_sort(n,1,CV_32F);
    vector<Point2f> pts_old = pts;
    for(int iter = 0; iter < itol; iter++){
      //compute robust weight (residual per point, scale via 1.4826 * median
      //absolute deviation, floored at 0.1, then Gaussian re-weighting)
      for(int i = 0; i < n; i++)weight.fl(i) = norm(pts[i] - peaks[i]);
      cv::sort(weight,weight_sort,SORT_EVERY_COLUMN|SORT_ASCENDING);
      double var = 1.4826*weight_sort.fl(n/2); if(var < 0.1)var = 0.1;
      pow(weight,2,weight); weight *= -0.5/(var*var); cv::exp(weight,weight);

      //compute shape model parameters
      smodel.calc_params(peaks,weight);

      //update shape
      pts = smodel.calc_shape();

      //check for convergence
      float v = 0;
      for(int i = 0; i < n; i++)v += norm(pts[i]-pts_old[i]);
      if(v < ftol)break; else pts_old = pts;
    }
  }return pts;
}
//==============================================================================
// Serialise the tracker (detector + shape model + patch model) into an
// opened FileStorage as a map node.
void face_tracker::
write(FileStorage &fs) const
{
  assert(fs.isOpened());
  fs << "{"
     << "detector" << detector
     << "smodel"   << smodel
     << "pmodel"   << pmodel
     << "}";
}
//==============================================================================
// Deserialise the tracker from a map node written by write() above.
void face_tracker::
read(const FileNode& node)
{
  assert(node.type() == FileNode::MAP);
  node["detector"] >> detector;
  node["smodel"]   >> smodel;
  node["pmodel"]   >> pmodel;
}
//==============================================================================
/* -------------------------------------------------------------------------- */ /* Copyright 2002-2018, OpenNebula Project, OpenNebula Systems */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); you may */ /* not use this file except in compliance with the License. You may obtain */ /* a copy of the License at */ /* */ /* http://www.apache.org/licenses/LICENSE-2.0 */ /* */ /* Unless required by applicable law or agreed to in writing, software */ /* distributed under the License is distributed on an "AS IS" BASIS, */ /* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ /* See the License for the specific language governing permissions and */ /* limitations under the License. */ /* -------------------------------------------------------------------------- */ #include "Nebula.h" #include "NebulaLog.h" #include "VirtualMachine.h" #include "SqliteDB.h" #include "MySqlDB.h" #include "Client.h" #include <stdlib.h> #include <stdexcept> #include <libxml/parser.h> #include <signal.h> #include <unistd.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <pthread.h> #ifdef SYSTEMD #include <systemd/sd-daemon.h> #endif using namespace std; /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ void Nebula::start(bool bootstrap_only) { int rc; int fd; sigset_t mask; int signal; char hn[80]; string scripts_remote_dir; SqlDB * db_backend; bool solo; SqlDB * db_ptr; if ( gethostname(hn,79) != 0 ) { throw runtime_error("Error getting hostname"); } hostname = hn; // ----------------------------------------------------------- // Configuration // ----------------------------------------------------------- nebula_configuration = new OpenNebulaTemplate(etc_location, var_location); rc = nebula_configuration->load_configuration(); if ( rc != 0 ) { throw runtime_error("Could not load nebula configuration file."); 
} string config_fname = var_location + "config"; ofstream config_file(config_fname.c_str(), ios_base::trunc & ios_base::out); if (config_file.fail() == false) { config_file << *nebula_configuration << endl; config_file.close(); } nebula_configuration->get("SCRIPTS_REMOTE_DIR", scripts_remote_dir); hook_location = scripts_remote_dir + "/hooks/"; // ----------------------------------------------------------- // Log system // ----------------------------------------------------------- ostringstream os; try { Log::MessageType clevel; NebulaLog::LogType log_system; log_system = get_log_system(); clevel = get_debug_level(); // Initializing ONE Daemon log system if ( log_system != NebulaLog::UNDEFINED ) { string log_fname; log_fname = log_location + "oned.log"; NebulaLog::init_log_system(log_system, clevel, log_fname.c_str(), ios_base::trunc, "oned"); } else { throw runtime_error("Unknown LOG_SYSTEM."); } os << "Starting " << version() << endl; os << "----------------------------------------\n"; os << " OpenNebula Configuration File \n"; os << "----------------------------------------\n"; os << *nebula_configuration; os << "----------------------------------------"; NebulaLog::log("ONE",Log::INFO,os); os.str(""); os << "Log level:" << clevel << " [0=ERROR,1=WARNING,2=INFO,3=DEBUG]"; NebulaLog::log("ONE",Log::INFO,os); os.str(""); os << "Support for xmlrpc-c > 1.31: "; #ifdef OLD_XMLRPC os << "no. 
MAX_CONN and MAX_CONN_BACKLOG configuration will not be used"; #else os << "yes"; #endif NebulaLog::log("ONE",Log::INFO,os); } catch(runtime_error&) { throw; } // ----------------------------------------------------------- // Load the OpenNebula master key and keep it in memory // ----------------------------------------------------------- rc = nebula_configuration->load_key(); if ( rc != 0 ) { throw runtime_error("Could not load nebula master key file."); } // ----------------------------------------------------------- // Initialize the XML library // ----------------------------------------------------------- xmlInitParser(); // ----------------------------------------------------------- // Init federation configuration // ----------------------------------------------------------- federation_enabled = false; federation_master = false; zone_id = 0; server_id = -1; master_oned = ""; const VectorAttribute * vatt = nebula_configuration->get("FEDERATION"); if (vatt != 0) { string mode = vatt->vector_value("MODE"); one_util::toupper(mode); if (mode == "STANDALONE") { federation_enabled = false; federation_master = false; zone_id = 0; } else if (mode == "MASTER") { federation_enabled = true; federation_master = true; } else if (mode == "SLAVE") { federation_enabled = true; federation_master = false; } else { throw runtime_error( "FEDERATION MODE must be one of STANDALONE, MASTER, SLAVE."); } if (federation_enabled) { rc = vatt->vector_value("ZONE_ID", zone_id); if (rc != 0) { throw runtime_error("FEDERATION ZONE_ID must be set for " "federated instances."); } master_oned = vatt->vector_value("MASTER_ONED"); if (master_oned.empty() && !federation_master) { throw runtime_error( "FEDERATION MASTER_ONED endpoint is missing."); } } if ( vatt->vector_value("SERVER_ID", server_id) != 0 ) { server_id = -1; } } vatt = nebula_configuration->get("RAFT"); long long election_ms; long long bcast_ms; time_t xmlrpc_ms; time_t log_purge; unsigned int log_retention; 
vatt->vector_value("LOG_PURGE_TIMEOUT", log_purge); vatt->vector_value("ELECTION_TIMEOUT_MS", election_ms); vatt->vector_value("BROADCAST_TIMEOUT_MS", bcast_ms); vatt->vector_value("XMLRPC_TIMEOUT_MS", xmlrpc_ms); vatt->vector_value("LOG_RETENTION", log_retention); Log::set_zone_id(zone_id); // ----------------------------------------------------------- // Database // ----------------------------------------------------------- try { bool db_is_sqlite = true; string server; int port; string user; string passwd; string db_name; int connections; const VectorAttribute * _db = nebula_configuration->get("DB"); if ( _db != 0 ) { string value = _db->vector_value("BACKEND"); if (value == "mysql") { db_is_sqlite = false; } if (_db->vector_value("SERVER", server) == -1) { server = "localhost"; } if (_db->vector_value("PORT", port) == -1) { port = 0; } if (_db->vector_value("USER", user) == -1) { user = "oneadmin"; } if (_db->vector_value("PASSWD", passwd) == -1) { passwd = "oneadmin"; } if (_db->vector_value("DB_NAME", db_name) == -1) { db_name = "opennebula"; } if (_db->vector_value("CONNECTIONS", connections) == -1) { connections = 50; } } if ( db_is_sqlite ) { db_backend = new SqliteDB(var_location + "one.db"); } else { db_backend = new MySqlDB(server, port, user, passwd, db_name, connections); } // --------------------------------------------------------------------- // Check Database Versions // --------------------------------------------------------------------- bool local_bootstrap; bool shared_bootstrap; NebulaLog::log("ONE",Log::INFO,"Checking database version."); SystemDB sysdb(db_backend); rc = sysdb.check_db_version(is_federation_slave(), local_bootstrap, shared_bootstrap); if( rc == -1 ) { throw runtime_error("Database version mismatch. 
Check oned.log."); } // --------------------------------------------------------------------- // Initialize logging and federation database facilities and SystemDB // --------------------------------------------------------------------- solo = server_id == -1; if ( (solo && local_bootstrap) || bootstrap_only) { if ( logdb->bootstrap(db_backend) != 0 ) { throw runtime_error("Error bootstrapping database."); } } logdb = new LogDB(db_backend, solo, log_retention); if ( federation_master ) { fed_logdb = new FedLogDB(logdb); db_ptr = fed_logdb; } else { db_ptr = logdb; } system_db = new SystemDB(logdb); // --------------------------------------------------------------------- // DB Bootstraping // --------------------------------------------------------------------- rc = 0; if ( (local_bootstrap || shared_bootstrap) && !solo ) { throw runtime_error("Database has to be bootstraped to start" " oned in HA"); } if (local_bootstrap) { NebulaLog::log("ONE",Log::INFO, "Bootstrapping OpenNebula database, stage 1."); rc += VirtualMachinePool::bootstrap(logdb); rc += HostPool::bootstrap(logdb); rc += VirtualNetworkPool::bootstrap(logdb); rc += ImagePool::bootstrap(logdb); rc += VMTemplatePool::bootstrap(logdb); rc += DatastorePool::bootstrap(logdb); rc += ClusterPool::bootstrap(logdb); rc += DocumentPool::bootstrap(logdb); rc += UserQuotas::bootstrap(logdb); rc += GroupQuotas::bootstrap(logdb); rc += SecurityGroupPool::bootstrap(logdb); rc += VirtualRouterPool::bootstrap(logdb); rc += VMGroupPool::bootstrap(logdb); // Create the system tables only if bootstrap went well if (rc == 0) { rc += system_db->local_bootstrap(); } // Insert default system attributes rc += default_user_quota.insert(); rc += default_group_quota.insert(); } if (shared_bootstrap) { NebulaLog::log("ONE",Log::INFO, "Bootstrapping OpenNebula database, stage 2."); rc += GroupPool::bootstrap(logdb); rc += UserPool::bootstrap(logdb); rc += AclManager::bootstrap(logdb); rc += ZonePool::bootstrap(logdb); rc += 
VdcPool::bootstrap(logdb); rc += MarketPlacePool::bootstrap(logdb); rc += MarketPlaceAppPool::bootstrap(logdb); // Create the system tables only if bootstrap went well if ( rc == 0 ) { rc += system_db->shared_bootstrap(); } } if ( rc != 0 ) { throw runtime_error("Error bootstrapping database."); } } catch (exception&) { throw; } if (bootstrap_only) { //XML Library xmlCleanupParser(); NebulaLog::log("ONE", Log::INFO, "Database bootstrap finalized, exiting.\n"); return; } // ----------------------------------------------------------- // Close stds, we no longer need them // ----------------------------------------------------------- if (NebulaLog::log_type() != NebulaLog::STD ) { fd = open("/dev/null", O_RDWR); dup2(fd,0); dup2(fd,1); dup2(fd,2); close(fd); fcntl(0, F_SETFD, 0); // Keep them open across exec funcs fcntl(1, F_SETFD, 0); fcntl(2, F_SETFD, 0); } else { fcntl(0, F_SETFD, FD_CLOEXEC); fcntl(1, F_SETFD, FD_CLOEXEC); fcntl(2, F_SETFD, FD_CLOEXEC); } // ----------------------------------------------------------- // Block all signals before creating any Nebula thread // ----------------------------------------------------------- sigfillset(&mask); pthread_sigmask(SIG_BLOCK, &mask, NULL); one_util::SSLMutex::initialize(); // ----------------------------------------------------------- //Managers // ----------------------------------------------------------- MadManager::mad_manager_system_init(); time_t timer_period; time_t monitor_period; nebula_configuration->get("MANAGER_TIMER", timer_period); nebula_configuration->get("MONITORING_INTERVAL", monitor_period); // ---- ACL Manager ---- try { aclm = new AclManager(db_ptr, zone_id, is_federation_slave(), timer_period); } catch (bad_alloc&) { throw; } rc = aclm->start(); if ( rc != 0 ) { throw runtime_error("Could not start the ACL Manager"); } // ------------------------------------------------------------------------- // Pools // ------------------------------------------------------------------------- try { /* 
-------------------------- Cluster Pool -------------------------- */ const VectorAttribute * vnc_conf; vnc_conf = nebula_configuration->get("VNC_PORTS"); clpool = new ClusterPool(logdb, vnc_conf); /* --------------------- VirtualMachine Pool ------------------------ */ vector<const VectorAttribute *> vm_hooks; vector<const SingleAttribute *> vm_restricted_attrs; time_t vm_expiration; bool vm_submit_on_hold; float cpu_cost; float mem_cost; float disk_cost; const VectorAttribute * default_cost; nebula_configuration->get("VM_HOOK", vm_hooks); nebula_configuration->get("VM_RESTRICTED_ATTR", vm_restricted_attrs); nebula_configuration->get("VM_MONITORING_EXPIRATION_TIME",vm_expiration); nebula_configuration->get("VM_SUBMIT_ON_HOLD",vm_submit_on_hold); default_cost = nebula_configuration->get("DEFAULT_COST"); if (default_cost->vector_value("CPU_COST", cpu_cost) != 0) { cpu_cost = 0; } if (default_cost->vector_value("MEMORY_COST", mem_cost) != 0) { mem_cost = 0; } if (default_cost->vector_value("DISK_COST", disk_cost) != 0) { disk_cost = 0; } vmpool = new VirtualMachinePool(logdb, vm_hooks, hook_location, remotes_location, vm_restricted_attrs, vm_expiration, vm_submit_on_hold, cpu_cost, mem_cost, disk_cost); /* ---------------------------- Host Pool --------------------------- */ vector<const VectorAttribute *> host_hooks; time_t host_expiration; nebula_configuration->get("HOST_HOOK", host_hooks); nebula_configuration->get("HOST_MONITORING_EXPIRATION_TIME", host_expiration); hpool = new HostPool(logdb, host_hooks, hook_location, remotes_location, host_expiration); /* --------------------- VirtualRouter Pool ------------------------- */ vector<const VectorAttribute *> vrouter_hooks; nebula_configuration->get("VROUTER_HOOK", vrouter_hooks); vrouterpool = new VirtualRouterPool(logdb, vrouter_hooks, remotes_location); /* -------------------- VirtualNetwork Pool ------------------------- */ int size; string mac_prefix; vector<const SingleAttribute *> inherit_vnet_attrs; 
vector<const SingleAttribute *> vnet_restricted_attrs; vector<const VectorAttribute *> vnet_hooks; const VectorAttribute * vlan_id; const VectorAttribute * vxlan_id; nebula_configuration->get("MAC_PREFIX", mac_prefix); nebula_configuration->get("NETWORK_SIZE", size); nebula_configuration->get("VNET_RESTRICTED_ATTR", vnet_restricted_attrs); nebula_configuration->get("VNET_HOOK", vnet_hooks); nebula_configuration->get("INHERIT_VNET_ATTR", inherit_vnet_attrs); vlan_id = nebula_configuration->get("VLAN_IDS"); vxlan_id = nebula_configuration->get("VXLAN_IDS"); vnpool = new VirtualNetworkPool(logdb, mac_prefix, size, vnet_restricted_attrs, vnet_hooks, remotes_location, inherit_vnet_attrs, vlan_id, vxlan_id); /* ----------------------- Group/User Pool -------------------------- */ vector<const VectorAttribute *> user_hooks; vector<const VectorAttribute *> group_hooks; time_t expiration_time; nebula_configuration->get("GROUP_HOOK", group_hooks); gpool = new GroupPool(db_ptr, group_hooks, remotes_location, is_federation_slave()); nebula_configuration->get("SESSION_EXPIRATION_TIME", expiration_time); nebula_configuration->get("USER_HOOK", user_hooks); upool = new UserPool(db_ptr, expiration_time, user_hooks, remotes_location, is_federation_slave()); /* -------------------- Image/Datastore Pool ------------------------ */ string image_type; string device_prefix; string cd_dev_prefix; vector<const VectorAttribute *> image_hooks; vector<const SingleAttribute *> img_restricted_attrs; vector<const SingleAttribute *> inherit_image_attrs; vector<const SingleAttribute *> inherit_ds_attrs; nebula_configuration->get("DEFAULT_IMAGE_TYPE", image_type); nebula_configuration->get("DEFAULT_DEVICE_PREFIX", device_prefix); nebula_configuration->get("DEFAULT_CDROM_DEVICE_PREFIX", cd_dev_prefix); nebula_configuration->get("IMAGE_HOOK", image_hooks); nebula_configuration->get("IMAGE_RESTRICTED_ATTR", img_restricted_attrs); nebula_configuration->get("INHERIT_IMAGE_ATTR", inherit_image_attrs); 
ipool = new ImagePool(logdb, image_type, device_prefix, cd_dev_prefix, img_restricted_attrs, image_hooks, remotes_location, inherit_image_attrs); nebula_configuration->get("INHERIT_DATASTORE_ATTR", inherit_ds_attrs); dspool = new DatastorePool(logdb, inherit_ds_attrs); /* ----- Document, Zone, VDC, VMTemplate, SG and Makerket Pools ----- */ docpool = new DocumentPool(logdb); zonepool = new ZonePool(db_ptr, is_federation_slave()); vdcpool = new VdcPool(db_ptr, is_federation_slave()); tpool = new VMTemplatePool(logdb); secgrouppool = new SecurityGroupPool(logdb); marketpool = new MarketPlacePool(db_ptr, is_federation_slave()); apppool = new MarketPlaceAppPool(db_ptr); vmgrouppool = new VMGroupPool(logdb); default_user_quota.select(); default_group_quota.select(); } catch (exception&) { throw runtime_error("Error Initializing OpenNebula pools"); } // ---- XMLRPC Client for federation slaves ---- if (is_federation_slave()) { long long msg_size; unsigned int timeout; get_configuration_attribute("MESSAGE_SIZE", msg_size); get_configuration_attribute("TIMEOUT", timeout); Client::initialize("", get_master_oned(), msg_size, timeout); } // ---- Hook Manager ---- try { vector<const VectorAttribute *> hm_mads; nebula_configuration->get("HM_MAD", hm_mads); hm = new HookManager(hm_mads,vmpool); } catch (bad_alloc&) { throw; } rc = hm->start(); if ( rc != 0 ) { throw runtime_error("Could not start the Hook Manager"); } if (hm->load_mads(0) != 0) { goto error_mad; } // ---- Raft Manager ---- const VectorAttribute * raft_leader_hook; const VectorAttribute * raft_follower_hook; raft_leader_hook = nebula_configuration->get("RAFT_LEADER_HOOK"); raft_follower_hook = nebula_configuration->get("RAFT_FOLLOWER_HOOK"); try { raftm = new RaftManager(server_id, raft_leader_hook, raft_follower_hook, log_purge, bcast_ms, election_ms, xmlrpc_ms, remotes_location); } catch (bad_alloc&) { throw; } rc = raftm->start(); if ( rc != 0 ) { throw runtime_error("Could not start the Raft Consensus 
Manager"); } // ---- FedReplica Manager ---- try { frm = new FedReplicaManager(logdb); } catch (bad_alloc&) { throw; } rc = frm->start(); if ( is_federation_master() && solo ) { // Replica threads are started on master in solo mode. // HA start/stop the replica threads on leader/follower states frm->start_replica_threads(); } if ( rc != 0 ) { throw runtime_error("Could not start the Federation Replica Manager"); } // ---- Virtual Machine Manager ---- try { vector<const VectorAttribute *> vmm_mads; int vm_limit; bool do_poll; time_t poll_period = 0; nebula_configuration->get("VM_PER_INTERVAL", vm_limit); nebula_configuration->get("VM_MAD", vmm_mads); nebula_configuration->get("VM_INDIVIDUAL_MONITORING", do_poll); poll_period = monitor_period * 2.5; vmm = new VirtualMachineManager( timer_period, poll_period, do_poll, vm_limit, vmm_mads); } catch (bad_alloc&) { throw; } rc = vmm->start(); if ( rc != 0 ) { throw runtime_error("Could not start the Virtual Machine Manager"); } // ---- Life-cycle Manager ---- try { lcm = new LifeCycleManager(); } catch (bad_alloc&) { throw; } rc = lcm->start(); if ( rc != 0 ) { throw runtime_error("Could not start the Life-cycle Manager"); } // ---- Information Manager ---- try { vector<const VectorAttribute *> im_mads; int host_limit; int monitor_threads; nebula_configuration->get("HOST_PER_INTERVAL", host_limit); nebula_configuration->get("MONITORING_THREADS", monitor_threads); nebula_configuration->get("IM_MAD", im_mads); im = new InformationManager(hpool, clpool, timer_period, monitor_period, host_limit, monitor_threads, remotes_location, im_mads); } catch (bad_alloc&) { throw; } rc = im->start(); if ( rc != 0 ) { throw runtime_error("Could not start the Information Manager"); } // ---- Transfer Manager ---- try { vector<const VectorAttribute *> tm_mads; nebula_configuration->get("TM_MAD", tm_mads); tm = new TransferManager(vmpool, hpool, tm_mads); } catch (bad_alloc&) { throw; } rc = tm->start(); if ( rc != 0 ) { throw 
runtime_error("Could not start the Transfer Manager"); } // ---- Dispatch Manager ---- try { dm = new DispatchManager(); } catch (bad_alloc&) { throw; } rc = dm->start(); if ( rc != 0 ) { throw runtime_error("Could not start the Dispatch Manager"); } // ---- Auth Manager ---- try { vector<const VectorAttribute *> auth_mads; nebula_configuration->get("AUTH_MAD", auth_mads); if (!auth_mads.empty()) { authm = new AuthManager(timer_period, auth_mads); } else { authm = 0; //Built-in authm/authz } } catch (bad_alloc&) { throw; } if (authm != 0) { rc = authm->start(); if ( rc != 0 ) { throw runtime_error("Could not start the Auth Manager"); } } // ---- Image Manager ---- try { vector<const VectorAttribute *> image_mads; nebula_configuration->get("DATASTORE_MAD", image_mads); imagem = new ImageManager(timer_period, monitor_period, ipool, dspool, image_mads); } catch (bad_alloc&) { throw; } rc = imagem->start(); if ( rc != 0 ) { throw runtime_error("Could not start the Image Manager"); } // ---- Marketplace Manager ---- try { vector<const VectorAttribute *> mmads ; nebula_configuration->get("MARKET_MAD", mmads); marketm = new MarketPlaceManager(timer_period, monitor_period, mmads); } catch (bad_alloc&) { throw; } rc = marketm->start(); if ( rc != 0 ) { throw runtime_error("Could not start the Marketplace Manager"); } // ---- IPAM Manager ---- try { vector<const VectorAttribute *> ipam_mads ; nebula_configuration->get("IPAM_MAD", ipam_mads); ipamm = new IPAMManager(timer_period, ipam_mads); } catch (bad_alloc&) { throw; } rc = ipamm->start(); if ( rc != 0 ) { throw runtime_error("Could not start the IPAM Manager"); } // ----------------------------------------------------------- // Load mads // ----------------------------------------------------------- usleep(2500000); rc = 0; if (vmm->load_mads(0) != 0) { goto error_mad; } if (im->load_mads(0) != 0) { goto error_mad; } if (tm->load_mads(0) != 0) { goto error_mad; } if (imagem->load_mads(0) != 0) { goto error_mad; } if 
(marketm->load_mads(0) != 0) { goto error_mad; } if (ipamm->load_mads(0) != 0) { goto error_mad; } if ( authm != 0 ) { if (authm->load_mads(0) != 0) { goto error_mad; } } // ---- Request Manager ---- try { string rm_port; int max_conn; int max_conn_backlog; int keepalive_timeout; int keepalive_max_conn; int timeout; bool rpc_log; string log_call_format; string rpc_filename = ""; int message_size; string rm_listen_address = "0.0.0.0"; nebula_configuration->get("PORT", rm_port); nebula_configuration->get("LISTEN_ADDRESS", rm_listen_address); nebula_configuration->get("MAX_CONN", max_conn); nebula_configuration->get("MAX_CONN_BACKLOG", max_conn_backlog); nebula_configuration->get("KEEPALIVE_TIMEOUT", keepalive_timeout); nebula_configuration->get("KEEPALIVE_MAX_CONN", keepalive_max_conn); nebula_configuration->get("TIMEOUT", timeout); nebula_configuration->get("RPC_LOG", rpc_log); nebula_configuration->get("LOG_CALL_FORMAT", log_call_format); nebula_configuration->get("MESSAGE_SIZE", message_size); if (rpc_log) { rpc_filename = log_location + "one_xmlrpc.log"; } rm = new RequestManager(rm_port, max_conn, max_conn_backlog, keepalive_timeout, keepalive_max_conn, timeout, rpc_filename, log_call_format, rm_listen_address, message_size); } catch (bad_alloc&) { NebulaLog::log("ONE", Log::ERROR, "Error starting RM"); throw; } // ---- Initialize Manager cross-reference pointers and pool references ---- dm->init_managers(); lcm->init_managers(); marketm->init_managers(); // ---- Start the Request Manager ---- rc = rm->start(); if ( rc != 0 ) { throw runtime_error("Could not start the Request Manager"); } #ifdef SYSTEMD // ---- Notify service manager ---- sd_notify(0, "READY=1"); #endif // ----------------------------------------------------------- // Wait for a SIGTERM or SIGINT signal // ----------------------------------------------------------- sigemptyset(&mask); sigaddset(&mask, SIGINT); sigaddset(&mask, SIGTERM); sigwait(&mask, &signal); // 
-----------------------------------------------------------
    // Stop the managers & free resources
    // -----------------------------------------------------------

    // Signal every manager to leave its action loop. The order roughly
    // mirrors start-up: core VM managers first, then drivers and
    // replication machinery.
    //
    // NOTE(review): authm is 0 when no AUTH_MAD is configured (built-in
    // auth) — the start()/load_mads() calls earlier in this function are
    // guarded with `if (authm != 0)`, but authm->finalize() and the
    // pthread_join below are not. TODO confirm AUTH_MAD is always set, or
    // add the same guard here.
    vmm->finalize();
    lcm->finalize();
    tm->finalize();
    dm->finalize();
    im->finalize();
    rm->finalize();
    authm->finalize();
    hm->finalize();
    imagem->finalize();
    marketm->finalize();
    ipamm->finalize();
    aclm->finalize();
    raftm->finalize();
    frm->finalize();

    // TODO: consider an explicit wait for drivers to stop
    // (original author note: "sleep to wait drivers???")

    // Wait for each manager's action thread to actually terminate before
    // tearing down shared libraries below.
    pthread_join(vmm->get_thread_id(),0);
    pthread_join(lcm->get_thread_id(),0);
    pthread_join(tm->get_thread_id(),0);
    pthread_join(dm->get_thread_id(),0);
    pthread_join(im->get_thread_id(),0);
    pthread_join(rm->get_thread_id(),0);
    pthread_join(hm->get_thread_id(),0);
    pthread_join(authm->get_thread_id(),0);
    pthread_join(imagem->get_thread_id(),0);
    pthread_join(marketm->get_thread_id(),0);
    pthread_join(ipamm->get_thread_id(),0);
    pthread_join(raftm->get_thread_id(),0);
    pthread_join(frm->get_thread_id(),0);

    // The ACL manager is joined only on federation slaves — presumably it
    // only runs a dedicated thread in that mode; verify against AclManager.
    if(is_federation_slave())
    {
        pthread_join(aclm->get_thread_id(),0);
    }

    //XML Library
    // Release libxml2 global state and the OpenSSL mutex helpers.
    xmlCleanupParser();

    one_util::SSLMutex::finalize();

    NebulaLog::log("ONE", Log::INFO, "All modules finalized, exiting.\n");

    return;

// Reached (via goto) when any driver's load_mads() call above failed.
error_mad:
    NebulaLog::log("ONE", Log::ERROR, "Could not load driver");
    throw runtime_error("Could not load an OpenNebula driver");
}

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

/**
 *  Returns the log verbosity configured in the LOG section of oned.conf
 *  (vector attribute DEBUG_LEVEL).
 *
 *  The value is accepted only when it falls in [Log::ERROR, Log::DDDEBUG];
 *  anything else — including a missing LOG section — yields Log::ERROR.
 *
 *  @return the configured Log::MessageType, Log::ERROR by default
 */
Log::MessageType Nebula::get_debug_level() const
{
    Log::MessageType clevel = Log::ERROR;
    int log_level_int;

    const VectorAttribute * log = nebula_configuration->get("LOG");

    if ( log != 0 )
    {
        string value = log->vector_value("DEBUG_LEVEL");

        // atoi() returns 0 for a non-numeric value; that presumably maps to
        // the lowest level (Log::ERROR) — confirm against the Log enum.
        log_level_int = atoi(value.c_str());

        if ( Log::ERROR <= log_level_int && log_level_int <= Log::DDDEBUG )
        {
            clevel = static_cast<Log::MessageType>(log_level_int);
        }
    }

    return clevel;
}

/* -------------------------------------------------------------------------- */
/* 
-------------------------------------------------------------------------- */ NebulaLog::LogType Nebula::get_log_system() const { NebulaLog::LogType log_system = NebulaLog::UNDEFINED; const VectorAttribute * log = nebula_configuration->get("LOG"); if ( log != 0 ) { string value = log->vector_value("SYSTEM"); log_system = NebulaLog::str_to_type(value); } return log_system; }; /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ void Nebula::get_ds_location(string& dsloc) { get_configuration_attribute("DATASTORE_LOCATION", dsloc); } /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ string Nebula::get_vm_log_filename(int oid) { ostringstream oss; if (nebula_location == "/") { oss << log_location << oid << ".log"; } else { oss << vms_location << oid << "/vm.log"; } return oss.str(); }; /* -------------------------------------------------------------------------- */ /* -------------------------------------------------------------------------- */ int Nebula::get_conf_attribute( const std::string& key, const std::string& name, const VectorAttribute* &value) const { std::vector<const VectorAttribute*>::const_iterator it; std::vector<const VectorAttribute*> values; std::string template_name; std::string name_upper = name; one_util::toupper(name_upper); nebula_configuration->get(key, values); for (it = values.begin(); it != values.end(); it ++) { value = *it; template_name = (*it)->vector_value("NAME"); one_util::toupper(template_name); if ( template_name == name_upper ) { return 0; } } value = 0; return -1; };
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/core/compat/op_utils.h" namespace phi { KernelSignature GatherNdGradArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("gather_nd_grad", {"X", "Index", GradVarName("Out")}, {}, {GradVarName("X")}); } KernelSignature ScatterGradArgumentMapping(const ArgumentMappingContext& ctx) { return KernelSignature("scatter_grad", {"Ids", "Updates", GradVarName("Out")}, {"overwrite"}, {GradVarName("X"), GradVarName("Updates")}); } KernelSignature ScatterNdAddGradArgumentMapping( const ArgumentMappingContext& ctx) { return KernelSignature("scatter_nd_add_grad", {"Index", "Updates", GradVarName("Out")}, {}, {GradVarName("X"), GradVarName("Updates")}); } } // namespace phi PD_REGISTER_ARG_MAPPING_FN(gather_nd_grad, phi::GatherNdGradArgumentMapping); PD_REGISTER_ARG_MAPPING_FN(scatter_grad, phi::ScatterGradArgumentMapping); PD_REGISTER_ARG_MAPPING_FN(scatter_nd_add_grad, phi::ScatterNdAddGradArgumentMapping);