text
stringlengths 5
1.04M
|
|---|
// This file is licensed under the Elastic License 2.0. Copyright 2021-present, StarRocks Limited.
#include "storage/delta_writer.h"
#include "runtime/current_thread.h"
#include "storage/memtable.h"
#include "storage/memtable_flush_executor.h"
#include "storage/rowset/rowset_factory.h"
#include "storage/schema.h"
#include "storage/storage_engine.h"
#include "storage/tablet_updates.h"
#include "storage/update_manager.h"
namespace starrocks::vectorized {
// Factory: builds a DeltaWriter and runs its one-time initialization with
// thread-local memory accounting charged to |mem_tracker|. Returns the ready
// writer, or the initialization error.
StatusOr<std::unique_ptr<DeltaWriter>> DeltaWriter::open(const DeltaWriterOptions& opt, MemTracker* mem_tracker) {
    auto writer = std::unique_ptr<DeltaWriter>(new DeltaWriter(opt, mem_tracker, StorageEngine::instance()));
    SCOPED_THREAD_LOCAL_MEM_SETTER(mem_tracker, false);
    if (auto st = writer->_init(); !st.ok()) {
        return st;
    }
    return std::move(writer);
}
// Private constructor; use DeltaWriter::open(). Only stores the options and
// collaborators — all real setup (tablet lookup, rowset writer, memtable)
// happens in _init(). Starts in kUninitialized.
DeltaWriter::DeltaWriter(const DeltaWriterOptions& opt, MemTracker* mem_tracker, StorageEngine* storage_engine)
        : _state(kUninitialized),
          _opt(opt),
          _mem_tracker(mem_tracker),
          _storage_engine(storage_engine),
          _tablet(nullptr),
          _cur_rowset(nullptr),
          _rowset_writer(nullptr),
          _mem_table(nullptr),
          _tablet_schema(nullptr),
          _flush_token(nullptr) {}
DeltaWriter::~DeltaWriter() {
    SCOPED_THREAD_LOCAL_MEM_SETTER(_mem_tracker, false);
    // A writer destroyed mid-load (prepared/closed/aborted) must roll back
    // its transaction and drop the rowset; committed or never-started
    // writers have nothing to clean up.
    switch (_get_state()) {
    case kUninitialized:
    case kCommitted:
    case kInitialized:
        break;
    case kPrepared:
    case kClosed:
    case kAborted:
        _garbage_collection();
        break;
    }
    // Release in reverse dependency order: memtable references the rowset
    // writer, which references the rowset files.
    _mem_table.reset();
    _rowset_writer.reset();
    _cur_rowset.reset();
}
// Best-effort cleanup for an abandoned load: rolls back the transaction and,
// only if the rollback succeeded, marks the rowset as unused so it can be
// deleted.
void DeltaWriter::_garbage_collection() {
    Status rollback_status = Status::OK();
    if (_tablet != nullptr) {
        rollback_status = _storage_engine->txn_manager()->rollback_txn(_opt.partition_id, _tablet, _opt.txn_id);
    }
    // Has to check the rollback status: the rowset may have been committed in
    // this thread and published in another thread, in which case the rollback
    // fails. When the rollback failed, the rowset must not be deleted.
    if (rollback_status.ok()) {
        _storage_engine->add_unused_rowset(_cur_rowset);
    }
}
// One-time initialization: resolves the tablet, validates primary-key memory
// and tablet state/version limits, re-resolves the tablet if a migration
// raced with us, then creates the rowset writer, the first memtable and the
// flush token. Transitions to kInitialized on success; on failure the state
// is set to a terminal value and an error status is returned.
Status DeltaWriter::_init() {
    SCOPED_THREAD_LOCAL_MEM_SETTER(_mem_tracker, false);
    TabletManager* tablet_mgr = _storage_engine->tablet_manager();
    _tablet = tablet_mgr->get_tablet(_opt.tablet_id, false);
    if (_tablet == nullptr) {
        _set_state(kUninitialized);
        std::stringstream ss;
        ss << "Fail to get tablet. tablet_id=" << _opt.tablet_id;
        LOG(WARNING) << ss.str();
        return Status::InternalError(ss.str());
    }
    // Primary-key tables only: refuse new loads when the primary index memory
    // budget is exhausted or the tablet has entered an error state.
    if (_tablet->updates() != nullptr) {
        auto tracker = _storage_engine->update_manager()->mem_tracker();
        if (tracker->limit_exceeded()) {
            _set_state(kUninitialized);
            auto msg = Substitute(
                    "Primary-key index exceeds the limit. tablet_id: $0, consumption: $1, limit: $2."
                    " Memory stats of top five tablets: $3",
                    _opt.tablet_id, tracker->consumption(), tracker->limit(),
                    _storage_engine->update_manager()->topn_memory_stats(5));
            LOG(WARNING) << msg;
            return Status::MemoryLimitExceeded(msg);
        }
        if (_tablet->updates()->is_error()) {
            _set_state(kUninitialized);
            auto msg = fmt::format("Tablet is in error state. This is a primary key table. tablet_id: {}",
                                   _tablet->tablet_id());
            return Status::ServiceUnavailable(msg);
        }
    }
    // Back-pressure: too many unmerged versions means compaction cannot keep
    // up; reject the load rather than make it worse.
    if (_tablet->version_count() > config::tablet_max_versions) {
        _set_state(kUninitialized);
        auto msg = fmt::format("Too many versions. tablet_id: {}, version_count: {}, limit: {}", _opt.tablet_id,
                               _tablet->version_count(), config::tablet_max_versions);
        LOG(ERROR) << msg;
        return Status::ServiceUnavailable(msg);
    }
    // The tablet may have been migrated during delta writer init,
    // and the latest tablet needs to be obtained when loading.
    // Here, the while loop checks whether the obtained tablet has changed
    // to get the latest tablet.
    while (true) {
        std::shared_lock base_migration_rlock(_tablet->get_migration_lock());
        TabletSharedPtr new_tablet;
        if (!_tablet->is_migrating()) {
            // maybe migration just finished, get the tablet again
            new_tablet = tablet_mgr->get_tablet(_opt.tablet_id, _opt.schema_hash);
            if (new_tablet == nullptr) {
                _set_state(kAborted);
                return Status::NotFound(fmt::format("Not found tablet. tablet_id: {}", _opt.tablet_id));
            }
            if (_tablet != new_tablet) {
                _tablet = new_tablet;
                continue;
            }
        }
        break;
    }
    RowsetWriterContext writer_context(kDataFormatV2, config::storage_format_version);
    // Number of data columns carried by the load; a trailing "__op" slot is a
    // synthetic operation column, not a tablet column.
    const std::size_t partial_cols_num = [this]() {
        if (_opt.slots->size() > 0 && _opt.slots->back()->col_name() == "__op") {
            return _opt.slots->size() - 1;
        } else {
            return _opt.slots->size();
        }
    }();
    // maybe partial update, change to partial tablet schema
    if (_tablet->tablet_schema().keys_type() == KeysType::PRIMARY_KEYS &&
        partial_cols_num < _tablet->tablet_schema().num_columns()) {
        writer_context.referenced_column_ids.reserve(partial_cols_num);
        // BUGFIX: use std::size_t for the index — the original `auto i = 0`
        // deduced int and compared it against a std::size_t bound
        // (signed/unsigned mismatch).
        for (std::size_t i = 0; i < partial_cols_num; ++i) {
            const auto& slot_col_name = (*_opt.slots)[i]->col_name();
            int32_t index = _tablet->field_index(slot_col_name);
            if (index < 0) {
                auto msg = strings::Substitute("Invalid column name: $0", slot_col_name);
                LOG(WARNING) << msg;
                return Status::InvalidArgument(msg);
            }
            writer_context.referenced_column_ids.push_back(index);
        }
        writer_context.partial_update_tablet_schema =
                TabletSchema::create(_tablet->tablet_schema(), writer_context.referenced_column_ids);
        writer_context.tablet_schema = writer_context.partial_update_tablet_schema.get();
    } else {
        writer_context.tablet_schema = &_tablet->tablet_schema();
    }
    writer_context.rowset_id = _storage_engine->next_rowset_id();
    writer_context.tablet_uid = _tablet->tablet_uid();
    writer_context.tablet_id = _opt.tablet_id;
    writer_context.partition_id = _opt.partition_id;
    writer_context.tablet_schema_hash = _opt.schema_hash;
    writer_context.rowset_type = BETA_ROWSET;
    writer_context.rowset_path_prefix = _tablet->schema_hash_path();
    writer_context.rowset_state = PREPARED;
    writer_context.txn_id = _opt.txn_id;
    writer_context.load_id = _opt.load_id;
    writer_context.segments_overlap = OVERLAPPING;
    writer_context.global_dicts = _opt.global_dicts;
    Status st = RowsetFactory::create_rowset_writer(writer_context, &_rowset_writer);
    if (!st.ok()) {
        _set_state(kUninitialized);
        auto msg = strings::Substitute("Fail to create rowset writer. tablet_id: $0, error: $1", _opt.tablet_id,
                                       st.to_string());
        LOG(WARNING) << msg;
        return Status::InternalError(msg);
    }
    _tablet_schema = writer_context.tablet_schema;
    _reset_mem_table();
    _flush_token = _storage_engine->memtable_flush_executor()->create_flush_token();
    _set_state(kInitialized);
    return Status::OK();
}
// Registers this writer's load transaction with the txn manager. Idempotent
// for kPrepared; terminal/unstarted states are rejected. Called lazily from
// the first write().
Status DeltaWriter::_prepare() {
    auto state = _get_state();
    switch (state) {
    case kUninitialized:
    case kCommitted:
    case kAborted:
    case kClosed:
        return Status::InternalError(
                fmt::format("Fail to prepare. tablet_id: {}, state: {}", _opt.tablet_id, _state_name(state)));
    case kPrepared:
        return Status::OK();
    case kInitialized: {
        // Hold the migration read lock and the tablet push lock while the
        // txn is registered, so the tablet cannot migrate underneath us.
        std::shared_lock base_migration_rlock(_tablet->get_migration_lock());
        std::lock_guard push_lock(_tablet->get_push_lock());
        auto st = _storage_engine->txn_manager()->prepare_txn(_opt.partition_id, _tablet, _opt.txn_id, _opt.load_id);
        if (!st.ok()) {
            _set_state(kAborted);
            return st;
        }
        _set_state(kPrepared);
    } break;
    }
    return Status::OK();
}
// Appends the rows of |chunk| selected by |indexes[from, from+size)| to the
// current memtable, preparing the txn on first call. The memtable is flushed
// and replaced when the writer's (or its parent's) memory budget is exceeded
// (synchronous flush, to release memory now) or when the memtable reports
// itself full (asynchronous flush). Any flush failure aborts the writer.
Status DeltaWriter::write(const Chunk& chunk, const uint32_t* indexes, uint32_t from, uint32_t size) {
    SCOPED_THREAD_LOCAL_MEM_SETTER(_mem_tracker, false);
    RETURN_IF_ERROR(_prepare());
    Status st;
    bool full = _mem_table->insert(chunk, indexes, from, size);
    if (_mem_tracker->limit_exceeded()) {
        VLOG(2) << "Flushing memory table due to memory limit exceeded";
        st = _flush_memtable();
        _reset_mem_table();
    } else if (_mem_tracker->parent() && _mem_tracker->parent()->limit_exceeded()) {
        VLOG(2) << "Flushing memory table due to parent memory limit exceeded";
        st = _flush_memtable();
        _reset_mem_table();
    } else if (full) {
        // Memtable full but memory budget healthy: flush in the background.
        st = _flush_memtable_async();
        _reset_mem_table();
    }
    if (!st.ok()) {
        _set_state(kAborted);
    }
    return st;
}
// Seals the writer: schedules the final memtable flush (if any data was
// written) and transitions to kClosed. Idempotent for kClosed; terminal
// states are rejected. commit() must be called afterwards to publish.
Status DeltaWriter::close() {
    SCOPED_THREAD_LOCAL_MEM_SETTER(_mem_tracker, false);
    Status st;
    auto state = _get_state();
    switch (state) {
    case kUninitialized:
    case kCommitted:
    case kAborted:
        return Status::InternalError(fmt::format("Fail to close delta writer. tablet_id: {}, state: {}", _opt.tablet_id,
                                                 _state_name(state)));
    case kClosed:
        return Status::OK();
    case kInitialized:
        // Nothing was ever written (no txn prepared) — close is trivial.
        _set_state(kClosed);
        return st;
    case kPrepared:
        // Kick off the last memtable flush asynchronously; commit() waits.
        st = _flush_memtable_async();
        _set_state(st.ok() ? kClosed : kAborted);
        return st;
    }
    return Status::OK();
}
// Finalizes the current memtable and hands ownership of it to the flush
// token for an asynchronous flush; _mem_table is consumed and must be
// replaced via _reset_mem_table() before the next write.
Status DeltaWriter::_flush_memtable_async() {
    Status finalize_st = _mem_table->finalize();
    if (!finalize_st.ok()) {
        return finalize_st;
    }
    return _flush_token->submit(std::move(_mem_table));
}
// Synchronous flush: submits the current memtable and blocks until every
// pending flush on the token has completed.
Status DeltaWriter::_flush_memtable() {
    if (Status submit_st = _flush_memtable_async(); !submit_st.ok()) {
        return submit_st;
    }
    return _flush_token->wait();
}
// Installs a fresh, empty memtable bound to this writer's rowset writer and
// memory tracker. Called after every flush submission.
void DeltaWriter::_reset_mem_table() {
    _mem_table = std::make_unique<MemTable>(_tablet->tablet_id(), _tablet_schema, _opt.slots, _rowset_writer.get(),
                                            _mem_tracker);
}
// Publishes the load: waits for all pending memtable flushes, builds the
// rowset, finishes the primary-key index (if applicable) and commits the
// transaction, then transitions kClosed -> kCommitted atomically. Idempotent
// for kCommitted; all other non-kClosed states are rejected.
Status DeltaWriter::commit() {
    SCOPED_THREAD_LOCAL_MEM_SETTER(_mem_tracker, false);
    auto state = _get_state();
    switch (state) {
    case kUninitialized:
    case kInitialized:
    case kAborted:
    case kPrepared:
        return Status::InternalError(fmt::format("Fail to commit delta writer. tablet_id: {}, state: {}",
                                                 _opt.tablet_id, _state_name(state)));
    case kCommitted:
        return Status::OK();
    case kClosed:
        break;
    }
    // Wait for every flush scheduled by write()/close() to finish.
    if (auto st = _flush_token->wait(); UNLIKELY(!st.ok())) {
        LOG(WARNING) << st;
        _set_state(kAborted);
        return st;
    }
    if (auto res = _rowset_writer->build(); res.ok()) {
        _cur_rowset = std::move(res).value();
    } else {
        LOG(WARNING) << res.status();
        _set_state(kAborted);
        return res.status();
    }
    _cur_rowset->set_schema(&_tablet->tablet_schema());
    if (_tablet->keys_type() == KeysType::PRIMARY_KEYS) {
        auto st = _storage_engine->update_manager()->on_rowset_finished(_tablet.get(), _cur_rowset.get());
        if (!st.ok()) {
            _set_state(kAborted);
            return st;
        }
    }
    auto res = _storage_engine->txn_manager()->commit_txn(_opt.partition_id, _tablet, _opt.txn_id, _opt.load_id,
                                                          _cur_rowset, false);
    if (!res.ok()) {
        _storage_engine->update_manager()->on_rowset_cancel(_tablet.get(), _cur_rowset.get());
    }
    // "Already exist" means the same txn was committed concurrently; that is
    // success from this writer's point of view.
    if (!res.ok() && !res.is_already_exist()) {
        _set_state(kAborted);
        return res;
    }
    State curr_state = kClosed;
    if (!_state.compare_exchange_strong(curr_state, kCommitted, std::memory_order_acq_rel)) {
        // BUGFIX: report the state the failed CAS actually observed (stored
        // into curr_state), not the stale value read at function entry —
        // the entry snapshot is always kClosed here and hid the real state.
        return Status::InternalError(fmt::format("Delta writer has been aborted. tablet_id: {}, state: {}",
                                                 _opt.tablet_id, _state_name(curr_state)));
    }
    LOG(INFO) << "Closed delta writer. tablet_id: " << _tablet->tablet_id() << ", stats: " << _flush_token->get_stats();
    return Status::OK();
}
// Marks the writer aborted; the destructor then rolls back the txn and
// discards the rowset via _garbage_collection().
void DeltaWriter::abort() {
    _set_state(kAborted);
}
// Partition this writer loads into, as supplied in the open() options.
int64_t DeltaWriter::partition_id() const {
    return _opt.partition_id;
}
// Maps a lifecycle state to its symbolic name for log and error messages.
// An unrecognized value (should not occur) yields an empty string.
const char* DeltaWriter::_state_name(State state) const {
    if (state == kUninitialized) {
        return "kUninitialized";
    }
    if (state == kInitialized) {
        return "kInitialized";
    }
    if (state == kAborted) {
        return "kAborted";
    }
    if (state == kPrepared) {
        return "kPrepared";
    }
    if (state == kCommitted) {
        return "kCommitted";
    }
    if (state == kClosed) {
        return "kClosed";
    }
    return "";
}
} // namespace starrocks::vectorized
|
#include "tomato.h"
// Default constructor; intentionally empty.
// NOTE(review): no member is initialized here — confirm that mID, mTaskId,
// mProcessState, mStartTime and mCompletedTime have in-class initializers in
// tomato.h, otherwise the int members start with indeterminate values.
Tomato::Tomato()
{
}
// Accessor for the tomato's id.
int Tomato::Id() const { return mID; }
// Sets the tomato's id.
void Tomato::setID(int newId) { mID = newId; }
// Accessor for the id of the task this tomato belongs to.
int Tomato::getTaskId() const { return mTaskId; }
void Tomato::setTaskId(int taskId)
{
mTaskId = taskId;
}
// Accessor for the current process state.
ProcessStates Tomato::processState() const { return mProcessState; }
void Tomato::setProcessState(const ProcessStates &processState)
{
mProcessState = processState;
}
// Accessor for the time the tomato was started (returned by value).
QDateTime Tomato::startTime() const { return mStartTime; }
void Tomato::setStartTime(const QDateTime &startTime)
{
mStartTime = startTime;
}
// Accessor for the time the tomato was completed (returned by value).
QDateTime Tomato::completedTime() const { return mCompletedTime; }
void Tomato::setCompletedTime(const QDateTime &completedTime)
{
mCompletedTime = completedTime;
}
|
/*++
Copyright (c) 2011 Microsoft Corporation
Module Name:
th_rewriter.h
Abstract:
Rewriter for applying all builtin (cheap) theory rewrite rules.
Author:
Leonardo (leonardo) 2011-04-07
Notes:
--*/
#include"th_rewriter.h"
#include"rewriter_params.hpp"
#include"bool_rewriter.h"
#include"arith_rewriter.h"
#include"bv_rewriter.h"
#include"datatype_rewriter.h"
#include"array_rewriter.h"
#include"fpa_rewriter.h"
#include"dl_rewriter.h"
#include"pb_rewriter.h"
#include"seq_rewriter.h"
#include"rewriter_def.h"
#include"expr_substitution.h"
#include"ast_smt2_pp.h"
#include"cooperate.h"
#include"var_subst.h"
#include"ast_util.h"
#include"well_sorted.h"
// Rewriter configuration for th_rewriter: dispatches simplification of each
// function application to the rewriter of its owning theory (bool, arith,
// bit-vectors, arrays, datatypes, floating-point, datalog, pseudo-boolean,
// sequences), and layers two generic ite transformations (push_ite /
// pull_ite) plus optional expression substitution with dependency tracking
// on top. Plugged into rewriter_tpl<> below.
struct th_rewriter_cfg : public default_rewriter_cfg {
    bool_rewriter m_b_rw;
    arith_rewriter m_a_rw;
    bv_rewriter m_bv_rw;
    array_rewriter m_ar_rw;
    datatype_rewriter m_dt_rw;
    fpa_rewriter m_f_rw;
    dl_rewriter m_dl_rw;
    pb_rewriter m_pb_rw;
    seq_rewriter m_seq_rw;
    arith_util m_a_util;
    bv_util m_bv_util;
    unsigned long long m_max_memory; // in bytes
    unsigned m_max_steps;
    bool m_pull_cheap_ite;
    bool m_flat;
    bool m_cache_all;
    bool m_push_ite_arith;
    bool m_push_ite_bv;
    // substitution support
    expr_dependency_ref m_used_dependencies; // set of dependencies of used substitutions
    expr_substitution * m_subst;
    ast_manager & m() const { return m_b_rw.m(); }
    // Reads the parameters owned by this cfg itself; the per-theory
    // rewriters consume their own parameters in updt_params below.
    void updt_local_params(params_ref const & _p) {
        rewriter_params p(_p);
        m_flat = p.flat();
        m_max_memory = megabytes_to_bytes(p.max_memory());
        m_max_steps = p.max_steps();
        m_pull_cheap_ite = p.pull_cheap_ite();
        m_cache_all = p.cache_all();
        m_push_ite_arith = p.push_ite_arith();
        m_push_ite_bv = p.push_ite_bv();
    }
    void updt_params(params_ref const & p) {
        m_b_rw.updt_params(p);
        m_a_rw.updt_params(p);
        m_bv_rw.updt_params(p);
        m_ar_rw.updt_params(p);
        m_f_rw.updt_params(p);
        m_seq_rw.updt_params(p);
        updt_local_params(p);
    }
    // True if applications of f should be flattened (associativity) during
    // rewriting: and/or, arithmetic +, and bv add/or/and/xor.
    bool flat_assoc(func_decl * f) const {
        if (!m_flat) return false;
        family_id fid = f->get_family_id();
        if (fid == null_family_id)
            return false;
        decl_kind k = f->get_decl_kind();
        if (fid == m_b_rw.get_fid())
            return k == OP_AND || k == OP_OR;
        if (fid == m_a_rw.get_fid())
            return k == OP_ADD;
        if (fid == m_bv_rw.get_fid())
            return k == OP_BADD || k == OP_BOR || k == OP_BAND || k == OP_BXOR;
        return false;
    }
    bool rewrite_patterns() const { return false; }
    bool cache_all_results() const { return m_cache_all; }
    // Also enforces the memory limit and yields to cooperating tasks.
    bool max_steps_exceeded(unsigned num_steps) const {
        cooperate("simplifier");
        if (memory::get_allocation_size() > m_max_memory)
            throw rewriter_exception(Z3_MAX_MEMORY_MSG);
        return num_steps > m_max_steps;
    }
    // Return true if t is of the form
    //    (= t #b0)
    //    (= t #b1)
    //    (= #b0 t)
    //    (= #b1 t)
    // On success, x receives the non-constant side and val the bit value.
    bool is_eq_bit(expr * t, expr * & x, unsigned & val) {
        if (!m().is_eq(t))
            return false;
        expr * lhs = to_app(t)->get_arg(0);
        if (!m_bv_rw.is_bv(lhs))
            return false;
        if (m_bv_rw.get_bv_size(lhs) != 1)
            return false;
        expr * rhs = to_app(t)->get_arg(1);
        rational v;
        unsigned sz;
        if (m_bv_rw.is_numeral(lhs, v, sz)) {
            x = rhs;
            val = v.get_unsigned();
            SASSERT(val == 0 || val == 1);
            return true;
        }
        if (m_bv_rw.is_numeral(rhs, v, sz)) {
            x = lhs;
            val = v.get_unsigned();
            SASSERT(val == 0 || val == 1);
            return true;
        }
        return false;
    }
    // (iff (= x bit1) A)
    // --->
    // (= x (ite A bit1 bit0))
    br_status apply_tamagotchi(expr * lhs, expr * rhs, expr_ref & result) {
        expr * x;
        unsigned val;
        if (is_eq_bit(lhs, x, val)) {
            result = m().mk_eq(x, m().mk_ite(rhs, m_bv_rw.mk_numeral(val, 1), m_bv_rw.mk_numeral(1-val, 1)));
            return BR_REWRITE2;
        }
        if (is_eq_bit(rhs, x, val)) {
            result = m().mk_eq(x, m().mk_ite(lhs, m_bv_rw.mk_numeral(val, 1), m_bv_rw.mk_numeral(1-val, 1)));
            return BR_REWRITE2;
        }
        return BR_FAILED;
    }
    // Dispatches the application (f args) to the rewriter of f's theory.
    // Equality gets special handling: first the theory of the arguments'
    // sort, then the bit-equation transformation, then the bool rewriter.
    br_status reduce_app_core(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
        family_id fid = f->get_family_id();
        if (fid == null_family_id)
            return BR_FAILED;
        br_status st = BR_FAILED;
        if (fid == m_b_rw.get_fid()) {
            decl_kind k = f->get_decl_kind();
            if (k == OP_EQ) {
                // theory dispatch for =
                SASSERT(num == 2);
                family_id s_fid = m().get_sort(args[0])->get_family_id();
                if (s_fid == m_a_rw.get_fid())
                    st = m_a_rw.mk_eq_core(args[0], args[1], result);
                else if (s_fid == m_bv_rw.get_fid())
                    st = m_bv_rw.mk_eq_core(args[0], args[1], result);
                else if (s_fid == m_dt_rw.get_fid())
                    st = m_dt_rw.mk_eq_core(args[0], args[1], result);
                else if (s_fid == m_f_rw.get_fid())
                    st = m_f_rw.mk_eq_core(args[0], args[1], result);
                else if (s_fid == m_ar_rw.get_fid())
                    st = m_ar_rw.mk_eq_core(args[0], args[1], result);
                else if (s_fid == m_seq_rw.get_fid())
                    st = m_seq_rw.mk_eq_core(args[0], args[1], result);
                if (st != BR_FAILED)
                    return st;
            }
            if (k == OP_EQ || k == OP_IFF) {
                SASSERT(num == 2);
                st = apply_tamagotchi(args[0], args[1], result);
                if (st != BR_FAILED)
                    return st;
            }
            return m_b_rw.mk_app_core(f, num, args, result);
        }
        if (fid == m_a_rw.get_fid())
            return m_a_rw.mk_app_core(f, num, args, result);
        if (fid == m_bv_rw.get_fid())
            return m_bv_rw.mk_app_core(f, num, args, result);
        if (fid == m_ar_rw.get_fid())
            return m_ar_rw.mk_app_core(f, num, args, result);
        if (fid == m_dt_rw.get_fid())
            return m_dt_rw.mk_app_core(f, num, args, result);
        if (fid == m_f_rw.get_fid())
            return m_f_rw.mk_app_core(f, num, args, result);
        if (fid == m_dl_rw.get_fid())
            return m_dl_rw.mk_app_core(f, num, args, result);
        if (fid == m_pb_rw.get_fid())
            return m_pb_rw.mk_app_core(f, num, args, result);
        if (fid == m_seq_rw.get_fid())
            return m_seq_rw.mk_app_core(f, num, args, result);
        return BR_FAILED;
    }
    // auxiliary function for pull_ite_core: builds (= lhs value), folding
    // the cases the ast_manager can decide immediately.
    expr * mk_eq_value(expr * lhs, expr * value) {
        if (m().are_equal(lhs, value)) {
            return m().mk_true();
        }
        else if (m().are_distinct(lhs, value)) {
            return m().mk_false();
        }
        return m().mk_eq(lhs, value);
    }
    // Lifts an ite through predicate p:
    //   (p (ite c t e) value) --> (ite c (p t value) (p e value))
    // SWAP selects which side of p the value sits on.
    template<bool SWAP>
    br_status pull_ite_core(func_decl * p, app * ite, app * value, expr_ref & result) {
        if (m().is_eq(p)) {
            result = m().mk_ite(ite->get_arg(0),
                                mk_eq_value(ite->get_arg(1), value),
                                mk_eq_value(ite->get_arg(2), value));
            return BR_REWRITE2;
        }
        else {
            if (SWAP) {
                result = m().mk_ite(ite->get_arg(0),
                                    m().mk_app(p, value, ite->get_arg(1)),
                                    m().mk_app(p, value, ite->get_arg(2)));
                return BR_REWRITE2;
            }
            else {
                result = m().mk_ite(ite->get_arg(0),
                                    m().mk_app(p, ite->get_arg(1), value),
                                    m().mk_app(p, ite->get_arg(2), value));
                return BR_REWRITE2;
            }
        }
    }
    // Return true if t is an ite-value-tree form defined as:
    //    ite-value-tree := (ite c <subtree> <subtree>)
    //    subtree        := value
    //                   |  (ite c <subtree> <subtree>)
    //
    bool is_ite_value_tree(expr * t) {
        if (!m().is_ite(t))
            return false;
        ptr_buffer<app> todo;
        todo.push_back(to_app(t));
        while (!todo.empty()) {
            app * ite = todo.back();
            todo.pop_back();
            expr * arg1 = ite->get_arg(1);
            expr * arg2 = ite->get_arg(2);
            if (m().is_ite(arg1) && arg1->get_ref_count() == 1) // do not apply on shared terms, since it may blowup
                todo.push_back(to_app(arg1));
            else if (!m().is_value(arg1))
                return false;
            if (m().is_ite(arg2) && arg2->get_ref_count() == 1) // do not apply on shared terms, since it may blowup
                todo.push_back(to_app(arg2));
            else if (!m().is_value(arg2))
                return false;
        }
        return true;
    }
    // Pulls an ite argument above a binary application of f. Only fires for
    // boolean-valued f over non-boolean args, or basic/arith/bv families
    // when the other argument is a value.
    br_status pull_ite(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
        if (num == 2 && m().is_bool(f->get_range()) && !m().is_bool(args[0])) {
            if (m().is_ite(args[0])) {
                if (m().is_value(args[1]))
                    return pull_ite_core<false>(f, to_app(args[0]), to_app(args[1]), result);
                if (m().is_ite(args[1]) && to_app(args[0])->get_arg(0) == to_app(args[1])->get_arg(0)) {
                    // (p (ite C A1 B1) (ite C A2 B2)) --> (ite (p A1 A2) (p B1 B2))
                    result = m().mk_ite(to_app(args[0])->get_arg(0),
                                        m().mk_app(f, to_app(args[0])->get_arg(1), to_app(args[1])->get_arg(1)),
                                        m().mk_app(f, to_app(args[0])->get_arg(2), to_app(args[1])->get_arg(2)));
                    return BR_REWRITE2;
                }
            }
            if (m().is_ite(args[1]) && m().is_value(args[0]))
                return pull_ite_core<true>(f, to_app(args[1]), to_app(args[0]), result);
        }
        family_id fid = f->get_family_id();
        if (num == 2 && (fid == m().get_basic_family_id() || fid == m_a_rw.get_fid() || fid == m_bv_rw.get_fid())) {
            // (f v3 (ite c v1 v2)) --> (ite v (f v3 v1) (f v3 v2))
            if (m().is_value(args[0]) && is_ite_value_tree(args[1]))
                return pull_ite_core<true>(f, to_app(args[1]), to_app(args[0]), result);
            // (f (ite c v1 v2) v3) --> (ite v (f v1 v3) (f v2 v3))
            if (m().is_value(args[1]) && is_ite_value_tree(args[0]))
                return pull_ite_core<false>(f, to_app(args[0]), to_app(args[1]), result);
        }
        return BR_FAILED;
    }
    // In-place variant applied to an intermediate result.
    br_status pull_ite(expr_ref & result) {
        expr * t = result.get();
        if (is_app(t)) {
            br_status st = pull_ite(to_app(t)->get_decl(), to_app(t)->get_num_args(), to_app(t)->get_args(), result);
            if (st != BR_FAILED)
                return st;
        }
        return BR_DONE;
    }
    // True if t belongs to a family for which push_ite is enabled.
    bool is_arith_bv_app(expr * t) const {
        if (!is_app(t))
            return false;
        family_id fid = to_app(t)->get_family_id();
        return ((fid == m_a_rw.get_fid() && m_push_ite_arith) ||
                (fid == m_bv_rw.get_fid() && m_push_ite_bv));
    }
    // Stores the neutral element of t's operator (0 for +, 1 for *) in n.
    bool get_neutral_elem(app * t, expr_ref & n) {
        family_id fid = t->get_family_id();
        if (fid == m_a_rw.get_fid()) {
            switch (t->get_decl_kind()) {
            case OP_ADD: n = m_a_util.mk_numeral(rational(0), m().get_sort(t)); return true;
            case OP_MUL: n = m_a_util.mk_numeral(rational(1), m().get_sort(t)); return true;
            default:
                return false;
            }
        }
        if (fid == m_bv_rw.get_fid()) {
            switch (t->get_decl_kind()) {
            case OP_BADD: n = m_bv_util.mk_numeral(rational(0), m().get_sort(t)); return true;
            case OP_BMUL: n = m_bv_util.mk_numeral(rational(1), m().get_sort(t)); return true;
            default:
                return false;
            }
        }
        return false;
    }
    /**
       \brief Try to "unify" t1 and t2
       Examples
         (+ 2 a) (+ 3 a) -->  2, 3, a
         (+ 2 a) a       -->  2, 0, a
         ...
       On success, c is the common part, new_t1/new_t2 the differing parts,
       and first tells whether the common part was the first argument.
    */
    bool unify_core(app * t1, expr * t2, expr_ref & new_t1, expr_ref & new_t2, expr_ref & c, bool & first) {
        if (t1->get_num_args() != 2)
            return false;
        expr * a1 = t1->get_arg(0);
        expr * b1 = t1->get_arg(1);
        if (t2 == b1) {
            if (get_neutral_elem(t1, new_t2)) {
                new_t1 = a1;
                c = b1;
                first = false;
                return true;
            }
        }
        else if (t2 == a1) {
            if (get_neutral_elem(t1, new_t2)) {
                new_t1 = b1;
                c = a1;
                first = true;
                return true;
            }
        }
        else if (is_app_of(t2, t1->get_decl()) && to_app(t2)->get_num_args() == 2) {
            expr * a2 = to_app(t2)->get_arg(0);
            expr * b2 = to_app(t2)->get_arg(1);
            if (b1 == b2) {
                new_t1 = a1;
                new_t2 = a2;
                c = b2;
                first = false;
                return true;
            }
            if (a1 == a2) {
                new_t1 = b1;
                new_t2 = b2;
                c = a1;
                first = true;
                return true;
            }
            if (t1->get_decl()->is_commutative()) {
                if (a1 == b2) {
                    new_t1 = b1;
                    new_t2 = a2;
                    c = a1;
                    first = true; // doesn't really matter for commutative ops.
                    return true;
                }
                if (b1 == a2) {
                    new_t1 = a1;
                    new_t2 = b2;
                    c = b1;
                    first = false; // doesn't really matter for commutative ops.
                    return true;
                }
            }
        }
        return false;
    }
    // Return true if t1 and t2 are of the form:
    //   t  + a1*x1 + ... + an*xn
    //   t' + a1*x1 + ... + an*xn
    // Store t in new_t1, t' in new_t2 and (a1*x1 + ... + an*xn) in c.
    bool unify_add(app * t1, expr * t2, expr_ref & new_t1, expr_ref & new_t2, expr_ref & c) {
        unsigned num1 = t1->get_num_args();
        expr * const * ms1 = t1->get_args();
        if (num1 < 2)
            return false;
        unsigned num2;
        expr * const * ms2;
        if (m_a_util.is_add(t2)) {
            num2 = to_app(t2)->get_num_args();
            ms2 = to_app(t2)->get_args();
        }
        else {
            num2 = 1;
            ms2 = &t2;
        }
        if (num1 != num2 && num1 != num2 + 1 && num1 != num2 - 1)
            return false;
        new_t1 = 0;
        new_t2 = 0;
        expr_fast_mark1 visited1;
        expr_fast_mark2 visited2;
        for (unsigned i = 0; i < num1; i++) {
            expr * arg = ms1[i];
            visited1.mark(arg);
        }
        for (unsigned i = 0; i < num2; i++) {
            expr * arg = ms2[i];
            visited2.mark(arg);
            if (visited1.is_marked(arg))
                continue;
            if (new_t2)
                return false; // more than one missing term
            new_t2 = arg;
        }
        for (unsigned i = 0; i < num1; i++) {
            expr * arg = ms1[i];
            if (visited2.is_marked(arg))
                continue;
            if (new_t1)
                return false; // more than one missing term
            new_t1 = arg;
        }
        // terms matched...
        bool is_int = m_a_util.is_int(t1);
        if (!new_t1)
            new_t1 = m_a_util.mk_numeral(rational(0), is_int);
        if (!new_t2)
            new_t2 = m_a_util.mk_numeral(rational(0), is_int);
        // mk common part
        ptr_buffer<expr> args;
        for (unsigned i = 0; i < num1; i++) {
            expr * arg = ms1[i];
            if (arg == new_t1.get())
                continue;
            args.push_back(arg);
        }
        SASSERT(!args.empty());
        if (args.size() == 1)
            c = args[0];
        else
            c = m_a_util.mk_add(args.size(), args.c_ptr());
        return true;
    }
    // Entry point for the unification used by push_ite: tries either side.
    bool unify(expr * t1, expr * t2, func_decl * & f, expr_ref & new_t1, expr_ref & new_t2, expr_ref & c, bool & first) {
#if 0
        // Did not work for ring benchmarks
        // Hack for handling more complex cases of + apps
        // such as (+ 2 t1 t2 t3) and (+ 3 t3 t2 t1)
        if (m_a_util.is_add(t1)) {
            first = true; // doesn't matter for AC ops
            f = to_app(t1)->get_decl();
            if (unify_add(to_app(t1), t2, new_t1, new_t2, c))
                return true;
        }
        if (m_a_util.is_add(t2)) {
            first = true; // doesn't matter for AC ops
            f = to_app(t2)->get_decl();
            if (unify_add(to_app(t2), t1, new_t2, new_t1, c))
                return true;
        }
#endif
        if (is_arith_bv_app(t1)) {
            f = to_app(t1)->get_decl();
            return unify_core(to_app(t1), t2, new_t1, new_t2, c, first);
        }
        else if (is_arith_bv_app(t2)) {
            f = to_app(t2)->get_decl();
            return unify_core(to_app(t2), t1, new_t2, new_t1, c, first);
        }
        else {
            return false;
        }
    }
    // Apply transformations of the form
    //
    // (ite c (+ k1 a) (+ k2 a)) --> (+ (ite c k1 k2) a)
    // (ite c (* k1 a) (* k2 a)) --> (* (ite c k1 k2) a)
    //
    // These transformations are useful for bit-vector problems, since
    // they will minimize the number of adders/multipliers/etc
    br_status push_ite(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
        if (!m().is_ite(f))
            return BR_FAILED;
        expr * c = args[0];
        expr * t = args[1];
        expr * e = args[2];
        func_decl * f_prime = 0;
        expr_ref new_t(m()), new_e(m()), common(m());
        bool first;
        TRACE("push_ite", tout << "unifying:\n" << mk_ismt2_pp(t, m()) << "\n" << mk_ismt2_pp(e, m()) << "\n";);
        if (unify(t, e, f_prime, new_t, new_e, common, first)) {
            if (first)
                result = m().mk_app(f_prime, common, m().mk_ite(c, new_t, new_e));
            else
                result = m().mk_app(f_prime, m().mk_ite(c, new_t, new_e), common);
            return BR_DONE;
        }
        TRACE("push_ite", tout << "failed\n";);
        return BR_FAILED;
    }
    // In-place variant applied to an intermediate result.
    br_status push_ite(expr_ref & result) {
        expr * t = result.get();
        if (m().is_ite(t)) {
            br_status st = push_ite(to_app(t)->get_decl(), to_app(t)->get_num_args(), to_app(t)->get_args(), result);
            if (st != BR_FAILED)
                return st;
        }
        return BR_DONE;
    }
    // Main hook called by rewriter_tpl: theory dispatch followed by the
    // optional push_ite / pull_ite passes (only applied to final results).
    br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result, proof_ref & result_pr) {
        result_pr = 0;
        br_status st = reduce_app_core(f, num, args, result);
        if (st != BR_DONE && st != BR_FAILED) {
            CTRACE("th_rewriter_step", st != BR_FAILED,
                   tout << f->get_name() << "\n";
                   for (unsigned i = 0; i < num; i++) tout << mk_ismt2_pp(args[i], m()) << "\n";
                   tout << "---------->\n" << mk_ismt2_pp(result, m()) << "\n";);
            return st;
        }
        if (m_push_ite_bv || m_push_ite_arith) {
            if (st == BR_FAILED)
                st = push_ite(f, num, args, result);
            else
                st = push_ite(result);
        }
        if (m_pull_cheap_ite) {
            if (st == BR_FAILED)
                st = pull_ite(f, num, args, result);
            else
                st = pull_ite(result);
        }
        CTRACE("th_rewriter_step", st != BR_FAILED,
               tout << f->get_name() << "\n";
               for (unsigned i = 0; i < num; i++) tout << mk_ismt2_pp(args[i], m()) << "\n";
               tout << "---------->\n" << mk_ismt2_pp(result, m()) << "\n";);
        return st;
    }
    // Builds (f args) with simplification; falls back to a plain application
    // when no rewrite applies.
    expr_ref mk_app(func_decl* f, unsigned num_args, expr* const* args) {
        expr_ref result(m());
        proof_ref pr(m());
        if (BR_FAILED == reduce_app(f, num_args, args, result, pr)) {
            result = m().mk_app(f, num_args, args);
        }
        return result;
    }
    // Quantifier hook: merges directly-nested same-kind quantifiers without
    // patterns into one; otherwise deduplicates patterns and updates the
    // body. Unused bound variables are eliminated in both cases, and proofs
    // are produced when enabled.
    bool reduce_quantifier(quantifier * old_q,
                           expr * new_body,
                           expr * const * new_patterns,
                           expr * const * new_no_patterns,
                           expr_ref & result,
                           proof_ref & result_pr) {
        quantifier_ref q1(m());
        proof * p1 = 0;
        if (is_quantifier(new_body) &&
            to_quantifier(new_body)->is_forall() == old_q->is_forall() &&
            !old_q->has_patterns() &&
            !to_quantifier(new_body)->has_patterns()) {
            quantifier * nested_q = to_quantifier(new_body);
            ptr_buffer<sort> sorts;
            buffer<symbol> names;
            sorts.append(old_q->get_num_decls(), old_q->get_decl_sorts());
            names.append(old_q->get_num_decls(), old_q->get_decl_names());
            sorts.append(nested_q->get_num_decls(), nested_q->get_decl_sorts());
            names.append(nested_q->get_num_decls(), nested_q->get_decl_names());
            q1 = m().mk_quantifier(old_q->is_forall(),
                                   sorts.size(),
                                   sorts.c_ptr(),
                                   names.c_ptr(),
                                   nested_q->get_expr(),
                                   std::min(old_q->get_weight(), nested_q->get_weight()),
                                   old_q->get_qid(),
                                   old_q->get_skid(),
                                   0, 0, 0, 0);
            SASSERT(is_well_sorted(m(), q1));
            if (m().proofs_enabled()) {
                SASSERT(old_q->get_expr() == new_body);
                p1 = m().mk_pull_quant(old_q, q1);
            }
        }
        else {
            ptr_buffer<expr> new_patterns_buf;
            ptr_buffer<expr> new_no_patterns_buf;
            new_patterns_buf.append(old_q->get_num_patterns(), new_patterns);
            new_no_patterns_buf.append(old_q->get_num_no_patterns(), new_no_patterns);
            remove_duplicates(new_patterns_buf);
            remove_duplicates(new_no_patterns_buf);
            q1 = m().update_quantifier(old_q,
                                       new_patterns_buf.size(), new_patterns_buf.c_ptr(), new_no_patterns_buf.size(), new_no_patterns_buf.c_ptr(),
                                       new_body);
            TRACE("reduce_quantifier", tout << mk_ismt2_pp(old_q, m()) << "\n----->\n" << mk_ismt2_pp(q1, m()) << "\n";);
            SASSERT(is_well_sorted(m(), q1));
        }
        elim_unused_vars(m(), q1, result);
        TRACE("reduce_quantifier", tout << "after elim_unused_vars:\n" << mk_ismt2_pp(result, m()) << "\n";);
        result_pr = 0;
        if (m().proofs_enabled()) {
            proof * p2 = 0;
            if (q1.get() != result.get())
                p2 = m().mk_elim_unused_vars(q1, result);
            result_pr = m().mk_transitivity(p1, p2);
        }
        return true;
    }
    th_rewriter_cfg(ast_manager & m, params_ref const & p):
        m_b_rw(m, p),
        m_a_rw(m, p),
        m_bv_rw(m, p),
        m_ar_rw(m, p),
        m_dt_rw(m),
        m_f_rw(m, p),
        m_dl_rw(m),
        m_pb_rw(m),
        m_seq_rw(m),
        m_a_util(m),
        m_bv_util(m),
        m_used_dependencies(m),
        m_subst(0) {
        updt_local_params(p);
    }
    // Installs (non-owning) the substitution consulted by get_subst.
    void set_substitution(expr_substitution * s) {
        reset();
        m_subst = s;
    }
    void reset() {
        m_subst = 0;
    }
    // Substitution hook: replaces s when the installed substitution maps it,
    // accumulating the substitution's dependencies.
    bool get_subst(expr * s, expr * & t, proof * & pr) {
        if (m_subst == 0)
            return false;
        expr_dependency * d = 0;
        if (m_subst->find(s, t, pr, d)) {
            m_used_dependencies = m().mk_join(m_used_dependencies, d);
            return true;
        }
        return false;
    }
};
// Explicit instantiation of the rewriter template for this configuration.
template class rewriter_tpl<th_rewriter_cfg>;
// Pimpl for th_rewriter: a rewriter_tpl that owns its cfg.
// NOTE(review): m_cfg is passed to the base constructor before m_cfg itself
// is constructed — presumably the base stores only the reference and does
// not use it during construction; confirm in rewriter_tpl.
struct th_rewriter::imp : public rewriter_tpl<th_rewriter_cfg> {
    th_rewriter_cfg m_cfg;
    imp(ast_manager & m, params_ref const & p):
        rewriter_tpl<th_rewriter_cfg>(m, m.proofs_enabled(), m_cfg),
        m_cfg(m, p) {
    }
    // Simplifying application constructor (see th_rewriter_cfg::mk_app).
    expr_ref mk_app(func_decl* f, unsigned sz, expr* const* args) {
        return m_cfg.mk_app(f, sz, args);
    }
    // Forwards the external solver used by the sequence rewriter.
    void set_solver(expr_solver* solver) {
        m_cfg.m_seq_rw.set_solver(solver);
    }
};
// Keeps a copy of the parameters so cleanup() can rebuild the imp with the
// same configuration.
th_rewriter::th_rewriter(ast_manager & m, params_ref const & p):
    m_params(p) {
    m_imp = alloc(imp, m, p);
}
// AST manager this rewriter operates on.
ast_manager & th_rewriter::m() const {
    return m_imp->m();
}
// Updates both the retained parameter copy and the live configuration.
void th_rewriter::updt_params(params_ref const & p) {
    m_params = p;
    m_imp->cfg().updt_params(p);
}
// Collects the parameter descriptions of every sub-rewriter that accepts
// parameters, plus the rewriter-level ones.
void th_rewriter::get_param_descrs(param_descrs & r) {
    bool_rewriter::get_param_descrs(r);
    arith_rewriter::get_param_descrs(r);
    bv_rewriter::get_param_descrs(r);
    array_rewriter::get_param_descrs(r);
    rewriter_params::collect_param_descrs(r);
}
th_rewriter::~th_rewriter() {
    dealloc(m_imp);
}
// Number of cached rewrite results (forwarded to the rewriter core).
unsigned th_rewriter::get_cache_size() const {
    return m_imp->get_cache_size();
}
// Number of rewrite steps performed so far (forwarded to the rewriter core).
unsigned th_rewriter::get_num_steps() const {
    return m_imp->get_num_steps();
}
// Frees all rewriter state (including the cache) by rebuilding the imp with
// the retained parameters.
void th_rewriter::cleanup() {
    ast_manager & m = m_imp->m();
    dealloc(m_imp);
    m_imp = alloc(imp, m, m_params);
}
// Clears the rewrite cache and drops any installed substitution.
void th_rewriter::reset() {
    m_imp->reset();
    m_imp->cfg().reset();
}
// In-place rewrite of term.
void th_rewriter::operator()(expr_ref & term) {
    expr_ref result(term.get_manager());
    m_imp->operator()(term, result);
    term = result;
}
// Rewrites t into result.
void th_rewriter::operator()(expr * t, expr_ref & result) {
    m_imp->operator()(t, result);
}
// Rewrites t into result, producing a proof when proofs are enabled.
void th_rewriter::operator()(expr * t, expr_ref & result, proof_ref & result_pr) {
    m_imp->operator()(t, result, result_pr);
}
// Rewrites n after substituting its free variables with bindings.
void th_rewriter::operator()(expr * n, unsigned num_bindings, expr * const * bindings, expr_ref & result) {
    m_imp->operator()(n, num_bindings, bindings, result);
}
// Installs a substitution to be applied during rewriting. The cache must be
// reset first since cached results may not reflect the substitution.
void th_rewriter::set_substitution(expr_substitution * s) {
    m_imp->reset(); // reset the cache
    m_imp->cfg().set_substitution(s);
}
// Dependencies of every substitution entry used so far (may be null).
expr_dependency * th_rewriter::get_used_dependencies() {
    return m_imp->cfg().m_used_dependencies;
}
// Clears the accumulated substitution dependencies. Re-installs the current
// substitution first, which resets the cache while preserving the subst.
void th_rewriter::reset_used_dependencies() {
    if (get_used_dependencies() != 0) {
        set_substitution(m_imp->cfg().m_subst); // reset cache preserving subst
        m_imp->cfg().m_used_dependencies = 0;
    }
}
// Builds (f args) with simplification applied (see th_rewriter_cfg::mk_app).
expr_ref th_rewriter::mk_app(func_decl* f, unsigned num_args, expr* const* args) {
    return m_imp->mk_app(f, num_args, args);
}
// Supplies the external solver used by the sequence rewriter.
void th_rewriter::set_solver(expr_solver* solver) {
    m_imp->set_solver(solver);
}
|
// Copyright (c) 2018, Ryo Currency Project
// Portions copyright (c) 2014-2018, The Monero Project
//
// Portions of this file are available under BSD-3 license. Please see ORIGINAL-LICENSE for details
// All rights reserved.
//
// Ryo changes to this code are in public domain. Please note, other licences may apply to the file.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "daemon/command_server.h"
#include "cryptonote_config.h"
#include "string_tools.h"
#include "version.h"
#include <boost/algorithm/string.hpp>
//#undef RYO_DEFAULT_LOG_CATEGORY
//#define RYO_DEFAULT_LOG_CATEGORY "daemon"
namespace daemonize
{
namespace p = std::placeholders;
// Construct the daemon's interactive command server: build the command
// parser and register every console command with its handler, usage
// string and help text. When is_rpc is set, commands are executed over
// RPC rather than a local console (see start_handling/stop_handling).
t_command_server::t_command_server(
	uint32_t ip, uint16_t port, const boost::optional<tools::login> &login, bool is_rpc, cryptonote::core_rpc_server *rpc_server)
	: m_parser(ip, port, login, is_rpc, rpc_server), m_command_lookup(), m_is_rpc(is_rpc)
{
	// Meta / help commands.
	m_command_lookup.set_handler(
		"help", std::bind(&t_command_server::help, this, p::_1), "help [<command>]", "Show the help section or the documentation about a <command>.");
	// Blockchain / network inspection.
	m_command_lookup.set_handler(
		"print_height", std::bind(&t_command_parser_executor::print_height, &m_parser, p::_1), "Print the local blockchain height.");
	m_command_lookup.set_handler(
		"print_pl", std::bind(&t_command_parser_executor::print_peer_list, &m_parser, p::_1), "Print the current peer list.");
	m_command_lookup.set_handler(
		"print_pl_stats", std::bind(&t_command_parser_executor::print_peer_list_stats, &m_parser, p::_1), "Print the peer list statistics.");
	m_command_lookup.set_handler(
		"print_cn", std::bind(&t_command_parser_executor::print_connections, &m_parser, p::_1), "Print the current connections.");
	m_command_lookup.set_handler(
		"print_bc", std::bind(&t_command_parser_executor::print_blockchain_info, &m_parser, p::_1), "print_bc <begin_height> [<end_height>]", "Print the blockchain info in a given blocks range.");
	m_command_lookup.set_handler(
		"print_block", std::bind(&t_command_parser_executor::print_block, &m_parser, p::_1), "print_block <block_hash> | <block_height>", "Print a given block.");
	m_command_lookup.set_handler(
		"print_tx", std::bind(&t_command_parser_executor::print_transaction, &m_parser, p::_1), "print_tx <transaction_hash> [+hex] [+json]", "Print a given transaction.");
	m_command_lookup.set_handler(
		"is_key_image_spent", std::bind(&t_command_parser_executor::is_key_image_spent, &m_parser, p::_1), "is_key_image_spent <key_image>", "Print whether a given key image is in the spent key images set.");
	// Mining control.
	m_command_lookup.set_handler(
		"start_mining", std::bind(&t_command_parser_executor::start_mining, &m_parser, p::_1), "start_mining <addr> [<threads>] [do_background_mining] [ignore_battery]", "Start mining for specified address. Defaults to 1 thread and no background mining.");
	m_command_lookup.set_handler(
		"stop_mining", std::bind(&t_command_parser_executor::stop_mining, &m_parser, p::_1), "Stop mining.");
	// Transaction pool inspection.
	m_command_lookup.set_handler(
		"print_pool", std::bind(&t_command_parser_executor::print_transaction_pool_long, &m_parser, p::_1), "Print the transaction pool using a long format.");
	m_command_lookup.set_handler(
		"print_pool_sh", std::bind(&t_command_parser_executor::print_transaction_pool_short, &m_parser, p::_1), "Print transaction pool using a short format.");
	m_command_lookup.set_handler(
		"print_pool_stats", std::bind(&t_command_parser_executor::print_transaction_pool_stats, &m_parser, p::_1), "Print the transaction pool's statistics.");
	m_command_lookup.set_handler(
		"show_hr", std::bind(&t_command_parser_executor::show_hash_rate, &m_parser, p::_1), "Start showing the current hash rate.");
	m_command_lookup.set_handler(
		"hide_hr", std::bind(&t_command_parser_executor::hide_hash_rate, &m_parser, p::_1), "Stop showing the hash rate.");
	m_command_lookup.set_handler(
		"save", std::bind(&t_command_parser_executor::save_blockchain, &m_parser, p::_1), "Save the blockchain.");
	m_command_lookup.set_handler(
		"set_log", std::bind(&t_command_parser_executor::set_log_level, &m_parser, p::_1), "set_log <level>|<{+,-,}categories>", "Change the current log level/categories where <level> is a number 0-4.");
	m_command_lookup.set_handler(
		"diff", std::bind(&t_command_parser_executor::show_difficulty, &m_parser, p::_1), "Show the current difficulty.");
	m_command_lookup.set_handler(
		"status", std::bind(&t_command_parser_executor::show_status, &m_parser, p::_1), "Show the current status.");
	// Daemon lifecycle ("exit" is an alias of "stop_daemon").
	m_command_lookup.set_handler(
		"stop_daemon", std::bind(&t_command_parser_executor::stop_daemon, &m_parser, p::_1), "Stop the daemon.");
	m_command_lookup.set_handler(
		"exit", std::bind(&t_command_parser_executor::stop_daemon, &m_parser, p::_1), "Stop the daemon.");
	m_command_lookup.set_handler(
		"print_status", std::bind(&t_command_parser_executor::print_status, &m_parser, p::_1), "Print the current daemon status.");
	// Bandwidth / peer limits.
	m_command_lookup.set_handler(
		"limit", std::bind(&t_command_parser_executor::set_limit, &m_parser, p::_1), "limit [<kB/s>]", "Get or set the download and upload limit.");
	m_command_lookup.set_handler(
		"limit_up", std::bind(&t_command_parser_executor::set_limit_up, &m_parser, p::_1), "limit_up [<kB/s>]", "Get or set the upload limit.");
	m_command_lookup.set_handler(
		"limit_down", std::bind(&t_command_parser_executor::set_limit_down, &m_parser, p::_1), "limit_down [<kB/s>]", "Get or set the download limit.");
	m_command_lookup.set_handler(
		"out_peers", std::bind(&t_command_parser_executor::out_peers, &m_parser, p::_1), "out_peers <max_number>", "Set the <max_number> of out peers.");
	m_command_lookup.set_handler(
		"in_peers", std::bind(&t_command_parser_executor::in_peers, &m_parser, p::_1), "in_peers <max_number>", "Set the <max_number> of in peers.");
	m_command_lookup.set_handler(
		"hard_fork_info", std::bind(&t_command_parser_executor::hard_fork_info, &m_parser, p::_1), "Print the hard fork voting information.");
	// Ban management.
	m_command_lookup.set_handler(
		"bans", std::bind(&t_command_parser_executor::show_bans, &m_parser, p::_1), "Show the currently banned IPs.");
	m_command_lookup.set_handler(
		"ban", std::bind(&t_command_parser_executor::ban, &m_parser, p::_1), "ban <IP> [<seconds>]", "Ban a given <IP> for a given amount of <seconds>.");
	m_command_lookup.set_handler(
		"unban", std::bind(&t_command_parser_executor::unban, &m_parser, p::_1), "unban <IP>", "Unban a given <IP>.");
	// Miscellaneous maintenance and diagnostics.
	m_command_lookup.set_handler(
		"flush_txpool", std::bind(&t_command_parser_executor::flush_txpool, &m_parser, p::_1), "flush_txpool [<txid>]", "Flush a transaction from the tx pool by its <txid>, or the whole tx pool.");
	m_command_lookup.set_handler(
		"output_histogram", std::bind(&t_command_parser_executor::output_histogram, &m_parser, p::_1), "output_histogram [@<amount>] <min_count> [<max_count>]", "Print the output histogram of outputs.");
	m_command_lookup.set_handler(
		"print_coinbase_tx_sum", std::bind(&t_command_parser_executor::print_coinbase_tx_sum, &m_parser, p::_1), "print_coinbase_tx_sum <start_height> [<block_count>]", "Print the sum of coinbase transactions.");
	m_command_lookup.set_handler(
		"alt_chain_info", std::bind(&t_command_parser_executor::alt_chain_info, &m_parser, p::_1), "Print the information about alternative chains.");
	m_command_lookup.set_handler(
		"bc_dyn_stats", std::bind(&t_command_parser_executor::print_blockchain_dynamic_stats, &m_parser, p::_1), "bc_dyn_stats <last_block_count>", "Print the information about current blockchain dynamic state.");
	m_command_lookup.set_handler(
		"update", std::bind(&t_command_parser_executor::update, &m_parser, p::_1), "update (check|download)", "Check if an update is available, optionally downloads it if there is. Updating is not yet implemented.");
	m_command_lookup.set_handler(
		"relay_tx", std::bind(&t_command_parser_executor::relay_tx, &m_parser, p::_1), "relay_tx <txid>", "Relay a given transaction by its <txid>.");
	m_command_lookup.set_handler(
		"sync_info", std::bind(&t_command_parser_executor::sync_info, &m_parser, p::_1), "Print information about the blockchain sync state.");
	m_command_lookup.set_handler(
		"version", std::bind(&t_command_parser_executor::version, &m_parser, p::_1), "Print version information.");
}
// Parse and execute a single command line string.
bool t_command_server::process_command_str(const std::string &cmd)
{
	return m_command_lookup.process_command_str(cmd);
}
// Dispatch an already-tokenized command; if it is unknown or fails,
// print the help screen so the user can see the valid commands.
bool t_command_server::process_command_vec(const std::vector<std::string> &cmd)
{
	const bool ok = m_command_lookup.process_command_vec(cmd);
	if(!ok)
		help(std::vector<std::string>());
	return ok;
}
// Start the interactive console loop. Returns false without doing
// anything when the daemon is driven over RPC instead of a console.
bool t_command_server::start_handling(std::function<void(void)> exit_handler)
{
	if(m_is_rpc)
		return false;
	m_command_lookup.start_handling("", get_commands_str(), exit_handler);
	return true;
}
// Stop the interactive console loop; no-op in RPC mode.
void t_command_server::stop_handling()
{
	if(m_is_rpc)
		return;
	m_command_lookup.stop_handling();
}
// "help" command: with no arguments print the full command list,
// otherwise print the documentation of the requested command.
bool t_command_server::help(const std::vector<std::string> &args)
{
	std::cout << (args.empty() ? get_commands_str() : get_command_usage(args)) << std::endl;
	return true;
}
// Build the full help text: a version banner followed by the usage string
// of every registered command, each line indented by two spaces.
std::string t_command_server::get_commands_str()
{
	std::string usage = m_command_lookup.get_usage();
	boost::replace_all(usage, "\n", "\n  ");
	std::stringstream ss;
	ss << "Ryo '" << RYO_RELEASE_NAME << "' (" << RYO_VERSION_FULL << ")" << std::endl;
	ss << "Commands: " << std::endl;
	ss << "  " << usage << std::endl;
	return ss.str();
}
// Build a human-readable usage/description text for the command named in
// `args`. Robustness fix: the original called args.front() while only the
// "unknown command" branch was reachable with an empty vector, which is
// undefined behavior — guard against empty input explicitly.
std::string t_command_server::get_command_usage(const std::vector<std::string> &args)
{
	std::stringstream ss;
	if(args.empty())
	{
		// args.front() below would be UB on an empty vector.
		ss << "Unknown command: " << std::endl;
		return ss.str();
	}
	std::pair<std::string, std::string> documentation = m_command_lookup.get_documentation(args);
	if(documentation.first.empty())
	{
		ss << "Unknown command: " << args.front() << std::endl;
	}
	else
	{
		// When no long description exists, the short text doubles as the
		// description and the command name is used as the usage line.
		std::string usage = documentation.second.empty() ? args.front() : documentation.first;
		std::string description = documentation.second.empty() ? documentation.first : documentation.second;
		usage.insert(0, "  ");
		ss << "Command usage: " << std::endl
		   << usage << std::endl
		   << std::endl;
		boost::replace_all(description, "\n", "\n  ");
		description.insert(0, "  ");
		ss << "Command description: " << std::endl
		   << description << std::endl;
	}
	return ss.str();
}
} // namespace daemonize
|
#include "downloadmanager.h"
#include <QApplication>
#include <QNetworkReply>
#include <QNetworkRequest>
#include <QFile>
#include <QDir>
#include <QDateTime>
// Wire the network access manager so every finished request is routed to
// OnReply, which commits or discards the downloaded file.
DownloadManager::DownloadManager(QObject* parent) :
    QObject(parent)
{
    connect(&m_manager, &QNetworkAccessManager::finished, this, &DownloadManager::OnReply);
}
// Start downloading `url` into <appdir>/tmp/<FileName> (falling back to the
// URL's file name). Returns false when the URL is empty or the target file
// cannot be opened; otherwise issues the request and streams data via
// OnReadyRead until OnReply finalizes it.
// Fixes: a QFile left over from a previous call is no longer leaked, and
// mkpath() is used so missing parent directories are created too (mkdir()
// fails unless the parent already exists).
bool DownloadManager::DownloadFile(const QUrl &url, const QString &FileName)
{
    if (url.isEmpty())
    {
        return false;
    }
    m_startTime = QDateTime::currentDateTime().toTime_t();
    QString path = qApp->applicationDirPath() + QDir::separator() + "tmp" + QDir::separator();
    QString file = path + (FileName.isEmpty() ? url.fileName() : FileName);
    qDebug() << "Download Path: " << path;
    qDebug() << "Download File Path: " << file;
    if (!QDir(path).exists())
    {
        QDir().mkpath(path);
    }
    // Release any file object from a previous (failed or finished) download
    // before allocating a new one, so repeated calls do not leak.
    delete m_file;
    m_file = new QFile(file);
    if (!m_file->open(QIODevice::WriteOnly))
    {
        delete m_file;
        m_file = nullptr;
        return false;
    }
    QNetworkRequest request(url);
    request.setAttribute(QNetworkRequest::FollowRedirectsAttribute, true);
    m_currentReply = m_manager.get(request);
    connect(m_currentReply, &QNetworkReply::readyRead, this, &DownloadManager::OnReadyRead);
    connect(m_currentReply, &QNetworkReply::downloadProgress, this, &DownloadManager::UpdateDownloadProgress);
    return true;
}
// Produce a coarse human-readable time estimate from the average transfer
// rate since m_startTime, or "unknown" when no estimate can be made yet.
// Fix: the original integer expression bytesTotal / (bytesReceived / difference)
// divided by zero whenever fewer bytes than elapsed seconds had arrived;
// compute the rate in floating point and guard both divisors.
QString DownloadManager::GetTimeRemaining(qint64 bytesReceived, qint64 bytesTotal)
{
    uint difference = QDateTime::currentDateTime().toTime_t() - m_startTime;
    if (difference > 0 && bytesReceived > 0)
    {
        QString timeString;
        qreal bytesPerSecond = qreal(bytesReceived) / difference;
        qreal timeRemaining = qreal(bytesTotal) / bytesPerSecond;
        if (timeRemaining > 7200)
        {
            timeRemaining /= 3600;
            int hours = int(timeRemaining + 0.5);
            timeString = hours > 1 ? tr("about %1 hours").arg(hours) : tr("about one hour");
        }
        else if (timeRemaining > 60)
        {
            timeRemaining /= 60;
            int minutes = int(timeRemaining + 0.5);
            timeString = minutes > 1 ? tr("%1 minutes").arg(minutes) : tr("1 minute");
        }
        else
        {
            int seconds = int(timeRemaining + 0.5);
            timeString = seconds > 1 ? tr("%1 seconds").arg(seconds) : tr("1 second");
        }
        return timeString;
    }
    return "unknown";
}
// Placeholder for applying a completed download as an update.
void DownloadManager::InitInstall()
{
    // TODO: Change the version.txt to the newly installed version
    // TODO: actually apply the patch that we just downloaded
}
// Stream any newly received bytes from the reply straight into the
// destination file (no-op when no file is open).
void DownloadManager::OnReadyRead()
{
    if (!m_file)
    {
        return;
    }
    m_file->write(m_currentReply->readAll());
}
// Abort the in-flight download, if any; the finished signal will still
// fire and OnReply performs the cleanup.
void DownloadManager::CancelDownload()
{
    if (!m_currentReply)
    {
        return;
    }
    m_currentReply->abort();
}
// Finalize a finished request: flush/close the file on success, delete it
// on error, then release the QFile and the reply.
// Fixes: m_file is now null-checked before dereferencing, and
// m_currentReply is cleared so CancelDownload cannot abort a reply that
// has already been scheduled for deletion (dangling pointer).
void DownloadManager::OnReply(QNetworkReply* reply)
{
    if (m_file)
    {
        if (reply->error() == QNetworkReply::NoError)
        {
            m_file->flush();
            m_file->close();
        }
        else
        {
            m_file->remove();
        }
        delete m_file;
        m_file = nullptr;
    }
    if (reply == m_currentReply)
    {
        m_currentReply = nullptr;
    }
    reply->deleteLater();
}
|
// Copyright 2018 The Darwin Neuroevolution Framework Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <third_party/sqlite/sqlite3.h>
#include <core/exception.h>
#include <tests/testcase_output.h>
#include <third_party/gtest/gtest.h>
#include <string.h>
using namespace std;
#include <filesystem>
namespace fs = std::filesystem;
namespace sqlite_tests {
// Test fixture that opens a fresh on-disk SQLite database per test and
// deletes the file again when the test finishes.
struct SqliteTest : public testing::Test {
  SqliteTest() {
    path_ = testdbPath();
    fs::remove(path_);  // drop leftovers from a previous (aborted) run
    if (sqlite3_open(path_.c_str(), &db_) != SQLITE_OK) {
      // sqlite3_open allocates a handle even on failure; close it before
      // reporting the error.
      CHECK(sqlite3_close(db_) == SQLITE_OK);
      throw core::Exception("Failed to open the database: %s", path_);
    }
  }
  ~SqliteTest() {
    CHECK(sqlite3_close(db_) == SQLITE_OK);
    fs::remove(path_);
  }
 protected:
  // Unique per-test database path: <TEST_TEMP_PATH>/<suite>_<test>.db
  static string testdbPath() {
    const auto test_info = ::testing::UnitTest::GetInstance()->current_test_info();
    return string(TEST_TEMP_PATH) + "/" + test_info->test_case_name() + "_" +
           test_info->name() + ".db";
  }
  // sqlite3_exec row callback: append "column = value" lines (plus a blank
  // line per row) to the string passed through `data`.
  static int execCallback(void* data, int argc, char* argv[], char* colv[]) {
    auto result = static_cast<string*>(data);
    for (int i = 0; i < argc; ++i) {
      *result += colv[i];
      *result += " = ";
      *result += (argv[i] != nullptr) ? argv[i] : "NULL";
      *result += "\n";
    }
    *result += "\n";
    return 0;
  }
  // Execute a SQL statement, expecting success, and return the formatted
  // rows produced by execCallback.
  string exec(const char* statement) {
    string result;
    int rc = sqlite3_exec(db_, statement, execCallback, &result, nullptr);
    EXPECT_EQ(rc, SQLITE_OK);
    return result;
  }
 protected:
  sqlite3* db_ = nullptr;  // owned handle, closed in the destructor
  string path_;            // database file location
};
// Sanity check: opening a database and executing a no-op statement works.
TEST_F(SqliteTest, Smoke) {
  // just open a new database and execute an empty statement
  exec(" ");
}
// Golden-output test: inserts a few rows and snapshots the SELECT results
// (including the sqlite_master schema dump) via TestCaseOutput.
TEST_F(SqliteTest, Select) {
  core_test::TestCaseOutput output;
  exec("create table foo(key text primary key, flag int default 0, value real)");
  exec("insert into foo values('a', 1, 1.0)");
  exec("insert into foo values('b', 2, 2.2)");
  exec("insert into foo values('c', 3, 3.3)");
  auto results = exec("select * from foo");
  fprintf(output, "%s", results.c_str());
  fprintf(output, "----------------------------------\n");
  auto results2 = exec("select * from sqlite_master");
  fprintf(output, "%s", results2.c_str());
}
// Exercise create/insert/select/update/delete/drop, including edge values
// (empty and NULL keys, repeated updates) — each statement must succeed.
TEST_F(SqliteTest, BasicCRUD) {
  exec("create table foo(key text primary key, flag int default 0, data blob)");
  exec("insert into foo(key) values ('blah')");
  exec("insert into foo(key, flag) values('', 1)");
  exec("insert into foo(key, flag) values(null, 1)");
  exec("insert into foo(key, flag) values(null, null)");
  exec("insert into foo(key, flag) values(' ', null)");
  exec("select * from foo");
  exec("update foo set flag = 1 where flag = 0");
  exec("select key, flag as gate from foo");
  exec("update foo set key = 'moo' where key = ''");
  exec("update foo set key = 'moo' where key = ''");
  exec("update foo set key = 'moo' where key = 'moo'");
  exec("delete from foo where key = 'moo'");
  exec("drop table foo");
}
} // namespace sqlite_tests
|
#include <bits/stdc++.h>
#include <sstream>
// Competitive-programming shorthand aliases.
#define pb push_back
#define mp make_pair
#define pii pair<int,int>
#define vi vector<int>
using namespace std;
typedef long long ll;
typedef unsigned long long ull;
typedef long double ld;
// Prime modulus (1e9+7) used by the rolling-hash code below.
const ll MOD = 1000000007;
// 4-neighborhood offsets (not used in the visible code).
const int dx[4] = {1, -1, 0, 0};
const int dy[4] = {0, 0, 1, -1};
// Rabin-Karp substring search: return the starting indices of every
// occurrence of `needle` in `haystack` (probabilistic — hash collisions
// modulo 1e9+7 are not re-verified, matching the original behavior).
//
// Fixes over the original:
//  * empty `needle` (or both strings empty) indexed preffix[-1]/preffix[0]
//    out of bounds — now returns an empty result for an empty pattern;
//  * strings are taken by const reference instead of by value;
//  * self-contained (local constants instead of file-level MOD).
std::vector<int> rabin_karp(const std::string& needle, const std::string& haystack){
    std::vector<int> answer;
    if(needle.empty() || needle.size() > haystack.size()) return answer;
    constexpr long long kMod = 1000000007;  // same prime as file-level MOD
    constexpr long long kBase = 31;
    const int n = (int)haystack.size();
    const int m = (int)needle.size();
    // power[i] = kBase^i mod kMod; n >= m, so n entries cover both strings.
    std::vector<long long> power(n);
    power[0] = 1;
    for(int i = 1; i < n; i++){
        power[i] = (power[i - 1] * kBase) % kMod;
    }
    // prefix[i] = hash of haystack[0..i] with position-weighted characters.
    std::vector<long long> prefix(n, 0);
    prefix[0] = (haystack[0] - 'a' + 1) % kMod;
    for(int i = 1; i < n; i++){
        prefix[i] = (prefix[i - 1] + (haystack[i] - 'a' + 1) * power[i]) % kMod;
    }
    long long needle_hash = 0;
    for(int i = 0; i < m; i++){
        needle_hash = (needle_hash + (needle[i] - 'a' + 1) * power[i]) % kMod;
    }
    // Slide a window of length m; shifting the pattern hash by power[left]
    // aligns it with the position-weighted window hash.
    for(int left = 0, right = m - 1; right < n; ++left, ++right){
        long long window = (left == 0)
            ? prefix[right] % kMod
            : (prefix[right] - prefix[left - 1] + kMod) % kMod;
        if((needle_hash * power[left]) % kMod == window){
            answer.push_back(left);
        }
    }
    return answer;
}
int main(){
ios_base::sync_with_stdio(false);
cin.tie(0);
int size;
ll p = 31;
string needle, haystack;
while(cin >> size){
cin >> needle;
cin >> haystack;
if((int)needle.size() > (int)haystack.size()){
cout << endl;
continue;
}
vector<int> occurrences = rabin_karp(needle, haystack);
if(occurrences.size() > 0){
for(auto x : occurrences){
cout << x << endl;
}
}
cout << endl;
}
return 0;
}
|
/*
* Copyright (C) 2018 ETH Zurich and University of Bologna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Authors: Germain Haugou, ETH (germain.haugou@iis.ee.ethz.ch)
*/
#include <stdio.h>
#include <stdarg.h>
#include <signal.h>
#include <stdexcept>
|
//===--- SILPrinter.cpp - Pretty-printing of SIL Code ---------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
///
/// \file
///
/// This file defines the logic to pretty-print SIL, Instructions, etc.
///
//===----------------------------------------------------------------------===//
#include "swift/Strings.h"
#include "swift/Demangling/Demangle.h"
#include "swift/Basic/QuotedString.h"
#include "swift/SIL/SILPrintContext.h"
#include "swift/SIL/ApplySite.h"
#include "swift/SIL/CFG.h"
#include "swift/SIL/SILFunction.h"
#include "swift/SIL/SILCoverageMap.h"
#include "swift/SIL/SILDebugScope.h"
#include "swift/SIL/SILDeclRef.h"
#include "swift/SIL/SILModule.h"
#include "swift/SIL/SILVisitor.h"
#include "swift/SIL/SILVTable.h"
#include "swift/AST/Decl.h"
#include "swift/AST/GenericEnvironment.h"
#include "swift/AST/Module.h"
#include "swift/AST/PrintOptions.h"
#include "swift/AST/ProtocolConformance.h"
#include "swift/AST/Types.h"
#include "swift/Basic/STLExtras.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/FileSystem.h"
using namespace swift;
using ID = SILPrintContext::ID;
// Command-line flags controlling SIL printing.
// Fix: SILPrintNoColor is a cl::opt<bool> but was initialized with
// llvm::cl::init("") — a string literal, not a bool, like its siblings use.
llvm::cl::opt<bool>
SILPrintNoColor("sil-print-no-color", llvm::cl::init(false),
                llvm::cl::desc("Don't use color when printing SIL"));
llvm::cl::opt<bool>
SILFullDemangle("sil-full-demangle", llvm::cl::init(false),
                llvm::cl::desc("Fully demangle symbol names in SIL output"));
llvm::cl::opt<bool>
SILPrintDebugInfo("sil-print-debuginfo", llvm::cl::init(false),
                llvm::cl::desc("Include debug info in SIL output"));
llvm::cl::opt<bool> SILPrintGenericSpecializationInfo(
    "sil-print-generic-specialization-info", llvm::cl::init(false),
    llvm::cl::desc("Include generic specialization"
                   "information info in SIL output"));
// Demangle a symbol name for display: full demangling when the
// -sil-full-demangle flag is set, otherwise the simplified UI flavor.
static std::string demangleSymbol(StringRef Name) {
  return SILFullDemangle
             ? Demangle::demangleSymbolAsString(Name)
             : Demangle::demangleSymbolAsString(
                   Name, Demangle::DemangleOptions::SimplifiedUIDemangleOptions());
}
// Token categories that SILColor knows how to colorize.
enum SILColorKind {
  SC_Type,
};
namespace {
/// RAII based coloring of SIL output.
class SILColor {
  raw_ostream &OS;
  enum raw_ostream::Colors Color;
public:
#define DEF_COL(NAME, RAW) case NAME: Color = raw_ostream::RAW; break;
  explicit SILColor(raw_ostream &OS, SILColorKind K) : OS(OS) {
    // Coloring is skipped entirely for non-color streams or when disabled
    // via -sil-print-no-color.
    if (!OS.has_colors() || SILPrintNoColor)
      return;
    switch (K) {
    DEF_COL(SC_Type, YELLOW)
    }
    OS.resetColor();
    OS.changeColor(Color);
  }
  // Same scheme keyed on the ID kind being printed.
  explicit SILColor(raw_ostream &OS, ID::ID_Kind K) : OS(OS) {
    if (!OS.has_colors() || SILPrintNoColor)
      return;
    switch (K) {
    DEF_COL(ID::SILUndef, RED)
    DEF_COL(ID::SILBasicBlock, GREEN)
    DEF_COL(ID::SSAValue, MAGENTA)
    DEF_COL(ID::Null, YELLOW)
    }
    OS.resetColor();
    OS.changeColor(Color);
  }
  ~SILColor() {
    if (!OS.has_colors() || SILPrintNoColor)
      return;
    // FIXME: instead of resetColor(), we can look into
    // capturing the current active color and restoring it.
    OS.resetColor();
  }
#undef DEF_COL
};
} // end anonymous namespace
// Print a SIL value/block identifier, colorized by its kind. Undef and
// null operands are fixed strings; blocks and SSA values get a prefix
// followed by their number.
void SILPrintContext::ID::print(raw_ostream &OS) {
  SILColor C(OS, Kind);
  switch (Kind) {
  case ID::SILUndef:
    OS << "undef";
    return;
  case ID::Null:
    OS << "<<NULL OPERAND>>";
    return;
  case ID::SILBasicBlock:
    OS << "bb";
    break;
  case ID::SSAValue:
    OS << '%';
    break;
  }
  OS << Number;
}
namespace swift {
// Stream support so IDs can be printed with the usual << syntax.
raw_ostream &operator<<(raw_ostream &OS, SILPrintContext::ID i) {
  i.print(OS);
  return OS;
}
} // namespace swift
/// IDAndType - Used when a client wants to print something like "%0 : $Int".
struct SILValuePrinterInfo {
  ID ValueID;                                  // e.g. %0
  SILType Type;                                // invalid => print ID only
  Optional<ValueOwnershipKind> OwnershipKind;  // printed as "@kind" if set
  SILValuePrinterInfo(ID ValueID) : ValueID(ValueID), Type(), OwnershipKind() {}
  SILValuePrinterInfo(ID ValueID, SILType Type)
      : ValueID(ValueID), Type(Type), OwnershipKind() {}
  SILValuePrinterInfo(ID ValueID, SILType Type,
                      ValueOwnershipKind OwnershipKind)
      : ValueID(ValueID), Type(Type), OwnershipKind(OwnershipKind) {}
};
/// Return the fully qualified dotted path for DeclContext.
/// Recurses outward through parents; each nominal/extension contributes
/// "Name." while non-nominal contexts contribute a placeholder string.
static void printFullContext(const DeclContext *Context, raw_ostream &Buffer) {
  if (!Context)
    return;
  switch (Context->getContextKind()) {
  case DeclContextKind::Module:
    // Only the Builtin module is spelled out explicitly; other modules are
    // omitted from the path.
    if (Context == cast<ModuleDecl>(Context)->getASTContext().TheBuiltinModule)
      Buffer << cast<ModuleDecl>(Context)->getName() << ".";
    return;
  case DeclContextKind::FileUnit:
    // Ignore the file; just print the module.
    printFullContext(Context->getParent(), Buffer);
    return;
  case DeclContextKind::Initializer:
    // FIXME
    Buffer << "<initializer>";
    return;
  case DeclContextKind::AbstractClosureExpr:
    // FIXME
    Buffer << "<anonymous function>";
    return;
  case DeclContextKind::SerializedLocal:
    Buffer << "<serialized local context>";
    return;
  case DeclContextKind::GenericTypeDecl: {
    auto *generic = cast<GenericTypeDecl>(Context);
    printFullContext(generic->getDeclContext(), Buffer);
    Buffer << generic->getName() << ".";
    return;
  }
  case DeclContextKind::ExtensionDecl: {
    // Print the extended nominal's path, not the extension's own context.
    const NominalTypeDecl *ExtNominal =
      cast<ExtensionDecl>(Context)->getExtendedNominal();
    printFullContext(ExtNominal->getDeclContext(), Buffer);
    Buffer << ExtNominal->getName() << ".";
    return;
  }
  case DeclContextKind::TopLevelCodeDecl:
    // FIXME
    Buffer << "<top level code>";
    return;
  case DeclContextKind::AbstractFunctionDecl:
    // FIXME
    Buffer << "<abstract function>";
    return;
  case DeclContextKind::SubscriptDecl:
    // FIXME
    Buffer << "<subscript>";
    return;
  }
  llvm_unreachable("bad decl context");
}
// Print a declaration as its dotted context path followed by its name.
// Operator names are quoted; names colliding with a keyword (except
// "init") are backtick-escaped.
static void printValueDecl(ValueDecl *Decl, raw_ostream &OS) {
  printFullContext(Decl->getDeclContext(), OS);
  assert(Decl->hasName());
  if (Decl->isOperator()) {
    OS << '"' << Decl->getBaseName() << '"';
  } else {
    // The StringSwitch enumerates every language keyword via TokenKinds.def.
    bool shouldEscape = !Decl->getBaseName().isSpecial() &&
        llvm::StringSwitch<bool>(Decl->getBaseName().userFacingName())
            // FIXME: Represent "init" by a special name and remove this case
            .Case("init", false)
#define KEYWORD(kw) \
            .Case(#kw, true)
#include "swift/Syntax/TokenKinds.def"
            .Default(false);
    if (shouldEscape) {
      OS << '`' << Decl->getBaseName().userFacingName() << '`';
    } else {
      OS << Decl->getBaseName().userFacingName();
    }
  }
}
/// SILDeclRef uses sigil "#" and prints the fully qualified dotted path.
/// Layout: "#" <decl path> ["!" <kind/accessor suffix>] ["." <uncurry level>]
/// [".foreign"] [".direct"]. The first suffix uses '!' and later ones '.',
/// tracked via `isDot` below.
void SILDeclRef::print(raw_ostream &OS) const {
  OS << "#";
  if (isNull()) {
    OS << "<null>";
    return;
  }
  bool isDot = true;
  if (!hasDecl()) {
    OS << "<anonymous function>";
  } else if (kind == SILDeclRef::Kind::Func) {
    auto *FD = cast<FuncDecl>(getDecl());
    auto accessor = dyn_cast<AccessorDecl>(FD);
    if (!accessor) {
      printValueDecl(FD, OS);
      isDot = false;
    } else {
      // Accessors print the underlying storage followed by a "!<role>"
      // marker identifying which accessor this is.
      switch (accessor->getAccessorKind()) {
      case AccessorKind::WillSet:
        printValueDecl(accessor->getStorage(), OS);
        OS << "!willSet";
        break;
      case AccessorKind::DidSet:
        printValueDecl(accessor->getStorage(), OS);
        OS << "!didSet";
        break;
      case AccessorKind::Get:
        printValueDecl(accessor->getStorage(), OS);
        OS << "!getter";
        break;
      case AccessorKind::Set:
        printValueDecl(accessor->getStorage(), OS);
        OS << "!setter";
        break;
      case AccessorKind::Address:
        printValueDecl(accessor->getStorage(), OS);
        OS << "!addressor";
        break;
      case AccessorKind::MutableAddress:
        printValueDecl(accessor->getStorage(), OS);
        OS << "!mutableAddressor";
        break;
      case AccessorKind::Read:
        printValueDecl(accessor->getStorage(), OS);
        OS << "!read";
        break;
      case AccessorKind::Modify:
        printValueDecl(accessor->getStorage(), OS);
        OS << "!modify";
        break;
      }
    }
  } else {
    printValueDecl(getDecl(), OS);
  }
  // Entity-kind suffix (constructors, destructors, ivar helpers, ...).
  switch (kind) {
  case SILDeclRef::Kind::Func:
    break;
  case SILDeclRef::Kind::Allocator:
    OS << "!allocator";
    break;
  case SILDeclRef::Kind::Initializer:
    OS << "!initializer";
    break;
  case SILDeclRef::Kind::EnumElement:
    OS << "!enumelt";
    break;
  case SILDeclRef::Kind::Destroyer:
    OS << "!destroyer";
    break;
  case SILDeclRef::Kind::Deallocator:
    OS << "!deallocator";
    break;
  case SILDeclRef::Kind::IVarInitializer:
    OS << "!ivarinitializer";
    break;
  case SILDeclRef::Kind::IVarDestroyer:
    OS << "!ivardestroyer";
    break;
  case SILDeclRef::Kind::GlobalAccessor:
    OS << "!globalaccessor";
    break;
  case SILDeclRef::Kind::DefaultArgGenerator:
    OS << "!defaultarg" << "." << defaultArgIndex;
    break;
  case SILDeclRef::Kind::StoredPropertyInitializer:
    OS << "!propertyinit";
    break;
  }
  // Trailing markers: non-zero uncurry level, foreign entry point,
  // direct (non-dispatched) reference.
  auto uncurryLevel = getParameterListCount() - 1;
  if (uncurryLevel != 0)
    OS << (isDot ? '.' : '!') << uncurryLevel;
  if (isForeign)
    OS << ((isDot || uncurryLevel != 0) ? '.' : '!') << "foreign";
  if (isDirectReference)
    OS << ((isDot || uncurryLevel != 0) ? '.' : '!') << "direct";
}
// Debug helper: print to stderr with a trailing newline.
void SILDeclRef::dump() const {
  print(llvm::errs());
  llvm::errs() << '\n';
}
/// Pretty-print the generic specialization information.
/// Emits a comment block naming the specialized entity and then walks the
/// caller chain, printing each caller/parent and its substitutions, until
/// a non-specialized caller is reached.
static void printGenericSpecializationInfo(
    raw_ostream &OS, StringRef Kind, StringRef Name,
    const GenericSpecializationInformation *SpecializationInfo,
    SubstitutionMap Subs = { }) {
  if (!SpecializationInfo)
    return;
  auto PrintSubstitutions = [&](SubstitutionMap Subs) {
    OS << '<';
    interleave(Subs.getReplacementTypes(),
               [&](Type type) { OS << type; },
               [&] { OS << ", "; });
    OS << '>';
  };
  OS << "// Generic specialization information for " << Kind << " " << Name;
  if (!Subs.empty()) {
    OS << " ";
    PrintSubstitutions(Subs);
  }
  OS << ":\n";
  while (SpecializationInfo) {
    OS << "// Caller: " << SpecializationInfo->getCaller()->getName() << '\n';
    OS << "// Parent: " << SpecializationInfo->getParent()->getName() << '\n';
    OS << "// Substitutions: ";
    PrintSubstitutions(SpecializationInfo->getSubstitutions());
    OS << '\n';
    OS << "//\n";
    if (!SpecializationInfo->getCaller()->isSpecialization())
      return;
    SpecializationInfo =
      SpecializationInfo->getCaller()->getSpecializationInfo();
  }
}
// Print the value-category sigil: '*' for addresses, nothing for objects.
static void print(raw_ostream &OS, SILValueCategory category) {
  if (category == SILValueCategory::Object)
    return;
  if (category == SILValueCategory::Address) {
    OS << '*';
    return;
  }
  llvm_unreachable("bad value category!");
}
// Textual SIL spelling for each cast consumption kind.
static StringRef getCastConsumptionKindName(CastConsumptionKind kind) {
  switch (kind) {
  case CastConsumptionKind::TakeAlways:
    return "take_always";
  case CastConsumptionKind::TakeOnSuccess:
    return "take_on_success";
  case CastConsumptionKind::CopyOnSuccess:
    return "copy_on_success";
  case CastConsumptionKind::BorrowAlways:
    return "borrow_always";
  }
  llvm_unreachable("bad cast consumption kind");
}
// Print the leading '$' (colorized as a type) and, for address types,
// the '*' sigil.
static void printSILTypeColorAndSigil(raw_ostream &OS, SILType t) {
  SILColor C(OS, SC_Type);
  OS << '$';
  // Potentially add a leading sigil for the value category.
  ::print(OS, t.getCategory());
}
// Print this SIL type as "$" [*] followed by the Swift type spelling.
void SILType::print(raw_ostream &OS) const {
  printSILTypeColorAndSigil(OS, *this);
  // Print other types as their Swift representation.
  PrintOptions SubPrinter = PrintOptions::printSIL();
  getASTType().print(OS, SubPrinter);
}
// Debug helper: print to stderr with a trailing newline.
void SILType::dump() const {
  print(llvm::errs());
  llvm::errs() << '\n';
}
namespace {
class SILPrinter;
/// SILPrinter class - This holds the internal implementation details of
/// printing SIL structures.
class SILPrinter : public SILInstructionVisitor<SILPrinter> {
SILPrintContext &Ctx;
struct {
llvm::formatted_raw_ostream OS;
PrintOptions ASTOptions;
} PrintState;
unsigned LastBufferID;
// Printers for the underlying stream.
  // Generate trivial forwarding operator<< overloads that write the value
  // directly to the underlying formatted stream.
#define SIMPLE_PRINTER(TYPE) \
  SILPrinter &operator<<(TYPE value) { \
    PrintState.OS << value; \
    return *this; \
  }
  SIMPLE_PRINTER(char)
  SIMPLE_PRINTER(unsigned)
  SIMPLE_PRINTER(uint64_t)
  SIMPLE_PRINTER(StringRef)
  SIMPLE_PRINTER(Identifier)
  SIMPLE_PRINTER(ID)
  SIMPLE_PRINTER(QuotedString)
  SIMPLE_PRINTER(SILDeclRef)
  SIMPLE_PRINTER(APInt)
  SIMPLE_PRINTER(ValueOwnershipKind)
#undef SIMPLE_PRINTER
  // Print "%id [: [@ownership] $type]"; the type part is omitted when the
  // info carries an invalid SILType, and @Any ownership is never printed.
  SILPrinter &operator<<(SILValuePrinterInfo i) {
    SILColor C(PrintState.OS, SC_Type);
    *this << i.ValueID;
    if (!i.Type)
      return *this;
    *this << " : ";
    if (i.OwnershipKind && *i.OwnershipKind != ValueOwnershipKind::Any) {
      *this << "@" << i.OwnershipKind.getValue() << " ";
    }
    return *this << i.Type;
  }
  // Print an AST type using this printer's SIL-oriented AST options.
  SILPrinter &operator<<(Type t) {
    // Print the type using our print options.
    t.print(PrintState.OS, PrintState.ASTOptions);
    return *this;
  }
  // Print a SIL type: '$', optional '*' sigil, then the AST type.
  SILPrinter &operator<<(SILType t) {
    printSILTypeColorAndSigil(PrintState.OS, t);
    t.getASTType().print(PrintState.OS, PrintState.ASTOptions);
    return *this;
  }
public:
  // Construct a printer over the context's stream. AlternativeTypeNames,
  // when provided, substitutes display names for the given types.
  SILPrinter(
      SILPrintContext &PrintCtx,
      llvm::DenseMap<CanType, Identifier> *AlternativeTypeNames = nullptr)
      : Ctx(PrintCtx),
        PrintState{{PrintCtx.OS()}, PrintOptions::printSIL()},
        LastBufferID(0) {
    PrintState.ASTOptions.AlternativeTypeNames = AlternativeTypeNames;
    PrintState.ASTOptions.PrintForSIL = true;
  }
  // ID plus type for a value (invalid SILType when V is null).
  SILValuePrinterInfo getIDAndType(SILValue V) {
    return {Ctx.getID(V), V ? V->getType() : SILType()};
  }
  // As getIDAndType, but also carrying the value's ownership kind.
  SILValuePrinterInfo getIDAndTypeAndOwnership(SILValue V) {
    return {Ctx.getID(V), V ? V->getType() : SILType(), V.getOwnershipKind()};
  }
//===--------------------------------------------------------------------===//
// Big entrypoints.
// Print all basic blocks of a function, separated by blank lines. With
// sortSIL(), blocks are emitted in reverse-post-order for stable diffs.
void print(const SILFunction *F) {
// If we are asked to emit sorted SIL, print out our BBs in RPOT order.
if (Ctx.sortSIL()) {
std::vector<SILBasicBlock *> RPOT;
auto *UnsafeF = const_cast<SILFunction *>(F);
// Post-order, then reversed, yields reverse-post-order.
std::copy(po_begin(UnsafeF), po_end(UnsafeF),
std::back_inserter(RPOT));
std::reverse(RPOT.begin(), RPOT.end());
// Block IDs must be assigned in the same order we print.
Ctx.initBlockIDs(RPOT);
interleave(RPOT,
[&](SILBasicBlock *B) { print(B); },
[&] { *this << '\n'; });
return;
}
interleave(*F,
[&](const SILBasicBlock &B) { print(&B); },
[&] { *this << '\n'; });
}
// Emit "// %arg // users: ..." comment lines for every used block argument,
// placed before the block header.
void printBlockArgumentUses(const SILBasicBlock *BB) {
if (BB->args_empty())
return;
for (SILValue V : BB->getArguments()) {
if (V->use_empty())
continue;
*this << "// " << Ctx.getID(V);
PrintState.OS.PadToColumn(50);
*this << "// user";
// Pluralize when there is more than one use.
if (std::next(V->use_begin()) != V->use_end())
*this << 's';
*this << ": ";
llvm::SmallVector<ID, 32> UserIDs;
for (auto *Op : V->getUses())
UserIDs.push_back(Ctx.getID(Op->getUser()));
// Display the user ids sorted to give a stable use order in the
// printer's output if we are asked to do so. This makes diffing large
// sections of SIL significantly easier at the expense of not showing
// the _TRUE_ order of the users in the use list.
if (Ctx.sortSIL()) {
std::sort(UserIDs.begin(), UserIDs.end());
}
interleave(UserIDs.begin(), UserIDs.end(),
[&] (ID id) { *this << id; },
[&] { *this << ", "; });
*this << '\n';
}
}
void printBlockArguments(const SILBasicBlock *BB) {
if (BB->args_empty())
return;
*this << '(';
ArrayRef<SILArgument *> Args = BB->getArguments();
// If SIL ownership is enabled and the given function has not had ownership
// stripped out, print out ownership of SILArguments.
if (BB->getParent()->hasQualifiedOwnership()) {
*this << getIDAndTypeAndOwnership(Args[0]);
for (SILArgument *Arg : Args.drop_front()) {
*this << ", " << getIDAndTypeAndOwnership(Arg);
}
*this << ')';
return;
}
// Otherwise, fall back to the old behavior
*this << getIDAndType(Args[0]);
for (SILArgument *Arg : Args.drop_front()) {
*this << ", " << getIDAndType(Arg);
}
*this << ')';
}
// Print one basic block: argument-use comments, the "bbN(args):" header
// with a "// Preds:" comment, then each instruction.
void print(const SILBasicBlock *BB) {
// Output uses for BB arguments. These are put into place as comments before
// the block header.
printBlockArgumentUses(BB);
// Then print the name of our block, the arguments, and the block colon.
*this << Ctx.getID(BB);
printBlockArguments(BB);
*this << ":";
if (!BB->pred_empty()) {
PrintState.OS.PadToColumn(50);
*this << "// Preds:";
llvm::SmallVector<ID, 32> PredIDs;
for (auto *BBI : BB->getPredecessorBlocks())
PredIDs.push_back(Ctx.getID(BBI));
// Display the pred ids sorted to give a stable use order in the printer's
// output if we are asked to do so. This makes diffing large sections of
// SIL significantly easier at the expense of not showing the _TRUE_ order
// of the users in the use list.
if (Ctx.sortSIL()) {
std::sort(PredIDs.begin(), PredIDs.end());
}
for (auto Id : PredIDs)
*this << ' ' << Id;
}
*this << '\n';
for (const SILInstruction &I : *BB) {
// Give clients a hook before each instruction.
Ctx.printInstructionCallBack(&I);
if (SILPrintGenericSpecializationInfo) {
if (auto AI = ApplySite::isa(const_cast<SILInstruction *>(&I)))
if (AI.getSpecializationInfo() && AI.getCalleeFunction())
printGenericSpecializationInfo(
PrintState.OS, "call-site", AI.getCalleeFunction()->getName(),
AI.getSpecializationInfo(), AI.getSubstitutionMap());
}
print(&I);
}
}
//===--------------------------------------------------------------------===//
// SILInstruction Printing Logic
// Print the "// type-defs:" comment listing type-dependent operands.
// Returns true if anything was printed (i.e. the "//" column is open).
bool printTypeDependentOperands(const SILInstruction *I) {
ArrayRef<Operand> TypeDepOps = I->getTypeDependentOperands();
if (TypeDepOps.empty())
return false;
PrintState.OS.PadToColumn(50);
*this << "// type-defs: ";
interleave(TypeDepOps,
[&](const Operand &op) { *this << Ctx.getID(op.get()); },
[&] { *this << ", "; });
return true;
}
/// Print out the users of the SILValue \p V. Return true if we printed out
/// either an id or a use list. Return false otherwise.
// Print the users of \p node as a trailing comment, or "id:" for a
// valueless instruction. \p printedSlashes says whether a "//" comment is
// already open on this line; returns whether one is open afterwards.
bool printUsersOfSILNode(const SILNode *node, bool printedSlashes) {
llvm::SmallVector<SILValue, 8> values;
if (auto *value = dyn_cast<ValueBase>(node)) {
values.push_back(value);
} else if (auto *inst = dyn_cast<SILInstruction>(node)) {
assert(!isa<SingleValueInstruction>(inst) && "SingleValueInstruction was "
"handled by the previous "
"value base check.");
copy(inst->getResults(), std::back_inserter(values));
}
// If the set of values is empty, we need to print the ID of
// the instruction. Otherwise, if none of the values has a use,
// we don't need to do anything.
if (!values.empty()) {
bool hasUse = false;
for (auto value : values) {
if (!value->use_empty()) hasUse = true;
}
if (!hasUse)
return printedSlashes;
}
if (printedSlashes) {
*this << "; ";
} else {
PrintState.OS.PadToColumn(50);
*this << "// ";
}
if (values.empty()) {
*this << "id: " << Ctx.getID(node);
return true;
}
llvm::SmallVector<ID, 32> UserIDs;
for (auto value : values)
for (auto *Op : value->getUses())
UserIDs.push_back(Ctx.getID(Op->getUser()));
*this << "user";
// Pluralize for multiple users.
if (UserIDs.size() != 1)
*this << 's';
*this << ": ";
// If we are asked to, display the user ids sorted to give a stable use
// order in the printer's output. This makes diffing large sections of SIL
// significantly easier.
if (Ctx.sortSIL()) {
std::sort(UserIDs.begin(), UserIDs.end());
}
interleave(UserIDs.begin(), UserIDs.end(), [&](ID id) { *this << id; },
[&] { *this << ", "; });
return true;
}
// Print a 'loc "file":line:col' reference when the location decodes to a
// non-empty filename.
void printDebugLocRef(SILLocation Loc, const SourceManager &SM,
bool PrintComma = true) {
auto DL = Loc.decodeDebugLoc(SM);
if (!DL.Filename.empty()) {
if (PrintComma)
*this << ", ";
*this << "loc " << QuotedString(DL.Filename) << ':' << DL.Line << ':'
<< DL.Column;
}
}
// Emit a 'sil_scope' declaration for \p DS (and, recursively, its parent
// and inlined-call-site scopes first) if it has not been assigned an ID
// yet. Recursion guarantees referenced scopes are declared before use.
void printDebugScope(const SILDebugScope *DS, const SourceManager &SM) {
if (!DS)
return;
if (!Ctx.hasScopeID(DS)) {
printDebugScope(DS->Parent.dyn_cast<const SILDebugScope *>(), SM);
printDebugScope(DS->InlinedCallSite, SM);
unsigned ID = Ctx.assignScopeID(DS);
*this << "sil_scope " << ID << " { ";
printDebugLocRef(DS->Loc, SM, false);
*this << " parent ";
// The parent is either a function (printed by name and type) or
// another scope (printed by its numeric ID).
if (auto *F = DS->Parent.dyn_cast<SILFunction *>())
*this << "@" << F->getName() << " : $" << F->getLoweredFunctionType();
else {
auto *PS = DS->Parent.get<const SILDebugScope *>();
*this << Ctx.getScopeID(PS);
}
if (auto *CS = DS->InlinedCallSite)
*this << " inlined_at " << Ctx.getScopeID(CS);
*this << " }\n";
}
}
/// Print a reference to an already-declared debug scope as
/// "scope N" (preceded by ", " unless \p PrintComma is false).
/// A null scope prints nothing.
void printDebugScopeRef(const SILDebugScope *DS, const SourceManager &SM,
                        bool PrintComma = true) {
  if (!DS)
    return;
  if (PrintComma)
    *this << ", ";
  *this << "scope " << Ctx.getScopeID(DS);
}
// Print the verbose trailing location comment for an instruction:
// line:column, the location kind tag, flags, and the inlined-at chain.
void printSILLocation(SILLocation L, SILModule &M, const SILDebugScope *DS,
bool printedSlashes) {
if (!L.isNull()) {
if (!printedSlashes) {
PrintState.OS.PadToColumn(50);
*this << "//";
}
*this << " ";
// To minimize output, only print the line and column number for
// everything but the first instruction.
L.getSourceLoc().printLineAndColumn(PrintState.OS,
M.getASTContext().SourceMgr);
// Print the type of location.
switch (L.getKind()) {
case SILLocation::NoneKind:
assert(L.isAutoGenerated() && "This kind shouldn't be printed.");
break;
case SILLocation::RegularKind:
break;
case SILLocation::ReturnKind:
*this << ":return";
break;
case SILLocation::ImplicitReturnKind:
*this << ":imp_return";
break;
case SILLocation::InlinedKind:
*this << ":inlined";
break;
case SILLocation::MandatoryInlinedKind:
*this << ":minlined";
break;
case SILLocation::CleanupKind:
*this << ":cleanup";
break;
case SILLocation::ArtificialUnreachableKind:
*this << ":art_unreach";
break;
}
// Flag suffixes, printed after the kind tag.
if (L.isSILFile())
*this << ":sil";
if (L.isAutoGenerated())
*this << ":auto_gen";
if (L.isInPrologue())
*this << ":in_prologue";
}
// A null location is still annotated with how it arose.
if (L.isNull()) {
if (!printedSlashes) {
PrintState.OS.PadToColumn(50);
*this << "//";
}
if (L.isInTopLevel())
*this << " top_level";
else if (L.isAutoGenerated())
*this << " auto_gen";
else
*this << " no_loc";
if (L.isInPrologue())
*this << ":in_prologue";
}
if (!DS)
return;
// Print inlined-at location, if any.
const SILDebugScope *CS = DS;
while ((CS = CS->InlinedCallSite)) {
*this << ": ";
if (auto *InlinedF = CS->getInlinedFunction())
*this << demangleSymbol(InlinedF->getName());
else
*this << '?';
*this << " perf_inlined_at ";
auto CallSite = CS->Loc;
if (!CallSite.isNull() && CallSite.isASTNode())
CallSite.getSourceLoc().print(
PrintState.OS, M.getASTContext().SourceMgr, LastBufferID);
else
*this << "?";
}
}
// Print the instruction's mnemonic followed by a space.
void printInstOpCode(const SILInstruction *I) {
*this << getSILInstructionName(I->getKind()) << " ";
}
// Print one instruction: an optional demangled function_ref comment line,
// result bindings, the opcode, the visitor-printed operands, and trailing
// debug/user/location comments.
void print(const SILInstruction *I) {
// For function_ref-like instructions, emit a comment line with the
// demangled callee name above the instruction.
if (auto *FRI = dyn_cast<FunctionRefInst>(I))
*this << " // function_ref "
<< demangleSymbol(FRI->getReferencedFunction()->getName())
<< "\n";
else if (auto *FRI = dyn_cast<DynamicFunctionRefInst>(I))
*this << " // dynamic_function_ref "
<< demangleSymbol(FRI->getReferencedFunction()->getName())
<< "\n";
else if (auto *FRI = dyn_cast<PreviousDynamicFunctionRefInst>(I))
*this << " // prev_dynamic_function_ref "
<< demangleSymbol(FRI->getReferencedFunction()->getName())
<< "\n";
*this << " ";
// Print results.
auto results = I->getResults();
// The last instruction of a static initializer gets the special
// "%initval" name.
if (results.size() == 1 &&
I->isStaticInitializerInst() &&
I == &I->getParent()->back()) {
*this << "%initval = ";
} else if (results.size() == 1) {
ID Name = Ctx.getID(results[0]);
*this << Name << " = ";
} else if (results.size() > 1) {
// Multiple results are bound as a parenthesized tuple.
*this << '(';
bool first = true;
for (auto result : results) {
if (first) {
first = false;
} else {
*this << ", ";
}
ID Name = Ctx.getID(result);
*this << Name;
}
*this << ") = ";
}
// Print the opcode.
printInstOpCode(I);
// Use the visitor to print the rest of the instruction.
visit(const_cast<SILInstruction*>(I));
// Maybe print debugging information.
bool printedSlashes = false;
if (Ctx.printDebugInfo() && !I->isStaticInitializerInst()) {
auto &SM = I->getModule().getASTContext().SourceMgr;
printDebugLocRef(I->getLoc(), SM);
printDebugScopeRef(I->getDebugScope(), SM);
}
printedSlashes = printTypeDependentOperands(I);
// Print users, or id for valueless instructions.
printedSlashes = printUsersOfSILNode(I, printedSlashes);
// Print SIL location.
if (Ctx.printVerbose()) {
printSILLocation(I->getLoc(), I->getModule(), I->getDebugScope(),
printedSlashes);
}
*this << '\n';
}
// Dispatch on the node kind: instructions, block arguments, undef, and
// multiple-value instruction results each get their own printer. The kind
// lists are generated from SILNodes.def.
void print(const SILNode *node) {
switch (node->getKind()) {
#define INST(ID, PARENT) \
case SILNodeKind::ID:
#include "swift/SIL/SILNodes.def"
print(cast<SILInstruction>(node));
return;
#define ARGUMENT(ID, PARENT) \
case SILNodeKind::ID:
#include "swift/SIL/SILNodes.def"
printSILArgument(cast<SILArgument>(node));
return;
case SILNodeKind::SILUndef:
printSILUndef(cast<SILUndef>(node));
return;
#define MULTIPLE_VALUE_INST_RESULT(ID, PARENT) \
case SILNodeKind::ID:
#include "swift/SIL/SILNodes.def"
printSILMultipleValueInstructionResult(
cast<MultipleValueInstructionResult>(node));
return;
}
llvm_unreachable("bad kind");
}
// Print a block argument standalone, with its parent block and users.
void printSILArgument(const SILArgument *arg) {
// This should really only happen during debugging.
*this << Ctx.getID(arg) << " = argument of "
<< Ctx.getID(arg->getParent()) << " : " << arg->getType();
// Print users.
(void) printUsersOfSILNode(arg, false);
*this << '\n';
}
// Print an undef value standalone.
void printSILUndef(const SILUndef *undef) {
// This should really only happen during debugging.
*this << "undef<" << undef->getType() << ">\n";
}
// Print one result of a multiple-value instruction, highlighting it with
// "**...**" among its siblings, followed by the parent instruction.
void printSILMultipleValueInstructionResult(
const MultipleValueInstructionResult *result) {
// This should really only happen during debugging.
if (result->getParent()->getNumResults() == 1) {
*this << "**" << Ctx.getID(result) << "** = ";
} else {
*this << '(';
interleave(result->getParent()->getResults(),
[&](SILValue value) {
if (value == SILValue(result)) {
*this << "**" << Ctx.getID(result) << "**";
return;
}
*this << Ctx.getID(value);
},
[&] { *this << ", "; });
*this << ')';
}
*this << " = ";
printInstOpCode(result->getParent());
auto *nonConstParent =
const_cast<MultipleValueInstruction *>(result->getParent());
visit(static_cast<SILInstruction *>(nonConstParent));
// Print users.
(void)printUsersOfSILNode(result, false);
*this << '\n';
}
// Debugging aid: print a node together with its operands (before) and its
// users (after), each sorted by printer ID for stable output.
void printInContext(const SILNode *node) {
auto sortByID = [&](const SILNode *a, const SILNode *b) {
return Ctx.getID(a).Number < Ctx.getID(b).Number;
};
if (auto *I = dyn_cast<SILInstruction>(node)) {
auto operands = map<SmallVector<SILValue,4>>(I->getAllOperands(),
[](Operand const &o) {
return o.get();
});
std::sort(operands.begin(), operands.end(), sortByID);
for (auto &operand : operands) {
*this << " ";
print(operand);
}
}
*this << "-> ";
print(node);
if (auto V = dyn_cast<ValueBase>(node)) {
auto users = map<SmallVector<const SILInstruction*,4>>(V->getUses(),
[](Operand *o) {
return o->getUser();
});
std::sort(users.begin(), users.end(), sortByID);
for (auto &user : users) {
*this << " ";
print(user);
}
}
}
/// Print the debug-variable suffix of a debug_value/alloc instruction:
/// ", let|var, name \"N\"" plus ", argno K" for function arguments.
/// Nothing is printed for a missing or unnamed variable.
void printDebugVar(Optional<SILDebugVariable> Var) {
  if (!Var || Var->Name.empty())
    return;
  *this << (Var->Constant ? ", let" : ", var");
  *this << ", name \"" << Var->Name << '"';
  if (Var->ArgNo)
    *this << ", argno " << Var->ArgNo;
}
// alloc_stack: element type plus any debug-variable info.
void visitAllocStackInst(AllocStackInst *AVI) {
*this << AVI->getElementType();
printDebugVar(AVI->getVarInfo());
}
// Common attributes of alloc_ref / alloc_ref_dynamic: [objc], [stack],
// and any tail-allocated element clauses.
void printAllocRefInstBase(AllocRefInstBase *ARI) {
if (ARI->isObjC())
*this << "[objc] ";
if (ARI->canAllocOnStack())
*this << "[stack] ";
auto Types = ARI->getTailAllocatedTypes();
auto Counts = ARI->getTailAllocatedCounts();
for (unsigned Idx = 0, NumTypes = Types.size(); Idx < NumTypes; ++Idx) {
*this << "[tail_elems " << Types[Idx] << " * "
<< getIDAndType(Counts[Idx].get()) << "] ";
}
}
void visitAllocRefInst(AllocRefInst *ARI) {
printAllocRefInstBase(ARI);
*this << ARI->getType();
}
void visitAllocRefDynamicInst(AllocRefDynamicInst *ARDI) {
printAllocRefInstBase(ARDI);
*this << getIDAndType(ARDI->getMetatypeOperand());
*this << ", " << ARDI->getType();
}
void visitAllocValueBufferInst(AllocValueBufferInst *AVBI) {
*this << AVBI->getValueType() << " in " << getIDAndType(AVBI->getOperand());
}
void visitAllocBoxInst(AllocBoxInst *ABI) {
*this << ABI->getType();
printDebugVar(ABI->getVarInfo());
}
// Print a substitution map as "<T1, T2, ...>", one entry per generic
// parameter of the (possibly overridden) signature. Prints nothing when
// there are no substitutable parameters.
void printSubstitutions(SubstitutionMap Subs,
GenericSignature *Sig = nullptr) {
if (!Subs.hasAnySubstitutableParams()) return;
// FIXME: This is a hack to cope with cases where the substitution map uses
// a generic signature that's close-to-but-not-the-same-as expected.
auto genericSig = Sig ? Sig : Subs.getGenericSignature();
*this << '<';
bool first = true;
for (auto gp : genericSig->getGenericParams()) {
if (first) first = false;
else *this << ", ";
*this << Type(gp).subst(Subs);
}
*this << '>';
}
// Shared body for all apply-family instructions: callee, substitutions,
// argument list, and callee type.
template <class Inst>
void visitApplyInstBase(Inst *AI) {
*this << Ctx.getID(AI->getCallee());
printSubstitutions(AI->getSubstitutionMap(),
AI->getOrigCalleeType()->getGenericSignature());
*this << '(';
interleave(AI->getArguments(),
[&](const SILValue &arg) { *this << Ctx.getID(arg); },
[&] { *this << ", "; });
*this << ") : ";
if (auto callee = AI->getCallee())
*this << callee->getType();
else
*this << "<<NULL CALLEE>>";
}
// The apply family delegates to visitApplyInstBase, adding per-opcode
// attributes (e.g. [nothrow]) or successor blocks.
void visitApplyInst(ApplyInst *AI) {
if (AI->isNonThrowing())
*this << "[nothrow] ";
visitApplyInstBase(AI);
}
void visitBeginApplyInst(BeginApplyInst *AI) {
if (AI->isNonThrowing())
*this << "[nothrow] ";
visitApplyInstBase(AI);
}
void visitTryApplyInst(TryApplyInst *AI) {
visitApplyInstBase(AI);
*this << ", normal " << Ctx.getID(AI->getNormalBB());
*this << ", error " << Ctx.getID(AI->getErrorBB());
}
void visitPartialApplyInst(PartialApplyInst *CI) {
switch (CI->getFunctionType()->getCalleeConvention()) {
case ParameterConvention::Direct_Owned:
// Default; do nothing.
break;
case ParameterConvention::Direct_Guaranteed:
*this << "[callee_guaranteed] ";
break;
// Should not apply to callees.
case ParameterConvention::Direct_Unowned:
case ParameterConvention::Indirect_In:
case ParameterConvention::Indirect_In_Constant:
case ParameterConvention::Indirect_Inout:
case ParameterConvention::Indirect_In_Guaranteed:
case ParameterConvention::Indirect_InoutAliasable:
llvm_unreachable("unexpected callee convention!");
}
visitApplyInstBase(CI);
}
void visitAbortApplyInst(AbortApplyInst *AI) {
*this << Ctx.getID(AI->getOperand());
}
void visitEndApplyInst(EndApplyInst *AI) {
*this << Ctx.getID(AI->getOperand());
}
// Function references print the (mangled) function name and its type.
void visitFunctionRefInst(FunctionRefInst *FRI) {
FRI->getReferencedFunction()->printName(PrintState.OS);
*this << " : " << FRI->getType();
}
void visitDynamicFunctionRefInst(DynamicFunctionRefInst *FRI) {
FRI->getReferencedFunction()->printName(PrintState.OS);
*this << " : " << FRI->getType();
}
void
visitPreviousDynamicFunctionRefInst(PreviousDynamicFunctionRefInst *FRI) {
FRI->getReferencedFunction()->printName(PrintState.OS);
*this << " : " << FRI->getType();
}
// builtin: quoted name, substitutions, typed argument list, result type.
void visitBuiltinInst(BuiltinInst *BI) {
*this << QuotedString(BI->getName().str());
printSubstitutions(BI->getSubstitutions());
*this << "(";
interleave(BI->getArguments(), [&](SILValue v) {
*this << getIDAndType(v);
}, [&]{
*this << ", ";
});
*this << ") : ";
*this << BI->getType();
}
void visitAllocGlobalInst(AllocGlobalInst *AGI) {
if (AGI->getReferencedGlobal()) {
AGI->getReferencedGlobal()->printName(PrintState.OS);
} else {
*this << "<<placeholder>>";
}
}
void visitGlobalAddrInst(GlobalAddrInst *GAI) {
if (GAI->getReferencedGlobal()) {
GAI->getReferencedGlobal()->printName(PrintState.OS);
} else {
*this << "<<placeholder>>";
}
*this << " : " << GAI->getType();
}
void visitGlobalValueInst(GlobalValueInst *GVI) {
GVI->getReferencedGlobal()->printName(PrintState.OS);
*this << " : " << GVI->getType();
}
void visitIntegerLiteralInst(IntegerLiteralInst *ILI) {
const auto &lit = ILI->getValue();
*this << ILI->getType() << ", " << lit;
}
// Float literals print the exact bit pattern in hex, plus the decimal
// value as a comment.
void visitFloatLiteralInst(FloatLiteralInst *FLI) {
*this << FLI->getType() << ", 0x";
APInt bits = FLI->getBits();
*this << bits.toString(16, /*Signed*/ false);
llvm::SmallString<12> decimal;
FLI->getValue().toString(decimal);
*this << " // " << decimal;
}
// Map a string-literal encoding to its printed keyword (with trailing
// space).
static StringRef getStringEncodingName(StringLiteralInst::Encoding kind) {
switch (kind) {
case StringLiteralInst::Encoding::Bytes: return "bytes ";
case StringLiteralInst::Encoding::UTF8: return "utf8 ";
case StringLiteralInst::Encoding::UTF16: return "utf16 ";
case StringLiteralInst::Encoding::ObjCSelector: return "objc_selector ";
}
llvm_unreachable("bad string literal encoding");
}
void visitStringLiteralInst(StringLiteralInst *SLI) {
*this << getStringEncodingName(SLI->getEncoding());
if (SLI->getEncoding() != StringLiteralInst::Encoding::Bytes) {
// FIXME: this isn't correct: this doesn't properly handle translating
// UTF16 into UTF8, and the SIL parser always parses as UTF8.
*this << QuotedString(SLI->getValue());
return;
}
// "Bytes" are always output in a hexadecimal form.
*this << '"' << llvm::toHex(SLI->getValue()) << '"';
}
/// Print the ownership qualifier of a load instruction as its bracketed
/// sigil ("[take] ", "[copy] ", "[trivial] "); unqualified loads print
/// nothing.
void printLoadOwnershipQualifier(LoadOwnershipQualifier Qualifier) {
  StringRef sigil;
  switch (Qualifier) {
  case LoadOwnershipQualifier::Unqualified:
    return;
  case LoadOwnershipQualifier::Take:
    sigil = "[take] ";
    break;
  case LoadOwnershipQualifier::Copy:
    sigil = "[copy] ";
    break;
  case LoadOwnershipQualifier::Trivial:
    sigil = "[trivial] ";
    break;
  }
  *this << sigil;
}
// load: optional ownership qualifier, then the source address.
void visitLoadInst(LoadInst *LI) {
printLoadOwnershipQualifier(LI->getOwnershipQualifier());
*this << getIDAndType(LI->getOperand());
}
void visitLoadBorrowInst(LoadBorrowInst *LBI) {
*this << getIDAndType(LBI->getOperand());
}
void visitBeginBorrowInst(BeginBorrowInst *LBI) {
*this << getIDAndType(LBI->getOperand());
}
/// Print the ownership qualifier of a store instruction as its bracketed
/// sigil ("[init] ", "[assign] ", "[trivial] "); unqualified stores print
/// nothing.
void printStoreOwnershipQualifier(StoreOwnershipQualifier Qualifier) {
  StringRef sigil;
  switch (Qualifier) {
  case StoreOwnershipQualifier::Unqualified:
    return;
  case StoreOwnershipQualifier::Init:
    sigil = "[init] ";
    break;
  case StoreOwnershipQualifier::Assign:
    sigil = "[assign] ";
    break;
  case StoreOwnershipQualifier::Trivial:
    sigil = "[trivial] ";
    break;
  }
  *this << sigil;
}
// store: "src to [qualifier] dest : $Type".
void visitStoreInst(StoreInst *SI) {
*this << Ctx.getID(SI->getSrc()) << " to ";
printStoreOwnershipQualifier(SI->getOwnershipQualifier());
*this << getIDAndType(SI->getDest());
}
void visitStoreBorrowInst(StoreBorrowInst *SI) {
*this << Ctx.getID(SI->getSrc()) << " to ";
*this << getIDAndType(SI->getDest());
}
void visitEndBorrowInst(EndBorrowInst *EBI) {
*this << getIDAndType(EBI->getOperand());
}
void visitAssignInst(AssignInst *AI) {
*this << Ctx.getID(AI->getSrc()) << " to " << getIDAndType(AI->getDest());
}
// mark_uninitialized: bracketed kind tag, then the operand.
void visitMarkUninitializedInst(MarkUninitializedInst *MU) {
switch (MU->getKind()) {
case MarkUninitializedInst::Var: *this << "[var] "; break;
case MarkUninitializedInst::RootSelf: *this << "[rootself] "; break;
case MarkUninitializedInst::CrossModuleRootSelf:
*this << "[crossmodulerootself] ";
break;
case MarkUninitializedInst::DerivedSelf: *this << "[derivedself] "; break;
case MarkUninitializedInst::DerivedSelfOnly:
*this << "[derivedselfonly] ";
break;
case MarkUninitializedInst::DelegatingSelf: *this << "[delegatingself] ";break;
}
*this << getIDAndType(MU->getOperand());
}
// mark_uninitialized_behavior: init-storage and setter function applications.
void visitMarkUninitializedBehaviorInst(MarkUninitializedBehaviorInst *MU) {
*this << Ctx.getID(MU->getInitStorageFunc());
printSubstitutions(MU->getInitStorageSubstitutions());
*this << '(' << Ctx.getID(MU->getStorage())
<< ") : " << MU->getInitStorageFunc()->getType() << ", "
<< Ctx.getID(MU->getSetterFunc());
printSubstitutions(MU->getSetterSubstitutions());
*this << '(' << Ctx.getID(MU->getSelf())
<< ") : " << MU->getSetterFunc()->getType();
}
void visitMarkFunctionEscapeInst(MarkFunctionEscapeInst *MFE) {
interleave(MFE->getElements(),
[&](SILValue Var) { *this << getIDAndType(Var); },
[&] { *this << ", "; });
}
void visitDebugValueInst(DebugValueInst *DVI) {
*this << getIDAndType(DVI->getOperand());
printDebugVar(DVI->getVarInfo());
}
void visitDebugValueAddrInst(DebugValueAddrInst *DVAI) {
*this << getIDAndType(DVAI->getOperand());
printDebugVar(DVAI->getVarInfo());
}
// Load/store visitors for every checked reference-storage kind (weak,
// unowned, ...) are generated from ReferenceStorage.def.
#define NEVER_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
void visitLoad##Name##Inst(Load##Name##Inst *LI) { \
if (LI->isTake()) \
*this << "[take] "; \
*this << getIDAndType(LI->getOperand()); \
} \
void visitStore##Name##Inst(Store##Name##Inst *SI) { \
*this << Ctx.getID(SI->getSrc()) << " to "; \
if (SI->isInitializationOfDest()) \
*this << "[initialization] "; \
*this << getIDAndType(SI->getDest()); \
}
#define SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
NEVER_LOADABLE_CHECKED_REF_STORAGE(Name, "...")
#include "swift/AST/ReferenceStorage.def"
// copy_addr: "[take] src to [initialization] dest", flags optional.
void visitCopyAddrInst(CopyAddrInst *CI) {
if (CI->isTakeOfSrc())
*this << "[take] ";
*this << Ctx.getID(CI->getSrc()) << " to ";
if (CI->isInitializationOfDest())
*this << "[initialization] ";
*this << getIDAndType(CI->getDest());
}
void visitBindMemoryInst(BindMemoryInst *BI) {
*this << getIDAndType(BI->getBase()) << ", ";
*this << getIDAndType(BI->getIndex()) << " to ";
*this << BI->getBoundType();
}
void visitUnconditionalCheckedCastInst(UnconditionalCheckedCastInst *CI) {
*this << getIDAndType(CI->getOperand()) << " to " << CI->getType();
}
// checked_cast_br: optional [exact], operand, target type, success and
// failure destinations, and optional profile counts.
void visitCheckedCastBranchInst(CheckedCastBranchInst *CI) {
if (CI->isExact())
*this << "[exact] ";
*this << getIDAndType(CI->getOperand()) << " to " << CI->getCastType()
<< ", " << Ctx.getID(CI->getSuccessBB()) << ", "
<< Ctx.getID(CI->getFailureBB());
if (CI->getTrueBBCount())
*this << " !true_count(" << CI->getTrueBBCount().getValue() << ")";
if (CI->getFalseBBCount())
*this << " !false_count(" << CI->getFalseBBCount().getValue() << ")";
}
void visitCheckedCastValueBranchInst(CheckedCastValueBranchInst *CI) {
*this << getIDAndType(CI->getOperand()) << " to " << CI->getCastType()
<< ", " << Ctx.getID(CI->getSuccessBB()) << ", "
<< Ctx.getID(CI->getFailureBB());
}
void visitUnconditionalCheckedCastAddrInst(UnconditionalCheckedCastAddrInst *CI) {
*this << CI->getSourceType() << " in " << getIDAndType(CI->getSrc())
<< " to " << CI->getTargetType() << " in "
<< getIDAndType(CI->getDest());
}
void visitUnconditionalCheckedCastValueInst(
UnconditionalCheckedCastValueInst *CI) {
*this << getIDAndType(CI->getOperand()) << " to " << CI->getType();
}
// checked_cast_addr_br additionally prints its consumption kind.
void visitCheckedCastAddrBranchInst(CheckedCastAddrBranchInst *CI) {
*this << getCastConsumptionKindName(CI->getConsumptionKind()) << ' '
<< CI->getSourceType() << " in " << getIDAndType(CI->getSrc())
<< " to " << CI->getTargetType() << " in "
<< getIDAndType(CI->getDest()) << ", "
<< Ctx.getID(CI->getSuccessBB()) << ", "
<< Ctx.getID(CI->getFailureBB());
if (CI->getTrueBBCount())
*this << " !true_count(" << CI->getTrueBBCount().getValue() << ")";
if (CI->getFalseBBCount())
*this << " !false_count(" << CI->getFalseBBCount().getValue() << ")";
}
// Shared "operand to ResultType" form used by most conversion
// instructions.
void printUncheckedConversionInst(ConversionInst *CI, SILValue operand) {
*this << getIDAndType(operand) << " to " << CI->getType();
}
void visitUncheckedOwnershipConversionInst(
UncheckedOwnershipConversionInst *UOCI) {
*this << getIDAndType(UOCI->getOperand()) << ", "
<< "@" << UOCI->getOperand().getOwnershipKind() << " to "
<< "@" << UOCI->getConversionOwnershipKind();
}
void visitConvertFunctionInst(ConvertFunctionInst *CI) {
*this << getIDAndType(CI->getOperand()) << " to ";
if (CI->withoutActuallyEscaping())
*this << "[without_actually_escaping] ";
*this << CI->getType();
}
void visitConvertEscapeToNoEscapeInst(ConvertEscapeToNoEscapeInst *CI) {
*this << (CI->isLifetimeGuaranteed() ? "" : "[not_guaranteed] ")
<< (CI->isEscapedByUser() ? "[escaped] " : "")
<< getIDAndType(CI->getOperand()) << " to " << CI->getType();
}
// The remaining conversion visitors all delegate to
// printUncheckedConversionInst with their single operand.
void visitThinFunctionToPointerInst(ThinFunctionToPointerInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitPointerToThinFunctionInst(PointerToThinFunctionInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitUpcastInst(UpcastInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitAddressToPointerInst(AddressToPointerInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
// pointer_to_address additionally carries [strict] and [invariant] flags.
void visitPointerToAddressInst(PointerToAddressInst *CI) {
*this << getIDAndType(CI->getOperand()) << " to ";
if (CI->isStrict())
*this << "[strict] ";
if (CI->isInvariant())
*this << "[invariant] ";
*this << CI->getType();
}
void visitUncheckedRefCastInst(UncheckedRefCastInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitUncheckedRefCastAddrInst(UncheckedRefCastAddrInst *CI) {
*this << ' ' << CI->getSourceType() << " in " << getIDAndType(CI->getSrc())
<< " to " << CI->getTargetType() << " in "
<< getIDAndType(CI->getDest());
}
void visitUncheckedAddrCastInst(UncheckedAddrCastInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitUncheckedTrivialBitCastInst(UncheckedTrivialBitCastInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitUncheckedBitwiseCastInst(UncheckedBitwiseCastInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitRefToRawPointerInst(RefToRawPointerInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitRawPointerToRefInst(RawPointerToRefInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
// ref_to_X / X_to_ref conversions for every loadable reference-storage
// kind, generated from ReferenceStorage.def.
#define LOADABLE_REF_STORAGE(Name, ...) \
void visitRefTo##Name##Inst(RefTo##Name##Inst *CI) { \
printUncheckedConversionInst(CI, CI->getOperand()); \
} \
void visit##Name##ToRefInst(Name##ToRefInst *CI) { \
printUncheckedConversionInst(CI, CI->getOperand()); \
}
#include "swift/AST/ReferenceStorage.def"
void visitThinToThickFunctionInst(ThinToThickFunctionInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitThickToObjCMetatypeInst(ThickToObjCMetatypeInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitObjCToThickMetatypeInst(ObjCToThickMetatypeInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitObjCMetatypeToObjectInst(ObjCMetatypeToObjectInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitObjCExistentialMetatypeToObjectInst(
ObjCExistentialMetatypeToObjectInst *CI) {
printUncheckedConversionInst(CI, CI->getOperand());
}
void visitObjCProtocolInst(ObjCProtocolInst *CI) {
*this << "#" << CI->getProtocol()->getName() << " : " << CI->getType();
}
void visitRefToBridgeObjectInst(RefToBridgeObjectInst *I) {
*this << getIDAndType(I->getConverted()) << ", "
<< getIDAndType(I->getBitsOperand());
}
void visitBridgeObjectToRefInst(BridgeObjectToRefInst *I) {
printUncheckedConversionInst(I, I->getOperand());
}
void visitBridgeObjectToWordInst(BridgeObjectToWordInst *I) {
printUncheckedConversionInst(I, I->getOperand());
}
void visitCopyValueInst(CopyValueInst *I) {
*this << getIDAndType(I->getOperand());
}
// copy_X_value for each checked reference-storage kind, generated from
// ReferenceStorage.def.
#define ALWAYS_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
void visitCopy##Name##ValueInst(Copy##Name##ValueInst *I) { \
*this << getIDAndType(I->getOperand()); \
}
#include "swift/AST/ReferenceStorage.def"
void visitDestroyValueInst(DestroyValueInst *I) {
*this << getIDAndType(I->getOperand());
}
// struct: result type, then typed element list.
void visitStructInst(StructInst *SI) {
*this << SI->getType() << " (";
interleave(SI->getElements(),
[&](const SILValue &V) { *this << getIDAndType(V); },
[&] { *this << ", "; });
*this << ')';
}
// object: base elements, then an optional [tail_elems] section.
void visitObjectInst(ObjectInst *OI) {
*this << OI->getType() << " (";
interleave(OI->getBaseElements(),
[&](const SILValue &V) { *this << getIDAndType(V); },
[&] { *this << ", "; });
if (!OI->getTailElements().empty()) {
*this << ", [tail_elems] ";
interleave(OI->getTailElements(),
[&](const SILValue &V) { *this << getIDAndType(V); },
[&] { *this << ", "; });
}
*this << ')';
}
/// Print a tuple instruction. A tuple whose type has no element names and
/// no varargs is fully inferable from its typed elements, so the type
/// prefix is omitted; otherwise the tuple type is printed first and the
/// elements appear by id only.
void visitTupleInst(TupleInst *TI) {
  // Decide whether the tuple type must be spelled out explicitly.
  bool needsExplicitType = false;
  for (auto &elt : TI->getType().castTo<TupleType>()->getElements()) {
    if (elt.hasName() || elt.isVararg()) {
      needsExplicitType = true;
      break;
    }
  }
  if (needsExplicitType) {
    // Print the type, then each element by id.
    *this << TI->getType() << " (";
    interleave(TI->getElements(),
               [&](const SILValue &elt) { *this << Ctx.getID(elt); },
               [&] { *this << ", "; });
  } else {
    // Just print the typed elements; the type is inferable.
    *this << '(';
    interleave(TI->getElements(),
               [&](const SILValue &elt) { *this << getIDAndType(elt); },
               [&] { *this << ", "; });
  }
  *this << ')';
}
// enum: type, case decl-ref, and (for payload cases) the operand.
void visitEnumInst(EnumInst *UI) {
*this << UI->getType() << ", "
<< SILDeclRef(UI->getElement(), SILDeclRef::Kind::EnumElement);
if (UI->hasOperand()) {
*this << ", " << getIDAndType(UI->getOperand());
}
}
void visitInitEnumDataAddrInst(InitEnumDataAddrInst *UDAI) {
*this << getIDAndType(UDAI->getOperand()) << ", "
<< SILDeclRef(UDAI->getElement(), SILDeclRef::Kind::EnumElement);
}
void visitUncheckedEnumDataInst(UncheckedEnumDataInst *UDAI) {
*this << getIDAndType(UDAI->getOperand()) << ", "
<< SILDeclRef(UDAI->getElement(), SILDeclRef::Kind::EnumElement);
}
void visitUncheckedTakeEnumDataAddrInst(UncheckedTakeEnumDataAddrInst *UDAI) {
*this << getIDAndType(UDAI->getOperand()) << ", "
<< SILDeclRef(UDAI->getElement(), SILDeclRef::Kind::EnumElement);
}
void visitInjectEnumAddrInst(InjectEnumAddrInst *IUAI) {
*this << getIDAndType(IUAI->getOperand()) << ", "
<< SILDeclRef(IUAI->getElement(), SILDeclRef::Kind::EnumElement);
}
// Tuple projections print the operand and the element index.
void visitTupleExtractInst(TupleExtractInst *EI) {
*this << getIDAndType(EI->getOperand()) << ", " << EI->getFieldNo();
}
void visitTupleElementAddrInst(TupleElementAddrInst *EI) {
*this << getIDAndType(EI->getOperand()) << ", " << EI->getFieldNo();
}
// Struct/class projections print the operand and a #-qualified field name.
void visitStructExtractInst(StructExtractInst *EI) {
*this << getIDAndType(EI->getOperand()) << ", #";
printFullContext(EI->getField()->getDeclContext(), PrintState.OS);
*this << EI->getField()->getName().get();
}
void visitStructElementAddrInst(StructElementAddrInst *EI) {
*this << getIDAndType(EI->getOperand()) << ", #";
printFullContext(EI->getField()->getDeclContext(), PrintState.OS);
*this << EI->getField()->getName().get();
}
void visitRefElementAddrInst(RefElementAddrInst *EI) {
*this << getIDAndType(EI->getOperand()) << ", #";
printFullContext(EI->getField()->getDeclContext(), PrintState.OS);
*this << EI->getField()->getName().get();
}
void visitRefTailAddrInst(RefTailAddrInst *RTAI) {
*this << getIDAndType(RTAI->getOperand()) << ", " << RTAI->getTailType();
}
void visitDestructureStructInst(DestructureStructInst *DSI) {
*this << getIDAndType(DSI->getOperand());
}
void visitDestructureTupleInst(DestructureTupleInst *DTI) {
*this << getIDAndType(DTI->getOperand());
}
// Shared prefix for method-lookup instructions: 'operand, member'.
void printMethodInst(MethodInst *I, SILValue Operand) {
  *this << getIDAndType(Operand) << ", " << I->getMember();
}
// The four *_method lookups all print as:
//   'operand, member : InterfaceType, LoweredSILType'.
void visitClassMethodInst(ClassMethodInst *AMI) {
  printMethodInst(AMI, AMI->getOperand());
  *this << " : " << AMI->getMember().getDecl()->getInterfaceType();
  *this << ", ";
  *this << AMI->getType();
}
void visitSuperMethodInst(SuperMethodInst *AMI) {
  printMethodInst(AMI, AMI->getOperand());
  *this << " : " << AMI->getMember().getDecl()->getInterfaceType();
  *this << ", ";
  *this << AMI->getType();
}
void visitObjCMethodInst(ObjCMethodInst *AMI) {
  printMethodInst(AMI, AMI->getOperand());
  *this << " : " << AMI->getMember().getDecl()->getInterfaceType();
  *this << ", ";
  *this << AMI->getType();
}
void visitObjCSuperMethodInst(ObjCSuperMethodInst *AMI) {
  printMethodInst(AMI, AMI->getOperand());
  *this << " : " << AMI->getMember().getDecl()->getInterfaceType();
  *this << ", ";
  *this << AMI->getType();
}
// witness_method: '$LookupType, member : InterfaceType [, openedOperand] : Type'.
// The member's interface type is printed fully qualified relative to the
// current module so the parser can resolve it unambiguously.
void visitWitnessMethodInst(WitnessMethodInst *WMI) {
  PrintOptions QualifiedSILTypeOptions =
      PrintOptions::printQualifiedSILType();
  QualifiedSILTypeOptions.CurrentModule = WMI->getModule().getSwiftModule();
  *this << "$" << WMI->getLookupType() << ", " << WMI->getMember() << " : ";
  WMI->getMember().getDecl()->getInterfaceType().print(
      PrintState.OS, QualifiedSILTypeOptions);
  // A type-dependent operand exists when the lookup type is an opened
  // existential; only the first operand is part of the textual form.
  if (!WMI->getTypeDependentOperands().empty()) {
    *this << ", ";
    *this << getIDAndType(WMI->getTypeDependentOperands()[0].get());
  }
  *this << " : " << WMI->getType();
}
// open_existential_addr additionally records its access kind as a
// leading 'immutable_access'/'mutable_access' keyword.
void visitOpenExistentialAddrInst(OpenExistentialAddrInst *OI) {
  if (OI->getAccessKind() == OpenedExistentialAccess::Immutable)
    *this << "immutable_access ";
  else
    *this << "mutable_access ";
  *this << getIDAndType(OI->getOperand()) << " to " << OI->getType();
}
// The remaining open_existential_* forms print 'operand to ResultType'.
void visitOpenExistentialRefInst(OpenExistentialRefInst *OI) {
  *this << getIDAndType(OI->getOperand()) << " to " << OI->getType();
}
void visitOpenExistentialMetatypeInst(OpenExistentialMetatypeInst *OI) {
  *this << getIDAndType(OI->getOperand()) << " to " << OI->getType();
}
void visitOpenExistentialBoxInst(OpenExistentialBoxInst *OI) {
  *this << getIDAndType(OI->getOperand()) << " to " << OI->getType();
}
void visitOpenExistentialBoxValueInst(OpenExistentialBoxValueInst *OI) {
  *this << getIDAndType(OI->getOperand()) << " to " << OI->getType();
}
void visitOpenExistentialValueInst(OpenExistentialValueInst *OI) {
  *this << getIDAndType(OI->getOperand()) << " to " << OI->getType();
}
// init_existential_* print the operand plus the formal concrete type being
// stored (and, for value/ref forms, the resulting existential type).
void visitInitExistentialAddrInst(InitExistentialAddrInst *AEI) {
  *this << getIDAndType(AEI->getOperand()) << ", $"
        << AEI->getFormalConcreteType();
}
void visitInitExistentialValueInst(InitExistentialValueInst *AEI) {
  *this << getIDAndType(AEI->getOperand()) << ", $"
        << AEI->getFormalConcreteType() << ", " << AEI->getType();
}
void visitInitExistentialRefInst(InitExistentialRefInst *AEI) {
  *this << getIDAndType(AEI->getOperand()) << " : $"
        << AEI->getFormalConcreteType() << ", " << AEI->getType();
}
void visitInitExistentialMetatypeInst(InitExistentialMetatypeInst *AEI) {
  *this << getIDAndType(AEI->getOperand()) << ", " << AEI->getType();
}
void visitAllocExistentialBoxInst(AllocExistentialBoxInst *AEBI) {
  *this << AEBI->getExistentialType() << ", $"
        << AEBI->getFormalConcreteType();
}
void visitDeinitExistentialAddrInst(DeinitExistentialAddrInst *DEI) {
  *this << getIDAndType(DEI->getOperand());
}
void visitDeinitExistentialValueInst(DeinitExistentialValueInst *DEI) {
  *this << getIDAndType(DEI->getOperand());
}
void visitDeallocExistentialBoxInst(DeallocExistentialBoxInst *DEI) {
  *this << getIDAndType(DEI->getOperand()) << ", $" << DEI->getConcreteType();
}
void visitProjectBlockStorageInst(ProjectBlockStorageInst *PBSI) {
  *this << getIDAndType(PBSI->getOperand());
}
// init_block_storage_header: storage operand, the invoke function (with
// its substitutions and type), and the resulting block type.
void visitInitBlockStorageHeaderInst(InitBlockStorageHeaderInst *IBSHI) {
  *this << getIDAndType(IBSHI->getBlockStorage()) << ", invoke "
        << Ctx.getID(IBSHI->getInvokeFunction());
  printSubstitutions(IBSHI->getSubstitutions());
  *this << " : " << IBSHI->getInvokeFunction()->getType()
        << ", type " << IBSHI->getType();
}
// Metatype queries print 'ResultMetatype, operand'.
void visitValueMetatypeInst(ValueMetatypeInst *MI) {
  *this << MI->getType() << ", " << getIDAndType(MI->getOperand());
}
void visitExistentialMetatypeInst(ExistentialMetatypeInst *MI) {
  *this << MI->getType() << ", " << getIDAndType(MI->getOperand());
}
void visitMetatypeInst(MetatypeInst *MI) { *this << MI->getType(); }
// Single-operand lifetime/bridging markers: just 'operand : $Type'.
void visitFixLifetimeInst(FixLifetimeInst *RI) {
  *this << getIDAndType(RI->getOperand());
}
void visitEndLifetimeInst(EndLifetimeInst *ELI) {
  *this << getIDAndType(ELI->getOperand());
}
void visitValueToBridgeObjectInst(ValueToBridgeObjectInst *VBOI) {
  *this << getIDAndType(VBOI->getOperand());
}
void visitClassifyBridgeObjectInst(ClassifyBridgeObjectInst *CBOI) {
  *this << getIDAndType(CBOI->getOperand());
}
void visitMarkDependenceInst(MarkDependenceInst *MDI) {
  *this << getIDAndType(MDI->getValue()) << " on "
        << getIDAndType(MDI->getBase());
}
void visitCopyBlockInst(CopyBlockInst *RI) {
  *this << getIDAndType(RI->getOperand());
}
void visitCopyBlockWithoutEscapingInst(CopyBlockWithoutEscapingInst *RI) {
  *this << getIDAndType(RI->getBlock()) << " withoutEscaping "
        << getIDAndType(RI->getClosure());
}
// Covers all retain/release-style instructions; the optional '[nonatomic]'
// attribute precedes the operand.
void visitRefCountingInst(RefCountingInst *I) {
  if (I->isNonAtomic())
    *this << "[nonatomic] ";
  *this << getIDAndType(I->getOperand(0));
}
void visitIsUniqueInst(IsUniqueInst *CUI) {
  *this << getIDAndType(CUI->getOperand());
}
void visitIsEscapingClosureInst(IsEscapingClosureInst *CUI) {
  // Non-zero verification type is printed as the '[objc]' attribute.
  if (CUI->getVerificationType())
    *this << "[objc] ";
  *this << getIDAndType(CUI->getOperand());
}
void visitDeallocStackInst(DeallocStackInst *DI) {
  *this << getIDAndType(DI->getOperand());
}
void visitDeallocRefInst(DeallocRefInst *DI) {
  if (DI->canAllocOnStack())
    *this << "[stack] ";
  *this << getIDAndType(DI->getOperand());
}
void visitDeallocPartialRefInst(DeallocPartialRefInst *DPI) {
  *this << getIDAndType(DPI->getInstance());
  *this << ", ";
  *this << getIDAndType(DPI->getMetatype());
}
void visitDeallocValueBufferInst(DeallocValueBufferInst *DVBI) {
  *this << DVBI->getValueType() << " in " << getIDAndType(DVBI->getOperand());
}
void visitDeallocBoxInst(DeallocBoxInst *DI) {
  *this << getIDAndType(DI->getOperand());
}
void visitDestroyAddrInst(DestroyAddrInst *DI) {
  *this << getIDAndType(DI->getOperand());
}
void visitProjectValueBufferInst(ProjectValueBufferInst *PVBI) {
  *this << PVBI->getValueType() << " in " << getIDAndType(PVBI->getOperand());
}
void visitProjectBoxInst(ProjectBoxInst *PBI) {
  *this << getIDAndType(PBI->getOperand()) << ", " << PBI->getFieldIndex();
}
void visitProjectExistentialBoxInst(ProjectExistentialBoxInst *PEBI) {
  *this << PEBI->getType().getObjectType()
        << " in " << getIDAndType(PEBI->getOperand());
}
// begin_access: '[kind] [enforcement] [no_nested_conflict]? [builtin]? operand'.
void visitBeginAccessInst(BeginAccessInst *BAI) {
  *this << '[' << getSILAccessKindName(BAI->getAccessKind()) << "] ["
        << getSILAccessEnforcementName(BAI->getEnforcement()) << "] "
        << (BAI->hasNoNestedConflict() ? "[no_nested_conflict] " : "")
        << (BAI->isFromBuiltin() ? "[builtin] " : "")
        << getIDAndType(BAI->getOperand());
}
void visitEndAccessInst(EndAccessInst *EAI) {
  *this << (EAI->isAborting() ? "[abort] " : "")
        << getIDAndType(EAI->getOperand());
}
// Unpaired variant additionally prints the scratch buffer operand.
void visitBeginUnpairedAccessInst(BeginUnpairedAccessInst *BAI) {
  *this << '[' << getSILAccessKindName(BAI->getAccessKind()) << "] ["
        << getSILAccessEnforcementName(BAI->getEnforcement()) << "] "
        << (BAI->hasNoNestedConflict() ? "[no_nested_conflict] " : "")
        << (BAI->isFromBuiltin() ? "[builtin] " : "")
        << getIDAndType(BAI->getSource()) << ", "
        << getIDAndType(BAI->getBuffer());
}
void visitEndUnpairedAccessInst(EndUnpairedAccessInst *EAI) {
  *this << (EAI->isAborting() ? "[abort] " : "") << '['
        << getSILAccessEnforcementName(EAI->getEnforcement()) << "] "
        << (EAI->isFromBuiltin() ? "[builtin] " : "")
        << getIDAndType(EAI->getOperand());
}
void visitCondFailInst(CondFailInst *FI) {
  *this << getIDAndType(FI->getOperand());
}
// Address arithmetic: 'base, index [, tailType]'.
void visitIndexAddrInst(IndexAddrInst *IAI) {
  *this << getIDAndType(IAI->getBase()) << ", "
        << getIDAndType(IAI->getIndex());
}
void visitTailAddrInst(TailAddrInst *TAI) {
  *this << getIDAndType(TAI->getBase()) << ", "
        << getIDAndType(TAI->getIndex()) << ", " << TAI->getTailType();
}
void visitIndexRawPointerInst(IndexRawPointerInst *IAI) {
  *this << getIDAndType(IAI->getBase()) << ", "
        << getIDAndType(IAI->getIndex());
}
// 'unreachable' has no operands — nothing beyond the opcode is printed.
void visitUnreachableInst(UnreachableInst *UI) {}
void visitReturnInst(ReturnInst *RI) {
  *this << getIDAndType(RI->getOperand());
}
void visitThrowInst(ThrowInst *TI) {
  *this << getIDAndType(TI->getOperand());
}
void visitUnwindInst(UnwindInst *UI) {
  // no operands
}
// yield: values (parenthesized unless there is exactly one), then the
// resume and unwind successor blocks.
void visitYieldInst(YieldInst *YI) {
  auto values = YI->getYieldedValues();
  if (values.size() != 1) *this << '(';
  interleave(values,
             [&](SILValue value) { *this << getIDAndType(value); },
             [&] { *this << ", "; });
  if (values.size() != 1) *this << ')';
  *this << ", resume " << Ctx.getID(YI->getResumeBB())
        << ", unwind " << Ctx.getID(YI->getUnwindBB());
}
// switch_value: operand, then ', case <value>: <dest>' per case, then an
// optional ', default <dest>'.
void visitSwitchValueInst(SwitchValueInst *SII) {
  *this << getIDAndType(SII->getOperand());
  for (unsigned i = 0, e = SII->getNumCases(); i < e; ++i) {
    SILValue value;
    SILBasicBlock *dest;
    std::tie(value, dest) = SII->getCase(i);
    *this << ", case " << Ctx.getID(value) << ": " << Ctx.getID(dest);
  }
  if (SII->hasDefault())
    *this << ", default " << Ctx.getID(SII->getDefaultBB());
}
// Shared by switch_enum and switch_enum_addr. Per-case and default profile
// counts, when known, are emitted as '!case_count(..)'/'!default_count(..)'.
void printSwitchEnumInst(SwitchEnumInstBase *SOI) {
  *this << getIDAndType(SOI->getOperand());
  for (unsigned i = 0, e = SOI->getNumCases(); i < e; ++i) {
    EnumElementDecl *elt;
    SILBasicBlock *dest;
    std::tie(elt, dest) = SOI->getCase(i);
    *this << ", case " << SILDeclRef(elt, SILDeclRef::Kind::EnumElement)
          << ": " << Ctx.getID(dest);
    if (SOI->getCaseCount(i)) {
      *this << " !case_count(" << SOI->getCaseCount(i).getValue() << ")";
    }
  }
  if (SOI->hasDefault()) {
    *this << ", default " << Ctx.getID(SOI->getDefaultBB());
    if (SOI->getDefaultCount()) {
      *this << " !default_count(" << SOI->getDefaultCount().getValue() << ")";
    }
  }
}
void visitSwitchEnumInst(SwitchEnumInst *SOI) {
  printSwitchEnumInst(SOI);
}
void visitSwitchEnumAddrInst(SwitchEnumAddrInst *SOI) {
  printSwitchEnumInst(SOI);
}
// Shared by select_enum and select_enum_addr; cases map enum elements to
// result values, and the trailing ' : Type' is the result type.
void printSelectEnumInst(SelectEnumInstBase *SEI) {
  *this << getIDAndType(SEI->getEnumOperand());
  for (unsigned i = 0, e = SEI->getNumCases(); i < e; ++i) {
    EnumElementDecl *elt;
    SILValue result;
    std::tie(elt, result) = SEI->getCase(i);
    *this << ", case " << SILDeclRef(elt, SILDeclRef::Kind::EnumElement)
          << ": " << Ctx.getID(result);
  }
  if (SEI->hasDefault())
    *this << ", default " << Ctx.getID(SEI->getDefaultResult());
  *this << " : " << SEI->getType();
}
void visitSelectEnumInst(SelectEnumInst *SEI) {
  printSelectEnumInst(SEI);
}
void visitSelectEnumAddrInst(SelectEnumAddrInst *SEI) {
  printSelectEnumInst(SEI);
}
void visitSelectValueInst(SelectValueInst *SVI) {
  *this << getIDAndType(SVI->getOperand());
  for (unsigned i = 0, e = SVI->getNumCases(); i < e; ++i) {
    SILValue casevalue;
    SILValue result;
    std::tie(casevalue, result) = SVI->getCase(i);
    *this << ", case " << Ctx.getID(casevalue) << ": " << Ctx.getID(result);
  }
  if (SVI->hasDefault())
    *this << ", default " << Ctx.getID(SVI->getDefaultResult());
  *this << " : " << SVI->getType();
}
// dynamic_method_br: operand, member, has-method dest, no-method dest.
void visitDynamicMethodBranchInst(DynamicMethodBranchInst *DMBI) {
  *this << getIDAndType(DMBI->getOperand()) << ", " << DMBI->getMember()
        << ", " << Ctx.getID(DMBI->getHasMethodBB()) << ", "
        << Ctx.getID(DMBI->getNoMethodBB());
}
// Print a parenthesized, comma-separated list of branch arguments with
// their types. An empty argument list produces no output at all (not
// even empty parentheses).
void printBranchArgs(OperandValueArrayRef args) {
  if (!args.empty()) {
    *this << '(';
    interleave(args,
               [&](SILValue arg) { *this << getIDAndType(arg); },
               [&] { *this << ", "; });
    *this << ')';
  }
}
// br: destination block followed by its (optionally empty) argument list.
void visitBranchInst(BranchInst *UBI) {
  *this << Ctx.getID(UBI->getDestBB());
  printBranchArgs(UBI->getArgs());
}
// cond_br: condition, true dest (+args), false dest (+args), and optional
// '!true_count'/'!false_count' profile annotations.
void visitCondBranchInst(CondBranchInst *CBI) {
  *this << Ctx.getID(CBI->getCondition()) << ", "
        << Ctx.getID(CBI->getTrueBB());
  printBranchArgs(CBI->getTrueArgs());
  *this << ", " << Ctx.getID(CBI->getFalseBB());
  printBranchArgs(CBI->getFalseArgs());
  if (CBI->getTrueBBCount())
    *this << " !true_count(" << CBI->getTrueBBCount().getValue() << ")";
  if (CBI->getFalseBBCount())
    *this << " !false_count(" << CBI->getFalseBBCount().getValue() << ")";
}
// keypath: result type, optional generic signature, then a parenthesized
// pattern (optional ObjC string, root type, ';'-separated components),
// followed by substitutions and the operand list when present.
void visitKeyPathInst(KeyPathInst *KPI) {
  *this << KPI->getType() << ", ";
  auto pattern = KPI->getPattern();
  if (pattern->getGenericSignature()) {
    pattern->getGenericSignature()->print(PrintState.OS);
    *this << ' ';
  }
  *this << "(";
  if (!pattern->getObjCString().empty())
    *this << "objc \"" << pattern->getObjCString() << "\"; ";
  *this << "root $" << KPI->getPattern()->getRootType();
  for (auto &component : pattern->getComponents()) {
    *this << "; ";
    printKeyPathPatternComponent(component);
  }
  *this << ')';
  if (!KPI->getSubstitutions().empty()) {
    *this << ' ';
    printSubstitutions(KPI->getSubstitutions());
  }
  // Captured index operands, printed as a parenthesized ID list.
  if (!KPI->getAllOperands().empty()) {
    *this << " (";
    interleave(KPI->getAllOperands(),
               [&](const Operand &operand) {
                 *this << Ctx.getID(operand.get());
               }, [&]{
                 *this << ", ";
               });
    *this << ")";
  }
}
// Print one component of a key path pattern in SIL syntax. Dispatches on
// the component kind: stored property, computed (gettable/settable)
// property with its id/getter/setter/indices, or one of the optional
// wrap/chain/force forms.
void
printKeyPathPatternComponent(const KeyPathPatternComponent &component) {
  // Helper: print subscript index operands as
  // '[%$N : $FormalType : LoweredType, ...]'.
  auto printComponentIndices =
    [&](ArrayRef<KeyPathPatternComponent::Index> indices) {
      *this << '[';
      interleave(indices,
        [&](const KeyPathPatternComponent::Index &i) {
          *this << "%$" << i.Operand << " : $"
                << i.FormalType << " : "
                << i.LoweredType;
        }, [&]{
          *this << ", ";
        });
      *this << ']';
    };
  switch (auto kind = component.getKind()) {
  case KeyPathPatternComponent::Kind::StoredProperty: {
    auto prop = component.getStoredPropertyDecl();
    *this << "stored_property #";
    printValueDecl(prop, PrintState.OS);
    *this << " : $" << component.getComponentType();
    break;
  }
  case KeyPathPatternComponent::Kind::GettableProperty:
  case KeyPathPatternComponent::Kind::SettableProperty: {
    *this << (kind == KeyPathPatternComponent::Kind::GettableProperty
                ? "gettable_property $" : "settable_property $")
          << component.getComponentType() << ", "
          << " id ";
    // The computed-property id identifies the abstract property; it may
    // be a SILDeclRef, a SIL function, or an AST property decl.
    auto id = component.getComputedPropertyId();
    switch (id.getKind()) {
    case KeyPathPatternComponent::ComputedPropertyId::DeclRef: {
      auto declRef = id.getDeclRef();
      *this << declRef << " : "
            << declRef.getDecl()->getInterfaceType();
      break;
    }
    case KeyPathPatternComponent::ComputedPropertyId::Function: {
      id.getFunction()->printName(PrintState.OS);
      *this << " : " << id.getFunction()->getLoweredType();
      break;
    }
    case KeyPathPatternComponent::ComputedPropertyId::Property: {
      *this << "##";
      printValueDecl(id.getProperty(), PrintState.OS);
      break;
    }
    }
    *this << ", getter ";
    component.getComputedPropertyGetter()->printName(PrintState.OS);
    *this << " : "
          << component.getComputedPropertyGetter()->getLoweredType();
    // Setter is only present for the settable_property form.
    if (kind == KeyPathPatternComponent::Kind::SettableProperty) {
      *this << ", setter ";
      component.getComputedPropertySetter()->printName(PrintState.OS);
      *this << " : "
            << component.getComputedPropertySetter()->getLoweredType();
    }
    // Subscript components carry index operands plus the functions used
    // for index equality and hashing.
    if (!component.getSubscriptIndices().empty()) {
      *this << ", indices ";
      printComponentIndices(component.getSubscriptIndices());
      *this << ", indices_equals ";
      component.getSubscriptIndexEquals()->printName(PrintState.OS);
      *this << " : "
            << component.getSubscriptIndexEquals()->getLoweredType();
      *this << ", indices_hash ";
      component.getSubscriptIndexHash()->printName(PrintState.OS);
      *this << " : "
            << component.getSubscriptIndexHash()->getLoweredType();
    }
    // Components referencing a property declared in another module also
    // record that external decl and its substitutions.
    if (auto external = component.getExternalDecl()) {
      *this << ", external #";
      printValueDecl(external, PrintState.OS);
      auto subs = component.getExternalSubstitutions();
      if (!subs.empty()) {
        printSubstitutions(subs);
      }
    }
    break;
  }
  case KeyPathPatternComponent::Kind::OptionalWrap:
  case KeyPathPatternComponent::Kind::OptionalChain:
  case KeyPathPatternComponent::Kind::OptionalForce: {
    switch (kind) {
    case KeyPathPatternComponent::Kind::OptionalWrap:
      *this << "optional_wrap : $";
      break;
    case KeyPathPatternComponent::Kind::OptionalChain:
      *this << "optional_chain : $";
      break;
    case KeyPathPatternComponent::Kind::OptionalForce:
      *this << "optional_force : $";
      break;
    default:
      llvm_unreachable("out of sync");
    }
    *this << component.getComponentType();
    break;
  }
  }
}
};
} // end anonymous namespace
// Print a basic block's stable textual ID (e.g. "bb3") to the stream,
// using a throwaway printing context to compute the numbering.
static void printBlockID(raw_ostream &OS, SILBasicBlock *bb) {
  SILPrintContext blockCtx(OS);
  OS << blockCtx.getID(bb);
}
// Print this block as an operand reference (its block ID).
// NOTE(review): the PrintType parameter is ignored here — presumably the
// signature matches a generic printAsOperand interface; confirm callers
// never expect a type to be printed.
void SILBasicBlock::printAsOperand(raw_ostream &OS, bool PrintType) {
  printBlockID(OS, this);
}
//===----------------------------------------------------------------------===//
// Printing for SILInstruction, SILBasicBlock, SILFunction, and SILModule
//===----------------------------------------------------------------------===//
/// Dump this node to stderr (debugger convenience).
void SILNode::dump() const {
  print(llvm::errs());
}
/// Print this node to the given stream using a fresh printing context.
void SILNode::print(raw_ostream &OS) const {
  SILPrintContext Ctx(OS);
  SILPrinter(Ctx).print(this);
}
/// Dump this instruction to stderr (debugger convenience).
void SILInstruction::dump() const {
  print(llvm::errs());
}
// Out-of-line thunk so single-value instructions dump like any instruction.
void SingleValueInstruction::dump() const {
  SILInstruction::dump();
}
/// Print this instruction to the given stream using a fresh printing context.
void SILInstruction::print(raw_ostream &OS) const {
  SILPrintContext Ctx(OS);
  SILPrinter(Ctx).print(this);
}
/// Pretty-print the SILBasicBlock to errs.
void SILBasicBlock::dump() const {
  print(llvm::errs());
}
/// Pretty-print the SILBasicBlock to the designated stream.
void SILBasicBlock::print(raw_ostream &OS) const {
  SILPrintContext Ctx(OS);
  // Print the debug scope (and compute if we didn't do it already).
  // This pre-pass numbers/emits the scopes for every instruction so the
  // body can reference them.
  auto &SM = this->getParent()->getModule().getASTContext().SourceMgr;
  for (auto &I : *this) {
    SILPrinter P(Ctx);
    P.printDebugScope(I.getDebugScope(), SM);
  }
  SILPrinter(Ctx).print(this);
}
/// Print the block into an existing printing context (shares numbering
/// with whatever else that context has printed).
void SILBasicBlock::print(raw_ostream &OS, SILPrintContext &Ctx) const {
  SILPrinter(Ctx).print(this);
}
/// Pretty-print the SILFunction to errs.
void SILFunction::dump(bool Verbose) const {
  SILPrintContext Ctx(llvm::errs(), Verbose);
  print(Ctx);
}
// This is out of line so the debugger can find it.
void SILFunction::dump() const {
  dump(false);
}
/// Pretty-print the function into the named file (non-verbose).
void SILFunction::dump(const char *FileName) const {
  std::error_code EC;
  // NOTE(review): EC is not checked — a failure to open FileName is
  // silently ignored; acceptable for a debugging helper but worth knowing.
  llvm::raw_fd_ostream os(FileName, EC, llvm::sys::fs::OpenFlags::F_None);
  print(os);
}
// Map a SILLinkage to its textual keyword, including the trailing space
// so callers can stream it directly before the next token.
static StringRef getLinkageString(SILLinkage linkage) {
  switch (linkage) {
  case SILLinkage::Public: return "public ";
  case SILLinkage::PublicNonABI: return "non_abi ";
  case SILLinkage::Hidden: return "hidden ";
  case SILLinkage::Shared: return "shared ";
  case SILLinkage::Private: return "private ";
  case SILLinkage::PublicExternal: return "public_external ";
  case SILLinkage::HiddenExternal: return "hidden_external ";
  case SILLinkage::SharedExternal: return "shared_external ";
  case SILLinkage::PrivateExternal: return "private_external ";
  }
  llvm_unreachable("bad linkage");
}
// Print the linkage keyword, suppressing it when it is the default for a
// definition/declaration respectively (keeps textual SIL uncluttered and
// round-trips, since the parser assumes the same defaults).
static void printLinkage(llvm::raw_ostream &OS, SILLinkage linkage,
                         bool isDefinition) {
  if ((isDefinition && linkage == SILLinkage::DefaultForDefinition) ||
      (!isDefinition && linkage == SILLinkage::DefaultForDeclaration))
    return;
  OS << getLinkageString(linkage);
}
/// Pretty-print the SILFunction to the designated stream.
/// Emits (in order): debug scopes, an optional specialization-info comment,
/// a demangled-name comment, the 'sil' header with linkage and attributes,
/// the function type (with disambiguated generic parameter names), and —
/// for definitions — the body between braces.
void SILFunction::print(SILPrintContext &PrintCtx) const {
  llvm::raw_ostream &OS = PrintCtx.OS();
  if (PrintCtx.printDebugInfo()) {
    // Pre-print all debug scopes so instructions can reference them by ID.
    auto &SM = getModule().getASTContext().SourceMgr;
    for (auto &BB : *this)
      for (auto &I : BB) {
        SILPrinter P(PrintCtx);
        P.printDebugScope(I.getDebugScope(), SM);
      }
    OS << "\n";
  }
  if (SILPrintGenericSpecializationInfo) {
    if (isSpecialization()) {
      printGenericSpecializationInfo(OS, "function", getName(),
                                     getSpecializationInfo());
    }
  }
  OS << "// " << demangleSymbol(getName()) << '\n';
  OS << "sil ";
  printLinkage(OS, getLinkage(), isDefinition());
  // Function attributes, each printed as a bracketed keyword.
  if (isTransparent())
    OS << "[transparent] ";
  switch (isSerialized()) {
  case IsNotSerialized: break;
  case IsSerializable: OS << "[serializable] "; break;
  case IsSerialized: OS << "[serialized] "; break;
  }
  switch (isThunk()) {
  case IsNotThunk: break;
  case IsThunk: OS << "[thunk] "; break;
  case IsSignatureOptimizedThunk:
    OS << "[signature_optimized_thunk] ";
    break;
  case IsReabstractionThunk: OS << "[reabstraction_thunk] "; break;
  }
  if (isDynamicallyReplaceable()) {
    // NOTE(review): "replacable" is misspelled, but this exact spelling is
    // the textual-SIL keyword — do not "fix" it without updating the parser.
    OS << "[dynamically_replacable] ";
  }
  if (isWithoutActuallyEscapingThunk())
    OS << "[without_actually_escaping] ";
  if (isGlobalInit())
    OS << "[global_init] ";
  if (isWeakLinked())
    OS << "[_weakLinked] ";
  switch (getInlineStrategy()) {
  case NoInline: OS << "[noinline] "; break;
  case AlwaysInline: OS << "[always_inline] "; break;
  case InlineDefault: break;
  }
  switch (getOptimizationMode()) {
  case OptimizationMode::NoOptimization: OS << "[Onone] "; break;
  case OptimizationMode::ForSpeed: OS << "[Ospeed] "; break;
  case OptimizationMode::ForSize: OS << "[Osize] "; break;
  default: break;
  }
  if (getEffectsKind() == EffectsKind::ReadOnly)
    OS << "[readonly] ";
  else if (getEffectsKind() == EffectsKind::ReadNone)
    OS << "[readnone] ";
  else if (getEffectsKind() == EffectsKind::ReadWrite)
    OS << "[readwrite] ";
  else if (getEffectsKind() == EffectsKind::ReleaseNone)
    OS << "[releasenone] ";
  if (auto *replacedFun = getDynamicallyReplacedFunction()) {
    OS << "[dynamic_replacement_for \"";
    OS << replacedFun->getName();
    OS << "\"] ";
  }
  if (hasObjCReplacement()) {
    OS << "[objc_replacement_for \"";
    OS << getObjCReplacement().str();
    OS << "\"] ";
  }
  for (auto &Attr : getSemanticsAttrs())
    OS << "[_semantics \"" << Attr << "\"] ";
  for (auto *Attr : getSpecializeAttrs()) {
    OS << "[_specialize "; Attr->print(OS); OS << "] ";
  }
  // TODO: Handle clang node owners which don't have a name.
  if (hasClangNode() && getClangNodeOwner()->hasName()) {
    OS << "[clang ";
    printValueDecl(getClangNodeOwner(), OS);
    OS << "] ";
  }
  // Handle functions that are deserialized from canonical SIL. Normally, we
  // should emit SIL with the correct SIL stage, so preserving this attribute
  // won't be necessary. But consider serializing raw SIL (either textual SIL or
  // SIB) after importing canonical SIL from another module. If the imported
  // functions are reserialized (e.g. shared linkage), then we must preserve
  // this attribute.
  if (WasDeserializedCanonical && getModule().getStage() == SILStage::Raw)
    OS << "[canonical] ";
  printName(OS);
  OS << " : $";
  // Print the type by substituting our context parameter names for the dependent
  // parameters. In SIL, we may end up with multiple generic parameters that
  // have the same name from different contexts, for instance, a generic
  // protocol requirement with a generic method parameter <T>, which is
  // witnessed by a generic type that has a generic type parameter also named
  // <T>, so we may need to introduce disambiguating aliases.
  llvm::DenseMap<CanType, Identifier> Aliases;
  llvm::DenseSet<Identifier> UsedNames;
  auto sig = getLoweredFunctionType()->getGenericSignature();
  auto *env = getGenericEnvironment();
  if (sig && env) {
    llvm::SmallString<16> disambiguatedNameBuf;
    unsigned disambiguatedNameCounter = 1;
    for (auto *paramTy : sig->getGenericParams()) {
      auto sugaredTy = env->getSugaredType(paramTy);
      Identifier name = sugaredTy->getName();
      // Append an increasing counter until the name is unique.
      while (!UsedNames.insert(name).second) {
        disambiguatedNameBuf.clear();
        {
          llvm::raw_svector_ostream names(disambiguatedNameBuf);
          names << sugaredTy->getName() << disambiguatedNameCounter++;
        }
        name = getASTContext().getIdentifier(disambiguatedNameBuf);
      }
      if (name != sugaredTy->getName()) {
        Aliases[paramTy->getCanonicalType()] = name;
        // Also for the archetype
        auto archetypeTy = env->mapTypeIntoContext(paramTy)
            ->getAs<ArchetypeType>();
        if (archetypeTy)
          Aliases[archetypeTy->getCanonicalType()] = name;
      }
    }
  }
  {
    PrintOptions withGenericEnvironment = PrintOptions::printSIL();
    withGenericEnvironment.GenericEnv = env;
    withGenericEnvironment.AlternativeTypeNames =
        Aliases.empty() ? nullptr : &Aliases;
    LoweredType->print(OS, withGenericEnvironment);
  }
  if (!isExternalDeclaration()) {
    if (auto eCount = getEntryCount()) {
      OS << " !function_entry_count(" << eCount.getValue() << ")";
    }
    OS << " {\n";
    // Pass the same alias map so types inside the body are printed with
    // the disambiguated generic parameter names chosen above.
    SILPrinter(PrintCtx, (Aliases.empty() ? nullptr : &Aliases))
        .print(this);
    OS << "} // end sil function '" << getName() << '\'';
  }
  OS << "\n\n";
}
/// Pretty-print the SILFunction's name using SIL syntax,
/// '@function_mangled_name'.
void SILFunction::printName(raw_ostream &OS) const {
  OS << "@" << Name;
}
/// Pretty-print a global variable to the designated stream: a demangled-name
/// comment, the 'sil_global' header with linkage/attributes, the lowered
/// type, and — when present — the static initializer instruction list.
void SILGlobalVariable::print(llvm::raw_ostream &OS, bool Verbose) const {
  OS << "// " << demangleSymbol(getName()) << '\n';
  OS << "sil_global ";
  printLinkage(OS, getLinkage(), isDefinition());
  if (isSerialized())
    OS << "[serialized] ";
  if (isLet())
    OS << "[let] ";
  printName(OS);
  OS << " : " << LoweredType;
  if (!StaticInitializerBlock.empty()) {
    OS << " = {\n";
    {
      // Scope the context/printer so the block below shares one numbering.
      SILPrintContext Ctx(OS);
      SILPrinter Printer(Ctx);
      for (const SILInstruction &I : StaticInitializerBlock) {
        Printer.print(&I);
      }
    }
    OS << "}\n";
  }
  OS << "\n\n";
}
/// Dump to stderr (debugger convenience).
void SILGlobalVariable::dump(bool Verbose) const {
  print(llvm::errs(), Verbose);
}
void SILGlobalVariable::dump() const {
  dump(false);
}
/// Print the SIL-syntax name, '@global_mangled_name'.
void SILGlobalVariable::printName(raw_ostream &OS) const {
  OS << "@" << Name;
}
/// Pretty-print the SILModule to errs.
void SILModule::dump(bool Verbose) const {
  SILPrintContext Ctx(llvm::errs(), Verbose);
  print(Ctx);
}
/// Pretty-print the module into the named file.
/// NOTE(review): the open error code EC is not checked; a failed open is
/// silently ignored (debug helper).
void SILModule::dump(const char *FileName, bool Verbose,
                     bool PrintASTDecls) const {
  std::error_code EC;
  llvm::raw_fd_ostream os(FileName, EC, llvm::sys::fs::OpenFlags::F_None);
  SILPrintContext Ctx(os, Verbose);
  print(Ctx, getSwiftModule(), PrintASTDecls);
}
// Print all sil_global declarations in the module. When the context asks
// for sorted output, globals are emitted in lexicographic name order
// (stable for diffing); otherwise in module order.
static void printSILGlobals(SILPrintContext &Ctx,
                            const SILModule::GlobalListType &Globals) {
  if (Ctx.sortSIL()) {
    std::vector<const SILGlobalVariable *> sorted;
    sorted.reserve(Globals.size());
    for (const SILGlobalVariable &global : Globals)
      sorted.push_back(&global);
    std::sort(sorted.begin(), sorted.end(),
              [](const SILGlobalVariable *lhs,
                 const SILGlobalVariable *rhs) -> bool {
                return lhs->getName().compare(rhs->getName()) == -1;
              });
    for (const SILGlobalVariable *global : sorted)
      global->print(Ctx.OS(), Ctx.printVerbose());
    return;
  }
  for (const SILGlobalVariable &global : Globals)
    global.print(Ctx.OS(), Ctx.printVerbose());
}
// Print all sil functions in the module. Sorted mode emits them in
// lexicographic name order; otherwise module order is preserved.
static void printSILFunctions(SILPrintContext &Ctx,
                              const SILModule::FunctionListType &Functions) {
  if (Ctx.sortSIL()) {
    std::vector<const SILFunction *> sorted;
    sorted.reserve(Functions.size());
    for (const SILFunction &fn : Functions)
      sorted.push_back(&fn);
    std::sort(sorted.begin(), sorted.end(),
              [](const SILFunction *lhs, const SILFunction *rhs) -> bool {
                return lhs->getName().compare(rhs->getName()) == -1;
              });
    for (const SILFunction *fn : sorted)
      fn->print(Ctx);
    return;
  }
  for (const SILFunction &fn : Functions)
    fn.print(Ctx);
}
// Print all sil_vtable declarations; sorted mode orders them by class name.
static void printSILVTables(SILPrintContext &Ctx,
                            const SILModule::VTableListType &VTables) {
  if (!Ctx.sortSIL()) {
    for (const SILVTable &vt : VTables)
      vt.print(Ctx.OS(), Ctx.printVerbose());
    return;
  }
  std::vector<const SILVTable *> vtables;
  vtables.reserve(VTables.size());
  for (const SILVTable &vt : VTables)
    vtables.push_back(&vt);
  std::sort(vtables.begin(), vtables.end(),
    [] (const SILVTable *v1, const SILVTable *v2) -> bool {
      StringRef Name1 = v1->getClass()->getName().str();
      StringRef Name2 = v2->getClass()->getName().str();
      return Name1.compare(Name2) == -1;
    }
  );
  for (const SILVTable *vt : vtables)
    vt->print(Ctx.OS(), Ctx.printVerbose());
}
// Print all sil_witness_table declarations; sorted mode orders them by
// mangled table name.
static void
printSILWitnessTables(SILPrintContext &Ctx,
                      const SILModule::WitnessTableListType &WTables) {
  if (!Ctx.sortSIL()) {
    for (const SILWitnessTable &wt : WTables)
      wt.print(Ctx.OS(), Ctx.printVerbose());
    return;
  }
  std::vector<const SILWitnessTable *> witnesstables;
  witnesstables.reserve(WTables.size());
  for (const SILWitnessTable &wt : WTables)
    witnesstables.push_back(&wt);
  std::sort(witnesstables.begin(), witnesstables.end(),
    [] (const SILWitnessTable *w1, const SILWitnessTable *w2) -> bool {
      return w1->getName().compare(w2->getName()) == -1;
    }
  );
  for (const SILWitnessTable *wt : witnesstables)
    wt->print(Ctx.OS(), Ctx.printVerbose());
}
// Print all sil_default_witness_table declarations; sorted mode orders
// them by protocol name.
static void
printSILDefaultWitnessTables(SILPrintContext &Ctx,
                             const SILModule::DefaultWitnessTableListType &WTables) {
  if (!Ctx.sortSIL()) {
    for (const SILDefaultWitnessTable &wt : WTables)
      wt.print(Ctx.OS(), Ctx.printVerbose());
    return;
  }
  std::vector<const SILDefaultWitnessTable *> witnesstables;
  witnesstables.reserve(WTables.size());
  for (const SILDefaultWitnessTable &wt : WTables)
    witnesstables.push_back(&wt);
  std::sort(witnesstables.begin(), witnesstables.end(),
    [] (const SILDefaultWitnessTable *w1,
        const SILDefaultWitnessTable *w2) -> bool {
      return w1->getProtocol()->getName()
                 .compare(w2->getProtocol()->getName()) == -1;
    }
  );
  for (const SILDefaultWitnessTable *wt : witnesstables)
    wt->print(Ctx.OS(), Ctx.printVerbose());
}
// Print all sil_coverage_map entries; sorted mode orders them by name.
// The collection maps keys to SILCoverageMap pointers, so iteration uses
// M.second.
static void
printSILCoverageMaps(SILPrintContext &Ctx,
                     const SILModule::CoverageMapCollectionType &CoverageMaps) {
  if (!Ctx.sortSIL()) {
    for (const auto &M : CoverageMaps)
      M.second->print(Ctx);
    return;
  }
  std::vector<const SILCoverageMap *> Maps;
  Maps.reserve(CoverageMaps.size());
  for (const auto &M : CoverageMaps)
    Maps.push_back(M.second);
  std::sort(Maps.begin(), Maps.end(),
            [](const SILCoverageMap *LHS, const SILCoverageMap *RHS) -> bool {
              return LHS->getName().compare(RHS->getName()) == -1;
            });
  for (const SILCoverageMap *M : Maps)
    M->print(Ctx);
}
/// Print a sil_property descriptor: attributes, the '#'-prefixed decl,
/// its generic signature (if any), and the parenthesized key-path
/// component (if any).
void SILProperty::print(SILPrintContext &Ctx) const {
  PrintOptions Options = PrintOptions::printSIL();
  auto &OS = Ctx.OS();
  OS << "sil_property ";
  if (isSerialized())
    OS << "[serialized] ";
  OS << '#';
  printValueDecl(getDecl(), OS);
  if (auto sig = getDecl()->getInnermostDeclContext()
                          ->getGenericSignatureOfContext()) {
    sig->getCanonicalSignature()->print(OS, Options);
  }
  OS << " (";
  if (auto component = getComponent())
    SILPrinter(Ctx).printKeyPathPatternComponent(*component);
  OS << ")\n";
}
/// Dump to stderr (debugger convenience).
void SILProperty::dump() const {
  SILPrintContext context(llvm::errs());
  print(context);
}
// Print every sil_property descriptor in the module, in module order
// (no sorted mode for properties).
static void printSILProperties(SILPrintContext &Ctx,
                               const SILModule::PropertyListType &Properties) {
  for (const SILProperty &prop : Properties)
    prop.print(Ctx);
}
/// Pretty-print the SILModule to the designated stream.
/// Emits the sil_stage header, the three standard imports, optionally the
/// associated AST declarations, then every SIL entity list (globals,
/// functions, vtables, witness tables, coverage maps, properties).
void SILModule::print(SILPrintContext &PrintCtx, ModuleDecl *M,
                      bool PrintASTDecls) const {
  llvm::raw_ostream &OS = PrintCtx.OS();
  OS << "sil_stage ";
  switch (Stage) {
  case SILStage::Raw:
    OS << "raw";
    break;
  case SILStage::Canonical:
    OS << "canonical";
    break;
  case SILStage::Lowered:
    OS << "lowered";
    break;
  }
  OS << "\n\nimport " << BUILTIN_NAME
     << "\nimport " << STDLIB_NAME
     << "\nimport " << SWIFT_SHIMS_NAME << "\n\n";
  // Print the declarations and types from the associated context (origin module or
  // current file).
  if (M && PrintASTDecls) {
    PrintOptions Options = PrintOptions::printSIL();
    Options.TypeDefinitions = true;
    Options.VarInitializers = true;
    // FIXME: ExplodePatternBindingDecls is incompatible with VarInitializers!
    Options.ExplodePatternBindingDecls = true;
    Options.SkipImplicit = false;
    Options.PrintGetSetOnRWProperties = true;
    Options.PrintInSILBody = false;
    bool WholeModuleMode = (M == AssociatedDeclContext);
    SmallVector<Decl *, 32> topLevelDecls;
    M->getTopLevelDecls(topLevelDecls);
    for (const Decl *D : topLevelDecls) {
      // Outside whole-module mode, restrict to decls of this file's context.
      if (!WholeModuleMode && !(D->getDeclContext() == AssociatedDeclContext))
        continue;
      if ((isa<ValueDecl>(D) || isa<OperatorDecl>(D) ||
           isa<ExtensionDecl>(D) || isa<ImportDecl>(D)) &&
          !D->isImplicit()) {
        if (isa<AccessorDecl>(D))
          continue;
        // skip to visit ASTPrinter to avoid sil-opt prints duplicated import declarations
        if (auto importDecl = dyn_cast<ImportDecl>(D)) {
          StringRef importName = importDecl->getModule()->getName().str();
          if (importName == BUILTIN_NAME ||
              importName == STDLIB_NAME ||
              importName == SWIFT_SHIMS_NAME)
            continue;
        }
        D->print(OS, Options);
        OS << "\n\n";
      }
    }
  }
  printSILGlobals(PrintCtx, getSILGlobalList());
  printSILFunctions(PrintCtx, getFunctionList());
  printSILVTables(PrintCtx, getVTableList());
  printSILWitnessTables(PrintCtx, getWitnessTableList());
  printSILDefaultWitnessTables(PrintCtx, getDefaultWitnessTableList());
  printSILCoverageMaps(PrintCtx, getCoverageMaps());
  printSILProperties(PrintCtx, getPropertyList());
  OS << "\n\n";
}
void SILNode::dumpInContext() const {
printInContext(llvm::errs());
}
/// Prints this node together with its context (defining scope and users)
/// to the given stream.
void SILNode::printInContext(llvm::raw_ostream &OS) const {
  SILPrintContext printCtx(OS);
  SILPrinter printer(printCtx);
  printer.printInContext(this);
}
/// Debugging helper: prints this instruction with its surrounding context to
/// stderr (llvm::errs()).
void SILInstruction::dumpInContext() const {
printInContext(llvm::errs());
}
/// Prints this instruction together with its context to the given stream.
void SILInstruction::printInContext(llvm::raw_ostream &OS) const {
  SILPrintContext printCtx(OS);
  SILPrinter printer(printCtx);
  printer.printInContext(this);
}
/// Prints the vtable in textual SIL:
///   sil_vtable [serialized] ClassName { entries... }
/// Each entry is "#declref: signature : @impl [kind] // demangled-name".
void SILVTable::print(llvm::raw_ostream &OS, bool Verbose) const {
OS << "sil_vtable ";
if (isSerialized())
OS << "[serialized] ";
OS << getClass()->getName() << " {\n";
PrintOptions QualifiedSILTypeOptions = PrintOptions::printQualifiedSILType();
for (auto &entry : getEntries()) {
OS << " ";
entry.Method.print(OS);
OS << ": ";
bool HasSingleImplementation = false;
switch (entry.Method.kind) {
default:
break;
case SILDeclRef::Kind::IVarDestroyer:
case SILDeclRef::Kind::Destroyer:
case SILDeclRef::Kind::Deallocator:
HasSingleImplementation = true;
}
// No need to emit the signature for methods that may have only
// single implementation, e.g. for destructors.
if (!HasSingleImplementation) {
// Qualify types relative to the method's own module for readability.
QualifiedSILTypeOptions.CurrentModule =
entry.Method.getDecl()->getDeclContext()->getParentModule();
entry.Method.getDecl()->getInterfaceType().print(OS,
QualifiedSILTypeOptions);
OS << " : ";
}
// Only print linkage when it differs from the implementation's own
// (external-stripped) linkage; the common case prints nothing.
if (entry.Linkage !=
stripExternalFromLinkage(entry.Implementation->getLinkage())) {
OS << getLinkageString(entry.Linkage);
}
OS << '@' << entry.Implementation->getName();
switch (entry.TheKind) {
case SILVTable::Entry::Kind::Normal:
break;
case SILVTable::Entry::Kind::Inherited:
OS << " [inherited]";
break;
case SILVTable::Entry::Kind::Override:
OS << " [override]";
break;
}
// Trailing comment with the human-readable (demangled) symbol name.
OS << "\t// " << demangleSymbol(entry.Implementation->getName());
OS << "\n";
}
OS << "}\n\n";
}
/// Debugging helper: prints the vtable to stderr.
void SILVTable::dump() const {
print(llvm::errs());
}
/// Prints the dotted associated-type path of \p path (a chain of dependent
/// member types rooted at a generic parameter).
/// Returns true if anything was printed.
static bool printAssociatedTypePath(llvm::raw_ostream &OS, CanType path) {
  auto memberType = dyn_cast<DependentMemberType>(path);
  if (!memberType) {
    // The root of a path must be a generic parameter; it prints nothing.
    assert(isa<GenericTypeParamType>(path));
    return false;
  }
  // Recurse on the prefix first, then append this path component.
  if (printAssociatedTypePath(OS, memberType.getBase()))
    OS << '.';
  OS << memberType->getName().str();
  return true;
}
/// Prints a single witness-table entry, dispatching on the witness kind.
/// Each kind has its own textual form (documented inline below).
void SILWitnessTable::Entry::print(llvm::raw_ostream &out, bool verbose,
const PrintOptions &options) const {
PrintOptions QualifiedSILTypeOptions = PrintOptions::printQualifiedSILType();
out << " ";
switch (getKind()) {
case WitnessKind::Invalid:
out << "no_default";
break;
case WitnessKind::Method: {
// method #declref: @function
auto &methodWitness = getMethodWitness();
out << "method ";
methodWitness.Requirement.print(out);
out << ": ";
// Qualify types relative to the requirement's module for readability.
QualifiedSILTypeOptions.CurrentModule =
methodWitness.Requirement.getDecl()
->getDeclContext()
->getParentModule();
methodWitness.Requirement.getDecl()->getInterfaceType().print(
out, QualifiedSILTypeOptions);
out << " : ";
// The witness function may be absent; print "nil" in that case.
if (methodWitness.Witness) {
methodWitness.Witness->printName(out);
out << "\t// "
<< demangleSymbol(methodWitness.Witness->getName());
} else {
out << "nil";
}
break;
}
case WitnessKind::AssociatedType: {
// associated_type AssociatedTypeName: ConformingType
auto &assocWitness = getAssociatedTypeWitness();
out << "associated_type ";
out << assocWitness.Requirement->getName() << ": ";
assocWitness.Witness->print(out, options);
break;
}
case WitnessKind::AssociatedTypeProtocol: {
// associated_type_protocol (AssociatedTypeName: Protocol): <conformance>
auto &assocProtoWitness = getAssociatedTypeProtocolWitness();
out << "associated_type_protocol (";
(void) printAssociatedTypePath(out, assocProtoWitness.Requirement);
out << ": " << assocProtoWitness.Protocol->getName() << "): ";
// Abstract (dependent) conformances have no concrete name to print.
if (assocProtoWitness.Witness.isConcrete())
assocProtoWitness.Witness.getConcrete()->printName(out, options);
else
out << "dependent";
break;
}
case WitnessKind::BaseProtocol: {
// base_protocol Protocol: <conformance>
auto &baseProtoWitness = getBaseProtocolWitness();
out << "base_protocol "
<< baseProtoWitness.Requirement->getName() << ": ";
baseProtoWitness.Witness->printName(out, options);
break;
}
}
out << '\n';
}
/// Prints the witness table in textual SIL:
///   sil_witness_table [linkage] [serialized] Conformance { entries... }
/// followed by any conditional-conformance requirements. Declarations
/// (tables without a body) print only the header line.
void SILWitnessTable::print(llvm::raw_ostream &OS, bool Verbose) const {
  // Note: the previous revision also created an unused
  // PrintOptions::printQualifiedSILType() local here; removed.
  PrintOptions Options = PrintOptions::printSIL();
  OS << "sil_witness_table ";
  printLinkage(OS, getLinkage(), /*isDefinition*/ isDefinition());
  if (isSerialized())
    OS << "[serialized] ";
  getConformance()->printName(OS, Options);
  // Resolve archetypes against the conformance's generic environment.
  Options.GenericEnv =
      getConformance()->getDeclContext()->getGenericEnvironmentOfContext();
  // Declarations have no body; stop after the header.
  if (isDeclaration()) {
    OS << "\n\n";
    return;
  }
  OS << " {\n";
  for (auto &witness : getEntries()) {
    witness.print(OS, Verbose, Options);
  }
  for (auto conditionalConformance : getConditionalConformances()) {
    // conditional_conformance (TypeName: Protocol):
    // <conformance>
    OS << " conditional_conformance (";
    conditionalConformance.Requirement.print(OS, Options);
    OS << ": " << conditionalConformance.Conformance.getRequirement()->getName()
       << "): ";
    if (conditionalConformance.Conformance.isConcrete())
      conditionalConformance.Conformance.getConcrete()->printName(OS, Options);
    else
      OS << "dependent";
    OS << '\n';
  }
  OS << "}\n\n";
}
/// Debugging helper: prints the witness table to stderr.
void SILWitnessTable::dump() const {
print(llvm::errs());
}
/// Prints the default witness table:
///   sil_default_witness_table [<Linkage>] <Protocol> { entries }
void SILDefaultWitnessTable::print(llvm::raw_ostream &OS, bool Verbose) const {
  // Note: the previous revision also created an unused
  // PrintOptions::printQualifiedSILType() local here; removed.
  OS << "sil_default_witness_table ";
  printLinkage(OS, getLinkage(), ForDefinition);
  OS << getProtocol()->getName() << " {\n";
  PrintOptions options = PrintOptions::printSIL();
  // Resolve archetypes against the protocol's generic environment.
  options.GenericEnv = Protocol->getGenericEnvironmentOfContext();
  for (auto &witness : getEntries()) {
    witness.print(OS, Verbose, options);
  }
  OS << "}\n\n";
}
/// Debugging helper: prints the default witness table to stderr.
void SILDefaultWitnessTable::dump() const {
print(llvm::errs());
}
/// Prints the coverage map:
///   sil_coverage_map "file" "name" "pgo-name" hash { regions }
/// with one "startLine:startCol -> endLine:endCol : counter" line per region.
void SILCoverageMap::print(SILPrintContext &PrintCtx) const {
llvm::raw_ostream &OS = PrintCtx.OS();
OS << "sil_coverage_map " << QuotedString(getFile()) << " "
<< QuotedString(getName()) << " " << QuotedString(getPGOFuncName()) << " "
<< getHash() << " {\t// " << demangleSymbol(getName()) << "\n";
// In sorted-SIL mode regions are ordered by source position for stable
// output. NOTE(review): this sorts a member inside a const method —
// presumably MappedRegions is mutable; confirm in the class definition.
if (PrintCtx.sortSIL())
std::sort(MappedRegions.begin(), MappedRegions.end(),
[](const MappedRegion &LHS, const MappedRegion &RHS) {
return std::tie(LHS.StartLine, LHS.StartCol, LHS.EndLine, LHS.EndCol) <
std::tie(RHS.StartLine, RHS.StartCol, RHS.EndLine, RHS.EndCol);
});
for (auto &MR : getMappedRegions()) {
OS << " " << MR.StartLine << ":" << MR.StartCol << " -> " << MR.EndLine
<< ":" << MR.EndCol << " : ";
printCounter(OS, MR.Counter);
OS << "\n";
}
OS << "}\n\n";
}
/// Debugging helper: prints the coverage map to stderr.
void SILCoverageMap::dump() const {
print(llvm::errs());
}
#ifndef NDEBUG
/// Debug-build-only helper: recursively dumps this debug scope, its parent
/// chain, and any inlined-call-site scope, indented by \p Indent columns.
void SILDebugScope::dump(SourceManager &SM, llvm::raw_ostream &OS,
unsigned Indent) const {
OS << "{\n";
OS.indent(Indent);
// Print the source location of the scope, when one is attached.
if (Loc.isASTNode())
Loc.getSourceLoc().print(OS, SM);
OS << "\n";
OS.indent(Indent + 2);
OS << " parent: ";
// Parent is either another scope (recurse) or the enclosing function.
if (auto *P = Parent.dyn_cast<const SILDebugScope *>()) {
P->dump(SM, OS, Indent + 2);
OS.indent(Indent + 2);
}
else if (auto *F = Parent.dyn_cast<SILFunction *>())
OS << "@" << F->getName();
else
OS << "nullptr";
OS << "\n";
OS.indent(Indent + 2);
// Scopes produced by inlining also record where they were inlined.
if (auto *CS = InlinedCallSite) {
OS << "inlinedCallSite: ";
CS->dump(SM, OS, Indent + 2);
OS.indent(Indent + 2);
}
OS << "}\n";
}
#endif
/// Prints a _specialize attribute body:
///   exported: <bool>, kind: <partial|full>[, where <requirements...>]
void SILSpecializeAttr::print(llvm::raw_ostream &OS) const {
SILPrintContext Ctx(OS);
// Print other types as their Swift representation.
PrintOptions SubPrinter = PrintOptions::printSIL();
auto exported = isExported() ? "true" : "false";
auto kind = isPartialSpecialization() ? "partial" : "full";
OS << "exported: " << exported << ", ";
OS << "kind: " << kind << ", ";
if (!getRequirements().empty()) {
OS << "where ";
SILFunction *F = getFunction();
assert(F);
auto GenericEnv = F->getGenericEnvironment();
// Requirements are comma-separated via interleave.
interleave(getRequirements(),
[&](Requirement req) {
// Without a generic environment, print the requirement as-is.
if (!GenericEnv) {
req.print(OS, SubPrinter);
return;
}
// Use GenericEnvironment to produce user-friendly
// names instead of something like t_0_0.
auto FirstTy = GenericEnv->getSugaredType(req.getFirstType());
if (req.getKind() != RequirementKind::Layout) {
auto SecondTy =
GenericEnv->getSugaredType(req.getSecondType());
Requirement ReqWithDecls(req.getKind(), FirstTy, SecondTy);
ReqWithDecls.print(OS, SubPrinter);
} else {
// Layout requirements pair a type with a layout constraint.
Requirement ReqWithDecls(req.getKind(), FirstTy,
req.getLayoutConstraint());
ReqWithDecls.print(OS, SubPrinter);
}
},
[&] { OS << ", "; });
}
}
//===----------------------------------------------------------------------===//
// SILPrintContext members
//===----------------------------------------------------------------------===//
/// Convenience constructor: debug-info printing defaults to the global
/// SILPrintDebugInfo setting. Delegates to the four-argument constructor
/// below so the member-init list exists in exactly one place.
SILPrintContext::SILPrintContext(llvm::raw_ostream &OS, bool Verbose,
                                 bool SortedSIL)
    : SILPrintContext(OS, Verbose, SortedSIL, SILPrintDebugInfo) {}
/// Full constructor: caller controls verbosity, sorted output, and whether
/// debug info (locations/scopes) is printed.
SILPrintContext::SILPrintContext(llvm::raw_ostream &OS, bool Verbose,
bool SortedSIL, bool DebugInfo) :
OutStream(OS), Verbose(Verbose), SortedSIL(SortedSIL),
DebugInfo(DebugInfo) { }
/// Switches the printing context to a new function/block. The cached
/// block and value ID maps are only valid per-context, so they are reset
/// whenever the context actually changes.
void SILPrintContext::setContext(const void *FunctionOrBlock) {
  if (FunctionOrBlock == ContextFunctionOrBlock)
    return;
  BlocksToIDMap.clear();
  ValueToIDMap.clear();
  ContextFunctionOrBlock = FunctionOrBlock;
}
// Out-of-line destructor (keeps the class's key function in this TU).
SILPrintContext::~SILPrintContext() {
}
// Hook invoked for every printed instruction; intentionally a no-op here so
// subclasses can override it to customize per-instruction output.
void SILPrintContext::printInstructionCallBack(const SILInstruction *I) {
}
/// Pre-assigns block IDs following the order of \p Blocks, so printed IDs
/// match the caller's (e.g. RPOT) ordering. This is a hack.
void SILPrintContext::initBlockIDs(ArrayRef<const SILBasicBlock *> Blocks) {
  if (Blocks.empty())
    return;
  setContext(Blocks[0]->getParent());
  unsigned nextID = 0;
  for (const SILBasicBlock *BB : Blocks)
    BlocksToIDMap[BB] = nextID++;
}
/// Returns the printable ID for \p Block, lazily numbering all blocks of the
/// enclosing function on first use (in RPOT order when sorted-SIL is on,
/// otherwise in layout order).
ID SILPrintContext::getID(const SILBasicBlock *Block) {
setContext(Block->getParent());
// Lazily initialize the Blocks-to-IDs mapping.
// If we are asked to emit sorted SIL, print out our BBs in RPOT order.
if (BlocksToIDMap.empty()) {
if (sortSIL()) {
// Build reverse post-order by reversing a post-order traversal.
std::vector<SILBasicBlock *> RPOT;
auto *UnsafeF = const_cast<SILFunction *>(Block->getParent());
std::copy(po_begin(UnsafeF), po_end(UnsafeF), std::back_inserter(RPOT));
std::reverse(RPOT.begin(), RPOT.end());
// Initialize IDs so our IDs are in RPOT as well. This is a hack.
for (unsigned Index : indices(RPOT))
BlocksToIDMap[RPOT[Index]] = Index;
} else {
unsigned idx = 0;
for (const SILBasicBlock &B : *Block->getParent())
BlocksToIDMap[&B] = idx++;
}
}
ID R = {ID::SILBasicBlock, BlocksToIDMap[Block]};
return R;
}
/// Returns the printable SSA-value ID for \p node. Null and SILUndef get
/// fixed sentinel IDs; otherwise values are lazily numbered, either via the
/// enclosing function's canonical numbering or — for a detached block — by
/// walking that block's instructions directly.
ID SILPrintContext::getID(const SILNode *node) {
if (node == nullptr)
return {ID::Null, ~0U};
if (isa<SILUndef>(node))
return {ID::SILUndef, 0};
SILBasicBlock *BB = node->getParentBlock();
if (SILFunction *F = BB->getParent()) {
setContext(F);
// Lazily initialize the instruction -> ID mapping.
if (ValueToIDMap.empty())
F->numberValues(ValueToIDMap);
ID R = {ID::SSAValue, ValueToIDMap[node]};
return R;
}
// Block not attached to a function: number values block-locally.
setContext(BB);
// Check if we have initialized our ValueToIDMap yet. If we have, just use
// that.
if (!ValueToIDMap.empty()) {
ID R = {ID::SSAValue, ValueToIDMap[node]};
return R;
}
// Otherwise, initialize the instruction -> ID mapping cache.
unsigned idx = 0;
for (auto &I : *BB) {
// Give the instruction itself the next ID.
ValueToIDMap[&I] = idx;
// If there are no results, make sure we don't reuse that ID.
auto results = I.getResults();
if (results.empty()) {
idx++;
continue;
}
// Otherwise, assign all of the results an index. Note that
// we'll assign the same ID to both the instruction and the
// first result.
for (auto result : results) {
ValueToIDMap[result] = idx++;
}
}
ID R = {ID::SSAValue, ValueToIDMap[node]};
return R;
}
|
#ifndef CaseNode_hpp
#define CaseNode_hpp
#include <vector>
#include "../../exceptions/ExpressionException.hpp"
#include "../../exceptions/PugCompilerException.hpp"
#include "../../expression/IExpressionHandler.hpp"
#include "../../model/PugModel.hpp"
#include "../../template/PugTemplate.hpp"
#include "CaseConditionNode.hpp"
#include "Node.hpp"
namespace pugcpp
{
namespace parser
{
namespace node
{
// AST node for a pug "case" statement: evaluates an expression and executes
// the first matching "when" branch (or "default").
class CaseNode : public Node
{
private:
// The ordered when/default branches of this case block.
vector<shared_ptr<CaseConditionNode>> caseConditionNodes_;
// Evaluates one branch's condition against the model via the expression
// handler; returns whether the branch matches.
bool checkCondition(PugModel &model, shared_ptr<Node> &caseConditionNode, IExpressionHandler &expressionHandler);
public:
// Node representing a single "when" branch body.
class When : public Node
{
public:
When();
void execute(IndentWriter &writer, PugModel &model, PugTemplate &tmplt) override;
};
CaseNode();
// Accessors for the branch list.
vector<shared_ptr<CaseConditionNode>> &getCaseConditionNodes();
void setConditions(const vector<shared_ptr<CaseConditionNode>> &caseConditionNodes);
// Renders the case statement into the writer.
void execute(IndentWriter &writer, PugModel &model, PugTemplate &tmplt) override;
};
} // namespace node
} // namespace parser
} // namespace pugcpp
#endif // CaseNode_hpp
|
#include "StdAfx.h"
#include "Spammer.h"
#include "Player.h"
#include "Projectile.h"
#include "PlayerVisTable.h"
#include <IVehicleSystem.h>
#include <IViewSystem.h>
#include "GameActions.h"
namespace
{
// Result of resolving which entity (and bounding radius) should be used for
// a line-of-sight test against a potential lock-on target.
struct SpammerTarget
{
SpammerTarget(const EntityId _targetId, const float _radius)
: targetId(_targetId)
, radius(_radius)
{
}
const EntityId targetId;
const float radius;
};
// Picks the entity to visibility-test for a candidate target:
// - ordinary targets: the vehicle the actor is linked to (if any), else the
//   source entity itself;
// - "TacticalEntity" class targets: their parent entity when present.
// The returned radius is the world-bounds radius of whichever entity is used.
SpammerTarget GetVisibilityTestTarget( const IEntity* pSourceEntity, const EntityId sourceEntityId, CActor* pActor, const AABB& sourceEntityBounds )
{
assert(pSourceEntity != NULL);
// Cached across calls; class registry lookup is done only once.
static IEntityClass* s_TacticalEntityClass = gEnv->pEntitySystem->GetClassRegistry()->FindClass("TacticalEntity");
if(pSourceEntity->GetClass() != s_TacticalEntityClass)
{
IVehicle* pVehicle = pActor ? pActor->GetLinkedVehicle() : 0;
if (pVehicle)
{
// Target the whole vehicle rather than the actor inside it.
IEntity* pVehicleEntity = pVehicle->GetEntity();
AABB vehicleBounds;
pVehicleEntity->GetWorldBounds(vehicleBounds);
return SpammerTarget(pVehicleEntity->GetId(), vehicleBounds.GetRadius());
}
return SpammerTarget( sourceEntityId, sourceEntityBounds.GetRadius() );
}
// Tactical entities are tested via their parent entity when they have one.
const IEntity* pParentEntity = pSourceEntity->GetParent();
if(pParentEntity)
{
AABB parentBounds;
pParentEntity->GetWorldBounds(parentBounds);
return SpammerTarget( pParentEntity->GetId(), parentBounds.GetRadius() );
}
else
{
return SpammerTarget( sourceEntityId, sourceEntityBounds.GetRadius() );
}
}
};
CRY_IMPLEMENT_GTI(CSpammer, CSingle);
// Starts with no lock-ons; the target list is empty by default.
CSpammerCloudTargets::CSpammerCloudTargets()
: m_numLockOns(0)
{
}
// True when no lock-ons are currently held.
bool CSpammerCloudTargets::Empty() const
{
return m_numLockOns == 0;
}
// Drops all lock-ons and forgets every target.
void CSpammerCloudTargets::Clear()
{
m_numLockOns = 0;
m_targets.clear();
}
// Registers one additional lock-on against 'target'. An already-known target
// just gets its per-target lock count bumped; otherwise a new entry with a
// single lock is appended.
void CSpammerCloudTargets::LockOn(EntityId target)
{
  ++m_numLockOns;
  for (STarget& existing : m_targets)
  {
    if (existing.m_target == target)
    {
      ++existing.m_numLocks;
      return;
    }
  }
  STarget added;
  added.m_target = target;
  added.m_numLocks = 1;
  m_targets.push_back(added);
}
// Total number of lock-ons across all targets.
int CSpammerCloudTargets::GetNumLockOns() const
{
return m_numLockOns;
}
// Number of lock-ons held against a specific entity; 0 if it is not tracked.
int CSpammerCloudTargets::GetNumLockOns(EntityId target) const
{
  for (const STarget& entry : m_targets)
  {
    if (entry.m_target == target)
      return entry.m_numLocks;
  }
  return 0;
}
// Returns the target entry at 'idx'. No bounds check: the caller must keep
// idx within [0, GetNumTargets()).
const CSpammerCloudTargets::STarget& CSpammerCloudTargets::GetTarget(int idx) const
{
return m_targets[idx];
}
// Number of distinct targets currently tracked.
int CSpammerCloudTargets::GetNumTargets() const
{
return static_cast<int>(m_targets.size());
}
// Releases one lock-on from the most recently added target and returns that
// target's entity id (0 when nothing is locked). The entry is removed once
// its per-target lock count reaches zero.
EntityId CSpammerCloudTargets::UnlockNext()
{
  if (Empty())
    return 0;
  STarget& last = m_targets.back();
  const EntityId released = last.m_target;
  if (--last.m_numLocks == 0)
    m_targets.pop_back();
  --m_numLockOns;
  return released;
}
// Resets the candidate list and the accumulated probability mass.
void CSpammerPotentialTargets::Clear()
{
m_potentialTargets.clear();
m_totalProbability = 0.0f;
}
// Appends a candidate target with its selection weight and folds the weight
// into the running total used for weighted random picks.
void CSpammerPotentialTargets::AddTarget(EntityId target, float probability)
{
  m_totalProbability += probability;
  STarget entry;
  entry.m_target = target;
  entry.m_probability = probability;
  m_potentialTargets.push_back(entry);
}
// Fire mode starts idle: no rockets loaded, no pending fire request, and
// both the state timer and the refire timer at zero.
CSpammer::CSpammer()
: m_state(EState_None)
, m_timer(0.0f)
, m_numLoadedRockets(0)
, m_nextFireTimer(0.0f)
, m_firingPending(false)
{
}
// (De)activates the fire mode. For the local client this also toggles the
// vis-table ignore entry for the host entity and the item-pickup filter.
void CSpammer::Activate(bool activate)
{
// NOTE(review): update is enabled unconditionally — even when
// activate == false. Looks intentional (deactivation paths may still need
// updates) but worth confirming; EnableUpdate(activate, ...) would be the
// usual pattern.
m_pWeapon->EnableUpdate(true, eIUS_FireMode);
BaseFiremode::Activate(activate);
CActor *pOwnerActor = m_pWeapon->GetOwnerActor();
if(pOwnerActor && pOwnerActor->IsClient())
{
if(activate)
{
// Make sure the vehicle we are attached to does not block view of targets. Disable ability to pick up other items whilst using the turret.
g_pGame->GetPlayerVisTable()->AddGlobalIgnoreEntity(m_pWeapon->GetHostId(), "CSpammer::Activate(true)");
g_pGameActions->FilterItemPickup()->Enable(true);
}
else
{
// Undo the activation-time changes.
g_pGame->GetPlayerVisTable()->RemoveGlobalIgnoreEntity(m_pWeapon->GetHostId());
g_pGameActions->FilterItemPickup()->Enable(false);
}
}
}
// Per-frame update: refreshes the candidate target list, then advances the
// fire-mode state machine (idle / loading rockets / bursting them out).
void CSpammer::Update(float frameTime, uint32 frameId)
{
BaseFiremode::Update(frameTime, frameId);
UpdatePotentialTargets();
switch (m_state)
{
case EState_None:
// Count down the refire delay; once it expires, honor any fire request
// that arrived while we were still cooling down.
m_nextFireTimer -= frameTime;
if (m_nextFireTimer < 0.0f)
{
if (m_firingPending)
StartFire();
m_nextFireTimer = 0.0f;
}
break;
case EState_LoadingIn:
UpdateLoadIn(frameTime);
break;
case EState_Bursting:
UpdateBurst(frameTime);
break;
}
}
// Begins the load-in phase. While the refire delay is still running the
// request is remembered instead (Update() replays it when the delay ends);
// a request in any non-idle state is ignored.
void CSpammer::StartFire()
{
  if (m_nextFireTimer > 0.0f)
  {
    m_firingPending = true;
    return;
  }
  if (m_state == EState_None)
  {
    StartLoadingIn();
    m_firingPending = false;
  }
}
// Releasing the trigger during load-in fires the accumulated rockets as a
// burst; in any other state the release is ignored.
void CSpammer::StopFire()
{
  if (m_state == EState_LoadingIn)
  {
    StartBursting();
    m_firingPending = false;
  }
}
// Fires a single homing rocket at 'target' (0 = no lock-on destination).
// Handles projectile spawn/launch, recoil, effects, ammo bookkeeping, and
// the network shoot request. Always returns true.
bool CSpammer::ShootRocket(EntityId target)
{
IEntityClass* pAmmoClass = m_fireParams->fireparams.ammo_type_class;
int ammoCount = m_pWeapon->GetAmmoCount(pAmmoClass);
CActor *pOwnerActor = m_pWeapon->GetOwnerActor();
const bool playerIsShooter = pOwnerActor ? pOwnerActor->IsPlayer() : false;
const bool clientIsShooter = pOwnerActor ? pOwnerActor->IsClient() : false;
// Work out the aim point and derive firing position/direction/velocity.
bool bHit = false;
ray_hit rayhit;
rayhit.pCollider = 0;
Vec3 hit = GetProbableHit(WEAPON_HIT_RANGE, &bHit, &rayhit);
Vec3 pos = GetFiringPos(hit);
Vec3 dir = GetFiringDir(hit, pos);
Vec3 vel = GetFiringVelocity(dir);
int flags = CItem::eIPAF_Default;
if (IsProceduralRecoilEnabled() && pOwnerActor)
{
pOwnerActor->ProceduralRecoil(m_fireParams->proceduralRecoilParams.duration, m_fireParams->proceduralRecoilParams.strength, m_fireParams->proceduralRecoilParams.kickIn, m_fireParams->proceduralRecoilParams.arms);
}
float speedOverride = -1.f;
GetWeapon()->PlayAction(GetFragmentIds().fire, 0, false, flags, speedOverride);
// Notify AI of shots passing close to other actors.
CheckNearMisses(hit, pos, dir, (hit-pos).len(), 1.0f);
CProjectile* pAmmo = m_pWeapon->SpawnAmmo(m_fireParams->fireparams.spawn_ammo_class, false);
const EntityId weaponOwnerId = m_pWeapon->GetOwnerId();
EntityId ammoId = 0;
if (pAmmo)
{
ammoId = pAmmo->GetEntityId();
CRY_ASSERT_MESSAGE(m_fireParams->fireparams.hitTypeId, string().Format("Invalid hit type '%s' in fire params for '%s'", m_fireParams->fireparams.hit_type.c_str(), m_pWeapon->GetEntity()->GetName()));
CRY_ASSERT_MESSAGE(m_fireParams->fireparams.hitTypeId == g_pGame->GetGameRules()->GetHitTypeId(m_fireParams->fireparams.hit_type.c_str()), "Sanity Check Failed: Stored hit type id does not match the type string, possibly CacheResources wasn't called on this weapon type");
pAmmo->SetParams(CProjectile::SProjectileDesc(
weaponOwnerId, m_pWeapon->GetHostId(), m_pWeapon->GetEntityId(),
m_fireParams->fireparams.damage, m_fireParams->fireparams.damage_drop_min_distance, m_fireParams->fireparams.damage_drop_per_meter, m_fireParams->fireparams.damage_drop_min_damage,
m_fireParams->fireparams.hitTypeId, m_fireParams->fireparams.bullet_pierceability_modifier, m_pWeapon->IsZoomed()));
// this must be done after owner is set
pAmmo->InitWithAI();
pAmmo->SetDestination(target);
pAmmo->Launch(pos, dir, vel, m_speed_scale);
m_projectileId = ammoId;
// Predicted client projectiles on a listen server must be net-bound.
if (clientIsShooter && pAmmo->IsPredicted() && gEnv->IsClient() && gEnv->bServer)
{
pAmmo->GetGameObject()->BindToNetwork();
}
}
if (m_pWeapon->IsServer())
{
// Gameplay analytics: record the shot server-side.
const char *ammoName = pAmmoClass != NULL ? pAmmoClass->GetName() : NULL;
g_pGame->GetIGameFramework()->GetIGameplayRecorder()->Event(m_pWeapon->GetOwner(), GameplayEvent(eGE_WeaponShot, ammoName, 1, (void *)(EXPAND_PTR)m_pWeapon->GetEntityId()));
}
OnShoot(weaponOwnerId, ammoId, pAmmoClass, pos, dir, vel);
if(playerIsShooter)
{
// Firing briefly heats the weapon/owner for thermal vision.
const SThermalVisionParams& thermalParams = m_fireParams->thermalVisionParams;
m_pWeapon->AddShootHeatPulse(pOwnerActor, thermalParams.weapon_shootHeatPulse, thermalParams.weapon_shootHeatPulseTime,
thermalParams.owner_shootHeatPulse, thermalParams.owner_shootHeatPulseTime);
}
m_muzzleEffect.Shoot(this, hit, m_barrelId);
RecoilImpulse(pos, dir);
m_fired = true;
m_next_shot += m_next_shot_dt;
// Cycle through the barrels between shots.
if (++m_barrelId == m_fireParams->fireparams.barrel_count)
m_barrelId = 0;
ammoCount--;
// clipSize -1 means infinite ammo; 0 means ammo is drawn from inventory.
int clipSize = GetClipSize();
if (clipSize != -1)
{
if (clipSize!=0)
m_pWeapon->SetAmmoCount(pAmmoClass, ammoCount);
else
m_pWeapon->SetInventoryAmmoCount(pAmmoClass, ammoCount);
}
m_pWeapon->RequestShoot(pAmmoClass, pos, dir, vel, hit, m_speed_scale, pAmmo? pAmmo->GetGameObject()->GetPredictionHandle() : 0, false);
return true;
}
// Load-in phase update: queues rockets at 'loadInRate' (rounds/min) up to
// maxNumRockets, assigning a lock-on target for each; bursts automatically
// when we run out of ammo to load.
void CSpammer::UpdateLoadIn(float frameTime)
{
const int currentAmmoCount = GetAmmoCount();
const bool infiniteAmmo = (GetClipSize() < 0);
if ((!infiniteAmmo) && (m_numLoadedRockets >= currentAmmoCount))
{
// No more ammo to load: play feedback and fire what we have.
GetWeapon()->PlayAction(GetFragmentIds().empty_clip);
StopFire();
return;
}
const SSpammerParams& params = GetShared()->spammerParams;
// Convert rounds-per-minute into seconds-per-rocket.
const float loadInTime = 1.0f / (params.loadInRate / 60.0f);
m_timer -= frameTime;
while (m_timer < 0.0f && m_numLoadedRockets < params.maxNumRockets)
{
m_timer += loadInTime;
AddTarget();
GetWeapon()->PlayAction(GetFragmentIds().cock);
}
// Back-fill a lock-on for any loaded rocket that could not be assigned a
// target when it was added (one retry per frame).
if (m_numLoadedRockets != m_targetsAssigned.GetNumLockOns())
{
EntityId nextTarget = GetNextLockOnTarget();
if (nextTarget != 0)
m_targetsAssigned.LockOn(nextTarget);
}
}
// Burst phase update: fires the loaded rockets at 'burstRate' (rounds/min),
// returning to the idle state once all of them are away.
void CSpammer::UpdateBurst(float frameTime)
{
const SSpammerParams& params = GetShared()->spammerParams;
// Seconds between rockets in the burst.
const float burstTime = 1.0f / (params.burstRate / 60.0f);
m_timer -= frameTime;
while (m_timer < 0.0f && m_numLoadedRockets != 0)
{
m_timer += burstTime;
--m_numLoadedRockets;
ShootNextTarget();
}
if (m_numLoadedRockets == 0)
m_state = EState_None;
}
// Rebuilds the weighted candidate list each frame: filters the auto-aim
// targets by hostility, lock-on distance band, aim-cone alignment and
// visibility, scores each survivor, then normalizes the scores so the best
// candidate has weight ~1 and m_totalProbability is the sum of all weights.
void CSpammer::UpdatePotentialTargets()
{
const float minLockOnDistance = m_fireParams->spammerParams.minLockOnDistance;
const float maxLockOnDistance = m_fireParams->spammerParams.maxLockOnDistance;
// Cosine of the targeting cone half-angle; larger cos = tighter cone.
const float maxAngleCos = cos_tpl(DEG2RAD(m_fireParams->spammerParams.targetingTolerance));
const CAutoAimManager& autoAimManager = g_pGame->GetAutoAimManager();
const TAutoaimTargets& aaTargets = autoAimManager.GetAutoAimTargets();
const int targetCount = aaTargets.size();
const Vec3 probableHit = Vec3Constants<float>::fVec3_Zero;
const Vec3 weaponPos = GetWeaponPosition(probableHit);
const Vec3 weaponFwd = GetWeaponDirection(weaponPos, probableHit);
m_potentialTargets.Clear();
CPlayerVisTable::SVisibilityParams queryTargetParams(0);
const bool flat2DMode = m_fireParams->spammerParams.targetingFlatMode;
for (int i = 0; i < targetCount; ++i)
{
const SAutoaimTarget& target = aaTargets[i];
CRY_ASSERT(target.entityId != m_pWeapon->GetOwnerId());
// Only hostile, still-existing entities qualify.
if (!target.HasFlagSet(eAATF_AIHostile))
continue;
IEntity* pTargetEntity = gEnv->pEntitySystem->GetEntity(target.entityId);
if (!pTargetEntity)
continue;
CActor* pActor = target.pActorWeak.lock().get();
AABB bounds;
pTargetEntity->GetWorldBounds(bounds);
Vec3 targetPos = bounds.GetCenter();
Vec3 targetDistVec = (targetPos - weaponPos).normalized();
float distance = targetPos.GetDistance(weaponPos);
// Enforce the min/max lock-on distance band.
if (distance <= minLockOnDistance || distance >= maxLockOnDistance)
continue;
// Alignment = cosine between aim direction and direction to target;
// flat mode ignores the vertical component (plus a screen check).
float alignment;
if (!flat2DMode)
{
alignment = weaponFwd * targetDistVec;
}
else
{
const CCamera& viewCamera = gEnv->pSystem->GetViewCamera();
if (!viewCamera.IsPointVisible(targetPos))
continue;
alignment = Vec3(weaponFwd.x, weaponFwd.y, 0.0f).GetNormalizedSafe() * Vec3(targetDistVec.x, targetDistVec.y, 0.0f).GetNormalizedSafe();
}
if (alignment <= maxAngleCos)
continue;
// Resolve which entity to line-of-sight test (vehicle/parent/self).
const SpammerTarget finalTargetInfo = GetVisibilityTestTarget(pTargetEntity, target.entityId, pActor, bounds);
const int kAutoaimVisibilityLatency = 0;
queryTargetParams.targetEntityId = finalTargetInfo.targetId;
if (!g_pGame->GetPlayerVisTable()->CanLocalPlayerSee(queryTargetParams, kAutoaimVisibilityLatency))
continue;
// Score: bigger targets score higher, existing lock-ons reduce the
// score (spreads rockets), and better alignment scales it up.
float priority = 1.0f;
priority *= finalTargetInfo.radius;
priority /= m_targetsAssigned.GetNumLockOns(target.entityId)+1;
const float m = 1.0f / (1.0f - maxAngleCos);
priority *= m * alignment + (1.0f - m);
priority *= 0.1f;
priority = min(priority, 1.0f);
m_potentialTargets.AddTarget(target.entityId, priority);
}
// Normalize by the maximum score (epsilon guards divide-by-zero) and
// recompute the total probability mass used by the weighted picker.
float n = 0.0f;
size_t num = m_potentialTargets.m_potentialTargets.size();
for (size_t i = 0; i < num; ++i)
{
n = max(n, m_potentialTargets.m_potentialTargets[i].m_probability);
}
m_potentialTargets.m_totalProbability = 0.0f;
for (size_t i = 0; num && i < m_potentialTargets.m_potentialTargets.size(); ++i)
{
m_potentialTargets.m_potentialTargets[i].m_probability /= n + FLT_EPSILON;
m_potentialTargets.m_totalProbability += m_potentialTargets.m_potentialTargets[i].m_probability;
}
}
// Enters the load-in state: resets the rocket queue and arms the refire
// delay (derived from the fire rate in rounds/min) for the next trigger pull.
void CSpammer::StartLoadingIn()
{
m_state = EState_LoadingIn;
m_timer = 0.0f;
m_targetsAssigned.Clear();
m_numLoadedRockets = 0;
const SFireModeParams& params = *GetShared();
m_nextFireTimer = 1.0f / (params.fireparams.rate / 60.0f);
}
// Enters the bursting state; the timer starts at one burst interval so the
// first rocket leaves after that delay.
void CSpammer::StartBursting()
{
const SSpammerParams& params = GetShared()->spammerParams;
m_timer = 1.0f / (params.burstRate / 60.0f);
m_state = EState_Bursting;
}
// Queues one rocket and, when a candidate is available, locks it onto a
// target picked from the weighted candidate list. A rocket with no target
// gets a retry in UpdateLoadIn().
void CSpammer::AddTarget()
{
++m_numLoadedRockets;
EntityId nextTarget = GetNextLockOnTarget();
if (nextTarget != 0)
m_targetsAssigned.LockOn(nextTarget);
}
// Fires one rocket at the next assigned lock-on target (0 when none is
// assigned, which launches an unguided rocket).
void CSpammer::ShootNextTarget()
{
EntityId nextTarget = m_targetsAssigned.UnlockNext();
ShootRocket(nextTarget);
}
// Server-side aim origin: the owner's eye position when a movement
// controller is available, otherwise the regular firing position.
// NOTE(review): on a pure client (gEnv->bServer false) this returns the
// zero vector — presumably callers only use it server-side; confirm.
Vec3 CSpammer::GetWeaponPosition(const Vec3& probableHit) const
{
Vec3 position(ZERO);
if(gEnv->bServer)
{
CActor* pOwnerActor = m_pWeapon->GetOwnerActor();
IMovementController* pMC = pOwnerActor ? pOwnerActor->GetMovementController() : NULL;
if (pMC)
{
SMovementState state;
pMC->GetMovementState(state);
position = state.eyePosition;
}
else
{
return GetFiringPos(probableHit);
}
}
return position;
}
// Server-side aim direction: the owner's aim direction when a movement
// controller is available, otherwise the regular firing direction.
// NOTE(review): mirrors GetWeaponPosition — returns the zero vector on a
// pure client; confirm callers only use it server-side.
Vec3 CSpammer::GetWeaponDirection(const Vec3& firingPosition, const Vec3& probableHit) const
{
Vec3 direction(ZERO);
if(gEnv->bServer)
{
CActor* pOwnerActor = m_pWeapon->GetOwnerActor();
IMovementController* pMC = pOwnerActor ? pOwnerActor->GetMovementController() : NULL;
if (pMC)
{
SMovementState state;
pMC->GetMovementState(state);
direction = state.aimDirection;
}
else
{
return GetFiringDir(probableHit, firingPosition);
}
}
return direction;
}
// NOTE(review): this struct appears unused within this file (the candidate
// list uses CSpammerPotentialTargets::STarget instead) — candidate for
// removal; confirm nothing else in the TU references it.
struct PotentialTarget
{
EntityId entity;
float probability;
};
// Picks the next lock-on target by weighted random selection over the
// candidate list (weights set up in UpdatePotentialTargets). Returns 0 when
// no candidates exist.
//
// Fix: the previous version decremented the index after a "while remainder
// > 0" walk, so a random roll of exactly 0 indexed element -1, and floating
// point rounding could leave a positive remainder after the last element
// and index one past the end. The loop below is bounded and the index is
// clamped to the valid range.
EntityId CSpammer::GetNextLockOnTarget() const
{
	if (m_potentialTargets.m_potentialTargets.empty())
		return 0;
	const size_t numTargets = m_potentialTargets.m_potentialTargets.size();
	float remaining = cry_frand() * m_potentialTargets.m_totalProbability;
	size_t selectedIdx = 0;
	for (; selectedIdx < numTargets; ++selectedIdx)
	{
		remaining -= m_potentialTargets.m_potentialTargets[selectedIdx].m_probability;
		if (remaining <= 0.0f)
			break;
	}
	// Clamp: rounding error can exhaust the list with remaining still > 0.
	if (selectedIdx >= numTargets)
		selectedIdx = numTargets - 1;
	return m_potentialTargets.m_potentialTargets[selectedIdx].m_target;
}
|
// Copyright (c) 2011-2014 The Bitcoin developers
// Copyright (c) 2014-2015 The Dash developers
// Copyright (c) 2015-2017 The PIVX developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "transactionrecord.h"
#include "base58.h"
#include "obfuscation.h"
#include "swifttx.h"
#include "timedata.h"
#include "wallet.h"
#include <stdint.h>
/* Return positive answer if transaction should be shown in list.
*/
bool TransactionRecord::showTransaction(const CWalletTx& wtx)
{
    // Coinbase (generated/mined) transactions are only listed once they are
    // part of the main chain; everything else is always shown.
    if (!wtx.IsCoinBase())
        return true;
    return wtx.IsInMainChain();
}
/*
* Decompose CWallet transaction to model transaction records.
*/
QList<TransactionRecord> TransactionRecord::decomposeTransaction(const CWallet* wallet, const CWalletTx& wtx)
{
QList<TransactionRecord> parts;
int64_t nTime = wtx.GetComputedTxTime();
CAmount nCredit = wtx.GetCredit(ISMINE_ALL);
CAmount nDebit = wtx.GetDebit(ISMINE_ALL);
CAmount nNet = nCredit - nDebit;
uint256 hash = wtx.GetHash();
std::map<std::string, std::string> mapValue = wtx.mapValue;
if (wtx.IsCoinStake()) {
TransactionRecord sub(hash, nTime);
CTxDestination address;
if (!ExtractDestination(wtx.vout[1].scriptPubKey, address))
return parts;
if (!IsMine(*wallet, address)) {
//if the address is not yours then it means you have a tx sent to you in someone elses coinstake tx
for (unsigned int i = 1; i < wtx.vout.size(); i++) {
CTxDestination outAddress;
if (ExtractDestination(wtx.vout[i].scriptPubKey, outAddress)) {
if (IsMine(*wallet, outAddress)) {
isminetype mine = wallet->IsMine(wtx.vout[i]);
sub.involvesWatchAddress = mine & ISMINE_WATCH_ONLY;
sub.type = TransactionRecord::MNReward;
sub.address = CBitcoinAddress(outAddress).ToString();
sub.credit = wtx.vout[i].nValue;
}
}
}
} else {
//stake reward
isminetype mine = wallet->IsMine(wtx.vout[1]);
sub.involvesWatchAddress = mine & ISMINE_WATCH_ONLY;
sub.type = TransactionRecord::StakeMint;
sub.address = CBitcoinAddress(address).ToString();
sub.credit = nNet;
}
parts.append(sub);
} else if (wtx.IsZerocoinSpend()) {
// a zerocoin spend that was created by this wallet
libzerocoin::CoinSpend zcspend = TxInToZerocoinSpend(wtx.vin[0]);
bool fSpendFromMe = wallet->IsMyZerocoinSpend(zcspend.getCoinSerialNumber());
//zerocoin spend outputs
bool fFeeAssigned = false;
for (const CTxOut txout : wtx.vout) {
// change that was reminted as zerocoins
if (txout.IsZerocoinMint()) {
// do not display record if this isn't from our wallet
if (!fSpendFromMe)
continue;
TransactionRecord sub(hash, nTime);
sub.type = TransactionRecord::ZerocoinSpend_Change_zPiv;
sub.address = mapValue["zerocoinmint"];
sub.debit = -txout.nValue;
if (!fFeeAssigned) {
sub.debit -= (wtx.GetZerocoinSpent() - wtx.GetValueOut());
fFeeAssigned = true;
}
sub.idx = parts.size();
parts.append(sub);
continue;
}
string strAddress = "";
CTxDestination address;
if (ExtractDestination(txout.scriptPubKey, address))
strAddress = CBitcoinAddress(address).ToString();
// a zerocoinspend that was sent to an address held by this wallet
isminetype mine = wallet->IsMine(txout);
if (mine) {
TransactionRecord sub(hash, nTime);
sub.type = (fSpendFromMe ? TransactionRecord::ZerocoinSpend_FromMe : TransactionRecord::RecvFromZerocoinSpend);
sub.debit = txout.nValue;
sub.address = mapValue["recvzerocoinspend"];
if (strAddress != "")
sub.address = strAddress;
sub.idx = parts.size();
parts.append(sub);
continue;
}
// spend is not from us, so do not display the spend side of the record
if (!fSpendFromMe)
continue;
// zerocoin spend that was sent to someone else
TransactionRecord sub(hash, nTime);
sub.debit = -txout.nValue;
sub.type = TransactionRecord::ZerocoinSpend;
sub.address = mapValue["zerocoinspend"];
if (strAddress != "")
sub.address = strAddress;
sub.idx = parts.size();
parts.append(sub);
}
} else if (nNet > 0 || wtx.IsCoinBase()) {
//
// Credit
//
BOOST_FOREACH (const CTxOut& txout, wtx.vout) {
isminetype mine = wallet->IsMine(txout);
if (mine) {
TransactionRecord sub(hash, nTime);
CTxDestination address;
sub.idx = parts.size(); // sequence number
sub.credit = txout.nValue;
sub.involvesWatchAddress = mine & ISMINE_WATCH_ONLY;
if (ExtractDestination(txout.scriptPubKey, address) && IsMine(*wallet, address)) {
// Received by PIVX Address
sub.type = TransactionRecord::RecvWithAddress;
sub.address = CBitcoinAddress(address).ToString();
} else {
// Received by IP connection (deprecated features), or a multisignature or other non-simple transaction
sub.type = TransactionRecord::RecvFromOther;
sub.address = mapValue["from"];
}
if (wtx.IsCoinBase()) {
// Generated
sub.type = TransactionRecord::Generated;
}
parts.append(sub);
}
}
} else {
bool fAllFromMeDenom = true;
int nFromMe = 0;
bool involvesWatchAddress = false;
isminetype fAllFromMe = ISMINE_SPENDABLE;
BOOST_FOREACH (const CTxIn& txin, wtx.vin) {
if (wallet->IsMine(txin)) {
fAllFromMeDenom = fAllFromMeDenom && wallet->IsDenominated(txin);
nFromMe++;
}
isminetype mine = wallet->IsMine(txin);
if (mine & ISMINE_WATCH_ONLY) involvesWatchAddress = true;
if (fAllFromMe > mine) fAllFromMe = mine;
}
isminetype fAllToMe = ISMINE_SPENDABLE;
bool fAllToMeDenom = true;
int nToMe = 0;
BOOST_FOREACH (const CTxOut& txout, wtx.vout) {
if (wallet->IsMine(txout)) {
fAllToMeDenom = fAllToMeDenom && wallet->IsDenominatedAmount(txout.nValue);
nToMe++;
}
isminetype mine = wallet->IsMine(txout);
if (mine & ISMINE_WATCH_ONLY) involvesWatchAddress = true;
if (fAllToMe > mine) fAllToMe = mine;
}
if (fAllFromMeDenom && fAllToMeDenom && nFromMe * nToMe) {
parts.append(TransactionRecord(hash, nTime, TransactionRecord::ObfuscationDenominate, "", -nDebit, nCredit));
parts.last().involvesWatchAddress = false; // maybe pass to TransactionRecord as constructor argument
} else if (fAllFromMe && fAllToMe) {
// Payment to self
// TODO: this section still not accurate but covers most cases,
// might need some additional work however
TransactionRecord sub(hash, nTime);
// Payment to self by default
sub.type = TransactionRecord::SendToSelf;
sub.address = "";
if (mapValue["DS"] == "1") {
sub.type = TransactionRecord::Obfuscated;
CTxDestination address;
if (ExtractDestination(wtx.vout[0].scriptPubKey, address)) {
// Sent to PIVX Address
sub.address = CBitcoinAddress(address).ToString();
} else {
// Sent to IP, or other non-address transaction like OP_EVAL
sub.address = mapValue["to"];
}
} else {
for (unsigned int nOut = 0; nOut < wtx.vout.size(); nOut++) {
const CTxOut& txout = wtx.vout[nOut];
sub.idx = parts.size();
if (wallet->IsCollateralAmount(txout.nValue)) sub.type = TransactionRecord::ObfuscationMakeCollaterals;
if (wallet->IsDenominatedAmount(txout.nValue)) sub.type = TransactionRecord::ObfuscationCreateDenominations;
if (nDebit - wtx.GetValueOut() == OBFUSCATION_COLLATERAL) sub.type = TransactionRecord::ObfuscationCollateralPayment;
}
}
CAmount nChange = wtx.GetChange();
sub.debit = -(nDebit - nChange);
sub.credit = nCredit - nChange;
parts.append(sub);
parts.last().involvesWatchAddress = involvesWatchAddress; // maybe pass to TransactionRecord as constructor argument
} else if (fAllFromMe) {
//
// Debit
//
CAmount nTxFee = nDebit - wtx.GetValueOut();
for (unsigned int nOut = 0; nOut < wtx.vout.size(); nOut++) {
const CTxOut& txout = wtx.vout[nOut];
TransactionRecord sub(hash, nTime);
sub.idx = parts.size();
sub.involvesWatchAddress = involvesWatchAddress;
if (wallet->IsMine(txout)) {
// Ignore parts sent to self, as this is usually the change
// from a transaction sent back to our own address.
continue;
}
CTxDestination address;
if (ExtractDestination(txout.scriptPubKey, address)) {
// Sent to PIVX Address
sub.type = TransactionRecord::SendToAddress;
sub.address = CBitcoinAddress(address).ToString();
} else if (txout.IsZerocoinMint()){
sub.type = TransactionRecord::ZerocoinMint;
sub.address = mapValue["zerocoinmint"];
} else {
// Sent to IP, or other non-address transaction like OP_EVAL
sub.type = TransactionRecord::SendToOther;
sub.address = mapValue["to"];
}
if (mapValue["DS"] == "1") {
sub.type = TransactionRecord::Obfuscated;
}
CAmount nValue = txout.nValue;
/* Add fee to first output */
if (nTxFee > 0) {
nValue += nTxFee;
nTxFee = 0;
}
sub.debit = -nValue;
parts.append(sub);
}
} else {
//
// Mixed debit transaction, can't break down payees
//
parts.append(TransactionRecord(hash, nTime, TransactionRecord::Other, "", nNet, 0));
parts.last().involvesWatchAddress = involvesWatchAddress;
}
}
return parts;
}
void TransactionRecord::updateStatus(const CWalletTx& wtx)
{
AssertLockHeld(cs_main);
// Determine transaction status
// Find the block the tx is in
CBlockIndex* pindex = NULL;
BlockMap::iterator mi = mapBlockIndex.find(wtx.hashBlock);
if (mi != mapBlockIndex.end())
pindex = (*mi).second;
// Sort order, unrecorded transactions sort to the top
status.sortKey = strprintf("%010d-%01d-%010u-%03d",
(pindex ? pindex->nHeight : std::numeric_limits<int>::max()),
(wtx.IsCoinBase() ? 1 : 0),
wtx.nTimeReceived,
idx);
status.countsForBalance = wtx.IsTrusted() && !(wtx.GetBlocksToMaturity() > 0);
status.depth = wtx.GetDepthInMainChain();
status.cur_num_blocks = chainActive.Height();
status.cur_num_ix_locks = nCompleteTXLocks;
if (!IsFinalTx(wtx, chainActive.Height() + 1)) {
if (wtx.nLockTime < LOCKTIME_THRESHOLD) {
status.status = TransactionStatus::OpenUntilBlock;
status.open_for = wtx.nLockTime - chainActive.Height();
} else {
status.status = TransactionStatus::OpenUntilDate;
status.open_for = wtx.nLockTime;
}
}
// For generated transactions, determine maturity
else if (type == TransactionRecord::Generated || type == TransactionRecord::StakeMint || type == TransactionRecord::MNReward) {
if (wtx.GetBlocksToMaturity() > 0) {
status.status = TransactionStatus::Immature;
if (wtx.IsInMainChain()) {
status.matures_in = wtx.GetBlocksToMaturity();
// Check if the block was requested by anyone
if (GetAdjustedTime() - wtx.nTimeReceived > 2 * 60 && wtx.GetRequestCount() == 0)
status.status = TransactionStatus::MaturesWarning;
} else {
status.status = TransactionStatus::NotAccepted;
}
} else {
status.status = TransactionStatus::Confirmed;
}
} else {
if (status.depth < 0) {
status.status = TransactionStatus::Conflicted;
} else if (GetAdjustedTime() - wtx.nTimeReceived > 2 * 60 && wtx.GetRequestCount() == 0) {
status.status = TransactionStatus::Offline;
} else if (status.depth == 0) {
status.status = TransactionStatus::Unconfirmed;
} else if (status.depth < RecommendedNumConfirmations) {
status.status = TransactionStatus::Confirming;
} else {
status.status = TransactionStatus::Confirmed;
}
}
}
/**
 * Whether the cached status is stale and updateStatus() should be re-run.
 *
 * Requires cs_main to be held (asserted). The status is stale when either the
 * chain tip height or the completed-instant-send lock count changed since the
 * last update.
 */
bool TransactionRecord::statusUpdateNeeded()
{
    AssertLockHeld(cs_main);
    const bool fHeightChanged = (status.cur_num_blocks != chainActive.Height());
    const bool fIxLocksChanged = (status.cur_num_ix_locks != nCompleteTXLocks);
    return fHeightChanged || fIxLocksChanged;
}
/**
 * Render this record's transaction hash as a hex string for display.
 */
QString TransactionRecord::getTxID() const
{
    const std::string strHash = hash.ToString();
    return QString::fromStdString(strHash);
}
/**
 * Return the output index this record was decomposed from
 * (the `idx` sequence number assigned during decomposition).
 */
int TransactionRecord::getOutputIndex() const
{
    return idx;
}
|
/****************************************************************************/
/* Copyright 2005-2006, Francis Russell */
/* */
/* Licensed under the Apache License, Version 2.0 (the License); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an AS IS BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* */
/****************************************************************************/
#ifndef DESOLA_PROFILING_BINOP_HPP
#define DESOLA_PROFILING_BINOP_HPP

#include "Desola_profiling_fwd.hpp"
#include <map>
#include <cassert>

namespace desola
{

namespace detail
{

/**
 * Profiling-expression node for a binary operation.
 *
 * Holds non-owning pointers to its two operand nodes. isEqual() compares two
 * expression graphs structurally through a node-to-node mapping built by the
 * caller.
 */
template<typename resultType, typename leftType, typename rightType, typename T_element>
class PBinOp : public PExprNode<resultType, T_element>
{
private:
  typedef std::map<const PExpressionNode<T_element>*, const PExpressionNode<T_element>*> NodeMapping;

  PExprNode<leftType, T_element>* left;
  PExprNode<rightType, T_element>* right;

public:
  /**
   * Structural equality: the base nodes match and both operands map onto the
   * corresponding operands of `node` under `mappings`.
   */
  bool isEqual(const PBinOp& node, const std::map<const PExpressionNode<T_element>*, const PExpressionNode<T_element>*>& mappings) const
  {
    // Look each operand up once and reuse the iterator, instead of repeating
    // the map search in both the assertion and the comparison.
    const typename NodeMapping::const_iterator leftIter = mappings.find(left);
    const typename NodeMapping::const_iterator rightIter = mappings.find(right);
    assert(leftIter != mappings.end());
    assert(rightIter != mappings.end());
    return PExprNode<resultType, T_element>::isEqual(node, mappings) &&
           leftIter->second == node.left &&
           rightIter->second == node.right;
  }

  /** Construct from references to the two operand nodes (not owned). */
  PBinOp(PExprNode<leftType, T_element>& l, PExprNode<rightType, T_element>& r) : PExprNode<resultType, T_element>(), left(&l), right(&r)
  {
  }

  inline PExprNode<leftType, T_element>& getLeft()
  {
    return *left;
  }

  inline const PExprNode<leftType, T_element>& getLeft() const
  {
    return *left;
  }

  inline PExprNode<rightType, T_element>& getRight()
  {
    return *right;
  }

  inline const PExprNode<rightType, T_element>& getRight() const
  {
    return *right;
  }
};

}

}
#endif
|
/* $Id: VBoxUsbMon.cpp $ */
/** @file
* VBox USB Monitor
*/
/*
* Copyright (C) 2011-2017 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
/*
*
* Theory of Operation
* - or -
* The Document I Wish The Original Author Had Written
*
*
* The USB Monitor (VBoxUSBMon.sys) serves to capture and uncapture USB
* devices. Its job is to ensure that the USB proxy (VBoxUSB.sys) gets installed
* for captured devices and removed again when not needed, restoring the regular
* driver (if any).
*
* The USB Monitor does not handle any actual USB traffic; that is the role of
* VBoxUSB.sys, the USB proxy. A typical solution for installing such USB proxy
* is using a filter driver, but that approach was rejected because filter drivers
* cannot be dynamically added and removed. What VBoxUSBMon does instead is hook
* into the dispatch routine of the bus driver, i.e. USB hub driver, and alter
* the PnP information returned by the bus driver.
*
* The key functionality for capturing is cycling a USB port (which causes a USB
* device reset and triggers re-enumeration in the Windows USB driver stack), and
* then modifying IRP_MN_QUERY_ID / BusQueryHardwareIDs and related requests so
* that they return the synthetic USB VID/PID that VBoxUSB.sys handles rather than
* the true hardware VID/PID. That causes Windows to install VBoxUSB.sys for the
* device.
*
* Uncapturing again cycles the USB port but returns unmodified hardware IDs,
* causing Windows to load the normal driver for the device.
*
* Identifying devices to capture or release (uncapture) is done through USB filters,
* a cross-platform concept which matches USB device based on their VID/PID, class,
* and other criteria.
*
* There is an IOCTL interface for adding/removing USB filters and applying them.
* The IOCTLs are normally issued by VBoxSVC.
*
* USB devices are enumerated by finding all USB hubs (GUID_DEVINTERFACE_USB_HUB)
* and querying their child devices (i.e. USB devices or other hubs) by sending
* IRP_MJ_PNP / IRP_MN_QUERY_DEVICE_RELATIONS / BusRelations. This is done when
* applying existing filters.
*
* Newly arrived USB devices are intercepted early in their PnP enumeration
* through the hooked bus driver dispatch routine. Devices which satisfy the
* filter matching criteria are morphed (see above) such that VBoxUSB.sys loads
* for them before any default driver does.
*
* There is an IDC interface to VBoxUSB.sys which allows the USB proxy to report
* that it's installed for a given USB device, and also report when the USB proxy
* is unloaded (typically caused by either unplugging the device or uncapturing
* and cycling the port). VBoxUSBMon.sys relies on these IDC calls to track
* captured devices and be informed when VBoxUSB.sys unloads.
*
* Windows 8+ complicates the USB Monitor's life by automatically putting some
* USB devices to a low-power state where they are unable to respond to any USB
* requests and VBoxUSBMon can't read any of their descriptors (note that in
* userland, the device descriptor can always be read, but string descriptors
* can't). Such devices' USB VID/PID/revision is recovered using the Windows
* PnP Manager from their DevicePropertyHardwareID, but their USB class/subclass
* and protocol unfortunately cannot be unambiguously recovered from their
* DevicePropertyCompatibleIDs.
*
* Filter drivers add another complication. With filter drivers in place, the
* device objects returned by the BusRelations query (or passing through the PnP
* hooks) may not be PDOs but rather filter DOs higher in the stack. To avoid
* confusion, we flatten the references to their base, i.e. the real PDO, which
* should remain the same for the lifetime of a device. Note that VBoxUSB.sys
* always passes its own PDO in the proxy startup IOCTL.
*/
/*********************************************************************************************************************************
* Header Files *
*********************************************************************************************************************************/
#include "VBoxUsbMon.h"
#include "../cmn/VBoxUsbIdc.h"
#include <vbox/err.h>
#include <VBox/usblib.h>
#include <excpt.h>
#include <stdio.h>
/*********************************************************************************************************************************
* Defined Constants And Macros *
*********************************************************************************************************************************/
/*
* Note: Must match the VID & PID in the USB driver .inf file!!
*/
/*
BusQueryDeviceID USB\Vid_80EE&Pid_CAFE
BusQueryInstanceID 2
BusQueryHardwareIDs USB\Vid_80EE&Pid_CAFE&Rev_0100
BusQueryHardwareIDs USB\Vid_80EE&Pid_CAFE
BusQueryCompatibleIDs USB\Class_ff&SubClass_00&Prot_00
BusQueryCompatibleIDs USB\Class_ff&SubClass_00
BusQueryCompatibleIDs USB\Class_ff
*/
#define szBusQueryDeviceId L"USB\\Vid_80EE&Pid_CAFE"
#define szBusQueryHardwareIDs L"USB\\Vid_80EE&Pid_CAFE&Rev_0100\0USB\\Vid_80EE&Pid_CAFE\0\0"
#define szBusQueryCompatibleIDs L"USB\\Class_ff&SubClass_00&Prot_00\0USB\\Class_ff&SubClass_00\0USB\\Class_ff\0\0"
#define szDeviceTextDescription L"VirtualBox USB"
#define VBOXUSBMON_MEMTAG 'MUBV'
/*********************************************************************************************************************************
* Structures and Typedefs *
*********************************************************************************************************************************/
/* Per-open-instance data for the monitor device (currently holds nothing). */
typedef struct VBOXUSBMONINS
{
    void * pvDummy;
} VBOXUSBMONINS, *PVBOXUSBMONINS;

/* Per-client context: the USB filter state owned by one opener of the device. */
typedef struct VBOXUSBMONCTX
{
    VBOXUSBFLTCTX FltCtx;
} VBOXUSBMONCTX, *PVBOXUSBMONCTX;

/* PnP dispatch hook for one hub driver.
 * fUninitFailed presumably records a failed unhook attempt — TODO confirm
 * against the uninstall path (not visible in this chunk). */
typedef struct VBOXUSBHUB_PNPHOOK
{
    VBOXUSBHOOK_ENTRY Hook;
    bool fUninitFailed;
} VBOXUSBHUB_PNPHOOK, *PVBOXUSBHUB_PNPHOOK;

/* Completion context allocated per hooked PnP IRP (see vboxUsbMonPnPHook). */
typedef struct VBOXUSBHUB_PNPHOOK_COMPLETION
{
    VBOXUSBHOOK_REQUEST Rq;
} VBOXUSBHUB_PNPHOOK_COMPLETION, *PVBOXUSBHUB_PNPHOOK_COMPLETION;

/* Maximum number of hub driver objects that can be hooked at once
 * (must stay in sync with the VBOX_PNPHOOKSTUB(n) instantiations). */
#define VBOXUSBMON_MAXDRIVERS 5
/* One hooked hub driver: the referenced driver object, its PnP hook state,
 * and the per-slot dispatch stub installed into it. */
typedef struct VBOXUSB_PNPDRIVER
{
    PDRIVER_OBJECT DriverObject;
    VBOXUSBHUB_PNPHOOK UsbHubPnPHook;
    PDRIVER_DISPATCH pfnHookStub;
} VBOXUSB_PNPDRIVER, *PVBOXUSB_PNPDRIVER;

/* Global driver state: the control device object, the hooked driver table,
 * open/unload bookkeeping and the remove lock. */
typedef struct VBOXUSBMONGLOBALS
{
    PDEVICE_OBJECT pDevObj;
    VBOXUSB_PNPDRIVER pDrivers[VBOXUSBMON_MAXDRIVERS];
    KEVENT OpenSynchEvent;
    IO_REMOVE_LOCK RmLock;
    uint32_t cOpens;
    volatile LONG ulPreventUnloadOn;
    PFILE_OBJECT pPreventUnloadFileObj;
} VBOXUSBMONGLOBALS, *PVBOXUSBMONGLOBALS;
/*********************************************************************************************************************************
* Global Variables *
*********************************************************************************************************************************/
static VBOXUSBMONGLOBALS g_VBoxUsbMonGlobals;
/**
 * Allocate cbBytes of non-paged pool tagged with the monitor's pool tag.
 * Returns NULL on failure (asserted in debug builds).
 */
PVOID VBoxUsbMonMemAlloc(SIZE_T cbBytes)
{
    PVOID pvBuf = ExAllocatePoolWithTag(NonPagedPool, cbBytes, VBOXUSBMON_MEMTAG);
    Assert(pvBuf);
    return pvBuf;
}
/**
 * Allocate cbBytes of tagged non-paged pool and zero it.
 * Returns NULL on failure.
 */
PVOID VBoxUsbMonMemAllocZ(SIZE_T cbBytes)
{
    PVOID pvBuf = VBoxUsbMonMemAlloc(cbBytes);
    if (pvBuf)
        RtlZeroMemory(pvBuf, cbBytes);
    return pvBuf;
}
/**
 * Free memory previously allocated with VBoxUsbMonMemAlloc/VBoxUsbMonMemAllocZ.
 * The pointer must carry the monitor's pool tag.
 */
VOID VBoxUsbMonMemFree(PVOID pvMem)
{
    ExFreePoolWithTag(pvMem, VBOXUSBMON_MEMTAG);
}
#define VBOXUSBDBG_STRCASE(_t) \
case _t: return #_t
#define VBOXUSBDBG_STRCASE_UNKNOWN(_v) \
default: LOG((__FUNCTION__": Unknown Value (0n%d), (0x%x)", _v, _v)); return "Unknown"
/* These minor code are semi-undocumented. */
#ifndef IRP_MN_QUERY_LEGACY_BUS_INFORMATION
#define IRP_MN_QUERY_LEGACY_BUS_INFORMATION 0x18
#endif
#ifndef IRP_MN_DEVICE_ENUMERATED
#define IRP_MN_DEVICE_ENUMERATED 0x19
#endif
/**
 * Translate a PnP minor function code into its symbolic name for logging.
 * Unknown values are logged and rendered as "Unknown".
 */
static const char* vboxUsbDbgStrPnPMn(UCHAR uMn)
{
    switch (uMn)
    {
        VBOXUSBDBG_STRCASE(IRP_MN_START_DEVICE);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_REMOVE_DEVICE);
        VBOXUSBDBG_STRCASE(IRP_MN_REMOVE_DEVICE);
        VBOXUSBDBG_STRCASE(IRP_MN_CANCEL_REMOVE_DEVICE);
        VBOXUSBDBG_STRCASE(IRP_MN_STOP_DEVICE);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_STOP_DEVICE);
        VBOXUSBDBG_STRCASE(IRP_MN_CANCEL_STOP_DEVICE);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_DEVICE_RELATIONS);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_INTERFACE);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_CAPABILITIES);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_RESOURCES);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_RESOURCE_REQUIREMENTS);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_DEVICE_TEXT);
        VBOXUSBDBG_STRCASE(IRP_MN_FILTER_RESOURCE_REQUIREMENTS);
        VBOXUSBDBG_STRCASE(IRP_MN_READ_CONFIG);
        VBOXUSBDBG_STRCASE(IRP_MN_WRITE_CONFIG);
        VBOXUSBDBG_STRCASE(IRP_MN_EJECT);
        VBOXUSBDBG_STRCASE(IRP_MN_SET_LOCK);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_ID);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_PNP_DEVICE_STATE);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_BUS_INFORMATION);
        VBOXUSBDBG_STRCASE(IRP_MN_DEVICE_USAGE_NOTIFICATION);
        VBOXUSBDBG_STRCASE(IRP_MN_SURPRISE_REMOVAL);
        VBOXUSBDBG_STRCASE(IRP_MN_QUERY_LEGACY_BUS_INFORMATION);
        VBOXUSBDBG_STRCASE(IRP_MN_DEVICE_ENUMERATED);
        VBOXUSBDBG_STRCASE_UNKNOWN(uMn);
    }
}
/**
 * Send IRP_MN_QUERY_DEVICE_RELATIONS (BusRelations) to a device synchronously.
 *
 * @returns NT Status
 * @param pDevObj USB device pointer
 * @param pFileObj Valid file object pointer
 * @param pDevRelations Pointer to DEVICE_RELATIONS pointer (out). On success
 * the caller owns the returned buffer and is responsible
 * for releasing it.
 */
NTSTATUS VBoxUsbMonQueryBusRelations(PDEVICE_OBJECT pDevObj, PFILE_OBJECT pFileObj, PDEVICE_RELATIONS *pDevRelations)
{
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;
    NTSTATUS Status;
    PIRP pIrp;
    PIO_STACK_LOCATION pSl;

    KeInitializeEvent(&Event, NotificationEvent, FALSE);

    Assert(pDevRelations);
    *pDevRelations = NULL;

    pIrp = IoBuildSynchronousFsdRequest(IRP_MJ_PNP, pDevObj, NULL, 0, NULL, &Event, &IoStatus);
    if (!pIrp)
    {
        /* Fixed: the old message blamed IoBuildDeviceIoControlRequest, which is
         * not the API being called here. */
        WARN(("IoBuildSynchronousFsdRequest failed!!"));
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    /* PnP IRPs must be initialized to STATUS_NOT_SUPPORTED before being sent down. */
    pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED;

    pSl = IoGetNextIrpStackLocation(pIrp);
    pSl->MajorFunction = IRP_MJ_PNP;
    pSl->MinorFunction = IRP_MN_QUERY_DEVICE_RELATIONS;
    pSl->Parameters.QueryDeviceRelations.Type = BusRelations;
    pSl->FileObject = pFileObj;

    Status = IoCallDriver(pDevObj, pIrp);
    if (Status == STATUS_PENDING)
    {
        LOG(("IoCallDriver returned STATUS_PENDING!!"));
        /* Wait for the lower driver to complete the request. */
        KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
        Status = IoStatus.Status;
    }

    if (Status == STATUS_SUCCESS)
    {
        PDEVICE_RELATIONS pRel = (PDEVICE_RELATIONS)IoStatus.Information;
        LOG(("pRel = %p", pRel));
        if (VALID_PTR(pRel))
        {
            *pDevRelations = pRel;
        }
        else
        {
            WARN(("Invalid pointer %p", pRel));
        }
    }
    else
    {
        WARN(("IRP_MN_QUERY_DEVICE_RELATIONS failed Status(0x%x)", Status));
    }

    LOG(("IoCallDriver returned %x", Status));
    return Status;
}
/**
 * Enumerate all USB hub devices (GUID_DEVINTERFACE_USB_HUB) and invoke the
 * callback for each. The walk stops early when the callback returns FALSE.
 *
 * @param pfnWalker Callback invoked per hub (receives file object, device
 * object and pvWalker).
 * @param pvWalker Opaque context passed through to the callback.
 */
VOID vboxUsbMonHubDevWalk(PFNVBOXUSBMONDEVWALKER pfnWalker, PVOID pvWalker)
{
    NTSTATUS Status = STATUS_UNSUCCESSFUL;
    PWSTR szwHubList;
    Status = IoGetDeviceInterfaces(&GUID_DEVINTERFACE_USB_HUB, NULL, 0, &szwHubList);
    if (Status != STATUS_SUCCESS)
    {
        LOG(("IoGetDeviceInterfaces failed with %d\n", Status));
        return;
    }
    if (szwHubList)
    {
        UNICODE_STRING UnicodeName;
        PDEVICE_OBJECT pHubDevObj;
        PFILE_OBJECT pHubFileObj;
        PWSTR szwHubName = szwHubList;
        /* The list is a MULTI_SZ: iterate until the empty terminating string. */
        while (*szwHubName != UNICODE_NULL)
        {
            RtlInitUnicodeString(&UnicodeName, szwHubName);
            Status = IoGetDeviceObjectPointer(&UnicodeName, FILE_READ_DATA, &pHubFileObj, &pHubDevObj);
            if (Status == STATUS_SUCCESS)
            {
                /* We could not log the hub name here:
                 * it is paged memory and we cannot use it in the logger because that raises the IRQL
                 */
                LOG(("IoGetDeviceObjectPointer returned %p %p", pHubDevObj, pHubFileObj));
                if (!pfnWalker(pHubFileObj, pHubDevObj, pvWalker))
                {
                    LOG(("the walker said to stop"));
                    /* Drop the reference taken by IoGetDeviceObjectPointer. */
                    ObDereferenceObject(pHubFileObj);
                    break;
                }
                LOG(("going forward.."));
                /* Drop the reference taken by IoGetDeviceObjectPointer. */
                ObDereferenceObject(pHubFileObj);
            }
            /* Advance past this string and its NUL to the next entry. */
            szwHubName += wcslen(szwHubName) + 1;
        }
        /* The interface list buffer is allocated by IoGetDeviceInterfaces. */
        ExFreePool(szwHubList);
    }
}
/* NOTE: the stack location data is not the "actual" IRP stack location,
 * but a copy being preserved on the IRP way down.
 * See the note in VBoxUsbPnPCompletion for detail */
/**
 * Post-process a completed PnP IRP for a (potentially) captured device.
 *
 * For filtered PDOs this rewrites the query results so that Windows sees the
 * synthetic VBoxUSB VID/PID/class instead of the real hardware identity:
 * the original result buffer in pIoStatus->Information is freed with
 * ExFreePool and replaced by a newly allocated buffer holding the fixed
 * strings (szBusQueryDeviceId / szBusQueryHardwareIDs / szBusQueryCompatibleIDs /
 * szDeviceTextDescription).
 *
 * @param pDevObj The device the IRP targeted (used for filter lookup).
 * @param pSl Saved copy of the IRP stack location (see note above).
 * @param pIoStatus The IRP's IO status block; Information may be replaced.
 * @return pIoStatus->Status (unchanged by this function).
 */
static NTSTATUS vboxUsbMonHandlePnPIoctl(PDEVICE_OBJECT pDevObj, PIO_STACK_LOCATION pSl, PIO_STATUS_BLOCK pIoStatus)
{
    LOG(("IRQL = %d", KeGetCurrentIrql()));
    switch(pSl->MinorFunction)
    {
        case IRP_MN_QUERY_DEVICE_TEXT:
        {
            LOG(("IRP_MN_QUERY_DEVICE_TEXT: pIoStatus->Status = %x", pIoStatus->Status));
            /* Only rewrite results the lower driver produced successfully. */
            if (pIoStatus->Status == STATUS_SUCCESS)
            {
                WCHAR *pId = (WCHAR *)pIoStatus->Information;
                if (VALID_PTR(pId))
                {
                    KIRQL Iqrl = KeGetCurrentIrql();
                    /* IRQL should be always passive here */
                    ASSERT_WARN(Iqrl == PASSIVE_LEVEL, ("irql is not PASSIVE"));
                    switch(pSl->Parameters.QueryDeviceText.DeviceTextType)
                    {
                        case DeviceTextLocationInformation:
                            LOG(("DeviceTextLocationInformation"));
                            LOG_STRW(pId);
                            break;
                        case DeviceTextDescription:
                            LOG(("DeviceTextDescription"));
                            LOG_STRW(pId);
                            if (VBoxUsbFltPdoIsFiltered(pDevObj))
                            {
                                LOG(("PDO (0x%p) is filtered", pDevObj));
                                /* NOTE: this inner pId intentionally shadows the outer one;
                                 * the original buffer stays reachable via pIoStatus->Information
                                 * and is freed below after the copy succeeds. */
                                WCHAR *pId = (WCHAR *)ExAllocatePool(PagedPool, sizeof(szDeviceTextDescription));
                                if (!pId)
                                {
                                    AssertFailed();
                                    break;
                                }
                                memcpy(pId, szDeviceTextDescription, sizeof(szDeviceTextDescription));
                                LOG(("NEW szDeviceTextDescription"));
                                LOG_STRW(pId);
                                /* Swap the result buffer: free the driver's and install ours. */
                                ExFreePool((PVOID)pIoStatus->Information);
                                pIoStatus->Information = (ULONG_PTR)pId;
                            }
                            else
                            {
                                LOG(("PDO (0x%p) is NOT filtered", pDevObj));
                            }
                            break;
                        default:
                            LOG(("DeviceText %d", pSl->Parameters.QueryDeviceText.DeviceTextType));
                            break;
                    }
                }
                else
                    LOG(("Invalid pointer %p", pId));
            }
            break;
        }
        case IRP_MN_QUERY_ID:
        {
            LOG(("IRP_MN_QUERY_ID: Irp->pIoStatus->Status = %x", pIoStatus->Status));
            if (pIoStatus->Status == STATUS_SUCCESS && pDevObj)
            {
                WCHAR *pId = (WCHAR *)pIoStatus->Information;
#ifdef VBOX_USB_WITH_VERBOSE_LOGGING
                WCHAR *pTmp;
#endif
                if (VALID_PTR(pId))
                {
                    KIRQL Iqrl = KeGetCurrentIrql();
                    /* IRQL should be always passive here */
                    ASSERT_WARN(Iqrl == PASSIVE_LEVEL, ("irql is not PASSIVE"));
                    switch (pSl->Parameters.QueryId.IdType)
                    {
                        case BusQueryInstanceID:
                            LOG(("BusQueryInstanceID"));
                            LOG_STRW(pId);
                            break;
                        case BusQueryDeviceID:
                        {
                            LOG(("BusQueryDeviceID"));
                            /* pId is reused for the replacement buffer; the original
                             * result remains reachable via pIoStatus->Information. */
                            pId = (WCHAR *)ExAllocatePool(PagedPool, sizeof(szBusQueryDeviceId));
                            if (!pId)
                            {
                                WARN(("ExAllocatePool failed"));
                                break;
                            }
                            /* Register the PDO with the filter engine; it reports
                             * whether an active filter matches this device. */
                            BOOLEAN bFiltered = FALSE;
                            NTSTATUS Status = VBoxUsbFltPdoAdd(pDevObj, &bFiltered);
                            if (Status != STATUS_SUCCESS || !bFiltered)
                            {
                                if (Status == STATUS_SUCCESS)
                                {
                                    LOG(("PDO (0x%p) is NOT filtered", pDevObj));
                                }
                                else
                                {
                                    WARN(("VBoxUsbFltPdoAdd for PDO (0x%p) failed Status 0x%x", pDevObj, Status));
                                }
                                /* Not capturing: discard our buffer, keep the original ID. */
                                ExFreePool(pId);
                                break;
                            }
                            LOG(("PDO (0x%p) is filtered", pDevObj));
                            /* Captured: free the real ID and return the synthetic one. */
                            ExFreePool((PVOID)pIoStatus->Information);
                            memcpy(pId, szBusQueryDeviceId, sizeof(szBusQueryDeviceId));
                            pIoStatus->Information = (ULONG_PTR)pId;
                            break;
                        }
                        case BusQueryHardwareIDs:
                        {
                            LOG(("BusQueryHardwareIDs"));
#ifdef VBOX_USB_WITH_VERBOSE_LOGGING
                            while (*pId) //MULTI_SZ
                            {
                                LOG_STRW(pId);
                                while (*pId) pId++;
                                pId++;
                            }
#endif
                            pId = (WCHAR *)ExAllocatePool(PagedPool, sizeof(szBusQueryHardwareIDs));
                            if (!pId)
                            {
                                WARN(("ExAllocatePool failed"));
                                break;
                            }
                            BOOLEAN bFiltered = FALSE;
                            NTSTATUS Status = VBoxUsbFltPdoAdd(pDevObj, &bFiltered);
                            if (Status != STATUS_SUCCESS || !bFiltered)
                            {
                                if (Status == STATUS_SUCCESS)
                                {
                                    LOG(("PDO (0x%p) is NOT filtered", pDevObj));
                                }
                                else
                                {
                                    WARN(("VBoxUsbFltPdoAdd for PDO (0x%p) failed Status 0x%x", pDevObj, Status));
                                }
                                ExFreePool(pId);
                                break;
                            }
                            LOG(("PDO (0x%p) is filtered", pDevObj));
                            memcpy(pId, szBusQueryHardwareIDs, sizeof(szBusQueryHardwareIDs));
#ifdef VBOX_USB_WITH_VERBOSE_LOGGING
                            LOG(("NEW BusQueryHardwareIDs"));
                            pTmp = pId;
                            while (*pTmp) //MULTI_SZ
                            {
                                LOG_STRW(pTmp);
                                while (*pTmp) pTmp++;
                                pTmp++;
                            }
#endif
                            /* Swap the result buffer for the synthetic hardware IDs. */
                            ExFreePool((PVOID)pIoStatus->Information);
                            pIoStatus->Information = (ULONG_PTR)pId;
                            break;
                        }
                        case BusQueryCompatibleIDs:
                            LOG(("BusQueryCompatibleIDs"));
#ifdef VBOX_USB_WITH_VERBOSE_LOGGING
                            while (*pId) //MULTI_SZ
                            {
                                LOG_STRW(pId);
                                while (*pId) pId++;
                                pId++;
                            }
#endif
                            /* Unlike the cases above, the PDO must already be known
                             * to the filter engine here; no VBoxUsbFltPdoAdd call. */
                            if (VBoxUsbFltPdoIsFiltered(pDevObj))
                            {
                                LOG(("PDO (0x%p) is filtered", pDevObj));
                                pId = (WCHAR *)ExAllocatePool(PagedPool, sizeof(szBusQueryCompatibleIDs));
                                if (!pId)
                                {
                                    WARN(("ExAllocatePool failed"));
                                    break;
                                }
                                memcpy(pId, szBusQueryCompatibleIDs, sizeof(szBusQueryCompatibleIDs));
#ifdef VBOX_USB_WITH_VERBOSE_LOGGING
                                LOG(("NEW BusQueryCompatibleIDs"));
                                pTmp = pId;
                                while (*pTmp) //MULTI_SZ
                                {
                                    LOG_STRW(pTmp);
                                    while (*pTmp) pTmp++;
                                    pTmp++;
                                }
#endif
                                ExFreePool((PVOID)pIoStatus->Information);
                                pIoStatus->Information = (ULONG_PTR)pId;
                            }
                            else
                            {
                                LOG(("PDO (0x%p) is NOT filtered", pDevObj));
                            }
                            break;
                        default:
                            /** @todo r=bird: handle BusQueryContainerID and whatever else we might see */
                            break;
                    }
                }
                else
                {
                    LOG(("Invalid pointer %p", pId));
                }
            }
            break;
        }
#ifdef VBOX_USB_WITH_VERBOSE_LOGGING
        /* The cases below are logging-only; they never modify the IRP result. */
        case IRP_MN_QUERY_DEVICE_RELATIONS:
        {
            switch(pSl->Parameters.QueryDeviceRelations.Type)
            {
                case BusRelations:
                    LOG(("BusRelations"));
                    if (pIoStatus->Status == STATUS_SUCCESS)
                    {
                        PDEVICE_RELATIONS pRel = (PDEVICE_RELATIONS)pIoStatus->Information;
                        LOG(("pRel = %p", pRel));
                        if (VALID_PTR(pRel))
                        {
                            for (unsigned i=0;i<pRel->Count;i++)
                            {
                                if (VBoxUsbFltPdoIsFiltered(pDevObj))
                                    LOG(("New PDO %p", pRel->Objects[i]));
                            }
                        }
                        else
                            LOG(("Invalid pointer %p", pRel));
                    }
                    break;
                case TargetDeviceRelation:
                    LOG(("TargetDeviceRelation"));
                    break;
                case RemovalRelations:
                    LOG(("RemovalRelations"));
                    break;
                case EjectionRelations:
                    LOG(("EjectionRelations"));
                    break;
                default:
                    LOG(("QueryDeviceRelations.Type=%d", pSl->Parameters.QueryDeviceRelations.Type));
            }
            break;
        }
        case IRP_MN_QUERY_CAPABILITIES:
        {
            LOG(("IRP_MN_QUERY_CAPABILITIES: pIoStatus->Status = %x", pIoStatus->Status));
            if (pIoStatus->Status == STATUS_SUCCESS)
            {
                PDEVICE_CAPABILITIES pCaps = pSl->Parameters.DeviceCapabilities.Capabilities;
                if (VALID_PTR(pCaps))
                {
                    LOG(("Caps.SilentInstall = %d", pCaps->SilentInstall));
                    LOG(("Caps.UniqueID = %d", pCaps->UniqueID ));
                    LOG(("Caps.Address = %d", pCaps->Address ));
                    LOG(("Caps.UINumber = %d", pCaps->UINumber ));
                }
                else
                    LOG(("Invalid pointer %p", pCaps));
            }
            break;
        }
        default:
            break;
#endif
    } /*switch */
    LOG(("Done returns %x (IRQL = %d)", pIoStatus->Status, KeGetCurrentIrql()));
    return pIoStatus->Status;
}
/**
 * IoCompletion routine for PnP IRPs intercepted by the hub PnP hook.
 *
 * Post-processes successful query IRPs (rewriting IDs/description for
 * captured devices via vboxUsbMonHandlePnPIoctl), tracks device removal,
 * then hands completion back to the hook framework and frees the per-request
 * context allocated in vboxUsbMonPnPHook.
 *
 * @param pDevObj Device object passed by the I/O manager (may be NULL, see below).
 * @param pIrp The completed IRP.
 * @param pvContext PVBOXUSBHOOK_REQUEST saved on the way down.
 */
NTSTATUS _stdcall VBoxUsbPnPCompletion(DEVICE_OBJECT *pDevObj, IRP *pIrp, void *pvContext)
{
    LOG(("Completion PDO(0x%p), IRP(0x%p), Status(0x%x)", pDevObj, pIrp, pIrp->IoStatus.Status));
    ASSERT_WARN(pvContext, ("zero context"));
    PVBOXUSBHOOK_REQUEST pRequest = (PVBOXUSBHOOK_REQUEST)pvContext;
    /* NOTE: despite a regular IRP processing the stack location in our completion
     * differs from those of the PnP hook since the hook is invoked in the "context" of the callee,
     * while the completion is in the "caller" context in terms of IRP,
     * so the completion stack location is one level "up" here.
     *
     * Moreover we CAN NOT access irp stack location in the completion because we might not have one at all
     * in case the hooked driver is at the top of the irp call stack
     *
     * This is why we use the stack location we saved on IRP way down.
     * */
    PIO_STACK_LOCATION pSl = &pRequest->OldLocation;
    ASSERT_WARN(pIrp == pRequest->pIrp, ("completed IRP(0x%x) not match request IRP(0x%x)", pIrp, pRequest->pIrp));
    /* NOTE: we can not rely on pDevObj passed in IoCompletion since it may be zero
     * in case IRP was created with extra stack locations and the caller did not initialize
     * the IO_STACK_LOCATION::DeviceObject */
    DEVICE_OBJECT *pRealDevObj = pRequest->pDevObj;
    // Assert(!pDevObj || pDevObj == pRealDevObj);
    // Assert(pSl->DeviceObject == pDevObj);
    switch(pSl->MinorFunction)
    {
        case IRP_MN_QUERY_DEVICE_TEXT:
        case IRP_MN_QUERY_ID:
#ifdef VBOX_USB_WITH_VERBOSE_LOGGING
        case IRP_MN_QUERY_DEVICE_RELATIONS:
        case IRP_MN_QUERY_CAPABILITIES:
#endif
            /* Only successful queries carry a result buffer worth rewriting. */
            if (NT_SUCCESS(pIrp->IoStatus.Status))
            {
                vboxUsbMonHandlePnPIoctl(pRealDevObj, pSl, &pIrp->IoStatus);
            }
            else
            {
                ASSERT_WARN(pIrp->IoStatus.Status == STATUS_NOT_SUPPORTED, ("Irp failed with status(0x%x)", pIrp->IoStatus.Status));
            }
            break;
        case IRP_MN_SURPRISE_REMOVAL:
        case IRP_MN_REMOVE_DEVICE:
            /* Drop the device from the filter engine's PDO tracking. */
            if (NT_SUCCESS(pIrp->IoStatus.Status))
            {
                VBoxUsbFltPdoRemove(pRealDevObj);
            }
            else
            {
                AssertFailed();
            }
            break;
        /* These two IRPs are received when the PnP subsystem has determined the id of the newly arrived device */
        /* IRP_MN_START_DEVICE only arrives if it's a USB device of a known class or with a present host driver */
        case IRP_MN_QUERY_RESOURCE_REQUIREMENTS:
        case IRP_MN_QUERY_RESOURCES:
            /* There used to be code to support SUPUSBFLT_IOCTL_SET_NOTIFY_EVENT but it was not reliable. */
        default:
            break;
    }
    LOG(("<==PnP: Mn(%s), PDO(0x%p), IRP(0x%p), Status(0x%x), Sl PDO(0x%p), Compl PDO(0x%p)",
            vboxUsbDbgStrPnPMn(pSl->MinorFunction),
            pRealDevObj, pIrp, pIrp->IoStatus.Status,
            pSl->DeviceObject, pDevObj));
#ifdef DEBUG_misha
    NTSTATUS tmpStatus = pIrp->IoStatus.Status;
#endif
    /* Complete the hooked request and release the resources taken on the way
     * down (the request context here, the hook reference below). */
    PVBOXUSBHOOK_ENTRY pHook = pRequest->pHook;
    NTSTATUS Status = VBoxUsbHookRequestComplete(pHook, pDevObj, pIrp, pRequest);
    VBoxUsbMonMemFree(pRequest);
#ifdef DEBUG_misha
    if (Status != STATUS_MORE_PROCESSING_REQUIRED)
    {
        Assert(pIrp->IoStatus.Status == tmpStatus);
    }
#endif
    VBoxUsbHookRelease(pHook);
    return Status;
}
/**
 * Device PnP hook
 *
 * Common body for the per-slot dispatch stubs: passes the PnP IRP down the
 * original dispatch routine with VBoxUsbPnPCompletion attached so the result
 * can be post-processed.
 *
 * @param pHook Hook entry of the driver whose dispatch routine was hooked.
 * @param pDevObj Device object.
 * @param pIrp Request packet.
 */
static NTSTATUS vboxUsbMonPnPHook(IN PVBOXUSBHOOK_ENTRY pHook, IN PDEVICE_OBJECT pDevObj, IN PIRP pIrp)
{
    LOG(("==>PnP: Mn(%s), PDO(0x%p), IRP(0x%p), Status(0x%x)", vboxUsbDbgStrPnPMn(IoGetCurrentIrpStackLocation(pIrp)->MinorFunction), pDevObj, pIrp, pIrp->IoStatus.Status));
    if (!VBoxUsbHookRetain(pHook))
    {
        /* Could not take a reference on the hook (presumably it is being torn
         * down — TODO confirm against VBoxUsbHook* implementation); forward
         * the IRP without installing our completion. */
        WARN(("VBoxUsbHookRetain failed"));
        return VBoxUsbHookRequestPassDownHookSkip(pHook, pDevObj, pIrp);
    }
    PVBOXUSBHUB_PNPHOOK_COMPLETION pCompletion = (PVBOXUSBHUB_PNPHOOK_COMPLETION)VBoxUsbMonMemAlloc(sizeof (*pCompletion));
    if (!pCompletion)
    {
        /* Out of memory: fail the IRP ourselves and undo the hook reference. */
        WARN(("VBoxUsbMonMemAlloc failed"));
        VBoxUsbHookRelease(pHook);
        pIrp->IoStatus.Status = STATUS_INSUFFICIENT_RESOURCES;
        pIrp->IoStatus.Information = 0;
        IoCompleteRequest(pIrp, IO_NO_INCREMENT);
        return STATUS_INSUFFICIENT_RESOURCES;
    }
    /* On success, pCompletion and the hook reference are released by
     * VBoxUsbPnPCompletion. */
    NTSTATUS Status = VBoxUsbHookRequestPassDownHookCompletion(pHook, pDevObj, pIrp, VBoxUsbPnPCompletion, &pCompletion->Rq);
#ifdef VBOX_USB_WITH_VERBOSE_LOGGING
    if (Status != STATUS_PENDING)
    {
        LOG(("Request completed, Status(0x%x)", Status));
        VBoxUsbHookVerifyCompletion(pHook, &pCompletion->Rq, pIrp);
    }
    else
    {
        LOG(("Request pending"));
    }
#endif
    return Status;
}
/**
 * Device PnP hook stubs.
 *
 * One distinct dispatch function is generated per driver slot because a
 * DRIVER_DISPATCH routine carries no user context; each stub forwards to
 * vboxUsbMonPnPHook with its slot's hook entry.
 *
 * @param pDevObj Device object.
 * @param pIrp Request packet.
 */
#define VBOX_PNPHOOKSTUB(n) NTSTATUS _stdcall VBoxUsbMonPnPHook##n(IN PDEVICE_OBJECT pDevObj, IN PIRP pIrp) \
{ \
    return vboxUsbMonPnPHook(&g_VBoxUsbMonGlobals.pDrivers[n].UsbHubPnPHook.Hook, pDevObj, pIrp); \
}

#define VBOX_PNPHOOKSTUB_INIT(n) g_VBoxUsbMonGlobals.pDrivers[n].pfnHookStub = VBoxUsbMonPnPHook##n

VBOX_PNPHOOKSTUB(0)
VBOX_PNPHOOKSTUB(1)
VBOX_PNPHOOKSTUB(2)
VBOX_PNPHOOKSTUB(3)
VBOX_PNPHOOKSTUB(4)
/* Keep the stub count in sync with the driver slot table size. */
AssertCompile(VBOXUSBMON_MAXDRIVERS == 5);
/* Walker context used when hooking hub driver objects (carries the driver
 * object across callback invocations). */
typedef struct VBOXUSBMONHOOKDRIVERWALKER
{
    PDRIVER_OBJECT pDrvObj;
} VBOXUSBMONHOOKDRIVERWALKER, *PVBOXUSBMONHOOKDRIVERWALKER;
/**
 * Logs an error to the system event log.
 *
 * @param ErrCode Error to report to event log.
 * @param ReturnedStatus Error that was reported by the driver to the caller.
 * @param uErrId Unique error id representing the location in the driver.
 * @param cbDumpData Number of bytes at pDumpData.
 * @param pDumpData Pointer to data that will be added to the message (see 'details' tab).
 *
 * NB: We only use IoLogMsg.dll as the message file, limiting
 * ErrCode to status codes and messages defined in ntiologc.h
 */
static void vboxUsbMonLogError(NTSTATUS ErrCode, NTSTATUS ReturnedStatus, ULONG uErrId, USHORT cbDumpData, PVOID pDumpData)
{
    /* Truncate dumps that do not fit into IO_ERROR_LOG_PACKET. */
    if (FIELD_OFFSET(IO_ERROR_LOG_PACKET, DumpData) + cbDumpData > ERROR_LOG_MAXIMUM_SIZE)
        cbDumpData = ERROR_LOG_MAXIMUM_SIZE - FIELD_OFFSET(IO_ERROR_LOG_PACKET, DumpData);

    PIO_ERROR_LOG_PACKET pEntry =
        (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(g_VBoxUsbMonGlobals.pDevObj,
                                                      FIELD_OFFSET(IO_ERROR_LOG_PACKET, DumpData) + cbDumpData);
    if (!pEntry)
    {
        LOG(("Failed to allocate error log entry (cb=%d)\n", FIELD_OFFSET(IO_ERROR_LOG_PACKET, DumpData) + cbDumpData));
        return;
    }

    if (cbDumpData)
        memcpy(pEntry->DumpData, pDumpData, cbDumpData);

    pEntry->MajorFunctionCode = 0;
    pEntry->RetryCount        = 0;
    pEntry->DumpDataSize      = cbDumpData;
    pEntry->NumberOfStrings   = 0;
    pEntry->StringOffset      = 0;
    pEntry->ErrorCode         = ErrCode;
    pEntry->UniqueErrorValue  = uErrId;
    pEntry->FinalStatus       = ReturnedStatus;
    pEntry->IoControlCode     = 0;

    IoWriteErrorLogEntry(pEntry);
}
/**
 * Hub-device walker callback: hooks IRP_MJ_PNP of the hub's driver object.
 *
 * Fixes vs. original: the second loop contained a duplicate "already hooked"
 * check that is unreachable (the first loop already scanned every slot), and
 * ansiDrvName was passed to the error logger with an uninitialized Buffer
 * when the Unicode->ANSI conversion failed.
 *
 * @param pHubFile   Hub file object (unused).
 * @param pHubDo     Hub device object whose driver we hook.
 * @param pvContext  Walk context (unused).
 * @return TRUE to continue the walk, FALSE to stop (no free slots).
 */
static DECLCALLBACK(BOOLEAN) vboxUsbMonHookDrvObjWalker(PFILE_OBJECT pHubFile, PDEVICE_OBJECT pHubDo, PVOID pvContext)
{
    RT_NOREF2(pHubFile, pvContext);
    PDRIVER_OBJECT pDrvObj = pHubDo->DriverObject;
    /* First we try to figure out if we are already hooked to this driver. */
    for (int i = 0; i < VBOXUSBMON_MAXDRIVERS; i++)
        if (pDrvObj == g_VBoxUsbMonGlobals.pDrivers[i].DriverObject)
        {
            LOG(("Found %p at pDrivers[%d]\n", pDrvObj, i));
            /* We've already hooked to this one -- nothing to do. */
            return TRUE;
        }
    /* We are not hooked yet, find an empty slot.  No need to re-check for
     * pDrvObj here: the loop above already scanned every slot. */
    for (int i = 0; i < VBOXUSBMON_MAXDRIVERS; i++)
    {
        if (!g_VBoxUsbMonGlobals.pDrivers[i].DriverObject)
        {
            /* Found an empty slot, use it. Keep a reference on the driver
             * object for as long as the hook is installed. */
            g_VBoxUsbMonGlobals.pDrivers[i].DriverObject = pDrvObj;
            ObReferenceObject(pDrvObj);
            LOG(("pDrivers[%d] = %p, installing the hook...\n", i, pDrvObj));
            VBoxUsbHookInit(&g_VBoxUsbMonGlobals.pDrivers[i].UsbHubPnPHook.Hook,
                            pDrvObj,
                            IRP_MJ_PNP,
                            g_VBoxUsbMonGlobals.pDrivers[i].pfnHookStub);
            VBoxUsbHookInstall(&g_VBoxUsbMonGlobals.pDrivers[i].UsbHubPnPHook.Hook);
            return TRUE; /* Must continue to find all drivers. */
        }
    }
    /* No empty slots! No reason to continue. */
    LOG(("No empty slots!\n"));
    ANSI_STRING ansiDrvName;
    ansiDrvName.Length = 0;
    ansiDrvName.Buffer = NULL; /* Never hand a garbage pointer to the logger. */
    NTSTATUS Status = RtlUnicodeStringToAnsiString(&ansiDrvName, &pDrvObj->DriverName, true);
    if (Status != STATUS_SUCCESS)
    {
        ansiDrvName.Length = 0;
        LOG(("RtlUnicodeStringToAnsiString failed with 0x%x", Status));
    }
    vboxUsbMonLogError(IO_ERR_INSUFFICIENT_RESOURCES, STATUS_SUCCESS, 1, ansiDrvName.Length, ansiDrvName.Buffer);
    if (Status == STATUS_SUCCESS)
        RtlFreeAnsiString(&ansiDrvName);
    return FALSE;
}
/**
 * Finds all USB drivers in the system and installs hooks if haven't done already.
 */
static NTSTATUS vboxUsbMonInstallAllHooks()
{
    /* The walker itself skips drivers that are already hooked, so calling this
     * repeatedly is safe. */
    vboxUsbMonHubDevWalk(vboxUsbMonHookDrvObjWalker, NULL);
    return STATUS_SUCCESS;
}
/* Installs the hooks on the first open. */
static NTSTATUS vboxUsbMonHookCheckInit()
{
    /* NOTE(review): fIsHookInited is never set to true anywhere in this file,
     * so the early-return path looks unreachable and every first-open rescans
     * the hub drivers (which is harmless, see vboxUsbMonInstallAllHooks).
     * Confirm whether the flag is intentionally vestigial. */
    static bool fIsHookInited = false;
    if (fIsHookInited)
    {
        LOG(("hook inited already, success"));
        return STATUS_SUCCESS;
    }
    return vboxUsbMonInstallAllHooks();
}
/* Kept as a stub for symmetry with vboxUsbMonHookUninstall(). */
static NTSTATUS vboxUsbMonHookInstall()
{
    /* Nothing to do here as we have already installed all hooks in vboxUsbMonHookCheckInit(). */
    return STATUS_SUCCESS;
}
/* Removes the PnP hooks from every hooked hub driver.
 * On unhook failure the slot keeps its driver reference and is latched as
 * fUninitFailed so the slot can never be reused; returns the last failure. */
static NTSTATUS vboxUsbMonHookUninstall()
{
#ifdef VBOXUSBMON_DBG_NO_PNPHOOK
    return STATUS_SUCCESS;
#else
    NTSTATUS Status = STATUS_SUCCESS;
    for (int i = 0; i < VBOXUSBMON_MAXDRIVERS; i++)
    {
        if (g_VBoxUsbMonGlobals.pDrivers[i].DriverObject)
        {
            Assert(g_VBoxUsbMonGlobals.pDrivers[i].DriverObject == g_VBoxUsbMonGlobals.pDrivers[i].UsbHubPnPHook.Hook.pDrvObj);
            LOG(("Unhooking from %p...\n", g_VBoxUsbMonGlobals.pDrivers[i].DriverObject));
            Status = VBoxUsbHookUninstall(&g_VBoxUsbMonGlobals.pDrivers[i].UsbHubPnPHook.Hook);
            if (!NT_SUCCESS(Status))
            {
                /*
                 * We failed to uninstall the hook, so we keep the reference to the driver
                 * in order to prevent another driver re-using this slot because we are
                 * going to mark this hook as fUninitFailed.
                 */
                //AssertMsgFailed(("usbhub pnp unhook failed, setting the fUninitFailed flag, the current value of fUninitFailed (%d)", g_VBoxUsbMonGlobals.UsbHubPnPHook.fUninitFailed));
                LOG(("usbhub pnp unhook failed, setting the fUninitFailed flag, the current value of fUninitFailed (%d)", g_VBoxUsbMonGlobals.pDrivers[i].UsbHubPnPHook.fUninitFailed));
                g_VBoxUsbMonGlobals.pDrivers[i].UsbHubPnPHook.fUninitFailed = true;
            }
            else
            {
                /* The hook was removed successfully, now we can forget about this driver.
                 * Drop the reference taken when the hook was installed. */
                ObDereferenceObject(g_VBoxUsbMonGlobals.pDrivers[i].DriverObject);
                g_VBoxUsbMonGlobals.pDrivers[i].DriverObject = NULL;
            }
        }
    }
    return Status;
#endif
}
/* Per-close teardown: under the OpenSynchEvent "lock", drops the open count
 * and, on the last close, unhooks the hub drivers and terminates the filter
 * engine.  Returns the wait status, or the unhook status on the last close. */
static NTSTATUS vboxUsbMonCheckTermStuff()
{
    NTSTATUS Status = KeWaitForSingleObject(&g_VBoxUsbMonGlobals.OpenSynchEvent,
                                            Executive, KernelMode,
                                            FALSE, /* BOOLEAN Alertable */
                                            NULL /* IN PLARGE_INTEGER Timeout */
                                            );
    AssertRelease(Status == STATUS_SUCCESS);
    if (!--g_VBoxUsbMonGlobals.cOpens)
    {
        /* Last close: tear everything down. */
        Status = vboxUsbMonHookUninstall();
        NTSTATUS tmpStatus = VBoxUsbFltTerm();
        if (!NT_SUCCESS(tmpStatus))
        {
            /* this means a driver state is screwed up, KeBugCheckEx here ? */
            AssertReleaseFailed();
        }
    }
    KeSetEvent(&g_VBoxUsbMonGlobals.OpenSynchEvent, 0, FALSE);
    return Status;
}
/* Per-open setup: under the OpenSynchEvent "lock", bumps the open count and,
 * on the first open only, initializes the filter engine and installs the hub
 * hooks.  On any failure the open count is rolled back so a later open can
 * retry from scratch. */
static NTSTATUS vboxUsbMonCheckInitStuff()
{
    NTSTATUS Status = KeWaitForSingleObject(&g_VBoxUsbMonGlobals.OpenSynchEvent,
                                            Executive, KernelMode,
                                            FALSE, /* BOOLEAN Alertable */
                                            NULL /* IN PLARGE_INTEGER Timeout */
                                            );
    if (Status == STATUS_SUCCESS)
    {
        do
        {
            /* Not the first open: everything is already initialized. */
            if (g_VBoxUsbMonGlobals.cOpens++)
            {
                LOG(("opens: %d, success", g_VBoxUsbMonGlobals.cOpens));
                break;
            }
            /* First open: bring up the filter engine, then the hooks. */
            Status = VBoxUsbFltInit();
            if (NT_SUCCESS(Status))
            {
                Status = vboxUsbMonHookCheckInit();
                if (NT_SUCCESS(Status))
                {
                    Status = vboxUsbMonHookInstall();
                    if (NT_SUCCESS(Status))
                    {
                        Status = STATUS_SUCCESS;
                        LOG(("succeded!!"));
                        break;
                    }
                    else
                    {
                        WARN(("vboxUsbMonHookInstall failed, Status (0x%x)", Status));
                    }
                }
                else
                {
                    WARN(("vboxUsbMonHookCheckInit failed, Status (0x%x)", Status));
                }
                /* Undo the filter init; hooks (if any got in) stay until unload. */
                VBoxUsbFltTerm();
            }
            else
            {
                WARN(("VBoxUsbFltInit failed, Status (0x%x)", Status));
            }
            /* Failure path: roll the open count back to zero. */
            --g_VBoxUsbMonGlobals.cOpens;
            Assert(!g_VBoxUsbMonGlobals.cOpens);
        } while (0);
        KeSetEvent(&g_VBoxUsbMonGlobals.OpenSynchEvent, 0, FALSE);
    }
    else
    {
        WARN(("KeWaitForSingleObject failed, Status (0x%x)", Status));
    }
    return Status;
}
/* Allocates a per-open context, runs the first-open initialization and
 * creates the filter context.  On success *ppCtx receives the new context;
 * on any failure *ppCtx stays NULL and everything is rolled back. */
static NTSTATUS vboxUsbMonContextCreate(PVBOXUSBMONCTX *ppCtx)
{
    *ppCtx = NULL;
    PVBOXUSBMONCTX pFileCtx = (PVBOXUSBMONCTX)VBoxUsbMonMemAllocZ(sizeof (*pFileCtx));
    if (!pFileCtx)
    {
        WARN(("VBoxUsbMonMemAllocZ failed"));
        return STATUS_NO_MEMORY;
    }
    NTSTATUS Status = vboxUsbMonCheckInitStuff();
    if (Status == STATUS_SUCCESS)
    {
        Status = VBoxUsbFltCreate(&pFileCtx->FltCtx);
        if (Status == STATUS_SUCCESS)
        {
            *ppCtx = pFileCtx;
            LOG(("succeeded!!"));
            return STATUS_SUCCESS;
        }
        WARN(("VBoxUsbFltCreate failed"));
        /* Undo the open-count bump / first-open init. */
        vboxUsbMonCheckTermStuff();
    }
    else
    {
        WARN(("vboxUsbMonCheckInitStuff failed"));
    }
    VBoxUsbMonMemFree(pFileCtx);
    return Status;
}
/* Closes a per-open context: tears down the filter context first and only
 * frees the context (and drops the open count) when that succeeded. */
static NTSTATUS vboxUsbMonContextClose(PVBOXUSBMONCTX pCtx)
{
    NTSTATUS Status = VBoxUsbFltClose(&pCtx->FltCtx);
    if (Status != STATUS_SUCCESS)
        return Status;
    Status = vboxUsbMonCheckTermStuff();
    Assert(Status == STATUS_SUCCESS);
    /* ignore the failure */
    VBoxUsbMonMemFree(pCtx);
    return Status;
}
/* IRP_MJ_CLOSE handler.  If the context cannot be torn down cleanly, the
 * driver takes a file-object reference on its own device so it can never be
 * unloaded with a broken hook still installed, then reports success to the
 * caller anyway. */
static NTSTATUS _stdcall VBoxUsbMonClose(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    PIO_STACK_LOCATION pStack = IoGetCurrentIrpStackLocation(pIrp);
    PFILE_OBJECT pFileObj = pStack->FileObject;
    Assert(pFileObj->FsContext);
    PVBOXUSBMONCTX pCtx = (PVBOXUSBMONCTX)pFileObj->FsContext;
    LOG(("VBoxUsbMonClose"));
    NTSTATUS Status = vboxUsbMonContextClose(pCtx);
    if (Status != STATUS_SUCCESS)
    {
        WARN(("vboxUsbMonContextClose failed, Status (0x%x), prevent unload", Status));
        /* Only the first failing close installs the unload-prevention reference. */
        if (!InterlockedExchange(&g_VBoxUsbMonGlobals.ulPreventUnloadOn, 1))
        {
            LOGREL(("ulPreventUnloadOn not set, preventing unload"));
            UNICODE_STRING UniName;
            PDEVICE_OBJECT pTmpDevObj;
            RtlInitUnicodeString(&UniName, USBMON_DEVICE_NAME_NT);
            /* The file-object reference held in pPreventUnloadFileObj pins the driver. */
            NTSTATUS tmpStatus = IoGetDeviceObjectPointer(&UniName, FILE_ALL_ACCESS, &g_VBoxUsbMonGlobals.pPreventUnloadFileObj, &pTmpDevObj);
            AssertRelease(NT_SUCCESS(tmpStatus));
            AssertRelease(pTmpDevObj == pDevObj);
        }
        else
        {
            WARN(("ulPreventUnloadOn already set"));
        }
        LOG(("success!!"));
        /* Report success to the I/O manager regardless -- the handle is gone. */
        Status = STATUS_SUCCESS;
    }
    pFileObj->FsContext = NULL;
    pIrp->IoStatus.Status = Status;
    pIrp->IoStatus.Information = 0;
    IoCompleteRequest(pIrp, IO_NO_INCREMENT);
    return Status;
}
/* IRP_MJ_CREATE handler: rejects directory opens and attaches a freshly
 * created monitor context to the file object. */
static NTSTATUS _stdcall VBoxUsbMonCreate(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    RT_NOREF1(pDevObj);
    PIO_STACK_LOCATION pStack = IoGetCurrentIrpStackLocation(pIrp);
    PFILE_OBJECT pFileObj = pStack->FileObject;
    LOG(("VBoxUSBMonCreate"));
    NTSTATUS Status;
    if (pStack->Parameters.Create.Options & FILE_DIRECTORY_FILE)
    {
        WARN(("trying to open as a directory"));
        Status = STATUS_NOT_A_DIRECTORY;
    }
    else
    {
        pFileObj->FsContext = NULL;
        PVBOXUSBMONCTX pCtx = NULL;
        Status = vboxUsbMonContextCreate(&pCtx);
        if (Status == STATUS_SUCCESS)
        {
            Assert(pCtx);
            /* The context travels with the file object until IRP_MJ_CLOSE. */
            pFileObj->FsContext = pCtx;
        }
        else
        {
            WARN(("vboxUsbMonContextCreate failed Status (0x%x)", Status));
        }
    }
    pIrp->IoStatus.Status = Status;
    pIrp->IoStatus.Information = 0;
    IoCompleteRequest(pIrp, IO_NO_INCREMENT);
    return Status;
}
/* Adds a USB filter for this open context; *pId receives the filter handle.
 * Returns an IPRT status (VINF_SUCCESS on success). */
static int VBoxUsbMonFltAdd(PVBOXUSBMONCTX pContext, PUSBFILTER pFilter, uintptr_t *pId)
{
#ifdef VBOXUSBMON_DBG_NO_FILTERS
    /* Debug build knob: hand out fake ids without touching the filter engine. */
    static uintptr_t idDummy = 1;
    *pId = idDummy;
    ++idDummy;
    return VINF_SUCCESS;
#else
    int rc = VBoxUsbFltAdd(&pContext->FltCtx, pFilter, pId);
    return rc;
#endif
}
/* Removes the filter previously returned via VBoxUsbMonFltAdd(). */
static int VBoxUsbMonFltRemove(PVBOXUSBMONCTX pContext, uintptr_t uId)
{
#ifdef VBOXUSBMON_DBG_NO_FILTERS
    return VINF_SUCCESS;
#else
    int rc = VBoxUsbFltRemove(&pContext->FltCtx, uId);
    return rc;
#endif
}
/* Re-evaluates the installed filters against the currently attached devices. */
static NTSTATUS VBoxUsbMonRunFilters(PVBOXUSBMONCTX pContext)
{
    return VBoxUsbFltFilterCheck(&pContext->FltCtx);
}
/* Looks up the device identified by hDevice and fills *pInfo for user mode. */
static NTSTATUS VBoxUsbMonGetDevice(PVBOXUSBMONCTX pContext, HVBOXUSBDEVUSR hDevice, PUSBSUP_GETDEV_MON pInfo)
{
    return VBoxUsbFltGetDevice(&pContext->FltCtx, hDevice, pInfo);
}
/**
 * Dispatches user-mode SUPUSBFLT_IOCTL_* requests (buffered I/O: pvBuffer is
 * both input and output).
 *
 * Fixes vs. original: the REMOVE_FILTER case asserted
 * Assert(cbOutBuffer == (ULONG)*pRc), i.e. it compared the buffer SIZE with
 * the (not yet written) buffer CONTENT -- the intended check is against
 * sizeof(*pRc); and the GET_DEVICE "hDevice is NULL" WARN passed four stray
 * arguments to a format string with no conversion specifiers.
 *
 * @param pContext    Per-open monitor context.
 * @param Ctl         I/O control code.
 * @param pvBuffer    Shared in/out system buffer (may be NULL).
 * @param cbInBuffer  Input size in bytes.
 * @param cbOutBuffer Output size in bytes.
 * @param pInfo       Receives the number of bytes written to pvBuffer.
 */
static NTSTATUS vboxUsbMonIoctlDispatch(PVBOXUSBMONCTX pContext, ULONG Ctl, PVOID pvBuffer, ULONG cbInBuffer,
                                        ULONG cbOutBuffer, ULONG_PTR *pInfo)
{
    NTSTATUS Status = STATUS_SUCCESS;
    ULONG_PTR Info = 0;
    switch (Ctl)
    {
        case SUPUSBFLT_IOCTL_GET_VERSION:
        {
            PUSBSUP_VERSION pOut = (PUSBSUP_VERSION)pvBuffer;
            LOG(("SUPUSBFLT_IOCTL_GET_VERSION"));
            if (!pvBuffer || cbOutBuffer != sizeof(*pOut) || cbInBuffer != 0)
            {
                WARN(("SUPUSBFLT_IOCTL_GET_VERSION: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.",
                      cbInBuffer, 0, cbOutBuffer, sizeof (*pOut)));
                Status = STATUS_INVALID_PARAMETER;
                break;
            }
            pOut->u32Major = USBMON_MAJOR_VERSION;
            pOut->u32Minor = USBMON_MINOR_VERSION;
            Info = sizeof (*pOut);
            ASSERT_WARN(Status == STATUS_SUCCESS, ("unexpected status, 0x%x", Status));
            break;
        }
        case SUPUSBFLT_IOCTL_ADD_FILTER:
        {
            /* Input: the filter to install; output: IPRT rc + assigned filter id. */
            PUSBFILTER pFilter = (PUSBFILTER)pvBuffer;
            PUSBSUP_FLTADDOUT pOut = (PUSBSUP_FLTADDOUT)pvBuffer;
            uintptr_t uId = 0;
            int rc;
            if (RT_UNLIKELY(!pvBuffer || cbInBuffer != sizeof (*pFilter) || cbOutBuffer != sizeof (*pOut)))
            {
                WARN(("SUPUSBFLT_IOCTL_ADD_FILTER: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.",
                      cbInBuffer, sizeof (*pFilter), cbOutBuffer, sizeof (*pOut)));
                Status = STATUS_INVALID_PARAMETER;
                break;
            }
            rc = VBoxUsbMonFltAdd(pContext, pFilter, &uId);
            /* Failures are reported via pOut->rc, not via the NTSTATUS. */
            pOut->rc = rc;
            pOut->uId = uId;
            Info = sizeof (*pOut);
            ASSERT_WARN(Status == STATUS_SUCCESS, ("unexpected status, 0x%x", Status));
            break;
        }
        case SUPUSBFLT_IOCTL_REMOVE_FILTER:
        {
            uintptr_t *pIn = (uintptr_t *)pvBuffer;
            int *pRc = (int *)pvBuffer;
            /* The IPRT rc output is optional: cbOutBuffer may be 0. */
            if (!pvBuffer || cbInBuffer != sizeof (*pIn) || (cbOutBuffer && cbOutBuffer != sizeof (*pRc)))
            {
                WARN(("SUPUSBFLT_IOCTL_REMOVE_FILTER: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.",
                      cbInBuffer, sizeof (*pIn), cbOutBuffer, 0));
                Status = STATUS_INVALID_PARAMETER;
                break;
            }
            LOG(("SUPUSBFLT_IOCTL_REMOVE_FILTER %x", *pIn));
            int rc = VBoxUsbMonFltRemove(pContext, *pIn);
            if (cbOutBuffer)
            {
                /* we've validated that already */
                Assert(cbOutBuffer == sizeof (*pRc));
                *pRc = rc;
                Info = sizeof (*pRc);
            }
            ASSERT_WARN(Status == STATUS_SUCCESS, ("unexpected status, 0x%x", Status));
            break;
        }
        case SUPUSBFLT_IOCTL_RUN_FILTERS:
        {
            /* No payload in either direction. */
            if (pvBuffer || cbInBuffer || cbOutBuffer)
            {
                WARN(("SUPUSBFLT_IOCTL_RUN_FILTERS: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.",
                      cbInBuffer, 0, cbOutBuffer, 0));
                Status = STATUS_INVALID_PARAMETER;
                break;
            }
            LOG(("SUPUSBFLT_IOCTL_RUN_FILTERS "));
            Status = VBoxUsbMonRunFilters(pContext);
            ASSERT_WARN(Status != STATUS_PENDING, ("status pending!"));
            break;
        }
        case SUPUSBFLT_IOCTL_GET_DEVICE:
        {
            HVBOXUSBDEVUSR hDevice = *((HVBOXUSBDEVUSR*)pvBuffer);
            PUSBSUP_GETDEV_MON pOut = (PUSBSUP_GETDEV_MON)pvBuffer;
            if (!pvBuffer || cbInBuffer != sizeof (hDevice) || cbOutBuffer < sizeof (*pOut))
            {
                WARN(("SUPUSBFLT_IOCTL_GET_DEVICE: Invalid input/output sizes! cbIn=%d expected %d. cbOut=%d expected >= %d.",
                      cbInBuffer, sizeof (hDevice), cbOutBuffer, sizeof (*pOut)));
                Status = STATUS_INVALID_PARAMETER;
                break;
            }
            if (!hDevice)
            {
                WARN(("SUPUSBFLT_IOCTL_GET_DEVICE: hDevice is NULL!"));
                Status = STATUS_INVALID_PARAMETER;
                break;
            }
            Status = VBoxUsbMonGetDevice(pContext, hDevice, pOut);
            if (NT_SUCCESS(Status))
            {
                Info = sizeof (*pOut);
            }
            else
            {
                WARN(("VBoxUsbMonGetDevice fail 0x%x", Status));
            }
            break;
        }
        default:
            WARN(("Unknown code 0x%x", Ctl));
            Status = STATUS_INVALID_PARAMETER;
            break;
    }
    ASSERT_WARN(Status != STATUS_PENDING, ("Status pending!"));
    *pInfo = Info;
    return Status;
}
/* IRP_MJ_DEVICE_CONTROL handler: guards the dispatch with the remove lock so
 * the device cannot be deleted mid-request, then completes the IRP with
 * whatever vboxUsbMonIoctlDispatch() produced. */
static NTSTATUS _stdcall VBoxUsbMonDeviceControl(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    ULONG_PTR Info = 0;
    NTSTATUS Status = IoAcquireRemoveLock(&g_VBoxUsbMonGlobals.RmLock, pDevObj);
    if (NT_SUCCESS(Status))
    {
        PIO_STACK_LOCATION pSl = IoGetCurrentIrpStackLocation(pIrp);
        PFILE_OBJECT pFileObj = pSl->FileObject;
        Assert(pFileObj);
        Assert(pFileObj->FsContext);
        /* The per-open context was attached by VBoxUsbMonCreate(). */
        PVBOXUSBMONCTX pCtx = (PVBOXUSBMONCTX)pFileObj->FsContext;
        Assert(pCtx);
        Status = vboxUsbMonIoctlDispatch(pCtx,
                                         pSl->Parameters.DeviceIoControl.IoControlCode,
                                         pIrp->AssociatedIrp.SystemBuffer,
                                         pSl->Parameters.DeviceIoControl.InputBufferLength,
                                         pSl->Parameters.DeviceIoControl.OutputBufferLength,
                                         &Info);
        ASSERT_WARN(Status != STATUS_PENDING, ("Status pending"));
        IoReleaseRemoveLock(&g_VBoxUsbMonGlobals.RmLock, pDevObj);
    }
    else
    {
        WARN(("IoAcquireRemoveLock failed Status (0x%x)", Status));
    }
    /* Always complete here -- nothing is ever pended. */
    pIrp->IoStatus.Information = Info;
    pIrp->IoStatus.Status = Status;
    IoCompleteRequest (pIrp, IO_NO_INCREMENT);
    return Status;
}
/* Dispatches kernel-to-kernel (VBOXUSBIDC_INTERNAL_IOCTL_*) requests coming
 * from the VBoxUsb device driver.  pvBuffer is Parameters.Others.Argument1,
 * i.e. a kernel pointer shared with the caller, not a buffered-I/O copy. */
static NTSTATUS vboxUsbMonInternalIoctlDispatch(ULONG Ctl, PVOID pvBuffer, ULONG_PTR *pInfo)
{
    NTSTATUS Status = STATUS_SUCCESS;
    *pInfo = 0;
    switch (Ctl)
    {
        case VBOXUSBIDC_INTERNAL_IOCTL_GET_VERSION:
        {
            PVBOXUSBIDC_VERSION pOut = (PVBOXUSBIDC_VERSION)pvBuffer;
            LOG(("VBOXUSBIDC_INTERNAL_IOCTL_GET_VERSION"));
            if (!pvBuffer)
            {
                WARN(("VBOXUSBIDC_INTERNAL_IOCTL_GET_VERSION: Buffer is NULL"));
                Status = STATUS_INVALID_PARAMETER;
                break;
            }
            pOut->u32Major = VBOXUSBIDC_VERSION_MAJOR;
            pOut->u32Minor = VBOXUSBIDC_VERSION_MINOR;
            ASSERT_WARN(Status == STATUS_SUCCESS, ("unexpected status, 0x%x", Status));
            break;
        }
        case VBOXUSBIDC_INTERNAL_IOCTL_PROXY_STARTUP:
        {
            /* In: u.pPDO (captured device's PDO); out: u.hDev (filter handle). */
            PVBOXUSBIDC_PROXY_STARTUP pOut = (PVBOXUSBIDC_PROXY_STARTUP)pvBuffer;
            LOG(("VBOXUSBIDC_INTERNAL_IOCTL_PROXY_STARTUP"));
            if (!pvBuffer)
            {
                WARN(("VBOXUSBIDC_INTERNAL_IOCTL_PROXY_STARTUP: Buffer is NULL"));
                Status = STATUS_INVALID_PARAMETER;
                break;
            }
            PDEVICE_OBJECT pDevObj = pOut->u.pPDO;
            pOut->u.hDev = VBoxUsbFltProxyStarted(pDevObj);
            /* If we couldn't find the PDO in our list, that's a real problem and
             * the capturing will not really work. Log an error.
             */
            if (!pOut->u.hDev)
                vboxUsbMonLogError(IO_ERR_DRIVER_ERROR, STATUS_SUCCESS, 2, sizeof("INTERNAL_IOCTL_PROXY_STARTUP"), "INTERNAL_IOCTL_PROXY_STARTUP");
            ASSERT_WARN(pOut->u.hDev, ("zero hDev"));
            ASSERT_WARN(Status == STATUS_SUCCESS, ("unexpected status, 0x%x", Status));
            break;
        }
        case VBOXUSBIDC_INTERNAL_IOCTL_PROXY_TEARDOWN:
        {
            PVBOXUSBIDC_PROXY_TEARDOWN pOut = (PVBOXUSBIDC_PROXY_TEARDOWN)pvBuffer;
            LOG(("VBOXUSBIDC_INTERNAL_IOCTL_PROXY_TEARDOWN"));
            if (!pvBuffer)
            {
                WARN(("VBOXUSBIDC_INTERNAL_IOCTL_PROXY_TEARDOWN: Buffer is NULL"));
                Status = STATUS_INVALID_PARAMETER;
                break;
            }
            ASSERT_WARN(pOut->hDev, ("zero hDev"));
            VBoxUsbFltProxyStopped(pOut->hDev);
            ASSERT_WARN(Status == STATUS_SUCCESS, ("unexpected status, 0x%x", Status));
            break;
        }
        default:
        {
            WARN(("Unknown code 0x%x", Ctl));
            Status = STATUS_INVALID_PARAMETER;
            break;
        }
    }
    return Status;
}
/* IRP_MJ_INTERNAL_DEVICE_CONTROL handler for kernel-mode callers; same
 * remove-lock guard as VBoxUsbMonDeviceControl, but the payload pointer is
 * taken from Parameters.Others.Argument1 (no buffered I/O). */
static NTSTATUS _stdcall VBoxUsbMonInternalDeviceControl(PDEVICE_OBJECT pDevObj, PIRP pIrp)
{
    ULONG_PTR Info = 0;
    NTSTATUS Status = IoAcquireRemoveLock(&g_VBoxUsbMonGlobals.RmLock, pDevObj);
    if (NT_SUCCESS(Status))
    {
        PIO_STACK_LOCATION pSl = IoGetCurrentIrpStackLocation(pIrp);
        Status = vboxUsbMonInternalIoctlDispatch(pSl->Parameters.DeviceIoControl.IoControlCode,
                                                 pSl->Parameters.Others.Argument1,
                                                 &Info);
        Assert(Status != STATUS_PENDING);
        IoReleaseRemoveLock(&g_VBoxUsbMonGlobals.RmLock, pDevObj);
    }
    pIrp->IoStatus.Information = Info;
    pIrp->IoStatus.Status = Status;
    IoCompleteRequest (pIrp, IO_NO_INCREMENT);
    return Status;
}
/**
 * Unload the driver.
 *
 * Waits for all in-flight dispatch routines to drain (remove lock), deletes
 * the DOS symlink and the device object, then tears down the IPRT loggers.
 *
 * @param pDrvObj     Driver object.
 */
static void _stdcall VBoxUsbMonUnload(PDRIVER_OBJECT pDrvObj)
{
    RT_NOREF1(pDrvObj);
    LOG(("VBoxUSBMonUnload pDrvObj (0x%p)", pDrvObj));
    /* Releases the initial reference taken in DriverEntry and blocks until
     * every IoAcquireRemoveLock holder has released. */
    IoReleaseRemoveLockAndWait(&g_VBoxUsbMonGlobals.RmLock, &g_VBoxUsbMonGlobals);
    Assert(!g_VBoxUsbMonGlobals.cOpens);
    UNICODE_STRING DosName;
    RtlInitUnicodeString(&DosName, USBMON_DEVICE_NAME_DOS);
    IoDeleteSymbolicLink(&DosName);
    IoDeleteDevice(g_VBoxUsbMonGlobals.pDevObj);
    /* cleanup the logger */
    PRTLOGGER pLogger = RTLogRelSetDefaultInstance(NULL);
    if (pLogger)
        RTLogDestroy(pLogger);
    pLogger = RTLogSetDefaultInstance(NULL);
    if (pLogger)
        RTLogDestroy(pLogger);
}
RT_C_DECLS_BEGIN
NTSTATUS _stdcall DriverEntry(PDRIVER_OBJECT pDrvObj, PUNICODE_STRING pRegPath);
RT_C_DECLS_END
/**
* Driver entry point.
*
* @returns appropriate status code.
* @param pDrvObj Pointer to driver object.
* @param pRegPath Registry base path.
*/
NTSTATUS _stdcall DriverEntry(PDRIVER_OBJECT pDrvObj, PUNICODE_STRING pRegPath)
{
    RT_NOREF1(pRegPath);
#ifdef VBOX_USB_WITH_VERBOSE_LOGGING
    RTLogGroupSettings(0, "+default.e.l.f.l2.l3");
    RTLogDestinations(0, "debugger");
#endif
    LOGREL(("Built %s %s", __DATE__, __TIME__));
    /* Globals first: the hook stubs and slots must be clean before anything runs. */
    memset (&g_VBoxUsbMonGlobals, 0, sizeof (g_VBoxUsbMonGlobals));
    VBOX_PNPHOOKSTUB_INIT(0);
    VBOX_PNPHOOKSTUB_INIT(1);
    VBOX_PNPHOOKSTUB_INIT(2);
    VBOX_PNPHOOKSTUB_INIT(3);
    VBOX_PNPHOOKSTUB_INIT(4);
    AssertCompile(VBOXUSBMON_MAXDRIVERS == 5);
    /* Start signaled: OpenSynchEvent acts as a mutex for open/close paths. */
    KeInitializeEvent(&g_VBoxUsbMonGlobals.OpenSynchEvent, SynchronizationEvent, TRUE /* signaled */);
    IoInitializeRemoveLock(&g_VBoxUsbMonGlobals.RmLock, VBOXUSBMON_MEMTAG, 1, 100);
    UNICODE_STRING DevName;
    PDEVICE_OBJECT pDevObj;
    /* create the device */
    RtlInitUnicodeString(&DevName, USBMON_DEVICE_NAME_NT);
    /* Hold the initial remove-lock reference until VBoxUsbMonUnload. */
    NTSTATUS Status = IoAcquireRemoveLock(&g_VBoxUsbMonGlobals.RmLock, &g_VBoxUsbMonGlobals);
    if (NT_SUCCESS(Status))
    {
        Status = IoCreateDevice(pDrvObj, sizeof (VBOXUSBMONINS), &DevName, FILE_DEVICE_UNKNOWN, 0, FALSE, &pDevObj);
        if (NT_SUCCESS(Status))
        {
            UNICODE_STRING DosName;
            RtlInitUnicodeString(&DosName, USBMON_DEVICE_NAME_DOS);
            Status = IoCreateSymbolicLink(&DosName, &DevName);
            if (NT_SUCCESS(Status))
            {
                PVBOXUSBMONINS pDevExt = (PVBOXUSBMONINS)pDevObj->DeviceExtension;
                memset(pDevExt, 0, sizeof(*pDevExt));
                pDrvObj->DriverUnload = VBoxUsbMonUnload;
                pDrvObj->MajorFunction[IRP_MJ_CREATE] = VBoxUsbMonCreate;
                pDrvObj->MajorFunction[IRP_MJ_CLOSE] = VBoxUsbMonClose;
                pDrvObj->MajorFunction[IRP_MJ_DEVICE_CONTROL] = VBoxUsbMonDeviceControl;
                pDrvObj->MajorFunction[IRP_MJ_INTERNAL_DEVICE_CONTROL] = VBoxUsbMonInternalDeviceControl;
                g_VBoxUsbMonGlobals.pDevObj = pDevObj;
                LOG(("VBoxUSBMon::DriverEntry returning STATUS_SUCCESS"));
                return STATUS_SUCCESS;
            }
            /* Symlink failed: undo the device creation. */
            IoDeleteDevice(pDevObj);
        }
        /* Failure: drop the initial remove-lock reference again. */
        IoReleaseRemoveLockAndWait(&g_VBoxUsbMonGlobals.RmLock, &g_VBoxUsbMonGlobals);
    }
    return Status;
}
|
#include "sendcoinsentry.h"
#include "ui_sendcoinsentry.h"
#include "guiutil.h"
#include "albatroscoinunits.h"
#include "addressbookpage.h"
#include "walletmodel.h"
#include "optionsmodel.h"
#include "addresstablemodel.h"
#include <QApplication>
#include <QClipboard>
// Constructs one send-coins row: loads the .ui form, sets platform-specific
// spacing/placeholder tweaks and wires up focus + address validation.
SendCoinsEntry::SendCoinsEntry(QWidget *parent) :
    QFrame(parent),
    ui(new Ui::SendCoinsEntry),
    model(0)
{
    ui->setupUi(this);
#ifdef Q_OS_MAC
    // Tighter layout matches native macOS spacing.
    ui->payToLayout->setSpacing(4);
#endif
#if QT_VERSION >= 0x040700
    /* Do not move this to the XML file, Qt before 4.7 will choke on it */
    ui->addAsLabel->setPlaceholderText(tr("Enter a label for this address to add it to your address book"));
    ui->payTo->setPlaceholderText(tr("Enter a Albatroscoin address (e.g. iUuWwFn7HKcHeARezeBp5fx8Yer18hyNEN)"));
#endif
    // Tabbing into the frame should land on the address field.
    setFocusPolicy(Qt::TabFocus);
    setFocusProxy(ui->payTo);
    GUIUtil::setupAddressWidget(ui->payTo, this);
}
// Frees the generated UI; child widgets are deleted with it by Qt.
SendCoinsEntry::~SendCoinsEntry()
{
    delete ui;
}
void SendCoinsEntry::on_pasteButton_clicked()
{
// Paste text from clipboard into recipient field
ui->payTo->setText(QApplication::clipboard()->text());
}
// Opens the address book in "pick a sending address" mode and, if the user
// accepted the dialog, copies the chosen address into the payTo field.
void SendCoinsEntry::on_addressBookButton_clicked()
{
    // Without a wallet model there is no address book to browse.
    if (!model)
        return;
    AddressBookPage dlg(AddressBookPage::ForSending, AddressBookPage::SendingTab, this);
    dlg.setModel(model->getAddressTableModel());
    if (dlg.exec() == 0)
        return; // dialog cancelled
    ui->payTo->setText(dlg.getReturnValue());
    ui->payAmount->setFocus();
}
void SendCoinsEntry::on_payTo_textChanged(const QString &address)
{
if(!model)
return;
// Fill in label from address book, if address has an associated label
QString associatedLabel = model->getAddressTableModel()->labelForAddress(address);
if(!associatedLabel.isEmpty())
ui->addAsLabel->setText(associatedLabel);
}
// Attaches the wallet model and resets the entry.  NOTE(review): the
// parameter shadows the `model` member; the assignment below is what sets
// the member.  The payAmount connection is made even when model is null,
// matching the rest of the file's style.
void SendCoinsEntry::setModel(WalletModel *model)
{
    this->model = model;
    if(model && model->getOptionsModel())
        connect(model->getOptionsModel(), SIGNAL(displayUnitChanged(int)), this, SLOT(updateDisplayUnit()));
    connect(ui->payAmount, SIGNAL(textChanged()), this, SIGNAL(payAmountChanged()));
    clear();
}
// Enables/disables the per-row delete button (disabled when it is the only row).
void SendCoinsEntry::setRemoveEnabled(bool enabled)
{
    ui->deleteButton->setEnabled(enabled);
}
// Resets every input widget and returns focus to the address field.
void SendCoinsEntry::clear()
{
    ui->addAsLabel->clear();
    ui->payAmount->clear();
    ui->payTo->clear();
    ui->payTo->setFocus();
    // update the display unit, to not use the default ("ATR")
    updateDisplayUnit();
}
// Asks the parent dialog to remove this row; the parent owns the widget.
void SendCoinsEntry::on_deleteButton_clicked()
{
    emit removeEntry(this);
}
// Validates amount and address, marking each offending widget invalid so the
// user can see what to fix.  Returns true only when the whole row is sendable.
bool SendCoinsEntry::validate()
{
    bool fValid = true;

    // Amount: must parse and be strictly positive.
    if (!ui->payAmount->validate())
    {
        fValid = false;
    }
    else if (ui->payAmount->value() <= 0)
    {
        // Cannot send 0 coins or less
        ui->payAmount->setValid(false);
        fValid = false;
    }

    // Address: widget-level input mask plus (when a model is set) a full check.
    const bool fAddressOk = ui->payTo->hasAcceptableInput()
                            && !(model && !model->validateAddress(ui->payTo->text()));
    if (!fAddressOk)
    {
        ui->payTo->setValid(false);
        fValid = false;
    }
    return fValid;
}
// Snapshots the row's current address, label and amount into a recipient record.
SendCoinsRecipient SendCoinsEntry::getValue()
{
    SendCoinsRecipient recipient;
    recipient.address = ui->payTo->text();
    recipient.label   = ui->addAsLabel->text();
    recipient.amount  = ui->payAmount->value();
    return recipient;
}
// Splices this row's widgets into the dialog-wide tab order after `prev` and
// returns the last widget of the row so the caller can keep chaining.
QWidget *SendCoinsEntry::setupTabChain(QWidget *prev)
{
    QWidget::setTabOrder(prev, ui->payTo);
    QWidget::setTabOrder(ui->payTo, ui->addressBookButton);
    QWidget::setTabOrder(ui->addressBookButton, ui->pasteButton);
    QWidget::setTabOrder(ui->pasteButton, ui->deleteButton);
    QWidget::setTabOrder(ui->deleteButton, ui->addAsLabel);
    // payAmount manages the tab order of its own sub-widgets.
    return ui->payAmount->setupTabChain(ui->addAsLabel);
}
// Populates the row from a recipient record (inverse of getValue()).
void SendCoinsEntry::setValue(const SendCoinsRecipient &value)
{
    ui->payTo->setText(value.address);
    ui->addAsLabel->setText(value.label);
    ui->payAmount->setValue(value.amount);
}
// A row counts as "clear" when no address has been entered yet.
bool SendCoinsEntry::isClear()
{
    return ui->payTo->text().isEmpty();
}
// Focus lands on the address field (also the frame's focus proxy).
void SendCoinsEntry::setFocus()
{
    ui->payTo->setFocus();
}
// Re-applies the user's configured display unit to the amount widget.
void SendCoinsEntry::updateDisplayUnit()
{
    if (!model || !model->getOptionsModel())
        return;
    // Update payAmount with the current unit
    ui->payAmount->setDisplayUnit(model->getOptionsModel()->getDisplayUnit());
}
|
#include "include/firebase_core_desktop/firebase_core_desktop_plugin.h"
#include <flutter_linux/flutter_linux.h>
#include <gtk/gtk.h>
#include <sys/utsname.h>
#include <cstring>
// Checked-cast macro for the plugin GObject instance.
#define FIREBASE_CORE_DESKTOP_PLUGIN(obj) \
  (G_TYPE_CHECK_INSTANCE_CAST((obj), firebase_core_desktop_plugin_get_type(), \
                              FirebaseCoreDesktopPlugin))
// Plugin instance: no state of its own beyond the GObject base.
struct _FirebaseCoreDesktopPlugin {
  GObject parent_instance;
};
// Registers the GType (standard Flutter Linux plugin boilerplate).
G_DEFINE_TYPE(FirebaseCoreDesktopPlugin, firebase_core_desktop_plugin, g_object_get_type())
// Called when a method call is received from Flutter.
// Only "getPlatformVersion" is implemented; anything else gets a
// not-implemented response so the Dart side can fall back.
static void firebase_core_desktop_plugin_handle_method_call(
    FirebaseCoreDesktopPlugin* self,
    FlMethodCall* method_call) {
  g_autoptr(FlMethodResponse) response = nullptr;
  const gchar* method = fl_method_call_get_name(method_call);
  if (strcmp(method, "getPlatformVersion") != 0) {
    response = FL_METHOD_RESPONSE(fl_method_not_implemented_response_new());
  } else {
    struct utsname uname_data = {};
    uname(&uname_data);
    g_autofree gchar* version = g_strdup_printf("Linux %s", uname_data.version);
    g_autoptr(FlValue) result = fl_value_new_string(version);
    response = FL_METHOD_RESPONSE(fl_method_success_response_new(result));
  }
  fl_method_call_respond(method_call, response, nullptr);
}
// GObject dispose: nothing to release locally, just chain up.
static void firebase_core_desktop_plugin_dispose(GObject* object) {
  G_OBJECT_CLASS(firebase_core_desktop_plugin_parent_class)->dispose(object);
}
// Class init: install the dispose override.
static void firebase_core_desktop_plugin_class_init(FirebaseCoreDesktopPluginClass* klass) {
  G_OBJECT_CLASS(klass)->dispose = firebase_core_desktop_plugin_dispose;
}
// Instance init: no per-instance state to set up.
static void firebase_core_desktop_plugin_init(FirebaseCoreDesktopPlugin* self) {}
// Channel trampoline: recover the plugin instance and forward the call.
static void method_call_cb(FlMethodChannel* channel, FlMethodCall* method_call,
                           gpointer user_data) {
  FirebaseCoreDesktopPlugin* plugin = FIREBASE_CORE_DESKTOP_PLUGIN(user_data);
  firebase_core_desktop_plugin_handle_method_call(plugin, method_call);
}
// Plugin entry point called by the Flutter engine: creates the plugin
// instance and binds it to the "firebase_core_desktop" method channel.
void firebase_core_desktop_plugin_register_with_registrar(FlPluginRegistrar* registrar) {
  FirebaseCoreDesktopPlugin* plugin = FIREBASE_CORE_DESKTOP_PLUGIN(
      g_object_new(firebase_core_desktop_plugin_get_type(), nullptr));
  g_autoptr(FlStandardMethodCodec) codec = fl_standard_method_codec_new();
  g_autoptr(FlMethodChannel) channel =
      fl_method_channel_new(fl_plugin_registrar_get_messenger(registrar),
                            "firebase_core_desktop",
                            FL_METHOD_CODEC(codec));
  // The channel holds its own reference (released via g_object_unref when the
  // handler is removed), so we can drop ours below.
  fl_method_channel_set_method_call_handler(channel, method_call_cb,
                                            g_object_ref(plugin),
                                            g_object_unref);
  g_object_unref(plugin);
}
|
/*
* Copyright 2012 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrSoftwarePathRenderer.h"
#include "GrAuditTrail.h"
#include "GrClip.h"
#include "GrContextPriv.h"
#include "GrDeferredProxyUploader.h"
#include "GrGpuResourcePriv.h"
#include "GrOpFlushState.h"
#include "GrOpList.h"
#include "GrProxyProvider.h"
#include "GrSWMaskHelper.h"
#include "SkMakeUnique.h"
#include "SkSemaphore.h"
#include "SkTaskGroup.h"
#include "SkTraceEvent.h"
#include "ops/GrDrawOp.h"
#include "ops/GrRectOpFactory.h"
////////////////////////////////////////////////////////////////////////////////
GrPathRenderer::CanDrawPath
GrSoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // This is the fallback renderer for when a path is too complicated for the
    // GPU ones, so it only ever volunteers as a backup.
    // Pass on any style that applies. The caller will apply the style if a
    // suitable renderer is not found and try again with the new GrShape.
    if (args.fShape->style().applies()) {
        return CanDrawPath::kNo;
    }
    if (!SkToBool(fProxyProvider)) {
        return CanDrawPath::kNo;
    }
    if (args.fAAType != GrAAType::kCoverage && args.fAAType != GrAAType::kNone) {
        return CanDrawPath::kNo;
    }
    return CanDrawPath::kAsBackup;
}
////////////////////////////////////////////////////////////////////////////////
static bool get_unclipped_shape_dev_bounds(const GrShape& shape, const SkMatrix& matrix,
SkIRect* devBounds) {
SkRect shapeBounds = shape.styledBounds();
if (shapeBounds.isEmpty()) {
return false;
}
SkRect shapeDevBounds;
matrix.mapRect(&shapeDevBounds, shapeBounds);
// Even though these are "unclipped" bounds we still clip to the int32_t range.
// This is the largest int32_t that is representable exactly as a float. The next 63 larger ints
// would round down to this value when cast to a float, but who really cares.
// INT32_MIN is exactly representable.
static constexpr int32_t kMaxInt = 2147483520;
if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
return false;
}
// Make sure that the resulting SkIRect can have representable width and height
if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
return false;
}
shapeDevBounds.roundOut(devBounds);
return true;
}
// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
// is no intersection.
static bool get_shape_and_clip_bounds(int width, int height,
                                      const GrClip& clip,
                                      const GrShape& shape,
                                      const SkMatrix& matrix,
                                      SkIRect* unclippedDevShapeBounds,
                                      SkIRect* clippedDevShapeBounds,
                                      SkIRect* devClipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    clip.getConservativeBounds(width, height, devClipBounds);
    if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
        // Degenerate shape: report empty bounds in both outputs.
        *unclippedDevShapeBounds = SkIRect::EmptyIRect();
        *clippedDevShapeBounds = SkIRect::EmptyIRect();
        return false;
    }
    if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
        // Shape lies entirely outside the clip; unclipped bounds stay valid.
        *clippedDevShapeBounds = SkIRect::EmptyIRect();
        return false;
    }
    return true;
}
////////////////////////////////////////////////////////////////////////////////
// Submits a single non-AA filled rect op; localMatrix maps device-space rect
// coords into the paint's local space (used for mask texture coords).
void GrSoftwarePathRenderer::DrawNonAARect(GrRenderTargetContext* renderTargetContext,
                                           GrPaint&& paint,
                                           const GrUserStencilSettings& userStencilSettings,
                                           const GrClip& clip,
                                           const SkMatrix& viewMatrix,
                                           const SkRect& rect,
                                           const SkMatrix& localMatrix) {
    renderTargetContext->addDrawOp(clip,
                                   GrRectOpFactory::MakeNonAAFillWithLocalMatrix(
                                           std::move(paint), viewMatrix, localMatrix, rect,
                                           GrAAType::kNone, &userStencilSettings));
}
// For inverse-filled paths: fills the (up to four) clip regions that lie
// outside the path's device bounds -- top, left, right, bottom bands -- since
// an inverse fill covers everything outside the path.
void GrSoftwarePathRenderer::DrawAroundInvPath(GrRenderTargetContext* renderTargetContext,
                                               GrPaint&& paint,
                                               const GrUserStencilSettings& userStencilSettings,
                                               const GrClip& clip,
                                               const SkMatrix& viewMatrix,
                                               const SkIRect& devClipBounds,
                                               const SkIRect& devPathBounds) {
    // Rects are emitted in device space; invert maps them back to local space
    // so the paint's coordinates stay consistent with viewMatrix.
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }
    SkRect rect;
    if (devClipBounds.fTop < devPathBounds.fTop) {
        rect.iset(devClipBounds.fLeft, devClipBounds.fTop,
                  devClipBounds.fRight, devPathBounds.fTop);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fLeft < devPathBounds.fLeft) {
        rect.iset(devClipBounds.fLeft, devPathBounds.fTop,
                  devPathBounds.fLeft, devPathBounds.fBottom);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fRight > devPathBounds.fRight) {
        rect.iset(devPathBounds.fRight, devPathBounds.fTop,
                  devClipBounds.fRight, devPathBounds.fBottom);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fBottom > devPathBounds.fBottom) {
        // Last band: the paint can be moved instead of cloned.
        rect.iset(devClipBounds.fLeft, devPathBounds.fBottom,
                  devClipBounds.fRight, devClipBounds.fBottom);
        DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
}
// Draws deviceSpaceRectToDraw using `proxy` as a coverage mask.  The mask's
// top-left corresponds to textureOriginInDeviceSpace, so texture coords are
// derived from device coords rather than from the path's local geometry.
void GrSoftwarePathRenderer::DrawToTargetWithShapeMask(
        sk_sp<GrTextureProxy> proxy,
        GrRenderTargetContext* renderTargetContext,
        GrPaint&& paint,
        const GrUserStencilSettings& userStencilSettings,
        const GrClip& clip,
        const SkMatrix& viewMatrix,
        const SkIPoint& textureOriginInDeviceSpace,
        const SkIRect& deviceSpaceRectToDraw) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }
    SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);
    // We use device coords to compute the texture coordinates. We take the device coords and apply
    // a translation so that the top-left of the device bounds maps to 0,0, and then a scaling
    // matrix to normalized coords.
    SkMatrix maskMatrix = SkMatrix::MakeTrans(SkIntToScalar(-textureOriginInDeviceSpace.fX),
                                              SkIntToScalar(-textureOriginInDeviceSpace.fY));
    maskMatrix.preConcat(viewMatrix);
    paint.addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(
            std::move(proxy), maskMatrix, GrSamplerState::Filter::kNearest));
    // invert maps the device-space dstRect back into the paint's local space.
    DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip, SkMatrix::I(),
                  dstRect, invert);
}
// Creates a deferred alpha-8 texture proxy to receive a software-rendered
// coverage mask of the given dimensions.
static sk_sp<GrTextureProxy> make_deferred_mask_texture_proxy(GrContext* context, SkBackingFit fit,
                                                              int width, int height) {
    GrSurfaceDesc maskDesc;
    maskDesc.fOrigin = kTopLeft_GrSurfaceOrigin;
    maskDesc.fConfig = kAlpha_8_GrPixelConfig;
    maskDesc.fWidth = width;
    maskDesc.fHeight = height;
    // MDB TODO: this proxy is filled with an ASAP upload (which is out of order
    // wrt ops), so it can't have any pending IO.
    GrProxyProvider* proxyProvider = context->contextPriv().proxyProvider();
    return proxyProvider->createProxy(maskDesc, fit, SkBudgeted::kYes,
                                      GrResourceProvider::kNoPendingIO_Flag);
}
namespace {
/**
 * Payload for GrTDeferredProxyUploader. The software path renderer draws a
 * single path into the mask texture; this bundles everything the worker
 * thread's drawShape call needs (see onDrawPath below).
 */
class SoftwarePathData {
public:
    SoftwarePathData(const SkIRect& maskBounds, const SkMatrix& viewMatrix, const GrShape& shape,
                     GrAA aa)
            : fMaskBounds(maskBounds), fViewMatrix(viewMatrix), fShape(shape), fAA(aa) {}

    // Accessors used by the worker thread when rasterizing the mask.
    const SkIRect& getMaskBounds() const { return fMaskBounds; }
    const SkMatrix* getViewMatrix() const { return &fViewMatrix; }
    const GrShape& getShape() const { return fShape; }
    GrAA getAA() const { return fAA; }

private:
    SkIRect fMaskBounds;    // Device-space bounds the mask covers.
    SkMatrix fViewMatrix;
    GrShape fShape;         // Copied so the shape outlives the caller's frame.
    GrAA fAA;
};
// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
class PathInvalidator : public SkPathRef::GenIDChangeListener {
public:
explicit PathInvalidator(const GrUniqueKey& key) : fMsg(key) {}
private:
GrUniqueKeyInvalidatedMessage fMsg;
void onChange() override {
SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(fMsg);
}
};
}
////////////////////////////////////////////////////////////////////////////////
// return true on success; false on failure
// Rasterizes the path into an A8 coverage mask in software (possibly on a
// task-group thread), optionally caches the mask texture, and draws the mask
// to the target. Returns true on success, false if software rendering cannot
// proceed (no proxy provider, or mask allocation failed).
bool GrSoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrSoftwarePathRenderer::onDrawPath");
    if (!fProxyProvider) {
        return false;
    }
    // We really need to know if the shape will be inverse filled or not
    bool inverseFilled = false;
    SkTLazy<GrShape> tmpShape;
    SkASSERT(!args.fShape->style().applies());
    // If the path is hairline, ignore inverse fill.
    inverseFilled = args.fShape->inverseFilled() &&
                    !IsStrokeHairlineOrEquivalent(args.fShape->style(), *args.fViewMatrix, nullptr);
    SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
    // To prevent overloading the cache with entries during animations we limit the cache of masks
    // to cases where the matrix preserves axis alignment.
    bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
                    args.fShape->hasUnstyledKey() && GrAAType::kCoverage == args.fAAType;
    // If the clipped shape bounds are empty there is nothing inside the path to
    // draw; an inverse fill must still cover the clip area outside the path.
    if (!get_shape_and_clip_bounds(args.fRenderTargetContext->width(),
                                   args.fRenderTargetContext->height(),
                                   *args.fClip, *args.fShape,
                                   *args.fViewMatrix, &unclippedDevShapeBounds,
                                   &clippedDevShapeBounds,
                                   &devClipBounds)) {
        if (inverseFilled) {
            DrawAroundInvPath(args.fRenderTargetContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
                              devClipBounds, unclippedDevShapeBounds);
        }
        return true;
    }
    const SkIRect* boundsForMask = &clippedDevShapeBounds;
    if (useCache) {
        // Use the cache only if >50% of the path is visible.
        int unclippedWidth = unclippedDevShapeBounds.width();
        int unclippedHeight = unclippedDevShapeBounds.height();
        int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
        int64_t clippedArea = sk_64_mul(clippedDevShapeBounds.width(),
                                        clippedDevShapeBounds.height());
        int maxTextureSize = args.fRenderTargetContext->caps()->maxTextureSize();
        if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
            unclippedHeight > maxTextureSize) {
            useCache = false;
        } else {
            // Cached masks cover the full unclipped shape so they can be reused
            // under different clips.
            boundsForMask = &unclippedDevShapeBounds;
        }
    }
    GrUniqueKey maskKey;
    if (useCache) {
        // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
        SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
        SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
        SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
        SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
        static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Fractional translate does not affect caching on Android. This is done for better cache
        // hit ratio and speed, but it is matching HWUI behavior, which doesn't consider the matrix
        // at all when caching paths.
        GrUniqueKey::Builder builder(&maskKey, kDomain, 4 + args.fShape->unstyledKeySize());
#else
        SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
        SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
        // Allow 8 bits each in x and y of subpixel positioning.
        SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
        SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
        GrUniqueKey::Builder builder(&maskKey, kDomain, 5 + args.fShape->unstyledKeySize());
#endif
        builder[0] = SkFloat2Bits(sx);
        builder[1] = SkFloat2Bits(sy);
        builder[2] = SkFloat2Bits(kx);
        builder[3] = SkFloat2Bits(ky);
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        args.fShape->writeUnstyledKey(&builder[4]);
#else
        // Pack both 8-bit subpixel fractions into one 32-bit key word.
        builder[4] = fracX | (fracY >> 8);
        args.fShape->writeUnstyledKey(&builder[5]);
#endif
    }
    sk_sp<GrTextureProxy> proxy;
    if (useCache) {
        proxy = fProxyProvider->findOrCreateProxyByUniqueKey(maskKey, kTopLeft_GrSurfaceOrigin);
    }
    if (!proxy) {
        // Cache miss (or caching disabled): rasterize the mask now. Cached
        // masks use exact fit so the texture matches the key's bounds.
        SkBackingFit fit = useCache ? SkBackingFit::kExact : SkBackingFit::kApprox;
        GrAA aa = GrAAType::kCoverage == args.fAAType ? GrAA::kYes : GrAA::kNo;
        SkTaskGroup* taskGroup = args.fContext->contextPriv().getTaskGroup();
        if (taskGroup) {
            // Threaded path: create a deferred proxy now and rasterize the mask
            // on the task group; the uploader synchronizes with flush.
            proxy = make_deferred_mask_texture_proxy(args.fContext, fit,
                                                     boundsForMask->width(),
                                                     boundsForMask->height());
            if (!proxy) {
                return false;
            }
            auto uploader = skstd::make_unique<GrTDeferredProxyUploader<SoftwarePathData>>(
                    *boundsForMask, *args.fViewMatrix, *args.fShape, aa);
            GrTDeferredProxyUploader<SoftwarePathData>* uploaderRaw = uploader.get();
            // The lambda captures only the raw uploader pointer; the uploader's
            // ownership is transferred to the proxy below.
            auto drawAndUploadMask = [uploaderRaw] {
                TRACE_EVENT0("skia", "Threaded SW Mask Render");
                GrSWMaskHelper helper(uploaderRaw->getPixels());
                if (helper.init(uploaderRaw->data().getMaskBounds())) {
                    helper.drawShape(uploaderRaw->data().getShape(),
                                     *uploaderRaw->data().getViewMatrix(),
                                     SkRegion::kReplace_Op, uploaderRaw->data().getAA(), 0xFF);
                } else {
                    SkDEBUGFAIL("Unable to allocate SW mask.");
                }
                uploaderRaw->signalAndFreeData();
            };
            taskGroup->add(std::move(drawAndUploadMask));
            proxy->texPriv().setDeferredUploader(std::move(uploader));
        } else {
            // Synchronous path: rasterize the mask on this thread.
            GrSWMaskHelper helper;
            if (!helper.init(*boundsForMask)) {
                return false;
            }
            helper.drawShape(*args.fShape, *args.fViewMatrix, SkRegion::kReplace_Op, aa, 0xFF);
            proxy = helper.toTextureProxy(args.fContext, fit);
        }
        if (!proxy) {
            return false;
        }
        if (useCache) {
            SkASSERT(proxy->origin() == kTopLeft_GrSurfaceOrigin);
            fProxyProvider->assignUniqueKeyToProxy(maskKey, proxy.get());
            // Drop the cached mask if the underlying path data changes.
            args.fShape->addGenIDChangeListener(new PathInvalidator(maskKey));
        }
    }
    if (inverseFilled) {
        // Inverse fill: first paint everything in the clip outside the path
        // bounds; the mask draw below handles the interior.
        DrawAroundInvPath(args.fRenderTargetContext, GrPaint::Clone(args.fPaint),
                          *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix, devClipBounds,
                          unclippedDevShapeBounds);
    }
    DrawToTargetWithShapeMask(
            std::move(proxy), args.fRenderTargetContext, std::move(args.fPaint),
            *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
            SkIPoint{boundsForMask->fLeft, boundsForMask->fTop}, *boundsForMask);
    return true;
}
|
//===- Local.cpp - Compute a local data structure graph for a function ----===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Compute the local version of the data structure graph for a function. The
// external interface to this file is the DSGraph constructor.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "dsa-local"
#include "dsa/DataStructure.h"
#include "dsa/DSGraph.h"
#include "dsa/DSMonitor.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Use.h"
#include "llvm/Support/CommandLine.h"
#include "smack/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Support/Timer.h"
#include <fstream>
// FIXME: This should eventually be a FunctionPass that is automatically
// aggregated into a Pass.
//
#include "llvm/IR/Module.h"
using namespace llvm;
namespace {
  // Pass statistics (reported with -stats); the counters are incremented by
  // the GraphBuilder visitors below.
  STATISTIC(NumDirectCall, "Number of direct calls added");
  STATISTIC(NumIndirectCall, "Number of indirect calls added");
  STATISTIC(NumAsmCall, "Number of asm calls collapsed/seen");
  STATISTIC(NumIntrinsicCall, "Number of intrinsics called");
  STATISTIC(NumBoringIntToPtr, "Number of inttoptr used only in cmp");
  //STATISTIC(NumSimpleIntToPtr, "Number of inttoptr from ptrtoint");
  STATISTIC(NumIgnoredInst, "Number of instructions ignored");

  // Registers the pass under the -dsa-local name.
  RegisterPass<LocalDataStructures>
  X("dsa-local", "Local Data Structure Analysis");

  // Optional file describing a section-to-global mapping; presumably consumed
  // elsewhere in the DSA passes (no use visible here) — TODO confirm.
  cl::opt<std::string> hasMagicSections("dsa-magic-sections",
         cl::desc("File with section to global mapping")); //, cl::ReallyHidden);
}
// Gates the load/store type-inference shortcuts in the visitors below
// (see visitLoadInst/visitStoreInst/visitGetElementPtrInst). Declared at
// namespace scope (not in the anonymous namespace above) — presumably so other
// translation units can reference it; confirm before moving.
cl::opt<bool> TypeInferenceOptimize("enable-type-inference-opts",
                cl::desc("Enable Type Inference Optimizations added to DSA."),
                cl::Hidden,
                cl::init(false));
namespace {
//===--------------------------------------------------------------------===//
// GraphBuilder Class
//===--------------------------------------------------------------------===//
//
/// This class is the builder class that constructs the local data structure
/// graph by performing a single pass over the function in question.
///
/// GraphBuilder - Constructs the local data structure graph by performing a
/// single pass over the function in question (or over the global initializers
/// when built for the globals graph).
///
class GraphBuilder : InstVisitor<GraphBuilder> {
  DSGraph &G;               // Graph being constructed.
  Function* FB;             // Function under analysis (null for globals graph).
  LocalDataStructures* DS;  // Owning pass.
  const DataLayout& TD;
  DSNode *VAArray;          // Node modeling the va_arg array, lazily created.
  DSMonitor M;

  ////////////////////////////////////////////////////////////////////////////
  // Helper functions used to implement the visitation functions...

  void MergeConstantInitIntoNode(DSNodeHandle &NH, Type* Ty, Constant *C);

  /// createNode - Create a new DSNode, ensuring that it is properly added to
  /// the graph.
  ///
  DSNode *createNode()
  {
    // The node registers itself with (and is owned by) the graph.
    DSNode* ret = new DSNode(&G);
    assert(ret->getParentGraph() && "No parent?");
    return ret;
  }

  /// setDestTo - Set the ScalarMap entry for the specified value to point to
  /// the specified destination.  If the Value already points to a node, make
  /// sure to merge the two destinations together.
  ///
  void setDestTo(Value &V, const DSNodeHandle &NH);

  /// getValueDest - Return the DSNode that the actual value points to.
  ///
  DSNodeHandle getValueDest(Value* V);

  /// getLink - This method is used to return the specified link in the
  /// specified node if one exists.  If a link does not already exist (it's
  /// null), then we create a new node, link it, then return it.
  ///
  DSNodeHandle &getLink(const DSNodeHandle &Node, unsigned Link = 0);

  ////////////////////////////////////////////////////////////////////////////
  // Visitor functions, used to handle each instruction type we encounter...
  friend class InstVisitor<GraphBuilder>;

  void visitAllocaInst(AllocaInst &AI)
  { setDestTo(AI, createNode()->setAllocaMarker()); }

  //the simple ones
  void visitPHINode(PHINode &PN);
  void visitSelectInst(SelectInst &SI);
  void visitLoadInst(LoadInst &LI);
  void visitStoreInst(StoreInst &SI);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitReturnInst(ReturnInst &RI);
  void visitVAArgInst(VAArgInst   &I);
  void visitIntToPtrInst(IntToPtrInst &I);
  void visitPtrToIntInst(PtrToIntInst &I);
  void visitBitCastInst(BitCastInst &I);
  void visitCmpInst(CmpInst &I);
  void visitInsertValueInst(InsertValueInst& I);
  void visitExtractValueInst(ExtractValueInst& I);

  //the nasty ones
  void visitGetElementPtrInst(User &GEP);
  void visitCallInst(CallInst &CI);
  void visitInvokeInst(InvokeInst &II);
  void visitInstruction(Instruction &I);

  bool visitIntrinsic(CallSite CS, Function* F);
  void visitCallSite(CallSite CS);
  void visitVAStart(CallSite CS);
  void visitVAStartNode(DSNode* N);

public:
  /// Builds the local graph for function 'f' into 'g'.
  GraphBuilder(Function &f, DSGraph &g, LocalDataStructures& DSi)
    : G(g), FB(&f), DS(&DSi), TD(g.getDataLayout()), VAArray(0) {
    DEBUG(errs() << "[local] Building graph for function: "
          << f.getName() << "\n");

    // Create scalar nodes for all pointer arguments...
    for (auto &Arg : f.args()) {
      if (isa<PointerType>(Arg.getType())) {
        // WD: Why do we set the external marker so early in the analysis?
        // Functions we have definitions for, but are externally reachable have no external contexts
        // that we'd want to BU external information into (since those contexts are by definition
        // ones we don't have code for). Shouldn't this just be set in TD?
#if 0
        DSNode * Node = getValueDest(I).getNode();

        if (!f.hasInternalLinkage() || !f.hasPrivateLinkage())
          Node->setExternalMarker();

#else
        getValueDest(&Arg).getNode();
#endif

      }
    }

    // Create an entry for the return, which tracks which functions are in
    // the graph
    g.getOrCreateReturnNodeFor(f);

    // Create a node to handle information about variable arguments
    g.getOrCreateVANodeFor(f);

    visit(f);  // Single pass over the function

    // If there are any constant globals referenced in this function, merge
    // their initializers into the local graph from the globals graph.
    // This resolves indirect calls in some common cases
    // Only merge info for nodes that already exist in the local pass
    // otherwise leaf functions could contain less collapsing than the globals
    // graph
    if (g.getScalarMap().global_begin() != g.getScalarMap().global_end()) {
      ReachabilityCloner RC(&g, g.getGlobalsGraph(), 0);
      for (DSScalarMap::global_iterator I = g.getScalarMap().global_begin(),
           E = g.getScalarMap().global_end(); I != E; ++I) {
        if (const GlobalVariable * GV = dyn_cast<GlobalVariable > (*I))
          if (GV->isConstant())
            RC.merge(g.getNodeForValue(GV), g.getGlobalsGraph()->getNodeForValue(GV));
      }
    }

    g.markIncompleteNodes(DSGraph::MarkFormalArgs);

    // Compute sources of external
    unsigned EFlags = 0
      | DSGraph::DontMarkFormalsExternal
      | DSGraph::ProcessCallSites;

    g.computeExternalFlags(EFlags);
    g.computeIntPtrFlags();

    // Remove any nodes made dead due to merging...
    g.removeDeadNodes(DSGraph::KeepUnreachableGlobals);
  }

  // GraphBuilder ctor for working on the globals graph.
  // BUGFIX: the original init list omitted DS(&DSi), leaving the DS member
  // uninitialized (and the DSi parameter unused).
  explicit GraphBuilder(DSGraph& g, LocalDataStructures& DSi)
    : G(g), FB(0), DS(&DSi), TD(g.getDataLayout()), VAArray(0)
  {}

  void mergeInGlobalInitializer(GlobalVariable *GV);
  void mergeExternalGlobal(GlobalVariable* GV);
  void mergeFunction(Function* F) { getValueDest(F); }
};
/// Traverse the whole DSGraph and propagate the Unknown flag from every
/// unknown node to all nodes reachable through its out-edges.
static void propagateUnknownFlag(DSGraph * G) {
  std::vector<DSNode *> worklist;
  DenseSet<DSNode *> seen;
  // Seed the worklist with the nodes already marked unknown.
  for (DSGraph::node_iterator I = G->node_begin(), E = G->node_end(); I != E; ++I)
    if (I->isUnknownNode())
      worklist.push_back(&*I);
  while (!worklist.empty()) {
    DSNode *N = worklist.back();
    worklist.pop_back();
    // insert() reports whether the node was newly visited.
    if (!seen.insert(N).second)
      continue;
    N->setUnknownMarker();
    // Enqueue every non-null outgoing edge target.
    for (DSNode::edge_iterator EI = N->edge_begin(), EE = N->edge_end(); EI != EE; ++EI)
      if (!EI->second.isNull())
        worklist.push_back(EI->second.getNode());
  }
}
}
//===----------------------------------------------------------------------===//
// Helper method implementations...
//
///
/// getValueDest - Return the DSNode that the actual value points to.
/// Returns a null handle for null constants and undef; otherwise guarantees
/// the value has a (possibly freshly created) node in the scalar map.
///
DSNodeHandle GraphBuilder::getValueDest(Value* V) {
  if (isa<Constant>(V) && cast<Constant>(V)->isNullValue())
    return 0;  // Null doesn't point to anything, don't add to ScalarMap!

  DSNodeHandle &NH = G.getNodeForValue(V);
  if (!NH.isNull())
    return NH;     // Already have a node?  Just return it...

  // Otherwise we need to create a new node to point to.
  // Check first for constant expressions that must be traversed to
  // extract the actual value.
  DSNode* N;
  if (Function * F = dyn_cast<Function > (V)) {
    // Create a new global node for this function.
    N = createNode();
    N->addFunction(F);
    if (F->isDeclaration())
      N->setExternFuncMarker();
  } else if (GlobalValue * GV = dyn_cast<GlobalValue > (V)) {
    // Create a new global node for this global variable.
    N = createNode();
    N->addGlobal(GV);
    if (GV->isDeclaration())
      N->setExternGlobalMarker();
  } else if (Constant *C = dyn_cast<Constant>(V)) {
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->isCast()) {
        // Pointer casts alias the operand; non-pointer sources are unknown.
        if (isa<PointerType>(CE->getOperand(0)->getType()))
          NH = getValueDest(CE->getOperand(0));
        else
          NH = createNode()->setUnknownMarker();
      } else if (CE->getOpcode() == Instruction::GetElementPtr) {
        // Constant GEPs are processed through the regular GEP visitor.
        visitGetElementPtrInst(*CE);
        assert(G.hasNodeForValue(CE) && "GEP didn't get processed right?");
        NH = G.getNodeForValue(CE);
      } else {
        // This returns a conservative unknown node for any unhandled ConstExpr
        NH = createNode()->setUnknownMarker();
      }
      if (NH.isNull()) {  // (getelementptr null, X) returns null
        G.eraseNodeForValue(V);
        return 0;
      }
      return NH;
    } else if (isa<UndefValue>(C)) {
      // Undef points nowhere; drop any placeholder entry.
      G.eraseNodeForValue(V);
      return 0;
    } else if (isa<GlobalAlias>(C)) {
      // XXX: Need more investigation
      // According to Andrew, DSA is broken on global aliasing, since it does
      // not handle the aliases of parameters correctly. Here is only a quick
      // fix for some special cases.
      NH = getValueDest(cast<GlobalAlias>(C)->getAliasee());
      return NH;
    } else if (isa<BlockAddress>(C)) {
      //
      // FIXME: This may not be quite right; we should probably add a
      // BlockAddress flag to the DSNode instead of using the unknown flag.
      //
      N = createNode();
      N->setUnknownMarker();
    } else if (isa<ConstantStruct>(C) || isa<ConstantArray>(C) ||
               isa<ConstantDataSequential>(C) || isa<ConstantDataArray>(C) ||
               isa<ConstantDataVector>(C)) {
      // Treat these the same way we treat global initializers
      N = createNode();
      NH.mergeWith(N);
      MergeConstantInitIntoNode(NH, C->getType(), C);
    } else {
      errs() << "Unknown constant: " << *C << "\n";
      llvm_unreachable("Unknown constant type!");
    }
    // NOTE(review): control reaches here from the BlockAddress and
    // constant-aggregate branches above, and this reassignment discards the
    // node those branches prepared (its markers / merged initializer are no
    // longer what NH.setTo below points V at). Looks suspicious — confirm
    // against upstream DSA before changing.
    N = createNode(); // just create a shadow node
  } else {
    // Otherwise just create a shadow node
    N = createNode();
  }

  NH.setTo(N, 0);      // Remember that we are pointing to it...
  return NH;
}
/// getLink - Return link number LinkNo of the given node. If that link is
/// currently null, a fresh shadow node is created and linked in first, so the
/// returned handle is never null.
///
DSNodeHandle &GraphBuilder::getLink(const DSNodeHandle &node, unsigned LinkNo) {
  DSNodeHandle &Link = const_cast<DSNodeHandle &>(node).getLink(LinkNo);
  // Lazily populate an empty link with a new shadow node.
  if (Link.isNull())
    Link = createNode();
  return Link;
}
/// setDestTo - Point V's ScalarMap entry at NH. If V already points to a
/// node, the two destinations are merged together.
///
void GraphBuilder::setDestTo(Value &V, const DSNodeHandle &NH) {
  DSNodeHandle &Entry = G.getNodeForValue(&V);
  Entry.mergeWith(NH);
}
//===----------------------------------------------------------------------===//
// Specific instruction type handler implementations...
//
// PHINode - A pointer PHI may produce any of its incoming values, so merge
// every incoming value's points-to set into the PHI's own node.
//
void GraphBuilder::visitPHINode(PHINode &PN) {
  DEBUG(errs() << "[local] visiting phi node: " << PN.getName() << "\n");
  // Non-pointer PHIs carry no points-to information.
  if (!isa<PointerType>(PN.getType()))
    return;
  DSNodeHandle &Dest = G.getNodeForValue(&PN);
  unsigned NumIncoming = PN.getNumIncomingValues();
  for (unsigned Idx = 0; Idx != NumIncoming; ++Idx)
    Dest.mergeWith(getValueDest(PN.getIncomingValue(Idx)));
}
// A pointer select may yield either operand, so its node merges with both.
void GraphBuilder::visitSelectInst(SelectInst &SI) {
  if (!isa<PointerType>(SI.getType()))
    return;  // Only pointer Selects
  DSNodeHandle TrueDest  = getValueDest(SI.getOperand(1));
  DSNodeHandle FalseDest = getValueDest(SI.getOperand(2));
  DSNodeHandle &Result = G.getNodeForValue(&SI);
  Result.mergeWith(TrueDest);
  Result.mergeWith(FalseDest);
}
void GraphBuilder::visitLoadInst(LoadInst &LI) {
  DEBUG(errs() << "[local] visiting load: " << LI << "\n");
  //
  // Create a DSNode for the pointer dereferenced by the load.  If the DSNode
  // is NULL, do nothing more (this can occur if the load is loading from a
  // NULL pointer constant (bugpoint can generate such code).
  //
  DSNodeHandle Ptr = getValueDest(LI.getPointerOperand());
  if (Ptr.isNull()) return; // Load from null

  // Make that the node is read from...
  Ptr.getNode()->setReadMarker();

  // Ensure a typerecord exists...
  Ptr.getNode()->growSizeForType(LI.getType(), Ptr.getOffset());

  // A load of a pointer makes the result point at the pointee's link.
  if (isa<PointerType>(LI.getType()))
    setDestTo(LI, getLink(Ptr));

  // Optimization: skip merging type info for a load whose only use is storing
  // the value straight back (a load/store round trip), to reduce needless
  // type-based node collapsing.
  // check that it is the inserted value
  if(TypeInferenceOptimize)
    if(LI.hasOneUse())
      if(StoreInst *SI = dyn_cast<StoreInst>(*(LI.use_begin())))
        if(SI->getOperand(0) == &LI) {
          ++NumIgnoredInst;
          return;
        }
  Ptr.getNode()->mergeTypeInfo(LI.getType(), Ptr.getOffset());
}
void GraphBuilder::visitStoreInst(StoreInst &SI) {
  DEBUG(errs() << "[local] visiting store: " << SI << "\n");
  Type *StoredTy = SI.getOperand(0)->getType();
  // Resolve the node for the address being stored to; a null handle means the
  // address is a NULL constant, and there is nothing to record.
  DSNodeHandle Dest = getValueDest(SI.getOperand(1));
  if (Dest.isNull()) return;

  // Mark that the node is written to...
  Dest.getNode()->setModifiedMarker();

  // Ensure a type-record exists...
  Dest.getNode()->growSizeForType(StoredTy, Dest.getOffset());

  // Avoid adding edges from null, or processing non-"pointer" stores
  if (isa<PointerType>(StoredTy))
    Dest.addEdgeTo(getValueDest(SI.getOperand(0)));

  // Optimization: skip merging type info when storing a freshly loaded value
  // (mirrors the load/store round-trip check in visitLoadInst).
  if(TypeInferenceOptimize)
    if(SI.getOperand(0)->hasOneUse())
      if(isa<LoadInst>(SI.getOperand(0))){
        ++NumIgnoredInst;
        return;
      }
  Dest.getNode()->mergeTypeInfo(StoredTy, Dest.getOffset());
}
void GraphBuilder::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  DEBUG(errs() << "[local] visiting atomic cmpxchg: " << I << "\n");
  // A pointer-typed result is handled conservatively by visitInstruction.
  if (isa<PointerType>(I.getType())) {
    visitInstruction (I);
    return;
  }
  //
  // Create a DSNode for the dereferenced pointer .  If the DSNode is NULL, do
  // nothing more (this can occur if the pointer is a NULL constant; bugpoint
  // can generate such code).
  //
  DSNodeHandle Ptr = getValueDest(I.getPointerOperand());
  if (Ptr.isNull()) return;

  //
  // Make that the memory object is read and written.
  //
  Ptr.getNode()->setReadMarker();
  Ptr.getNode()->setModifiedMarker();

  //
  // If the result of the compare-and-swap is a pointer, then we need to do
  // a few things:
  //  o Merge the compare and swap values (which are pointers) with the result
  //  o Merge the DSNode of the pointer *within* the memory object with the
  //    DSNode of the compare, swap, and result DSNode.
  //
  // NOTE(review): this branch can never execute — the identical condition
  // already returned early at the top of the function, so the code below is
  // dead. Confirm intent before removing.
  //
  if (isa<PointerType>(I.getType())) {
    //
    // Get the DSNodeHandle of the memory object returned from the load.  Make
    // it the DSNodeHandle of the instruction's result.
    //
    DSNodeHandle FieldPtr = getLink (Ptr);
    setDestTo(I, getLink(Ptr));

    //
    // Merge the result, compare, and swap values of the instruction.
    //
    FieldPtr.mergeWith (getValueDest (I.getCompareOperand()));
    FieldPtr.mergeWith (getValueDest (I.getNewValOperand()));
  }

  //
  // Modify the DSNode so that it has the loaded/written type at the
  // appropriate offset.
  //
  Ptr.getNode()->growSizeForType(I.getType(), Ptr.getOffset());
  Ptr.getNode()->mergeTypeInfo(I.getType(), Ptr.getOffset());
  return;
}
void GraphBuilder::visitAtomicRMWInst(AtomicRMWInst &I) {
  DEBUG(errs() << "[local] visiting atomic RMW: " << I << "\n");
  // Resolve the node for the dereferenced pointer; a null handle means the
  // pointer is a NULL constant (bugpoint can generate such code), so there is
  // nothing to record.
  DSNodeHandle Ptr = getValueDest(I.getPointerOperand());
  if (Ptr.isNull())
    return;
  // An atomicrmw both reads and writes its memory object.
  Ptr.getNode()->setReadMarker();
  Ptr.getNode()->setModifiedMarker();
  // Record the accessed type at the appropriate offset within the node.
  Ptr.getNode()->growSizeForType(I.getType(), Ptr.getOffset());
  Ptr.getNode()->mergeTypeInfo(I.getType(), Ptr.getOffset());
}
void GraphBuilder::visitReturnInst(ReturnInst &RI) {
  DEBUG(errs() << "[local] visiting return: " << RI << "\n");
  // Only a returned pointer feeds the function's return node.
  if (RI.getNumOperands() == 0)
    return;
  Value *RetVal = RI.getOperand(0);
  if (!isa<PointerType>(RetVal->getType()))
    return;
  G.getOrCreateReturnNodeFor(*FB).mergeWith(getValueDest(RetVal));
}
void GraphBuilder::visitVAArgInst(VAArgInst &I) {
  DEBUG(errs() << "[local] visiting vaarg: " << I << "\n");
  Module *M = FB->getParent();
  Triple TargetTriple(M->getTargetTriple());
  Triple::ArchType Arch = TargetTriple.getArch();
  switch(Arch) {
  case Triple::x86_64: {
    // On x86_64, we have va_list as a struct {i32, i32, i8*, i8* }
    // The first i8* is where arguments generally go, but the second i8* can
    // be used also to pass arguments by register.
    // We model this by having both the i8*'s point to an array of pointers
    // to the arguments.
    DSNodeHandle Ptr = G.getVANodeFor(*FB);
    DSNodeHandle Dest = getValueDest(&I);
    if (Ptr.isNull()) return;

    // Make that the node is read and written
    Ptr.getNode()->setReadMarker()->setModifiedMarker();

    // Not updating type info, as it is already a collapsed node
    if (isa<PointerType>(I.getType()))
      Dest.mergeWith(Ptr);
    return;
  }

  default: {
    // NOTE(review): everything after llvm_unreachable in this case is dead in
    // builds where llvm_unreachable aborts; it only runs if unreachable is
    // compiled out. Confirm whether the fallback is still wanted.
    llvm_unreachable("What frontend generates this?");
    DSNodeHandle Ptr = getValueDest(I.getOperand(0));

    //FIXME: also updates the argument
    if (Ptr.isNull()) return;

    // Make that the node is read and written
    Ptr.getNode()->setReadMarker()->setModifiedMarker();

    // Ensure a type record exists.
    DSNode *PtrN = Ptr.getNode();
    PtrN->mergeTypeInfo(I.getType(), Ptr.getOffset());

    if (isa<PointerType>(I.getType()))
      setDestTo(I, getLink(Ptr));
  }
  }
}
void GraphBuilder::visitIntToPtrInst(IntToPtrInst &I) {
  DEBUG(errs() << "[local] visiting inttoptr: " << I << "\n");
  // An inttoptr whose only use is a comparison introduces no points-to
  // information; count it and skip it.
  if(I.hasOneUse()) {
    if(isa<ICmpInst>(*(I.use_begin()))) {
      NumBoringIntToPtr++;
      return;
    }
  }
  // BUGFIX: create the node only after the early-out above. The original
  // allocated the DSNode first, leaving an orphaned node in the graph on the
  // "boring" path (harmless only because removeDeadNodes cleaned it up later).
  DSNode *N = createNode();
  N->setIntToPtrMarker();
  N->setUnknownMarker();
  setDestTo(I, N);
}
void GraphBuilder::visitPtrToIntInst(PtrToIntInst& I) {
  DEBUG(errs() << "[local] visiting ptrtoint: " << I << "\n");
  DSNode* N = getValueDest(I.getOperand(0)).getNode();
  // A ptrtoint used only by a comparison leaks no address.
  // NOTE(review): this bumps NumBoringIntToPtr even though the instruction is
  // a ptrtoint — possibly intentional (shared counter), confirm.
  if(I.hasOneUse()) {
    if(isa<ICmpInst>(*(I.use_begin()))) {
      NumBoringIntToPtr++;
      return;
    }
  }
  // Follow the single-use chain of the integer value; if it ends in a branch
  // (without passing through a load/store/call), the address never escapes.
  // The Seen set guards against single-use cycles.
  if(I.hasOneUse()) {
    Value *V = dyn_cast<Value>(*(I.use_begin()));
    DenseSet<Value *> Seen;
    while(V && V->hasOneUse() &&
          Seen.insert(V).second) {
      if(isa<LoadInst>(V))
        break;
      if(isa<StoreInst>(V))
        break;
      if(isa<CallInst>(V))
        break;
      V = dyn_cast<Value>(*(V->use_begin()));
    }
    if(isa<BranchInst>(V)){
      NumBoringIntToPtr++;
      return;
    }
  }
  // Otherwise, the pointer's node may escape through the integer.
  if(N)
    N->setPtrToIntMarker();
}
void GraphBuilder::visitBitCastInst(BitCastInst &I) {
  DEBUG(errs() << "[local] visiting bitcast: " << I << "\n");
  // A pointer bitcast aliases its operand: same node, same offset.
  if (!isa<PointerType>(I.getType()))
    return;
  DSNodeHandle Src = getValueDest(I.getOperand(0));
  if (!Src.isNull())
    setDestTo(I, Src);
}
// Intentionally a no-op (beyond debug output): comparisons neither create
// nodes nor merge points-to sets here.
void GraphBuilder::visitCmpInst(CmpInst &I) {
  DEBUG(errs() << "[local] visiting compare: " << I << "\n");
  //Address can escape through cmps
}
// Computes the byte offset selected by a sequence of aggregate indices,
// mirroring DataLayout's getIndexedOffset but without insisting that the
// outermost type be a pointer (used for insertvalue/extractvalue).
unsigned getValueOffset(Type *Ty, ArrayRef<unsigned> Idxs,
                        const DataLayout &TD) {
  unsigned Offset = 0;
  for (unsigned Idx : Idxs) {
    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      // Struct field: add its offset from the layout, then descend into it.
      const StructLayout *Layout = TD.getStructLayout(STy);
      Offset += Layout->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);
    } else {
      // Array/vector element: descend first, then add index * element size.
      Ty = cast<SequentialType>(Ty)->getElementType();
      Offset += (uint64_t)Idx * TD.getTypeAllocSize(Ty);
    }
  }
  return Offset;
}
void GraphBuilder::visitInsertValueInst(InsertValueInst& I) {
  DEBUG(errs() << "[local] visiting insertvalue: " << I << "\n");
  // Give the result an alloca-marked node, then merge it with the incoming
  // aggregate so both refer to the same memory object.
  setDestTo(I, createNode()->setAllocaMarker());

  Type *StoredTy = I.getInsertedValueOperand()->getType();
  DSNodeHandle Dest = getValueDest(&I);
  Dest.mergeWith(getValueDest(I.getAggregateOperand()));

  // Mark that the node is written to...
  Dest.getNode()->setModifiedMarker();

  // Byte offset of the inserted field within the aggregate type.
  Type* STy = I.getAggregateOperand()->getType();
  unsigned Offset = getValueOffset(STy, I.getIndices(), TD);

  // Ensure a type-record exists...
  Dest.getNode()->mergeTypeInfo(StoredTy, Offset);

  // Avoid adding edges from null, or processing non-"pointer" stores
  if (isa<PointerType>(StoredTy))
    Dest.addEdgeTo(getValueDest(I.getInsertedValueOperand()));
}
void GraphBuilder::visitExtractValueInst(ExtractValueInst& I) {
  DEBUG(errs() << "[local] visiting extractvalue: " << I << "\n");
  // The aggregate's memory object is being read.
  DSNodeHandle Agg = getValueDest(I.getAggregateOperand());
  Agg.getNode()->setReadMarker();
  // Record the extracted type at the indexed offset within the aggregate.
  unsigned Offset =
      getValueOffset(I.getAggregateOperand()->getType(), I.getIndices(), TD);
  Agg.getNode()->mergeTypeInfo(I.getType(), Offset);
  // Extracting a pointer makes the result point at the aggregate's link.
  if (isa<PointerType>(I.getType()))
    setDestTo(I, getLink(Agg));
}
//
// Method: visitGetElementPtrInst()
//
// Description:
//  Compute the DSNode and byte offset that a GEP instruction refers to and
//  record that as the GEP's destination.  The index list is walked,
//  accumulating a byte offset for struct-field indices; whenever indexing is
//  type-incompatible with the node's recorded size/offset, the node is
//  folded (collapsed to a single type-unknown cell) and the offset reset.
//
void GraphBuilder::visitGetElementPtrInst(User &GEP) {
  DEBUG(errs() << "[local] visiting GEP: " << GEP << "\n");
  //
  // Ensure that the indexed pointer has a DSNode.
  //
  DSNodeHandle NodeH = getValueDest(GEP.getOperand(0));
  if (NodeH.isNull())
    NodeH = createNode();
  //
  // There are a few quick and easy cases to handle. If the DSNode of the
  // indexed pointer is already folded, then we know that the result of the
  // GEP will have the same offset into the same DSNode
  // as the indexed pointer.
  //
  if (!NodeH.isNull() &&
      NodeH.getNode()->isNodeCompletelyFolded()) {
    setDestTo(GEP, NodeH);
    return;
  }
  //
  // Okay, no easy way out. Calculate the offset into the object being
  // indexed.
  //
  int Offset = 0;
  // FIXME: I am not sure if the code below is completely correct (especially
  //        if we start doing fancy analysis on non-constant array indices).
  //        What if the array is indexed using a larger index than its declared
  //        size?  Does the LLVM verifier catch such issues?
  //
  //
  // Determine the offset (in bytes) between the result of the GEP and the
  // GEP's pointer operand.
  //
  // Note: All of these subscripts are indexing INTO the elements we have...
  //
  // FIXME: We can do better for array indexing.  First, if the array index is
  //        constant, we can determine how much farther we're moving the
  //        pointer.  Second, we can try to use the results of other analysis
  //        passes (e.g., ScalarEvolution) to find min/max values to do less
  //        conservative type-folding.
  //
  for (gep_type_iterator I = gep_type_begin(GEP), E = gep_type_end(GEP);
       I != E; ++I)
    if (StructType *STy = dyn_cast<StructType>(*I)) {
      // indexing into a structure
      // next index must be a constant
      const ConstantInt* CUI = cast<ConstantInt>(I.getOperand());
      int FieldNo = CUI->getSExtValue();
      // Number of bytes the node must span to hold this whole struct at the
      // current (node offset + accumulated) offset.
      unsigned requiredSize = TD.getTypeAllocSize(STy) + NodeH.getOffset() + Offset;
      //
      // Grow the DSNode size as needed.
      //
      if (!NodeH.getNode()->isArrayNode() || NodeH.getNode()->getSize() <= 0){
        if (requiredSize > NodeH.getNode()->getSize())
          NodeH.getNode()->growSize(requiredSize);
      }
      // Advance the accumulated offset by the byte offset of this field.
      Offset += (unsigned)TD.getStructLayout(STy)->getElementOffset(FieldNo);
      if (TypeInferenceOptimize) {
        if (ArrayType* AT = dyn_cast<ArrayType>(STy->getTypeAtIndex(FieldNo))) {
          // Record the topmost array type reached through this field.
          NodeH.getNode()->mergeTypeInfo(AT, NodeH.getOffset() + Offset);
          if ((++I) == E) {
            break;
          }
          // Check if we are still indexing into an array.
          // We only record the topmost array type of any nested array.
          // Keep skipping indexes till we reach a non-array type.
          // J is the type of the next index.
          // Uncomment the line below to get all the nested types.
          gep_type_iterator J = I;
          while (isa<ArrayType>(*(++J))) {
            // NodeH.getNode()->mergeTypeInfo(AT1, NodeH.getOffset() + Offset);
            if((++I) == E) {
              break;
            }
            J = I;
          }
          if ((I) == E) {
            break;
          }
        }
      }
    } else if (ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
      // indexing into an array.
      NodeH.getNode()->setArrayMarker();
      Type *CurTy = ATy->getElementType();
      //
      // Ensure that the DSNode's size is large enough to contain one
      // element of the type to which the pointer points.
      //
      if (!isa<ArrayType>(CurTy) && NodeH.getNode()->getSize() <= 0) {
        NodeH.getNode()->growSize(TD.getTypeAllocSize(CurTy));
      } else if(isa<ArrayType>(CurTy) && NodeH.getNode()->getSize() <= 0){
        // Nested array: grow to the size of the innermost element type.
        Type *ETy = (cast<ArrayType>(CurTy))->getElementType();
        while(isa<ArrayType>(ETy)) {
          ETy = (cast<ArrayType>(ETy))->getElementType();
        }
        NodeH.getNode()->growSize(TD.getTypeAllocSize(ETy));
      }
      // Find if the DSNode belongs to the array
      // If not fold.
      if((NodeH.getOffset() || Offset != 0)
         || (!isa<ArrayType>(CurTy)
             && (NodeH.getNode()->getSize() != TD.getTypeAllocSize(CurTy)))) {
        M.witness(NodeH, {&GEP, I.getOperand()},
                  "node does not belong to array"
                  );
        DEBUG(
          errs() << "[local] FOLDING FOR ARRAY ACCESS" << "\n";
          errs() << "[local] type: " << *CurTy << "\n";
          errs() << "[local] offset: " << Offset
                 << " (" << NodeH.getOffset() << ")\n";
          errs() << "[local] size: " << TD.getTypeAllocSize(CurTy)
                 << " (" << NodeH.getNode()->getSize() << ")\n";
          errs() << "[local] value: " << GEP << "\n";
          errs() << "[local] index: " << *I.getOperand() << "\n";
        );
        NodeH.getNode()->foldNodeCompletely();
        // NOTE(review): this call discards its result; presumably kept to
        // re-resolve the handle after folding — confirm it is intentional.
        NodeH.getNode();
        Offset = 0;
        break;
      }
    } else if (const PointerType *PtrTy = dyn_cast<PointerType>(*I)) {
      // Get the type pointed to by the pointer
      Type *CurTy = PtrTy->getElementType();
      //
      // Some LLVM transforms lower structure indexing into byte-level
      // indexing.  Try to recognize forms of that here.
      //
      Type * Int8Type  = Type::getInt8Ty(CurTy->getContext());
      ConstantInt * IS = dyn_cast<ConstantInt>(I.getOperand());
      if (IS &&
          (NodeH.getOffset() == 0) &&
          (!(NodeH.getNode()->isArrayNode())) &&
          (CurTy == Int8Type)) {
        // Calculate the offset of the field
        Offset += IS->getSExtValue() * TD.getTypeAllocSize (Int8Type);
        //
        // Grow the DSNode size as needed.
        //
        unsigned requiredSize = Offset + TD.getTypeAllocSize (Int8Type);
        if (NodeH.getNode()->getSize() <= requiredSize){
          NodeH.getNode()->growSize (requiredSize);
        }
        // Add in the offset calculated...
        NodeH.setOffset(NodeH.getOffset()+Offset);
        // Check the offset
        DSNode *N = NodeH.getNode();
        if (N) N->checkOffsetFoldIfNeeded(NodeH.getOffset());
        // NodeH is now the pointer we want to GEP to be...
        setDestTo(GEP, NodeH);
        return;
      }
      //
      // Unless we're advancing the pointer by zero bytes via array indexing,
      // fold the node (i.e., mark it type-unknown) and indicate that we're
      // indexing zero bytes into the object (because all fields are aliased).
      //
      // Note that we break out of the loop if we fold the node.  Once
      // something is folded, all values within it are considered to alias.
      //
      if (!isa<Constant>(I.getOperand()) ||
          !cast<Constant>(I.getOperand())->isNullValue()) {
        //
        // Treat the memory object (DSNode) as an array.
        //
        NodeH.getNode()->setArrayMarker();
        //
        // Ensure that the DSNode's size is large enough to contain one
        // element of the type to which the pointer points.
        //
        if (!isa<ArrayType>(CurTy) && NodeH.getNode()->getSize() <= 0){
          NodeH.getNode()->growSize(TD.getTypeAllocSize(CurTy));
        } else if (isa<ArrayType>(CurTy) && NodeH.getNode()->getSize() <= 0){
          // Nested array: grow to the size of the innermost element type.
          Type *ETy = (cast<ArrayType>(CurTy))->getElementType();
          while (isa<ArrayType>(ETy)) {
            ETy = (cast<ArrayType>(ETy))->getElementType();
          }
          NodeH.getNode()->growSize(TD.getTypeAllocSize(ETy));
        }
        //
        // Fold the DSNode if we're indexing into it in a type-incompatible
        // manner.  That can occur if:
        //  1) The DSNode represents a pointer into the object at a non-zero
        //     offset.
        //  2) The offset of the pointer is already non-zero.
        //  3) The size of the array element does not match the size into which
        //     the pointer indexing is indexing.
        //
        if (NodeH.getOffset() || Offset != 0 ||
            (!isa<ArrayType>(CurTy) &&
             (NodeH.getNode()->getSize() != TD.getTypeAllocSize(CurTy)))) {
          M.witness(NodeH, {&GEP, I.getOperand()},
                    "type-incompatible access into node"
                    );
          DEBUG(
            errs() << "[local] FOLDING FOR POINTER ACCESS" << "\n";
            errs() << "[local] type: " << *CurTy << "\n";
            errs() << "[local] offset: " << Offset
                   << " (" << NodeH.getOffset() << ")\n";
            errs() << "[local] size: " << TD.getTypeAllocSize(CurTy)
                   << " (" << NodeH.getNode()->getSize() << ")\n";
            errs() << "[local] value: " << GEP << "\n";
            errs() << "[local] index: " << *I.getOperand() << "\n";
          );
          NodeH.getNode()->foldNodeCompletely();
          // NOTE(review): result-discarding call, same pattern as the array
          // case above — confirm intent.
          NodeH.getNode();
          Offset = 0;
          break;
        }
      }
    }
  // Add in the offset calculated...
  NodeH.setOffset(NodeH.getOffset()+Offset);
  // Check the offset
  DSNode *N = NodeH.getNode();
  if (N) N->checkOffsetFoldIfNeeded(NodeH.getOffset());
  // NodeH is now the pointer we want to GEP to be...
  setDestTo(GEP, NodeH);
}
// Forward call instructions to the common call-site handler.
void GraphBuilder::visitCallInst(CallInst &CI) {
  DEBUG(errs() << "[local] visiting call: " << CI << "\n");
  visitCallSite(&CI);
}
// Forward invoke instructions to the common call-site handler.
void GraphBuilder::visitInvokeInst(InvokeInst &II) {
  DEBUG(errs() << "[local] visiting invoke: " << II << "\n");
  visitCallSite(&II);
}
//
// Method: visitVAStart()
//
// Description:
//  Handle a call to the va_start intrinsic.  va_start fills in its va_list
//  argument, so we locate (or create) the DSNode for that argument and hand
//  it to the architecture-specific va_list modeling code so it can be merged
//  with the variable arguments flowing in from call sites.
//
void GraphBuilder::visitVAStart(CallSite CS) {
  DEBUG(errs() << "[local] visiting VA start: "
               << CS.getCalledValue()->getName() << "\n");
  DSNodeHandle ListNH = getValueDest(CS.getArgument(0));
  DSNode *ListNode = ListNH.getNode();
  if (ListNode)
    visitVAStartNode(ListNode);
}
//
// Method: visitVAStartNode()
//
// Description:
//  Model the effect of va_start on the DSNode N representing the va_list
//  argument.  The va_list layout that is modeled depends on the target
//  architecture recorded in the module's target triple.
//
void GraphBuilder::visitVAStartNode(DSNode* N) {
  assert(N && "Null node as argument");
  assert(FB && "No function for this graph?");
  Module *M = FB->getParent();
  assert(M && "No module for function");
  Triple TargetTriple(M->getTargetTriple());
  Triple::ArchType Arch = TargetTriple.getArch();
  // Fetch the VANode associated with the func containing the call to va_start
  DSNodeHandle & VANH = G.getVANodeFor(*FB);
  // Make sure this NodeHandle has a node to go with it
  if (VANH.isNull()) VANH.mergeWith(createNode());
  // Create a dsnode for an array of pointers to the VAInfo for this func
  // We create one such array for each function analyzed, as all
  // calls to va_start will populate their argument with the same data.
  if (!VAArray) VAArray = createNode();
  VAArray->setArrayMarker();
  VAArray->foldNodeCompletely();
  VAArray->setLink(0,VANH);
  // VAStart modifies its argument
  N->setModifiedMarker();
  // For the architectures we support, build dsnodes that match
  // how we know va_list is used.
  switch (Arch) {
  case Triple::x86:
    // On x86, we have:
    // va_list as a pointer to an array of pointers to the variable arguments
    if (N->getSize() < 1)
      N->growSize(1);
    N->setLink(0, VAArray);
    break;
  case Triple::x86_64:
    // On x86_64, we have va_list as a struct {i32, i32, i8*, i8* }
    // The first i8* is where arguments generally go, but the second i8* can
    // be used also to pass arguments by register.
    // We model this by having both the i8*'s point to an array of pointers
    // to the arguments.
    if (N->getSize() < 24)
      N->growSize(24); // sizeof the va_list struct mentioned above
    N->setLink(8,VAArray);  // first i8*
    N->setLink(16,VAArray); // second i8*
    break;
  default:
    // FIXME: For now we abort if we don't know how to handle this arch
    // Either add support for other architectures, or at least mark the
    // nodes unknown/incomplete or whichever results in the correct
    // conservative behavior in the general case
    llvm_unreachable("VAstart not supported on this architecture!");
    // NOTE(review): the statement below is dead when llvm_unreachable aborts;
    // it only takes effect in builds where llvm_unreachable is a no-op.
    //XXX: This might be good enough in those cases that we don't know
    //what the arch does
    N->setIncompleteMarker()->setUnknownMarker()->foldNodeCompletely();
  }
  // XXX: We used to set the alloca marker for the DSNode passed to va_start.
  // Seems to me that you could allocate the va_list on the heap, so ignoring
  // for now.
  N->setModifiedMarker()->setVAStartMarker();
}
///
/// Method: visitIntrinsic()
///
/// Description:
///  Generate correct DSNodes for calls to LLVM intrinsic functions.
///
/// Inputs:
///  CS - The CallSite representing the call or invoke to the intrinsic.
///  F  - A pointer to the function called by the call site.
///
/// Return value:
///  true  - This intrinsic is properly handled by this method.
///  false - This intrinsic is not recognized by DSA.
///
bool GraphBuilder::visitIntrinsic(CallSite CS, Function *F) {
  DEBUG(errs() << "[local] visiting intrinsic: " << F->getName() << "\n");
  ++NumIntrinsicCall;
  //
  // If this is a debug intrinsic, then don't do any special processing.
  //
  if (isa<DbgInfoIntrinsic>(CS.getInstruction()))
    return true;
  switch (F->getIntrinsicID()) {
  case Intrinsic::vastart: {
    visitVAStart(CS);
    return true;
  }
  case Intrinsic::vacopy: {
    // Simply merge the two arguments to va_copy.
    // This results in loss of precision on the temporaries used to manipulate
    // the va_list, and so isn't a big deal.  In theory we would build a
    // separate graph for this (like the one created in visitVAStartNode)
    // and only merge the node containing the variable arguments themselves.
    DSNodeHandle destNH = getValueDest(CS.getArgument(0));
    DSNodeHandle srcNH  = getValueDest(CS.getArgument(1));
    destNH.mergeWith(srcNH);
    return true;
  }
  case Intrinsic::stacksave: {
    // The saved stack pointer aliases the stack in unknown ways; model it as
    // a folded, incomplete, unknown alloca node.
    DSNode * Node = createNode();
    Node->setAllocaMarker()->setIncompleteMarker()->setUnknownMarker();
    Node->foldNodeCompletely();
    setDestTo (*(CS.getInstruction()), Node);
    return true;
  }
  case Intrinsic::stackrestore:
    // NOTE(review): this marks the node of the call instruction itself, yet
    // stackrestore returns void and takes the saved pointer as its first
    // argument — confirm whether CS.getArgument(0) was intended here.
    getValueDest(CS.getInstruction()).getNode()->setAllocaMarker()
      ->setIncompleteMarker()
      ->setUnknownMarker()
      ->foldNodeCompletely();
    return true;
  case Intrinsic::vaend:
    // TODO: What to do here?
    return true;
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    // Merge the first & second arguments, and mark the memory read and
    // modified.
    DSNodeHandle RetNH = getValueDest(CS.getArgument(0));
    RetNH.mergeWith(getValueDest(CS.getArgument(1)));
    if (DSNode *N = RetNH.getNode())
      N->setModifiedMarker()->setReadMarker();
    return true;
  }
  case Intrinsic::memset:
    // Mark the memory modified.
    if (DSNode *N = getValueDest(CS.getArgument(0)).getNode())
      N->setModifiedMarker();
    return true;
  // TODO: Add support for the new EH system
#if 0
  case Intrinsic::eh_exception: {
    DSNode * Node = createNode();
    Node->setIncompleteMarker();
    Node->foldNodeCompletely();
    setDestTo (*(CS.getInstruction()), Node);
    return true;
  }
  case Intrinsic::eh_selector: {
    for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
         I != E; ++I) {
      if (isa<PointerType>((*I)->getType())) {
        DSNodeHandle Ptr = getValueDest(*I);
        if(Ptr.getNode()) {
          Ptr.getNode()->setReadMarker();
          Ptr.getNode()->setIncompleteMarker();
        }
      }
    }
    return true;
  }
#endif
  case Intrinsic::eh_typeid_for: {
    // The type-info argument is read but not written.
    DSNodeHandle Ptr = getValueDest(CS.getArgument(0));
    Ptr.getNode()->setReadMarker();
    Ptr.getNode()->setIncompleteMarker();
    return true;
  }
  case Intrinsic::prefetch:
    return true;
  case Intrinsic::objectsize:
    return true;
  //
  // The return address/frame address aliases with the stack,
  // is type-unknown, and should
  // have the unknown flag set since we don't know where it goes.
  //
  case Intrinsic::returnaddress:
  case Intrinsic::frameaddress: {
    DSNode * Node = createNode();
    Node->setAllocaMarker()->setIncompleteMarker()->setUnknownMarker();
    Node->foldNodeCompletely();
    setDestTo (*(CS.getInstruction()), Node);
    return true;
  }
  // Process lifetime intrinsics
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
    return true;
  default: {
    // Ignore pointer-free intrinsics: if neither the return type nor any
    // argument is a pointer, DSA has nothing to track.
    if (!isa<PointerType>(F->getReturnType())) {
      bool hasPtr = false;
      for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
           I != E && !hasPtr; ++I)
        if (isa<PointerType>(I->getType()))
          hasPtr = true;
      if (!hasPtr)
        return true;
    }
    DEBUG(errs() << "[dsa:local] Unhandled intrinsic: " << F->getName() << "\n");
    llvm_unreachable("Unhandled intrinsic");
    return false;
  }
  }
}
//
// Method: visitCallSite()
//
// Description:
//  Build the DSCallSite record for a call or invoke: resolve intrinsics,
//  conservatively fold everything touched by inline asm, and otherwise
//  collect the return-value node, fixed pointer arguments, and a merged
//  node for the variadic arguments.
//
void GraphBuilder::visitCallSite(CallSite CS) {
  //
  // Get the called value.  Strip off any casts which are lossless.
  //
  Value *Callee = CS.getCalledValue()->stripPointerCasts();
  // Special case handling of certain libc allocation functions here.
  if (Function *F = dyn_cast<Function>(Callee))
    if (F->isIntrinsic() && visitIntrinsic(CS, F))
      return;
  // Can't do much about inline asm (yet!): merge every pointer operand and
  // the pointer result into one completely folded node.
  if (isa<InlineAsm> (Callee)) {
    ++NumAsmCall;
    DSNodeHandle RetVal;
    Instruction *I = CS.getInstruction();
    if (isa<PointerType > (I->getType()))
      RetVal = getValueDest(I);
    // Calculate the arguments vector...
    for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E; ++I)
      if (isa<PointerType > ((*I)->getType()))
        RetVal.mergeWith(getValueDest(*I));
    if (!RetVal.isNull()) {
      M.witness(RetVal, {I},
                "inline asm call"
                );
      RetVal.getNode()->foldNodeCompletely();
    }
    return;
  }
  // Set up the return value...
  DSNodeHandle RetVal;
  Instruction *I = CS.getInstruction();
  if (isa<PointerType>(I->getType()))
    RetVal = getValueDest(I);
  // For indirect calls, the callee itself also has a DSNode.
  DSNode *CalleeNode = 0;
  if (!isa<Function>(Callee)) {
    CalleeNode = getValueDest(Callee).getNode();
    if (CalleeNode == 0) {
      DEBUG(errs() << "WARNING: Program is calling through a null pointer?\n" << *I);
      return;  // Calling a null pointer?
    }
  }
  // NOTE: This code is identical to 'DSGraph::getDSCallSiteForCallSite',
  // the reason it's duplicated is because this calls getValueDest instead
  // of getNodeForValue to get the DSNodes for the arguments.  Since we're in
  // local it's possible that we need to create a DSNode for the argument, as
  // opposed to getNodeForValue which simply retrieves the existing node.
  // Get the FunctionType for the called function
  const FunctionType *CalleeFuncType = DSCallSite::FunctionTypeOfCallSite(CS);
  int NumFixedArgs = CalleeFuncType->getNumParams();
  // Sanity check--this really, really shouldn't happen
  if (!CalleeFuncType->isVarArg())
    assert(CS.arg_size() == static_cast<unsigned>(NumFixedArgs) &&
           "Too many arguments/incorrect function signature!");
  std::vector<DSNodeHandle> Args;
  Args.reserve(CS.arg_size());
  DSNodeHandle VarArgNH;
  // Calculate the arguments vector...
  // Add all fixed pointer arguments, then merge the rest together
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    if (isa<PointerType>((*I)->getType())) {
      DSNodeHandle ArgNode = getValueDest(*I);
      if (I - CS.arg_begin() < NumFixedArgs) {
        Args.push_back(ArgNode);
      } else {
        VarArgNH.mergeWith(ArgNode);
      }
    }
  // Add a new function call entry...
  if (CalleeNode) {
    ++NumIndirectCall;
    G.getFunctionCalls().push_back(DSCallSite(CS, RetVal, VarArgNH, CalleeNode,
                                              Args));
  } else {
    ++NumDirectCall;
    G.getFunctionCalls().push_back(DSCallSite(CS, RetVal, VarArgNH,
                                              cast<Function>(Callee),
                                              Args));
  }
}
// visitInstruction - Fallback handler for every instruction type without a
// dedicated visitor.  All pointer operands and the pointer result (if any)
// are merged into a single node that is marked as having unknown
// composition, since we don't know what the instruction does with them.
void GraphBuilder::visitInstruction(Instruction &Inst) {
  DEBUG(errs() << "[local] visiting instruction: " << Inst << "\n");
  DSNodeHandle Merged;
  if (isa<PointerType>(Inst.getType()))
    Merged = getValueDest(&Inst);
  for (Use &U : Inst.operands()) {
    Value *Op = U.get();
    if (isa<PointerType>(Op->getType()))
      Merged.mergeWith(getValueDest(Op));
  }
  DSNode *N = Merged.getNode();
  if (N)
    N->setUnknownMarker();
}
//===----------------------------------------------------------------------===//
// LocalDataStructures Implementation
//===----------------------------------------------------------------------===//
//
// Function: MergeConstantInitIntoNode()
//
// Description:
//  Merge the specified constant into the specified DSNode.  Pointers create
//  links to the pointee's node; scalars are ignored; aggregates recurse
//  element by element.
//
void
GraphBuilder::MergeConstantInitIntoNode(DSNodeHandle &NH,
                                        Type* Ty,
                                        Constant *C) {
  //
  // Ensure a type-record exists...
  //
  DSNode *NHN = NH.getNode();
  //NHN->mergeTypeInfo(Ty, NH.getOffset());
  //
  // If we've found something of pointer type, create or find its DSNode and
  // make a link from the specified DSNode to the new DSNode describing the
  // pointer we've just found.
  //
  if (isa<PointerType>(Ty)) {
    NHN->mergeTypeInfo(Ty, NH.getOffset());
    NH.addEdgeTo(getValueDest(C));
    return;
  }
  //
  // If the type of the object (array element, structure field, etc.) is an
  // integer or floating point type, then just ignore it.  It has no DSNode.
  //
  if (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()) return;
  //
  // Handle aggregate constants.
  //
  if (ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
    //
    // For an array, we don't worry about different elements pointing to
    // different objects; we essentially pretend that all array elements alias.
    //
    Type * ElementType = cast<ArrayType>(Ty)->getElementType();
    for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i) {
      Constant * ConstElement = cast<Constant>(CA->getOperand(i));
      MergeConstantInitIntoNode(NH, ElementType, ConstElement);
    }
  } else if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
    //
    // For a structure, we need to merge each element of the constant structure
    // into the specified DSNode.  However, we must also handle structures that
    // end with a zero-length array ([0 x sbyte]); this is a common C idiom
    // that continues to plague the world.
    //
    //NHN->mergeTypeInfo(Ty, NH.getOffset());
    const StructLayout *SL = TD.getStructLayout(cast<StructType>(Ty));
    for (unsigned i = 0, e = CS->getNumOperands(); i != e; ++i) {
      // Re-fetch the node each iteration: merging below may have changed it.
      DSNode *NHN = NH.getNode();
      if (SL->getElementOffset(i) < SL->getSizeInBytes()) {
        //
        // Get the type and constant value of this particular element of the
        // constant structure.
        //
        Type * ElementType = cast<StructType>(Ty)->getElementType(i);
        Constant * ConstElement = cast<Constant>(CS->getOperand(i));
        //
        // Get the offset (in bytes) into the memory object that we're
        // analyzing.
        //
        unsigned offset = NH.getOffset()+(unsigned)SL->getElementOffset(i);
        NHN->mergeTypeInfo(ElementType, offset);
        //
        // Create a new DSNodeHandle.  This DSNodeHandle will point to the same
        // DSNode as the one we're constructing for our caller; however, it
        // will point into a different offset into that DSNode.
        //
        DSNodeHandle NewNH (NHN, offset);
        assert ((NHN->isNodeCompletelyFolded() || (NewNH.getOffset() == offset))
                && "Need to resize DSNode!");
        //
        // Recursively merge in this element of the constant structure into the
        // DSNode.
        //
        MergeConstantInitIntoNode(NewNH, ElementType, ConstElement);
      } else if (SL->getElementOffset(i) == SL->getSizeInBytes()) {
        //
        // If this is one of those cute structures that ends with a zero-length
        // array, just fold the DSNode now and get it over with.
        //
        DEBUG(errs() << "Zero size element at end of struct\n" );
        M.witness(NHN, {CS},
                  "zero size element at end of struct"
                  );
        NHN->foldNodeCompletely();
      } else {
        llvm_unreachable("type was smaller than offsets of struct layout indicate");
      }
    }
  } else if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) {
    //
    // Undefined values and NULL pointers have no DSNodes, so they do nothing.
    //
  } else if (isa<ConstantDataSequential>(C)) {
    //
    // ConstantDataSequential's are arrays of integers or floats, so they
    // have no DSNodes.  Nothing to do here.
    //
  } else {
    llvm_unreachable("Unknown constant type!");
  }
}
//
// Method: mergeInGlobalInitializer()
//
// Description:
//  Merge a defined global variable's initializer into the DSNode that
//  represents the global, growing the node first so the constant fits.
//
void GraphBuilder::mergeInGlobalInitializer(GlobalVariable *GV) {
  // Ensure that the global variable is not external
  assert(!GV->isDeclaration() && "Cannot merge in external global!");
  //
  // Get a node handle to the global node and merge the initializer into it.
  //
  DSNodeHandle NH = getValueDest(GV);
  //
  // Ensure that the DSNode is large enough to hold the new constant that we'll
  // be adding to it.
  //
  // Strip array wrappers: size the node by the innermost element type.
  Type * ElementType = GV->getType()->getElementType();
  while(ArrayType *ATy = dyn_cast<ArrayType>(ElementType)) {
    ElementType = ATy->getElementType();
  }
  // A folded node has no meaningful size, so only grow unfolded nodes.
  if(!NH.getNode()->isNodeCompletelyFolded()) {
    unsigned requiredSize = TD.getTypeAllocSize(ElementType) + NH.getOffset();
    if (NH.getNode()->getSize() < requiredSize){
      NH.getNode()->growSize (requiredSize);
    }
  }
  //
  // Do the actual merging in of the constant initializer.
  //
  MergeConstantInitIntoNode(NH, GV->getType()->getElementType(), GV->getInitializer());
}
//
// Method: mergeExternalGlobal()
//
// Description:
//  Handle an external (declaration-only) global.  There is no initializer
//  to merge, but a DSNode must still exist for the global; getValueDest()
//  creates it as a side effect, so the returned handle is intentionally
//  discarded.  (The previous code bound it to an unused local, which drew
//  compiler warnings and carried a comment copied from
//  mergeInGlobalInitializer that wrongly claimed an initializer is merged.)
//
void GraphBuilder::mergeExternalGlobal(GlobalVariable *GV) {
  (void)getValueDest(GV);
}
// some evil programs use sections as linker generated arrays
// read a description of this behavior in and apply it
// format: numglobals section globals...
// terminates when numglobals == 0
//
// Each record in the file names a linker section and a list of globals; every
// named global is merged with every function/global placed in that section,
// so DSA sees the section as one aliased array.
void handleMagicSections(DSGraph* GlobalsGraph, Module& M) {
  std::ifstream msf(hasMagicSections.c_str(), std::ifstream::in);
  if (msf.good()) {
    // no checking happens here: a malformed file silently yields empty names
    // (a failed `>> count` zeroes count and ends the loop)
    unsigned count = 0;
    msf >> count;
    while (count) {
      std::string section;
      msf >> section;
      // Collect every function and global variable placed in this section.
      svset<Value*> inSection;
      for (Function &F : M)
        if (F.hasSection() && F.getSection() == section)
          inSection.insert(&F);
      for (GlobalVariable &GV : M.globals())
        if (GV.hasSection() && GV.getSection() == section)
          inSection.insert(&GV);
      // Merge each named global's node with every value in the section.
      for (unsigned x = 0; x < count; ++x) {
        std::string global;
        msf >> global;
        Value* V = M.getNamedValue(global);
        if (V) {
          DSNodeHandle& DHV = GlobalsGraph->getNodeForValue(V);
          for (svset<Value*>::iterator SI = inSection.begin(),
               SE = inSection.end(); SI != SE; ++SI) {
            DEBUG(errs() << "Merging " << V->getName().str() << " with "
                  << (*SI)->getName().str() << "\n");
            GlobalsGraph->getNodeForValue(*SI).mergeWith(DHV);
          }
        }
      }
      msf >> count;
    }
  } else {
    errs() << "Failed to open magic sections file:" << hasMagicSections <<
      "\n";
  }
}
// Pass identification token; its address uniquely identifies this pass.
char LocalDataStructures::ID;
//
// Method: runOnModule()
//
// Description:
//  Entry point of the local DSA pass: build the globals graph, then a local
//  DSGraph per function body, propagate incompleteness/unknown flags, and
//  finally re-clone the refined globals information back into each graph.
//
// Return value:
//  false - The module is analyzed but never modified.
//
bool LocalDataStructures::runOnModule(Module &M) {
  init(&M.getDataLayout());
  addrAnalysis = &getAnalysis<AddressTakenAnalysis>();
  // First step, build the globals graph.
  {
    GraphBuilder GGB(*GlobalsGraph, *this);
    // Add initializers for all of the globals to the globals graph.
    // Globals in the "llvm.metadata" section are annotations, not data.
    for (GlobalVariable &GV : M.globals())
      if (!(GV.hasSection() && std::string(GV.getSection()) == "llvm.metadata")) {
        if (GV.isDeclaration())
          GGB.mergeExternalGlobal(&GV);
        else
          GGB.mergeInGlobalInitializer(&GV);
      }
    // Add Functions to the globals graph.
    for (Function &F : M) {
      if(addrAnalysis->hasAddressTaken(&F)) {
        GGB.mergeFunction(&F);
      }
    }
  }
  if (hasMagicSections.size())
    handleMagicSections(GlobalsGraph, M);
  // Next step, iterate through the nodes in the globals graph, unioning
  // together the globals into equivalence classes.
  formGlobalECs();
  // Iterate through the address taken functions in the globals graph,
  // collecting them in a list, to be used as target for call sites that
  // can't be resolved.
  formGlobalFunctionList();
  GlobalsGraph->maskIncompleteMarkers();
  // Calculate all of the graphs...
  for (Function &F : M)
    if (!F.isDeclaration()) {
      DSGraph* G = new DSGraph(GlobalECs, getDataLayout(), *TypeSS, GlobalsGraph);
      setDSGraph(F, G);
      // Building the GraphBuilder populates G with F's local information.
      GraphBuilder GGB(F, *G, *this);
      G->getAuxFunctionCalls() = G->getFunctionCalls();
      propagateUnknownFlag(G);
      callgraph.insureEntry(&F);
      G->buildCallGraph(callgraph, GlobalFunctionList, true);
      G->maskIncompleteMarkers();
      G->markIncompleteNodes(DSGraph::MarkFormalArgs
                             |DSGraph::IgnoreGlobals);
      cloneIntoGlobals(G, DSGraph::DontCloneCallNodes |
                       DSGraph::DontCloneAuxCallNodes |
                       DSGraph::StripAllocaBit);
      formGlobalECs();
      DEBUG(G->AssertGraphOK());
    }
  //GlobalsGraph->removeTriviallyDeadNodes();
  GlobalsGraph->markIncompleteNodes(DSGraph::MarkFormalArgs
                                    |DSGraph::IgnoreGlobals);
  GlobalsGraph->computeExternalFlags(DSGraph::ProcessCallSites);
  // Now that we've computed all of the graphs, and merged all of the info into
  // the globals graph, see if we have further constrained the globals in the
  // program if so, update GlobalECs and remove the extraneous globals from the
  // program.
  formGlobalECs();
  propagateUnknownFlag(GlobalsGraph);
  // Second pass over the functions: refresh each graph with the refined
  // globals information and recompute incompleteness.
  for (Function &F : M)
    if (!F.isDeclaration()) {
      DSGraph *Graph = getOrCreateGraph(&F);
      Graph->maskIncompleteMarkers();
      cloneGlobalsInto(Graph, DSGraph::DontCloneCallNodes |
                       DSGraph::DontCloneAuxCallNodes);
      Graph->markIncompleteNodes(DSGraph::MarkFormalArgs
                                 |DSGraph::IgnoreGlobals);
    }
  DEBUG(print(errs(), &M));
  return false;
}
|
/* \file
* See header file for a description of this class.
*
* \author N. Amapane - INFN Torino
*/
#include "bSlab.h"
#include "MagneticField/VolumeGeometry/interface/MagVolume6Faces.h"
#include "MagneticField/Layers/interface/MagBSlab.h"
#include "MagneticField/Layers/interface/MagVerbosity.h"
#include "Utilities/General/interface/precomputed_value_sort.h"
#include <iostream>
using namespace SurfaceOrientation;
using namespace std;
using namespace magneticfield;
// Construct a slab from the volumes in [begin, end).  With more than one
// volume, order them by increasing phi and verify that all share the same
// Z extent (they should, by construction of the slab).
bSlab::bSlab(handles::const_iterator begin, handles::const_iterator end, bool debugVal)
    : volumes(begin, end), mslab(nullptr), debug(debugVal) {
  if (volumes.size() <= 1)
    return;  // nothing to sort or cross-check

  // Sort volumes by dphi i.e. phi(j)-phi(i) > 0 if j>1.
  precomputed_value_sort(volumes.begin(), volumes.end(), ExtractPhiMax(), LessDPhi());
  if (debug)
    cout << "  Slab has " << volumes.size() << " volumes" << endl;

  // Check that all volumes have the same dZ, comparing each against the first.
  const float epsilon = 0.001;
  handles::const_iterator it = volumes.begin();
  const float zTop = (*it)->surface(zplus).position().z();
  const float zBottom = (*it)->surface(zminus).position().z();
  for (++it; it != volumes.end(); ++it) {
    const float deltaTop = fabs(zTop - (*it)->surface(zplus).position().z());
    const float deltaBottom = fabs(zBottom - (*it)->surface(zminus).position().z());
    if (deltaTop > epsilon || deltaBottom > epsilon) {
      if (debug)
        cout << "*** WARNING: slabs Z coords not matching: D_Zmax = " << deltaTop
             << " D_Zmin = " << deltaBottom << endl;
    }
  }
}
// Lower phi boundary of the slab: minPhi of the first (lowest-phi) volume.
Geom::Phi<float> bSlab::minPhi() const { return volumes.front()->minPhi(); }
// Upper phi boundary of the slab: maxPhi of the last (highest-phi) volume.
Geom::Phi<float> bSlab::maxPhi() const { return volumes.back()->maxPhi(); }
// Build the MagBSlab for this slab, lazily, caching the result so repeated
// calls return the same object.
MagBSlab* bSlab::buildMagBSlab() const {
  if (mslab == nullptr) {
    vector<MagVolume*> mVols;
    mVols.reserve(volumes.size());
    for (const auto& volHandle : volumes) {
      mVols.push_back(volHandle->magVolume);
    }
    const double zLow = volumes.front()->surface(zminus).position().z();
    mslab = new MagBSlab(mVols, zLow);  //FIXME
  }
  return mslab;
}
|
// Copyright (c) 2001, Daniel C. Nuffer
// Copyright (c) 2001-2010 Hartmut Kaiser
// http://spirit.sourceforge.net/
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#if !defined(BOOST_SPIRIT_ITERATOR_MULTI_PASS_MAR_16_2007_1124AM)
#define BOOST_SPIRIT_ITERATOR_MULTI_PASS_MAR_16_2007_1124AM
#include <boost/config.hpp>
#include <boost/spirit/home/support/iterators/multi_pass_fwd.hpp>
#include <boost/spirit/home/support/iterators/detail/multi_pass.hpp>
#include <boost/spirit/home/support/iterators/detail/combine_policies.hpp>
#include <boost/limits.hpp>
#include <boost/detail/workaround.hpp>
#include <boost/utility/base_from_member.hpp>
namespace boost { namespace spirit
{
    ///////////////////////////////////////////////////////////////////////////
    // The default multi_pass instantiation uses a ref-counted std_deque scheme.
    //
    // multi_pass turns a single-pass input (e.g. an input iterator) into a
    // forward iterator by buffering what has been read.  All behavior —
    // ownership, checking, input, storage — is delegated to the Policies
    // template parameter; shared state lives in a heap-allocated object held
    // via base_from_member so it is initialized before the policy base.
    ///////////////////////////////////////////////////////////////////////////
    template<typename T, typename Policies>
    class multi_pass
      : private boost::base_from_member<
            typename Policies::BOOST_NESTED_TEMPLATE shared<T>*>
      , public Policies::BOOST_NESTED_TEMPLATE unique<T>
#if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
      , typename iterator_base_creator<T, typename Policies::input_policy>::type
#endif
    {
    private:
        // unique and shared data types
        typedef typename Policies::BOOST_NESTED_TEMPLATE unique<T>
            policies_base_type;
        typedef typename Policies::BOOST_NESTED_TEMPLATE shared<T>
            shared_data_type;
        typedef boost::base_from_member<shared_data_type*> member_base;
        // define the types the standard embedded iterator typedefs are taken
        // from
#if defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION)
        // NOTE(review): `Input` is not a template parameter of this class;
        // this legacy branch looks stale — confirm before relying on it.
        typedef typename iterator_base_creator<Input, T>::type iterator_type;
#else
        typedef typename policies_base_type::input_policy iterator_type;
#endif
    public:
        // standard iterator typedefs
        typedef std::forward_iterator_tag iterator_category;
        typedef typename iterator_type::value_type value_type;
        typedef typename iterator_type::difference_type difference_type;
        typedef typename iterator_type::distance_type distance_type;
        typedef typename iterator_type::reference reference;
        typedef typename iterator_type::pointer pointer;
        // Default construction yields an end-of-input iterator (null shared
        // data; see is_eof()).
        multi_pass() : member_base((shared_data_type*)0) {}
        // Construct from the underlying input; allocates the shared buffer.
        explicit multi_pass(T& input)
          : member_base(new shared_data_type(input)), policies_base_type(input) {}
        explicit multi_pass(T const& input)
          : member_base(new shared_data_type(input)), policies_base_type(input) {}
        // Copies share the buffered data; the policy's clone() bumps the
        // reference count.
        multi_pass(multi_pass const& x)
          : member_base(x.member), policies_base_type(x)
        {
            policies_base_type::clone(*this);
        }
#if BOOST_WORKAROUND(__GLIBCPP__, == 20020514)
        // The standard library shipped with gcc-3.1 has a bug in
        // bits/basic_string.tcc. It tries to use iter::iter(0) to
        // construct an iterator. Ironically, this happens in sanity
        // checking code that isn't required by the standard.
        // The workaround is to provide an additional constructor that
        // ignores its int argument and behaves like the default constructor.
        multi_pass(int) : member_base((shared_data_type*)0) {}
#endif // BOOST_WORKAROUND(__GLIBCPP__, == 20020514)
        // release() reports whether this was the last owner; only then is the
        // shared buffer destroyed and freed.
        ~multi_pass()
        {
            if (policies_base_type::release(*this)) {
                policies_base_type::destroy(*this);
                delete this->member;
            }
        }
        // Copy-and-swap assignment.
        multi_pass& operator=(multi_pass const& x)
        {
            if (this != &x) {
                multi_pass temp(x);
                temp.swap(*this);
            }
            return *this;
        }
        void swap(multi_pass& x)
        {
            spirit::detail::swap(this->member, x.member);
            this->policies_base_type::swap(x);
        }
        // Dereference via the checking policy, then the ownership policy.
        reference operator*() const
        {
            policies_base_type::check(*this);
            return policies_base_type::dereference(*this);
        }
        pointer operator->() const
        {
            return &(operator*());
        }
        multi_pass& operator++()
        {
            policies_base_type::check(*this);
            policies_base_type::increment(*this);
            return *this;
        }
        multi_pass operator++(int)
        {
            multi_pass tmp(*this);
            ++*this;
            return tmp;
        }
        // Drop buffered input, unless clearing is currently inhibited and the
        // caller did not force it.
        void clear_queue(BOOST_SCOPED_ENUM(traits::clear_mode) mode =
            traits::clear_mode::clear_if_enabled)
        {
            if (mode == traits::clear_mode::clear_always || !inhibit_clear_queue())
                policies_base_type::clear_queue(*this);
        }
        bool inhibit_clear_queue() const
        {
            return this->member->inhibit_clear_queue_;
        }
        void inhibit_clear_queue(bool flag)
        {
            this->member->inhibit_clear_queue_ = flag;
        }
        // Two iterators compare equal if both are at end-of-input, or (when
        // neither is) the policy says so.
        bool operator==(multi_pass const& y) const
        {
            if (is_eof())
                return y.is_eof();
            if (y.is_eof())
                return false;
            return policies_base_type::equal_to(*this, y);
        }
        bool operator<(multi_pass const& y) const
        {
            return policies_base_type::less_than(*this, y);
        }
        // allow access to base member
        shared_data_type* shared() const { return this->member; }
    private: // helper functions
        // A default-constructed instance (null member) is always at EOF.
        bool is_eof() const
        {
            return (0 == this->member) || policies_base_type::is_eof(*this);
        }
    };
    // Inequality in terms of the member operator==.
    template <typename T, typename Policies>
    inline bool
    operator!=(multi_pass<T, Policies> const& x, multi_pass<T, Policies> const& y)
    {
        return !(x == y);
    }
    // Greater-than in terms of the member operator<.
    template <typename T, typename Policies>
    inline bool
    operator>(multi_pass<T, Policies> const& x, multi_pass<T, Policies> const& y)
    {
        return y < x;
    }
    // Greater-or-equal in terms of the member operator<.
    template <typename T, typename Policies>
    inline bool
    operator>=(multi_pass<T, Policies> const& x, multi_pass<T, Policies> const& y)
    {
        return !(x < y);
    }
    // Less-or-equal in terms of the member operator<.
    template <typename T, typename Policies>
    inline bool
    operator<=(multi_pass<T, Policies> const& x, multi_pass<T, Policies> const& y)
    {
        return !(y < x);
    }
///////////////////////////////////////////////////////////////////////////
// Generator function
///////////////////////////////////////////////////////////////////////////
// Create a multi_pass with an explicit policy set from a (mutable) source.
template <typename Policies, typename T>
inline multi_pass<T, Policies>
make_multi_pass(T& i)
{
return multi_pass<T, Policies>(i);
}
// Create a multi_pass with an explicit policy set from a const source.
template <typename Policies, typename T>
inline multi_pass<T, Policies>
make_multi_pass(T const& i)
{
return multi_pass<T, Policies>(i);
}
///////////////////////////////////////////////////////////////////////////
// Create a multi_pass using the default policies for T (mutable source).
template <typename T>
inline multi_pass<T>
make_default_multi_pass(T& i)
{
return multi_pass<T>(i);
}
// Create a multi_pass using the default policies for T (const source).
template <typename T>
inline multi_pass<T>
make_default_multi_pass(T const& i)
{
return multi_pass<T>(i);
}
///////////////////////////////////////////////////////////////////////////
// ADL-findable swap forwarding to the member swap.
template <typename T, typename Policies>
inline void
swap(multi_pass<T, Policies> &x, multi_pass<T, Policies> &y)
{
x.swap(y);
}
///////////////////////////////////////////////////////////////////////////
// define special functions allowing to integrate any multi_pass iterator
// with expectation points
namespace traits
{
// Customization point: clear the look-ahead queue of a multi_pass iterator
// (used by expectation points to release buffered input).
template <typename T, typename Policies>
void clear_queue(multi_pass<T, Policies>& mp
, BOOST_SCOPED_ENUM(traits::clear_mode) mode)
{
mp.clear_queue(mode);
}
// Customization point: set the clear-queue inhibition flag.
template <typename T, typename Policies>
void inhibit_clear_queue(multi_pass<T, Policies>& mp, bool flag)
{
mp.inhibit_clear_queue(flag);
}
// Customization point: query the clear-queue inhibition flag.
template <typename T, typename Policies>
bool inhibit_clear_queue(multi_pass<T, Policies>& mp)
{
return mp.inhibit_clear_queue();
}
}
}} // namespace boost::spirit
#endif
|
/*
* Copyright (C) 2018 Intel Corporation.All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/**
* @file IasMixerCmdInterface.cpp
* @date August 2016
* @brief implementation of the mixer command interface
*/
#include <cstdio>
#include <cmath>
#include "mixer/IasMixerCmdInterface.hpp"
#include "avbaudiomodules/internal/audio/common/IasAudioLogging.hpp"
#include "audio/mixerx/IasMixerCmd.hpp"
#include "mixer/IasMixerCore.hpp"
namespace IasAudio {
static const std::string cClassName = "IasMixerCmdInterface::";
#define LOG_PREFIX cClassName + __func__ + "(" + std::to_string(__LINE__) + "):"
static const int32_t cMinInputGainOffset = -200; // minimum allowed input gain offset (presumably 1/10 dB units, i.e. -20 dB — TODO confirm)
static const int32_t cMaxInputGainOffset = 200; // maximum allowed input gain offset (presumably 1/10 dB units, i.e. +20 dB — TODO confirm)
static const int32_t cCutOffValue = 1440; // beyond this balance/fader magnitude one channel is fully muted
// Constructor: stores the generic module configuration and the processing
// core, starts with an empty command dispatch table (filled in init()) and
// fetches the "_MIX" DLT log context.
IasMixerCmdInterface::IasMixerCmdInterface(const IasIGenericAudioCompConfig* config, IasMixerCore* core) :
IasIModuleId{config}
,mCore{core}
,mCmdFuncTable{}
,mLogContext{IasAudioLogging::getDltContext("_MIX")}
{
}
// Destructor: only logs; members clean up via their own destructors.
IasMixerCmdInterface::~IasMixerCmdInterface()
{
DLT_LOG_CXX(*mLogContext, DLT_LOG_INFO, LOG_PREFIX, "Deleted");
}
// Build the command dispatch table, indexed by IasMixerCmdIds. Slots that are
// not assigned here remain default-constructed (never dispatched because
// processCmd() bounds-checks against the table size).
IasIModuleId::IasResult IasMixerCmdInterface::init()
{
const std::size_t maxSize = IasMixer::IasMixerCmdIds::eIasLastEntry;
mCmdFuncTable.resize(maxSize);
mCmdFuncTable[IasMixer::IasMixerCmdIds::eIasSetModuleState] = &IasMixerCmdInterface::setModuleState;
mCmdFuncTable[IasMixer::IasMixerCmdIds::eIasSetInputGainOffset] = &IasMixerCmdInterface::setInputGainOffset;
mCmdFuncTable[IasMixer::IasMixerCmdIds::eIasSetBalance] = &IasMixerCmdInterface::setBalance;
mCmdFuncTable[IasMixer::IasMixerCmdIds::eIasSetFader] = &IasMixerCmdInterface::setFader;
return IasIModuleId::eIasOk;
}
// Dispatch an incoming command: read the integer "cmd" property and forward
// the call through the function table built in init(). Fails when the "cmd"
// property is missing or the id is outside the registered table.
IasIModuleId::IasResult IasMixerCmdInterface::processCmd(const IasProperties& cmdProperties, IasProperties& returnProperties)
{
  int32_t cmdId;
  const IasProperties::IasResult status = cmdProperties.get("cmd", &cmdId);
  if (status != IasProperties::eIasOk)
  {
    DLT_LOG_CXX(*mLogContext, DLT_LOG_ERROR, LOG_PREFIX, "Property with key \"cmd\" not found");
    return IasIModuleId::eIasFailed;
  }
  DLT_LOG_CXX(*mLogContext, DLT_LOG_INFO, LOG_PREFIX, "Property with key \"cmd\" found, cmdId=", cmdId);
  if (static_cast<uint32_t>(cmdId) >= mCmdFuncTable.size())
  {
    DLT_LOG_CXX(*mLogContext, DLT_LOG_ERROR, LOG_PREFIX, "Cmd with cmdId", cmdId, "not registered");
    return IasIModuleId::eIasFailed;
  }
  return mCmdFuncTable[cmdId](this, cmdProperties, returnProperties);
}
// Handle eIasSetModuleState: enable processing when the "moduleState" property
// equals "on", disable it for any other value; echoes the resulting state into
// returnProperties. Fails when the property is missing.
IasIModuleId::IasResult IasMixerCmdInterface::setModuleState(const IasProperties& cmdProperties, IasProperties& returnProperties)
{
  DLT_LOG_CXX(*mLogContext, DLT_LOG_INFO, LOG_PREFIX, "called");
  cmdProperties.dump("cmdProperties");
  std::string moduleState;
  if (cmdProperties.get("moduleState", &moduleState) != IasProperties::eIasOk)
  {
    return IasIModuleId::eIasFailed;
  }
  if (moduleState == "on")
  {
    mCore->enableProcessing();
    returnProperties.set<std::string>("moduleState", "on");
  }
  else
  {
    mCore->disableProcessing();
    returnProperties.set<std::string>("moduleState", "off");
  }
  return IasIModuleId::eIasOk;
}
// Resolve the stream id for the pin named in the "pin" property.
// Returns eIasFailed when the property is missing or the pin is unknown
// (the configuration logs the latter case itself).
IasIModuleId::IasResult IasMixerCmdInterface::getStreamId(const IasProperties& cmdProperties, int32_t& streamId)
{
  std::string pinName;
  if (cmdProperties.get("pin", &pinName) != IasProperties::eIasOk)
  {
    return IasIModuleId::eIasFailed;
  }
  // Failure logging is already done inside getStreamId of the configuration.
  const IasAudioProcessingResult procres = mConfig->getStreamId(pinName, streamId);
  return (procres == eIasAudioProcOK) ? IasIModuleId::eIasOk : IasIModuleId::eIasFailed;
}
// Map an audio-processing result onto the module-id result space:
// eIasAudioProcOK -> eIasOk, anything else -> eIasFailed.
IasIModuleId::IasResult IasMixerCmdInterface::translate(IasAudioProcessingResult result)
{
  return (result == eIasAudioProcOK) ? IasIModuleId::eIasOk : IasIModuleId::eIasFailed;
}
// Handle eIasSetBalance: read the target pin and the integer "balance" value,
// convert it into linear left/right attenuation factors and forward them to
// the core. The attenuated side gets 10^(balanceLog/20) (balance presumably in
// 1/10 dB units — TODO confirm); beyond +/-cCutOffValue that side is fully
// muted. The louder side always stays at factor 1.0.
IasIModuleId::IasResult IasMixerCmdInterface::setBalance(const IasProperties& cmdProperties, IasProperties& returnProperties)
{
(void)returnProperties;
DLT_LOG_CXX(*mLogContext, DLT_LOG_INFO, LOG_PREFIX, "called");
cmdProperties.dump("cmdProperties");
int32_t streamId;
const auto modres = getStreamId(cmdProperties, streamId);
if(modres != IasIModuleId::eIasOk)
{
return modres;
}
int32_t balance;
const auto propres = cmdProperties.get("balance", &balance);
if(propres != IasProperties::eIasOk)
{
return IasIModuleId::eIasFailed;
}
float balanceRight;
float balanceLeft;
// balanceLog carries the (signed) dB value; sign decides which side is damped.
const float balanceLog = 0.1f * static_cast<float>(balance);
if(balance < 0)
{
// Negative balance attenuates the right channel.
if(balance <= -cCutOffValue)
{
balanceRight = 0.0f;
}
else
{
balanceRight = std::pow(10.0f, 0.05f * balanceLog);
}
balanceLeft = 1.0f;
}
else
{
// Non-negative balance attenuates the left channel.
if(balance >= cCutOffValue)
{
balanceLeft = 0.0f;
}
else
{
balanceLeft = std::pow(10.0f, -0.05f * balanceLog);
}
balanceRight = 1.0f;
}
return translate(mCore->setBalance(streamId, balanceLeft, balanceRight));
}
// Handle eIasSetFader: analogous to setBalance, but along the front/rear axis.
// Negative fader values attenuate the front, non-negative values the rear;
// beyond +/-cCutOffValue the damped side is fully muted.
IasIModuleId::IasResult IasMixerCmdInterface::setFader(const IasProperties& cmdProperties, IasProperties& returnProperties)
{
(void)returnProperties;
DLT_LOG_CXX(*mLogContext, DLT_LOG_INFO, LOG_PREFIX, "called");
cmdProperties.dump("cmdProperties");
int32_t streamId;
const auto modres = getStreamId(cmdProperties, streamId);
if(modres != IasIModuleId::eIasOk)
{
return modres;
}
int32_t fader;
const auto propres = cmdProperties.get("fader", &fader);
if(propres != IasProperties::eIasOk)
{
return IasIModuleId::eIasFailed;
}
float faderFront;
float faderRear;
// faderLog carries the (signed) dB value; sign decides which side is damped.
const float faderLog = 0.1f * static_cast<float>(fader);
if(fader < 0)
{
if(fader <= -cCutOffValue)
{
faderFront = 0.0f;
}
else
{
faderFront = std::pow(10.0f, 0.05f * faderLog);
}
faderRear = 1.0f;
}
else
{
if(fader >= cCutOffValue)
{
faderRear = 0.0f;
}
else
{
faderRear = std::pow(10.0f, -0.05f * faderLog);
}
faderFront = 1.0f;
}
return translate(mCore->setFader(streamId, faderFront, faderRear));
}
// Handle eIasSetInputGainOffset: read the target pin and the integer "gain"
// value, validate it against [cMinInputGainOffset, cMaxInputGainOffset],
// convert to a linear factor (gain presumably in 1/10 dB units — TODO
// confirm) and forward it to the core.
IasIModuleId::IasResult IasMixerCmdInterface::setInputGainOffset(const IasProperties& cmdProperties, IasProperties& returnProperties)
{
  (void)returnProperties;
  DLT_LOG_CXX(*mLogContext, DLT_LOG_INFO, LOG_PREFIX, "called");
  cmdProperties.dump("cmdProperties");
  int32_t streamId;
  const auto modres = getStreamId(cmdProperties, streamId);
  if(modres != IasIModuleId::eIasOk)
  {
    return modres;
  }
  int32_t gain;
  const auto propres = cmdProperties.get("gain", &gain);
  // Bug fix: verify that the "gain" property was actually retrieved BEFORE
  // using its value. Previously the range check below ran first and read
  // `gain` uninitialized whenever the property was missing.
  if(propres != IasProperties::eIasOk)
  {
    return IasIModuleId::eIasFailed;
  }
  if(gain > cMaxInputGainOffset || gain < cMinInputGainOffset)
  {
    DLT_LOG_CXX(*mLogContext, DLT_LOG_ERROR, LOG_PREFIX, "Error, parameter InputGainOffset was set to",gain,", which is out of valid range");
    return IasIModuleId::eIasFailed;
  }
  // Convert dB to a linear factor: 10^(dB/20).
  const float gainLog = 0.1f * static_cast<float>(gain);
  const float gainLin = std::pow(10.0f, 0.05f * gainLog);
  return translate(mCore->setInputGainOffset(streamId, gainLin));
}
} // namespace IasAudio
|
// Environment_UNIX.cpp
//
// $Id: //poco/1.4/Foundation/src/Environment_UNIX.cpp#2 $
//
// Library: Foundation
// Package: Core
// Module: Environment
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:
//
// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
#include "Poco/Environment_UNIX.h"
#include "Poco/Exception.h"
#include "Poco/Buffer.h"
#include <cstring>
#include <unistd.h>
#include <stdlib.h>
#include <sys/utsname.h>
#include <sys/param.h>
#include <cstring>
#if defined(POCO_OS_FAMILY_BSD)
#include <sys/sysctl.h>
#elif POCO_OS == POCO_OS_HPUX
#include <pthread.h>
#endif
namespace Poco {
// Backing storage for strings handed to putenv(); entries must stay alive for
// the remainder of the process (see setImpl()).
EnvironmentImpl::StringMap EnvironmentImpl::_map;
// Serializes all access to the non-thread-safe C environment API.
FastMutex EnvironmentImpl::_mutex;
// Return the value of environment variable `name`; throws NotFoundException
// if the variable is not set. Access is serialized via the class mutex.
std::string EnvironmentImpl::getImpl(const std::string& name)
{
  FastMutex::ScopedLock lock(_mutex);
  const char* value = getenv(name.c_str());
  if (!value)
    throw NotFoundException(name);
  return std::string(value);
}
// Return true when environment variable `name` exists (value may be empty).
bool EnvironmentImpl::hasImpl(const std::string& name)
{
  FastMutex::ScopedLock lock(_mutex);
  const char* value = getenv(name.c_str());
  return value != 0;
}
// Set environment variable `name` to `value` via putenv(). POSIX putenv()
// keeps a pointer to the passed string, so the "name=value" buffer is stored
// in the static _map to keep it alive for the process lifetime (overwriting
// any earlier entry for the same name). Throws SystemException on failure.
void EnvironmentImpl::setImpl(const std::string& name, const std::string& value)
{
FastMutex::ScopedLock lock(_mutex);
std::string var = name;
var.append("=");
var.append(value);
_map[name] = var;
if (putenv((char*) _map[name].c_str()))
{
std::string msg = "cannot set environment variable: ";
msg.append(name);
throw SystemException(msg);
}
}
// Operating system name from uname() (e.g. "Linux").
// NOTE(review): the uname() return value is not checked; on failure the
// struct contents are unspecified — confirm this is acceptable here.
std::string EnvironmentImpl::osNameImpl()
{
struct utsname uts;
uname(&uts);
return uts.sysname;
}
// No distinct display name on generic UNIX; fall back to the kernel name.
std::string EnvironmentImpl::osDisplayNameImpl()
{
return osNameImpl();
}
// Operating system release string from uname() (e.g. kernel version).
std::string EnvironmentImpl::osVersionImpl()
{
struct utsname uts;
uname(&uts);
return uts.release;
}
// Hardware identifier from uname() (e.g. "x86_64").
std::string EnvironmentImpl::osArchitectureImpl()
{
struct utsname uts;
uname(&uts);
return uts.machine;
}
// Network node (host) name from uname().
std::string EnvironmentImpl::nodeNameImpl()
{
struct utsname uts;
uname(&uts);
return uts.nodename;
}
// Number of online processors; falls back to 1 when the platform query
// fails or no query mechanism is available.
unsigned EnvironmentImpl::processorCountImpl()
{
#if defined(POCO_OS_FAMILY_BSD)
  unsigned count;
  std::size_t size = sizeof(count);
  if (sysctlbyname("hw.ncpu", &count, &size, 0, 0))
    return 1;
  else
    return count;
#elif POCO_OS == POCO_OS_HPUX
  return pthread_num_processors_np();
#elif defined(_SC_NPROCESSORS_ONLN)
  // Fix: sysconf() returns long (was narrowed into an int), and the function
  // returns unsigned (the old code cast the result to int instead).
  long count = sysconf(_SC_NPROCESSORS_ONLN);
  if (count <= 0) count = 1;
  return static_cast<unsigned>(count);
#else
  return 1;
#endif
}
} // namespace Poco
//
// nodeIdImpl
//
#if defined(POCO_OS_FAMILY_BSD) || POCO_OS == POCO_OS_QNX
//
// BSD variants
//
#include <sys/types.h>
#include <sys/socket.h>
#include <ifaddrs.h>
#include <net/if_dl.h>
namespace Poco {
// Fill `id` with the MAC address of the first link-layer interface that has
// a hardware address; `id` is zeroed first and left zero when none is found
// or getifaddrs() fails.
void EnvironmentImpl::nodeIdImpl(NodeId& id)
{
std::memset(&id, 0, sizeof(id));
struct ifaddrs* ifaphead;
int rc = getifaddrs(&ifaphead);
if (rc) return;
for (struct ifaddrs* ifap = ifaphead; ifap; ifap = ifap->ifa_next)
{
if (ifap->ifa_addr && ifap->ifa_addr->sa_family == AF_LINK)
{
struct sockaddr_dl* sdl = reinterpret_cast<struct sockaddr_dl*>(ifap->ifa_addr);
// Hardware address starts after the interface name inside sdl_data.
caddr_t ap = (caddr_t) (sdl->sdl_data + sdl->sdl_nlen);
int alen = sdl->sdl_alen;
// NOTE(review): sizeof(id) bytes are copied even if alen < sizeof(id),
// which could over-read the sockaddr — confirm alen >= sizeof(id) holds
// for all accepted interfaces.
if (ap && alen > 0)
{
std::memcpy(&id, ap, sizeof(id));
break;
}
}
}
freeifaddrs(ifaphead);
}
} // namespace Poco
#elif defined(__CYGWIN__) || POCO_OS == POCO_OS_LINUX
//
// Linux, Cygwin
//
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if.h>
#ifndef __CYGWIN__
#include <net/if_arp.h>
#else // workaround for Cygwin, which does not have if_arp.h
#define ARPHRD_ETHER 1 /* Ethernet 10Mbps */
#endif
#include <arpa/inet.h>
#include <unistd.h>
namespace Poco {
// Fill `id` with the MAC address of the first Ethernet interface, obtained
// via SIOCGIFCONF/SIOCGIFHWADDR; `id` is zeroed first and left zero on any
// failure.
void EnvironmentImpl::nodeIdImpl(NodeId& id)
{
std::memset(&id, 0, sizeof(id));
int sock = socket(PF_INET, SOCK_DGRAM, 0);
if (sock == -1) return;
// the following code is loosely based
// on W. Richard Stevens, UNIX Network Programming, pp 434ff.
// Grow the buffer until SIOCGIFCONF reports the same length twice, which
// means the whole interface list fitted.
int lastlen = 0;
int len = 100*sizeof(struct ifreq);
struct ifconf ifc;
char* buf = 0;
for (;;)
{
buf = new char[len];
ifc.ifc_len = len;
ifc.ifc_buf = buf;
if (::ioctl(sock, SIOCGIFCONF, &ifc) < 0)
{
// Some kernels return EINVAL when the buffer is too small; any other
// error (or EINVAL after a successful pass) is fatal.
if (errno != EINVAL || lastlen != 0)
{
close(sock);
delete [] buf;
return;
}
}
else
{
if (ifc.ifc_len == lastlen)
break;
lastlen = ifc.ifc_len;
}
len += 10*sizeof(struct ifreq);
delete [] buf;
}
// Walk the returned ifreq records with a fixed stride.
// NOTE(review): this assumes each record is exactly sizeof(struct ifreq);
// confirm no variable-length sockaddrs occur for the queried family.
for (const char* ptr = buf; ptr < buf + ifc.ifc_len;)
{
const struct ifreq* ifr = reinterpret_cast<const struct ifreq*>(ptr);
int rc = ioctl(sock, SIOCGIFHWADDR, ifr);
if (rc != -1)
{
const struct sockaddr* sa = reinterpret_cast<const struct sockaddr*>(&ifr->ifr_hwaddr);
if (sa->sa_family == ARPHRD_ETHER)
{
std::memcpy(&id, sa->sa_data, sizeof(id));
break;
}
}
ptr += sizeof(struct ifreq);
}
close(sock);
delete [] buf;
}
} // namespace Poco
#elif defined(POCO_OS_FAMILY_UNIX)
//
// General Unix
//
#include <sys/ioctl.h>
#if defined(sun) || defined(__sun)
#include <sys/sockio.h>
#endif
#include <sys/socket.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <unistd.h>
namespace Poco {
// Fill `id` with a hardware address by resolving the local host name to an
// IP address and querying the ARP cache (SIOCGARP) for it; `id` is zeroed
// first and left zero on any failure along the way.
void EnvironmentImpl::nodeIdImpl(NodeId& id)
{
std::memset(&id, 0, sizeof(id));
char name[MAXHOSTNAMELEN];
if (gethostname(name, sizeof(name)))
return;
struct hostent* pHost = gethostbyname(name);
if (!pHost) return;
int s = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP);
if (s == -1) return;
struct arpreq ar;
std::memset(&ar, 0, sizeof(ar));
struct sockaddr_in* pAddr = reinterpret_cast<struct sockaddr_in*>(&ar.arp_pa);
pAddr->sin_family = AF_INET;
// Use the host's first resolved address as the ARP lookup key.
std::memcpy(&pAddr->sin_addr, *pHost->h_addr_list, sizeof(struct in_addr));
int rc = ioctl(s, SIOCGARP, &ar);
close(s);
if (rc < 0) return;
std::memcpy(&id, ar.arp_ha.sa_data, sizeof(id));
}
} // namespace Poco
#endif
|
//===- OpDocGen.cpp - MLIR operation documentation generator --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// OpDocGen uses the description of operations to generate documentation for the
// operations.
//
//===----------------------------------------------------------------------===//
#include "DocGenUtilities.h"
#include "mlir/Support/IndentedOstream.h"
#include "mlir/TableGen/GenInfo.h"
#include "mlir/TableGen/Operator.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Signals.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
using namespace llvm;
using namespace mlir;
using namespace mlir::tblgen;
using mlir::tblgen::Operator;
// Emit the description by aligning the text to the left per line (e.g.,
// removing the minimum indentation across the block).
//
// This expects that the description in the tablegen file is already formatted
// in a way the user wanted but has some additional indenting due to being
// nested in the op definition.
// Trailing spaces/tabs are trimmed; the indented-ostream reindent() strips the
// common leading indentation from the block.
void mlir::tblgen::emitDescription(StringRef description, raw_ostream &os) {
raw_indented_ostream ros(os);
ros.reindent(description.rtrim(" \t"));
}
/// Emit `str` (via emitDescription) followed by a newline; does nothing for an
/// empty string.
static void emitIfNotEmpty(StringRef str, raw_ostream &os) {
  if (str.empty())
    return;
  emitDescription(str, os);
  os << "\n";
}
/// Emit one markdown table row for a named constraint: the (backquoted) name
/// or a placeholder, followed by the constraint description.
template <typename T>
static void emitNamedConstraint(const T &it, raw_ostream &os) {
  if (it.name.empty())
    os << "«unnamed»";
  else
    os << "`" << it.name << "`";
  os << " | " << it.constraint.getDescription() << "\n";
}
//===----------------------------------------------------------------------===//
// Operation Documentation
//===----------------------------------------------------------------------===//
/// Emit the assembly format of an operation as a fenced code block, with
/// continuation lines aligned under the first line of the format.
static void emitAssemblyFormat(StringRef opName, StringRef format,
raw_ostream &os) {
os << "\nSyntax:\n\n```\noperation ::= `" << opName << "` ";
// Print the assembly format aligned.
unsigned indent = strlen("operation ::= ");
std::pair<StringRef, StringRef> split = format.split('\n');
os << split.first.trim() << "\n";
// Emit the remaining lines, skipping blank ones, indented to line up with
// the first chunk after the "operation ::= " prefix.
do {
split = split.second.split('\n');
StringRef formatChunk = split.first.trim();
if (!formatChunk.empty())
os.indent(indent) << formatChunk << "\n";
} while (!split.second.empty());
os << "```\n\n";
}
// Emit the markdown documentation for a single operation: heading, summary,
// assembly syntax, description, and tables for attributes, operands, results
// and successors (each table only when non-empty).
static void emitOpDoc(Operator op, raw_ostream &os) {
os << llvm::formatv("### `{0}` ({1})\n", op.getOperationName(),
op.getQualCppClassName());
// Emit the summary, syntax, and description if present.
if (op.hasSummary())
os << "\n" << op.getSummary() << "\n\n";
if (op.hasAssemblyFormat())
emitAssemblyFormat(op.getOperationName(), op.getAssemblyFormat().trim(),
os);
if (op.hasDescription())
mlir::tblgen::emitDescription(op.getDescription(), os);
// Emit attributes.
if (op.getNumAttributes() != 0) {
// TODO: Attributes are only documented by TableGen name, with no further
// info. This should be improved.
os << "\n#### Attributes:\n\n";
os << "| Attribute | MLIR Type | Description |\n"
<< "| :-------: | :-------: | ----------- |\n";
for (const auto &it : op.getAttributes()) {
StringRef storageType = it.attr.getStorageType();
os << "`" << it.name << "` | " << storageType << " | "
<< it.attr.getDescription() << "\n";
}
}
// Emit each of the operands.
if (op.getNumOperands() != 0) {
os << "\n#### Operands:\n\n";
os << "| Operand | Description |\n"
<< "| :-----: | ----------- |\n";
for (const auto &it : op.getOperands())
emitNamedConstraint(it, os);
}
// Emit results.
if (op.getNumResults() != 0) {
os << "\n#### Results:\n\n";
os << "| Result | Description |\n"
<< "| :----: | ----------- |\n";
for (const auto &it : op.getResults())
emitNamedConstraint(it, os);
}
// Emit successors.
if (op.getNumSuccessors() != 0) {
os << "\n#### Successors:\n\n";
os << "| Successor | Description |\n"
<< "| :-------: | ----------- |\n";
for (const auto &it : op.getSuccessors())
emitNamedConstraint(it, os);
}
os << "\n";
}
// Emit documentation for every record deriving from "Op", preceded by an
// autogenerated-file marker.
static void emitOpDoc(const RecordKeeper &recordKeeper, raw_ostream &os) {
  os << "<!-- Autogenerated by mlir-tblgen; don't manually edit -->\n";
  for (const llvm::Record *opDef : recordKeeper.getAllDerivedDefinitions("Op"))
    emitOpDoc(Operator(opDef), os);
}
//===----------------------------------------------------------------------===//
// Type Documentation
//===----------------------------------------------------------------------===//
// Emit the markdown documentation for a single dialect type: heading plus
// its (dedented) description.
static void emitTypeDoc(const Type &type, raw_ostream &os) {
os << "### " << type.getDescription() << "\n";
emitDescription(type.getTypeDescription(), os);
os << "\n";
}
//===----------------------------------------------------------------------===//
// Dialect Documentation
//===----------------------------------------------------------------------===//
// Emit the markdown documentation for one dialect: title, summary and
// description, a table-of-contents marker, then type and operation sections
// (each only when non-empty).
static void emitDialectDoc(const Dialect &dialect, ArrayRef<Operator> ops,
ArrayRef<Type> types, raw_ostream &os) {
os << "# '" << dialect.getName() << "' Dialect\n\n";
emitIfNotEmpty(dialect.getSummary(), os);
emitIfNotEmpty(dialect.getDescription(), os);
os << "[TOC]\n\n";
// TODO: Add link between use and def for types
if (!types.empty()) {
os << "## Type definition\n\n";
for (const Type &type : types)
emitTypeDoc(type, os);
}
if (!ops.empty()) {
os << "## Operation definition\n\n";
for (const Operator &op : ops)
emitOpDoc(op, os);
}
}
// Group all "Op" and "DialectType" records by their dialect, then emit one
// documentation section per dialect that defines at least one operation.
// Note: dialects that only define types (no ops) are not emitted, since
// iteration runs over dialectOps.
static void emitDialectDoc(const RecordKeeper &recordKeeper, raw_ostream &os) {
const auto &opDefs = recordKeeper.getAllDerivedDefinitions("Op");
const auto &typeDefs = recordKeeper.getAllDerivedDefinitions("DialectType");
std::map<Dialect, std::vector<Operator>> dialectOps;
std::map<Dialect, std::vector<Type>> dialectTypes;
for (auto *opDef : opDefs) {
Operator op(opDef);
dialectOps[op.getDialect()].push_back(op);
}
for (auto *typeDef : typeDefs) {
Type type(typeDef);
if (auto dialect = type.getDialect())
dialectTypes[dialect].push_back(type);
}
os << "<!-- Autogenerated by mlir-tblgen; don't manually edit -->\n";
for (const auto &dialectWithOps : dialectOps)
emitDialectDoc(dialectWithOps.first, dialectWithOps.second,
dialectTypes[dialectWithOps.first], os);
}
//===----------------------------------------------------------------------===//
// Gen Registration
//===----------------------------------------------------------------------===//
// Registers the op-documentation backend under -gen-op-doc.
// Fix: the description said "Generate dialect documentation" — a copy/paste
// from the gen-dialect-doc registration below; this backend emits operation
// documentation.
static mlir::GenRegistration
    genOpRegister("gen-op-doc", "Generate op documentation",
                  [](const RecordKeeper &records, raw_ostream &os) {
                    emitOpDoc(records, os);
                    return false;
                  });
// Registers the dialect-documentation backend under -gen-dialect-doc.
static mlir::GenRegistration
genRegister("gen-dialect-doc", "Generate dialect documentation",
[](const RecordKeeper &records, raw_ostream &os) {
emitDialectDoc(records, os);
return false;
});
|
#pragma once
#ifndef __CONVERTER_CLI_HPP__
#define __CONVERTER_CLI_HPP__
#pragma managed
#include <msclr\marshal_cppstd.h>
#include <vcclr.h>
#pragma unmanaged
#include <string>
#pragma managed
namespace ess::clr {
using set_operand = System::Collections::Generic::List<System::String^>^;
/**
 * Data type converter class.
 *
 * Provides the static conversion helpers needed at the boundary between
 * C++/CLI managed types and standard C++ types (strings and string lists).
 */
public ref class converter {
public:
/**
 * Convert a standard C++ string to a C++/CLI string.
 *
 * @param[in] str Instance of std::string const& (stdc++)
 */
static System::String^ std_str_to_cli_str(std::string const& str);
/**
 * Convert a C++/CLI string to a standard C++ string.
 *
 * @param[in] str Instance of System::String^ (c++/cli)
 */
static std::string cli_str_to_std_str(System::String^ str);
/**
 * Convert a C++/CLI string to a C++/CLI list of strings.
 *
 * @param[in] str Instance of System::String^ (c++/cli)
 */
static ess::clr::set_operand cli_str_to_cli_list(System::String^ str);
/**
 * Convert a C++/CLI list of strings to a single C++/CLI string.
 *
 * @param[in] set_operand Instance of ess::clr::set_operand (c++/cli)
 */
static System::String^ cli_list_to_cli_str(ess::clr::set_operand set_operand);
};
}; // namespace ess::clr
#endif // !__CONVERTER_CLI_HPP__
|
/*
* This software is distributed under BSD 3-clause license (see LICENSE file).
*
* Authors: Heiko Strathmann, Thoralf Klein, Bjoern Esser
*/
#include <gtest/gtest.h>
#include <shogun/mathematics/Statistics.h>
#include <shogun/features/streaming/generators/GaussianBlobsDataGenerator.h>
#include <shogun/features/streaming/generators/MeanShiftDataGenerator.h>
using namespace shogun;
// Single-blob case: samples drawn from one rotated Gaussian should match the
// analytic mean and covariance.
TEST(GaussianBlobsDataGenerator,get_next_example1)
{
index_t num_blobs=1;
float64_t distance=3;
float64_t epsilon=2;
float64_t angle=Math::PI/4;
index_t num_samples=50000;
auto gen=std::make_shared<GaussianBlobsDataGenerator>(num_blobs,
distance, epsilon, angle);
/* two dimensional samples */
SGMatrix<float64_t> samples(2, num_samples);
for (index_t i=0; i<num_samples; ++i)
{
gen->get_next_example();
SGVector<float64_t> sample=gen->get_vector();
samples(0,i)=sample[0];
samples(1,i)=sample[1];
gen->release_example();
}
SGVector<float64_t> mean=Statistics::matrix_mean(samples, false);
SGMatrix<float64_t> cov=Statistics::covariance_matrix(samples);
/* roughly ensures right results; if the test fails, set a bit larger */
float64_t accuracy=2e-1;
/* matrix is expected to look like [1.5, 0.5; 0.5, 1.5] */
EXPECT_NEAR(cov(0,0), 1.5, accuracy);
EXPECT_NEAR(cov(0,1), 0.5, accuracy);
EXPECT_NEAR(cov(1,0), 0.5, accuracy);
EXPECT_NEAR(cov(1,1), 1.5, accuracy);
/* mean is supposed to be [0, 0] */
EXPECT_LE(Math::abs(mean[0]-0), accuracy);
EXPECT_LE(Math::abs(mean[1]-0), accuracy);
}
// Three-blob case (after re-setting the blob model): the pooled mean and
// covariance across the blob grid should match the analytic values.
TEST(GaussianBlobsDataGenerator,get_next_example2)
{
index_t num_blobs=3;
float64_t distance=3;
float64_t epsilon=2;
float64_t angle=Math::PI/4;
index_t num_samples=50000;
auto gen=std::make_shared<GaussianBlobsDataGenerator>(num_blobs,
distance, epsilon, angle);
/* and another one */
SGMatrix<float64_t> samples2(2, num_samples);
gen->set_blobs_model(num_blobs, distance, epsilon, angle);
for (index_t i=0; i<num_samples; ++i)
{
gen->get_next_example();
SGVector<float64_t> sample=gen->get_vector();
samples2(0,i)=sample[0];
samples2(1,i)=sample[1];
gen->release_example();
}
SGVector<float64_t> mean2=Statistics::matrix_mean(samples2, false);
SGMatrix<float64_t> cov2=Statistics::covariance_matrix(samples2);
/* roughly ensures right results; if the test fails, set a bit larger */
float64_t accuracy=2e-1;
/* matrix is expected to look like [7.55, 0.55; 0.55, 7.55] */
EXPECT_NEAR(cov2(0,0), 7.55, accuracy);
EXPECT_NEAR(cov2(0,1), 0.55, accuracy);
EXPECT_NEAR(cov2(1,0), 0.55, accuracy);
EXPECT_NEAR(cov2(1,1), 7.55, accuracy);
/* mean is supposed to be [3, 3] */
EXPECT_LE(Math::abs(mean2[0]-3), accuracy);
EXPECT_LE(Math::abs(mean2[1]-3), accuracy);
}
// Mean-shift generator: only the first dimension is shifted by `mean_shift`;
// remaining dimensions stay standard normal. Checked both via the streaming
// interface and via a streamed feature matrix.
TEST(MeanShiftDataGenerator,get_next_example)
{
index_t dimension=3;
index_t mean_shift=100;
index_t num_runs=1000;
auto gen=std::make_shared<MeanShiftDataGenerator>(mean_shift,
dimension, 0);
SGVector<float64_t> avg(dimension);
avg.zero();
for (index_t i=0; i<num_runs; ++i)
{
gen->get_next_example();
avg.add(gen->get_vector());
gen->release_example();
}
/* average */
avg.scale(1.0/num_runs);
//avg.display_vector("mean_shift");
/* roughly assert correct model parameters */
EXPECT_LE(avg[0]-mean_shift, mean_shift/100);
for (index_t i=1; i<dimension; ++i)
{
EXPECT_LE(avg[i], 0.5);
EXPECT_GE(avg[i], -0.5);
}
/* draw whole matrix and test that too */
auto features=
gen->get_streamed_features(num_runs)->as<DenseFeatures<float64_t>>();
avg=SGVector<float64_t>(dimension);
for (index_t i=0; i<dimension; ++i)
{
float64_t sum=0;
for (index_t j=0; j<num_runs; ++j)
sum+=features->get_feature_matrix()(i, j);
avg[i]=sum/num_runs;
}
//avg.display_vector("mean_shift");
ASSERT(avg[0]-mean_shift<mean_shift/100);
for (index_t i=1; i<dimension; ++i)
ASSERT(avg[i]<0.5 && avg[i]>-0.5);
}
|
/*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright 2020- The GROMACS Authors
* and the project initiators Erik Lindahl, Berk Hess and David van der Spoel.
* Consult the AUTHORS/COPYING files and https://www.gromacs.org for details.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* https://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at https://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out https://www.gromacs.org.
*/
#include "gmxpre.h"
#include "energyhistory.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/stringutil.h"
#include "checkpointdata.h"
//! \cond INTERNAL
// mirroring the \cond from energyhistory.h to avoid Doxygen errors
namespace
{
/*!
 * \brief Enum describing the contents delta_h_history_t writes to modular checkpoint
 *
 * When changing the checkpoint content, add a new element just above Count, and adjust the
 * checkpoint functionality.
 */
enum class DeltaHHistoryCheckpointVersion
{
Base, //!< First version of modular checkpointing
Count //!< Number of entries. Add new versions right above this!
};
// Always write with the newest version (the entry just below Count).
constexpr auto c_currentVersionDeltaHH =
DeltaHHistoryCheckpointVersion(int(DeltaHHistoryCheckpointVersion::Count) - 1);
} // namespace
//! Read / write vector size from / to checkpoint and resize vector if reading
template<gmx::CheckpointDataOperation operation, typename T>
static void checkpointVectorSize(gmx::CheckpointData<operation>* checkpointData,
                                 const std::string&              name,
                                 std::vector<T>*                 vector)
{
    // The size is stored as int64_t so the on-disk format does not depend on
    // the platform's size_t width.
    auto size = static_cast<int64_t>(vector->size());
    checkpointData->scalar(name, &size);
    if (operation == gmx::CheckpointDataOperation::Read)
    {
        vector->resize(size);
    }
} // fix: removed stray ';' after the function body (-Wextra-semi)
// Read or write (depending on `operation`) the delta-H history: a version tag,
// the number of foreign-lambda series, the start time/lambda scalars, and then
// each series' size and contents. On read, checkpointVectorSize resizes the
// vectors before their data is read.
template<gmx::CheckpointDataOperation operation>
void delta_h_history_t::doCheckpoint(gmx::CheckpointData<operation> checkpointData)
{
gmx::checkpointVersion(&checkpointData, "delta_h_history_t version", c_currentVersionDeltaHH);
checkpointVectorSize(&checkpointData, "numDeltaH", &dh);
checkpointData.scalar("start_time", &start_time);
checkpointData.scalar("start_lambda", &start_lambda);
checkpointData.scalar("start_lambda_set", &start_lambda_set);
for (std::size_t idx = 0; idx < dh.size(); ++idx)
{
checkpointVectorSize(&checkpointData, gmx::formatString("vecSize %zu", idx), &dh[idx]);
checkpointData.arrayRef(gmx::formatString("vec %zu", idx),
gmx::makeCheckpointArrayRef<operation>(dh[idx]));
}
}
namespace
{
/*!
 * \brief Enum describing the contents energyhistory_t writes to modular checkpoint
 *
 * When changing the checkpoint content, add a new element just above Count, and adjust the
 * checkpoint functionality.
 */
enum class EnergyHistoryCheckpointVersion
{
Base, //!< First version of modular checkpointing
Count //!< Number of entries. Add new versions right above this!
};
// Always write with the newest version (the entry just below Count).
constexpr auto c_currentVersionEnergyHistory =
EnergyHistoryCheckpointVersion(int(EnergyHistoryCheckpointVersion::Count) - 1);
} // namespace
// Read or write (depending on `operation`) the energy history: version tag, a
// flag saying whether any data follows, the average/sum vectors' sizes, the
// step/sum counters, and the optional foreign-lambda delta-H history.
template<gmx::CheckpointDataOperation operation>
void energyhistory_t::doCheckpoint(gmx::CheckpointData<operation> checkpointData)
{
    gmx::checkpointVersion(&checkpointData, "energyhistory_t version", c_currentVersionEnergyHistory);
    // Fix: the condition was inverted ((nsum <= 0 && nsum_sim <= 0)), which
    // meant data was only "checkpointed" when there were no accumulated sums
    // and the `nsum > 0` / `nsum_sim > 0` array writes below were unreachable
    // on the write path. Data exists exactly when sums have been accumulated.
    bool useCheckpoint = (nsum > 0 || nsum_sim > 0);
    checkpointData.scalar("useCheckpoint", &useCheckpoint);
    // On read, useCheckpoint now holds the flag stored in the file.
    if (!useCheckpoint)
    {
        return;
    }
    checkpointVectorSize(&checkpointData, "enerAveSize", &ener_ave);
    checkpointVectorSize(&checkpointData, "enerSumSize", &ener_sum);
    checkpointVectorSize(&checkpointData, "enerSumSimSize", &ener_sum_sim);
    checkpointData.scalar("nsteps", &nsteps);
    checkpointData.scalar("nsteps_sim", &nsteps_sim);
    checkpointData.scalar("nsum", &nsum);
    checkpointData.scalar("nsum_sim", &nsum_sim);
    auto hasForeignLambdas = (deltaHForeignLambdas != nullptr);
    checkpointData.scalar("has foreign lambdas", &hasForeignLambdas);
    // On read, allocate the foreign-lambda history if the file has one but the
    // in-memory object does not yet.
    if (hasForeignLambdas && deltaHForeignLambdas == nullptr)
    {
        deltaHForeignLambdas = std::make_unique<delta_h_history_t>();
    }
    if (nsum > 0)
    {
        checkpointData.arrayRef("ener_ave", gmx::makeCheckpointArrayRef<operation>(ener_ave));
        checkpointData.arrayRef("ener_sum", gmx::makeCheckpointArrayRef<operation>(ener_sum));
    }
    if (nsum_sim > 0)
    {
        checkpointData.arrayRef("ener_sum_sim", gmx::makeCheckpointArrayRef<operation>(ener_sum_sim));
    }
    if (hasForeignLambdas)
    {
        deltaHForeignLambdas->doCheckpoint<operation>(
                checkpointData.subCheckpointData("deltaHForeignLambdas"));
    }
}
// explicit template instantiation for both checkpoint directions
template void energyhistory_t::doCheckpoint(gmx::CheckpointData<gmx::CheckpointDataOperation::Read> checkpointData);
template void energyhistory_t::doCheckpoint(gmx::CheckpointData<gmx::CheckpointDataOperation::Write> checkpointData);
//! \endcond
|
//
// Created by truefinch on 11.07.18.
//
#include "Fireball.h"
// Resolve a collision with a generic actor purely from the other actor's ID.
enums::CollideResult Fireball::collide(actor::Actor& other) {
  switch (other.getID()) {
    case enums::DRAGON_ID:
      return enums::PICK;
    case enums::WALL_ID:
    case enums::PRINCESS_ID:
      return enums::BARRIER;
    case enums::HERO_ID:
    case enums::ZOMBIE_ID:
      return enums::FIGHT;
    default:
      return enums::FREE;
  }
}
// Resolve a collision with an active actor: dead actors are passable,
// enemies are fought, dragons pick the fireball up, anything else blocks.
enums::CollideResult Fireball::collide(actor::ActiveActor& other) {
  if (other.isDead()) {
    return enums::FREE;
  }
  const enums::ActorID id = other.getID();
  if (id == enums::DRAGON_ID) {
    return enums::PICK;
  }
  if ((id == enums::ZOMBIE_ID) or (id == enums::HERO_ID)) {
    return enums::FIGHT;
  }
  return enums::BARRIER;
}
// Resolve a collision with a passive actor: only a live wall blocks the
// fireball; dead actors and floor tiles are passable.
enums::CollideResult Fireball::collide(actor::PassiveActor& other) {
  const bool blocks = !other.isDead() && (other.getID() == enums::WALL_ID);
  return blocks ? enums::BARRIER : enums::FREE;
}
// Two spells colliding always results in a fight.
enums::CollideResult Fireball::collide(actor::SpellActor&) {
  return enums::FIGHT;
}
// A fireball passes straight through collectables.
enums::CollideResult Fireball::collide(actor::CollectableActor&) {
  return enums::FREE;
}
// Advance the fireball one game turn: pick a target direction, resolve the
// collision with whatever occupies the destination cell, and report the
// outcome as an Event.
Event Fireball::doTurn() {
  Event result = Event();
  // A dead fireball does nothing.
  if (this->isDead()) {
    return Event(this->getName(), this->getName(), enums::DO_NOTHING, -1);
  }
  Point fireball_pos = this->getPosition();
  // Visible neighbourhood around the fireball.
  auto area = game::GameManager::Instance().getArea(fireball_pos, this->visibility_points_);
  Point dir = this->findTarget(area);
  Point other_pos = {fireball_pos.x + dir.x, fireball_pos.y + dir.y};
  // The area matrix is cropped at the map border, so translate absolute map
  // coordinates into area-local indices.
  int row = (int) fireball_pos.x, col = (int) fireball_pos.y,
      top_row_bound = row - std::min<int>(this->getVisibilityPoints(), row),
      left_row_bound = col - std::min<int>(this->getVisibilityPoints(), col);
  auto other = area[(int) other_pos.x - top_row_bound][(int) other_pos.y - left_row_bound];
  enums::CollideResult collision = this->collide(*other);
  switch (collision) {
    case enums::BARRIER: {
      // Hit an obstacle: the fireball dissipates.
      this->isDead(true);
      result = Event(this->getName(), this->getName(), enums::DO_NOTHING, -1);
      break;
    }
    case enums::FREE: {
      game::GameManager::Instance().move(fireball_pos, other_pos);
      result = Event(this->getName(), this->getName(), enums::MOVED, -1);
      break;
    }
    case enums::FIGHT: {
      // Damage the enemy, then expire.
      std::shared_ptr<actor::ActiveActor> enemy = std::static_pointer_cast<actor::ActiveActor>(other);
      enemy->setCurHealthPoints(enemy->getCurHealthPoints() - this->getDamagePoints());
      this->setCurHealthPoints(0);
      this->isDead(true);
      result = Event(this->getName(), enemy->getName(), enums::ATTACKED, this->getDamagePoints());
      break;
    }
    case enums::WIN: {
      result = Event(this->getName(), this->getName(), enums::DO_NOTHING, -1);
      break;
    }
    case enums::PICK: {
      // Removed an unused local: `other` was cast to an ActiveActor (the
      // dragon) but the result was never read.
      // NOTE(review): this doubles the fireball's OWN health; the removed
      // cast suggests the dragon's health may have been the intended second
      // operand — confirm with the game design.
      this->setCurHealthPoints(this->getCurHealthPoints() + this->getCurHealthPoints());
      result = Event(this->getName(), this->getName(), enums::DO_NOTHING, -1);
      break;
    }
    default:
      // Other collision results leave the default (empty) event.
      break;
  }
  return result;
}
|
/*=========================================================================
*
* Copyright UMC Utrecht and contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
/*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef __itkGPUInPlaceImageFilter_hxx
#define __itkGPUInPlaceImageFilter_hxx
#include "itkGPUInPlaceImageFilter.h"
namespace itk
{
/** Print the filter's state; all state lives in the GPU superclass. */
template< typename TInputImage, typename TOutputImage, typename TParentImageFilter >
void GPUInPlaceImageFilter< TInputImage, TOutputImage, TParentImageFilter >::PrintSelf(
  std::ostream & os, Indent indent ) const
{
  GPUSuperclass::PrintSelf( os, indent );
}
//------------------------------------------------------------------------------
/** Release input bulk data after use by delegating to the CPU superclass. */
template< typename TInputImage, typename TOutputImage, typename TParentImageFilter >
void GPUInPlaceImageFilter< TInputImage, TOutputImage, TParentImageFilter >::ReleaseInputs()
{
  CPUSuperclass::ReleaseInputs();
}
//------------------------------------------------------------------------------
/** Allocate the filter outputs. When GPU execution is enabled and in-place
 * operation is requested and possible, the first input is grafted onto the
 * first output (avoiding an allocation); otherwise allocation is delegated
 * to the CPU superclass. */
template< typename TInputImage, typename TOutputImage, typename TParentImageFilter >
void
GPUInPlaceImageFilter< TInputImage, TOutputImage, TParentImageFilter >
::AllocateOutputs()
{
  if( this->GetGPUEnabled() )
  {
    // if told to run in place and the types support it,
    if( this->GetInPlace() && this->CanRunInPlace() )
    {
      // Graft this first input to the output. Later, we'll need to
      // remove the input's hold on the bulk data.
      //
      OutputImagePointer inputAsOutput
        = dynamic_cast< TOutputImage * >( const_cast< TInputImage * >( this->GetInput() ) );
      if( inputAsOutput )
      {
        this->GraftOutput( inputAsOutput );
      }
      else
      {
        // if we cannot cast the input to an output type, then allocate
        // an output as usual.
        OutputImagePointer outputPtr;
        outputPtr = this->GetOutput( 0 );
        outputPtr->SetBufferedRegion( outputPtr->GetRequestedRegion() );
        outputPtr->Allocate();
      }
      typedef ImageBase< OutputImageDimension > ImageBaseType;
      typename ImageBaseType::Pointer outputPtr;
      // If there are more than one outputs, allocate the remaining outputs
      for( unsigned int i = 1; i < this->GetNumberOfOutputs(); i++ )
      {
        // Check whether the output is an image of the appropriate
        // dimension (use ProcessObject's version of the GetInput()
        // method since it returns the input as a pointer to a
        // DataObject as opposed to the subclass version which
        // static_casts the input to an TInputImage).
        outputPtr = dynamic_cast< ImageBaseType * >( this->ProcessObject::GetOutput( i ) );
        if( outputPtr )
        {
          outputPtr->SetBufferedRegion( outputPtr->GetRequestedRegion() );
          outputPtr->Allocate();
        }
        // if the output is not of similar type then it is assumed that
        // the derived class allocated the output if needed.
      }
    }
    else
    {
      // Not running in place: fall back to the standard allocation path.
      CPUSuperclass::AllocateOutputs();
    }
  }
  else
  {
    // GPU disabled: fall back to the standard allocation path.
    CPUSuperclass::AllocateOutputs();
  }
}
} // end of namespace itk
#endif
|
/* --------------------------------------------------------------- */
/* JustTesting --------------------------------------------------- */
/* --------------------------------------------------------------- */

// Correlate the two point clouds at eight successively coarser resolutions
// (decimating by 2 each pass) and log the correlation and offsets to `of`.
// Offsets are multiplied by `mult` to express them in original-resolution
// pixels.
static void JustTesting(vector<Point> ap, vector<double> av, vector<Point> bp, vector<double> bv, FILE *of)
{
    long size = 100000;
    int  mult = 1;
    vector<CD> ftc;

    for(int i = 0; i < 8; i++) {
        double dx, dy;
        ftc.clear(); // no caching wanted here
        double c = CorrPatches(
            of, false, dx, dy,
            ap, av, bp, bv, 0, 0, 4000,
            BigEnough, (void *)size,    // legacy callback context: size smuggled through void*
            NULL, NULL, ftc );
        // BUGFIX: `size` is long; passing it through %d is undefined behavior
        // in varargs. It never exceeds 100000, so an int cast is exact.
        fprintf(of, " JT: %4d by %4d, size %6d, corr %f, dx, dy = %9.2f, %9.2f\n",
            int(sqrt(ap.size())), int(sqrt(bp.size())),
            int(size), c, dx*mult, dy*mult);
        DecimateVector(ap, av, 2);
        DecimateVector(bp, bv, 2);
        size /= 4;
        mult *= 2;
    }
}
/* --------------------------------------------------------------- */
/* TryNewOptimizer ----------------------------------------------- */
/* --------------------------------------------------------------- */

// Refine affine transform t by optimizing the positions of three control
// points spanning the bounding box of plist, then rebuilding the affine map
// from the original and the optimized control-point triangles.
// plist/spv are the source points and their values; image2 is the target.
// NOTE(review): diagnostics go to the global `of` while ImproveControlPts
// receives `flog` — confirm that split is intentional.
static void TryNewOptimizer(vector<Point> &plist, vector<double> &spv, vector<double> &image2, TAffine &t, FILE *flog)
{
    // compute the bounding box
    // BUGFIX: plist.size() is size_t; passing it through %d is undefined
    // behavior in varargs, so cast explicitly.
    fprintf(of,"\n---------- Try new optimizer on %d points----------------\n", int(plist.size()));
    DBox B;
    BBoxFromPoints( B, plist );
    fprintf(of,"region size is [%f %f] in x, [%f %f] in y\n", B.L, B.R, B.B, B.T);

    // create 3 control points spanning the box
    vector<Point> cpts;
    cpts.push_back(Point(B.L, B.B));
    cpts.push_back(Point(B.R, B.B));
    cpts.push_back(Point((B.L+B.R)/2, B.T));
    fprintf(of,"Control points are (%f %f) (%f %f) (%f %f)\n", cpts[0].x, cpts[0].y, cpts[1].x, cpts[1].y, cpts[2].x, cpts[2].y);

    // find each point as a linear (barycentric) combination of control points
    double a[3][3];
    a[0][0] = cpts[0].x; a[0][1] = cpts[1].x; a[0][2] = cpts[2].x;
    a[1][0] = cpts[0].y; a[1][1] = cpts[1].y; a[1][2] = cpts[2].y;
    a[2][0] = 1.0;       a[2][1] = 1.0;       a[2][2] = 1.0;
    double inv[3][3];
    Print3x3Matrix( of, a );
    Invert3x3Matrix( inv, a );
    Print3x3Matrix( of, inv );

    vector<vector<double> > lambda;
    // size_t index avoids the signed/unsigned comparison of the original.
    for( size_t j = 0; j < plist.size(); j++ ) {
        vector<double> lam;
        lam.push_back(inv[0][0]*plist[j].x + inv[0][1]*plist[j].y + inv[0][2]*1.0);
        lam.push_back(inv[1][0]*plist[j].x + inv[1][1]*plist[j].y + inv[1][2]*1.0);
        lam.push_back(inv[2][0]*plist[j].x + inv[2][1]*plist[j].y + inv[2][2]*1.0);
        lambda.push_back(lam);
    }

    // Transform the control points to the target frame
    vector<Point> orig = cpts;
    t.Transform( cpts );

    // call the optimizer
    ImproveControlPts(cpts, lambda, spv, image2, 4096, flog, "new opt", 1.0);

    // Now, find a transformation that maps ORIG into the new cpts.
    // First, a transform that maps a unit right triangle to the original pts:
    TAffine o(orig[1].x-orig[0].x, orig[2].x-orig[0].x, orig[0].x,
       orig[1].y-orig[0].y, orig[2].y-orig[0].y, orig[0].y);
    // ...and one built the same way from the optimized control points.
    TAffine c(cpts[1].x-cpts[0].x, cpts[2].x-cpts[0].x, cpts[0].x,
       cpts[1].y-cpts[0].y, cpts[2].y-cpts[0].y, cpts[0].y);
    // Original -> final is o^-1 followed by c.
    TAffine oi;
    oi.InverseOf( o );
    t = c * oi;
    t.TPrint( of );
}
/* --------------------------------------------------------------- */
/* ImproveCorrelation -------------------------------------------- */
/* --------------------------------------------------------------- */

// Improve the correlation, if possible, by tweaking the transform. Plist are
// the points in the original, and spv are their values. image2 is a 4Kx4K
// matrix of doubles, already normalized. dx and dy are the initial estimates
// of how to map the original points into the array image2.
//
// NOTE(review): best_so_far is never updated after initialization, so this
// always returns 0.0 regardless of the optimization result — audit callers
// that rely on the return value.
static double ImproveCorrelation(vector<Point> &Plist, vector<double> &spv, vector<double>image2,
 double dx, double dy, TAffine &t, FILE *flog)
{
    // BUGFIX: Plist.size() is size_t; passing it through %d is undefined
    // behavior in varargs, so cast explicitly.
    fprintf(of,"Contains %d pixels\n", int(Plist.size()) );
    Normalize(spv);
    if (dx != BIG)  // if dx == BIG, start improving from the transform we have
        t = TAffine(1.0, 0.0, dx, 0.0, 1.0, dy); // otherwise, create a transform with just dx, dy
    double best_so_far = 0.0;
    TryNewOptimizer(Plist, spv, image2, t, flog);
    // Now t is the best transform we can find.
    fprintf( of,
    "Best transform is %9.4f %9.4f %10.2f\n"
    "                  %9.4f %9.4f %10.2f\n",
    t.t[0], t.t[1], t.t[2], t.t[3], t.t[4], t.t[5] );
    return best_so_far;
}
|
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "shared_test_classes/single_layer/activation.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace LayerTestsDefinitions;
using namespace ngraph::helpers;
namespace {
// Smoke test: serialize the activation network for every parameter combo.
TEST_P(ActivationLayerTest, Serialize) {
    Serialize();
}
// Common params
const std::vector<InferenceEngine::Precision> inputPrecisions = {
    InferenceEngine::Precision::FP32
    // TODO: Fix Issue-27390
    // InferenceEngine::Precision::I16,
    // InferenceEngine::Precision::U8
};
const std::vector<InferenceEngine::Precision> netPrecisions = {
    InferenceEngine::Precision::FP32,
    InferenceEngine::Precision::FP16
};
// Activation types with their extra constant parameters (empty when none).
// BUGFIX: removed a duplicate {Erf, {}} entry — std::map silently drops
// duplicate keys, so the second entry was dead.
const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes = {
    {Sigmoid, {}},
    {Tanh, {}},
    {Relu, {}},
    {Exp, {}},
    {Log, {}},
    {Sign, {}},
    {Abs, {}},
    {Clamp, {{-2.0f, 2.0f}}},
    {Negative, {}},
    {Acos, {}},
    {Asin, {}},
    {Atan, {}},
    {Cos, {}},
    {Cosh, {}},
    {Floor, {}},
    {Sin, {}},
    {Sinh, {}},
    {Sqrt, {}},
    {Tan, {}},
    {Elu, {{0.1f}}},
    {Erf, {}},
    {HardSigmoid, {{0.2f, 0.5f}}},
    {Selu, {{1.6732f, 1.0507f}}},
    {Ceiling, {}},
    {Mish, {}},
    {HSwish, {}},
    {SoftPlus, {}},
    {HSigmoid, {}},
    {RoundHalfToEven, {}},
    {RoundHalfAwayFromZero, {}},
    {GeluErf, {}},
    {GeluTanh, {}}
};
// Activations whose parameters are supplied as a second network input.
const std::map<ActivationTypes, std::vector<std::vector<float>>> activationParamTypes = {
    {PReLu, {{-0.01f}}},
    {LeakyRelu, {{0.01f}}}
};
// input shape -> second-input shapes
std::map<std::vector<size_t>, std::vector<std::vector<size_t>>> basic = {
    {{1, 50}, {{}}},
    {{1, 128}, {{}}},
};
std::map<std::vector<size_t>, std::vector<std::vector<size_t>>> preluBasic = {
    {{1, 50}, {{1}, {50}}},
    {{1, 128}, {{1}, {128}}},
};
const auto basicCases = ::testing::Combine(
    ::testing::ValuesIn(CommonTestUtils::combineParams(activationTypes)),
    ::testing::ValuesIn(netPrecisions),
    ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
    ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
    ::testing::Values(InferenceEngine::Layout::ANY),
    ::testing::Values(InferenceEngine::Layout::ANY),
    ::testing::ValuesIn(CommonTestUtils::combineParams(basic)),
    ::testing::Values(CommonTestUtils::DEVICE_CPU)
);
const auto basicPreluCases = ::testing::Combine(
    ::testing::ValuesIn(CommonTestUtils::combineParams(activationParamTypes)),
    ::testing::ValuesIn(netPrecisions),
    ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
    ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
    ::testing::Values(InferenceEngine::Layout::ANY),
    ::testing::Values(InferenceEngine::Layout::ANY),
    ::testing::ValuesIn(CommonTestUtils::combineParams(preluBasic)),
    ::testing::Values(CommonTestUtils::DEVICE_CPU)
);
INSTANTIATE_TEST_CASE_P(smoke_Activation_Basic, ActivationLayerTest, basicCases, ActivationLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Activation_Basic_Prelu, ActivationLayerTest, basicPreluCases, ActivationLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Activation_Basic, ActivationParamLayerTest, basicPreluCases, ActivationLayerTest::getTestCaseName);
INSTANTIATE_TEST_CASE_P(smoke_Activation_Basic, ActivationDynamicLayerTest, basicCases, ActivationLayerTest::getTestCaseName);
} // namespace
|
/******************************************************************************
* The MIT License (MIT)
*
* Copyright (c) 2015-2017 Baldur Karlsson
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
#include "vk_resources.h"
#include "vk_info.h"
// Instantiate the wrapper allocation pools for every Vulkan handle type we
// wrap — the five dispatchable handles first, then the non-dispatchable ones.
WRAPPED_POOL_INST(WrappedVkInstance)
WRAPPED_POOL_INST(WrappedVkPhysicalDevice)
WRAPPED_POOL_INST(WrappedVkDevice)
WRAPPED_POOL_INST(WrappedVkQueue)
WRAPPED_POOL_INST(WrappedVkCommandBuffer)
WRAPPED_POOL_INST(WrappedVkFence)
WRAPPED_POOL_INST(WrappedVkDeviceMemory)
WRAPPED_POOL_INST(WrappedVkBuffer)
WRAPPED_POOL_INST(WrappedVkImage)
WRAPPED_POOL_INST(WrappedVkSemaphore)
WRAPPED_POOL_INST(WrappedVkEvent)
WRAPPED_POOL_INST(WrappedVkQueryPool)
WRAPPED_POOL_INST(WrappedVkBufferView)
WRAPPED_POOL_INST(WrappedVkImageView)
WRAPPED_POOL_INST(WrappedVkShaderModule)
WRAPPED_POOL_INST(WrappedVkPipelineCache)
WRAPPED_POOL_INST(WrappedVkPipelineLayout)
WRAPPED_POOL_INST(WrappedVkRenderPass)
WRAPPED_POOL_INST(WrappedVkPipeline)
WRAPPED_POOL_INST(WrappedVkDescriptorSetLayout)
WRAPPED_POOL_INST(WrappedVkSampler)
WRAPPED_POOL_INST(WrappedVkDescriptorPool)
WRAPPED_POOL_INST(WrappedVkDescriptorSet)
WRAPPED_POOL_INST(WrappedVkFramebuffer)
WRAPPED_POOL_INST(WrappedVkCommandPool)
WRAPPED_POOL_INST(WrappedVkSwapchainKHR)
WRAPPED_POOL_INST(WrappedVkSurfaceKHR)
// Marker byte pattern. Only the first 16 bytes are specified here; the
// remaining 16 of the 32-byte array are zero-initialized by the aggregate
// initializer — presumably intentional, but confirm against consumers.
byte VkResourceRecord::markerValue[32] = {
    0xaa, 0xbb, 0xcc, 0xdd, 0x88, 0x77, 0x66, 0x55, 0x01, 0x23, 0x45, 0x67, 0x98, 0x76, 0x54, 0x32,
};
// A resource is dispatchable if it wraps one of the five Vulkan handle types
// that carry a dispatch table: instance, physical device, device, queue, or
// command buffer.
bool IsDispatchableRes(WrappedVkRes *ptr)
{
  if(WrappedVkInstance::IsAlloc(ptr) || WrappedVkPhysicalDevice::IsAlloc(ptr))
    return true;
  if(WrappedVkDevice::IsAlloc(ptr) || WrappedVkQueue::IsAlloc(ptr))
    return true;
  return WrappedVkCommandBuffer::IsAlloc(ptr);
}
// Map a wrapped resource pointer back to its VkResourceType by probing each
// wrapper type's allocation pool in turn. Exactly one pool should claim the
// pointer; unknown pointers are logged and reported as eResUnknown.
VkResourceType IdentifyTypeByPtr(WrappedVkRes *ptr)
{
  if(WrappedVkPhysicalDevice::IsAlloc(ptr))
    return eResPhysicalDevice;
  if(WrappedVkInstance::IsAlloc(ptr))
    return eResInstance;
  if(WrappedVkDevice::IsAlloc(ptr))
    return eResDevice;
  if(WrappedVkQueue::IsAlloc(ptr))
    return eResQueue;
  if(WrappedVkDeviceMemory::IsAlloc(ptr))
    return eResDeviceMemory;
  if(WrappedVkBuffer::IsAlloc(ptr))
    return eResBuffer;
  if(WrappedVkBufferView::IsAlloc(ptr))
    return eResBufferView;
  if(WrappedVkImage::IsAlloc(ptr))
    return eResImage;
  if(WrappedVkImageView::IsAlloc(ptr))
    return eResImageView;
  if(WrappedVkFramebuffer::IsAlloc(ptr))
    return eResFramebuffer;
  if(WrappedVkRenderPass::IsAlloc(ptr))
    return eResRenderPass;
  if(WrappedVkShaderModule::IsAlloc(ptr))
    return eResShaderModule;
  if(WrappedVkPipelineCache::IsAlloc(ptr))
    return eResPipelineCache;
  if(WrappedVkPipelineLayout::IsAlloc(ptr))
    return eResPipelineLayout;
  if(WrappedVkPipeline::IsAlloc(ptr))
    return eResPipeline;
  if(WrappedVkSampler::IsAlloc(ptr))
    return eResSampler;
  if(WrappedVkDescriptorPool::IsAlloc(ptr))
    return eResDescriptorPool;
  if(WrappedVkDescriptorSetLayout::IsAlloc(ptr))
    return eResDescriptorSetLayout;
  if(WrappedVkDescriptorSet::IsAlloc(ptr))
    return eResDescriptorSet;
  if(WrappedVkCommandPool::IsAlloc(ptr))
    return eResCommandPool;
  if(WrappedVkCommandBuffer::IsAlloc(ptr))
    return eResCommandBuffer;
  if(WrappedVkFence::IsAlloc(ptr))
    return eResFence;
  if(WrappedVkEvent::IsAlloc(ptr))
    return eResEvent;
  if(WrappedVkQueryPool::IsAlloc(ptr))
    return eResQueryPool;
  if(WrappedVkSemaphore::IsAlloc(ptr))
    return eResSemaphore;
  if(WrappedVkSwapchainKHR::IsAlloc(ptr))
    return eResSwapchain;
  if(WrappedVkSurfaceKHR::IsAlloc(ptr))
    return eResSurface;
  RDCERR("Unknown type for ptr 0x%p", ptr);
  return eResUnknown;
}
// True for block-compressed texture formats (BC1-7, ETC2/EAC, and every ASTC
// LDR block size). These store texels in fixed-size blocks rather than
// per-pixel, so size and copy calculations must use block dimensions.
bool IsBlockFormat(VkFormat f)
{
  switch(f)
  {
    // BC (S3TC/DXT) family
    case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
    case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
    case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
    case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
    case VK_FORMAT_BC2_UNORM_BLOCK:
    case VK_FORMAT_BC2_SRGB_BLOCK:
    case VK_FORMAT_BC3_UNORM_BLOCK:
    case VK_FORMAT_BC3_SRGB_BLOCK:
    case VK_FORMAT_BC4_UNORM_BLOCK:
    case VK_FORMAT_BC4_SNORM_BLOCK:
    case VK_FORMAT_BC5_UNORM_BLOCK:
    case VK_FORMAT_BC5_SNORM_BLOCK:
    case VK_FORMAT_BC6H_UFLOAT_BLOCK:
    case VK_FORMAT_BC6H_SFLOAT_BLOCK:
    case VK_FORMAT_BC7_UNORM_BLOCK:
    case VK_FORMAT_BC7_SRGB_BLOCK:
    // ETC2 / EAC family
    case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
    case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
    case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
    case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
    case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
    case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
    case VK_FORMAT_EAC_R11_UNORM_BLOCK:
    case VK_FORMAT_EAC_R11_SNORM_BLOCK:
    case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
    case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
    // ASTC family
    case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
    case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
    case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
    case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
    case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
    case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
    case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
    case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
    case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
    case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
    case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
    case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
    case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
    case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
    case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
    case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
    case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
    case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
    case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
    case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
    case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
    case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
    case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
    case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
    case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
    case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
    case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
    case VK_FORMAT_ASTC_12x12_SRGB_BLOCK: return true;
    default: break;
  }
  return false;
}
// True for any format carrying a depth aspect, a stencil aspect, or both.
bool IsDepthOrStencilFormat(VkFormat f)
{
  switch(f)
  {
    // depth-only
    case VK_FORMAT_D16_UNORM:
    case VK_FORMAT_X8_D24_UNORM_PACK32:
    case VK_FORMAT_D32_SFLOAT: return true;
    // stencil-only and combined depth/stencil
    case VK_FORMAT_S8_UINT:
    case VK_FORMAT_D16_UNORM_S8_UINT:
    case VK_FORMAT_D24_UNORM_S8_UINT:
    case VK_FORMAT_D32_SFLOAT_S8_UINT: return true;
    default: return false;
  }
}
// True only for combined depth+stencil formats.
// BUGFIX: the previous version listed VK_FORMAT_X8_D24_UNORM_PACK32, which is
// depth-only (see IsDepthOnlyFormat below), and omitted
// VK_FORMAT_D24_UNORM_S8_UINT, which IsStencilFormat in this file correctly
// treats as having a stencil aspect.
bool IsDepthAndStencilFormat(VkFormat f)
{
  switch(f)
  {
    case VK_FORMAT_D16_UNORM_S8_UINT:
    case VK_FORMAT_D24_UNORM_S8_UINT:
    case VK_FORMAT_D32_SFLOAT_S8_UINT: return true;
    default: break;
  }
  return false;
}
// True for any format carrying a stencil aspect: stencil-only S8 plus the
// three combined depth/stencil formats.
bool IsStencilFormat(VkFormat f)
{
  return f == VK_FORMAT_S8_UINT || f == VK_FORMAT_D16_UNORM_S8_UINT ||
         f == VK_FORMAT_D24_UNORM_S8_UINT || f == VK_FORMAT_D32_SFLOAT_S8_UINT;
}
// True for formats with a depth aspect but no stencil aspect.
bool IsDepthOnlyFormat(VkFormat f)
{
  return f == VK_FORMAT_D16_UNORM || f == VK_FORMAT_X8_D24_UNORM_PACK32 ||
         f == VK_FORMAT_D32_SFLOAT;
}
// True for formats with a stencil aspect but no depth aspect; S8 is the only
// such format in Vulkan.
bool IsStencilOnlyFormat(VkFormat f)
{
  return f == VK_FORMAT_S8_UINT;
}
// True for formats stored with sRGB (gamma-encoded) color components,
// including the sRGB variants of the block-compressed families.
bool IsSRGBFormat(VkFormat f)
{
  switch(f)
  {
    case VK_FORMAT_R8_SRGB:
    case VK_FORMAT_R8G8_SRGB:
    case VK_FORMAT_R8G8B8_SRGB:
    case VK_FORMAT_R8G8B8A8_SRGB:
    case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
    case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
    case VK_FORMAT_BC2_SRGB_BLOCK:
    case VK_FORMAT_BC3_SRGB_BLOCK:
    case VK_FORMAT_BC7_SRGB_BLOCK:
    case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
    case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
    case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
    case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
    case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
    case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
    case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
    case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
    case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
    case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
    case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
    case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
    case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
    case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
    case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
    case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
    case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
    case VK_FORMAT_B8G8R8_SRGB:
    case VK_FORMAT_B8G8R8A8_SRGB: return true;
    default: break;
  }
  return false;
}
// True for formats whose components are unsigned integers (UINT), including
// the stencil-only S8 format.
bool IsUIntFormat(VkFormat f)
{
  switch(f)
  {
    case VK_FORMAT_R8_UINT:
    case VK_FORMAT_R8G8_UINT:
    case VK_FORMAT_R8G8B8_UINT:
    case VK_FORMAT_B8G8R8_UINT:
    case VK_FORMAT_R8G8B8A8_UINT:
    case VK_FORMAT_B8G8R8A8_UINT:
    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
    case VK_FORMAT_R16_UINT:
    case VK_FORMAT_R16G16_UINT:
    case VK_FORMAT_R16G16B16_UINT:
    case VK_FORMAT_R16G16B16A16_UINT:
    case VK_FORMAT_R32_UINT:
    case VK_FORMAT_R32G32_UINT:
    case VK_FORMAT_R32G32B32_UINT:
    case VK_FORMAT_R32G32B32A32_UINT:
    case VK_FORMAT_R64_UINT:
    case VK_FORMAT_R64G64_UINT:
    case VK_FORMAT_R64G64B64_UINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_S8_UINT: return true;
    default: break;
  }
  return false;
}
// True for formats whose components are signed integers (SINT).
bool IsSIntFormat(VkFormat f)
{
  switch(f)
  {
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
    case VK_FORMAT_R16_SINT:
    case VK_FORMAT_R16G16_SINT:
    case VK_FORMAT_R16G16B16_SINT:
    case VK_FORMAT_R16G16B16A16_SINT:
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R64G64_SINT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64A64_SINT: return true;
    default: break;
  }
  return false;
}
// Strip the stencil aspect from a combined depth/stencil format; any other
// format is returned unchanged.
VkFormat GetDepthOnlyFormat(VkFormat f)
{
  if(f == VK_FORMAT_D16_UNORM_S8_UINT)
    return VK_FORMAT_D16_UNORM;
  if(f == VK_FORMAT_D24_UNORM_S8_UINT)
    return VK_FORMAT_X8_D24_UNORM_PACK32;
  if(f == VK_FORMAT_D32_SFLOAT_S8_UINT)
    return VK_FORMAT_D32_SFLOAT;
  return f;
}
// Return the UINT format with the same component layout and bit width as f,
// so image data can be reinterpreted as raw unsigned integers. Formats with
// no listed equivalent (including formats that are already UINT, which fall
// to the default) are returned unchanged.
VkFormat GetUIntTypedFormat(VkFormat f)
{
  switch(f)
  {
    case VK_FORMAT_R8_UNORM:
    case VK_FORMAT_R8_SNORM:
    case VK_FORMAT_R8_USCALED:
    case VK_FORMAT_R8_SSCALED:
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8_SRGB: return VK_FORMAT_R8_UINT;
    case VK_FORMAT_R8G8_UNORM:
    case VK_FORMAT_R8G8_SNORM:
    case VK_FORMAT_R8G8_USCALED:
    case VK_FORMAT_R8G8_SSCALED:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8_SRGB: return VK_FORMAT_R8G8_UINT;
    case VK_FORMAT_R8G8B8_UNORM:
    case VK_FORMAT_R8G8B8_SNORM:
    case VK_FORMAT_R8G8B8_USCALED:
    case VK_FORMAT_R8G8B8_SSCALED:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_R8G8B8_SRGB: return VK_FORMAT_R8G8B8_UINT;
    case VK_FORMAT_R8G8B8A8_UNORM:
    case VK_FORMAT_R8G8B8A8_SNORM:
    case VK_FORMAT_R8G8B8A8_USCALED:
    case VK_FORMAT_R8G8B8A8_SSCALED:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_R8G8B8A8_SRGB: return VK_FORMAT_R8G8B8A8_UINT;
    case VK_FORMAT_B8G8R8_UNORM:
    case VK_FORMAT_B8G8R8_SNORM:
    case VK_FORMAT_B8G8R8_USCALED:
    case VK_FORMAT_B8G8R8_SSCALED:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_B8G8R8_SRGB: return VK_FORMAT_B8G8R8_UINT;
    case VK_FORMAT_B8G8R8A8_UNORM:
    case VK_FORMAT_B8G8R8A8_SNORM:
    case VK_FORMAT_B8G8R8A8_USCALED:
    case VK_FORMAT_B8G8R8A8_SSCALED:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_B8G8R8A8_SRGB: return VK_FORMAT_B8G8R8A8_UINT;
    case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
    case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
    case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
    case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
    case VK_FORMAT_A8B8G8R8_SRGB_PACK32: return VK_FORMAT_A8B8G8R8_UINT_PACK32;
    case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
    case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
    case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
    case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32: return VK_FORMAT_A2R10G10B10_UINT_PACK32;
    case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
    case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
    case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
    case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32: return VK_FORMAT_A2B10G10R10_UINT_PACK32;
    case VK_FORMAT_R16_UNORM:
    case VK_FORMAT_R16_SNORM:
    case VK_FORMAT_R16_USCALED:
    case VK_FORMAT_R16_SSCALED:
    case VK_FORMAT_R16_SINT: return VK_FORMAT_R16_UINT;
    case VK_FORMAT_R16G16_UNORM:
    case VK_FORMAT_R16G16_SNORM:
    case VK_FORMAT_R16G16_USCALED:
    case VK_FORMAT_R16G16_SSCALED:
    case VK_FORMAT_R16G16_SINT: return VK_FORMAT_R16G16_UINT;
    case VK_FORMAT_R16G16B16_UNORM:
    case VK_FORMAT_R16G16B16_SNORM:
    case VK_FORMAT_R16G16B16_USCALED:
    case VK_FORMAT_R16G16B16_SSCALED:
    case VK_FORMAT_R16G16B16_SINT: return VK_FORMAT_R16G16B16_UINT;
    case VK_FORMAT_R16G16B16A16_UNORM:
    case VK_FORMAT_R16G16B16A16_SNORM:
    case VK_FORMAT_R16G16B16A16_USCALED:
    case VK_FORMAT_R16G16B16A16_SSCALED:
    case VK_FORMAT_R16G16B16A16_SINT: return VK_FORMAT_R16G16B16A16_UINT;
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32_SFLOAT: return VK_FORMAT_R32_UINT;
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32_SFLOAT: return VK_FORMAT_R32G32_UINT;
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32_SFLOAT: return VK_FORMAT_R32G32B32_UINT;
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_R32G32B32A32_SFLOAT: return VK_FORMAT_R32G32B32A32_UINT;
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R64_SFLOAT: return VK_FORMAT_R64_UINT;
    case VK_FORMAT_R64G64_SINT:
    case VK_FORMAT_R64G64_SFLOAT: return VK_FORMAT_R64G64_UINT;
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_SFLOAT: return VK_FORMAT_R64G64B64_UINT;
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_SFLOAT: return VK_FORMAT_R64G64B64A64_UINT;
    case VK_FORMAT_S8_UINT: return VK_FORMAT_S8_UINT;
    default: break;
  }
  return f;
}
uint32_t GetByteSize(uint32_t Width, uint32_t Height, uint32_t Depth, VkFormat Format, uint32_t mip)
{
uint32_t w = RDCMAX(Width >> mip, 1U);
uint32_t h = RDCMAX(Height >> mip, 1U);
uint32_t d = RDCMAX(Depth >> mip, 1U);
uint32_t ret = w * h * d;
uint32_t astc[2] = {0, 0};
switch(Format)
{
case VK_FORMAT_R64G64B64A64_SFLOAT: ret *= 32; break;
case VK_FORMAT_R64G64B64_SFLOAT: ret *= 24; break;
case VK_FORMAT_R32G32B32A32_UINT:
case VK_FORMAT_R32G32B32A32_SINT:
case VK_FORMAT_R32G32B32A32_SFLOAT:
case VK_FORMAT_R64G64_SFLOAT: ret *= 16; break;
case VK_FORMAT_R32G32B32_UINT:
case VK_FORMAT_R32G32B32_SINT:
case VK_FORMAT_R32G32B32_SFLOAT: ret *= 12; break;
case VK_FORMAT_R16G16B16A16_UNORM:
case VK_FORMAT_R16G16B16A16_SNORM:
case VK_FORMAT_R16G16B16A16_USCALED:
case VK_FORMAT_R16G16B16A16_SSCALED:
case VK_FORMAT_R16G16B16A16_UINT:
case VK_FORMAT_R16G16B16A16_SINT:
case VK_FORMAT_R16G16B16A16_SFLOAT:
case VK_FORMAT_R32G32_UINT:
case VK_FORMAT_R32G32_SINT:
case VK_FORMAT_R32G32_SFLOAT:
case VK_FORMAT_R64_SFLOAT: ret *= 8; break;
case VK_FORMAT_R16G16B16_UNORM:
case VK_FORMAT_R16G16B16_SNORM:
case VK_FORMAT_R16G16B16_USCALED:
case VK_FORMAT_R16G16B16_SSCALED:
case VK_FORMAT_R16G16B16_UINT:
case VK_FORMAT_R16G16B16_SINT:
case VK_FORMAT_R16G16B16_SFLOAT: ret *= 6; break;
case VK_FORMAT_D32_SFLOAT_S8_UINT: ret *= 8; break;
case VK_FORMAT_R8G8B8_UNORM:
case VK_FORMAT_R8G8B8_SNORM:
case VK_FORMAT_R8G8B8_USCALED:
case VK_FORMAT_R8G8B8_SSCALED:
case VK_FORMAT_R8G8B8_UINT:
case VK_FORMAT_R8G8B8_SINT:
case VK_FORMAT_R8G8B8_SRGB:
case VK_FORMAT_B8G8R8_UNORM:
case VK_FORMAT_B8G8R8_SNORM:
case VK_FORMAT_B8G8R8_USCALED:
case VK_FORMAT_B8G8R8_SSCALED:
case VK_FORMAT_B8G8R8_UINT:
case VK_FORMAT_B8G8R8_SINT:
case VK_FORMAT_B8G8R8_SRGB: ret *= 3; break;
case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
case VK_FORMAT_A2B10G10R10_SNORM_PACK32:
case VK_FORMAT_A2B10G10R10_USCALED_PACK32:
case VK_FORMAT_A2B10G10R10_SSCALED_PACK32:
case VK_FORMAT_A2B10G10R10_UINT_PACK32:
case VK_FORMAT_A2B10G10R10_SINT_PACK32:
case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
case VK_FORMAT_A2R10G10B10_UINT_PACK32:
case VK_FORMAT_A2R10G10B10_SINT_PACK32:
case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_R8G8B8A8_SNORM:
case VK_FORMAT_R8G8B8A8_USCALED:
case VK_FORMAT_R8G8B8A8_SSCALED:
case VK_FORMAT_R8G8B8A8_UINT:
case VK_FORMAT_R8G8B8A8_SINT:
case VK_FORMAT_R8G8B8A8_SRGB:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_B8G8R8A8_SNORM:
case VK_FORMAT_B8G8R8A8_USCALED:
case VK_FORMAT_B8G8R8A8_SSCALED:
case VK_FORMAT_B8G8R8A8_UINT:
case VK_FORMAT_B8G8R8A8_SINT:
case VK_FORMAT_B8G8R8A8_SRGB:
case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
case VK_FORMAT_A8B8G8R8_USCALED_PACK32:
case VK_FORMAT_A8B8G8R8_SSCALED_PACK32:
case VK_FORMAT_A8B8G8R8_SINT_PACK32:
case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
case VK_FORMAT_R16G16_UNORM:
case VK_FORMAT_R16G16_SNORM:
case VK_FORMAT_R16G16_USCALED:
case VK_FORMAT_R16G16_SSCALED:
case VK_FORMAT_R16G16_UINT:
case VK_FORMAT_R16G16_SINT:
case VK_FORMAT_R16G16_SFLOAT:
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SINT:
case VK_FORMAT_R32_SFLOAT:
case VK_FORMAT_X8_D24_UNORM_PACK32:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: ret *= 4; break;
case VK_FORMAT_D16_UNORM_S8_UINT: ret *= 4; break;
case VK_FORMAT_R8G8_UNORM:
case VK_FORMAT_R8G8_SNORM:
case VK_FORMAT_R8G8_USCALED:
case VK_FORMAT_R8G8_SSCALED:
case VK_FORMAT_R8G8_UINT:
case VK_FORMAT_R8G8_SINT:
case VK_FORMAT_R8G8_SRGB:
case VK_FORMAT_R16_UNORM:
case VK_FORMAT_R16_SNORM:
case VK_FORMAT_R16_USCALED:
case VK_FORMAT_R16_SSCALED:
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16_SINT:
case VK_FORMAT_R16_SFLOAT:
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_R5G6B5_UNORM_PACK16:
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
case VK_FORMAT_B5G6R5_UNORM_PACK16:
case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
case VK_FORMAT_B4G4R4A4_UNORM_PACK16: ret *= 2; break;
case VK_FORMAT_R4G4_UNORM_PACK8:
case VK_FORMAT_R8_UNORM:
case VK_FORMAT_R8_SNORM:
case VK_FORMAT_R8_USCALED:
case VK_FORMAT_R8_SSCALED:
case VK_FORMAT_R8_UINT:
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R8_SRGB:
case VK_FORMAT_S8_UINT: ret *= 1; break;
case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:
case VK_FORMAT_BC4_UNORM_BLOCK:
case VK_FORMAT_BC4_SNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
case VK_FORMAT_EAC_R11_UNORM_BLOCK:
case VK_FORMAT_EAC_R11_SNORM_BLOCK:
ret = AlignUp4(w) * AlignUp4(h) * d;
ret /= 2;
break;
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC2_SRGB_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC3_SRGB_BLOCK:
case VK_FORMAT_BC5_UNORM_BLOCK:
case VK_FORMAT_BC5_SNORM_BLOCK:
case VK_FORMAT_BC6H_UFLOAT_BLOCK:
case VK_FORMAT_BC6H_SFLOAT_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
case VK_FORMAT_BC7_SRGB_BLOCK:
case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
ret = AlignUp4(w) * AlignUp4(h) * d;
ret *= 1;
break;
case VK_FORMAT_ASTC_4x4_UNORM_BLOCK:
case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:
astc[0] = 4;
astc[1] = 4;
break;
case VK_FORMAT_ASTC_5x4_UNORM_BLOCK:
case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:
astc[0] = 5;
astc[1] = 4;
break;
case VK_FORMAT_ASTC_5x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:
astc[0] = 5;
astc[1] = 5;
break;
case VK_FORMAT_ASTC_6x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:
astc[0] = 6;
astc[1] = 5;
break;
case VK_FORMAT_ASTC_6x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:
astc[0] = 6;
astc[1] = 6;
break;
case VK_FORMAT_ASTC_8x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:
astc[0] = 8;
astc[1] = 5;
break;
case VK_FORMAT_ASTC_8x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:
astc[0] = 8;
astc[1] = 6;
break;
case VK_FORMAT_ASTC_8x8_UNORM_BLOCK:
case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:
astc[0] = 8;
astc[1] = 8;
break;
case VK_FORMAT_ASTC_10x5_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:
astc[0] = 10;
astc[1] = 5;
break;
case VK_FORMAT_ASTC_10x6_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:
astc[0] = 10;
astc[1] = 6;
break;
case VK_FORMAT_ASTC_10x8_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:
astc[0] = 10;
astc[1] = 8;
break;
case VK_FORMAT_ASTC_10x10_UNORM_BLOCK:
case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:
astc[0] = 10;
astc[1] = 10;
break;
case VK_FORMAT_ASTC_12x10_UNORM_BLOCK:
case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:
astc[0] = 12;
astc[1] = 10;
break;
case VK_FORMAT_ASTC_12x12_UNORM_BLOCK:
case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:
astc[0] = 12;
astc[1] = 12;
break;
default:
ret = 1;
RDCERR("Unrecognised Vulkan Format: %d", Format);
break;
}
if(astc[0] > 0 && astc[1] > 0)
{
uint32_t blocks[2] = {(w / astc[0]), (h / astc[1])};
// how many blocks are needed - including any extra partial blocks
blocks[0] += (w % astc[0]) ? 1 : 0;
blocks[1] += (h % astc[1]) ? 1 : 0;
// ASTC blocks are all 128 bits each
return blocks[0] * blocks[1] * 16 * d;
}
return ret;
}
VkResourceRecord::~VkResourceRecord()
{
  // Work out which kind of resource this record wraps, so we only free the
  // auxiliary data that this resource type actually owns. Each resource type
  // owns at most one of the pointers below; the rest are unused or non-owning.
  VkResourceType resType = Resource != NULL ? IdentifyTypeByPtr(Resource) : eResUnknown;

  switch(resType)
  {
    case eResPhysicalDevice:
      SAFE_DELETE(memProps);
      break;
    case eResBuffer:
    case eResImage:
      // bufferviews and imageviews have non-owning pointers to the sparseinfo
      // struct - the owning buffer/image frees it here
      SAFE_DELETE(sparseInfo);
      break;
    case eResInstance:
    case eResDevice:
      SAFE_DELETE(instDevInfo);
      break;
    case eResSwapchain:
      SAFE_DELETE(swapInfo);
      break;
    case eResDeviceMemory:
      // free the shadow/reference data for mapped memory, if any was allocated
      if(memMapState)
      {
        Serialiser::FreeAlignedBuffer(memMapState->refData);
        SAFE_DELETE(memMapState);
      }
      break;
    case eResCommandBuffer:
      SAFE_DELETE(cmdInfo);
      break;
    case eResFramebuffer:
    case eResRenderPass:
      SAFE_DELETE_ARRAY(imageAttachments);
      break;
    case eResDescriptorSetLayout:
      // only the descriptor set layout actually owns the layout pointer;
      // descriptor sets point at it but don't own it
      SAFE_DELETE(descInfo->layout);
      SAFE_DELETE(descInfo);
      break;
    case eResDescriptorSet:
      SAFE_DELETE(descInfo);
      break;
    default:
      // no auxiliary data owned by other resource types
      break;
  }
}
void SparseMapping::Update(uint32_t numBindings, const VkSparseImageMemoryBind *pBindings)
{
  // update the image's page table with each new binding
  for(uint32_t i = 0; i < numBindings; i++)
  {
    const VkSparseImageMemoryBind &bind = pBindings[i];

    // VKTODOMED handle sparse image arrays or sparse images with mips
    RDCASSERT(bind.subresource.arrayLayer == 0 && bind.subresource.mipLevel == 0);

    // fetch the page table for the aspect this bind applies to
    pair<VkDeviceMemory, VkDeviceSize> *pageTable = pages[bind.subresource.aspectMask];

    // convert the bound region from texels into page units
    VkOffset3D pageOffs = bind.offset;
    pageOffs.x /= pagedim.width;
    pageOffs.y /= pagedim.height;
    pageOffs.z /= pagedim.depth;

    VkExtent3D pageExt = bind.extent;
    pageExt.width /= pagedim.width;
    pageExt.height /= pagedim.height;
    pageExt.depth /= pagedim.depth;

    // every page in the bound region maps to the same (memory, offset) pair
    pair<VkDeviceMemory, VkDeviceSize> memRange = std::make_pair(bind.memory, bind.memoryOffset);

    // walk the 3D page region and point each page at the bound memory
    for(uint32_t z = pageOffs.z; z < pageOffs.z + pageExt.depth; z++)
      for(uint32_t y = pageOffs.y; y < pageOffs.y + pageExt.height; y++)
        for(uint32_t x = pageOffs.x; x < pageOffs.x + pageExt.width; x++)
          pageTable[z * imgdim.width * imgdim.height + y * imgdim.width + x] = memRange;
  }
}
void SparseMapping::Update(uint32_t numBindings, const VkSparseMemoryBind *pBindings)
{
  // Update the opaque mappings list. The list is kept sorted by resourceOffset
  // and non-overlapping: each incoming bind either replaces, splits, or merges
  // with the existing ranges it touches.
  //
  // BUGFIX: previously the two reference declarations were swapped (curRange
  // was bound to pBindings[b] and newRange to *it) while the body logic was
  // written for the opposite roles. That inserted duplicates of existing
  // elements, turned the exact-match update into a self-assignment no-op, and
  // read a reference into the vector after erase() (dangling). The references
  // are now bound to match every use below.
  for(uint32_t b = 0; b < numBindings; b++)
  {
    // the binding being applied. This must NOT alias an element of
    // opaquemappings - it is read after insert()/erase() calls below.
    const VkSparseMemoryBind &newRange = pBindings[b];

    bool found = false;

    // this could be improved to do a binary search since the vector is sorted.
    for(auto it = opaquemappings.begin(); it != opaquemappings.end(); ++it)
    {
      // the existing mapping we're comparing against
      VkSparseMemoryBind &curRange = *it;

      // the binding we're applying is after this item in the list,
      // keep searching
      if(curRange.resourceOffset + curRange.size <= newRange.resourceOffset)
        continue;

      // the binding we're applying is before this item, but doesn't
      // overlap. Insert before us in the list
      if(curRange.resourceOffset >= newRange.resourceOffset + newRange.size)
      {
        opaquemappings.insert(it, newRange);
        found = true;
        break;
      }

      // with sparse mappings it will be reasonably common to update an exact
      // existing range, so check that first
      if(curRange.resourceOffset == newRange.resourceOffset && curRange.size == newRange.size)
      {
        // replace the existing range's memory binding with the new one
        *it = newRange;
        found = true;
        break;
      }

      // handle the new range being a subrange within the current range
      if(curRange.resourceOffset <= newRange.resourceOffset &&
         curRange.resourceOffset + curRange.size >= newRange.resourceOffset + newRange.size)
      {
        // they start in the same place
        if(curRange.resourceOffset == newRange.resourceOffset)
        {
          // change the current range to be the leftover second half
          it->resourceOffset += newRange.size;
          // insert the new mapping before our current one
          opaquemappings.insert(it, newRange);
          found = true;
          break;
        }
        // they end in the same place
        else if(curRange.resourceOffset + curRange.size == newRange.resourceOffset + newRange.size)
        {
          // save a copy of the existing range
          VkSparseMemoryBind cur = curRange;
          // set the new size of the first half
          cur.size = newRange.resourceOffset - curRange.resourceOffset;
          // add the new range where the current iterator was
          *it = newRange;
          // insert the old truncated mapping before our current position
          opaquemappings.insert(it, cur);
          found = true;
          break;
        }
        // the new range is a subsection
        else
        {
          // save a copy of the existing range
          VkSparseMemoryBind first = curRange;
          // set the new size of the first part
          first.size = newRange.resourceOffset - curRange.resourceOffset;
          // set the current range (third part) to start after the new range ends
          it->resourceOffset = newRange.resourceOffset + newRange.size;
          // first insert the new range before our current range
          it = opaquemappings.insert(it, newRange);
          // now insert the remaining first part before that
          opaquemappings.insert(it, first);
          found = true;
          break;
        }
      }

      // this new range overlaps the current one and some subsequent ranges. Merge together

      // find where this new range stops overlapping
      auto endit = it;
      for(; endit != opaquemappings.end(); ++endit)
      {
        if(newRange.resourceOffset + newRange.size <= endit->resourceOffset + endit->size)
          break;
      }

      // see if there are any leftovers of the overlapped ranges at the start or end
      bool leftoverstart = (curRange.resourceOffset < newRange.resourceOffset);
      bool leftoverend =
          (endit != opaquemappings.end() &&
           (endit->resourceOffset + endit->size > newRange.resourceOffset + newRange.size));

      // no leftovers, the new range entirely covers the current and last (if there is one)
      if(!leftoverstart && !leftoverend)
      {
        // erase all of the ranges. If endit points to a valid range,
        // it won't be erased, so we overwrite it. Otherwise it pointed
        // to end() so we just push_back()
        auto last = opaquemappings.erase(it, endit);
        if(last != opaquemappings.end())
          *last = newRange;
        else
          opaquemappings.push_back(newRange);
      }
      // leftover at the start, but not the end
      else if(leftoverstart && !leftoverend)
      {
        // save the current range (must copy before erase invalidates curRange)
        VkSparseMemoryBind cur = curRange;
        // modify the size to reflect what's left over
        cur.size = newRange.resourceOffset - cur.resourceOffset;

        // as above, erase and either re-insert or push_back()
        auto last = opaquemappings.erase(it, endit);
        if(last != opaquemappings.end())
        {
          *last = newRange;
          opaquemappings.insert(last, cur);
        }
        else
        {
          opaquemappings.push_back(cur);
          opaquemappings.push_back(newRange);
        }
      }
      // leftover at the end, but not the start
      else if(!leftoverstart && leftoverend)
      {
        // erase up to but not including endit
        auto last = opaquemappings.erase(it, endit);
        // modify the leftovers at the end
        last->resourceOffset = newRange.resourceOffset + newRange.size;
        // insert the new range before
        opaquemappings.insert(last, newRange);
      }
      // leftovers at both ends
      else
      {
        // save the current range (must copy before erase invalidates curRange)
        VkSparseMemoryBind cur = curRange;
        // modify the size to reflect what's left over
        cur.size = newRange.resourceOffset - cur.resourceOffset;

        // erase up to but not including endit
        auto last = opaquemappings.erase(it, endit);
        // modify the leftovers at the end
        last->resourceOffset = newRange.resourceOffset + newRange.size;
        // insert the new range before
        auto newit = opaquemappings.insert(last, newRange);
        // insert the modified leftovers before that
        opaquemappings.insert(newit, cur);
      }

      found = true;
      break;
    }

    // if it wasn't found, this binding is after all mappings in our list
    if(!found)
      opaquemappings.push_back(newRange);
  }
}
|
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <lib/fidl/llcpp/message.h>
#include <lib/zx/event.h>
#include <gtest/gtest.h>
// A small byte-only message (no handles) must convert verbatim.
TEST(OutgoingToIncomingMessage, ByteMessage) {
  uint8_t payload[3] = {1, 2, 3};
  fidl::OutgoingMessage outgoing(payload, sizeof(payload), sizeof(payload), nullptr, 0, 0);

  auto conversion = fidl::OutgoingToIncomingMessage(outgoing);
  ASSERT_EQ(ZX_OK, conversion.status());

  fidl_incoming_msg_t* incoming = conversion.incoming_message();
  ASSERT_EQ(sizeof(payload), incoming->num_bytes);
  EXPECT_EQ(0, memcmp(incoming->bytes, payload, incoming->num_bytes));
  ASSERT_EQ(0u, incoming->num_handles);
}
#ifdef __Fuchsia__
// A handle with matching type/rights must survive the conversion intact.
TEST(OutgoingToIncomingMessage, Handles) {
  uint8_t payload[16];
  zx::event event;
  ASSERT_EQ(ZX_OK, zx::event::create(0, &event));

  // Hand the event to the outgoing message with a MOVE disposition.
  zx_handle_disposition_t dispositions[1] = {zx_handle_disposition_t{
      .operation = ZX_HANDLE_OP_MOVE,
      .handle = event.get(),
      .type = ZX_OBJ_TYPE_EVENT,
      .rights = ZX_DEFAULT_EVENT_RIGHTS,
      .result = ZX_OK,
  }};
  fidl::OutgoingMessage outgoing(payload, 16, 16, dispositions, 1, 1);

  auto conversion = fidl::OutgoingToIncomingMessage(outgoing);
  ASSERT_EQ(ZX_OK, conversion.status());

  fidl_incoming_msg_t* incoming = conversion.incoming_message();
  EXPECT_EQ(incoming->num_bytes, 16u);
  EXPECT_EQ(0, memcmp(incoming->bytes, payload, incoming->num_bytes));
  EXPECT_EQ(incoming->num_handles, 1u);
  EXPECT_EQ(incoming->handles[0].handle, event.get());
  EXPECT_EQ(incoming->handles[0].type, ZX_OBJ_TYPE_EVENT);
  EXPECT_EQ(incoming->handles[0].rights, ZX_DEFAULT_EVENT_RIGHTS);
}
// Declaring the wrong object type for a handle must fail the conversion.
TEST(OutgoingToIncomingMessage, HandlesWrongType) {
  uint8_t payload[16];
  zx::event event;
  ASSERT_EQ(ZX_OK, zx::event::create(0, &event));

  // The handle is an event, but the disposition claims it is a channel.
  zx_handle_disposition_t dispositions[1] = {zx_handle_disposition_t{
      .operation = ZX_HANDLE_OP_MOVE,
      .handle = event.get(),
      .type = ZX_OBJ_TYPE_CHANNEL,
      .rights = ZX_RIGHT_SAME_RIGHTS,
      .result = ZX_OK,
  }};
  fidl::OutgoingMessage outgoing(payload, 16, 16, dispositions, 1, 1);

  auto conversion = fidl::OutgoingToIncomingMessage(outgoing);
  ASSERT_EQ(ZX_ERR_INVALID_ARGS, conversion.status());
}
// Declaring rights the handle does not carry must fail the conversion.
TEST(OutgoingToIncomingMessage, HandlesWrongRights) {
  uint8_t payload[16];
  zx::event event;
  ASSERT_EQ(ZX_OK, zx::event::create(0, &event));

  // ZX_RIGHT_DESTROY is not among the default event rights.
  zx_handle_disposition_t dispositions[1] = {zx_handle_disposition_t{
      .operation = ZX_HANDLE_OP_MOVE,
      .handle = event.get(),
      .type = ZX_OBJ_TYPE_EVENT,
      .rights = ZX_RIGHT_DESTROY,
      .result = ZX_OK,
  }};
  fidl::OutgoingMessage outgoing(payload, 16, 16, dispositions, 1, 1);

  auto conversion = fidl::OutgoingToIncomingMessage(outgoing);
  ASSERT_EQ(ZX_ERR_INVALID_ARGS, conversion.status());
}
#endif
|
/*
* Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2011, Oliver Tappe <zooey@hirschkaefer.de>
* Distributed under the terms of the MIT License.
*/
#include <package/hpkg/BlockBufferPoolNoLock.h>
namespace BPackageKit {
namespace BHPKG {
// Construct a buffer pool with the given block size and cache limit.
// All pooling behaviour comes from the BBlockBufferPool base class; this
// subclass only stubs out the locking (see Lock()/Unlock()).
BBlockBufferPoolNoLock::BBlockBufferPoolNoLock(size_t blockSize,
	uint32 maxCachedBlocks)
	:
	BBlockBufferPool(blockSize, maxCachedBlocks)
{
}
// No cleanup beyond what the base class destructor does.
BBlockBufferPoolNoLock::~BBlockBufferPoolNoLock()
{
}
// No-op lock: always reports success. This variant of the pool performs no
// synchronization, so callers must ensure single-threaded use themselves.
bool BBlockBufferPoolNoLock::Lock()
{
	return true;
}
// No-op unlock, matching the no-op Lock() above.
void BBlockBufferPoolNoLock::Unlock()
{
}
} // namespace BHPKG
} // namespace BPackageKit
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file target_info.cc
*/
#include <tvm/target_info.h>
#include <tvm/packed_func_ext.h>
namespace tvm {
// Pretty-printer registration: renders a MemoryInfoNode in IR dumps as
// "mem-info(unit_bits=..., max_num_bits=..., max_simd_bits=..., head_address=...)".
TVM_STATIC_IR_FUNCTOR(IRPrinter, vtable)
.set_dispatch<MemoryInfoNode>([](const MemoryInfoNode *op, IRPrinter *p) {
    p->stream << "mem-info("
              << "unit_bits=" << op->unit_bits << ", "
              << "max_num_bits=" << op->max_num_bits << ", "
              << "max_simd_bits=" << op->max_simd_bits << ", "
              << "head_address=" << op->head_address << ")";
  });

// Register MemoryInfoNode with TVM's node system.
TVM_REGISTER_NODE_TYPE(MemoryInfoNode);
// Look up memory info for a storage scope via the global function registry.
// The info provider is expected under the name "tvm.info.mem.<scope>"; if no
// provider is registered, a default-constructed (null) MemoryInfo is returned.
MemoryInfo GetMemoryInfo(const std::string& scope) {
  const std::string registry_name = "tvm.info.mem." + scope;
  const runtime::PackedFunc* provider = runtime::Registry::Get(registry_name);
  if (provider != nullptr) {
    return (*provider)();
  }
  return MemoryInfo();
}
} // namespace tvm
|
/*
*******************************************************************************
* Copyright (c) 2020-2021, STMicroelectronics
* All rights reserved.
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
*******************************************************************************
*/
#if defined(ARDUINO_GENERIC_L151RDTX) || defined(ARDUINO_GENERIC_L151RDYX) ||\
defined(ARDUINO_GENERIC_L152RDTX) || defined(ARDUINO_GENERIC_L152RDYX) ||\
defined(ARDUINO_GENERIC_L162RDTX) || defined(ARDUINO_GENERIC_L162RDYX)
#include "pins_arduino.h"
// Digital PinName array
// Maps Arduino digital pin number (the array index, Dx in the comments) to the
// STM32 PinName. Pins that double as analog inputs are marked with their Ax alias.
const PinName digitalPin[] = {
  PA_0,  // D0/A0
  PA_1,  // D1/A1
  PA_2,  // D2/A2
  PA_3,  // D3/A3
  PA_4,  // D4/A4
  PA_5,  // D5/A5
  PA_6,  // D6/A6
  PA_7,  // D7/A7
  PA_8,  // D8
  PA_9,  // D9
  PA_10, // D10
  PA_11, // D11
  PA_12, // D12
  PA_13, // D13
  PA_14, // D14
  PA_15, // D15
  PB_0,  // D16/A8
  PB_1,  // D17/A9
  PB_2,  // D18
  PB_3,  // D19
  PB_4,  // D20
  PB_5,  // D21
  PB_6,  // D22
  PB_7,  // D23
  PB_8,  // D24
  PB_9,  // D25
  PB_10, // D26
  PB_11, // D27
  PB_12, // D28/A10
  PB_13, // D29/A11
  PB_14, // D30/A12
  PB_15, // D31/A13
  PC_0,  // D32/A14
  PC_1,  // D33/A15
  PC_2,  // D34/A16
  PC_3,  // D35/A17
  PC_4,  // D36/A18
  PC_5,  // D37/A19
  PC_6,  // D38
  PC_7,  // D39
  PC_8,  // D40
  PC_9,  // D41
  PC_10, // D42
  PC_11, // D43
  PC_12, // D44
  PC_13, // D45
  PC_14, // D46
  PC_15, // D47
  PD_2,  // D48
  PH_0,  // D49
  PH_1   // D50
};
// Analog (Ax) pin number array
// Maps analog pin number (the array index, Ax) to its digital pin number in
// digitalPin[] above; e.g. A8 is digital pin 16, which is PB0.
const uint32_t analogInputPin[] = {
  0,  // A0,  PA0
  1,  // A1,  PA1
  2,  // A2,  PA2
  3,  // A3,  PA3
  4,  // A4,  PA4
  5,  // A5,  PA5
  6,  // A6,  PA6
  7,  // A7,  PA7
  16, // A8,  PB0
  17, // A9,  PB1
  28, // A10, PB12
  29, // A11, PB13
  30, // A12, PB14
  31, // A13, PB15
  32, // A14, PC0
  33, // A15, PC1
  34, // A16, PC2
  35, // A17, PC3
  36, // A18, PC4
  37  // A19, PC5
};
#endif /* ARDUINO_GENERIC_* */
|
/*******************************************************************************
* Copyright 2020-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <CL/sycl.hpp>
#include "oneapi/dnnl/dnnl_sycl.h"
#include "common/c_types_map.hpp"
#include "common/engine.hpp"
#include "common/memory.hpp"
#include "common/utils.hpp"
#include "sycl/sycl_c_types_map.hpp"
#include "sycl/sycl_engine.hpp"
#include "sycl/sycl_memory_storage.hpp"
using namespace dnnl::impl;
using namespace dnnl::impl::sycl;
// Creates a memory object backed by SYCL storage (USM or buffer, selected by
// memory_kind). If handle == DNNL_MEMORY_ALLOCATE the storage is allocated by
// the library; otherwise `handle` is adopted as a user-provided pointer.
status_t dnnl_sycl_interop_memory_create(memory_t **memory,
        const memory_desc_t *md, engine_t *engine, memory_kind_t memory_kind,
        void *handle) {
    using namespace dnnl::impl::sycl;

    // Validate arguments: non-null pointers and a SYCL engine.
    if (utils::any_null(memory, md, engine)
            || engine->runtime_kind() != runtime_kind::sycl)
        return status::invalid_arguments;

    // Reject descriptors whose layout/size is not fully determined.
    const memory_desc_wrapper mdw(md);
    if (mdw.format_any() || mdw.has_runtime_dims_or_strides())
        return status::invalid_arguments;

    const bool do_alloc = (handle == DNNL_MEMORY_ALLOCATE);
    const unsigned flags
            = do_alloc ? memory_flags_t::alloc : memory_flags_t::use_runtime_ptr;
    void *init_handle = do_alloc ? nullptr : handle;

    // Pick the storage implementation matching the requested memory kind.
    std::unique_ptr<memory_storage_t> mem_storage;
    if (memory_kind == memory_kind::usm)
        mem_storage.reset(new sycl_usm_memory_storage_t(engine));
    else
        mem_storage.reset(new sycl_buffer_memory_storage_t(engine));
    if (!mem_storage) return status::out_of_memory;

    CHECK(mem_storage->init(flags, mdw.size(), init_handle));

    return safe_ptr_assign(
            *memory, new memory_t(engine, md, std::move(mem_storage)));
}
// Rebinds a memory object to a user-provided SYCL buffer. If a stream is
// given, its before/after exec hooks bracket the storage swap.
status_t dnnl_sycl_interop_memory_set_buffer(
        memory_t *memory, void *buffer, stream_t *stream) {
    using namespace dnnl::impl::sycl;

    if (utils::any_null(memory, buffer)
            || memory->engine()->runtime_kind() != runtime_kind::sycl)
        return status::invalid_arguments;

    std::unique_ptr<memory_storage_t> storage(
            new sycl_buffer_memory_storage_t(memory->engine()));
    if (!storage) return status::out_of_memory;

    // Wrap the user's buffer; the size comes from the memory descriptor.
    const size_t size = memory_desc_wrapper(memory->md()).size();
    CHECK(storage->init(memory_flags_t::use_runtime_ptr, size, buffer));

    if (stream) stream->before_exec_hook();
    const status_t status = memory->reset_memory_storage(std::move(storage));
    if (stream) stream->after_exec_hook();

    return status;
}
// Reports whether a SYCL memory object is backed by USM or a buffer.
status_t dnnl_sycl_interop_memory_get_memory_kind(
        const memory_t *memory, memory_kind_t *memory_kind) {
    using namespace dnnl::impl::sycl;

    if (utils::any_null(memory, memory_kind)
            || memory->engine()->runtime_kind() != runtime_kind::sycl)
        return status::invalid_arguments;

    // The storage of a SYCL memory object is always a sycl_memory_storage_base_t.
    const auto *storage = utils::downcast<const sycl_memory_storage_base_t *>(
            memory->memory_storage());
    *memory_kind = storage->memory_kind();
    return status::success;
}
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "runtime/exec-env.h"
#include <vector>
#include <boost/algorithm/string.hpp>
#include <gflags/gflags.h>
#include <gutil/strings/substitute.h>
#include <kudu/client/client.h>
#include "common/logging.h"
#include "common/object-pool.h"
#include "exec/kudu-util.h"
#include "gen-cpp/ImpalaInternalService.h"
#include "kudu/rpc/service_if.h"
#include "rpc/rpc-mgr.h"
#include "runtime/backend-client.h"
#include "runtime/bufferpool/buffer-pool.h"
#include "runtime/bufferpool/reservation-tracker.h"
#include "runtime/client-cache.h"
#include "runtime/coordinator.h"
#include "runtime/data-stream-mgr.h"
#include "runtime/io/disk-io-mgr.h"
#include "runtime/hbase-table-factory.h"
#include "runtime/hdfs-fs-cache.h"
#include "runtime/krpc-data-stream-mgr.h"
#include "runtime/lib-cache.h"
#include "runtime/mem-tracker.h"
#include "runtime/query-exec-mgr.h"
#include "runtime/thread-resource-mgr.h"
#include "runtime/tmp-file-mgr.h"
#include "scheduling/admission-controller.h"
#include "scheduling/request-pool-service.h"
#include "scheduling/scheduler.h"
#include "service/data-stream-service.h"
#include "service/frontend.h"
#include "statestore/statestore-subscriber.h"
#include "util/debug-util.h"
#include "util/default-path-handlers.h"
#include "util/hdfs-bulk-ops.h"
#include "util/mem-info.h"
#include "util/memory-metrics.h"
#include "util/metrics.h"
#include "util/network-util.h"
#include "util/openssl-util.h"
#include "util/parse-util.h"
#include "util/pretty-printer.h"
#include "util/thread-pool.h"
#include "util/webserver.h"
#include "common/names.h"
using boost::algorithm::join;
using kudu::rpc::ServiceIf;
using namespace strings;
DEFINE_string(catalog_service_host, "localhost",
"hostname where CatalogService is running");
DEFINE_bool(enable_webserver, true, "If true, debug webserver is enabled");
DEFINE_string(state_store_host, "localhost",
"hostname where StatestoreService is running");
DEFINE_int32(state_store_subscriber_port, 23000,
"port where StatestoreSubscriberService should be exported");
DEFINE_int32(num_hdfs_worker_threads, 16,
"(Advanced) The number of threads in the global HDFS operation pool");
DEFINE_bool(disable_admission_control, false, "Disables admission control.");
DEFINE_bool_hidden(use_krpc, false, "Used to indicate whether to use KRPC for the "
"DataStream subsystem, or the Thrift RPC layer instead. Defaults to false. "
"KRPC not yet supported");
DEFINE_int32(datastream_service_queue_depth, 1024, "Size of datastream service queue");
DEFINE_int32(datastream_service_num_svc_threads, 0, "Number of datastream service "
"processing threads. If left at default value 0, it will be set to number of CPU "
"cores.");
DECLARE_int32(state_store_port);
DECLARE_int32(num_threads_per_core);
DECLARE_int32(num_cores);
DECLARE_int32(be_port);
DECLARE_int32(krpc_port);
DECLARE_string(mem_limit);
DECLARE_string(buffer_pool_limit);
DECLARE_string(buffer_pool_clean_pages_limit);
DECLARE_int64(min_buffer_size);
DECLARE_bool(is_coordinator);
DECLARE_int32(webserver_port);
DECLARE_int64(tcmalloc_max_total_thread_cache_bytes);
// TODO-MT: rename or retire
DEFINE_int32(coordinator_rpc_threads, 12, "(Advanced) Number of threads available to "
"start fragments on remote Impala daemons.");
DECLARE_string(ssl_client_ca_certificate);
DEFINE_int32(backend_client_connection_num_retries, 3, "Retry backend connections.");
// When network is unstable, TCP will retry and sending could take longer time.
// Choose 5 minutes as default timeout because we don't want RPC timeout be triggered
// by intermittent network issue. The timeout should not be too long either, otherwise
// query could hang for a while before it's cancelled.
DEFINE_int32(backend_client_rpc_timeout_ms, 300000, "(Advanced) The underlying "
"TSocket send/recv timeout in milliseconds for a backend client RPC. ");
DEFINE_int32(catalog_client_connection_num_retries, 3, "Retry catalog connections.");
DEFINE_int32(catalog_client_rpc_timeout_ms, 0, "(Advanced) The underlying TSocket "
"send/recv timeout in milliseconds for a catalog client RPC.");
const static string DEFAULT_FS = "fs.defaultFS";
namespace impala {
// Holds the shared_ptr to a KuduClient. Defined here (rather than in the
// header) so the Kudu client headers are only needed by this .cc file.
// NOTE(review): that rationale is inferred from the pattern - confirm
// against exec-env.h.
struct ExecEnv::KuduClientPtr {
  kudu::client::sp::shared_ptr<kudu::client::KuduClient> kudu_client;
};
ExecEnv* ExecEnv::exec_env_ = nullptr;
// Default constructor: pulls hostname, ports and statestore location from
// command-line flags and delegates to the fully-parameterized constructor.
ExecEnv::ExecEnv()
  : ExecEnv(FLAGS_hostname, FLAGS_be_port, FLAGS_krpc_port,
        FLAGS_state_store_subscriber_port, FLAGS_webserver_port,
        FLAGS_state_store_host, FLAGS_state_store_port) {}
// Constructs the process-wide execution environment: object pool, metrics,
// RPC client caches, I/O manager, web server, thread/tmp-file managers, and
// (conditionally) coordinator-only and admission-control services. Publishes
// itself via the exec_env_ singleton pointer at the end.
ExecEnv::ExecEnv(const string& hostname, int backend_port, int krpc_port,
    int subscriber_port, int webserver_port, const string& statestore_host,
    int statestore_port)
  : obj_pool_(new ObjectPool),
    metrics_(new MetricGroup("impala-metrics")),
    // Thrift client caches for backend and catalog RPCs; retry counts,
    // timeouts and SSL usage come from flags.
    impalad_client_cache_(
        new ImpalaBackendClientCache(FLAGS_backend_client_connection_num_retries, 0,
            FLAGS_backend_client_rpc_timeout_ms, FLAGS_backend_client_rpc_timeout_ms, "",
            !FLAGS_ssl_client_ca_certificate.empty())),
    catalogd_client_cache_(
        new CatalogServiceClientCache(FLAGS_catalog_client_connection_num_retries, 0,
            FLAGS_catalog_client_rpc_timeout_ms, FLAGS_catalog_client_rpc_timeout_ms, "",
            !FLAGS_ssl_client_ca_certificate.empty())),
    htable_factory_(new HBaseTableFactory()),
    disk_io_mgr_(new io::DiskIoMgr()),
    webserver_(new Webserver(webserver_port)),
    pool_mem_trackers_(new PoolMemTrackerRegistry),
    thread_mgr_(new ThreadResourceMgr),
    tmp_file_mgr_(new TmpFileMgr),
    frontend_(new Frontend()),
    async_rpc_pool_(new CallableThreadPool("rpc-pool", "async-rpc-sender", 8, 10000)),
    query_exec_mgr_(new QueryExecMgr()),
    // Webserver only runs if enabled by flag AND a positive port was given.
    enable_webserver_(FLAGS_enable_webserver && webserver_port > 0),
    backend_address_(MakeNetworkAddress(hostname, backend_port)) {
  // Choose the data-stream implementation: KRPC or the legacy Thrift layer.
  if (FLAGS_use_krpc) {
    VLOG_QUERY << "Using KRPC.";
    // KRPC relies on resolved IP address. It's set in StartServices().
    krpc_address_.__set_port(krpc_port);
    rpc_mgr_.reset(new RpcMgr(IsInternalTlsConfigured()));
    stream_mgr_.reset(new KrpcDataStreamMgr(metrics_.get()));
  } else {
    stream_mgr_.reset(new DataStreamMgr(metrics_.get()));
  }
  request_pool_service_.reset(new RequestPoolService(metrics_.get()));

  // Statestore subscriber id embeds this daemon's backend address.
  TNetworkAddress subscriber_address = MakeNetworkAddress(hostname, subscriber_port);
  TNetworkAddress statestore_address =
      MakeNetworkAddress(statestore_host, statestore_port);
  statestore_subscriber_.reset(new StatestoreSubscriber(
      Substitute("impalad@$0", TNetworkAddressToString(backend_address_)),
      subscriber_address, statestore_address, metrics_.get()));

  // Coordinator-only services: HDFS worker pool, fragment-exec RPC pool,
  // and the scheduler.
  if (FLAGS_is_coordinator) {
    hdfs_op_thread_pool_.reset(
        CreateHdfsOpThreadPool("hdfs-worker-pool", FLAGS_num_hdfs_worker_threads, 1024));
    exec_rpc_thread_pool_.reset(new CallableThreadPool("exec-rpc-pool", "worker",
        FLAGS_coordinator_rpc_threads, numeric_limits<int32_t>::max()));
    scheduler_.reset(new Scheduler(statestore_subscriber_.get(),
        statestore_subscriber_->id(), metrics_.get(), webserver_.get(),
        request_pool_service_.get()));
  }
  if (FLAGS_disable_admission_control) {
    LOG(INFO) << "Admission control is disabled.";
  } else {
    admission_controller_.reset(new AdmissionController(statestore_subscriber_.get(),
        request_pool_service_.get(), metrics_.get(), backend_address_));
  }
  // Publish the singleton. NOTE(review): plain pointer write - assumes a
  // single ExecEnv is constructed before any concurrent readers; confirm.
  exec_env_ = this;
}
// Tears down components whose destruction order matters; everything else is
// destroyed by the member unique_ptrs in reverse declaration order.
ExecEnv::~ExecEnv() {
  // Release buffer pool reservations before the pool itself goes away.
  if (buffer_reservation_ != nullptr) buffer_reservation_->Close();
  // Stop KRPC services (only set when FLAGS_use_krpc) before dependents.
  if (rpc_mgr_ != nullptr) rpc_mgr_->Shutdown();
  disk_io_mgr_.reset(); // Need to tear down before mem_tracker_.
}
// Minimal initialization path used by frontend tests: marks this environment
// as FE-test mode and installs an unlimited (-1) process MemTracker. The full
// Init() sequence is intentionally skipped.
Status ExecEnv::InitForFeTests() {
  is_fe_tests_ = true;
  mem_tracker_.reset(new MemTracker(-1, "Process"));
  return Status::OK();
}
// One-time initialization of process-wide services. Order matters: thread
// pools first, then the process memory limit and buffer pool that later
// components depend on, then metrics, the process MemTracker, KRPC, and
// finally the webserver/scheduler/admission services that report on all of
// the above.
Status ExecEnv::Init() {
  // Initialize thread pools
  if (FLAGS_is_coordinator) {
    RETURN_IF_ERROR(exec_rpc_thread_pool_->Init());
    RETURN_IF_ERROR(hdfs_op_thread_pool_->Init());
  }
  RETURN_IF_ERROR(async_rpc_pool_->Init());

  // Initialize global memory limit.
  // Depending on the system configuration, we will have to calculate the process
  // memory limit either based on the available physical memory, or if overcommitting
  // is turned off, we use the memory commit limit from /proc/meminfo (see
  // IMPALA-1690).
  int64_t bytes_limit = 0;
  bool is_percent;
  int64_t system_mem;
  if (MemInfo::vm_overcommit() == 2 &&
      MemInfo::commit_limit() < MemInfo::physical_mem()) {
    system_mem = MemInfo::commit_limit();
    bytes_limit = ParseUtil::ParseMemSpec(FLAGS_mem_limit, &is_percent, system_mem);
    // There might be the case of misconfiguration, when on a system swap is disabled
    // and overcommitting is turned off the actual usable memory is less than the
    // available physical memory.
    LOG(WARNING) << "This system shows a discrepancy between the available "
                 << "memory and the memory commit limit allowed by the "
                 << "operating system. ( Mem: " << MemInfo::physical_mem()
                 << "<=> CommitLimit: "
                 << MemInfo::commit_limit() << "). "
                 << "Impala will adhere to the smaller value by setting the "
                 << "process memory limit to " << bytes_limit << " "
                 << "Please verify the system configuration. Specifically, "
                 << "/proc/sys/vm/overcommit_memory and "
                 << "/proc/sys/vm/overcommit_ratio.";
  } else {
    system_mem = MemInfo::physical_mem();
    bytes_limit = ParseUtil::ParseMemSpec(FLAGS_mem_limit, &is_percent, system_mem);
  }
  // ParseMemSpec() returns -1 for invalid input and 0 to mean unlimited. From Impala
  // 2.11 onwards we do not support unlimited process memory limits.
  if (bytes_limit <= 0) {
    return Status(Substitute("The process memory limit (--mem_limit) must be a positive "
        "bytes value or percentage: $0", FLAGS_mem_limit));
  }

  // Buffer pool sizing: the pool gets a fraction of the process limit, rounded
  // down to a whole number of minimum-sized buffers; clean pages get a
  // fraction of the pool in turn.
  if (!BitUtil::IsPowerOf2(FLAGS_min_buffer_size)) {
    return Status(Substitute(
        "--min_buffer_size must be a power-of-two: $0", FLAGS_min_buffer_size));
  }
  int64_t buffer_pool_limit = ParseUtil::ParseMemSpec(FLAGS_buffer_pool_limit,
      &is_percent, bytes_limit);
  if (buffer_pool_limit <= 0) {
    return Status(Substitute("Invalid --buffer_pool_limit value, must be a percentage or "
        "positive bytes value or percentage: $0", FLAGS_buffer_pool_limit));
  }
  buffer_pool_limit = BitUtil::RoundDown(buffer_pool_limit, FLAGS_min_buffer_size);
  int64_t clean_pages_limit = ParseUtil::ParseMemSpec(FLAGS_buffer_pool_clean_pages_limit,
      &is_percent, buffer_pool_limit);
  if (clean_pages_limit <= 0) {
    return Status(Substitute("Invalid --buffer_pool_clean_pages_limit value, must be a percentage or "
        "positive bytes value or percentage: $0", FLAGS_buffer_pool_clean_pages_limit));
  }
  InitBufferPool(FLAGS_min_buffer_size, buffer_pool_limit, clean_pages_limit);

  // Metrics must be initialized before anything below registers with them.
  RETURN_IF_ERROR(metrics_->Init(enable_webserver_ ? webserver_.get() : nullptr));
  impalad_client_cache_->InitMetrics(metrics_.get(), "impala-server.backends");
  catalogd_client_cache_->InitMetrics(metrics_.get(), "catalog.server");
  RETURN_IF_ERROR(RegisterMemoryMetrics(
      metrics_.get(), true, buffer_reservation_.get(), buffer_pool_.get()));

  // Resolve hostname to IP address.
  RETURN_IF_ERROR(HostnameToIpAddr(backend_address_.hostname, &ip_address_));

  // Root MemTracker for the whole process, driven by the aggregate metric.
  mem_tracker_.reset(
      new MemTracker(AggregateMemoryMetrics::TOTAL_USED, bytes_limit, "Process"));
  // Add BufferPool MemTrackers for cached memory that is not tracked against queries
  // but is included in process memory consumption.
  obj_pool_->Add(new MemTracker(BufferPoolMetric::FREE_BUFFER_BYTES, -1,
      "Buffer Pool: Free Buffers", mem_tracker_.get()));
  obj_pool_->Add(new MemTracker(BufferPoolMetric::CLEAN_PAGE_BYTES, -1,
      "Buffer Pool: Clean Pages", mem_tracker_.get()));
  // Also need a MemTracker for unused reservations as a negative value. Unused
  // reservations are counted against queries but not against the process memory
  // consumption. This accounts for that difference.
  IntGauge* negated_unused_reservation = obj_pool_->Add(new NegatedGauge(
      MakeTMetricDef("negated_unused_reservation", TMetricKind::GAUGE, TUnit::BYTES),
      BufferPoolMetric::UNUSED_RESERVATION_BYTES));
  obj_pool_->Add(new MemTracker(negated_unused_reservation, -1,
      "Buffer Pool: Unused Reservation", mem_tracker_.get()));

  // Initialize the RPCMgr before allowing services registration.
  if (FLAGS_use_krpc) {
    krpc_address_.__set_hostname(ip_address_);
    RETURN_IF_ERROR(rpc_mgr_->Init());
    // Add a MemTracker for memory used to store incoming calls before they handed over to
    // the data stream manager.
    MemTracker* data_svc_tracker = obj_pool_->Add(
        new MemTracker(-1, "Data Stream Service", mem_tracker_.get()));
    // Add a MemTracker for the data stream manager, which uses it to track memory used by
    // deferred RPC calls while they are buffered in the data stream manager.
    MemTracker* stream_mgr_tracker = obj_pool_->Add(
        new MemTracker(-1, "Data Stream Queued RPC Calls", mem_tracker_.get()));
    RETURN_IF_ERROR(KrpcStreamMgr()->Init(stream_mgr_tracker, data_svc_tracker));
    unique_ptr<ServiceIf> data_svc(new DataStreamService(rpc_mgr_.get()));
    int num_svc_threads = FLAGS_datastream_service_num_svc_threads > 0 ?
        FLAGS_datastream_service_num_svc_threads : CpuInfo::num_cores();
    RETURN_IF_ERROR(rpc_mgr_->RegisterService(num_svc_threads,
        FLAGS_datastream_service_queue_depth, move(data_svc), data_svc_tracker));
    // Bump thread cache to 1GB to reduce contention for TCMalloc central
    // list's spinlock.
    if (FLAGS_tcmalloc_max_total_thread_cache_bytes == 0) {
      FLAGS_tcmalloc_max_total_thread_cache_bytes = 1 << 30;
    }
  }

#if !defined(ADDRESS_SANITIZER) && !defined(THREAD_SANITIZER)
  // Change the total TCMalloc thread cache size if necessary.
  if (FLAGS_tcmalloc_max_total_thread_cache_bytes > 0 &&
      !MallocExtension::instance()->SetNumericProperty(
          "tcmalloc.max_total_thread_cache_bytes",
          FLAGS_tcmalloc_max_total_thread_cache_bytes)) {
    return Status("Failed to change TCMalloc total thread cache size.");
  }
  // A MemTracker for TCMalloc overhead which is the difference between the physical bytes
  // reserved (TcmallocMetric::PHYSICAL_BYTES_RESERVED) and the bytes in use
  // (TcmallocMetrics::BYTES_IN_USE). This overhead accounts for all the cached freelists
  // used by TCMalloc.
  IntGauge* negated_bytes_in_use = obj_pool_->Add(new NegatedGauge(
      MakeTMetricDef("negated_tcmalloc_bytes_in_use", TMetricKind::GAUGE, TUnit::BYTES),
      TcmallocMetric::BYTES_IN_USE));
  vector<IntGauge*> overhead_metrics;
  overhead_metrics.push_back(negated_bytes_in_use);
  overhead_metrics.push_back(TcmallocMetric::PHYSICAL_BYTES_RESERVED);
  SumGauge* tcmalloc_overhead = obj_pool_->Add(new SumGauge(
      MakeTMetricDef("tcmalloc_overhead", TMetricKind::GAUGE, TUnit::BYTES),
      overhead_metrics));
  obj_pool_->Add(
      new MemTracker(tcmalloc_overhead, -1, "TCMalloc Overhead", mem_tracker_.get()));
#endif
  mem_tracker_->RegisterMetrics(metrics_.get(), "mem-tracker.process");

  // Warn (but do not fail) if the configured limit exceeds physical memory.
  if (bytes_limit > MemInfo::physical_mem()) {
    LOG(WARNING) << "Memory limit "
                 << PrettyPrinter::Print(bytes_limit, TUnit::BYTES)
                 << " exceeds physical memory of "
                 << PrettyPrinter::Print(MemInfo::physical_mem(), TUnit::BYTES);
  }
  LOG(INFO) << "Using global memory limit: "
            << PrettyPrinter::Print(bytes_limit, TUnit::BYTES);
  LOG(INFO) << "Buffer pool limit: "
            << PrettyPrinter::Print(buffer_pool_limit, TUnit::BYTES);

  RETURN_IF_ERROR(disk_io_mgr_->Init(mem_tracker_.get()));
  // Under memory pressure, ask the disk I/O manager to free cached buffers.
  mem_tracker_->AddGcFunction(
      [this](int64_t bytes_to_free) { disk_io_mgr_->GcIoBuffers(bytes_to_free); });

  // Start services in order to ensure that dependencies between them are met
  if (enable_webserver_) {
    AddDefaultUrlCallbacks(webserver_.get(), mem_tracker_.get(), metrics_.get());
    RETURN_IF_ERROR(webserver_->Start());
  } else {
    LOG(INFO) << "Not starting webserver";
  }

  if (scheduler_ != nullptr) {
    RETURN_IF_ERROR(scheduler_->Init(backend_address_, krpc_address_, ip_address_));
  }
  if (admission_controller_ != nullptr) RETURN_IF_ERROR(admission_controller_->Init());

  // Get the fs.defaultFS value set in core-site.xml and assign it to configured_defaultFs
  TGetHadoopConfigRequest config_request;
  config_request.__set_name(DEFAULT_FS);
  TGetHadoopConfigResponse config_response;
  RETURN_IF_ERROR(frontend_->GetHadoopConfig(config_request, &config_response));
  if (config_response.__isset.value) {
    default_fs_ = config_response.value;
  } else {
    default_fs_ = "hdfs://";
  }

  return Status::OK();
}
// Starts the statestore subscriber if one is configured. Must happen after
// all topic registrations / callbacks are done.
Status ExecEnv::StartStatestoreSubscriberService() {
  LOG(INFO) << "Starting statestore subscriber service";
  if (statestore_subscriber_.get() == nullptr) return Status::OK();

  Status start_status = statestore_subscriber_->Start();
  if (start_status.ok()) return Status::OK();

  start_status.AddDetail("Statestore subscriber did not start up.");
  return start_status;
}
// Starts serving KRPC requests on krpc_address_; a no-op when KRPC is
// disabled by flag.
Status ExecEnv::StartKrpcService() {
  if (!FLAGS_use_krpc) return Status::OK();
  LOG(INFO) << "Starting KRPC service";
  RETURN_IF_ERROR(rpc_mgr_->StartServices(krpc_address_));
  return Status::OK();
}
// Creates the process-wide BufferPool and its root ReservationTracker.
// 'min_buffer_size' is the smallest buffer the pool hands out, 'capacity'
// bounds total buffer memory, and 'clean_pages_limit' bounds memory retained
// for clean pages.
void ExecEnv::InitBufferPool(int64_t min_buffer_size, int64_t capacity,
    int64_t clean_pages_limit) {
#if !defined(ADDRESS_SANITIZER) && !defined(THREAD_SANITIZER)
  // Aggressive decommit is required so that unused pages in the TCMalloc page heap are
  // not backed by physical pages and do not contribute towards memory consumption.
  // Enable it in TCMalloc before InitBufferPool().
  MallocExtension::instance()->SetNumericProperty(
      "tcmalloc.aggressive_memory_decommit", 1);
#endif
  buffer_pool_.reset(new BufferPool(min_buffer_size, capacity, clean_pages_limit));
  buffer_reservation_.reset(new ReservationTracker());
  // The root tracker has no parent; its limit is the full pool capacity.
  buffer_reservation_->InitRootTracker(nullptr, capacity);
}
// Returns (via 'client') a shared KuduClient for the given master addresses,
// creating and caching it on first use. The returned pointer is owned by
// kudu_client_map_ and stays valid for the process lifetime.
Status ExecEnv::GetKuduClient(
    const vector<string>& master_addresses, kudu::client::KuduClient** client) {
  string master_addr_concat = join(master_addresses, ",");
  lock_guard<SpinLock> l(kudu_client_map_lock_);
  auto kudu_client_map_it = kudu_client_map_.find(master_addr_concat);
  if (kudu_client_map_it == kudu_client_map_.end()) {
    // KuduClient doesn't exist, create it. Hand ownership of the allocation
    // to the map *before* initialization so it is not leaked if
    // CreateKuduClient() fails (the previous code returned on error while the
    // raw 'new KuduClientPtr' was still unowned).
    KuduClientPtr* kudu_client_ptr = new KuduClientPtr;
    kudu_client_map_[master_addr_concat].reset(kudu_client_ptr);
    Status status = CreateKuduClient(master_addresses, &kudu_client_ptr->kudu_client);
    if (!status.ok()) {
      // Don't cache a half-initialized client.
      kudu_client_map_.erase(master_addr_concat);
      return status;
    }
    *client = kudu_client_ptr->kudu_client.get();
  } else {
    // Return existing KuduClient
    *client = kudu_client_map_it->second->kudu_client.get();
  }
  return Status::OK();
}
// Downcasts the shared stream manager to the Thrift implementation. Only
// valid when KRPC is disabled.
DataStreamMgr* ExecEnv::ThriftStreamMgr() {
  DCHECK(!FLAGS_use_krpc);
  auto* thrift_mgr = dynamic_cast<DataStreamMgr*>(stream_mgr_.get());
  return thrift_mgr;
}
// Downcasts the shared stream manager to the KRPC implementation. Only valid
// when KRPC is enabled.
KrpcDataStreamMgr* ExecEnv::KrpcStreamMgr() {
  DCHECK(FLAGS_use_krpc);
  auto* krpc_mgr = dynamic_cast<KrpcDataStreamMgr*>(stream_mgr_.get());
  return krpc_mgr;
}
} // namespace impala
|
#include "XelaSocket.h"
#include <string.h>
#if defined(__linux__)
// Writes msgLen bytes of msg to the socket's TCP connection.
// Fixed: send() on a stream socket may accept fewer than msgLen bytes per
// call; the original treated any non-negative return as a complete write.
// We now loop until the whole buffer has been handed to the kernel.
void XelaSocket::write_platformTCP(XelaSocket *s, char *msg, int msgLen) {
	int total = 0;
	while (total < msgLen) {
		int n = send(s->sockfd, msg + total, msgLen - total, 0);
		if (n < 0) {
#if XELANETWORK_DEBUG_SOCKET
			std::cout << "[XelaSocket] Error sending message: " << msg << ":" << n << std::endl;
#endif
			return;
		}
		total += n;
	}
#if XELANETWORK_DEBUG_SOCKET
	std::cout << "[XelaSocket] Sent message: " << msg << std::endl;
#endif
}
// Performs a single blocking read from the socket into *buf (cleared first),
// storing the byte count in *resLen and optionally copying the peer ip/port
// strings stored on the socket object.
// Fixed: the debug prints referenced an undeclared variable 'n', which broke
// the build whenever XELANETWORK_DEBUG_SOCKET was enabled; the read result
// lives in *resLen.
void XelaSocket::get_platformTCP(XelaSocket *s, char **buf, int bufLen, int *resLen, char **ip, char **port) {
	bzero(*buf, bufLen);
	*resLen = read(s->sockfd, *buf, bufLen);
	if (*resLen >= 0) {
#if XELANETWORK_DEBUG_SOCKET
		std::cout << "[XelaSocket] Recieved message: " << *resLen << " bytes" << std::endl;
#endif
	}
	else {
#if XELANETWORK_DEBUG_SOCKET
		std::cout << "[XelaSocket] Error reading from socket: " << *resLen << std::endl;
#endif
		return;
	}
	// Optionally report the peer address stored on the socket object.
	if (ip != nullptr) {
		strncpy(*ip, s->ip, IPV4_STRLEN);
	}
	if (port != nullptr) {
		strncpy(*port, s->port, PORT_STRLEN);
	}
}
#endif
|
#include <algorithm>
#include "ArenaLevelUtils.h"
#include "ArenaVoxelUtils.h"
#include "VoxelDefinition.h"
#include "../Assets/ExeData.h"
#include "../Assets/MIFUtils.h"
#include "../Assets/RMDFile.h"
#include "../Math/Random.h"
#include "components/debug/Debug.h"
#include "components/utilities/Bytes.h"
#include "components/utilities/String.h"
uint8_t ArenaLevelUtils::getVoxelMostSigByte(ArenaTypes::VoxelID voxelID)
{
	// Upper data byte: bits 8-14 (bit 15 is excluded by the mask).
	const uint16_t masked = voxelID & 0x7F00;
	return static_cast<uint8_t>(masked >> 8);
}
uint8_t ArenaLevelUtils::getVoxelLeastSigByte(ArenaTypes::VoxelID voxelID)
{
	// Lower data byte: bits 0-6 (bit 7 is excluded by the mask).
	return static_cast<uint8_t>(voxelID & 0x007F);
}
double ArenaLevelUtils::convertCeilingHeightToScale(int ceilingHeight)
{
	// Convert from Arena units to world scale.
	const double height = static_cast<double>(ceilingHeight);
	return height / MIFUtils::ARENA_UNITS;
}
// Height in voxels of a MAP2 voxel, encoded in bits 7 and 15.
// Fixed: the combined-mask check must come first. In the original ordering
// the ((map2Voxel & 0x8080) == 0x8080) branch was unreachable because any
// voxel with both bits set already matched ((map2Voxel & 0x80) == 0x80) and
// returned 2.
int ArenaLevelUtils::getMap2VoxelHeight(ArenaTypes::VoxelID map2Voxel)
{
	if ((map2Voxel & 0x8080) == 0x8080)
	{
		return 4;
	}
	else if ((map2Voxel & 0x80) == 0x80)
	{
		return 2;
	}
	else if ((map2Voxel & 0x8000) == 0x8000)
	{
		return 3;
	}
	else
	{
		return 1;
	}
}
int ArenaLevelUtils::getMap2Height(const BufferView2D<const ArenaTypes::VoxelID> &map2)
{
	DebugAssert(map2.isValid());

	// Tallest MAP2 voxel found anywhere in the level (minimum 1).
	int tallest = 1;
	for (SNInt z = 0; z < map2.getHeight(); z++)
	{
		for (WEInt x = 0; x < map2.getWidth(); x++)
		{
			const uint16_t voxelID = map2.get(x, z);
			tallest = std::max(tallest, ArenaLevelUtils::getMap2VoxelHeight(voxelID));
		}
	}

	return tallest;
}
int ArenaLevelUtils::getMifLevelHeight(const MIFFile::Level &level, const INFFile::CeilingData *ceiling)
{
	const BufferView2D<const ArenaTypes::VoxelID> map2 = level.getMAP2();

	// Without MAP2 data the level is 3 voxels tall when it has a ceiling,
	// otherwise 2; with MAP2 data, add the tallest MAP2 height to the base 2.
	if (!map2.isValid())
	{
		const bool hasCeiling = (ceiling != nullptr) && !ceiling->outdoorDungeon;
		return hasCeiling ? 3 : 2;
	}

	return 2 + ArenaLevelUtils::getMap2Height(map2);
}
uint16_t ArenaLevelUtils::getDoorVoxelOffset(WEInt x, SNInt y)
{
	// Pack Y into the high byte and doubled X into the low bits.
	const int highPart = y << 8;
	const int lowPart = x << 1;
	return static_cast<uint16_t>(highPart + lowPart);
}
// Generates the .MIF filename of the interior behind a door voxel at (x, y),
// or an empty string for menu types with no interior (the caller treats an
// empty result as "don't load anything"). The variant chosen depends on the
// door's world position and, for palaces, on the ruler seed and city type.
std::string ArenaLevelUtils::getDoorVoxelMifName(WEInt x, SNInt y, int menuID, uint32_t rulerSeed,
	bool palaceIsMainQuestDungeon, ArenaTypes::CityType cityType, MapType mapType, const ExeData &exeData)
{
	// Get the menu type associated with the *MENU ID.
	const ArenaTypes::MenuType menuType = ArenaVoxelUtils::getMenuType(menuID, mapType);

	// Check special case first: if it's a palace block in the center province's city,
	// the .MIF name is hardcoded.
	const bool isFinalDungeonEntrance = palaceIsMainQuestDungeon &&
		(menuType == ArenaTypes::MenuType::Palace);
	if (isFinalDungeonEntrance)
	{
		return String::toUppercase(exeData.locations.finalDungeonMifName);
	}

	// Get the prefix associated with the menu type.
	const std::string menuName = [&exeData, cityType, menuType]()
	{
		const std::string name = [&exeData, cityType, menuType]() -> std::string
		{
			// Mappings of menu types to menu .MIF prefix indices. Menus that have no .MIF
			// filename mapping are considered special cases. TOWNPAL and VILPAL are not used
			// since the palace type can be deduced from the current city type.
			constexpr int NO_INDEX = -1;
			constexpr std::array<std::pair<ArenaTypes::MenuType, int>, 12> MenuMifMappings =
			{
				{
					{ ArenaTypes::MenuType::CityGates, NO_INDEX },
					{ ArenaTypes::MenuType::Crypt, 7 },
					{ ArenaTypes::MenuType::Dungeon, NO_INDEX },
					{ ArenaTypes::MenuType::Equipment, 5 },
					{ ArenaTypes::MenuType::House, 1 },
					{ ArenaTypes::MenuType::MagesGuild, 6 },
					{ ArenaTypes::MenuType::Noble, 2 },
					{ ArenaTypes::MenuType::None, NO_INDEX },
					{ ArenaTypes::MenuType::Palace, 0 },
					{ ArenaTypes::MenuType::Tavern, 3 },
					{ ArenaTypes::MenuType::Temple, 4 },
					{ ArenaTypes::MenuType::Tower, 10 }
				}
			};

			// See if the given menu type has a .MIF prefix mapping.
			const auto iter = std::find_if(MenuMifMappings.begin(), MenuMifMappings.end(),
				[menuType](const std::pair<ArenaTypes::MenuType, int> &pair)
			{
				return pair.first == menuType;
			});

			if (iter != MenuMifMappings.end())
			{
				const int index = iter->second;
				if (index != NO_INDEX)
				{
					// Get the menu's .MIF prefix index. If it's a palace, then decide which palace
					// prefix to use based on the location type.
					const int menuMifIndex = [cityType, menuType, index]()
					{
						if (menuType == ArenaTypes::MenuType::Palace)
						{
							// Palace prefix indices: 0 = city-state, 8 = town, 9 = village.
							if (cityType == ArenaTypes::CityType::CityState)
							{
								return 0;
							}
							else if (cityType == ArenaTypes::CityType::Town)
							{
								return 8;
							}
							else if (cityType == ArenaTypes::CityType::Village)
							{
								return 9;
							}
							else
							{
								DebugUnhandledReturnMsg(int, std::to_string(static_cast<int>(cityType)));
							}
						}
						else
						{
							return index;
						}
					}();

					const auto &prefixes = exeData.locations.menuMifPrefixes;
					DebugAssertIndex(prefixes, menuMifIndex);
					return prefixes[menuMifIndex];
				}
				else
				{
					// The menu has no valid .MIF prefix.
					return std::string();
				}
			}
			else
			{
				DebugUnhandledReturnMsg(std::string, std::to_string(static_cast<int>(menuType)));
			}
		}();

		return String::toUppercase(name);
	}();

	// Some menu names don't map to an actual building type, so they are special cases
	// and should be ignored by the caller.
	const bool isSpecialCase = menuName.size() == 0;

	if (isSpecialCase)
	{
		// No .MIF filename. The caller knows not to try and load a .MIF file.
		return std::string();
	}
	else
	{
		// Offset is based on X and Y position in world; used with variant calculation.
		const uint16_t offset = ArenaLevelUtils::getDoorVoxelOffset(x, y);

		// Decide which variant of the interior to use.
		const int variantID = [rulerSeed, offset, menuType]()
		{
			// Palaces have fewer .MIF files to choose from, and their variant depends
			// on the ruler seed. Although there are five city-state palace .MIF files,
			// only three of them are used.
			const bool isPalace = menuType == ArenaTypes::MenuType::Palace;
			const int palaceCount = 3;
			return isPalace ? (((rulerSeed >> 8) & 0xFFFF) % palaceCount) :
				((Bytes::ror(offset, 4) ^ offset) % 8);
		}();

		// Generate .MIF filename.
		return menuName + std::to_string(variantID + 1) + ".MIF";
	}
}
int ArenaLevelUtils::getDoorVoxelLockLevel(WEInt x, SNInt y, ArenaRandom &random)
{
	// Seed the RNG from the door's packed position so the lock level is
	// deterministic for a given voxel.
	const uint16_t positionOffset = ArenaLevelUtils::getDoorVoxelOffset(x, y);
	const uint32_t rngSeed = positionOffset + (positionOffset << 16);
	random.srand(rngSeed);

	// Lock level is in [1, 10].
	const int roll = random.next() % 10;
	return roll + 1;
}
int ArenaLevelUtils::getServiceSaveFileNumber(WEInt doorX, SNInt doorY)
{
	// Pack the door's Y into the high byte and X into the low byte.
	const int packed = (doorY << 8) + doorX;
	return packed;
}
int ArenaLevelUtils::getWildernessServiceSaveFileNumber(int wildX, int wildY)
{
	// Pack the wilderness Y into the high 16 bits and X into the low bits.
	const int packed = (wildY << 16) + wildX;
	return packed;
}
|
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2013-2017 Regents of the University of California.
*
* This file is part of ndn-cxx library (NDN C++ library with eXperimental eXtensions).
*
* ndn-cxx library is free software: you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License as published by the Free Software
* Foundation, either version 3 of the License, or (at your option) any later version.
*
* ndn-cxx library is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
*
* You should have received copies of the GNU General Public License and GNU Lesser
* General Public License along with ndn-cxx, e.g., in COPYING.md file. If not, see
* <http://www.gnu.org/licenses/>.
*
* See AUTHORS.md for complete list of ndn-cxx authors and contributors.
*/
#include "in-memory-storage-persistent.hpp"
namespace ndn {
// Constructs persistent storage without an io_service; delegates all state
// to the InMemoryStorage base class.
InMemoryStoragePersistent::InMemoryStoragePersistent()
  : InMemoryStorage()
{
}
// Constructs persistent storage whose base class uses the supplied
// io_service for its scheduled events.
InMemoryStoragePersistent::InMemoryStoragePersistent(boost::asio::io_service& ioService)
  : InMemoryStorage(ioService)
{
}
// "Persistent" storage never evicts: always report that no item was removed,
// so the base class keeps every inserted entry until it is erased explicitly.
bool
InMemoryStoragePersistent::evictItem()
{
  return false;
}
} // namespace ndn
|
//+----------------------------------------------------------------------------
//
// File: cmcfg.cpp
//
// Module: CMCFG32.DLL
//
// Synopsis: This DLL contains the call CMConfig that transfers information from
// a connectoid created by Connection Wizard to a Connection Manager
// profile. The phone number, username, and password are transferred.
//              If a backup phone number exists in the pszInsFile, it also transfers
//              the backup file. The name of the connectoid to translate is pszDUN.
// The format of the .ins file includes:
//
// [Backup Phone]
// Phone_Number=<TAPI phone number starting with + or literal dial string>
//
// If the number starts with a +, it is assumed to be TAPI formatted
//
// Copyright (c) 1998-1999 Microsoft Corporation
//
// Author: a-frankh created 05/06/97
// nickball cleaned-up 04/08/98
// quintinb deprecated the CMConfig private interface 03/23/01
//
//+----------------------------------------------------------------------------
#include "cmmaster.h"
// DLL instance handle, set in DllMain on process attach.
HINSTANCE g_hInst;
//+---------------------------------------------------------------------------
//
// Function: CMConfig
//
// Synopsis: Transfers user information to CM profile
//
// Arguments: LPCSTR pszInsFile - full pathname of .ins file, pass NULL if no .ins file
// LPCSTR pszDUN - name of connectoid/CM profile
// THE NAME OF THE CONNECTOID AND SERVICE NAME OF THE CM PROFILE MUST MATCH!
//
// Notes: Operates by finding the location of the CM directory. Looks for .cmp files and
// gets the .cms file. Looks in the .cms file for the service name and compares it.
//
//
// Returns: TRUE if successful.
//
// History: a-frankh - Created - 5/6/97
//----------------------------------------------------------------------------
extern "C" BOOL WINAPI CMConfig(LPSTR pszInsFile, LPSTR pszDUN )
{
    // Deprecated stub: asserts in debug builds, then reports
    // ERROR_CALL_NOT_IMPLEMENTED so callers see a conventional failure code.
    CMASSERTMSG(FALSE, TEXT("CMConfig -- The CMConfig Private Interface has been deprecated -- returning failure."));
    SetLastError(ERROR_CALL_NOT_IMPLEMENTED);
    return FALSE;
}
//+----------------------------------------------------------------------------
//
// Function: DllMain
//
// Synopsis: Main entry point into the DLL.
//
// Arguments: HINSTANCE hinstDLL - Our HINSTANCE
// DWORD fdwReason - The reason we are being called.
// LPVOID lpvReserved - Reserved
//
// Returns: BOOL WINAPI - TRUE - always
//
// History: nickball Created Header 4/8/98
//
//+----------------------------------------------------------------------------
extern "C" BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{
    switch (fdwReason)
    {
        case DLL_PROCESS_ATTACH:
            //
            // Remember our instance handle and disable per-thread
            // attach/detach notifications -- we don't need them.
            //
            g_hInst = hinstDLL;
            DisableThreadLibraryCalls(hinstDLL);
            break;

        default:
            break;
    }

    return TRUE;
}
|
//===- unittests/Basic/LaneBasedExecutionQueueTest.cpp --------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "llbuild/Basic/FileSystem.h"
#include "llbuild/Basic/ExecutionQueue.h"
#include "../BuildSystem/TempDir.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FileSystem.h"
#include "gtest/gtest.h"
#include <atomic>
#include <condition_variable>
#include <ctime>
#include <future>
#include <mutex>
using namespace llbuild;
using namespace llbuild::basic;
namespace {
// No-op ExecutionQueueDelegate: the tests below only exercise queue
// scheduling and cancellation, so every delegate notification is ignored.
class DummyDelegate : public ExecutionQueueDelegate {
public:
  DummyDelegate() {}
  virtual void queueJobStarted(JobDescriptor*) override {}
  virtual void queueJobFinished(JobDescriptor*) override {}
  virtual void processStarted(ProcessContext*, ProcessHandle) override {}
  virtual void processHadError(ProcessContext*, ProcessHandle,
                               const Twine& message) override {}
  virtual void processHadOutput(ProcessContext*, ProcessHandle,
                                StringRef data) override {}
  virtual void processFinished(ProcessContext*, ProcessHandle,
                               const ProcessResult& result) override {}
};
class DummyCommand : public JobDescriptor {
public:
DummyCommand() {}
virtual StringRef getOrdinalName() const { return StringRef(""); }
virtual void getShortDescription(SmallVectorImpl<char> &result) const {}
virtual void getVerboseDescription(SmallVectorImpl<char> &result) const {}
};
// Launches a never-terminating `yes` shell command on the queue, waits for
// its output file to appear (proof the job actually started), then cancels
// all jobs and verifies the queue tears down cleanly.
TEST(LaneBasedExecutionQueueTest, basic) {
  DummyDelegate delegate;
  std::unique_ptr<FileSystem> fs = createLocalFileSystem();
  TmpDir tempDir{"LaneBasedExecutionQueueTest"};
  std::string outputFile = tempDir.str() + "/yes-output.txt";
  auto queue = std::unique_ptr<ExecutionQueue>(
      createLaneBasedExecutionQueue(delegate, 2,
                                    SchedulerAlgorithm::NamePriority,
                                    /*environment=*/nullptr));

  auto fn = [&outputFile, &queue](QueueJobContext* context) {
    queue->executeShellCommand(context, "yes >" + outputFile);
  };
  DummyCommand dummyCommand;
  queue->addJob(QueueJob(&dummyCommand, fn));

  // Busy wait until `outputFile` appears which indicates that `yes` is
  // running.
  time_t start = ::time(NULL);
  while (fs->getFileInfo(outputFile).isMissing()) {
    if (::time(NULL) > start + 5) {
      // We can't fail gracefully because the `LaneBasedExecutionQueue` will
      // always wait for spawned processes to exit
      abort();
    }
  }

  queue->cancelAllJobs();
  queue.reset();
}
// Same as `basic`, but runs `yes` with a *relative* output path plus a
// working-directory override pointing at tempDir, verifying that
// executeProcess() honors the requested working directory.
TEST(LaneBasedExecutionQueueTest, workingDirectory) {
  DummyDelegate delegate;
  std::unique_ptr<FileSystem> fs = createLocalFileSystem();
  TmpDir tempDir{"LaneBasedExecutionQueueTest"};
  std::string outputFile = tempDir.str() + "/yes-output.txt";
  auto queue = std::unique_ptr<ExecutionQueue>(
      createLaneBasedExecutionQueue(delegate, 2,
                                    SchedulerAlgorithm::NamePriority,
                                    /*environment=*/nullptr));

  auto fn = [&tempDir, &queue](QueueJobContext* context) {
    std::string yescmd = "yes >yes-output.txt";
    std::vector<StringRef> commandLine(
        { DefaultShellPath, "-c", yescmd.c_str() });
    // Block in the job until the spawned process finishes (it only ends via
    // the cancellation below).
    std::promise<ProcessStatus> p;
    auto result = p.get_future();
    queue->executeProcess(context, commandLine, {}, {true, false, tempDir.str()},
                          {[&p](ProcessResult result) mutable {
                            p.set_value(result.status);
                          }});
    result.get();
  };
  DummyCommand dummyCommand;
  queue->addJob(QueueJob(&dummyCommand, fn));

  // Busy wait until `outputFile` appears which indicates that `yes` is
  // running.
  time_t start = ::time(NULL);
  while (fs->getFileInfo(outputFile).isMissing()) {
    if (::time(NULL) > start + 5) {
      // We can't fail gracefully because the `LaneBasedExecutionQueue` will
      // always wait for spawned processes to exit
      abort();
    }
  }

  queue->cancelAllJobs();
  queue.reset();
}
// Regression test: after cancelAllJobs() is called from inside a running job,
// the queue must still drain (execute) every remaining queued job before it
// can be destroyed. `queueMutex` guards `queue` because jobs touch it from a
// lane thread while the test thread resets it.
TEST(LaneBasedExecutionQueueTest, exhaustsQueueAfterCancellation) {
  DummyDelegate delegate;
  std::mutex queueMutex;
  auto queue = std::unique_ptr<ExecutionQueue>(
      createLaneBasedExecutionQueue(delegate, 1,
                                    SchedulerAlgorithm::NamePriority,
                                    /*environment=*/nullptr));
  bool buildStarted { false };
  std::condition_variable buildStartedCondition;
  std::mutex buildStartedMutex;
  std::atomic<int> executions { 0 };
  auto fn = [&buildStarted, &buildStartedCondition, &buildStartedMutex,
             &executions, &queueMutex, &queue](QueueJobContext* context) {
    executions++;
    {
      std::lock_guard<std::mutex> lock(queueMutex);
      if (queue) { queue->cancelAllJobs(); }
    }
    std::unique_lock<std::mutex> lock(buildStartedMutex);
    buildStarted = true;
    buildStartedCondition.notify_all();
  };

  DummyCommand dummyCommand1;
  DummyCommand dummyCommand2;
  {
    std::lock_guard<std::mutex> lock(queueMutex);
    queue->addJob(QueueJob(&dummyCommand1, fn));
    queue->addJob(QueueJob(&dummyCommand2, fn));
  }

  // Wait until the first job has actually run before tearing the queue down.
  {
    std::unique_lock<std::mutex> lock(buildStartedMutex);
    while (!buildStarted) {
      buildStartedCondition.wait(lock);
    }
  }

  {
    std::lock_guard<std::mutex> lock(queueMutex);
    queue.reset();
  }

  // Busy wait until our executions are done, but also have a timeout in case they never finish
  // NOTE(review): this spin loop burns a CPU core for up to 5 seconds; a
  // short sleep inside the loop would be kinder -- confirm before changing.
  time_t start = ::time(NULL);
  while (executions < 2) {
    if (::time(NULL) > start + 5) {
      break;
    }
  }
  EXPECT_EQ(executions, 2);
}
}
|
#include "ControlMap.h"
using hand = frc::XboxController::JoystickHand; // Only for FRC controllers
// using namespace wml
using namespace wml;
using namespace wml::controllers;
// Binds logical controls to physical controller inputs for the whole group:
// the turret auto-aim axis/button pair and POV-based magazine shifting.
void ControlMap::InitSmartControllerGroup(SmartControllerGroup &contGroup) {
  // Auto-aim: map the trigger axis to the auto-aim buttons, with a deadzone.
  contGroup.GetController(ControlMap::TurretAutoAimAxis.cont).Map(ControlMap::TurretAutoAimAxis, ControlMap::TurretAutoAim, ControlMap::triggerDeadzone);

  // Magazine: POV up shifts the magazine up, POV down shifts it down.
  contGroup.GetController(ControlMap::ShiftMagazinePOV.cont).Map(ControlMap::ShiftMagazinePOV, {
    { Controller::POVPos::kTop, ControlMap::ShiftUpMagazine },
    { Controller::POVPos::kBottom, ControlMap::ShiftDownMagazine }
  });
}
// -------------Defined Ports/Values-------------------
// Controllers
#if __CONTROLMAP_USING_JOYSTICK__
const int ControlMap::JoyController1Port = 0;
const int ControlMap::JoyController2Port = 1;
#else
const int ControlMap::XboxController1Port = 0;
const int ControlMap::XboxController2Port = 1;
const int ControlMap::JoyController3Port = 2;
#endif

// Deadzones (fraction of axis travel below which input is ignored)
const double ControlMap::joyDeadzone = 0.15;
const double ControlMap::xboxDeadzone = 0.1;
const double ControlMap::triggerDeadzone = 0.15;

// PCMs
const int ControlMap::PCModule = 1;

// Drive Left
const int ControlMap::DriveMAXportFL = 12; // 10
const int ControlMap::DriveMAXportBL = 13; // 11
// Drive Right
const int ControlMap::DriveMAXportFR = 10; // 12
const int ControlMap::DriveMAXportBR = 11; // 13

// Drive Gearing
const int ControlMap::ChangeGearPort1 = 0; // 0
const int ControlMap::ChangeGearPort2 = 1; // 1
const int ControlMap::Shift2PTOPort1 = 4; // 4
const int ControlMap::Shift2PTOPort2 = 5; // 5
const double ControlMap::ChangeGearTime = 0;

// Ratchet
// NOTE(review): port 99 appears to be used as an "unassigned" placeholder
// throughout this file -- confirm before deploying to hardware.
const int ControlMap::PTORatchetLeftPort = 99;
const int ControlMap::PTORatchetRightPort = 99;
const double ControlMap::PTORatchetLeftPosition = 0.5;
const double ControlMap::PTORatchetRightPosition = 0.5;

// Drive General Values
const double ControlMap::MaxDrivetrainAcceleration = 0.015; // 0.015
const double ControlMap::MaxDrivetrainSpeed = 1;
const double ControlMap::DriveTestCaseRotations = 50;

// Turret
const int ControlMap::TurretFlyWheelPort = 22; // 20
const int ControlMap::TurretFlyWheelPort2 = 21; // 21
const int ControlMap::TurretAnglePort = 20; // 22
const int ControlMap::TurretRotationPort = 23; // 23
const bool ControlMap::TuneTurretPID = true;
const bool ControlMap::TuneAnglePID = false;
const int ControlMap::TurretLeftLimitPort = 99;
const int ControlMap::TurretRightLimitPort = 99;
const int ControlMap::TurretAngleDownLimitPort = 99;
const bool ControlMap::TurretLeftLimitInvert = false;
const bool ControlMap::TurretRightLimitInvert = false;
const bool ControlMap::TurretAngleDownLimitInvert = false;
const double ControlMap::TurretZeroTimeoutSeconds = 5;
const double ControlMap::TurretEncoderSafeZone = 5;
const double ControlMap::TurretEncoderRotations = 500;
const double ControlMap::MaxAngleEncoderRotations = 500;
const double ControlMap::TurretRatio = 24; // 24:1
const double ControlMap::TurretGearBoxRatio = 40; // 40:1
const double ControlMap::MaxTurretSpeed = 0.3;
const double ControlMap::MaxTurretAngularSpeed = 0.3;
const double ControlMap::FlyWheelVelocity = 50;
const int ControlMap::FlyWheelEncoderPort1 = 1;
const int ControlMap::FlyWheelEncoderPort2 = 2;
const int ControlMap::AngleEncoderPort1 = 3;
const int ControlMap::AngleEncoderPort2 = 4;

// Angle Setpoints (Encoder Values)
const double ControlMap::AngleSetpoint1 = 0;
const double ControlMap::AngleSetpoint2 = 0.1;
const double ControlMap::AngleSetpoint3 = 0.2;
const double ControlMap::AngleSetpoint4 = 0.3;
const double ControlMap::AngleSetpoint5 = 0.4;
const double ControlMap::AngleSetpoint6 = 0.5;
const double ControlMap::AngleSetpoint7 = 0.6;
const double ControlMap::AngleSetpoint8 = 0.7;
const double ControlMap::AngleSetpoint9 = 0.8;
const double ControlMap::AngleSetpoint10 = 0.9;
// Intake
const int ControlMap::IntakeMotorPort = 24; // 24
const int ControlMap::IntakeDownPort1 = 6; // 6
const int ControlMap::IntakeDownPort2 = 7; // 7
const double ControlMap::PannelActuationTime = 0;
const double ControlMap::IntakeDownActuationTime = 0;
const double ControlMap::IntakeTestCaseRotations = 30;

// MagLoader
const int ControlMap::MagLoaderMotorPort = 25; // 25
const int ControlMap::StartMagLimitPort = 1;
const int ControlMap::Position1LimitPort = 3;
const int ControlMap::Position5LimitPort = 2;
// Ball-detection thresholds at the different magazine positions.
const double ControlMap::MagazineBallThreshStart = 650;
const double ControlMap::MagazineBallThreshFinal = 1000; // 1300
const double ControlMap::MagazineBallThreshIndex = 2000;
const double ControlMap::MagTestCaseRotations = 5;

// Climber
const int ControlMap::ClimberActuatorPort1 = 2; // 2
const int ControlMap::ClimberActuatorPort2 = 3; // 3
const double ControlMap::ClimberActuationTime = 2;
const int ControlMap::ClimberMotor1Port = 27; // 27
const int ControlMap::ClimberMotor2Port = 26; // 26
const double ControlMap::ShiftPTOActuationTime = 0.2;
const double ControlMap::LiftMaxSpeed = 0.5;

// Control System
const int ControlMap::PressureSensorPort = 99;
const int ControlMap::CompressorPort = 0;
const int ControlMap::CamFOV = 60;

//Control Pannel
const int ControlMap::ControlPannelPort = 28; // 28
const int ControlMap::ControlPannelUpPort = 29; // 29

// Auto Values (In Meters)
const double ControlMap::AutoGearRatio = 7; // AutoGearRatio:1/output roation (15 = Neo Drive)
const double ControlMap::WheelDiameter = 15.24; // CM

// Auto Speed
const double ControlMap::MaxAutoDrivetrainSpeed = 0.4;
const double ControlMap::MaxAutoTurnSpeed = 0.25;

// LeftDrive (PID gains)
const double ControlMap::DriveKp = 0.02;
const double ControlMap::DriveKi = 0.01;
const double ControlMap::DriveKd = 0;
// -------------Defined Buttons-------------------
// Turret PID Tuner (dev controller only)
const tButton ControlMap::kpUP{ DevController, XboxController::kB };
const tButton ControlMap::kpDOWN{ DevController, XboxController::kA };
const tButton ControlMap::kiUP{ DevController, XboxController::kY };
const tButton ControlMap::kiDOWN{ DevController, XboxController::kX };
const tButton ControlMap::kdUP{ DevController, XboxController::kStart };
const tButton ControlMap::kdDOWN{ DevController, XboxController::kBack };

// Drive System
#if __CONTROLMAP_USING_JOYSTICK__
const tAxis ControlMap::DrivetrainForward{ Driver, Joystick::kYAxis };
const tAxis ControlMap::DrivetrainTurn{ Driver, Joystick::kZAxis };
const tButton ControlMap::ReverseDrivetrain{ Driver, 2 };
const tButton ControlMap::ShiftGears{ Driver, 99 };
#else
const tAxis ControlMap::DrivetrainLeft{ Driver, XboxController::kLeftYAxis };
const tAxis ControlMap::DrivetrainRight{ Driver, XboxController::kRightYAxis };
const tButton ControlMap::ReverseDrivetrain{ Driver, XboxController::kStart };
const tButton ControlMap::ShiftGears{ Driver, XboxController::kBumperRight };
#endif

// Turret
#if __CONTROLMAP_USING_JOYSTICK__
//@todo
#else
const tAxis ControlMap::TurretAutoAimAxis{ CoDriver, XboxController::kLeftThrottle };
// Auto-aim is mapped on both the co-driver and dev controllers.
const std::vector<tButton> ControlMap::TurretAutoAim{ {CoDriver, 30}, {DevController, 30} };
const tAxis ControlMap::TurretManualRotate{ CoDriver, XboxController::kRightXAxis };
const tAxis ControlMap::TurretManualAngle{ CoDriver, XboxController::kLeftYAxis };
const tAxis ControlMap::TurretFlyWheelSpinUp{ CoDriver, XboxController::kRightThrottle };
const tButton ControlMap::TurretFire{ CoDriver, XboxController::kA };
const tButton ControlMap::RevFlyWheel{CoDriver, XboxController::kBack};
const tButton ControlMap::Ball3Fire{ CoDriver, XboxController::kBumperRight}; // just for auto testing
#endif
// Intake
#if __CONTROLMAP_USING_JOYSTICK__
const tButton ControlMap::Intake{ Driver, 11 };
// Fixed: this was a second definition of ControlMap::Intake (a duplicate
// symbol definition - the joystick branch could not compile). Button 12 is
// the outtake binding, mirroring the Xbox branch below.
const tButton ControlMap::Outake{ Driver, 12 };
#else
const tAxis ControlMap::Intake{ CoDriver, XboxController::kRightThrottle };
const tAxis ControlMap::Outake{ CoDriver, XboxController::kLeftThrottle };
const std::vector<tButton> ControlMap::DownIntake{ { CoDriver, XboxController::kX }, { Driver, XboxController::kY } }; // Allows both driver and co driver to access the button
#endif
//Control Pannel
#if __CONTROLMAP_USING_JOYSTICK__
//please change if we ever use a joystick
const tPOV ControlMap::ControlPannelUp{ Driver, 8};
const tPOV ControlMap::SpinControlPannelLeft{ Driver, 7};
const tPOV ControlMap::ControlPannelDown{ Driver, 6};
const tPOV ControlMap::SpinControlPannelRight{ Driver, 5};
#else
// D-pad (POV hat) directions drive control-panel deploy/spin.
const tPOV ControlMap::ControlPannelUp{ Driver, XboxController::kTop};
const tPOV ControlMap::SpinControlPannelLeft{ Driver, XboxController::kLeft};
const tPOV ControlMap::ControlPannelDown{ Driver, XboxController::kBottom};
const tPOV ControlMap::SpinControlPannelRight{ Driver, XboxController::kRight};
#endif
// MagLoader
const tPOV ControlMap::ShiftMagazinePOV{ CoDriver, 0 };
#if __CONTROLMAP_USING_JOYSTICK__
#else
// Fixed: these ids were computed as `__LINE__ + 30`, so the physical button
// mapping silently changed whenever this file was edited, and the resulting
// ids (12000+) were far outside any controller's button range. Use stable
// ids in the same virtual-button space as TurretAutoAim (id 30).
// TODO(review): confirm the intended ids against the button-mapping doc.
const tButton ControlMap::ShiftUpMagazine{ CoDriver, 31 };
const tButton ControlMap::ShiftDownMagazine{ CoDriver, 32 };
const tButton ControlMap::ManualMag{ CoDriver, XboxController::kB };
#endif
//Climber
#if __CONTROLMAP_USING_JOYSTICK__
//please change
const tAxis ControlMap::ClimberControl{ CoDriver, 2 };
#else
// Left/right climber sides are bound to separate stick axes.
const tAxis ControlMap::ClimberControlLeft{ CoDriver, XboxController::kLeftYAxis};
const tAxis ControlMap::ClimberControlRight{ CoDriver, XboxController::kRightYAxis};
const tButton ControlMap::ClimberToggle{ CoDriver, XboxController::kY};
const tButton ControlMap::Shift2PTO{ Driver, XboxController::kBumperLeft };
#endif
|
/*
MIT License
Copyright (c) 2017 Arlen Keshabyan (arlen.albert@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "sharp_tcp.hpp"
#include <iostream>
#include <signal.h>
// Shutdown coordination between the SIGINT handler and main().
std::condition_variable cv;
std::mutex cv_mtx;
std::atomic<bool> interrupted{false};

// SIGINT handler: record the shutdown request and wake main().
// NOTE(review): condition_variable::notify_all is not formally
// async-signal-safe; the timed wait in main() guarantees forward progress
// even if this notification is lost, which makes it acceptable for a demo.
void signint_handler(int) {
    interrupted.store(true);
    cv.notify_all();
}

int main()
{
    std::cout << "Press Ctrl+C to exit..." << std::endl << std::endl;
    nstd::net::tcp_client client;
    client.connect("127.0.0.1", 3001);
    using namespace std::literals;
    std::string msg { "Hello world!"s };
    std::vector<uint8_t> message { std::begin(msg), std::end(msg) };
    client.async_write({ message, nullptr });
    signal(SIGINT, &signint_handler);
    std::unique_lock<std::mutex> lock(cv_mtx);
    // Fixed: a bare cv.wait(lock) exits on any spurious wakeup and hangs
    // forever if SIGINT fires between signal() above and the wait (lost
    // notification). Re-checking the flag on a short timer fixes both.
    while (!interrupted.load()) {
        cv.wait_for(lock, std::chrono::milliseconds(100));
    }
    return 0;
}
|
/*
* npr_demo.cpp
*
* Author:
* Siddharth Kherada <siddharthkherada27[at]gmail[dot]com>
*
* This tutorial demonstrates how to use OpenCV Non-Photorealistic Rendering Module.
* 1) Edge Preserve Smoothing
* -> Using Normalized convolution Filter
* -> Using Recursive Filter
* 2) Detail Enhancement
* 3) Pencil sketch/Color Pencil Drawing
* 4) Stylization
*
*/
#include <signal.h>
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
#include <stdlib.h>
using namespace std;
using namespace cv;
// Interactive demo of the OpenCV Non-Photorealistic Rendering module.
// Usage: npr_demo <input image>
// Returns 0 on success, 1 on bad arguments, unreadable image, or invalid
// menu input (previously these paths called exit(0), reporting success).
int main(int argc, char* argv[])
{
    if(argc < 2)
    {
        cout << "usage: " << argv[0] << " <Input image> " << endl;
        return 1;
    }
    int num, type;
    Mat I = imread(argv[1]);
    if(I.empty())   // idiomatic emptiness check instead of !I.data
    {
        cout << "Image not found" << endl;
        return 1;
    }
    cout << endl;
    cout << " Edge Preserve Filter" << endl;
    cout << "----------------------" << endl;
    cout << "Options: " << endl;
    cout << endl;
    cout << "1) Edge Preserve Smoothing" << endl;
    cout << "   -> Using Normalized convolution Filter" << endl;
    cout << "   -> Using Recursive Filter" << endl;
    cout << "2) Detail Enhancement" << endl;
    cout << "3) Pencil sketch/Color Pencil Drawing" << endl;
    cout << "4) Stylization" << endl;
    cout << endl;
    cout << "Press number 1-4 to choose from above techniques: ";
    // Fixed: on extraction failure `num` (and below, `type`) stayed
    // uninitialized and was then read - undefined behavior. Validate input.
    if(!(cin >> num) || num < 1 || num > 4)
    {
        cout << "Invalid choice" << endl;
        return 1;
    }
    Mat img;
    if(num == 1)
    {
        cout << endl;
        cout << "Press 1 for Normalized Convolution Filter and 2 for Recursive Filter: ";
        if(!(cin >> type) || (type != 1 && type != 2))
        {
            cout << "Invalid filter type" << endl;
            return 1;
        }
        // NOTE(review): the prompt labels 1=NormConv, 2=Recursive, but in
        // OpenCV RECURS_FILTER==1 and NORMCONV_FILTER==2 - confirm which
        // labeling is intended before changing behavior.
        edgePreservingFilter(I,img,type);
        imshow("Edge Preserve Smoothing",img);
    }
    else if(num == 2)
    {
        detailEnhance(I,img);
        imshow("Detail Enhanced",img);
    }
    else if(num == 3)
    {
        Mat img1;
        pencilSketch(I,img1, img, 10 , 0.1f, 0.03f);
        imshow("Pencil Sketch",img1);
        imshow("Color Pencil Sketch",img);
    }
    else // num == 4 (range already validated above)
    {
        stylization(I,img);
        imshow("Stylization",img);
    }
    waitKey(0);
    return 0;
}
|
#ifndef OCCA_LANG_PARSER_ATTRIBUTELOADER_HEADER
#define OCCA_LANG_PARSER_ATTRIBUTELOADER_HEADER
#include <occa/lang/attribute.hpp>
#include <occa/lang/tokenContext.hpp>
#include <occa/lang/variable.hpp>
namespace occa {
namespace lang {
class keywords_t;
class statementContext_t;
class tokenContext_t;
class vartype_t;
// Parses [@attribute] annotations out of a token stream into an
// attributeTokenMap. Instances are constructed only by the free
// loadAttributes() friend below; `success` records whether the most recent
// parse succeeded.
class attributeLoader_t {
private:
    tokenContext_t &tokenContext;     // token stream being consumed
    statementContext_t &smntContext;  // statement scope the attributes attach to
    const keywords_t &keywords;
    nameToAttributeMap &attributeMap; // registry of known attribute definitions
    bool success;

    attributeLoader_t(tokenContext_t &tokenContext_,
                      statementContext_t &smntContext_,
                      const keywords_t &keywords_,
                      nameToAttributeMap &attributeMap_);

    // Loads all attributes at the current position into attrs;
    // presumably returns false on a parse error - confirm in the .cpp.
    bool loadAttributes(attributeTokenMap &attrs);
    void loadAttribute(attributeTokenMap &attrs);
    // Attaches the parsed argument token ranges to attr.
    void setAttributeArgs(attributeToken_t &attr,
                          tokenRangeVector &argRanges);

    friend bool loadAttributes(tokenContext_t &tokenContext,
                               statementContext_t &smntContext,
                               const keywords_t &keywords,
                               nameToAttributeMap &attributeMap,
                               attributeTokenMap &attrs);
    friend attribute_t* getAttribute(nameToAttributeMap &attributeMap,
                                     const std::string &name);
};

// Convenience entry point: runs an attributeLoader_t over tokenContext.
bool loadAttributes(tokenContext_t &tokenContext,
                    statementContext_t &smntContext,
                    const keywords_t &keywords,
                    nameToAttributeMap &attributeMap,
                    attributeTokenMap &attrs);
// Looks up an attribute definition by name in attributeMap.
attribute_t* getAttribute(nameToAttributeMap &attributeMap,
                          const std::string &name);
}
}
#endif
|
// Copyright (c) 2005-2014 Code Synthesis Tools CC
//
// This program was generated by CodeSynthesis XSD, an XML Schema to
// C++ data binding compiler.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
// In addition, as a special exception, Code Synthesis Tools CC gives
// permission to link this program with the Xerces-C++ library (or with
// modified versions of Xerces-C++ that use the same license as Xerces-C++),
// and distribute linked combinations including the two. You must obey
// the GNU General Public License version 2 in all respects for all of
// the code used other than Xerces-C++. If you modify this copy of the
// program, you may extend this exception to your version of the program,
// but you are not obligated to do so. If you do not wish to do so, delete
// this exception statement from your version.
//
// Furthermore, Code Synthesis Tools CC makes a special exception for
// the Free/Libre and Open Source Software (FLOSS) which is described
// in the accompanying FLOSSE file.
//
#ifndef SIM_RESULTS_TEXT_INFO_HXX
#define SIM_RESULTS_TEXT_INFO_HXX
#ifndef XSD_USE_CHAR
#define XSD_USE_CHAR
#endif
#ifndef XSD_CXX_TREE_USE_CHAR
#define XSD_CXX_TREE_USE_CHAR
#endif
// Begin prologue.
//
//
// End prologue.
#include <xsd/cxx/config.hxx>
#if (XSD_INT_VERSION != 4000000L)
#error XSD runtime version mismatch
#endif
#include <xsd/cxx/pre.hxx>
#include <xsd/cxx/xml/char-utf8.hxx>
#include <xsd/cxx/tree/exceptions.hxx>
#include <xsd/cxx/tree/elements.hxx>
#include <xsd/cxx/tree/types.hxx>
#include <xsd/cxx/xml/error-handler.hxx>
#include <xsd/cxx/xml/dom/auto-ptr.hxx>
#include <xsd/cxx/tree/parsing.hxx>
#include <xsd/cxx/tree/parsing/byte.hxx>
#include <xsd/cxx/tree/parsing/unsigned-byte.hxx>
#include <xsd/cxx/tree/parsing/short.hxx>
#include <xsd/cxx/tree/parsing/unsigned-short.hxx>
#include <xsd/cxx/tree/parsing/int.hxx>
#include <xsd/cxx/tree/parsing/unsigned-int.hxx>
#include <xsd/cxx/tree/parsing/long.hxx>
#include <xsd/cxx/tree/parsing/unsigned-long.hxx>
#include <xsd/cxx/tree/parsing/boolean.hxx>
#include <xsd/cxx/tree/parsing/float.hxx>
#include <xsd/cxx/tree/parsing/double.hxx>
#include <xsd/cxx/tree/parsing/decimal.hxx>
// Generated C++/Tree mapping of the XML Schema built-in types (CodeSynthesis
// XSD 4.0). Do not edit by hand - regenerate from the schema instead.
namespace xml_schema
{
  // anyType and anySimpleType.
  //
  typedef ::xsd::cxx::tree::type type;
  typedef ::xsd::cxx::tree::simple_type< char, type > simple_type;
  typedef ::xsd::cxx::tree::type container;
  // 8-bit
  //
  typedef signed char byte;
  typedef unsigned char unsigned_byte;
  // 16-bit
  //
  typedef short short_;
  typedef unsigned short unsigned_short;
  // 32-bit
  //
  typedef int int_;
  typedef unsigned int unsigned_int;
  // 64-bit
  //
  typedef long long long_;
  typedef unsigned long long unsigned_long;
  // Supposed to be arbitrary-length integral types.
  //
  typedef long long integer;
  typedef long long non_positive_integer;
  typedef unsigned long long non_negative_integer;
  typedef unsigned long long positive_integer;
  typedef long long negative_integer;
  // Boolean.
  //
  typedef bool boolean;
  // Floating-point types.
  //
  typedef float float_;
  typedef double double_;
  typedef double decimal;
  // String types.
  //
  typedef ::xsd::cxx::tree::string< char, simple_type > string;
  typedef ::xsd::cxx::tree::normalized_string< char, string > normalized_string;
  typedef ::xsd::cxx::tree::token< char, normalized_string > token;
  typedef ::xsd::cxx::tree::name< char, token > name;
  typedef ::xsd::cxx::tree::nmtoken< char, token > nmtoken;
  typedef ::xsd::cxx::tree::nmtokens< char, simple_type, nmtoken > nmtokens;
  typedef ::xsd::cxx::tree::ncname< char, name > ncname;
  typedef ::xsd::cxx::tree::language< char, token > language;
  // ID/IDREF.
  //
  typedef ::xsd::cxx::tree::id< char, ncname > id;
  typedef ::xsd::cxx::tree::idref< char, ncname, type > idref;
  typedef ::xsd::cxx::tree::idrefs< char, simple_type, idref > idrefs;
  // URI.
  //
  typedef ::xsd::cxx::tree::uri< char, simple_type > uri;
  // Qualified name.
  //
  typedef ::xsd::cxx::tree::qname< char, simple_type, uri, ncname > qname;
  // Binary.
  //
  typedef ::xsd::cxx::tree::buffer< char > buffer;
  typedef ::xsd::cxx::tree::base64_binary< char, simple_type > base64_binary;
  typedef ::xsd::cxx::tree::hex_binary< char, simple_type > hex_binary;
  // Date/time.
  //
  typedef ::xsd::cxx::tree::time_zone time_zone;
  typedef ::xsd::cxx::tree::date< char, simple_type > date;
  typedef ::xsd::cxx::tree::date_time< char, simple_type > date_time;
  typedef ::xsd::cxx::tree::duration< char, simple_type > duration;
  typedef ::xsd::cxx::tree::gday< char, simple_type > gday;
  typedef ::xsd::cxx::tree::gmonth< char, simple_type > gmonth;
  typedef ::xsd::cxx::tree::gmonth_day< char, simple_type > gmonth_day;
  typedef ::xsd::cxx::tree::gyear< char, simple_type > gyear;
  typedef ::xsd::cxx::tree::gyear_month< char, simple_type > gyear_month;
  typedef ::xsd::cxx::tree::time< char, simple_type > time;
  // Entity.
  //
  typedef ::xsd::cxx::tree::entity< char, ncname > entity;
  typedef ::xsd::cxx::tree::entities< char, simple_type, entity > entities;
  typedef ::xsd::cxx::tree::content_order content_order;
  // Flags and properties.
  //
  typedef ::xsd::cxx::tree::flags flags;
  typedef ::xsd::cxx::tree::properties< char > properties;
  // Parsing/serialization diagnostics.
  //
  typedef ::xsd::cxx::tree::severity severity;
  typedef ::xsd::cxx::tree::error< char > error;
  typedef ::xsd::cxx::tree::diagnostics< char > diagnostics;
  // Exceptions.
  //
  typedef ::xsd::cxx::tree::exception< char > exception;
  typedef ::xsd::cxx::tree::bounds< char > bounds;
  typedef ::xsd::cxx::tree::duplicate_id< char > duplicate_id;
  typedef ::xsd::cxx::tree::parsing< char > parsing;
  typedef ::xsd::cxx::tree::expected_element< char > expected_element;
  typedef ::xsd::cxx::tree::unexpected_element< char > unexpected_element;
  typedef ::xsd::cxx::tree::expected_attribute< char > expected_attribute;
  typedef ::xsd::cxx::tree::unexpected_enumerator< char > unexpected_enumerator;
  typedef ::xsd::cxx::tree::expected_text_content< char > expected_text_content;
  typedef ::xsd::cxx::tree::no_prefix_mapping< char > no_prefix_mapping;
  typedef ::xsd::cxx::tree::no_type_info< char > no_type_info;
  typedef ::xsd::cxx::tree::not_derived< char > not_derived;
  // Error handler callback interface.
  //
  typedef ::xsd::cxx::xml::error_handler< char > error_handler;
  // DOM interaction.
  //
  namespace dom
  {
    // Automatic pointer for DOMDocument.
    // NOTE(review): this (like ::std::auto_ptr elsewhere in this header)
    // ties the generated code to pre-C++17 language modes.
    //
    using ::xsd::cxx::xml::dom::auto_ptr;
#ifndef XSD_CXX_TREE_TREE_NODE_KEY__XML_SCHEMA
#define XSD_CXX_TREE_TREE_NODE_KEY__XML_SCHEMA
    // DOM user data key for back pointers to tree nodes.
    //
    const XMLCh* const tree_node_key = ::xsd::cxx::tree::user_data_keys::node;
#endif
  }
}
// Forward declarations.
//
namespace schema
{
namespace simxml
{
namespace ResourcesGeneral
{
class SimResultsTextInfo;
}
}
}
#include <memory> // ::std::auto_ptr
#include <limits> // std::numeric_limits
#include <algorithm> // std::binary_search
#include <xsd/cxx/xml/char-utf8.hxx>
#include <xsd/cxx/tree/exceptions.hxx>
#include <xsd/cxx/tree/elements.hxx>
#include <xsd/cxx/tree/containers.hxx>
#include <xsd/cxx/tree/list.hxx>
#include <xsd/cxx/xml/dom/parsing-header.hxx>
#include "simresultsvisualization.hxx"
namespace schema
{
namespace simxml
{
namespace ResourcesGeneral
{
// Generated C++/Tree class for the SimResultsTextInfo schema element: text
// styling (content, bold/italic/underline, color, font, size, alignment) on
// top of SimResultsVisualization. Every element is optional and follows the
// standard XSD accessor pattern: const/non-const getters returning the
// optional container, plus value/optional/auto_ptr setters.
// Do not edit by hand - regenerate from the schema instead.
class SimResultsTextInfo: public ::schema::simxml::ResourcesGeneral::SimResultsVisualization
{
  public:
  // Text
  //
  typedef ::xml_schema::string Text_type;
  typedef ::xsd::cxx::tree::optional< Text_type > Text_optional;
  typedef ::xsd::cxx::tree::traits< Text_type, char > Text_traits;
  const Text_optional&
  Text () const;
  Text_optional&
  Text ();
  void
  Text (const Text_type& x);
  void
  Text (const Text_optional& x);
  void
  Text (::std::auto_ptr< Text_type > p);
  // Bold
  //
  typedef ::xml_schema::boolean Bold_type;
  typedef ::xsd::cxx::tree::optional< Bold_type > Bold_optional;
  typedef ::xsd::cxx::tree::traits< Bold_type, char > Bold_traits;
  const Bold_optional&
  Bold () const;
  Bold_optional&
  Bold ();
  void
  Bold (const Bold_type& x);
  void
  Bold (const Bold_optional& x);
  // Italic
  //
  typedef ::xml_schema::boolean Italic_type;
  typedef ::xsd::cxx::tree::optional< Italic_type > Italic_optional;
  typedef ::xsd::cxx::tree::traits< Italic_type, char > Italic_traits;
  const Italic_optional&
  Italic () const;
  Italic_optional&
  Italic ();
  void
  Italic (const Italic_type& x);
  void
  Italic (const Italic_optional& x);
  // UnderLine
  //
  typedef ::xml_schema::boolean UnderLine_type;
  typedef ::xsd::cxx::tree::optional< UnderLine_type > UnderLine_optional;
  typedef ::xsd::cxx::tree::traits< UnderLine_type, char > UnderLine_traits;
  const UnderLine_optional&
  UnderLine () const;
  UnderLine_optional&
  UnderLine ();
  void
  UnderLine (const UnderLine_type& x);
  void
  UnderLine (const UnderLine_optional& x);
  // FontColor
  //
  typedef ::xml_schema::string FontColor_type;
  typedef ::xsd::cxx::tree::optional< FontColor_type > FontColor_optional;
  typedef ::xsd::cxx::tree::traits< FontColor_type, char > FontColor_traits;
  const FontColor_optional&
  FontColor () const;
  FontColor_optional&
  FontColor ();
  void
  FontColor (const FontColor_type& x);
  void
  FontColor (const FontColor_optional& x);
  void
  FontColor (::std::auto_ptr< FontColor_type > p);
  // TextFont
  //
  typedef ::xml_schema::string TextFont_type;
  typedef ::xsd::cxx::tree::optional< TextFont_type > TextFont_optional;
  typedef ::xsd::cxx::tree::traits< TextFont_type, char > TextFont_traits;
  const TextFont_optional&
  TextFont () const;
  TextFont_optional&
  TextFont ();
  void
  TextFont (const TextFont_type& x);
  void
  TextFont (const TextFont_optional& x);
  void
  TextFont (::std::auto_ptr< TextFont_type > p);
  // FontSize
  //
  typedef ::xml_schema::double_ FontSize_type;
  typedef ::xsd::cxx::tree::optional< FontSize_type > FontSize_optional;
  typedef ::xsd::cxx::tree::traits< FontSize_type, char, ::xsd::cxx::tree::schema_type::double_ > FontSize_traits;
  const FontSize_optional&
  FontSize () const;
  FontSize_optional&
  FontSize ();
  void
  FontSize (const FontSize_type& x);
  void
  FontSize (const FontSize_optional& x);
  // Alignment
  //
  typedef ::xml_schema::string Alignment_type;
  typedef ::xsd::cxx::tree::optional< Alignment_type > Alignment_optional;
  typedef ::xsd::cxx::tree::traits< Alignment_type, char > Alignment_traits;
  const Alignment_optional&
  Alignment () const;
  Alignment_optional&
  Alignment ();
  void
  Alignment (const Alignment_type& x);
  void
  Alignment (const Alignment_optional& x);
  void
  Alignment (::std::auto_ptr< Alignment_type > p);
  // Constructors.
  //
  SimResultsTextInfo ();
  SimResultsTextInfo (const RefId_type&);
  SimResultsTextInfo (const ::xercesc::DOMElement& e,
                      ::xml_schema::flags f = 0,
                      ::xml_schema::container* c = 0);
  SimResultsTextInfo (const SimResultsTextInfo& x,
                      ::xml_schema::flags f = 0,
                      ::xml_schema::container* c = 0);
  virtual SimResultsTextInfo*
  _clone (::xml_schema::flags f = 0,
          ::xml_schema::container* c = 0) const;
  SimResultsTextInfo&
  operator= (const SimResultsTextInfo& x);
  virtual
  ~SimResultsTextInfo ();
  // Implementation.
  //
  protected:
  void
  parse (::xsd::cxx::xml::dom::parser< char >&,
         ::xml_schema::flags);
  protected:
  Text_optional Text_;
  Bold_optional Bold_;
  Italic_optional Italic_;
  UnderLine_optional UnderLine_;
  FontColor_optional FontColor_;
  TextFont_optional TextFont_;
  FontSize_optional FontSize_;
  Alignment_optional Alignment_;
};
}
}
}
#include <iosfwd>
#include <xercesc/sax/InputSource.hpp>
#include <xercesc/dom/DOMDocument.hpp>
#include <xercesc/dom/DOMErrorHandler.hpp>
namespace schema
{
namespace simxml
{
namespace ResourcesGeneral
{
}
}
}
#include <xsd/cxx/post.hxx>
// Begin epilogue.
//
//
// End epilogue.
#endif // SIM_RESULTS_TEXT_INFO_HXX
|
/*
* Copyright (c) 2017, Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "user_event.h"
#include "runtime/command_stream/command_stream_receiver.h"
#include "runtime/context/context.h"
#include "runtime/device/device.h"
#include "runtime/command_queue/command_queue.h"
namespace OCLRT {
// User events are not attached to a command queue (nullptr) and carry the
// CL_COMMAND_USER command type; both trailing Event ctor arguments start at
// eventNotReady (presumably task level / task count - confirm against the
// Event constructor). The API-visible status starts as CL_QUEUED.
UserEvent::UserEvent(Context *ctx)
    : Event(ctx, nullptr, CL_COMMAND_USER, eventNotReady, eventNotReady) {
    transitionExecutionStatus(CL_QUEUED);
}
// Intentionally a no-op: a user event has no queued device work whose
// status could be polled here.
void UserEvent::updateExecutionStatus() {
}
// Polls until the event reports completion. Non-blocking calls give up
// after the first unsuccessful check; blocking calls spin until done.
bool UserEvent::wait(bool blocking) {
    for (;;) {
        if (updateStatusAndCheckCompletion()) {
            return true;
        }
        if (!blocking) {
            return false;
        }
    }
}
// Reports the command-stream receiver's current task level, or 0 when the
// event has no context (and therefore no device to query).
uint32_t UserEvent::getTaskLevel() {
    if (ctx == nullptr) {
        return 0;
    }
    Device *device = ctx->getDevice(0);
    return device->getCommandStreamReceiver().peekTaskLevel();
}
// True while the event still carries its construction-time CL_QUEUED status.
bool UserEvent::isInitialEventStatus() const {
    return CL_QUEUED == executionStatus;
}
// Virtual events are internal helper events: command type -1 (no real CL
// command), bound to a command queue, starting not-ready with status
// CL_QUEUED. They are converted to internal objects so they do not
// participate in API reference counting.
VirtualEvent::VirtualEvent(CommandQueue *cmdQ, Context *ctx)
    : Event(ctx, cmdQ, -1, eventNotReady, eventNotReady) {
    transitionExecutionStatus(CL_QUEUED);
    // internal object - no need for API refcount
    convertToInternalObject();
}
// Intentionally a no-op: virtual events change status only via setStatus().
void VirtualEvent::updateExecutionStatus() {
}
// Polls until completion; a non-blocking call bails out after one
// unsuccessful check.
bool VirtualEvent::wait(bool blocking) {
    bool complete = updateStatusAndCheckCompletion();
    while (!complete) {
        if (!blocking) {
            return false;
        }
        complete = updateStatusAndCheckCompletion();
    }
    return true;
}
// Mirrors UserEvent::getTaskLevel: ask device 0's command-stream receiver,
// or report 0 when there is no context to query.
uint32_t VirtualEvent::getTaskLevel() {
    if (ctx == nullptr) {
        return 0;
    }
    Device *device = ctx->getDevice(0);
    return device->getCommandStreamReceiver().peekTaskLevel();
}
// Coerces any non-termination status to CL_COMPLETE before delegating:
// virtual events are just helper events and will have either
// "waiting" (after construction) or "complete" (on change if not blocked)
// execution state. Negative (termination) codes pass through unchanged.
bool VirtualEvent::setStatus(cl_int status) {
    if (isStatusCompletedByTermination(&status) == false) {
        status = CL_COMPLETE;
    }
    return Event::setStatus(status);
}
} // namespace OCLRT
|
//---------------------------------------------------------------------------//
// Copyright (c) 2018-2021 Mikhail Komarov <nemo@nil.foundation>
// Copyright (c) 2020-2021 Nikita Kaskov <nbering@nil.foundation>
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//---------------------------------------------------------------------------//
// @file Declaration of interfaces for auxiliary components for the SHA256 component.
//---------------------------------------------------------------------------//
#ifndef CRYPTO3_ZK_BLUEPRINT_SHA256_AUX_HPP
#define CRYPTO3_ZK_BLUEPRINT_SHA256_AUX_HPP
#include <nil/crypto3/zk/components/packing.hpp>
#include <nil/crypto3/zk/components/blueprint_variable.hpp>
namespace nil {
namespace crypto3 {
namespace zk {
namespace components {
// Constrains `result` (packed from result_bits) to equal the low
// result_bits.size() bits of X, where X is assumed to fit in X_bits bits.
// The discarded high bits are allocated as fresh throwaway variables so
// that the full X_bits decomposition of X is still constrained.
template<typename FieldType>
class lastbits_component : public component<FieldType> {
public:
    blueprint_variable<FieldType> X;
    std::size_t X_bits;  // total bit width X is assumed to fit in
    blueprint_variable<FieldType> result;
    blueprint_linear_combination_vector<FieldType> result_bits;  // kept low bits
    blueprint_linear_combination_vector<FieldType> full_bits;    // result_bits + overflow bits
    std::shared_ptr<packing_component<FieldType>> unpack_bits;   // X <-> full_bits
    std::shared_ptr<packing_component<FieldType>> pack_result;   // result <-> result_bits

    lastbits_component(blueprint<FieldType> &bp,
                       const blueprint_variable<FieldType> &X,
                       std::size_t X_bits,
                       const blueprint_variable<FieldType> &result,
                       const blueprint_linear_combination_vector<FieldType> &result_bits) :
        component<FieldType>(bp),
        X(X), X_bits(X_bits), result(result), result_bits(result_bits) {
        // Extend the kept low bits with fresh variables for the discarded
        // high bits so that X can be fully unpacked.
        full_bits = result_bits;
        for (std::size_t i = result_bits.size(); i < X_bits; ++i) {
            blueprint_variable<FieldType> full_bits_overflow;
            full_bits_overflow.allocate(bp);
            full_bits.emplace_back(full_bits_overflow);
        }
        unpack_bits.reset(new packing_component<FieldType>(bp, full_bits, X));
        pack_result.reset(new packing_component<FieldType>(bp, result_bits, result));
    }
    void generate_r1cs_constraints() {
        // The `true`/`false` flags are forwarded to packing_component
        // (presumably whether to also enforce bit booleanity - confirm there).
        unpack_bits->generate_r1cs_constraints(true);
        pack_result->generate_r1cs_constraints(false);
    }
    void generate_r1cs_witness() {
        unpack_bits->generate_r1cs_witness_from_packed();
        pack_result->generate_r1cs_witness_from_bits();
    }
};
// Computes out = A xor B xor C for boolean-valued inputs using the identity
// x xor y = x + y - 2xy, via the intermediate tmp = A xor B. When
// assume_C_is_zero is set, out = A xor B directly and tmp is never
// allocated (one constraint instead of two).
template<typename FieldType>
class XOR3_component : public component<FieldType> {
private:
    blueprint_variable<FieldType> tmp;  // A xor B (only when C may be nonzero)

public:
    blueprint_linear_combination<FieldType> A;
    blueprint_linear_combination<FieldType> B;
    blueprint_linear_combination<FieldType> C;
    bool assume_C_is_zero;
    blueprint_linear_combination<FieldType> out;

    XOR3_component(blueprint<FieldType> &bp,
                   const blueprint_linear_combination<FieldType> &A,
                   const blueprint_linear_combination<FieldType> &B,
                   const blueprint_linear_combination<FieldType> &C,
                   bool assume_C_is_zero,
                   const blueprint_linear_combination<FieldType> &out) :
        component<FieldType>(bp),
        A(A), B(B), C(C), assume_C_is_zero(assume_C_is_zero), out(out) {
        if (!assume_C_is_zero) {
            tmp.allocate(bp);
        }
    }
    void generate_r1cs_constraints() {
        /*
          tmp = A + B - 2AB i.e. tmp = A xor B
          out = tmp + C - 2tmp C i.e. out = tmp xor C
        */
        if (assume_C_is_zero) {
            this->bp.add_r1cs_constraint(snark::r1cs_constraint<FieldType>(2 * A, B, A + B - out));
        } else {
            this->bp.add_r1cs_constraint(snark::r1cs_constraint<FieldType>(2 * A, B, A + B - tmp));
            this->bp.add_r1cs_constraint(snark::r1cs_constraint<FieldType>(2 * tmp, C, tmp + C - out));
        }
    }
    void generate_r1cs_witness() {
        if (assume_C_is_zero) {
            this->bp.lc_val(out) =
                this->bp.lc_val(A) + this->bp.lc_val(B) -
                typename FieldType::value_type(0x02) * this->bp.lc_val(A) * this->bp.lc_val(B);
        } else {
            this->bp.val(tmp) =
                this->bp.lc_val(A) + this->bp.lc_val(B) -
                typename FieldType::value_type(0x02) * this->bp.lc_val(A) * this->bp.lc_val(B);
            this->bp.lc_val(out) =
                this->bp.val(tmp) + this->bp.lc_val(C) -
                typename FieldType::value_type(0x02) * this->bp.val(tmp) * this->bp.lc_val(C);
        }
    }
};
// Selects bit (i + k) mod 32 of A, i.e. bit i of the 32-bit word A rotated
// right by k (bit vectors here are indexed LSB-first).
#define SHA256_COMPONENT_ROTR(A, i, k) A[((i) + (k)) % 32]
/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
// SHA-256 sigma function:
//   result = ROTR^rot1(W) xor ROTR^rot2(W) xor SHR^shift(W)
// Each of the 32 result bits is one XOR3 component; positions shifted out
// by SHR contribute a constant 0 (XOR3's assume_C_is_zero fast path).
template<typename FieldType>
class small_sigma_component : public component<FieldType> {
private:
    blueprint_variable_vector<FieldType> W;  // 32-bit input word, as bits
    blueprint_variable<FieldType> result;    // packed 32-bit output

public:
    blueprint_variable_vector<FieldType> result_bits;
    std::vector<std::shared_ptr<XOR3_component<FieldType>>> compute_bits;
    std::shared_ptr<packing_component<FieldType>> pack_result;

    small_sigma_component(blueprint<FieldType> &bp,
                          const blueprint_variable_vector<FieldType> &W,
                          const blueprint_variable<FieldType> &result,
                          std::size_t rot1,
                          std::size_t rot2,
                          std::size_t shift) :
        component<FieldType>(bp),
        W(W), result(result) {
        result_bits.allocate(bp, 32);
        compute_bits.resize(32);
        for (std::size_t i = 0; i < 32; ++i) {
            compute_bits[i].reset(new XOR3_component<FieldType>(
                bp, SHA256_COMPONENT_ROTR(W, i, rot1), SHA256_COMPONENT_ROTR(W, i, rot2),
                (i + shift < 32 ? W[i + shift] : blueprint_variable<FieldType>(0)), (i + shift >= 32),
                result_bits[i]));
        }
        pack_result.reset(new packing_component<FieldType>(bp, result_bits, result));
    }
    void generate_r1cs_constraints() {
        for (std::size_t i = 0; i < 32; ++i) {
            compute_bits[i]->generate_r1cs_constraints();
        }
        pack_result->generate_r1cs_constraints(false);
    }
    void generate_r1cs_witness() {
        for (std::size_t i = 0; i < 32; ++i) {
            compute_bits[i]->generate_r1cs_witness();
        }
        pack_result->generate_r1cs_witness_from_bits();
    }
};
/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
// SHA-256 Sigma function:
//   result = ROTR^rot1(W) xor ROTR^rot2(W) xor ROTR^rot3(W)
// Like small_sigma_component but with three rotations and no shift, so no
// bit position is ever constant zero.
template<typename FieldType>
class big_sigma_component : public component<FieldType> {
private:
    blueprint_linear_combination_vector<FieldType> W;  // 32-bit input word, as bits
    blueprint_variable<FieldType> result;              // packed 32-bit output

public:
    blueprint_variable_vector<FieldType> result_bits;
    std::vector<std::shared_ptr<XOR3_component<FieldType>>> compute_bits;
    std::shared_ptr<packing_component<FieldType>> pack_result;

    big_sigma_component(blueprint<FieldType> &bp,
                        const blueprint_linear_combination_vector<FieldType> &W,
                        const blueprint_variable<FieldType> &result,
                        std::size_t rot1,
                        std::size_t rot2,
                        std::size_t rot3) :
        component<FieldType>(bp),
        W(W), result(result) {
        result_bits.allocate(bp, 32);
        compute_bits.resize(32);
        for (std::size_t i = 0; i < 32; ++i) {
            compute_bits[i].reset(new XOR3_component<FieldType>(
                bp, SHA256_COMPONENT_ROTR(W, i, rot1), SHA256_COMPONENT_ROTR(W, i, rot2),
                SHA256_COMPONENT_ROTR(W, i, rot3), false, result_bits[i]));
        }
        pack_result.reset(new packing_component<FieldType>(bp, result_bits, result));
    }
    void generate_r1cs_constraints() {
        for (std::size_t i = 0; i < 32; ++i) {
            compute_bits[i]->generate_r1cs_constraints();
        }
        pack_result->generate_r1cs_constraints(false);
    }
    void generate_r1cs_witness() {
        for (std::size_t i = 0; i < 32; ++i) {
            compute_bits[i]->generate_r1cs_witness();
        }
        pack_result->generate_r1cs_witness_from_bits();
    }
};
/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
// SHA-256 choice function Ch(X, Y, Z) = (X and Y) xor ((not X) and Z) -
// bitwise "X ? Y : Z". One R1CS constraint per bit plus a packing component.
template<typename FieldType>
class choice_component : public component<FieldType> {
private:
    blueprint_variable_vector<FieldType> result_bits;

public:
    blueprint_linear_combination_vector<FieldType> X;
    blueprint_linear_combination_vector<FieldType> Y;
    blueprint_linear_combination_vector<FieldType> Z;
    blueprint_variable<FieldType> result;  // packed 32-bit output
    std::shared_ptr<packing_component<FieldType>> pack_result;

    choice_component(blueprint<FieldType> &bp,
                     const blueprint_linear_combination_vector<FieldType> &X,
                     const blueprint_linear_combination_vector<FieldType> &Y,
                     const blueprint_linear_combination_vector<FieldType> &Z,
                     const blueprint_variable<FieldType> &result) :
        component<FieldType>(bp),
        X(X), Y(Y), Z(Z), result(result) {
        result_bits.allocate(bp, 32);
        pack_result.reset(new packing_component<FieldType>(bp, result_bits, result));
    }
    void generate_r1cs_constraints() {
        for (std::size_t i = 0; i < 32; ++i) {
            /*
              result = x * y + (1-x) * z
              result - z = x * (y - z)
            */
            this->bp.add_r1cs_constraint(
                snark::r1cs_constraint<FieldType>(X[i], Y[i] - Z[i], result_bits[i] - Z[i]));
        }
        pack_result->generate_r1cs_constraints(false);
    }
    void generate_r1cs_witness() {
        for (std::size_t i = 0; i < 32; ++i) {
            this->bp.val(result_bits[i]) =
                this->bp.lc_val(X[i]) * this->bp.lc_val(Y[i]) +
                (FieldType::value_type::one() - this->bp.lc_val(X[i])) * this->bp.lc_val(Z[i]);
        }
        pack_result->generate_r1cs_witness_from_bits();
    }
};
/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
// SHA-256 "majority" function Maj(x, y, z): each output bit is 1 iff at
// least two of the three input bits are 1. Encoded via the identity
// x + y + z = 2*result + aux, where aux = x+y+z-2*result must itself be a
// bit — enforced by the constraint aux * (1 - aux) = 0 below.
template<typename FieldType>
class majority_component : public component<FieldType> {
private:
blueprint_variable_vector<FieldType> result_bits;
std::shared_ptr<packing_component<FieldType>> pack_result;
public:
blueprint_linear_combination_vector<FieldType> X;
blueprint_linear_combination_vector<FieldType> Y;
blueprint_linear_combination_vector<FieldType> Z;
blueprint_variable<FieldType> result;
// Allocates the 32 result bits and the packer that combines them into `result`.
majority_component(blueprint<FieldType> &bp,
const blueprint_linear_combination_vector<FieldType> &X,
const blueprint_linear_combination_vector<FieldType> &Y,
const blueprint_linear_combination_vector<FieldType> &Z,
const blueprint_variable<FieldType> &result) :
component<FieldType>(bp),
X(X), Y(Y), Z(Z), result(result) {
result_bits.allocate(bp, 32);
pack_result.reset(new packing_component<FieldType>(bp, result_bits, result));
}
void generate_r1cs_constraints() {
for (std::size_t i = 0; i < 32; ++i) {
/*
2*result + aux = x + y + z
x, y, z, aux -- bits
aux = x + y + z - 2*result
*/
// result_bits[i] is a fresh variable here, so its bitness must be
// asserted explicitly (unlike choice_component, where it is implied).
generate_boolean_r1cs_constraint<FieldType>(this->bp, result_bits[i]);
this->bp.add_r1cs_constraint(
snark::r1cs_constraint<FieldType>(X[i] + Y[i] + Z[i] - 2 * result_bits[i],
1 - (X[i] + Y[i] + Z[i] - 2 * result_bits[i]), 0));
}
pack_result->generate_r1cs_constraints(false);
}
void generate_r1cs_witness() {
// temporary added until fixed-precision modular adaptor is ready:
typedef nil::crypto3::multiprecision::number<
nil::crypto3::multiprecision::backends::cpp_int_backend<>>
non_fixed_precision_modulus_type;
using modulus_type = typename FieldType::modulus_type;
for (std::size_t i = 0; i < 32; ++i) {
// For bits, majority(x,y,z) == floor((x+y+z)/2); computed in arbitrary
// precision because the field type lacks integer division here.
const non_fixed_precision_modulus_type v = non_fixed_precision_modulus_type(
(this->bp.lc_val(X[i]) + this->bp.lc_val(Y[i]) + this->bp.lc_val(Z[i])).data);
this->bp.val(result_bits[i]) = typename FieldType::value_type(modulus_type(v / 2));
}
pack_result->generate_r1cs_witness_from_bits();
}
};
} // namespace components
} // namespace zk
} // namespace crypto3
} // namespace nil
#endif // CRYPTO3_ZK_BLUEPRINT_SHA256_AUX_HPP
|
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2020 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Andrey Abramov
/// @author Vasiliy Nabatchikov
////////////////////////////////////////////////////////////////////////////////
#include <string>
#include "AqlHelper.h"
#include "Aql/Ast.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Expression.h"
#include "Aql/ExpressionContext.h"
#include "Aql/Function.h"
#include "Aql/Variable.h"
#include "Basics/fasthash.h"
#include "IResearchCommon.h"
#include "IResearchDocument.h"
#include "Logger/LogMacros.h"
#include "Misc.h"
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
namespace {
// Mirror table for binary comparison operators: maps an operator to the
// equivalent operator after swapping its operands. Indexed by
// (type - NODE_TYPE_OPERATOR_BINARY_EQ); used by normalizeCmpNode and
// normalizeGeoDistanceCmpNode when the attribute access / function call
// appears on the right-hand side.
arangodb::aql::AstNodeType const CmpMap[]{
arangodb::aql::NODE_TYPE_OPERATOR_BINARY_EQ, // NODE_TYPE_OPERATOR_BINARY_EQ: 3 == a <==> a == 3
arangodb::aql::NODE_TYPE_OPERATOR_BINARY_NE, // NODE_TYPE_OPERATOR_BINARY_NE: 3 != a <==> a != 3
arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GT, // NODE_TYPE_OPERATOR_BINARY_LT: 3 < a <==> a > 3
arangodb::aql::NODE_TYPE_OPERATOR_BINARY_GE, // NODE_TYPE_OPERATOR_BINARY_LE: 3 <= a <==> a >= 3
arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LT, // NODE_TYPE_OPERATOR_BINARY_GT: 3 > a <==> a < 3
arangodb::aql::NODE_TYPE_OPERATOR_BINARY_LE // NODE_TYPE_OPERATOR_BINARY_GE: 3 >= a <==> a <= 3
};
}
namespace arangodb {
namespace iresearch {
// Deep structural equality for AST nodes: same type, same member count,
// recursively equal members, and — per node type — an equal payload
// (data pointer, constant value, or name). Node types not listed in the
// switch are conservatively treated as unequal.
bool equalTo(aql::AstNode const* lhs, aql::AstNode const* rhs) {
if (lhs == rhs) {
return true;
}
if ((lhs == nullptr && rhs != nullptr) || (lhs != nullptr && rhs == nullptr)) {
return false;
}
// cppcheck-suppress nullPointerRedundantCheck
if (lhs->type != rhs->type) {
return false;
}
size_t const n = lhs->numMembers();
if (n != rhs->numMembers()) {
return false;
}
// check members for equality
for (size_t i = 0; i < n; ++i) {
if (!equalTo(lhs->getMemberUnchecked(i), rhs->getMemberUnchecked(i))) {
return false;
}
}
switch (lhs->type) {
case aql::NODE_TYPE_VARIABLE: {
// variables are identified by their data pointer
return lhs->getData() == rhs->getData();
}
// pure structural node types: equality is fully determined by the
// member comparison above
case aql::NODE_TYPE_OPERATOR_UNARY_PLUS:
case aql::NODE_TYPE_OPERATOR_UNARY_MINUS:
case aql::NODE_TYPE_OPERATOR_UNARY_NOT:
case aql::NODE_TYPE_OPERATOR_BINARY_AND:
case aql::NODE_TYPE_OPERATOR_BINARY_OR:
case aql::NODE_TYPE_OPERATOR_BINARY_PLUS:
case aql::NODE_TYPE_OPERATOR_BINARY_MINUS:
case aql::NODE_TYPE_OPERATOR_BINARY_TIMES:
case aql::NODE_TYPE_OPERATOR_BINARY_DIV:
case aql::NODE_TYPE_OPERATOR_BINARY_MOD:
case aql::NODE_TYPE_OPERATOR_BINARY_EQ:
case aql::NODE_TYPE_OPERATOR_BINARY_NE:
case aql::NODE_TYPE_OPERATOR_BINARY_LT:
case aql::NODE_TYPE_OPERATOR_BINARY_LE:
case aql::NODE_TYPE_OPERATOR_BINARY_GT:
case aql::NODE_TYPE_OPERATOR_BINARY_GE:
case aql::NODE_TYPE_OPERATOR_BINARY_IN:
case aql::NODE_TYPE_OPERATOR_BINARY_NIN:
case aql::NODE_TYPE_OPERATOR_TERNARY:
case aql::NODE_TYPE_OBJECT:
case aql::NODE_TYPE_CALCULATED_OBJECT_ELEMENT:
case aql::NODE_TYPE_ARRAY:
case aql::NODE_TYPE_RANGE:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_EQ:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_NE:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_LT:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_LE:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_GT:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_GE:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_IN:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_NIN: {
return true;
}
case aql::NODE_TYPE_ATTRIBUTE_ACCESS:
case aql::NODE_TYPE_INDEXED_ACCESS:
case aql::NODE_TYPE_EXPANSION: {
// delegate to the access-chain comparison (no evaluation context)
return attributeAccessEqual(lhs, rhs, nullptr);
}
case aql::NODE_TYPE_VALUE: {
return 0 == aql::CompareAstNodes(lhs, rhs, true);
}
case aql::NODE_TYPE_OBJECT_ELEMENT: {
// compare by attribute name
irs::string_ref lhsValue, rhsValue;
iresearch::parseValue(lhsValue, *lhs);
iresearch::parseValue(rhsValue, *rhs);
return lhsValue == rhsValue;
}
case aql::NODE_TYPE_REFERENCE: {
return lhs->getData() == rhs->getData();
}
case aql::NODE_TYPE_FCALL: {
// built-in functions are identified by their aql::Function pointer
return lhs->getData() == rhs->getData();
}
case aql::NODE_TYPE_FCALL_USER: {
// user-defined functions are identified by name
irs::string_ref lhsName, rhsName;
iresearch::parseValue(lhsName, *lhs);
iresearch::parseValue(rhsName, *rhs);
return lhsName == rhsName;
}
case aql::NODE_TYPE_QUANTIFIER: {
return lhs->value.value._int == rhs->value.value._int;
}
default: { return false; }
}
}
// Recursively hashes an AST node, consistent with equalTo(): the node type
// string, all members, and the per-type payload are folded into `hash`
// via fasthash64. A null node returns the seed unchanged.
size_t hash(aql::AstNode const* node, size_t hash /*= 0*/) noexcept {
if (!node) {
return hash;
}
// hash node type
auto const& typeString = node->getTypeString();
hash = fasthash64(static_cast<const void*>(typeString.c_str()), typeString.size(), hash);
// hash node members
for (size_t i = 0, n = node->numMembers(); i < n; ++i) {
auto sub = node->getMemberUnchecked(i);
if (sub) {
// qualified call: the parameter `hash` shadows this function's name
hash = iresearch::hash(sub, hash);
}
}
switch (node->type) {
case aql::NODE_TYPE_VARIABLE: {
// variables hash by identity (data pointer)
return fasthash64(node->getData(), sizeof(void*), hash);
}
// structural node types carry no extra payload beyond their members
case aql::NODE_TYPE_OPERATOR_UNARY_PLUS:
case aql::NODE_TYPE_OPERATOR_UNARY_MINUS:
case aql::NODE_TYPE_OPERATOR_UNARY_NOT:
case aql::NODE_TYPE_OPERATOR_BINARY_AND:
case aql::NODE_TYPE_OPERATOR_BINARY_OR:
case aql::NODE_TYPE_OPERATOR_BINARY_PLUS:
case aql::NODE_TYPE_OPERATOR_BINARY_MINUS:
case aql::NODE_TYPE_OPERATOR_BINARY_TIMES:
case aql::NODE_TYPE_OPERATOR_BINARY_DIV:
case aql::NODE_TYPE_OPERATOR_BINARY_MOD:
case aql::NODE_TYPE_OPERATOR_BINARY_EQ:
case aql::NODE_TYPE_OPERATOR_BINARY_NE:
case aql::NODE_TYPE_OPERATOR_BINARY_LT:
case aql::NODE_TYPE_OPERATOR_BINARY_LE:
case aql::NODE_TYPE_OPERATOR_BINARY_GT:
case aql::NODE_TYPE_OPERATOR_BINARY_GE:
case aql::NODE_TYPE_OPERATOR_BINARY_IN:
case aql::NODE_TYPE_OPERATOR_BINARY_NIN:
case aql::NODE_TYPE_OPERATOR_TERNARY:
case aql::NODE_TYPE_INDEXED_ACCESS:
case aql::NODE_TYPE_EXPANSION:
case aql::NODE_TYPE_ARRAY:
case aql::NODE_TYPE_OBJECT:
case aql::NODE_TYPE_CALCULATED_OBJECT_ELEMENT:
case aql::NODE_TYPE_RANGE:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_EQ:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_NE:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_LT:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_LE:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_GT:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_GE:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_IN:
case aql::NODE_TYPE_OPERATOR_BINARY_ARRAY_NIN: {
return hash;
}
case aql::NODE_TYPE_ATTRIBUTE_ACCESS: {
// hash the attribute name stored in the node's value
return aql::AstNode(node->value).hashValue(hash);
}
case aql::NODE_TYPE_VALUE: {
// constants hash by their concrete value type and content
switch (node->value.type) {
case aql::VALUE_TYPE_NULL:
return fasthash64(static_cast<const void*>("null"), 4, hash);
case aql::VALUE_TYPE_BOOL:
if (node->value.value._bool) {
return fasthash64(static_cast<const void*>("true"), 4, hash);
}
return fasthash64(static_cast<const void*>("false"), 5, hash);
case aql::VALUE_TYPE_INT:
return fasthash64(static_cast<const void*>(&node->value.value._int),
sizeof(node->value.value._int), hash);
case aql::VALUE_TYPE_DOUBLE:
return fasthash64(static_cast<const void*>(&node->value.value._double),
sizeof(node->value.value._double), hash);
case aql::VALUE_TYPE_STRING:
return fasthash64(static_cast<const void*>(node->getStringValue()),
node->getStringLength(), hash);
}
return hash;
}
case aql::NODE_TYPE_OBJECT_ELEMENT: {
return fasthash64(static_cast<const void*>(node->getStringValue()),
node->getStringLength(), hash);
}
case aql::NODE_TYPE_REFERENCE: {
return fasthash64(node->getData(), sizeof(void*), hash);
}
case aql::NODE_TYPE_FCALL: {
// fold in both the function's identity and its name
auto* fn = static_cast<aql::Function*>(node->getData());
hash = fasthash64(node->getData(), sizeof(void*), hash);
return fasthash64(fn->name.c_str(), fn->name.size(), hash);
}
case aql::NODE_TYPE_FCALL_USER: {
return fasthash64(static_cast<const void*>(node->getStringValue()),
node->getStringLength(), hash);
}
case aql::NODE_TYPE_QUANTIFIER: {
return fasthash64(static_cast<const void*>(&node->value.value._int),
sizeof(node->value.value._int), hash);
}
default: {
// fallback: hash the pointer value itself (identity hash)
return fasthash64(static_cast<void const*>(&node), sizeof(&node), hash);
}
}
}
// Returns the name of the function referenced by `node`, which must be a
// built-in (NODE_TYPE_FCALL) or user-defined (NODE_TYPE_FCALL_USER) call.
// Any other node type trips an assertion and yields an empty reference.
irs::string_ref getFuncName(aql::AstNode const& node) {
  irs::string_ref name;
  if (aql::NODE_TYPE_FCALL == node.type) {
    // built-in call: the node payload is the aql::Function descriptor
    name = reinterpret_cast<aql::Function const*>(node.getData())->name;
  } else if (aql::NODE_TYPE_FCALL_USER == node.type) {
    // user-defined call: the name is the node's string value
    parseValue(name, node);
  } else {
    TRI_ASSERT(false);
  }
  return name;
}
// Invokes `visitor` for every register-backed variable referenced anywhere
// inside `root`. Constant subtrees are pruned, since they cannot contain
// variable references. Throws TRI_ERROR_INTERNAL on a null reference payload.
void visitReferencedVariables(aql::AstNode const& root,
                              std::function<void(aql::Variable const&)> const& visitor) {
  // prune constant subtrees before descending
  auto const skipConstants = [](aql::AstNode const* node) -> bool {
    return !node->isConstant();
  };
  auto const onNode = [&visitor](aql::AstNode const* node) {
    if (node == nullptr || aql::NODE_TYPE_REFERENCE != node->type) {
      return;
    }
    auto const* variable = static_cast<aql::Variable const*>(node->getData());
    if (!variable) {
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
                                     "invalid reference in AST");
    }
    if (variable->needsRegister()) {
      visitor(*variable);
    }
  };
  aql::Ast::traverseReadOnly(&root, skipConstants, onNode);
}
// ----------------------------------------------------------------------------
// --SECTION-- AqlValueTraits implementation
// ----------------------------------------------------------------------------
// Sentinel AST node used when no real node backs a ScopedAqlValue.
aql::AstNode const ScopedAqlValue::INVALID_NODE(aql::NODE_TYPE_ROOT);

// Human-readable name for a ScopedValueType; the table order must match the
// enumeration order.
/*static*/ irs::string_ref const& ScopedAqlValue::typeString(ScopedValueType type) noexcept {
  static irs::string_ref const TYPE_NAMES[]{
      "invalid", "null", "boolean", "double",
      "string",  "array", "range",  "object"};
  TRI_ASSERT(size_t(type) < IRESEARCH_COUNTOF(TYPE_NAMES));
  return TYPE_NAMES[size_t(type)];
}
// ----------------------------------------------------------------------------
// --SECTION-- ScopedAqlValue implementation
// ----------------------------------------------------------------------------
// Evaluates the wrapped AST node through aql::Expression and caches the
// resulting AqlValue and its type. Returns true on success; false when the
// required context pieces are missing or evaluation throws. Deterministic
// expressions are evaluated at most once.
bool ScopedAqlValue::execute(iresearch::QueryContext const& ctx) {
if (_executed && _node->isDeterministic()) {
// constant expression, nothing to do
return true;
}
if (!ctx.plan) { // || !ctx.ctx) {
// can't execute expression without `ExecutionPlan`
return false;
}
TRI_ASSERT(ctx.ctx); // FIXME remove, uncomment condition
if (!ctx.ast) {
// can't execute expression without `AST` and `ExpressionContext`
return false;
}
// don't really understand why we need `ExecutionPlan` and `Ast` here
aql::Expression expr(ctx.ast, const_cast<aql::AstNode*>(_node));
// release any previously-held value before overwriting it
destroy();
try {
_value = expr.execute(ctx.ctx, _destroy);
} catch (basics::Exception const& e) {
// can't execute expression
LOG_TOPIC("0c06a", WARN, iresearch::TOPIC) << e.message();
return false;
} catch (...) {
// can't execute expression
return false;
}
// cache the value's type so repeated type() calls avoid re-inspection
_type = AqlValueTraits::type(_value);
_executed = true;
return true;
}
// Normalizes a comparison of the form `GEO_DISTANCE(...) <op> value` (or its
// mirror image `value <op> GEO_DISTANCE(...)`) into `out`, so that the
// GEO_DISTANCE call is always on the attribute side and the operator is
// adjusted accordingly. Returns false for nondeterministic input, an
// unsupported operator, or when the value side references `ref`.
bool normalizeGeoDistanceCmpNode(
    aql::AstNode const& in,
    aql::Variable const& ref,
    NormalizedCmpNode& out) {
  static_assert(adjacencyChecker<aql::AstNodeType>::checkAdjacency<
                    aql::NODE_TYPE_OPERATOR_BINARY_GE, aql::NODE_TYPE_OPERATOR_BINARY_GT,
                    aql::NODE_TYPE_OPERATOR_BINARY_LE, aql::NODE_TYPE_OPERATOR_BINARY_LT>(),
                "Values are not adjacent");
  // true iff `node` is a GEO_DISTANCE(...) call whose arguments reference `var`
  auto const isGeoDistanceCall = [](aql::AstNode const* node, aql::Variable const& var) {
    if (node == nullptr || aql::NODE_TYPE_FCALL != node->type) {
      return false;
    }
    auto const* impl = reinterpret_cast<aql::Function const*>(node->getData())->implementation;
    if (impl != &aql::Functions::GeoDistance) {
      return false;
    }
    auto const* args = node->getMemberUnchecked(0);
    return args != nullptr && findReference(*args, var);
  };
  if (!in.isDeterministic()) {
    // unable to normalize a nondeterministic node
    return false;
  }
  auto op = in.type;
  if (op < aql::NODE_TYPE_OPERATOR_BINARY_LT ||
      op > aql::NODE_TYPE_OPERATOR_BINARY_GE ||
      in.numMembers() != 2) {
    // not a supported binary comparison
    return false;
  }
  auto const* callSide = in.getMemberUnchecked(0);
  auto const* valueSide = in.getMemberUnchecked(1);
  TRI_ASSERT(callSide);
  TRI_ASSERT(valueSide);
  if (!isGeoDistanceCall(callSide, ref)) {
    if (!isGeoDistanceCall(valueSide, ref)) {
      return false;
    }
    // the call sits on the right-hand side: swap operands and mirror the operator
    std::swap(callSide, valueSide);
    op = CmpMap[op - aql::NODE_TYPE_OPERATOR_BINARY_EQ];
  }
  if (iresearch::findReference(*valueSide, ref)) {
    // the value side must not depend on the referenced variable
    return false;
  }
  out.attribute = callSide;
  out.value = valueSide;
  out.cmp = op;
  return true;
}
// Normalizes a binary comparison so the attribute access over `ref` ends up
// on the left (`out.attribute`) and the constant/value side on the right
// (`out.value`), mirroring the operator via CmpMap when the operands must be
// swapped. Returns false for nondeterministic input, an unsupported operator,
// or when the value side itself references `ref`.
bool normalizeCmpNode(aql::AstNode const& in,
                      aql::Variable const& ref, NormalizedCmpNode& out) {
  static_assert(adjacencyChecker<aql::AstNodeType>::checkAdjacency<
                    aql::NODE_TYPE_OPERATOR_BINARY_GE, aql::NODE_TYPE_OPERATOR_BINARY_GT,
                    aql::NODE_TYPE_OPERATOR_BINARY_LE, aql::NODE_TYPE_OPERATOR_BINARY_LT,
                    aql::NODE_TYPE_OPERATOR_BINARY_NE, aql::NODE_TYPE_OPERATOR_BINARY_EQ>(),
                "Values are not adjacent");
  if (!in.isDeterministic()) {
    // unable to normalize a nondeterministic node
    return false;
  }
  auto op = in.type;
  if (op < aql::NODE_TYPE_OPERATOR_BINARY_EQ ||
      op > aql::NODE_TYPE_OPERATOR_BINARY_GE || in.numMembers() != 2) {
    // not a supported binary comparison
    return false;
  }
  auto const* accessSide = in.getMemberUnchecked(0);
  auto const* valueSide = in.getMemberUnchecked(1);
  TRI_ASSERT(accessSide);
  TRI_ASSERT(valueSide);
  if (!iresearch::checkAttributeAccess(accessSide, ref)) {
    if (!iresearch::checkAttributeAccess(valueSide, ref)) {
      // neither side is a suitable attribute access over `ref`
      return false;
    }
    // attribute access sits on the right-hand side: swap and mirror the operator
    std::swap(accessSide, valueSide);
    op = CmpMap[op - aql::NODE_TYPE_OPERATOR_BINARY_EQ];
  }
  if (iresearch::findReference(*valueSide, ref)) {
    // the value side must not depend on the referenced variable
    return false;
  }
  out.attribute = accessSide;
  out.value = valueSide;
  out.cmp = op;
  return true;
}
// Compares two attribute-access chains (a.b[1].c style) link by link,
// walking from the outermost access towards the root REFERENCE/VALUE node.
// `ctx` may be null; then non-constant index expressions are treated as
// equal without evaluation (compile-time comparison mode).
bool attributeAccessEqual(aql::AstNode const* lhs,
aql::AstNode const* rhs, QueryContext const* ctx) {
// Decodes one link of an access chain into a comparable (type, string, int)
// triple plus the pointer to the next link (`root`).
struct NodeValue {
enum class Type {
INVALID = 0,
EXPANSION, // [*]
ACCESS, // [<offset>] | [<string>] | .
VALUE // REFERENCE | VALUE
};
// Reads one link from `node`. Returns true if there is a further link to
// follow (this->root), false at the end of the chain or on invalid input.
bool read(aql::AstNode const* node, QueryContext const* ctx) noexcept {
this->strVal = irs::string_ref::NIL;
this->iVal = 0;
this->type = Type::INVALID;
this->root = nullptr;
if (!node) {
return false;
}
auto const n = node->numMembers();
auto const type = node->type;
if (n >= 2 && aql::NODE_TYPE_EXPANSION == type) { // [*]
auto* itr = node->getMemberUnchecked(0);
auto* ref = node->getMemberUnchecked(1);
if (itr && itr->numMembers() == 2) {
auto* var = itr->getMemberUnchecked(0);
auto* root = itr->getMemberUnchecked(1);
if (ref && aql::NODE_TYPE_ITERATOR == itr->type &&
aql::NODE_TYPE_REFERENCE == ref->type && root && var &&
aql::NODE_TYPE_VARIABLE == var->type) {
this->type = Type::EXPANSION;
this->root = root;
return true;
}
}
} else if (n == 2 && aql::NODE_TYPE_INDEXED_ACCESS == type) { // [<something>]
auto* root = node->getMemberUnchecked(0);
auto* offset = node->getMemberUnchecked(1);
if (root && offset) {
aqlValue.reset(*offset);
if (!aqlValue.isConstant()) {
if (!ctx) {
// can't evaluate expression at compile time: optimistically treat
// the link as matching (type stays INVALID, values stay defaults)
return true;
}
if (!aqlValue.execute(*ctx)) {
// failed to execute expression
return false;
}
}
switch (aqlValue.type()) {
case iresearch::SCOPED_VALUE_TYPE_DOUBLE:
this->iVal = aqlValue.getInt64();
this->type = Type::ACCESS;
this->root = root;
return true;
case iresearch::SCOPED_VALUE_TYPE_STRING:
if (!aqlValue.getString(this->strVal)) {
// failed to parse value as string
return false;
}
this->type = Type::ACCESS;
this->root = root;
return true;
default:
break;
}
}
} else if (n == 1 && aql::NODE_TYPE_ATTRIBUTE_ACCESS == type) {
auto* root = node->getMemberUnchecked(0);
if (root && aql::VALUE_TYPE_STRING == node->value.type) {
this->strVal = getStringRef(*node);
this->type = Type::ACCESS;
this->root = root;
return true;
}
} else if (!n) { // end of attribute path (base case)
if (aql::NODE_TYPE_REFERENCE == type) {
this->iVal = reinterpret_cast<int64_t>(node->value.value._data);
this->type = Type::VALUE;
this->root = node;
return false; // end of path
} else if (aql::VALUE_TYPE_STRING == node->value.type) {
this->strVal = getStringRef(*node);
this->type = Type::VALUE;
this->root = node;
return false; // end of path
}
}
return false; // invalid input
}
bool operator==(const NodeValue& rhs) const noexcept {
return type == rhs.type && strVal == rhs.strVal && iVal == rhs.iVal;
}
bool operator!=(const NodeValue& rhs) const noexcept {
return !(*this == rhs);
}
iresearch::ScopedAqlValue aqlValue;
irs::string_ref strVal;
int64_t iVal;
Type type{Type::INVALID};
aql::AstNode const* root = nullptr;
} lhsValue, rhsValue;
// Deliberate non-short-circuiting `&`: both read() calls must execute every
// iteration so both chains advance (and reset their state) in lock-step;
// `&&` would leave rhsValue stale whenever the left side returns false.
//cppcheck-suppress uninitvar; false positive
while (lhsValue.read(lhs, ctx) & rhsValue.read(rhs, ctx)) {
if (lhsValue != rhsValue) {
return false;
}
lhs = lhsValue.root;
rhs = rhsValue.root;
}
// both chains must have terminated on a valid base node with equal payloads
return lhsValue.type != NodeValue::Type::INVALID &&
rhsValue.type != NodeValue::Type::INVALID && rhsValue == lhsValue;
}
// Builds the mangled field name for an attribute-access chain (e.g. `a.b[1]`)
// into `name`, evaluating index expressions via `ctx`. Returns true iff the
// whole chain was visited and it is rooted in a variable REFERENCE node.
// [*] expansion is not supported and causes failure.
bool nameFromAttributeAccess(std::string& name, aql::AstNode const& node,
                             QueryContext const& ctx) {
  struct {
    // `.attr` step: append the attribute name
    bool attributeAccess(aql::AstNode const& node) {
      irs::string_ref strValue;
      if (!parseValue(strValue, node)) {
        // wrong type
        return false;
      }
      append(strValue);
      return true;
    }
    bool expansion(aql::AstNode const&) const {
      return false; // do not support [*]
    }
    // `[<expr>]` step: evaluate the index expression and append its value
    bool indexAccess(aql::AstNode const& node) {
      value_.reset(node);
      if (!value_.execute(*ctx_)) {
        // failed to evaluate value
        return false;
      }
      switch (value_.type()) {
        case iresearch::SCOPED_VALUE_TYPE_DOUBLE:
          append(value_.getInt64());
          return true;
        case iresearch::SCOPED_VALUE_TYPE_STRING: {
          irs::string_ref strValue;
          if (!value_.getString(strValue)) {
            // unable to parse value as string
            return false;
          }
          append(strValue);
          return true;
        }
        default:
          return false;
      }
    }
    // appends a nesting-level separator (unless first) and the segment text
    void append(irs::string_ref const& value) {
      if (!str_->empty()) {
        (*str_) += NESTING_LEVEL_DELIMITER;
      }
      str_->append(value.c_str(), value.size());
    }
    // appends a list offset wrapped in the offset prefix/suffix markers
    void append(int64_t value) {
      (*str_) += NESTING_LIST_OFFSET_PREFIX;
      // FIX: `value` is signed (int64_t); formatting it with PRIu64 was a
      // signed/unsigned format mismatch (undefined behavior, wrong output
      // for negative offsets). Use PRId64 and bound the write with snprintf.
      auto const written = snprintf(buf_, sizeof buf_, "%" PRId64, value);
      str_->append(buf_, written);
      (*str_) += NESTING_LIST_OFFSET_SUFFIX;
    }
    ScopedAqlValue value_;
    std::string* str_;
    QueryContext const* ctx_;
    char buf_[21]; // enough for any 64-bit integer incl. sign and NUL
  } builder;
  name.clear();
  builder.str_ = &name;
  builder.ctx_ = &ctx;
  aql::AstNode const* head = nullptr;
  return visitAttributeAccess(head, &node, builder) && head &&
         aql::NODE_TYPE_REFERENCE == head->type;
}
// Returns `node` if it is a plain attribute/index access chain (no [*]
// expansion) rooted in a REFERENCE to exactly the variable `ref`;
// otherwise returns nullptr. A bare REFERENCE root node is rejected.
aql::AstNode const* checkAttributeAccess(aql::AstNode const* node,
                                         aql::Variable const& ref) noexcept {
  struct {
    bool attributeAccess(aql::AstNode const&) const { return true; }
    bool indexAccess(aql::AstNode const&) const { return true; }
    bool expansion(aql::AstNode const&) const {
      return false; // do not support [*]
    }
  } checker;
  if (node == nullptr || aql::NODE_TYPE_REFERENCE == node->type) {
    // do not allow the root node itself to be a REFERENCE
    return nullptr;
  }
  aql::AstNode const* head = nullptr;
  if (!visitAttributeAccess(head, node, checker) || head == nullptr ||
      aql::NODE_TYPE_REFERENCE != head->type ||
      head->getData() != reinterpret_cast<void const*>(&ref)) {
    // chain is malformed or rooted in a different variable
    return nullptr;
  }
  return node;
}
} // namespace iresearch
} // namespace arangodb
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
|
/**
* @file object_proxy.cpp
* @brief Registration mechanism for object that need to be readable / writable
* into file
* @ingroup io
*
* @author Christophe Ecabert
* @date 24.09.17
* Copyright © 2017 Christophe Ecabert. All rights reserved.
*/
#include "facekit/io/object_proxy.hpp"
#include "facekit/io/object_manager.hpp"
/**
* @namespace FaceKit
* @brief Development space
*/
namespace FaceKit {
/*
* @name ObjectProxy
* @fn ObjectProxy(const std::string& classname, const size_t id)
* @brief Constructor
* @param[in] classname Class name to be represented by this proxy
* @param[in] id Object's unique ID
*/
ObjectProxy::ObjectProxy(const std::string& classname,
const size_t id) : classname_(classname), id_(id) {
// Self-register with the global manager so the represented class becomes
// discoverable for file serialization; the manager holds a raw pointer to
// this proxy, so proxies are expected to outlive lookups (typically they
// are static objects — TODO confirm at call sites).
ObjectManager::Get().Register(this);
}
} // namespace FaceKit
|
#include "quantities/Attractor.h"
#include "catch.hpp"
#include "gravity/BarnesHut.h"
#include "gravity/BruteForceGravity.h"
#include "gravity/CachedGravity.h"
#include "gravity/Moments.h"
#include "quantities/Quantity.h"
#include "quantities/Storage.h"
#include "sph/initial/Distribution.h"
#include "tests/Approx.h"
#include "thread/Pool.h"
#include "utils/SequenceTest.h"
using namespace Sph;
// Factory creating the gravity solver under test, specialized per solver type.
template <typename T>
AutoPtr<IGravity> createGravity();
template <>
AutoPtr<IGravity> createGravity<BruteForceGravity>() {
return makeAuto<BruteForceGravity>(1._f);
}
template <>
AutoPtr<IGravity> createGravity<BarnesHut>() {
// opening angle 0.4, octupole expansion — an approximate solver
return makeAuto<BarnesHut>(0.4_f, MultipoleOrder::OCTUPOLE, 25, 50, 1._f);
}
template <>
AutoPtr<IGravity> createGravity<CachedGravity>() {
// cached wrapper delegating to the exact brute-force solver
return makeAuto<CachedGravity>(0.5_f, createGravity<BruteForceGravity>());
}
// Comparison tolerance per solver; BarnesHut is approximate, so it gets a
// looser bound than the machine-epsilon default.
template <typename T>
const Float gravityEps = EPS;
template <>
const Float gravityEps<BarnesHut> = 2.e-4_f;
// Verifies that modeling one particle group as attractors (storage1) yields
// the same accelerations as modeling the same bodies as ordinary particles
// (storage2), for each gravity solver type.
TEMPLATE_TEST_CASE("Gravity with attractors", "[gravity]", BruteForceGravity, BarnesHut, CachedGravity) {
ThreadPool& pool = *ThreadPool::getGlobalInstance();
RandomDistribution distr(1234);
SphericalDomain domain1(Vector(0._f), 1.e6_f);
SphericalDomain domain2(Vector(1.e6_f, 0._f, 0._f), 5.e6_f);
Array<Vector> points1 = distr.generate(pool, 100, domain1);
Array<Vector> points2 = distr.generate(pool, 20, domain2);
const Float m1 = 3.e10_f;
const Float m2 = 1.5e10_f;
Storage storage1;
// combine particles and attractors in storage1
storage1.insert<Vector>(QuantityId::POSITION, OrderEnum::SECOND, points1.clone());
storage1.insert<Float>(QuantityId::MASS, OrderEnum::ZERO, m1);
for (const Vector& p : points2) {
storage1.addAttractor(Attractor(p, Vector(0._f), p[H], m2));
}
Storage storage2;
// put only particles in storage2
Array<Vector> allPoints;
allPoints.pushAll(points1);
allPoints.pushAll(points2);
storage2.insert<Vector>(QuantityId::POSITION, OrderEnum::SECOND, std::move(allPoints));
Array<Float> m(storage2.getParticleCnt());
for (Size i = 0; i < m.size(); ++i) {
// first block of masses corresponds to points1, remainder to points2
m[i] = i < points1.size() ? m1 : m2;
}
storage2.insert<Float>(QuantityId::MASS, OrderEnum::ZERO, std::move(m));
Statistics stats;
stats.set(StatisticsId::RUN_TIME, 0._f);
ArrayView<Vector> dv1 = storage1.getD2t<Vector>(QuantityId::POSITION);
AutoPtr<IGravity> gravity = createGravity<TestType>();
gravity->build(pool, storage1);
gravity->evalSelfGravity(pool, dv1, stats);
// attractor contributions are accumulated separately from self-gravity
gravity->evalAttractors(pool, storage1.getAttractors(), dv1);
ArrayView<Vector> dv2 = storage2.getD2t<Vector>(QuantityId::POSITION);
gravity->build(pool, storage2);
gravity->evalSelfGravity(pool, dv2, stats);
// 1) accelerations of the ordinary particles must agree between setups
auto test1 = [&](const Size i) -> Outcome {
if (dv2[i] != approx(dv1[i], gravityEps<TestType>)) {
return makeFailed("Incorrect acceleration: {} == {}", dv2[i], dv1[i]);
}
return SUCCESS;
};
REQUIRE_SEQUENCE(test1, 0, dv1.size());
ArrayView<const Attractor> attractors = storage1.getAttractors();
// 2) each attractor's acceleration must match the acceleration of its
// particle counterpart in storage2 (indices offset by dv1.size())
auto test2 = [&](const Size i) -> Outcome {
const Vector acc1 = dv2[i + dv1.size()];
const Vector acc2 = attractors[i].acceleration;
if (acc1 != approx(acc2, gravityEps<TestType>)) {
return makeFailed("Incorrect acceleration: {} == {}", acc1, acc2);
}
return SUCCESS;
};
REQUIRE_SEQUENCE(test2, 0, attractors.size());
}
|
#include <chrono>
#include <cstdint>
#include <functional>
#include <string>
#include "envoy/config/core/v3/base.pb.h"
#include "envoy/extensions/transport_sockets/tls/v3/cert.pb.h"
#include "envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.pb.h"
#include "envoy/extensions/upstreams/http/http/v3/http_connection_pool.pb.h"
#include "envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.pb.h"
#include "envoy/extensions/upstreams/tcp/generic/v3/generic_connection_pool.pb.h"
#include "envoy/type/v3/percent.pb.h"
#include "source/common/buffer/buffer_impl.h"
#include "source/common/common/empty_string.h"
#include "source/common/config/metadata.h"
#include "source/common/config/well_known_names.h"
#include "source/common/http/context_impl.h"
#include "source/common/network/application_protocol.h"
#include "source/common/network/socket_option_factory.h"
#include "source/common/network/upstream_server_name.h"
#include "source/common/network/upstream_socket_options_filter_state.h"
#include "source/common/network/upstream_subject_alt_names.h"
#include "source/common/network/utility.h"
#include "source/common/network/win32_redirect_records_option_impl.h"
#include "source/common/router/config_impl.h"
#include "source/common/router/debug_config.h"
#include "source/common/router/router.h"
#include "source/common/stream_info/uint32_accessor_impl.h"
#include "source/common/stream_info/utility.h"
#include "source/common/tracing/http_tracer_impl.h"
#include "source/common/upstream/upstream_impl.h"
#include "test/common/http/common.h"
#include "test/common/router/router_test_base.h"
#include "test/mocks/http/mocks.h"
#include "test/mocks/local_info/mocks.h"
#include "test/mocks/network/mocks.h"
#include "test/mocks/router/mocks.h"
#include "test/mocks/runtime/mocks.h"
#include "test/mocks/ssl/mocks.h"
#include "test/mocks/tracing/mocks.h"
#include "test/mocks/upstream/cluster_manager.h"
#include "test/mocks/upstream/host.h"
#include "test/test_common/environment.h"
#include "test/test_common/printers.h"
#include "test/test_common/simulated_time_system.h"
#include "test/test_common/test_runtime.h"
#include "test/test_common/utility.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
using testing::_;
using testing::AtLeast;
using testing::Eq;
using testing::InSequence;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::MockFunction;
using testing::NiceMock;
using testing::Property;
using testing::Return;
using testing::ReturnRef;
namespace Envoy {
namespace Router {
// Allows verifying the state of the upstream StreamInfo
// Test-only access logger that forwards the StreamInfo of each log() call to
// a user-supplied callback, letting tests assert on upstream stream state.
class TestAccessLog : public AccessLog::Instance {
public:
explicit TestAccessLog(std::function<void(const StreamInfo::StreamInfo&)> func) : func_(func) {}
void log(const Http::RequestHeaderMap*, const Http::ResponseHeaderMap*,
const Http::ResponseTrailerMap*, const StreamInfo::StreamInfo& info) override {
func_(info);
}
private:
// invoked once per log() with the stream info under inspection
std::function<void(const StreamInfo::StreamInfo&)> func_;
};
// Router filter test fixture with helpers shared by many TEST_Fs below.
class RouterTest : public RouterTestBase {
public:
RouterTest() : RouterTestBase(false, false, false, Protobuf::RepeatedPtrField<std::string>{}) {
EXPECT_CALL(callbacks_, activeSpan()).WillRepeatedly(ReturnRef(span_));
};
// Drives one full request/response exchange through the router (optionally
// with trailers) and verifies that the request/response header and body
// size histograms receive the expected byte counts.
void testRequestResponseSize(bool with_trailers) {
NiceMock<Http::MockRequestEncoder> encoder;
Http::ResponseDecoder* response_decoder = nullptr;
// capture the response decoder and complete pool setup synchronously
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(
Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions& options)
-> Http::ConnectionPool::Cancellable* {
EXPECT_FALSE(options.can_send_early_data_);
EXPECT_TRUE(options.can_use_http3_);
response_decoder = &decoder;
callbacks.onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
// configure the generic connection pool on the cluster
cm_.thread_local_cluster_.cluster_.info_->upstream_config_ =
absl::make_optional<envoy::config::core::v3::TypedExtensionConfig>();
envoy::extensions::upstreams::http::generic::v3::GenericConnectionPoolProto generic_config;
cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value()
.mutable_typed_config()
->PackFrom(generic_config);
callbacks_.route_->route_entry_.connect_config_ =
absl::make_optional<RouteEntry::ConnectConfig>();
EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _));
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
headers.setMethod("POST");
// request headers size histogram
EXPECT_CALL(
cm_.thread_local_cluster_.cluster_.info_->request_response_size_stats_store_,
deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 74ull));
router_.decodeHeaders(headers, false);
EXPECT_CALL(callbacks_.dispatcher_, createTimer_);
// request body size histogram ("hello" == 5 bytes)
EXPECT_CALL(
cm_.thread_local_cluster_.cluster_.info_->request_response_size_stats_store_,
deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 5ull));
Buffer::InstancePtr body_data(new Buffer::OwnedImpl("hello"));
EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer,
router_.decodeData(*body_data, !with_trailers));
if (with_trailers) {
Http::TestRequestTrailerMapImpl trailers{{"some", "trailer"}};
router_.decodeTrailers(trailers);
}
// response headers size histogram
EXPECT_CALL(
cm_.thread_local_cluster_.cluster_.info_->request_response_size_stats_store_,
deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_headers_size"), 10ull));
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
// NOLINTNEXTLINE: Silence null pointer access warning
response_decoder->decodeHeaders(std::move(response_headers), false);
// response body size histogram ("goodbye" == 7 bytes)
EXPECT_CALL(
cm_.thread_local_cluster_.cluster_.info_->request_response_size_stats_store_,
deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 7ull));
Buffer::OwnedImpl response_data("goodbye");
// NOLINTNEXTLINE: Silence null pointer access warning
response_decoder->decodeData(response_data, !with_trailers);
if (with_trailers) {
Http::ResponseTrailerMapPtr response_trailers(
new Http::TestResponseTrailerMapImpl{{"some-trailer", "13"}});
// NOLINTNEXTLINE: Silence null pointer access warning
response_decoder->decodeTrailers(std::move(response_trailers));
}
router_.onDestroy();
}
// Exercises the auto-SNI / auto-SAN-validation upstream options: decodes
// `headers` and checks the UpstreamServerName (and optionally
// UpstreamSubjectAltNames) recorded in filter state.
void testAutoSniOptions(
absl::optional<envoy::config::core::v3::UpstreamHttpProtocolOptions> dummy_option,
Envoy::Http::TestRequestHeaderMapImpl headers, std::string server_name = "host",
bool should_validate_san = false, std::string alt_server_name = "host") {
NiceMock<StreamInfo::MockStreamInfo> stream_info;
ON_CALL(*cm_.thread_local_cluster_.cluster_.info_, upstreamHttpProtocolOptions())
.WillByDefault(ReturnRef(dummy_option));
ON_CALL(callbacks_.stream_info_, filterState())
.WillByDefault(ReturnRef(stream_info.filterState()));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(Return(&cancellable_));
// pre-seed filter state with a value the router is expected to overwrite
stream_info.filterState()->setData(Network::UpstreamServerName::key(),
std::make_unique<Network::UpstreamServerName>("dummy"),
StreamInfo::FilterState::StateType::Mutable);
expectResponseTimerCreate();
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_EQ(server_name,
stream_info.filterState()
->getDataReadOnly<Network::UpstreamServerName>(Network::UpstreamServerName::key())
->value());
if (should_validate_san) {
EXPECT_EQ(alt_server_name, stream_info.filterState()
->getDataReadOnly<Network::UpstreamSubjectAltNames>(
Network::UpstreamSubjectAltNames::key())
->value()[0]);
}
EXPECT_CALL(cancellable_, cancel(_));
router_.onDestroy();
EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
};
TEST_F(RouterTest, UpdateServerNameFilterStateWithoutHeaderOverride) {
  // auto_sni enabled and no override header configured: SNI filter state is
  // derived from the default host.
  envoy::config::core::v3::UpstreamHttpProtocolOptions proto_options;
  proto_options.set_auto_sni(true);
  Http::TestRequestHeaderMapImpl request_headers{};
  testAutoSniOptions(absl::make_optional(proto_options), request_headers);
}
TEST_F(RouterTest, UpdateServerNameFilterStateWithHostHeaderOverride) {
  // Overriding via ":authority" behaves like the no-override case: SNI filter
  // state still ends up as the default host.
  envoy::config::core::v3::UpstreamHttpProtocolOptions proto_options;
  proto_options.set_auto_sni(true);
  proto_options.set_override_auto_sni_header(":authority");
  Http::TestRequestHeaderMapImpl request_headers{};
  testAutoSniOptions(absl::make_optional(proto_options), request_headers);
}
TEST_F(RouterTest, UpdateServerNameFilterStateWithHeaderOverride) {
  // With a custom override header present, its value becomes the SNI filter
  // state.
  envoy::config::core::v3::UpstreamHttpProtocolOptions proto_options;
  proto_options.set_auto_sni(true);
  proto_options.set_override_auto_sni_header("x-host");
  const auto host_override = "foo.bar";
  Http::TestRequestHeaderMapImpl request_headers{{"x-host", host_override}};
  testAutoSniOptions(absl::make_optional(proto_options), request_headers, host_override);
}
TEST_F(RouterTest, UpdateServerNameFilterStateWithEmptyValueHeaderOverride) {
  // An empty override header value is ignored: SNI falls back to the default
  // host.
  envoy::config::core::v3::UpstreamHttpProtocolOptions proto_options;
  proto_options.set_auto_sni(true);
  proto_options.set_override_auto_sni_header("x-host");
  Http::TestRequestHeaderMapImpl request_headers{{"x-host", ""}};
  testAutoSniOptions(absl::make_optional(proto_options), request_headers);
}
TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithoutHeaderOverride) {
  // auto_san_validation without an override header: the SAN filter state is
  // populated with the default host.
  envoy::config::core::v3::UpstreamHttpProtocolOptions proto_options;
  proto_options.set_auto_sni(true);
  proto_options.set_auto_san_validation(true);
  Http::TestRequestHeaderMapImpl request_headers{};
  testAutoSniOptions(absl::make_optional(proto_options), request_headers, "host", true);
}
TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithHostHeaderOverride) {
  // ":authority" as the override header keeps both SNI and SAN at the default
  // host.
  envoy::config::core::v3::UpstreamHttpProtocolOptions proto_options;
  proto_options.set_auto_sni(true);
  proto_options.set_auto_san_validation(true);
  proto_options.set_override_auto_sni_header(":authority");
  Http::TestRequestHeaderMapImpl request_headers{};
  testAutoSniOptions(absl::make_optional(proto_options), request_headers, "host", true);
}
TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithHeaderOverride) {
  // A custom override header value is used for both SNI and SAN validation.
  envoy::config::core::v3::UpstreamHttpProtocolOptions proto_options;
  proto_options.set_auto_sni(true);
  proto_options.set_auto_san_validation(true);
  proto_options.set_override_auto_sni_header("x-host");
  const auto host_override = "foo.bar";
  Http::TestRequestHeaderMapImpl request_headers{{"x-host", host_override}};
  testAutoSniOptions(absl::make_optional(proto_options), request_headers, host_override, true,
                     host_override);
}
TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithEmptyValueHeaderOverride) {
  // An empty override value falls back to the default host for SNI and SAN.
  envoy::config::core::v3::UpstreamHttpProtocolOptions proto_options;
  proto_options.set_auto_sni(true);
  proto_options.set_auto_san_validation(true);
  proto_options.set_override_auto_sni_header("x-host");
  Http::TestRequestHeaderMapImpl request_headers{{"x-host", ""}};
  testAutoSniOptions(absl::make_optional(proto_options), request_headers, "host", true);
}
TEST_F(RouterTest, UpdateSubjectAltNamesFilterStateWithIpHeaderOverride) {
  // An IP address in the override header must not be used as SNI (the
  // pre-populated "dummy" value survives) but is still used for SAN
  // validation.
  envoy::config::core::v3::UpstreamHttpProtocolOptions proto_options;
  proto_options.set_auto_sni(true);
  proto_options.set_auto_san_validation(true);
  proto_options.set_override_auto_sni_header("x-host");
  const auto ip_override = "127.0.0.1";
  Http::TestRequestHeaderMapImpl request_headers{{"x-host", ip_override}};
  testAutoSniOptions(absl::make_optional(proto_options), request_headers, "dummy", true,
                     ip_override);
}
TEST_F(RouterTest, RouteNotFound) {
  // When no route matches, the filter short-circuits with a local reply,
  // records the NoRouteFound response flag, and bumps the no_route counter.
  EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound));
  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  EXPECT_CALL(callbacks_, route()).WillOnce(Return(nullptr));
  router_.decodeHeaders(request_headers, true);
  EXPECT_EQ(1UL, stats_store_.counter("test.no_route").value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  // No upstream request was ever attempted.
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_EQ(callbacks_.details(), "route_not_found");
}
// If the codec rejects the request because a required pseudo-header (:method)
// was removed by a filter, the router must send a 503 local reply whose
// details name the missing header.
TEST_F(RouterTest, MissingRequiredHeaders) {
NiceMock<Http::MockRequestEncoder> encoder;
Http::ResponseDecoder* response_decoder = nullptr;
expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
// Strip :method so the required-header check below fails.
headers.removeMethod();
EXPECT_CALL(encoder, encodeHeaders(_, _))
.WillOnce(Invoke([](const Http::RequestHeaderMap& headers, bool) -> Http::Status {
return Http::HeaderUtility::checkRequiredRequestHeaders(headers);
}));
EXPECT_CALL(
callbacks_,
sendLocalReply(Http::Code::ServiceUnavailable,
testing::Eq("missing required header: :method"), _, _,
"filter_removed_required_request_headers{missing_required_header:_:method}"))
.WillOnce(InvokeWithoutArgs([] {}));
router_.decodeHeaders(headers, true);
router_.onDestroy();
}
TEST_F(RouterTest, ClusterNotFound) {
  // A route pointing at an unknown cluster yields a local reply, sets the
  // NoClusterFound response flag, and bumps the no_cluster counter.
  EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::NoClusterFound));
  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  ON_CALL(cm_, getThreadLocalCluster(_)).WillByDefault(Return(nullptr));
  router_.decodeHeaders(request_headers, true);
  EXPECT_EQ(1UL, stats_store_.counter("test.no_cluster").value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  // No upstream request was ever attempted.
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_EQ(callbacks_.details(), "cluster_not_found");
}
// A pool failure on a high-priority route should produce a 503 local reply
// and surface the transport failure reason in the response-code details.
TEST_F(RouterTest, PoolFailureWithPriority) {
ON_CALL(callbacks_.route_->route_entry_, priority())
.WillByDefault(Return(Upstream::ResourcePriority::High));
// The route's priority must be forwarded to the conn pool selection.
EXPECT_CALL(cm_.thread_local_cluster_,
httpConnPool(Upstream::ResourcePriority::High, _, &router_));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
// Fail the stream immediately with a transport-level reason string.
callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure,
"tls version mismatch", cm_.thread_local_cluster_.conn_pool_.host_);
return nullptr;
}));
Http::TestResponseHeaderMapImpl response_headers{
{":status", "503"}, {"content-length", "139"}, {"content-type", "text/plain"}};
EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
EXPECT_CALL(callbacks_, encodeData(_, true));
EXPECT_CALL(callbacks_.stream_info_,
setResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure));
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
// Pool failure, so upstream request was not initiated.
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// The failure reason appears in details with spaces mapped to underscores.
EXPECT_EQ(callbacks_.details(),
"upstream_reset_before_response_started{connection_failure,tls_version_mismatch}");
}
// Same as PoolFailureWithPriority but for a pool-level connect timeout: the
// Timeout reason also maps onto the connection_failure details bucket.
TEST_F(RouterTest, PoolFailureDueToConnectTimeout) {
ON_CALL(callbacks_.route_->route_entry_, priority())
.WillByDefault(Return(Upstream::ResourcePriority::High));
EXPECT_CALL(cm_.thread_local_cluster_,
httpConnPool(Upstream::ResourcePriority::High, _, &router_));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
// Fail the stream immediately with a connect timeout.
callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::Timeout, "connect_timeout",
cm_.thread_local_cluster_.conn_pool_.host_);
return nullptr;
}));
Http::TestResponseHeaderMapImpl response_headers{
{":status", "503"}, {"content-length", "134"}, {"content-type", "text/plain"}};
EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
EXPECT_CALL(callbacks_, encodeData(_, true));
EXPECT_CALL(callbacks_.stream_info_,
setResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure));
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
// Pool failure, so upstream request was not initiated.
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_EQ(callbacks_.details(),
"upstream_reset_before_response_started{connection_failure,connect_timeout}");
}
TEST_F(RouterTest, Http1Upstream) {
  // No explicit downstream protocol: the conn pool is asked with an unset
  // protocol hint, request headers are finalized, and the expected request
  // timeout header is stamped on.
  EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, absl::optional<Http::Protocol>(), _));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(Return(&cancellable_));
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  EXPECT_CALL(callbacks_.route_->route_entry_, finalizeRequestHeaders(_, _, true));
  EXPECT_CALL(span_, injectContext(_));
  router_.decodeHeaders(request_headers, true);
  EXPECT_EQ("10", request_headers.get_("x-envoy-expected-rq-timeout-ms"));
  // Resetting the router filter must cancel the pending pool request.
  EXPECT_CALL(cancellable_, cancel(_));
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
TEST_F(RouterTest, Http2Upstream) {
  // Same as Http1Upstream but without the finalize/timeout-header assertions:
  // verifies the pool request is made and canceled on destroy.
  EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, absl::optional<Http::Protocol>(), _));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(Return(&cancellable_));
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  EXPECT_CALL(span_, injectContext(_));
  router_.decodeHeaders(request_headers, true);
  // Resetting the router filter must cancel the pending pool request.
  EXPECT_CALL(cancellable_, cancel(_));
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// A configured hash policy that produces a hash must expose that hash to the
// load balancer context used for conn pool selection.
TEST_F(RouterTest, HashPolicy) {
ON_CALL(callbacks_.route_->route_entry_, hashPolicy())
.WillByDefault(Return(&callbacks_.route_->route_entry_.hash_policy_));
EXPECT_CALL(callbacks_.route_->route_entry_.hash_policy_, generateHash(_, _, _, _))
.WillOnce(Return(absl::optional<uint64_t>(10)));
EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _))
.WillOnce(Invoke([&](Upstream::ResourcePriority, absl::optional<Http::Protocol>,
Upstream::LoadBalancerContext* context) {
// The hash generated above must be visible through the LB context.
EXPECT_EQ(10UL, context->computeHashKey().value());
return Upstream::HttpPoolData([]() {}, &cm_.thread_local_cluster_.conn_pool_);
}));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(Return(&cancellable_));
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
// When the router filter gets reset we should cancel the pool request.
EXPECT_CALL(cancellable_, cancel(_));
router_.onDestroy();
EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// A hash policy that yields no hash must leave the LB context's hash key
// unset.
TEST_F(RouterTest, HashPolicyNoHash) {
ON_CALL(callbacks_.route_->route_entry_, hashPolicy())
.WillByDefault(Return(&callbacks_.route_->route_entry_.hash_policy_));
EXPECT_CALL(callbacks_.route_->route_entry_.hash_policy_, generateHash(_, _, _, _))
.WillOnce(Return(absl::optional<uint64_t>()));
EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, &router_))
.WillOnce(Invoke([&](Upstream::ResourcePriority, absl::optional<Http::Protocol>,
Upstream::LoadBalancerContext* context) {
// No hash was produced, so the context must report an empty key.
EXPECT_FALSE(context->computeHashKey());
return Upstream::HttpPoolData([]() {}, &cm_.thread_local_cluster_.conn_pool_);
}));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(Return(&cancellable_));
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
// When the router filter gets reset we should cancel the pool request.
EXPECT_CALL(cancellable_, cancel(_));
router_.onDestroy();
EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// With no hash policy configured the router must not produce a hash key at
// all.
TEST_F(RouterTest, HashKeyNoHashPolicy) {
ON_CALL(callbacks_.route_->route_entry_, hashPolicy()).WillByDefault(Return(nullptr));
EXPECT_FALSE(router_.computeHashKey().has_value());
}
// A hash policy that generates a session cookie should cause the router to
// append a matching Set-Cookie header to the downstream response.
TEST_F(RouterTest, AddCookie) {
ON_CALL(callbacks_.route_->route_entry_, hashPolicy())
.WillByDefault(Return(&callbacks_.route_->route_entry_.hash_policy_));
NiceMock<Http::MockRequestEncoder> encoder;
Http::ResponseDecoder* response_decoder = nullptr;
expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _))
.WillOnce(Invoke([&](Upstream::ResourcePriority, absl::optional<Http::Protocol>,
Upstream::LoadBalancerContext* context) {
EXPECT_EQ(10UL, context->computeHashKey().value());
return Upstream::HttpPoolData([]() {}, &cm_.thread_local_cluster_.conn_pool_);
}));
// Captures the generated cookie value so the Set-Cookie header can be
// checked below.
std::string cookie_value;
EXPECT_CALL(callbacks_.route_->route_entry_.hash_policy_, generateHash(_, _, _, _))
.WillOnce(Invoke([&](const Network::Address::Instance*, const Http::HeaderMap&,
const Http::HashPolicy::AddCookieCallback add_cookie,
const StreamInfo::FilterStateSharedPtr) {
cookie_value = add_cookie("foo", "", std::chrono::seconds(1337));
return absl::optional<uint64_t>(10);
}));
EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
.WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void {
// The response must carry the generated cookie with the requested TTL.
EXPECT_EQ(
std::string{headers.get(Http::Headers::get().SetCookie)[0]->value().getStringView()},
"foo=\"" + cookie_value + "\"; Max-Age=1337; HttpOnly");
}));
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
response_decoder->decodeHeaders(std::move(response_headers), true);
EXPECT_EQ(callbacks_.details(), "via_upstream");
// When the router filter gets reset we should cancel the pool request.
router_.onDestroy();
}
// If the upstream response already sets the same cookie, the router must not
// append a duplicate Set-Cookie header.
TEST_F(RouterTest, AddCookieNoDuplicate) {
ON_CALL(callbacks_.route_->route_entry_, hashPolicy())
.WillByDefault(Return(&callbacks_.route_->route_entry_.hash_policy_));
NiceMock<Http::MockRequestEncoder> encoder;
Http::ResponseDecoder* response_decoder = nullptr;
expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _))
.WillOnce(Invoke([&](Upstream::ResourcePriority, absl::optional<Http::Protocol>,
Upstream::LoadBalancerContext* context) {
EXPECT_EQ(10UL, context->computeHashKey().value());
return Upstream::HttpPoolData([]() {}, &cm_.thread_local_cluster_.conn_pool_);
}));
EXPECT_CALL(callbacks_.route_->route_entry_.hash_policy_, generateHash(_, _, _, _))
.WillOnce(Invoke([&](const Network::Address::Instance*, const Http::HeaderMap&,
const Http::HashPolicy::AddCookieCallback add_cookie,
const StreamInfo::FilterStateSharedPtr) {
// this should be ignored
add_cookie("foo", "", std::chrono::seconds(1337));
return absl::optional<uint64_t>(10);
}));
EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
.WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void {
// Only the upstream-provided cookie survives.
EXPECT_EQ(
std::string{headers.get(Http::Headers::get().SetCookie)[0]->value().getStringView()},
"foo=baz");
}));
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"set-cookie", "foo=baz"}});
response_decoder->decodeHeaders(std::move(response_headers), true);
// When the router filter gets reset we should cancel the pool request.
router_.onDestroy();
}
// Multiple cookies generated by the hash policy must all be appended to the
// downstream response, each with its own attributes (TTL, path).
TEST_F(RouterTest, AddMultipleCookies) {
ON_CALL(callbacks_.route_->route_entry_, hashPolicy())
.WillByDefault(Return(&callbacks_.route_->route_entry_.hash_policy_));
NiceMock<Http::MockRequestEncoder> encoder;
Http::ResponseDecoder* response_decoder = nullptr;
expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _))
.WillOnce(Invoke([&](Upstream::ResourcePriority, absl::optional<Http::Protocol>,
Upstream::LoadBalancerContext* context) {
EXPECT_EQ(10UL, context->computeHashKey().value());
return Upstream::HttpPoolData([]() {}, &cm_.thread_local_cluster_.conn_pool_);
}));
// Capture both generated cookie values for the header check below.
std::string choco_c, foo_c;
EXPECT_CALL(callbacks_.route_->route_entry_.hash_policy_, generateHash(_, _, _, _))
.WillOnce(Invoke([&](const Network::Address::Instance*, const Http::HeaderMap&,
const Http::HashPolicy::AddCookieCallback add_cookie,
const StreamInfo::FilterStateSharedPtr) {
choco_c = add_cookie("choco", "", std::chrono::seconds(15));
foo_c = add_cookie("foo", "/path", std::chrono::seconds(1337));
return absl::optional<uint64_t>(10);
}));
EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
.WillOnce(Invoke([&](const Http::HeaderMap& headers, const bool) -> void {
// Iterate all Set-Cookie headers and check each expected value appears
// exactly once.
MockFunction<void(const std::string&)> cb;
EXPECT_CALL(cb, Call("foo=\"" + foo_c + "\"; Max-Age=1337; Path=/path; HttpOnly"));
EXPECT_CALL(cb, Call("choco=\"" + choco_c + "\"; Max-Age=15; HttpOnly"));
headers.iterate([&cb](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate {
if (header.key() == Http::Headers::get().SetCookie.get()) {
cb.Call(std::string(header.value().getStringView()));
}
return Http::HeaderMap::Iterate::Continue;
});
}));
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
response_decoder->decodeHeaders(std::move(response_headers), true);
router_.onDestroy();
}
TEST_F(RouterTest, MetadataNoOp) {
  // Without configuration the router exposes no metadata match criteria.
  EXPECT_EQ(nullptr, router_.metadataMatchCriteria());
}
// Metadata match criteria configured on the route entry must be forwarded to
// the load balancer context unchanged.
TEST_F(RouterTest, MetadataMatchCriteria) {
ON_CALL(callbacks_.route_->route_entry_, metadataMatchCriteria())
.WillByDefault(Return(&callbacks_.route_->route_entry_.metadata_matches_criteria_));
EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _))
.WillOnce(Invoke([&](Upstream::ResourcePriority, absl::optional<Http::Protocol>,
Upstream::LoadBalancerContext* context) {
// Same pointer: the route entry's criteria are passed through as-is.
EXPECT_EQ(context->metadataMatchCriteria(),
&callbacks_.route_->route_entry_.metadata_matches_criteria_);
return Upstream::HttpPoolData([]() {}, &cm_.thread_local_cluster_.conn_pool_);
}));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(Return(&cancellable_));
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
// When the router filter gets reset we should cancel the pool request.
EXPECT_CALL(cancellable_, cancel(_));
router_.onDestroy();
}
// NOTE(review): `true` presumably selects the variant where the route entry
// also has match criteria — confirm against the helper's definition.
TEST_F(RouterTest, MetadataMatchCriteriaFromRequest) {
verifyMetadataMatchCriteriaFromRequest(true);
}
// NOTE(review): `false` presumably selects the variant where the route entry
// has no match criteria of its own — confirm against the helper's definition.
TEST_F(RouterTest, MetadataMatchCriteriaFromRequestNoRouteEntryMatch) {
verifyMetadataMatchCriteriaFromRequest(false);
}
// When the route entry has no metadata match criteria, the LB context must
// report nullptr as well.
TEST_F(RouterTest, NoMetadataMatchCriteria) {
ON_CALL(callbacks_.route_->route_entry_, metadataMatchCriteria()).WillByDefault(Return(nullptr));
EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _))
.WillOnce(Invoke([&](Upstream::ResourcePriority, absl::optional<Http::Protocol>,
Upstream::LoadBalancerContext* context) {
EXPECT_EQ(context->metadataMatchCriteria(), nullptr);
return Upstream::HttpPoolData([]() {}, &cm_.thread_local_cluster_.conn_pool_);
}));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(Return(&cancellable_));
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
// When the router filter gets reset we should cancel the pool request.
EXPECT_CALL(cancellable_, cancel(_));
router_.onDestroy();
}
TEST_F(RouterTest, CancelBeforeBoundToPool) {
  // Destroying the filter while the pool request is still pending must cancel
  // that request and leave the upstream stats untouched.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(Return(&cancellable_));
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);
  EXPECT_CALL(cancellable_, cancel(_));
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// If the cluster manager cannot provide a conn pool (no healthy host), the
// router must send a 503 local reply with the NoHealthyUpstream flag and must
// NOT count it as maintenance mode.
TEST_F(RouterTest, NoHost) {
EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _)).WillOnce(Return(absl::nullopt));
Http::TestResponseHeaderMapImpl response_headers{
{":status", "503"}, {"content-length", "19"}, {"content-type", "text/plain"}};
EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
EXPECT_CALL(callbacks_, encodeData(_, true));
EXPECT_CALL(callbacks_.stream_info_,
setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream));
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
// The maintenance-mode counter must stay at zero for this failure mode.
EXPECT_EQ(0U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
.counter("upstream_rq_maintenance_mode")
.value());
EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_EQ(callbacks_.details(), "no_healthy_upstream");
}
// Maintenance mode on the cluster short-circuits the request with a 503
// (x-envoy-overloaded) local reply, bumps the maintenance-mode and dropped
// counters, and never starts a tracing span injection.
TEST_F(RouterTest, MaintenanceMode) {
EXPECT_CALL(*cm_.thread_local_cluster_.cluster_.info_, maintenanceMode()).WillOnce(Return(true));
Http::TestResponseHeaderMapImpl response_headers{{":status", "503"},
{"content-length", "16"},
{"content-type", "text/plain"},
{"x-envoy-overloaded", "true"}};
EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
EXPECT_CALL(callbacks_, encodeData(_, true));
EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow));
// No upstream attempt, so no trace context should be injected.
EXPECT_CALL(span_, injectContext(_)).Times(0);
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
.counter("upstream_rq_maintenance_mode")
.value());
EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// Dropped requests are reported via load-report stats.
EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->load_report_stats_store_
.counter("upstream_rq_dropped")
.value());
EXPECT_EQ(callbacks_.details(), "maintenance_mode");
}
TEST_F(RouterTest, ResponseCodeDetailsSetByUpstream) {
  // A plain 200 round trip: the upstream response completes the stream and the
  // host success stats are incremented.
  NiceMock<Http::MockRequestEncoder> upstream_encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(upstream_encoder, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);
  Http::ResponseHeaderMapPtr upstream_response(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(upstream_response), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
}
// Validate that x-envoy-upstream-service-time is added on a regular
// request/response path.
// On a normal 200 round trip the router must add x-envoy-upstream-service-time
// to the downstream response headers.
TEST_F(RouterTest, EnvoyUpstreamServiceTime) {
NiceMock<Http::MockRequestEncoder> encoder1;
Http::ResponseDecoder* response_decoder = nullptr;
expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(200));
EXPECT_CALL(callbacks_, encodeHeaders_(_, true))
.WillOnce(Invoke([](Http::HeaderMap& headers, bool) {
// The service-time header must be present (value is timing-dependent).
EXPECT_FALSE(headers.get(Http::Headers::get().EnvoyUpstreamServiceTime).empty());
}));
response_decoder->decodeHeaders(std::move(response_headers), true);
EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
}
// Validate that x-envoy-attempt-count is added to request headers when the option is true.
TEST_F(RouterTest, EnvoyAttemptCountInRequest) {
  // Option enabled, no preset header: the router stamps attempt count 1.
  verifyAttemptCountInRequestBasic(/*set_include_attempt_count_in_request=*/true,
                                   /*preset_count=*/absl::nullopt,
                                   /*expected_count=*/1);
}
// Validate that x-envoy-attempt-count is overwritten by the router on request headers, if the
// header is sent from the downstream and the option is set to true.
TEST_F(RouterTest, EnvoyAttemptCountInRequestOverwritten) {
  // Option enabled, downstream preset 123: the router overwrites it with 1.
  verifyAttemptCountInRequestBasic(/*set_include_attempt_count_in_request=*/true,
                                   /*preset_count=*/123,
                                   /*expected_count=*/1);
}
// Validate that x-envoy-attempt-count is not overwritten by the router on request headers, if the
// header is sent from the downstream and the option is set to false.
TEST_F(RouterTest, EnvoyAttemptCountInRequestNotOverwritten) {
  // Option disabled: the downstream-supplied value 123 is left untouched.
  verifyAttemptCountInRequestBasic(/*set_include_attempt_count_in_request=*/false,
                                   /*preset_count=*/123,
                                   /*expected_count=*/123);
}
// Mock retry options predicate used to verify that configured predicates are
// consulted on retry and that their returned options are applied.
class MockRetryOptionsPredicate : public Upstream::RetryOptionsPredicate {
public:
MOCK_METHOD(UpdateOptionsReturn, updateOptions, (const UpdateOptionsParameters& parameters),
(const));
};
// Also verify retry options predicates work.
// A 5xx-triggered retry must bump x-envoy-attempt-count on the request from 1
// to 2, and a retry options predicate's returned socket options must be
// applied to the retried upstream request.
TEST_F(RouterTest, EnvoyAttemptCountInRequestUpdatedInRetries) {
auto retry_options_predicate = std::make_shared<MockRetryOptionsPredicate>();
callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back(
retry_options_predicate);
setIncludeAttemptCountInRequest(true);
NiceMock<Http::MockRequestEncoder> encoder1;
Http::ResponseDecoder* response_decoder = nullptr;
expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_EQ(1U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// Initial request has 1 attempt.
EXPECT_EQ(1, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));
// 5xx response.
router_.retry_state_->expectHeadersRetry();
// The predicate returns new socket options that the retry must pick up.
Upstream::RetryOptionsPredicate::UpdateOptionsReturn update_options_return{
std::make_shared<Network::Socket::Options>()};
EXPECT_CALL(*retry_options_predicate, updateOptions(_)).WillOnce(Return(update_options_return));
Http::ResponseHeaderMapPtr response_headers1(
new Http::TestResponseHeaderMapImpl{{":status", "503"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(503));
// NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
response_decoder->decodeHeaders(std::move(response_headers1), true);
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
// Verify retry options predicate return values have been updated.
EXPECT_EQ(update_options_return.new_upstream_socket_options_.value(),
router_.upstreamSocketOptions());
// We expect the 5xx response to kick off a new request.
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
NiceMock<Http::MockRequestEncoder> encoder2;
expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
router_.retry_state_->callback_();
EXPECT_EQ(2U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// The retry should cause the header to increase to 2.
EXPECT_EQ(2, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));
// Normal response.
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->health_checker_, setUnhealthy(_))
.Times(0);
Http::ResponseHeaderMapPtr response_headers2(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(200));
response_decoder->decodeHeaders(std::move(response_headers2), true);
EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
// Stream info records the final attempt count (2: original + one retry).
EXPECT_EQ(2, callbacks_.stream_info_.attemptCount().value());
}
// Validate that x-envoy-attempt-count is added when option is true.
TEST_F(RouterTest, EnvoyAttemptCountInResponse) {
  // Option enabled, no upstream preset: response carries attempt count 1.
  verifyAttemptCountInResponseBasic(/*set_include_attempt_count_in_response=*/true,
                                    /*preset_count=*/absl::nullopt,
                                    /*expected_count=*/1);
}
// Validate that x-envoy-attempt-count is overwritten by the router on response headers, if the
// header is sent from the upstream and the option is set to true.
TEST_F(RouterTest, EnvoyAttemptCountInResponseOverwritten) {
  // Option enabled, upstream preset 123: the router overwrites it with 1.
  verifyAttemptCountInResponseBasic(/*set_include_attempt_count_in_response=*/true,
                                    /*preset_count=*/123,
                                    /*expected_count=*/1);
}
// Validate that x-envoy-attempt-count is not overwritten by the router on response headers, if the
// header is sent from the upstream and the option is not set to true.
TEST_F(RouterTest, EnvoyAttemptCountInResponseNotOverwritten) {
  // Option disabled: the upstream-supplied value 123 is left untouched.
  verifyAttemptCountInResponseBasic(/*set_include_attempt_count_in_response=*/false,
                                    /*preset_count=*/123,
                                    /*expected_count=*/123);
}
// Validate that x-envoy-attempt-count is present in local replies after an upstream attempt is
// made.
// Even when the upstream attempt fails at the pool level and the router sends
// a local reply, x-envoy-attempt-count must reflect the one attempt made.
TEST_F(RouterTest, EnvoyAttemptCountInResponsePresentWithLocalReply) {
setIncludeAttemptCountInResponse(true);
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
// Fail the stream immediately with no transport failure reason.
callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure,
absl::string_view(), cm_.thread_local_cluster_.conn_pool_.host_);
return nullptr;
}));
Http::TestResponseHeaderMapImpl response_headers{{":status", "503"},
{"content-length", "91"},
{"content-type", "text/plain"},
{"x-envoy-attempt-count", "1"}};
EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
EXPECT_CALL(callbacks_, encodeData(_, true));
EXPECT_CALL(callbacks_.stream_info_,
setResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure));
Http::TestRequestHeaderMapImpl headers;
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
// Pool failure, so upstream request was never initiated.
EXPECT_EQ(0U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
EXPECT_EQ(callbacks_.details(), "upstream_reset_before_response_started{connection_failure}");
EXPECT_EQ(1U, callbacks_.stream_info_.attemptCount().value());
}
// Validate that the x-envoy-attempt-count header in the downstream response reflects the number
// of upstream requests that occurred when retries take place.
TEST_F(RouterTest, EnvoyAttemptCountInResponseWithRetries) {
  setIncludeAttemptCountInResponse(true);
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // First attempt is recorded in stream info.
  EXPECT_EQ(1U, callbacks_.stream_info_.attemptCount().value());
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  response_decoder->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We expect the 5xx response to kick off a new request.
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  // Fire the retry callback to start the second attempt.
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_EQ(2U, callbacks_.stream_info_.attemptCount().value());
  // Normal response.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->health_checker_, setUnhealthy(_))
      .Times(0);
  Http::ResponseHeaderMapPtr response_headers2(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, true))
      .WillOnce(Invoke([](Http::ResponseHeaderMap& headers, bool) {
        // Because a retry happened the number of attempts in the response headers should be 2.
        EXPECT_EQ(2, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));
      }));
  response_decoder->decodeHeaders(std::move(response_headers2), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Append cluster with the default header name (no custom header supplied).
TEST_F(RouterTest, AppendCluster0) {
  // nullopt selects the default cluster header name inside the helper.
  testAppendCluster(absl::nullopt);
}
// Append cluster with a custom header name.
TEST_F(RouterTest, AppendCluster1) {
  const Http::LowerCaseString cluster_header("x-custom-cluster");
  testAppendCluster(absl::make_optional(cluster_header));
}
// Append hostname and address using the default header names for both.
TEST_F(RouterTest, AppendUpstreamHost00) {
  // nullopt / nullopt selects the default hostname and host-address header names.
  testAppendUpstreamHost(absl::nullopt, absl::nullopt);
}
// Append hostname and address with a custom host-address header name only.
TEST_F(RouterTest, AppendUpstreamHost01) {
  const Http::LowerCaseString address_header("x-custom-upstream-host-address");
  testAppendUpstreamHost(absl::nullopt, absl::make_optional(address_header));
}
// Append hostname and address with a custom hostname header name only.
TEST_F(RouterTest, AppendUpstreamHost10) {
  const Http::LowerCaseString hostname_header("x-custom-upstream-hostname");
  testAppendUpstreamHost(absl::make_optional(hostname_header), absl::nullopt);
}
// Append hostname and address with custom header names for both.
TEST_F(RouterTest, AppendUpstreamHost11) {
  const Http::LowerCaseString hostname_header("x-custom-upstream-hostname");
  const Http::LowerCaseString address_header("x-custom-upstream-host-address");
  testAppendUpstreamHost(absl::make_optional(hostname_header),
                         absl::make_optional(address_header));
}
// Do not forward, with the default not-forwarded header name.
TEST_F(RouterTest, DoNotForward0) {
  // nullopt selects the default not-forwarded header name inside the helper.
  testDoNotForward(absl::nullopt);
}
// Do not forward, with a custom not-forwarded header name.
TEST_F(RouterTest, DoNotForward1) {
  const Http::LowerCaseString not_forwarded_header("x-custom-not-forwarded");
  testDoNotForward(absl::make_optional(not_forwarded_header));
}
// Validate that all DebugConfig options play nicely with each other.
TEST_F(RouterTest, AllDebugConfig) {
  // Install a filter-state DebugConfig enabling every debug feature at once
  // (append-cluster, append-upstream-host, do-not-forward), all with default header names.
  auto debug_config = std::make_unique<DebugConfig>(
      /* append_cluster */ true,
      /* cluster_header */ absl::nullopt,
      /* append_upstream_host */ true,
      /* hostname_header */ absl::nullopt,
      /* host_address_header */ absl::nullopt,
      /* do_not_forward */ true,
      /* not_forwarded_header */ absl::nullopt);
  callbacks_.streamInfo().filterState()->setData(DebugConfig::key(), std::move(debug_config),
                                                 StreamInfo::FilterState::StateType::ReadOnly,
                                                 StreamInfo::FilterState::LifeSpan::FilterChain);
  cm_.thread_local_cluster_.conn_pool_.host_->hostname_ = "scooby.doo";
  // do-not-forward short-circuits the request: a local 204 is expected, carrying all of the
  // debug headers with their default names.
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "204"},
      {"x-envoy-cluster", "fake_cluster"},
      {"x-envoy-upstream-hostname", "scooby.doo"},
      {"x-envoy-upstream-host-address", "10.0.0.5:9211"},
      {"x-envoy-not-forwarded", "true"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  // Since the request was not forwarded, no upstream request should have been recorded.
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
}
// Verify that when retry admission reports NoOverflow, no further attempt is made and the
// UpstreamOverflow response flag is set on the stream.
TEST_F(RouterTest, NoRetriesOverflow) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  response_decoder->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We expect the 5xx response to kick off a new request.
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // RetryOverflow kicks in: the second 5xx may not be retried and the stream is flagged.
  EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow));
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _))
      .WillOnce(Return(RetryStatus::NoOverflow));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->health_checker_, setUnhealthy(_))
      .Times(0);
  Http::ResponseHeaderMapPtr response_headers2(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  response_decoder->decodeHeaders(std::move(response_headers2), true);
  // Both attempts count as host errors.
  EXPECT_TRUE(verifyHostUpstreamStats(0, 2));
}
// Verify that a stream reset occurring while the router is still encoding request headers is
// handled as an upstream error (connect success followed by connect failure in outlier
// detection, and one host error).
TEST_F(RouterTest, ResetDuringEncodeHeaders) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  EXPECT_CALL(callbacks_, removeDownstreamWatermarkCallbacks(_));
  EXPECT_CALL(callbacks_, addDownstreamWatermarkCallbacks(_));
  // Reset the stream from inside encodeHeaders() itself.
  EXPECT_CALL(encoder, encodeHeaders(_, true))
      .WillOnce(Invoke([&](const Http::HeaderMap&, bool) -> Http::Status {
        encoder.stream_.resetStream(Http::StreamResetReason::RemoteReset);
        return Http::okStatus();
      }));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  // First connection is successful and reset happens later on.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
                        absl::optional<uint64_t>(absl::nullopt)));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// With the runtime guard disabled, a global timeout that fires after upstream response headers
// have already started arriving must NOT bump any of the timeout stats.
TEST_F(RouterTest, UpstreamTimeoutNoStatsEmissionWhenRuntimeGuardFalse) {
  TestScopedRuntime scoped_runtime;
  // Disable the guard so the legacy (no stats emission) behavior is exercised.
  Runtime::LoaderSingleton::getExisting()->mergeValues(
      {{"envoy.reloadable_features.do_not_await_headers_on_upstream_timeout_to_emit_stats",
        "false"}});
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
  // Response headers arrive (without end of stream) before the timeout fires.
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  response_decoder->decodeHeaders(std::move(response_headers), false);
  response_timeout_->invokeCallback();
  // No timeout counters are incremented in this mode.
  EXPECT_EQ(0U,
            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_timeout")
                .value());
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_timeout_.value());
  EXPECT_EQ(0UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
}
// With the runtime guard at its default (true), a global timeout that fires after upstream
// response headers have started arriving still bumps all of the timeout stats.
TEST_F(RouterTest, UpstreamTimeoutAllStatsEmissionWhenRuntimeGuardTrue) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
  // Response headers arrive (without end of stream) before the timeout fires.
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  response_decoder->decodeHeaders(std::move(response_headers), false);
  response_timeout_->invokeCallback();
  // Cluster, virtual-cluster and per-host timeout stats are all incremented once.
  EXPECT_EQ(1U,
            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_timeout")
                .value());
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_timeout_.value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
}
// Verify the basic global upstream timeout flow: the stream is reset locally, a 504 local reply
// is produced, no retry is attempted (shouldRetryReset not called for timeouts), and all timeout
// stats and outlier-detection results are recorded.
TEST_F(RouterTest, UpstreamTimeout) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
  // The router must synthesize a local 504 reply for the timed-out request.
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _, _)).Times(0);
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));
  response_timeout_->invokeCallback();
  EXPECT_EQ(1U,
            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_timeout")
                .value());
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_timeout_.value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// Verify the timeout budget histograms are filled out correctly when using a
// global and per-try timeout in a successful request.
TEST_F(RouterTest, TimeoutBudgetHistogramStat) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  // 400ms global timeout, 200ms per-try timeout.
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "400"},
                                         {"x-envoy-upstream-rq-per-try-timeout-ms", "200"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Global timeout budget used: 80ms elapsed of a 400ms timeout = 20%.
  EXPECT_CALL(
      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
      deliverHistogramToSinks(
          Property(&Stats::Metric::name, "upstream_rq_timeout_budget_percent_used"), 20ull));
  // Per-try budget used: 80ms elapsed of a 200ms per-try timeout = 40%.
  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
              deliverHistogramToSinks(
                  Property(&Stats::Metric::name, "upstream_rq_timeout_budget_per_try_percent_used"),
                  40ull));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(response_headers), false);
  test_time_.advanceTimeWait(std::chrono::milliseconds(80));
  response_decoder->decodeData(data, true);
}
// Verify the timeout budget histograms are filled out correctly when using a
// global and per-try timeout in a failed request. The budget percentages are
// recorded regardless of the response status (500 here).
TEST_F(RouterTest, TimeoutBudgetHistogramStatFailure) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  // 400ms global timeout, 200ms per-try timeout.
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "400"},
                                         {"x-envoy-upstream-rq-per-try-timeout-ms", "200"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Global timeout budget used: 80ms elapsed of a 400ms timeout = 20%.
  EXPECT_CALL(
      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
      deliverHistogramToSinks(
          Property(&Stats::Metric::name, "upstream_rq_timeout_budget_percent_used"), 20ull));
  // Per-try budget used: 80ms elapsed of a 200ms per-try timeout = 40%.
  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
              deliverHistogramToSinks(
                  Property(&Stats::Metric::name, "upstream_rq_timeout_budget_per_try_percent_used"),
                  40ull));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "500"}});
  response_decoder->decodeHeaders(std::move(response_headers), false);
  test_time_.advanceTimeWait(std::chrono::milliseconds(80));
  response_decoder->decodeData(data, true);
}
// Verify the timeout budget histograms are filled out correctly when only using a global timeout.
TEST_F(RouterTest, TimeoutBudgetHistogramStatOnlyGlobal) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectPerTryTimerCreate();
  // Only a 200ms global timeout; no per-try timeout configured.
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "200"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Global timeout budget used: 80ms elapsed of a 200ms timeout = 40%.
  EXPECT_CALL(
      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
      deliverHistogramToSinks(
          Property(&Stats::Metric::name, "upstream_rq_timeout_budget_percent_used"), 40ull));
  // Per-try budget used is zero out of an infinite timeout.
  EXPECT_CALL(
      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
      deliverHistogramToSinks(
          Property(&Stats::Metric::name, "upstream_rq_timeout_budget_per_try_percent_used"), 0ull));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(response_headers), false);
  test_time_.advanceTimeWait(std::chrono::milliseconds(80));
  response_decoder->decodeData(data, true);
}
// Verify the timeout budget histograms are filled out correctly across retries: the per-try
// histogram is recorded per attempt, while the global histogram spans the whole request.
TEST_F(RouterTest, TimeoutBudgetHistogramStatDuringRetries) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder1 = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder1, Http::Protocol::Http10);
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  // 400ms global timeout, 100ms per-try timeout, retries on 5xx.
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"},
                                         {"x-envoy-upstream-rq-timeout-ms", "400"},
                                         {"x-envoy-upstream-rq-per-try-timeout-ms", "100"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Per-try budget used on the first request: 100ms of 100ms = 100%.
  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
              deliverHistogramToSinks(
                  Property(&Stats::Metric::name, "upstream_rq_timeout_budget_per_try_percent_used"),
                  100ull));
  // Global timeout histogram does not fire on the first request.
  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
              deliverHistogramToSinks(
                  Property(&Stats::Metric::name, "upstream_rq_timeout_budget_percent_used"), _))
      .Times(0);
  // Per-try timeout.
  test_time_.advanceTimeWait(std::chrono::milliseconds(100));
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "504"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(504));
  response_decoder1->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We expect the 5xx response to kick off a new request.
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder2;
  Http::ResponseDecoder* response_decoder2 = nullptr;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder2, Http::Protocol::Http10);
  expectPerTryTimerCreate();
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Per-try budget exhausted on the second try: again 100ms of 100ms = 100%.
  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
              deliverHistogramToSinks(
                  Property(&Stats::Metric::name, "upstream_rq_timeout_budget_per_try_percent_used"),
                  100ull));
  // Global timeout percentage used across both tries: 200ms of 400ms = 50%.
  EXPECT_CALL(
      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
      deliverHistogramToSinks(
          Property(&Stats::Metric::name, "upstream_rq_timeout_budget_percent_used"), 50ull));
  // Trigger second request failure via the per-try timeout; a local 504 reply is produced.
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(encoder2.stream_, resetStream(Http::StreamResetReason::LocalReset));
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  test_time_.advanceTimeWait(std::chrono::milliseconds(100));
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _, _));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));
  per_try_timeout_->invokeCallback();
  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_rq_per_try_timeout")
                    .value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 2));
}
// Verify the timeout budget histograms are filled out correctly when the global timeout occurs
// during a retry: the global histogram fires at 100%, and the per-try histogram for the
// interrupted attempt is suppressed.
TEST_F(RouterTest, TimeoutBudgetHistogramStatDuringGlobalTimeout) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder1 = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder1, Http::Protocol::Http10);
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  // 400ms global timeout, 320ms per-try timeout, retries on 5xx.
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"},
                                         {"x-envoy-upstream-rq-timeout-ms", "400"},
                                         {"x-envoy-upstream-rq-per-try-timeout-ms", "320"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Per-try budget used on the first request: 160ms of 320ms = 50%.
  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
              deliverHistogramToSinks(
                  Property(&Stats::Metric::name, "upstream_rq_timeout_budget_per_try_percent_used"),
                  50ull));
  // Global timeout histogram does not fire on the first request.
  EXPECT_CALL(cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
              deliverHistogramToSinks(
                  Property(&Stats::Metric::name, "upstream_rq_timeout_budget_percent_used"), _))
      .Times(0);
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  test_time_.advanceTimeWait(std::chrono::milliseconds(160));
  response_decoder1->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We expect the 5xx response to kick off a new request.
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder2;
  Http::ResponseDecoder* response_decoder2 = nullptr;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder2, Http::Protocol::Http10);
  expectPerTryTimerCreate();
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Global timeout was hit, fires 100.
  EXPECT_CALL(
      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
      deliverHistogramToSinks(
          Property(&Stats::Metric::name, "upstream_rq_timeout_budget_percent_used"), 100ull));
  // Per-try budget used on the second request won't fire because the global timeout was hit.
  EXPECT_CALL(
      cm_.thread_local_cluster_.cluster_.info_->timeout_budget_stats_store_,
      deliverHistogramToSinks(
          Property(&Stats::Metric::name, "upstream_rq_timeout_budget_per_try_percent_used"), _))
      .Times(0);
  // Trigger global timeout; the router resets the second attempt and replies locally with 504.
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(encoder2.stream_, resetStream(Http::StreamResetReason::LocalReset));
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  test_time_.advanceTimeWait(std::chrono::milliseconds(240));
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _, _)).Times(0);
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));
  response_timeout_->invokeCallback();
  EXPECT_EQ(1U,
            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_timeout")
                .value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 2));
}
// Validate gRPC OK response stats are sane when the response is trailers only.
TEST_F(RouterTest, GrpcOkTrailersOnly) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  // Send a gRPC request upstream.
  Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"},
                                                 {"grpc-timeout", "20S"}};
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Trailers-only response: grpc-status 0 is reported as HTTP 200 to the outlier detector
  // and counts as one upstream success.
  Http::ResponseHeaderMapPtr upstream_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"grpc-status", "0"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  decoder->decodeHeaders(std::move(upstream_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
}
// Validate gRPC AlreadyExists response stats are sane when the response is trailers only.
TEST_F(RouterTest, GrpcAlreadyExistsTrailersOnly) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  // Send a gRPC request upstream.
  Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"},
                                                 {"grpc-timeout", "20S"}};
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Trailers-only response: grpc-status 6 (AlreadyExists) is reported as HTTP 409 to the
  // outlier detector and counts as one upstream success.
  Http::ResponseHeaderMapPtr upstream_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"grpc-status", "6"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(409));
  decoder->decodeHeaders(std::move(upstream_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
}
// Validate gRPC Unavailable response stats are sane when the response is trailers only.
TEST_F(RouterTest, GrpcOutlierDetectionUnavailableStatusCode) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  // Send a gRPC request upstream.
  Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"},
                                                 {"grpc-timeout", "20S"}};
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Trailers-only response: grpc-status 14 (Unavailable) maps to HTTP 503 for the outlier
  // detector and counts as one upstream error.
  Http::ResponseHeaderMapPtr upstream_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"grpc-status", "14"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  decoder->decodeHeaders(std::move(upstream_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// Validate gRPC Internal response stats are sane when the response is trailers only.
TEST_F(RouterTest, GrpcInternalTrailersOnly) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  // Send a gRPC request upstream.
  Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"},
                                                 {"grpc-timeout", "20S"}};
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Trailers-only response: grpc-status 13 (Internal) maps to HTTP 500 for the outlier
  // detector and counts as one upstream error.
  Http::ResponseHeaderMapPtr upstream_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"grpc-status", "13"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(500));
  decoder->decodeHeaders(std::move(upstream_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// Validate gRPC response stats are sane when response is ended in a DATA
// frame. Without trailers there is no grpc-status, so the stream counts as
// an upstream error.
TEST_F(RouterTest, GrpcDataEndStream) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                         {"grpc-timeout", "20S"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  // Headers alone (no end of stream) record neither success nor error yet.
  response_decoder->decodeHeaders(std::move(response_headers), false);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  // Ending the stream on a DATA frame, with no grpc-status trailer, counts as an error.
  Buffer::OwnedImpl data;
  response_decoder->decodeData(data, true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// Validate gRPC response stats are sane when response is reset after initial
// response HEADERS: the stream counts as an upstream error and the
// reset-after-response-started counter is bumped.
TEST_F(RouterTest, GrpcReset) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                         {"grpc-timeout", "20S"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  // Headers alone (no end of stream) record neither success nor error yet.
  response_decoder->decodeHeaders(std::move(response_headers), false);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  // A remote reset mid-response is treated as an upstream error.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  EXPECT_EQ(1UL, stats_store_.counter("test.rq_reset_after_downstream_response_started").value());
}
// Validate gRPC OK response stats are sane when response is not trailers only.
TEST_F(RouterTest, GrpcOk) {
  NiceMock<Http::MockRequestEncoder> upstream_encoder;
  Http::ResponseDecoder* upstream_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(upstream_encoder, &upstream_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();

  // Complete gRPC request at the headers.
  Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"},
                                                 {"grpc-timeout", "20S"}};
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // 200 response headers first; dispatcher object tracking is exercised on dispatch.
  EXPECT_CALL(callbacks_.dispatcher_, pushTrackedObject(_));
  EXPECT_CALL(callbacks_.dispatcher_, popTrackedObject(_));
  Http::ResponseHeaderMapPtr ok_response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  upstream_decoder->decodeHeaders(std::move(ok_response_headers), false);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));

  // Trailers carrying grpc-status 0 complete the stream as a success.
  EXPECT_CALL(callbacks_.dispatcher_, pushTrackedObject(_));
  EXPECT_CALL(callbacks_.dispatcher_, popTrackedObject(_));
  Http::ResponseTrailerMapPtr ok_trailers(
      new Http::TestResponseTrailerMapImpl{{"grpc-status", "0"}});
  upstream_decoder->decodeTrailers(std::move(ok_trailers));
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
}
// Validate gRPC Internal response stats are sane when response is not trailers only.
TEST_F(RouterTest, GrpcInternal) {
  NiceMock<Http::MockRequestEncoder> upstream_encoder;
  Http::ResponseDecoder* upstream_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(upstream_encoder, &upstream_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();

  // Complete gRPC request at the headers.
  Http::TestRequestHeaderMapImpl request_headers{{"content-type", "application/grpc"},
                                                 {"grpc-timeout", "20S"}};
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // 200 response headers do not decide success/failure for gRPC.
  Http::ResponseHeaderMapPtr internal_response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  upstream_decoder->decodeHeaders(std::move(internal_response_headers), false);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));

  // grpc-status 13 (Internal) in trailers counts as one failed request.
  Http::ResponseTrailerMapPtr internal_trailers(
      new Http::TestResponseTrailerMapImpl{{"grpc-status", "13"}});
  upstream_decoder->decodeTrailers(std::move(internal_trailers));
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// When x-envoy-upstream-rq-timeout-alt-response is set, a global timeout sends
// the alternate (204) local reply instead of the default 504.
TEST_F(RouterTest, UpstreamTimeoutWithAltResponse) {
  NiceMock<Http::MockRequestEncoder> upstream_encoder;
  Http::ResponseDecoder* upstream_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(upstream_encoder, &upstream_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();

  Http::TestRequestHeaderMapImpl request_headers{
      {"x-envoy-upstream-rq-timeout-alt-response", "204"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, false);
  Buffer::OwnedImpl request_body;
  router_.decodeData(request_body, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // Firing the response timeout resets the upstream stream and replies 204 downstream.
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(upstream_encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
  Http::TestResponseHeaderMapImpl alt_response_headers{{":status", "204"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&alt_response_headers), true));
  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _, _)).Times(0);
  EXPECT_CALL(
      cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(204)));
  response_timeout_->invokeCallback();

  // Timeout stats are recorded for the cluster and host.
  EXPECT_EQ(1U,
            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_timeout")
                .value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// Verify the upstream per try idle timeout: when no upstream activity occurs
// within per_try_idle_timeout_, the try is reset, a 504 is sent downstream,
// and the StreamIdleTimeout response flag is visible to upstream access logs.
TEST_F(RouterTest, UpstreamPerTryIdleTimeout) {
  InSequence s;
  callbacks_.route_->route_entry_.retry_policy_.per_try_idle_timeout_ =
      std::chrono::milliseconds(3000);

  // This pattern helps ensure that we're actually invoking the callback.
  bool filter_state_verified = false;
  router_.config().upstream_logs_.push_back(
      std::make_shared<TestAccessLog>([&](const auto& stream_info) {
        filter_state_verified =
            stream_info.hasResponseFlag(StreamInfo::ResponseFlag::StreamIdleTimeout);
      }));

  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  // Initialized to nullptr so that, if the newStream() expectation were ever
  // not satisfied, the dereference below fails deterministically rather than
  // reading an indeterminate pointer (UB).
  Http::ConnectionPool::Callbacks* pool_callbacks = nullptr;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            pool_callbacks = &callbacks;
            return nullptr;
          }));

  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);
  EXPECT_CALL(*response_timeout_, enableTimer(_, _));
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  per_try_idle_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);
  EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _));
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // The per try timeout timer should not be started yet.
  pool_callbacks->onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
                              upstream_stream_info_, Http::Protocol::Http10);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // Firing the idle timer resets the try, disables both timers, and produces a 504.
  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));
  EXPECT_CALL(*per_try_idle_timeout_, disableTimer());
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(*response_timeout_, disableTimer());
  EXPECT_CALL(callbacks_.stream_info_, setResponseCodeDetails("upstream_per_try_idle_timeout"));
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  per_try_idle_timeout_->invokeCallback();

  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_rq_per_try_idle_timeout")
                    .value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  EXPECT_TRUE(filter_state_verified);
}
// Verify the upstream per try idle timeout gets reset in the success case:
// each upstream event (headers, data) re-arms the 3000ms idle timer, and both
// timers are disabled once the response completes.
TEST_F(RouterTest, UpstreamPerTryIdleTimeoutSuccess) {
  InSequence s;
  callbacks_.route_->route_entry_.retry_policy_.per_try_idle_timeout_ =
      std::chrono::milliseconds(3000);

  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  // nullptr-initialized: dereferencing an unset pointer below would otherwise
  // be UB if the newStream() expectation did not fire.
  Http::ConnectionPool::Callbacks* pool_callbacks = nullptr;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            pool_callbacks = &callbacks;
            return nullptr;
          }));

  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);
  EXPECT_CALL(*response_timeout_, enableTimer(_, _));
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  per_try_idle_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);
  EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _));
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // The per try timeout timer should not be started yet.
  pool_callbacks->onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
                              upstream_stream_info_, Http::Protocol::Http10);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // Response headers re-arm the idle timer.
  EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(response_headers), false);

  // Each data frame re-arms the idle timer again.
  EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _));
  response_decoder->decodeData(data, false);

  // Final data frame: one last re-arm, then both timers are disabled at end of stream.
  EXPECT_CALL(*per_try_idle_timeout_, enableTimer(std::chrono::milliseconds(3000), _));
  EXPECT_CALL(*per_try_idle_timeout_, disableTimer());
  EXPECT_CALL(*response_timeout_, disableTimer());
  response_decoder->decodeData(data, true);
}
// Verifies that the per try timeout is initialized once the downstream request has been read.
TEST_F(RouterTest, UpstreamPerTryTimeout) {
  NiceMock<Http::MockRequestEncoder> upstream_encoder;
  Http::ResponseDecoder* upstream_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(upstream_encoder, &upstream_decoder, Http::Protocol::Http10);

  Http::TestRequestHeaderMapImpl request_headers{{"x-envoy-internal", "true"},
                                                 {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, false);

  // We verify that both timeouts are started after decodeData(_, true) is called. This
  // verifies that we are not starting the initial per try timeout on the first onPoolReady.
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  Buffer::OwnedImpl request_body;
  router_.decodeData(request_body, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // Firing the per try timeout resets the try and sends a 504 local reply.
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(upstream_encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
  Http::TestResponseHeaderMapImpl timeout_response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&timeout_response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  EXPECT_CALL(
      cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
  per_try_timeout_->invokeCallback();

  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_rq_per_try_timeout")
                    .value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// Verifies that the per try timeout starts when onPoolReady is called when it occurs
// after the downstream request has been read.
TEST_F(RouterTest, UpstreamPerTryTimeoutDelayedPoolReady) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  // nullptr-initialized so the dereference below cannot read an indeterminate
  // pointer if the newStream() expectation were not satisfied.
  Http::ConnectionPool::Callbacks* pool_callbacks = nullptr;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            pool_callbacks = &callbacks;
            return nullptr;
          }));

  Http::TestRequestHeaderMapImpl headers{{"x-envoy-internal", "true"},
                                         {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);

  // Global timeout starts when decodeData(_, true) is called.
  expectResponseTimerCreate();
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);

  // Per try timeout starts when onPoolReady is called.
  expectPerTryTimerCreate();
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  pool_callbacks->onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
                              upstream_stream_info_, Http::Protocol::Http10);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // Firing the per try timeout resets the try and sends a 504 local reply.
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));
  per_try_timeout_->invokeCallback();

  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_rq_per_try_timeout")
                    .value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// Ensures that the per try callback is not set until the stream becomes available.
TEST_F(RouterTest, UpstreamPerTryTimeoutExcludesNewStream) {
  InSequence s;
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  // nullptr-initialized so the dereference below cannot read an indeterminate
  // pointer if the newStream() expectation were not satisfied.
  Http::ConnectionPool::Callbacks* pool_callbacks = nullptr;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            pool_callbacks = &callbacks;
            return nullptr;
          }));

  response_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);
  EXPECT_CALL(*response_timeout_, enableTimer(_, _));
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-internal", "true"},
                                         {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  per_try_timeout_ = new Event::MockTimer(&callbacks_.dispatcher_);
  EXPECT_CALL(*per_try_timeout_, enableTimer(_, _));
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // The per try timeout timer should not be started yet.
  pool_callbacks->onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
                              upstream_stream_info_, Http::Protocol::Http10);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // Firing the per try timeout resets the try, disables both timers, and sends a 504.
  EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));
  EXPECT_CALL(*per_try_timeout_, disableTimer());
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(*response_timeout_, disableTimer());
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  per_try_timeout_->invokeCallback();

  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_rq_per_try_timeout")
                    .value());
  EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// Tests that a retry is sent after the first request hits the per try timeout, but then
// headers received in response to the first request are still used (and the 2nd request
// canceled). Also verify retry options predicates work.
TEST_F(RouterTest, HedgedPerTryTimeoutFirstRequestSucceeds) {
auto retry_options_predicate = std::make_shared<MockRetryOptionsPredicate>();
callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back(
retry_options_predicate);
enableHedgeOnPerTryTimeout();
// First try: pool immediately provides encoder1; capture its response decoder.
NiceMock<Http::MockRequestEncoder> encoder1;
Http::ResponseDecoder* response_decoder1 = nullptr;
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(
Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
response_decoder1 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
// Both tries connect successfully, hence Times(2).
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
absl::optional<uint64_t>(absl::nullopt)))
.Times(2);
expectPerTryTimerCreate();
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_EQ(1U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// Per-try timeout on the first try: hedged retry is scheduled, the first
// stream is NOT reset, and the retry options predicate is consulted.
EXPECT_CALL(
cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
NiceMock<Http::MockRequestEncoder> encoder2;
Http::ResponseDecoder* response_decoder2 = nullptr;
router_.retry_state_->expectHedgedPerTryTimeoutRetry();
EXPECT_CALL(*retry_options_predicate, updateOptions(_));
per_try_timeout_->invokeCallback();
// Second try: pool provides encoder2 when the retry callback fires.
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(
Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
response_decoder2 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
expectPerTryTimerCreate();
router_.retry_state_->callback_();
EXPECT_EQ(2U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// We should not have updated any stats yet because no requests have been
// canceled
EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
// Now write a 200 back. We expect the 2nd stream to be reset and stats to be
// incremented properly.
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
EXPECT_CALL(*router_.retry_state_, wouldRetryFromHeaders(_, _, _))
.WillOnce(Return(RetryState::RetryDecision::NoRetry));
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(200));
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
EXPECT_CALL(encoder2.stream_, resetStream(_));
EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
.WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) -> void {
EXPECT_EQ(headers.Status()->value(), "200");
EXPECT_TRUE(end_stream);
}));
ASSERT(response_decoder1);
response_decoder1->decodeHeaders(std::move(response_headers), true);
EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
// TODO: Verify hedge stats here once they are implemented.
}
// Tests that an upstream request is reset even if it can't be retried as long as there is
// another in-flight request we're waiting on.
// Sequence:
// 1) first upstream request per try timeout
// 2) second upstream request sent
// 3) second upstream request gets 5xx, retries exhausted, assert it's reset
// 4) first upstream request gets 2xx
TEST_F(RouterTest, HedgedPerTryTimeoutResetsOnBadHeaders) {
enableHedgeOnPerTryTimeout();
// First try: pool immediately provides encoder1; capture its response decoder.
NiceMock<Http::MockRequestEncoder> encoder1;
Http::ResponseDecoder* response_decoder1 = nullptr;
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(
Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
response_decoder1 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
// Both tries connect successfully, hence Times(2).
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
absl::optional<uint64_t>(absl::nullopt)))
.Times(2);
expectPerTryTimerCreate();
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_EQ(1U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// Per-try timeout on the first try schedules a hedged retry; the first stream
// is kept alive (no reset).
EXPECT_CALL(
cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
NiceMock<Http::MockRequestEncoder> encoder2;
Http::ResponseDecoder* response_decoder2 = nullptr;
router_.retry_state_->expectHedgedPerTryTimeoutRetry();
per_try_timeout_->invokeCallback();
// Second try: pool provides encoder2 when the retry callback fires.
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(
Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
response_decoder2 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
expectPerTryTimerCreate();
router_.retry_state_->callback_();
EXPECT_EQ(2U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// We should not have updated any stats yet because no requests have been
// canceled
EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
// Now write a 5xx back on the 2nd request with no retries remaining. The 2nd request
// should be reset immediately.
Http::ResponseHeaderMapPtr bad_response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "500"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(500));
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
EXPECT_CALL(encoder2.stream_, resetStream(_));
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _))
.WillOnce(Return(RetryStatus::NoOverflow));
// Not end_stream, otherwise we wouldn't need to reset.
ASSERT(response_decoder2);
response_decoder2->decodeHeaders(std::move(bad_response_headers), false);
// Now write a 200 back. We expect the 2nd stream to be reset and stats to be
// incremented properly.
Http::ResponseHeaderMapPtr response_headers(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(200));
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
.WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) -> void {
EXPECT_EQ(headers.Status()->value(), "200");
EXPECT_TRUE(end_stream);
}));
// Guard against a null dereference, consistent with the sibling hedging tests.
ASSERT(response_decoder1);
response_decoder1->decodeHeaders(std::move(response_headers), true);
EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
// TODO: Verify hedge stats here once they are implemented.
}
// Three requests sent: 1) 5xx error, 2) per try timeout, 3) gets good response
// headers.
TEST_F(RouterTest, HedgedPerTryTimeoutThirdRequestSucceeds) {
enableHedgeOnPerTryTimeout();
// Verify cluster request/response sizes are accounted for all requests/responses.
EXPECT_CALL(
cm_.thread_local_cluster_.cluster_.info_->request_response_size_stats_store_,
deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_headers_size"), 73ull))
.Times(3);
EXPECT_CALL(
cm_.thread_local_cluster_.cluster_.info_->request_response_size_stats_store_,
deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rq_body_size"), 0ull))
.Times(3);
// Only two responses are received (the second try times out), hence Times(2).
EXPECT_CALL(
cm_.thread_local_cluster_.cluster_.info_->request_response_size_stats_store_,
deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_headers_size"), 10ull))
.Times(2);
EXPECT_CALL(
cm_.thread_local_cluster_.cluster_.info_->request_response_size_stats_store_,
deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_rs_body_size"), 0ull))
.Times(2);
// First try: encoder1, upstream connection id 111.
NiceMock<Http::MockRequestEncoder> encoder1;
Http::ResponseDecoder* response_decoder1 = nullptr;
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(
Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
response_decoder1 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
upstream_stream_info_.downstream_connection_info_provider_->setConnectionID(111);
callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
expectResponseTimerCreate();
expectPerTryTimerCreate();
Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
EXPECT_EQ(1U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
// First try answers 500: a standard (non-hedged) retry is scheduled; nothing
// is sent downstream yet.
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
Http::ResponseHeaderMapPtr response_headers1(
new Http::TestResponseHeaderMapImpl{{":status", "500"}});
// Local origin connect success happens for first and third try.
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
absl::optional<uint64_t>(absl::nullopt)))
.Times(2);
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(500));
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);
router_.retry_state_->expectHeadersRetry();
// NOLINTNEXTLINE: Silence null pointer access warning
response_decoder1->decodeHeaders(std::move(response_headers1), true);
// Second try: encoder2, upstream connection id 222.
NiceMock<Http::MockRequestEncoder> encoder2;
Http::ResponseDecoder* response_decoder2 = nullptr;
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(
Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
response_decoder2 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
upstream_stream_info_.downstream_connection_info_provider_->setConnectionID(222);
callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
expectPerTryTimerCreate();
router_.retry_state_->callback_();
EXPECT_EQ(2U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
// Now trigger a per try timeout on the 2nd request, expect a 3rd
router_.retry_state_->expectHedgedPerTryTimeoutRetry();
EXPECT_CALL(
cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
// Third try: encoder3, upstream connection id 333.
NiceMock<Http::MockRequestEncoder> encoder3;
Http::ResponseDecoder* response_decoder3 = nullptr;
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(
Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
response_decoder3 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
upstream_stream_info_.downstream_connection_info_provider_->setConnectionID(333);
callbacks.onPoolReady(encoder3, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);
per_try_timeout_->invokeCallback();
expectPerTryTimerCreate();
router_.retry_state_->callback_();
EXPECT_EQ(3U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
// Now write a 200 back. We expect the 2nd stream to be reset and stats to be
// incremented properly.
Http::ResponseHeaderMapPtr response_headers2(
new Http::TestResponseHeaderMapImpl{{":status", "200"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(200));
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
EXPECT_CALL(encoder2.stream_, resetStream(_));
EXPECT_CALL(encoder3.stream_, resetStream(_)).Times(0);
EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
.WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) -> void {
EXPECT_EQ(headers.Status()->value(), "200");
EXPECT_TRUE(end_stream);
}));
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
// NOLINTNEXTLINE: Silence null pointer access warning
response_decoder3->decodeHeaders(std::move(response_headers2), true);
EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
// The winning (third) try's connection id is reported downstream.
EXPECT_EQ(333U, callbacks_.stream_info_.upstreamInfo()->upstreamConnectionId());
// TODO: Verify hedge stats here once they are implemented.
}
// First request times out and is retried, and then a response is received.
// Make sure we don't attempt to retry because we already retried for timeout.
TEST_F(RouterTest, RetryOnlyOnceForSameUpstreamRequest) {
enableHedgeOnPerTryTimeout();
// First try: pool immediately provides encoder1; capture its response decoder.
NiceMock<Http::MockRequestEncoder> encoder1;
Http::ResponseDecoder* response_decoder1 = nullptr;
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(
Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
response_decoder1 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
// Both tries connect successfully, hence Times(2).
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
absl::optional<uint64_t>(absl::nullopt)))
.Times(2);
expectPerTryTimerCreate();
expectResponseTimerCreate();
Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, true);
// Per-try timeout on the first try schedules a hedged retry; the first stream
// is kept alive (no reset).
EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
EXPECT_CALL(
cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
router_.retry_state_->expectHedgedPerTryTimeoutRetry();
per_try_timeout_->invokeCallback();
// Second try: encoder2. Its decoder is captured but never driven here; only
// the first try receives a response in this scenario.
NiceMock<Http::MockRequestEncoder> encoder2;
Http::ResponseDecoder* response_decoder2 = nullptr;
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
.WillOnce(
Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
const Http::ConnectionPool::Instance::StreamOptions&)
-> Http::ConnectionPool::Cancellable* {
response_decoder2 = &decoder;
EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
expectPerTryTimerCreate();
router_.retry_state_->callback_();
// Now send a 5xx back and make sure we don't ask whether we should retry it.
Http::ResponseHeaderMapPtr response_headers1(
new Http::TestResponseHeaderMapImpl{{":status", "500"}});
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putHttpResponseCode(500));
// shouldRetryHeaders must NOT be consulted: this upstream request already
// consumed its retry (the per-try timeout). wouldRetryFromHeaders is only an
// advisory check and may still report RetryWithBackoff.
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).Times(0);
EXPECT_CALL(*router_.retry_state_, wouldRetryFromHeaders(_, _, _))
.WillOnce(Return(RetryState::RetryDecision::RetryWithBackoff));
ASSERT(response_decoder1);
response_decoder1->decodeHeaders(std::move(response_headers1), true);
// The global response timeout eventually fires for the still-pending try.
EXPECT_CALL(
cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
response_timeout_->invokeCallback();
}
// Sequence: upstream request hits soft per try timeout and is retried, and
// then "bad" response headers come back before the retry has been scheduled.
// Ensures that the "bad" headers are not sent downstream because there is
// still an attempt pending.
TEST_F(RouterTest, BadHeadersDroppedIfPreviousRetryScheduled) {
  enableHedgeOnPerTryTimeout();
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder1 = nullptr;
  // First attempt: capture the upstream response decoder and complete pool setup.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder1 = &decoder;
            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
            callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  // Both attempts report connect success to the outlier detector.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
                        absl::optional<uint64_t>(absl::nullopt)))
      .Times(2);
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  // A hedged per-try timeout must not reset the first stream.
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  EXPECT_CALL(
      cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
  router_.retry_state_->expectHedgedPerTryTimeoutRetry();
  per_try_timeout_->invokeCallback();
  expectPerTryTimerCreate();
  // Now send a 5xx back and make sure we don't ask whether we should retry it
  // and also that we don't respond downstream with it.
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "500"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(500));
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).Times(0);
  EXPECT_CALL(*router_.retry_state_, wouldRetryFromHeaders(_, _, _))
      .WillOnce(Return(RetryState::RetryDecision::RetryWithBackoff));
  // The 500 must not be proxied downstream while a retry is still pending.
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);
  ASSERT(response_decoder1);
  response_decoder1->decodeHeaders(std::move(response_headers1), true);
  // Now trigger the retry for the per try timeout earlier.
  NiceMock<Http::MockRequestEncoder> encoder2;
  Http::ResponseDecoder* response_decoder2 = nullptr;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder2 = &decoder;
            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
            callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  router_.retry_state_->callback_();
  // The retried attempt's 200 should be the response sent downstream.
  Http::ResponseHeaderMapPtr response_headers2(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool end_stream) -> void {
        EXPECT_EQ(headers.Status()->value(), "200");
        EXPECT_TRUE(end_stream);
      }));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  // Guard the dereference the same way response_decoder1 is guarded above:
  // if newStream was never invoked this fails loudly instead of crashing.
  ASSERT(response_decoder2);
  response_decoder2->decodeHeaders(std::move(response_headers2), true);
}
// Test retrying a request, when the first attempt fails before the client
// has sent any of the body. Also verify retry options predicates work.
TEST_F(RouterTest, RetryRequestBeforeBody) {
  // Install a retry options predicate so we can verify it is consulted on retry.
  auto retry_options_predicate = std::make_shared<MockRetryOptionsPredicate>();
  callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back(
      retry_options_predicate);
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{
      {"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}, {"myheader", "present"}};
  HttpTestUtility::addDefaultHeaders(headers);
  // Headers only so far (end_stream=false): no body has been sent yet.
  router_.decodeHeaders(headers, false);
  // Reset the first attempt before any body; this schedules a retry and must
  // give the options predicate a chance to update stream options.
  router_.retry_state_->expectResetRetry();
  EXPECT_CALL(*retry_options_predicate, updateOptions(_));
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  // The retried attempt must re-send the original request headers.
  EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef("myheader", "present"), false));
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Complete request. Ensure original headers are present.
  const std::string body("body");
  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body), true));
  Buffer::OwnedImpl buf(body);
  router_.decodeData(buf, true);
  // Send successful response, verify success.
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl({{":status", "200"}}));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {
        EXPECT_EQ(headers.Status()->value(), "200");
      }));
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Test retrying a request, when the first attempt fails while the client
// is sending the body.
TEST_F(RouterTest, RetryRequestDuringBody) {
  // Route decoded data into a buffer so the router can replay it on retry.
  Buffer::OwnedImpl decoding_buffer;
  EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));
  EXPECT_CALL(callbacks_, addDecodedData(_, true))
      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{
      {"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}, {"myheader", "present"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  // Send part of the body, then reset the first attempt mid-request.
  const std::string body1("body1");
  Buffer::OwnedImpl buf1(body1);
  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
  router_.decodeData(buf1, false);
  router_.retry_state_->expectResetRetry();
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  // The retry must replay the original headers and the buffered body so far.
  EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef("myheader", "present"), false));
  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body1), false));
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Complete request. Ensure original headers are present.
  const std::string body2("body2");
  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body2), true));
  Buffer::OwnedImpl buf2(body2);
  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
  router_.decodeData(buf2, true);
  // Send successful response, verify success.
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl({{":status", "200"}}));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {
        EXPECT_EQ(headers.Status()->value(), "200");
      }));
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Test retrying a request, when the first attempt fails while the client
// is sending the body, with more data arriving in between upstream attempts
// (which would normally happen during the backoff timer interval), but not end_stream.
TEST_F(RouterTest, RetryRequestDuringBodyDataBetweenAttemptsNotEndStream) {
  // Route decoded data into a buffer so the router can replay it on retry.
  Buffer::OwnedImpl decoding_buffer;
  EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));
  EXPECT_CALL(callbacks_, addDecodedData(_, true))
      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{
      {"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}, {"myheader", "present"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  const std::string body1("body1");
  Buffer::OwnedImpl buf1(body1);
  // enabled() is consulted once per decodeData() call — three calls below.
  EXPECT_CALL(*router_.retry_state_, enabled()).Times(3).WillRepeatedly(Return(true));
  router_.decodeData(buf1, false);
  router_.retry_state_->expectResetRetry();
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  // More data arrives while no upstream attempt is active (not end_stream).
  const std::string body2("body2");
  Buffer::OwnedImpl buf2(body2);
  router_.decodeData(buf2, false);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  // The retry replays the headers and both buffered chunks in one write.
  EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef("myheader", "present"), false));
  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body1 + body2), false));
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Complete request. Ensure original headers are present.
  const std::string body3("body3");
  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body3), true));
  Buffer::OwnedImpl buf3(body3);
  router_.decodeData(buf3, true);
  // Send successful response, verify success.
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl({{":status", "200"}}));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {
        EXPECT_EQ(headers.Status()->value(), "200");
      }));
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Test retrying a request, when the first attempt fails while the client
// is sending the body, with the rest of the request arriving in between upstream
// request attempts.
TEST_F(RouterTest, RetryRequestDuringBodyCompleteBetweenAttempts) {
  // Route decoded data into a buffer so the router can replay it on retry.
  Buffer::OwnedImpl decoding_buffer;
  EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));
  EXPECT_CALL(callbacks_, addDecodedData(_, true))
      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  Http::TestRequestHeaderMapImpl headers{
      {"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}, {"myheader", "present"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  const std::string body1("body1");
  Buffer::OwnedImpl buf1(body1);
  // enabled() is consulted once per decodeData() call — two calls below.
  EXPECT_CALL(*router_.retry_state_, enabled()).Times(2).WillRepeatedly(Return(true));
  router_.decodeData(buf1, false);
  router_.retry_state_->expectResetRetry();
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  // Complete request while there is no upstream request.
  const std::string body2("body2");
  Buffer::OwnedImpl buf2(body2);
  router_.decodeData(buf2, true);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  // The retry replays the headers plus the full buffered body with end_stream.
  EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef("myheader", "present"), false));
  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body1 + body2), true));
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Send successful response, verify success.
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl({{":status", "200"}}));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {
        EXPECT_EQ(headers.Status()->value(), "200");
      }));
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Test retrying a request, when the first attempt fails while the client
// is sending the body, with the trailers arriving in between upstream
// request attempts.
TEST_F(RouterTest, RetryRequestDuringBodyTrailerBetweenAttempts) {
  // Route decoded data into a buffer so the router can replay it on retry.
  Buffer::OwnedImpl decoding_buffer;
  EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));
  EXPECT_CALL(callbacks_, addDecodedData(_, true))
      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  Http::TestRequestHeaderMapImpl headers{
      {"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}, {"myheader", "present"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  const std::string body1("body1");
  Buffer::OwnedImpl buf1(body1);
  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
  router_.decodeData(buf1, false);
  router_.retry_state_->expectResetRetry();
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  // Complete request while there is no upstream request.
  Http::TestRequestTrailerMapImpl trailers{{"some", "trailer"}};
  router_.decodeTrailers(trailers);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  // The retry replays the headers, the buffered body, and the trailers.
  EXPECT_CALL(encoder2, encodeHeaders(HeaderHasValueRef("myheader", "present"), false));
  EXPECT_CALL(encoder2, encodeData(BufferStringEqual(body1), false));
  EXPECT_CALL(encoder2, encodeTrailers(HeaderMapEqualRef(&trailers)));
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Send successful response, verify success.
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl({{":status", "200"}}));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {
        EXPECT_EQ(headers.Status()->value(), "200");
      }));
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Test retrying a request, when the first attempt fails while the client
// is sending the body, with the rest of the request arriving in between upstream
// request attempts, but exceeding the buffer limit causing a downstream request abort.
TEST_F(RouterTest, RetryRequestDuringBodyBufferLimitExceeded) {
  // Route decoded data into a buffer so the router can replay it on retry.
  Buffer::OwnedImpl decoding_buffer;
  EXPECT_CALL(callbacks_, decodingBuffer()).WillRepeatedly(Return(&decoding_buffer));
  EXPECT_CALL(callbacks_, addDecodedData(_, true))
      .WillRepeatedly(Invoke([&](Buffer::Instance& data, bool) { decoding_buffer.move(data); }));
  // Cap the retry buffer at 10 bytes so the 50-byte chunk below overflows it.
  EXPECT_CALL(callbacks_.route_->route_entry_, retryShadowBufferLimit()).WillOnce(Return(10));
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  Http::TestRequestHeaderMapImpl headers{
      {"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}, {"myheader", "present"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  const std::string body1("body1");
  Buffer::OwnedImpl buf1(body1);
  // enabled() is consulted once per decodeData() call — two calls below.
  EXPECT_CALL(*router_.retry_state_, enabled()).Times(2).WillRepeatedly(Return(true));
  router_.decodeData(buf1, false);
  router_.retry_state_->expectResetRetry();
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  // Complete request while there is no upstream request.
  const std::string body2(50, 'a');
  Buffer::OwnedImpl buf2(body2);
  router_.decodeData(buf2, false);
  // Exceeding the retry buffer limit abandons the retry and aborts the
  // downstream request with the detail below.
  EXPECT_EQ(callbacks_.details(), "request_payload_exceeded_retry_buffer_limit");
  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("retry_or_shadow_abandoned")
                    .value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// Two requests are sent (slow request + hedged retry) and then global timeout
// is hit. Verify everything gets cleaned up.
TEST_F(RouterTest, HedgedPerTryTimeoutGlobalTimeout) {
  enableHedgeOnPerTryTimeout();
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder1 = nullptr;
  // First attempt: capture decoder and report pool readiness immediately.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder1 = &decoder;
            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
            callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  // Both attempts report connect success to the outlier detector.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
                        absl::optional<uint64_t>(absl::nullopt)))
      .Times(2);
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Per try timeout fires: hedged retry is scheduled, the first stream is not
  // reset, and nothing is sent downstream yet.
  EXPECT_CALL(
      cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);
  router_.retry_state_->expectHedgedPerTryTimeoutRetry();
  per_try_timeout_->invokeCallback();
  NiceMock<Http::MockRequestEncoder> encoder2;
  Http::ResponseDecoder* response_decoder2 = nullptr;
  // Second (hedged) attempt; its decoder is captured but never used because
  // neither attempt responds before the global timeout below.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder2 = &decoder;
            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
            callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  expectPerTryTimerCreate();
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  // Now trigger global timeout, expect everything to be reset
  EXPECT_CALL(encoder1.stream_, resetStream(_));
  EXPECT_CALL(encoder2.stream_, resetStream(_));
  EXPECT_CALL(
      cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {
        EXPECT_EQ(headers.Status()->value(), "504");
      }));
  response_timeout_->invokeCallback();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 2));
  EXPECT_EQ(2, cm_.thread_local_cluster_.conn_pool_.host_->stats_.rq_timeout_.value());
  // TODO: Verify hedge stats here once they are implemented.
}
// Sequence: 1) per try timeout w/ hedge retry, 2) second request gets a 5xx
// response, no retries remaining 3) first request gets a 5xx response.
TEST_F(RouterTest, HedgingRetriesExhaustedBadResponse) {
  enableHedgeOnPerTryTimeout();
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder1 = nullptr;
  // First attempt: capture decoder and report pool readiness immediately.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder1 = &decoder;
            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
            callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
                        absl::optional<uint64_t>(absl::nullopt)));
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Per try timeout triggers a hedged retry; the first stream stays open and
  // nothing is sent downstream.
  EXPECT_CALL(
      cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);
  router_.retry_state_->expectHedgedPerTryTimeoutRetry();
  per_try_timeout_->invokeCallback();
  NiceMock<Http::MockRequestEncoder> encoder2;
  Http::ResponseDecoder* response_decoder2 = nullptr;
  // Second (hedged) attempt.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder2 = &decoder;
            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
            callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
                        absl::optional<uint64_t>(absl::nullopt)));
  expectPerTryTimerCreate();
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  // Now trigger a 503 in response to the second request.
  Http::ResponseHeaderMapPtr bad_response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  // Retry budget is exhausted, so the router must keep waiting on attempt 1.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _))
      .WillOnce(Return(RetryStatus::NoRetryLimitExceeded));
  ASSERT(response_decoder2);
  response_decoder2->decodeHeaders(std::move(bad_response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Now trigger a 503 in response to the first request.
  Http::ResponseHeaderMapPtr bad_response_headers2(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  // We should not call shouldRetryHeaders() because you never retry the same
  // request twice.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).Times(0);
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {
        EXPECT_EQ(headers.Status()->value(), "503");
      }));
  // Guard this dereference the same way response_decoder2 is guarded above.
  ASSERT(response_decoder1);
  response_decoder1->decodeHeaders(std::move(bad_response_headers2), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 2));
}
// Sequence: 1) per try timeout w/ hedge retry, 2) first request gets reset by upstream,
// 3) 2nd request gets a 200 which should be sent downstream.
TEST_F(RouterTest, HedgingRetriesProceedAfterReset) {
  enableHedgeOnPerTryTimeout();
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder1 = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder1, Http::Protocol::Http10);
  // First is reset
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
                        absl::optional<uint64_t>(absl::nullopt)))
      .Times(2);
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Per try timeout triggers a hedged retry; the first stream is not reset and
  // nothing goes downstream yet.
  EXPECT_CALL(
      cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);
  router_.retry_state_->expectHedgedPerTryTimeoutRetry();
  per_try_timeout_->invokeCallback();
  NiceMock<Http::MockRequestEncoder> encoder2;
  Http::ResponseDecoder* response_decoder2 = nullptr;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder2, Http::Protocol::Http10);
  expectPerTryTimerCreate();
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  // Now trigger an upstream reset in response to the first request.
  EXPECT_CALL(encoder1.stream_, resetStream(_));
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We should not call shouldRetryReset() because you never retry the same
  // request twice.
  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _, _)).Times(0);
  // Now trigger a 200 in response to the second request.
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {
        EXPECT_EQ(headers.Status()->value(), "200");
      }));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  // Guard against a null decoder if the hedged stream was never created
  // (consistent with the ASSERT guards used by sibling hedging tests).
  ASSERT(response_decoder2);
  response_decoder2->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Sequence: 1) request with data hits per try timeout w/ hedge retry, 2)
// second request is immediately reset 3) 1st request gets a 200.
// The goal of this test is to ensure that the router can properly detect that an immediate
// reset happens and that we don't accidentally write data twice on the first request.
TEST_F(RouterTest, HedgingRetryImmediatelyReset) {
  enableHedgeOnPerTryTimeout();
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  router_.retry_425_response_ = true;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
                        absl::optional<uint64_t>(absl::nullopt)));
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  Buffer::OwnedImpl body("test body");
  // The first attempt encodes body data exactly once; the failed retry below
  // must not cause a second write to this encoder.
  EXPECT_CALL(encoder, encodeData(_, _));
  Buffer::InstancePtr body_data(new Buffer::OwnedImpl("hello"));
  router_.retry_state_->expectHedgedPerTryTimeoutRetry();
  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, true));
  // Per try timeout: hedged retry is scheduled; first stream stays open and
  // nothing goes downstream.
  EXPECT_CALL(
      cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
      putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
  EXPECT_CALL(encoder.stream_, resetStream(_)).Times(0);
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0);
  per_try_timeout_->invokeCallback();
  NiceMock<Http::MockRequestEncoder> encoder2;
  // Per-timeout retry wouldn't enable 0-RTT.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks,
                           const Http::ConnectionPool::Instance::StreamOptions& options)
                           -> Http::ConnectionPool::Cancellable* {
        EXPECT_FALSE(options.can_send_early_data_);
        EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
        EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
                    putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
        // The hedged attempt is immediately reset at the pool level.
        callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure,
                                absl::string_view(), cm_.thread_local_cluster_.conn_pool_.host_);
        return nullptr;
      }));
  EXPECT_CALL(*router_.retry_state_,
              shouldRetryReset(_, /*http3_used=*/RetryState::Http3Used::Unknown, _))
      .WillOnce(Return(RetryStatus::NoRetryLimitExceeded));
  ON_CALL(callbacks_, decodingBuffer()).WillByDefault(Return(body_data.get()));
  router_.retry_state_->callback_();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Now trigger a 200 in response to the first request.
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  // The request was already retried when the per try timeout occurred so it
  // shouldn't even consult the retry state.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).Times(0);
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _))
      .WillOnce(Invoke([&](Http::ResponseHeaderMap& headers, bool) -> void {
        EXPECT_EQ(headers.Status()->value(), "200");
      }));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
  // The hedged attempt failed at pool acquisition, so only 1 upstream request
  // was actually made.
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// Sequence: first attempt is locally reset and a retry is scheduled, but the
// retry finds no healthy upstream host, so a local 503 goes downstream.
TEST_F(RouterTest, RetryNoneHealthy) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  router_.retry_state_->expectResetRetry();
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
  encoder1.stream_.resetStream(Http::StreamResetReason::LocalReset);
  // The retry cannot acquire a connection pool (no healthy host).
  EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _)).WillOnce(Return(absl::nullopt));
  // The router synthesizes a local 503 with the NoHealthyUpstream flag.
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "503"}, {"content-length", "19"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::NoHealthyUpstream));
  router_.retry_state_->callback_();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Pool failure for the first try, so only 1 upstream request was made.
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// A remote reset of the first upstream attempt should trigger a retry. The
// retried request keeps HTTP/3 usable (can_use_http3_ stays true) because the
// first attempt was not HTTP/3 (shouldRetryReset reports Http3Used::No).
TEST_F(RouterTest, RetryUpstreamReset) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  // Body is buffered (addDecodedData) so it can be replayed on retry.
  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
  EXPECT_CALL(callbacks_, addDecodedData(_, _));
  Buffer::OwnedImpl body("test body");
  router_.decodeData(body, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(Http::StreamResetReason::RemoteReset, _, _))
      .WillOnce(Invoke([this](const Http::StreamResetReason, RetryState::Http3Used http3_used,
                              RetryState::DoRetryResetCallback callback) {
        // First attempt was HTTP/1.0, so HTTP/3 was not used.
        EXPECT_EQ(RetryState::Http3Used::No, http3_used);
        router_.retry_state_->callback_ = [callback]() { callback(/*disable_http3=*/false); };
        return RetryStatus::Yes;
      }));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  // We expect this reset to kick off a new request.
  NiceMock<Http::MockRequestEncoder> encoder2;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions& options)
                     -> Http::ConnectionPool::Cancellable* {
            // disable_http3=false above means the retry may still use HTTP/3.
            EXPECT_TRUE(options.can_use_http3_);
            response_decoder = &decoder;
            EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
                        putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
                                  absl::optional<uint64_t>(absl::nullopt)));
            callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Normal response.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Same as RetryUpstreamReset but the first attempt uses HTTP/3: the retry
// callback disables HTTP/3, so the retried stream must have can_use_http3_
// false and can_send_early_data_ true.
TEST_F(RouterTest, RetryHttp3UpstreamReset) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  router_.retry_425_response_ = true;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http3);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  // Body is buffered (addDecodedData) so it can be replayed on retry.
  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
  EXPECT_CALL(callbacks_, addDecodedData(_, _));
  Buffer::OwnedImpl body("test body");
  router_.decodeData(body, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(Http::StreamResetReason::RemoteReset, _, _))
      .WillOnce(Invoke([this](const Http::StreamResetReason, RetryState::Http3Used http3_used,
                              RetryState::DoRetryResetCallback callback) {
        // First attempt was HTTP/3; disable it for the retry.
        EXPECT_EQ(RetryState::Http3Used::Yes, http3_used);
        router_.retry_state_->callback_ = [callback]() { callback(/*disable_http3=*/true); };
        return RetryStatus::Yes;
      }));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  // We expect this reset to kick off a new request.
  NiceMock<Http::MockRequestEncoder> encoder2;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions& options)
                     -> Http::ConnectionPool::Cancellable* {
            // disable_http3=true above must be reflected in the stream options.
            EXPECT_TRUE(options.can_send_early_data_);
            EXPECT_FALSE(options.can_use_http3_);
            response_decoder = &decoder;
            EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
                        putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
                                  absl::optional<uint64_t>(absl::nullopt)));
            callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Normal response.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// When the per-route retry shadow buffer limit is zero, the request body must
// not be buffered for retry (no addDecodedData call).
TEST_F(RouterTest, NoRetryWithBodyLimit) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  // Set a per route body limit which disallows any buffering.
  EXPECT_CALL(callbacks_.route_->route_entry_, retryShadowBufferLimit()).WillOnce(Return(0));
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  // Unlike RetryUpstreamReset above the data won't be buffered as the body exceeds the buffer limit
  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
  EXPECT_CALL(callbacks_, addDecodedData(_, _)).Times(0);
  Buffer::OwnedImpl body("t");
  router_.decodeData(body, false);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(response_headers), true);
}
// Verifies that when the request fails with an upstream reset (per try timeout in this case)
// before an upstream host has been established, then the onHostAttempted function will not be
// invoked. This ensures that we're not passing a null host to the retry plugins.
TEST_F(RouterTest, RetryUpstreamPerTryTimeout) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            // Host is established for the first attempt, so onHostAttempted fires.
            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
            callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"},
                                         {"x-envoy-internal", "true"},
                                         {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Firing the per try timeout counts as a local-origin timeout for outlier
  // detection and should be consulted as a reset retry.
  router_.retry_state_->expectResetRetry();
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginTimeout, _));
  per_try_timeout_->invokeCallback();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We expect this reset to kick off a new request.
  NiceMock<Http::MockRequestEncoder> encoder2;
  EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
                        putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess,
                                  absl::optional<uint64_t>(absl::nullopt)));
            callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  expectPerTryTimerCreate();
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Normal response.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  ASSERT(response_decoder);
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Asserts that onHostAttempted is *not* called when the upstream connection fails in such
// a way that no host is present.
TEST_F(RouterTest, RetryUpstreamConnectionFailure) {
  Http::ConnectionPool::Callbacks* conn_pool_callbacks;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks,
                           const Http::ConnectionPool::Instance::StreamOptions&)
                           -> Http::ConnectionPool::Cancellable* {
        // Capture the pool callbacks so we can fail the connection attempt below.
        conn_pool_callbacks = &callbacks;
        return nullptr;
      }));
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  // Pool failure with a null host: retry plugins must not see a host.
  EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)).Times(0);
  router_.retry_state_->expectResetRetry();
  conn_pool_callbacks->onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure,
                                     absl::string_view(), nullptr);
  // Pool failure, so no upstream request was made.
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  Http::ResponseDecoder* response_decoder = nullptr;
  // We expect this reset to kick off a new request.
  NiceMock<Http::MockRequestEncoder> encoder2;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            // This time a host is established, so onHostAttempted fires.
            EXPECT_CALL(*router_.retry_state_, onHostAttempted(_));
            callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  router_.retry_state_->callback_();
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Normal response.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
}
// A per try timeout that fires after response headers have already been
// forwarded downstream must not reset the upstream response: the body is
// still delivered and the per-try-timeout counter stays at zero.
TEST_F(RouterTest, DontResetStartedResponseOnUpstreamPerTryTimeout) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectPerTryTimerCreate();
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-internal", "true"},
                                         {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Since the response is already started we don't retry.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, false));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  Buffer::OwnedImpl body("test body");
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers), false);
  // Fire the per try timeout after the response has started: it should be a no-op.
  per_try_timeout_->invokeCallback();
  EXPECT_CALL(callbacks_, encodeData(_, true));
  response_decoder->decodeData(body, true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
  // The timeout did not count because the response was already in flight.
  EXPECT_EQ(0U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_rq_per_try_timeout")
                    .value());
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// An upstream reset arriving after the response has started must not retry;
// since a 200 was already received the host stats count a success even though
// the stream is later reset.
TEST_F(RouterTest, RetryUpstreamResetResponseStarted) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Since the response is already started we don't retry.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, false));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers), false);
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
  // Normally, sendLocalReply will actually send the reply, but in this case the
  // HCM will detect the headers have already been sent and not route through
  // the encoder again.
  EXPECT_CALL(callbacks_, sendLocalReply(_, _, _, _, _)).WillOnce(InvokeWithoutArgs([] {}));
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  // For normal HTTP, once we have a 200 we consider this a success, even if a
  // later reset occurs.
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// The router filter is responsible for not propagating 100-continue headers after the initial 100.
TEST_F(RouterTest, Coalesce1xxHeaders) {
  // Setup.
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Initial 100-continue, this is processed normally.
  EXPECT_CALL(callbacks_, encode1xxHeaders_(_));
  {
    Http::ResponseHeaderMapPtr continue_headers(
        new Http::TestResponseHeaderMapImpl{{":status", "100"}});
    // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
    response_decoder->decode1xxHeaders(std::move(continue_headers));
  }
  EXPECT_EQ(
      1U,
      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_100").value());
  // No encode1xxHeaders() invocation for the second 100-continue (but we continue to track
  // stats from upstream).
  EXPECT_CALL(callbacks_, encode1xxHeaders_(_)).Times(0);
  {
    Http::ResponseHeaderMapPtr continue_headers(
        new Http::TestResponseHeaderMapImpl{{":status", "100"}});
    // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
    response_decoder->decode1xxHeaders(std::move(continue_headers));
  }
  EXPECT_EQ(
      2U,
      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_100").value());
  // Reset stream and cleanup.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// Once a 1xx (100-continue) response has been forwarded, a subsequent upstream
// reset must not consult the retry state at all.
TEST_F(RouterTest, RetryUpstreamReset1xxResponseStarted) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // The 100-continue will result in resetting retry_state_, so when the stream
  // is reset we won't even check shouldRetryReset() (or shouldRetryHeaders()).
  EXPECT_CALL(*router_.retry_state_, shouldRetryReset(_, _, _)).Times(0);
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).Times(0);
  EXPECT_CALL(callbacks_, encode1xxHeaders_(_));
  Http::ResponseHeaderMapPtr continue_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "100"}});
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  response_decoder->decode1xxHeaders(std::move(continue_headers));
  EXPECT_EQ(
      1U,
      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_100").value());
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _));
  encoder1.stream_.resetStream(Http::StreamResetReason::RemoteReset);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// Basic 5xx retry: a 503 on the first attempt triggers a header retry; the
// second attempt succeeds with a 200 and the host is not marked unhealthy.
TEST_F(RouterTest, RetryUpstream5xx) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  response_decoder->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We expect the 5xx response to kick off a new request.
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Normal response.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  // A plain 200 must not trigger any active health check state change.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->health_checker_, setUnhealthy(_))
      .Times(0);
  Http::ResponseHeaderMapPtr response_headers2(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers2), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// If the global response timeout fires while waiting out the retry back-off
// delay, the router responds 504 with UpstreamRequestTimeout and records no
// response time for the host.
TEST_F(RouterTest, RetryTimeoutDuringRetryDelay) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  response_decoder->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Fire timeout.
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, putResponseTime(_))
      .Times(0);
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  response_timeout_->invokeCallback();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
}
// A configured upstream max stream duration creates a timer; firing it without
// a retry policy simply ends the stream with no recorded success or failure.
TEST_F(RouterTest, MaxStreamDurationValidlyConfiguredWithoutRetryPolicy) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  setUpstreamMaxStreamDuration(500);
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(500));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  max_stream_duration_timer_->invokeCallback();
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
}
// A max stream duration of zero disables the feature entirely, so no timer
// may be created for the upstream stream.
TEST_F(RouterTest, MaxStreamDurationDisabledIfSetToZero) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  setUpstreamMaxStreamDuration(0);
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  // Duration of zero means disabled: timer creation must not happen.
  EXPECT_CALL(callbacks_.dispatcher_, createTimer_).Times(0);
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
}
// The max stream duration timer is created but never fires before the router
// is destroyed; the stream ends without success or failure stats.
TEST_F(RouterTest, MaxStreamDurationCallbackNotCalled) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  setUpstreamMaxStreamDuration(5000);
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(5000));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
}
// If response headers (200) are already received when the max stream duration
// timer fires, the attempt still counts as a host success.
TEST_F(RouterTest, MaxStreamDurationWhenDownstreamAlreadyStartedWithoutRetryPolicy) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  setUpstreamMaxStreamDuration(500);
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(500));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(response_headers), false);
  max_stream_duration_timer_->invokeCallback();
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
}
// With "retry-on: reset", hitting the max stream duration on the first attempt
// is treated as a reset and retried; the second attempt gets its own duration
// timer and completes successfully.
TEST_F(RouterTest, MaxStreamDurationWithRetryPolicy) {
  // First upstream request
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  setUpstreamMaxStreamDuration(500);
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(500));
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "reset"},
                                         {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  router_.retry_state_->expectResetRetry();
  max_stream_duration_timer_->invokeCallback();
  // Second upstream request
  NiceMock<Http::MockRequestEncoder> encoder2;
  setUpstreamMaxStreamDuration(500);
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(500));
  router_.retry_state_->callback_();
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// The global timeout fires while the retry attempt is still waiting in the
// connection pool (no host assigned yet): the pending stream must be
// cancelled and a 504 returned.
TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHost) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  response_decoder->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // The retry attempt stays pending in the pool (newStream returns a
  // cancellable handle and never calls onPoolReady).
  Envoy::ConnectionPool::MockCancellable cancellable;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks&,
                           const Http::ConnectionPool::Instance::StreamOptions&)
                           -> Http::ConnectionPool::Cancellable* {
        response_decoder = &decoder;
        return &cancellable;
      }));
  router_.retry_state_->callback_();
  // Fire timeout.
  EXPECT_CALL(cancellable, cancel(_));
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, putResponseTime(_))
      .Times(0);
  Http::TestResponseHeaderMapImpl response_headers{
      {":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));
  response_timeout_->invokeCallback();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Timeout fired so no retry was done.
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// Retry timeout during a retry delay leading to no upstream host, as well as an alt response code.
TEST_F(RouterTest, RetryTimeoutDuringRetryDelayWithUpstreamRequestNoHostAltResponseCode) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  // x-envoy-upstream-rq-timeout-alt-response replaces the 504 on timeout.
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"},
                                         {"x-envoy-internal", "true"},
                                         {"x-envoy-upstream-rq-timeout-alt-response", "204"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  response_decoder->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // The retry attempt stays pending in the pool (never reaches a host).
  Envoy::ConnectionPool::MockCancellable cancellable;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks&,
                           const Http::ConnectionPool::Instance::StreamOptions&)
                           -> Http::ConnectionPool::Cancellable* {
        response_decoder = &decoder;
        return &cancellable;
      }));
  router_.retry_state_->callback_();
  // Fire timeout.
  EXPECT_CALL(cancellable, cancel(_));
  EXPECT_CALL(callbacks_.stream_info_,
              setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, putResponseTime(_))
      .Times(0);
  // Alt response code (204) is used instead of the default 504.
  Http::TestResponseHeaderMapImpl response_headers{{":status", "204"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true));
  response_timeout_->invokeCallback();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // no retry was done.
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// A 503 arriving before the response is complete (end_stream=false) triggers a
// retry; the buffered body and trailers are replayed on the second attempt.
// The final 200 carries x-envoy-immediate-health-check-fail, which must mark
// the host unhealthy via the health checker. Also validates retry/zone stats.
TEST_F(RouterTest, RetryUpstream5xxNotComplete) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  // Body is buffered for replay; StopIterationNoBuffer because the router owns it.
  Buffer::InstancePtr body_data(new Buffer::OwnedImpl("hello"));
  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
  EXPECT_CALL(callbacks_, addDecodedData(_, true));
  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, false));
  Http::TestRequestTrailerMapImpl trailers{{"some", "trailer"}};
  router_.decodeTrailers(trailers);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  // The incomplete upstream response must be locally reset before retrying.
  EXPECT_CALL(encoder1.stream_, resetStream(Http::StreamResetReason::LocalReset));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  response_decoder->decodeHeaders(std::move(response_headers1), false);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We expect the 5xx response to kick off a new request.
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  // The retry replays headers, the buffered body, and the trailers.
  ON_CALL(callbacks_, decodingBuffer()).WillByDefault(Return(body_data.get()));
  EXPECT_CALL(encoder2, encodeHeaders(_, false));
  EXPECT_CALL(encoder2, encodeData(_, false));
  EXPECT_CALL(encoder2, encodeTrailers(_));
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Normal response.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, putResponseTime(_));
  // x-envoy-immediate-health-check-fail in the response marks the host unhealthy.
  EXPECT_CALL(
      cm_.thread_local_cluster_.conn_pool_.host_->health_checker_,
      setUnhealthy(Upstream::HealthCheckHostMonitor::UnhealthyType::ImmediateHealthCheckFail));
  Http::ResponseHeaderMapPtr response_headers2(new Http::TestResponseHeaderMapImpl{
      {":status", "200"}, {"x-envoy-immediate-health-check-fail", "true"}});
  response_decoder->decodeHeaders(std::move(response_headers2), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
  EXPECT_EQ(1U,
            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("retry.upstream_rq_503")
                .value());
  EXPECT_EQ(
      1U,
      cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("upstream_rq_200").value());
  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("zone.zone_name.to_az.upstream_rq_200")
                    .value());
  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("zone.zone_name.to_az.upstream_rq_2xx")
                    .value());
}
// Validate gRPC Cancelled response stats are sane when retry is taking effect.
TEST_F(RouterTest, RetryUpstreamGrpcCancelled) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  // gRPC request with retry-on grpc "cancelled" configured via request header.
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-grpc-on", "cancelled"},
                                         {"x-envoy-internal", "true"},
                                         {"content-type", "application/grpc"},
                                         {"grpc-timeout", "20S"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // gRPC with status "cancelled" (1). The outlier detector is fed 499 for a
  // cancelled gRPC response even though the HTTP status is 200.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"grpc-status", "1"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(499));
  response_decoder->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We expect the grpc-status to result in a retried request. Since the first
  // response was complete (end_stream=true), no stream reset is needed.
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Normal response: grpc-status 0 (OK) is recorded as a 200.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}, {"grpc-status", "0"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Verifies that the initial host is selected with a max host count of one, but
// during retries the RetryPolicy will be consulted.
TEST_F(RouterTest, RetryRespectsMaxHostSelectionCount) {
  router_.reject_all_hosts_ = true;
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  // The retry policy allows up to 3 host selection attempts...
  ON_CALL(*router_.retry_state_, hostSelectionMaxAttempts()).WillByDefault(Return(3));
  // ...but the router should accept any host at this point, since we're not in a retry.
  EXPECT_EQ(1, router_.hostSelectionRetryCount());
  // Body and trailers are buffered so they can be replayed on a retry.
  Buffer::InstancePtr body_data(new Buffer::OwnedImpl("hello"));
  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
  EXPECT_CALL(callbacks_, addDecodedData(_, true));
  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, false));
  Http::TestRequestTrailerMapImpl trailers{{"some", "trailer"}};
  router_.decodeTrailers(trailers);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(encoder1.stream_, resetStream(Http::StreamResetReason::LocalReset));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  response_decoder->decodeHeaders(std::move(response_headers1), false);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We expect the 5xx response to kick off a new request.
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  ON_CALL(callbacks_, decodingBuffer()).WillByDefault(Return(body_data.get()));
  EXPECT_CALL(encoder2, encodeHeaders(_, false));
  EXPECT_CALL(encoder2, encodeData(_, false));
  EXPECT_CALL(encoder2, encodeTrailers(_));
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Now that we've triggered a retry, we should see the configured number of host selections.
  EXPECT_EQ(3, router_.hostSelectionRetryCount());
  // Normal response. No health-check side effect expected on a plain 200.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->health_checker_, setUnhealthy(_))
      .Times(0);
  Http::ResponseHeaderMapPtr response_headers2(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers2), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Verifies that the initial request accepts any host, but during retries
// RetryPolicy will be consulted.
TEST_F(RouterTest, RetryRespectsRetryHostPredicate) {
  router_.reject_all_hosts_ = true;
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  NiceMock<Upstream::MockHost> host;
  // The router should accept any host at this point, since we're not in a retry.
  EXPECT_FALSE(router_.shouldSelectAnotherHost(host));
  // Body and trailers are buffered so they can be replayed on a retry.
  Buffer::InstancePtr body_data(new Buffer::OwnedImpl("hello"));
  EXPECT_CALL(*router_.retry_state_, enabled()).WillOnce(Return(true));
  EXPECT_CALL(callbacks_, addDecodedData(_, true));
  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, false));
  Http::TestRequestTrailerMapImpl trailers{{"some", "trailer"}};
  router_.decodeTrailers(trailers);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(encoder1.stream_, resetStream(Http::StreamResetReason::LocalReset));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  response_decoder->decodeHeaders(std::move(response_headers1), false);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // We expect the 5xx response to kick off a new request.
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  ON_CALL(callbacks_, decodingBuffer()).WillByDefault(Return(body_data.get()));
  EXPECT_CALL(encoder2, encodeHeaders(_, false));
  EXPECT_CALL(encoder2, encodeData(_, false));
  EXPECT_CALL(encoder2, encodeTrailers(_));
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Now that we've triggered a retry, we should see the router reject hosts
  // (reject_all_hosts_ makes the retry host predicate reject everything).
  EXPECT_TRUE(router_.shouldSelectAnotherHost(host));
  // Normal response.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->health_checker_, setUnhealthy(_))
      .Times(0);
  Http::ResponseHeaderMapPtr response_headers2(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers2), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// A redirect response must be passed through (not followed) once the request
// has already consumed the configured maximum number of internal redirects.
TEST_F(RouterTest, InternalRedirectRejectedWhenReachingMaxInternalRedirect) {
  enableRedirects(3);
  setNumPreviousRedirect(3);
  sendRequest();

  // The stream must not be recreated for a rejected redirect; the response is
  // simply proxied downstream.
  EXPECT_CALL(callbacks_, recreateStream(_)).Times(0);
  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);
  Buffer::OwnedImpl response_body("1234567890");
  response_decoder_->decodeData(response_body, true);

  auto& cluster_store = cm_.thread_local_cluster_.cluster_.info_->stats_store_;
  EXPECT_EQ(1U, cluster_store.counter("upstream_internal_redirect_failed_total").value());
  EXPECT_EQ(1UL,
            stats_store_.counter("test.passthrough_internal_redirect_too_many_redirects").value());
}
// An internal redirect whose Location header is empty cannot be followed; the
// response passes through and the bad-location stat is incremented.
TEST_F(RouterTest, InternalRedirectRejectedWithEmptyLocation) {
  enableRedirects();
  sendRequest();
  redirect_headers_->setLocation("");

  EXPECT_CALL(callbacks_, recreateStream(_)).Times(0);
  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);
  Buffer::OwnedImpl response_body("1234567890");
  response_decoder_->decodeData(response_body, true);

  auto& cluster_store = cm_.thread_local_cluster_.cluster_.info_->stats_store_;
  EXPECT_EQ(1U, cluster_store.counter("upstream_internal_redirect_failed_total").value());
  EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_bad_location").value());
}
// An internal redirect whose Location header is not a parseable URL ("h") is
// rejected and passed through, incrementing the bad-location stat.
TEST_F(RouterTest, InternalRedirectRejectedWithInvalidLocation) {
  enableRedirects();
  sendRequest();
  redirect_headers_->setLocation("h");

  EXPECT_CALL(callbacks_, recreateStream(_)).Times(0);
  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);
  Buffer::OwnedImpl response_body("1234567890");
  response_decoder_->decodeData(response_body, true);

  auto& cluster_store = cm_.thread_local_cluster_.cluster_.info_->stats_store_;
  EXPECT_EQ(1U, cluster_store.counter("upstream_internal_redirect_failed_total").value());
  EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_bad_location").value());
}
// A redirect cannot be followed while the downstream request is still
// incomplete (sendRequest(false)); the response is proxied through instead.
TEST_F(RouterTest, InternalRedirectRejectedWithoutCompleteRequest) {
  enableRedirects();
  sendRequest(false);

  EXPECT_CALL(callbacks_, recreateStream(_)).Times(0);
  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);
  Buffer::OwnedImpl response_body("1234567890");
  response_decoder_->decodeData(response_body, true);

  auto& cluster_store = cm_.thread_local_cluster_.cluster_.info_->stats_store_;
  EXPECT_EQ(1U, cluster_store.counter("upstream_internal_redirect_failed_total").value());
}
// A redirect response with no Location header at all cannot be followed; it is
// proxied through and counted as a failed internal redirect.
TEST_F(RouterTest, InternalRedirectRejectedWithoutLocation) {
  enableRedirects();
  sendRequest();
  redirect_headers_->removeLocation();

  EXPECT_CALL(callbacks_, recreateStream(_)).Times(0);
  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);
  Buffer::OwnedImpl response_body("1234567890");
  response_decoder_->decodeData(response_body, true);

  auto& cluster_store = cm_.thread_local_cluster_.cluster_.info_->stats_store_;
  EXPECT_EQ(1U, cluster_store.counter("upstream_internal_redirect_failed_total").value());
}
// Verifies an internal redirect is followed even when the downstream request
// carried a body: the body is buffered, the stream is recreated, and the
// per-stream redirect count lands in filter state.
TEST_F(RouterTest, InternalRedirectAcceptedWithRequestBody) {
  enableRedirects();
  // Send headers only; the body arrives below via decodeData().
  sendRequest(false);
  EXPECT_CALL(callbacks_.dispatcher_, createTimer_);
  // The router buffers the request body so it can be replayed after the
  // redirect recreates the stream.
  Buffer::InstancePtr body_data(new Buffer::OwnedImpl("random_fake_data"));
  EXPECT_CALL(callbacks_, addDecodedData(_, true));
  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, true));
  // The accepted redirect clears the route cache and recreates the stream.
  EXPECT_CALL(callbacks_, clearRouteCache());
  EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true));
  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);
  Buffer::OwnedImpl response_data("1234567890");
  response_decoder_->decodeData(response_data, false);
  EXPECT_EQ(0U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_internal_redirect_failed_total")
                    .value());
  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_internal_redirect_succeeded_total")
                    .value());
  // In production, the HCM recreateStream would have called this.
  router_.onDestroy();
  EXPECT_EQ(1, callbacks_.streamInfo()
                   .filterState()
                   ->getDataMutable<StreamInfo::UInt32Accessor>("num_internal_redirects")
                   ->value());
}
// A redirect that switches scheme (http request -> https location) is rejected
// by the default policy and counted under the unsafe-scheme stat.
TEST_F(RouterTest, CrossSchemeRedirectRejectedByPolicy) {
  enableRedirects();
  sendRequest();
  redirect_headers_->setLocation("https://www.foo.com");

  EXPECT_CALL(callbacks_, recreateStream(_)).Times(0);
  response_decoder_->decodeHeaders(std::move(redirect_headers_), true);

  auto& cluster_store = cm_.thread_local_cluster_.cluster_.info_->stats_store_;
  EXPECT_EQ(1U, cluster_store.counter("upstream_internal_redirect_failed_total").value());
  EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_unsafe_scheme").value());
}
// Verifies a redirect-policy predicate can veto an internal redirect after the
// target route is resolved, and that the request headers are restored when the
// redirect is abandoned.
TEST_F(RouterTest, InternalRedirectRejectedByPredicate) {
  enableRedirects();
  sendRequest();
  redirect_headers_->setLocation("http://www.foo.com/some/path");
  // Install a predicate that rejects the target route; the route cache is
  // still cleared while evaluating the redirect target.
  auto mock_predicate = std::make_shared<NiceMock<MockInternalRedirectPredicate>>();
  EXPECT_CALL(callbacks_, clearRouteCache());
  EXPECT_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_, predicates())
      .WillOnce(Return(std::vector<InternalRedirectPredicateSharedPtr>({mock_predicate})));
  EXPECT_CALL(*mock_predicate, acceptTargetRoute(_, _, _, _)).WillOnce(Return(false));
  ON_CALL(*mock_predicate, name()).WillByDefault(Return("mock_predicate"));
  EXPECT_CALL(callbacks_, recreateStream(_)).Times(0);
  response_decoder_->decodeHeaders(std::move(redirect_headers_), true);
  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_internal_redirect_failed_total")
                    .value());
  EXPECT_EQ(1UL, stats_store_.counter("test.passthrough_internal_redirect_predicate").value());
  // Make sure the original host/path is preserved.
  EXPECT_EQ("host", default_request_headers_.getHostValue());
  EXPECT_EQ("/", default_request_headers_.getPathValue());
  // Make sure x-envoy-original-url is not set for unsuccessful redirect.
  EXPECT_EQ(nullptr, default_request_headers_.EnvoyOriginalUrl());
}
// Happy path: an internal redirect below the configured limit recreates the
// downstream stream, bumps the success stat, and records the cumulative
// redirect count in per-stream filter state.
TEST_F(RouterTest, HttpInternalRedirectSucceeded) {
  enableRedirects(3);
  setNumPreviousRedirect(2);
  sendRequest();

  EXPECT_CALL(callbacks_, clearRouteCache());
  EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true));
  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);

  auto& cluster_store = cm_.thread_local_cluster_.cluster_.info_->stats_store_;
  EXPECT_EQ(1U, cluster_store.counter("upstream_internal_redirect_succeeded_total").value());

  // In production the HCM's recreateStream would invoke this.
  router_.onDestroy();
  auto redirect_count = callbacks_.streamInfo()
                            .filterState()
                            ->getDataMutable<StreamInfo::UInt32Accessor>("num_internal_redirects");
  EXPECT_EQ(3, redirect_count->value());
}
// Verifies an internal redirect still succeeds when the re-resolved route is a
// direct-response route (no routeEntry). The route mocks are swapped inside
// the clearRouteCache() callback to simulate re-matching after the redirect.
TEST_F(RouterTest, HttpInternalRedirectMatchedToDirectResponseSucceeded) {
  NiceMock<MockDirectResponseEntry> direct_response;
  std::string route_name("route-test-name");
  EXPECT_CALL(direct_response, routeName()).WillOnce(ReturnRef(route_name));
  enableRedirects();
  sendRequest();
  EXPECT_CALL(callbacks_, clearRouteCache()).WillOnce(InvokeWithoutArgs([&]() -> void {
    // Direct message route should be matched after internal redirect
    EXPECT_CALL(*callbacks_.route_, routeEntry()).WillRepeatedly(Return(nullptr));
    EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_response));
  }));
  EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true));
  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);
  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_internal_redirect_succeeded_total")
                    .value());
  // In production, the HCM recreateStream would have called this.
  router_.onDestroy();
  EXPECT_EQ(1, callbacks_.streamInfo()
                   .filterState()
                   ->getDataMutable<StreamInfo::UInt32Accessor>("num_internal_redirects")
                   ->value());
}
// By default a URL fragment in the redirect Location is stripped from the
// rewritten :path before the stream is recreated.
TEST_F(RouterTest, InternalRedirectStripsFragment) {
  enableRedirects();
  default_request_headers_.setForwardedProto("http");
  sendRequest();

  EXPECT_CALL(callbacks_, clearRouteCache());
  EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true));
  Http::ResponseHeaderMapPtr fragment_redirect(new Http::TestResponseHeaderMapImpl(
      {{":status", "302"}, {"location", "http://www.foo.com/#fragment"}}));
  response_decoder_->decodeHeaders(std::move(fragment_redirect), false);

  auto& cluster_store = cm_.thread_local_cluster_.cluster_.info_->stats_store_;
  EXPECT_EQ(1U, cluster_store.counter("upstream_internal_redirect_succeeded_total").value());

  // In production the HCM's recreateStream would invoke this.
  router_.onDestroy();
  // The "#fragment" suffix must not survive in the rewritten path.
  EXPECT_EQ("/", default_request_headers_.getPathValue());
}
// Verifies that with the http_reject_path_with_fragment runtime guard disabled
// the fragment from the redirect Location is kept in the rewritten :path.
// NOTE(review): test name has a typo ("Overide"); left as-is since renaming
// would change gtest filter matching.
TEST_F(RouterTest, InternalRedirectKeepsFragmentWithOveride) {
  TestScopedRuntime scoped_runtime;
  scoped_runtime.mergeValues(
      {{"envoy.reloadable_features.http_reject_path_with_fragment", "false"}});
  enableRedirects();
  default_request_headers_.setForwardedProto("http");
  sendRequest();
  EXPECT_CALL(callbacks_, clearRouteCache());
  EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true));
  Http::ResponseHeaderMapPtr redirect_headers{new Http::TestResponseHeaderMapImpl{
      {":status", "302"}, {"location", "http://www.foo.com/#fragment"}}};
  response_decoder_->decodeHeaders(std::move(redirect_headers), false);
  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("upstream_internal_redirect_succeeded_total")
                    .value());
  // In production, the HCM recreateStream would have called this.
  router_.onDestroy();
  // With the guard off, the fragment survives the path rewrite.
  EXPECT_EQ("/#fragment", default_request_headers_.getPathValue());
}
// A same-scheme https -> https internal redirect is followed when the
// downstream connection actually has SSL.
TEST_F(RouterTest, HttpsInternalRedirectSucceeded) {
  auto ssl_info = std::make_shared<Ssl::MockConnectionInfo>();
  enableRedirects(3);
  setNumPreviousRedirect(1);
  default_request_headers_.setScheme("https");
  sendRequest();
  redirect_headers_->setLocation("https://www.foo.com");

  EXPECT_CALL(connection_, ssl()).WillOnce(Return(ssl_info));
  EXPECT_CALL(callbacks_, clearRouteCache());
  EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true));
  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);

  auto& cluster_store = cm_.thread_local_cluster_.cluster_.info_->stats_store_;
  EXPECT_EQ(1U, cluster_store.counter("upstream_internal_redirect_succeeded_total").value());

  // In production the HCM's recreateStream would invoke this.
  router_.onDestroy();
}
// A cross-scheme redirect (https request -> http location) is followed when
// the route's internal redirect policy explicitly allows scheme changes.
TEST_F(RouterTest, CrossSchemeRedirectAllowedByPolicy) {
  auto ssl_info = std::make_shared<Ssl::MockConnectionInfo>();
  enableRedirects();
  default_request_headers_.setScheme("https");
  sendRequest();
  redirect_headers_->setLocation("http://www.foo.com");

  EXPECT_CALL(connection_, ssl()).WillOnce(Return(ssl_info));
  EXPECT_CALL(callbacks_.route_->route_entry_.internal_redirect_policy_,
              isCrossSchemeRedirectAllowed())
      .WillOnce(Return(true));
  EXPECT_CALL(callbacks_, clearRouteCache());
  EXPECT_CALL(callbacks_, recreateStream(_)).WillOnce(Return(true));
  response_decoder_->decodeHeaders(std::move(redirect_headers_), false);

  auto& cluster_store = cm_.thread_local_cluster_.cluster_.info_->stats_store_;
  EXPECT_EQ(1U, cluster_store.counter("upstream_internal_redirect_succeeded_total").value());

  // In production the HCM's recreateStream would invoke this.
  router_.onDestroy();
}
// Verifies request shadowing (traffic mirroring): two shadow policies are
// attached to the route, the shadow copies carry the buffered body and
// trailers, and each shadow sees the expected trace-sampling flag.
TEST_F(RouterTest, Shadow) {
  // First policy: cluster "foo", runtime key "bar", default sampling.
  ShadowPolicyPtr policy = std::make_unique<TestShadowPolicy>("foo", "bar");
  callbacks_.route_->route_entry_.shadow_policies_.push_back(std::move(policy));
  // Second policy: cluster "fizz", runtime key "buzz", trace sampling disabled.
  policy = std::make_unique<TestShadowPolicy>("fizz", "buzz", envoy::type::v3::FractionalPercent(),
                                              false);
  callbacks_.route_->route_entry_.shadow_policies_.push_back(std::move(policy));
  // The stream id (43) feeds the runtime featureEnabled() checks below.
  ON_CALL(callbacks_, streamId()).WillByDefault(Return(43));
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  // Both shadow runtime keys are enabled for this stream.
  EXPECT_CALL(runtime_.snapshot_, featureEnabled("bar", 0, 43, 10000)).WillOnce(Return(true));
  EXPECT_CALL(runtime_.snapshot_, featureEnabled("buzz", 0, 43, 10000)).WillOnce(Return(true));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  // The body is buffered so the shadow requests can carry a copy of it.
  Buffer::InstancePtr body_data(new Buffer::OwnedImpl("hello"));
  EXPECT_CALL(callbacks_, addDecodedData(_, true));
  EXPECT_EQ(Http::FilterDataStatus::StopIterationNoBuffer, router_.decodeData(*body_data, false));
  Http::TestRequestTrailerMapImpl trailers{{"some", "trailer"}};
  EXPECT_CALL(callbacks_, decodingBuffer())
      .Times(AtLeast(2))
      .WillRepeatedly(Return(body_data.get()));
  // Shadow to "foo": body and trailers present, 10ms timeout, sampled.
  EXPECT_CALL(*shadow_writer_, shadow_("foo", _, _))
      .WillOnce(Invoke([](const std::string&, Http::RequestMessagePtr& request,
                          const Http::AsyncClient::RequestOptions& options) -> void {
        EXPECT_NE(request->body().length(), 0);
        EXPECT_NE(nullptr, request->trailers());
        EXPECT_EQ(absl::optional<std::chrono::milliseconds>(10), options.timeout);
        EXPECT_TRUE(options.sampled_.value());
      }));
  // Shadow to "fizz": same payload, but trace sampling disabled by the policy.
  EXPECT_CALL(*shadow_writer_, shadow_("fizz", _, _))
      .WillOnce(Invoke([](const std::string&, Http::RequestMessagePtr& request,
                          const Http::AsyncClient::RequestOptions& options) -> void {
        EXPECT_NE(request->body().length(), 0);
        EXPECT_NE(nullptr, request->trailers());
        EXPECT_EQ(absl::optional<std::chrono::milliseconds>(10), options.timeout);
        EXPECT_FALSE(options.sampled_.value());
      }));
  // Trailers complete the request and trigger both shadow writes.
  router_.decodeTrailers(trailers);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
}
// Verifies that "x-envoy-upstream-alt-stat-name" routes upstream response
// stats to the alternate stat prefix, alongside the canary and virtual-cluster
// stats. Also covers the no-upstream-timeout case: a route timeout of 0 must
// not create a response timeout timer.
TEST_F(RouterTest, AltStatName) {
  // Also test no upstream timeout here.
  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())
      .WillOnce(Return(std::chrono::milliseconds(0)));
  EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-alt-stat-name", "alt_stat"},
                                         {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, putResponseTime(_));
  // The canary marker and the virtual-cluster response header drive the
  // canary.* and vhost vcluster stats below.
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"},
                                          {"x-envoy-upstream-canary", "true"},
                                          {"x-envoy-virtual-cluster", "hello"}});
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
  EXPECT_EQ(1U,
            stats_store_.counter("vhost.fake_vhost.vcluster.fake_virtual_cluster.upstream_rq_200")
                .value());
  EXPECT_EQ(1U,
            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("canary.upstream_rq_200")
                .value());
  EXPECT_EQ(
      1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("alt_stat.upstream_rq_200")
              .value());
  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("alt_stat.zone.zone_name.to_az.upstream_rq_200")
                    .value());
  // Fixed copy-paste duplicate: this assertion previously re-checked the
  // upstream_rq_200 counter verbatim. Assert the aggregate 2xx zone counter
  // instead, matching the rq_200/rq_2xx pair checked by the non-alt-stat
  // retry tests in this file.
  EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
                    .counter("alt_stat.zone.zone_name.to_az.upstream_rq_2xx")
                    .value());
}
// A redirect direct-response entry (301) is synthesized locally with the
// Location produced by newPath(); no upstream request is ever made.
TEST_F(RouterTest, Redirect) {
  MockDirectResponseEntry redirect_entry;
  std::string redirect_route_name("route-test-name");
  absl::string_view redirect_route_name_view(redirect_route_name);

  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&redirect_entry));
  EXPECT_CALL(redirect_entry, newPath(_)).WillOnce(Return("hello"));
  EXPECT_CALL(redirect_entry, routeName()).WillOnce(ReturnRef(redirect_route_name));
  EXPECT_CALL(redirect_entry, rewritePathHeader(_, _));
  EXPECT_CALL(redirect_entry, responseCode()).WillRepeatedly(Return(Http::Code::MovedPermanently));
  EXPECT_CALL(redirect_entry, responseBody()).WillOnce(ReturnRef(EMPTY_STRING));
  EXPECT_CALL(redirect_entry, finalizeResponseHeaders(_, _));
  EXPECT_CALL(callbacks_.stream_info_, setRouteName(redirect_route_name_view));

  // The locally generated 301 carries the Location from newPath().
  Http::TestResponseHeaderMapImpl expected_headers{{":status", "301"}, {"location", "hello"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true));

  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);

  // No upstream traffic, no attempt count, no upstream stats.
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
}
// Same as the Redirect test but for a 302 Found direct-response entry.
TEST_F(RouterTest, RedirectFound) {
  MockDirectResponseEntry redirect_entry;
  std::string redirect_route_name("route-test-name");
  absl::string_view redirect_route_name_view(redirect_route_name);

  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&redirect_entry));
  EXPECT_CALL(redirect_entry, newPath(_)).WillOnce(Return("hello"));
  EXPECT_CALL(redirect_entry, routeName()).WillOnce(ReturnRef(redirect_route_name));
  EXPECT_CALL(redirect_entry, rewritePathHeader(_, _));
  EXPECT_CALL(redirect_entry, responseCode()).WillRepeatedly(Return(Http::Code::Found));
  EXPECT_CALL(redirect_entry, responseBody()).WillOnce(ReturnRef(EMPTY_STRING));
  EXPECT_CALL(redirect_entry, finalizeResponseHeaders(_, _));
  EXPECT_CALL(callbacks_.stream_info_, setRouteName(redirect_route_name_view));

  Http::TestResponseHeaderMapImpl expected_headers{{":status", "302"}, {"location", "hello"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true));

  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);

  // No upstream traffic, no attempt count, no upstream stats.
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
}
// A route resolving to a direct response entry is answered locally: the filter
// encodes the response itself, no upstream request is made, and the
// rq_direct_response counter is bumped.
TEST_F(RouterTest, DirectResponse) {
  NiceMock<MockDirectResponseEntry> direct_entry;
  std::string direct_route_name("route-test-name");
  absl::string_view direct_route_name_view(direct_route_name);

  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_entry));
  EXPECT_CALL(direct_entry, routeName()).WillOnce(ReturnRef(direct_route_name));
  EXPECT_CALL(direct_entry, responseCode()).WillRepeatedly(Return(Http::Code::OK));
  EXPECT_CALL(direct_entry, responseBody()).WillRepeatedly(ReturnRef(EMPTY_STRING));
  EXPECT_CALL(callbacks_.stream_info_, setRouteName(direct_route_name_view));

  // Only a status header; no span context is injected because no upstream
  // call happens.
  Http::TestResponseHeaderMapImpl expected_headers{{":status", "200"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true));
  EXPECT_CALL(span_, injectContext(_)).Times(0);

  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);

  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value());
}
// A direct response with a body must synthesize content-length/content-type
// headers and encode the body locally.
TEST_F(RouterTest, DirectResponseWithBody) {
  NiceMock<MockDirectResponseEntry> direct_entry;
  std::string direct_route_name("route-test-name");
  absl::string_view direct_route_name_view(direct_route_name);
  const std::string body_text("static response");

  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_entry));
  EXPECT_CALL(direct_entry, routeName()).WillOnce(ReturnRef(direct_route_name));
  EXPECT_CALL(direct_entry, responseCode()).WillRepeatedly(Return(Http::Code::OK));
  EXPECT_CALL(direct_entry, responseBody()).WillRepeatedly(ReturnRef(body_text));
  EXPECT_CALL(callbacks_.stream_info_, setRouteName(direct_route_name_view));

  // content-length 15 == strlen("static response").
  Http::TestResponseHeaderMapImpl expected_headers{
      {":status", "200"}, {"content-length", "15"}, {"content-type", "text/plain"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), false));
  EXPECT_CALL(callbacks_, encodeData(_, true));

  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);

  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value());
}
// A 201 Created direct response carries a Location header taken from
// newPath(), since 201 is a status that conveys a created resource location.
TEST_F(RouterTest, DirectResponseWithLocation) {
  NiceMock<MockDirectResponseEntry> direct_entry;
  std::string direct_route_name("route-test-name");
  absl::string_view direct_route_name_view(direct_route_name);

  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_entry));
  EXPECT_CALL(direct_entry, newPath(_)).WillOnce(Return("http://host/"));
  EXPECT_CALL(direct_entry, routeName()).WillOnce(ReturnRef(direct_route_name));
  EXPECT_CALL(direct_entry, responseCode()).WillRepeatedly(Return(Http::Code::Created));
  EXPECT_CALL(direct_entry, responseBody()).WillRepeatedly(ReturnRef(EMPTY_STRING));
  EXPECT_CALL(callbacks_.stream_info_, setRouteName(direct_route_name_view));

  Http::TestResponseHeaderMapImpl expected_headers{{":status", "201"},
                                                   {"location", "http://host/"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true));
  EXPECT_CALL(span_, injectContext(_)).Times(0);

  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);

  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value());
}
// A 200 direct response must NOT get a Location header even when newPath()
// yields one — only redirect/created statuses carry Location.
TEST_F(RouterTest, DirectResponseWithoutLocation) {
  NiceMock<MockDirectResponseEntry> direct_entry;
  std::string direct_route_name("route-test-name");
  absl::string_view direct_route_name_view(direct_route_name);

  EXPECT_CALL(*callbacks_.route_, directResponseEntry()).WillRepeatedly(Return(&direct_entry));
  EXPECT_CALL(direct_entry, newPath(_)).WillOnce(Return("http://host/"));
  EXPECT_CALL(direct_entry, routeName()).WillOnce(ReturnRef(direct_route_name));
  EXPECT_CALL(direct_entry, responseCode()).WillRepeatedly(Return(Http::Code::OK));
  EXPECT_CALL(direct_entry, responseBody()).WillRepeatedly(ReturnRef(EMPTY_STRING));
  EXPECT_CALL(callbacks_.stream_info_, setRouteName(direct_route_name_view));

  Http::TestResponseHeaderMapImpl expected_headers{{":status", "200"}};
  EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), true));
  EXPECT_CALL(span_, injectContext(_)).Times(0);

  Http::TestRequestHeaderMapImpl request_headers;
  HttpTestUtility::addDefaultHeaders(request_headers);
  router_.decodeHeaders(request_headers, true);

  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  EXPECT_FALSE(callbacks_.stream_info_.attemptCount().has_value());
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  EXPECT_EQ(1UL, config_.stats_.rq_direct_response_.value());
}
// Verifies that we propagate the upstream connection filter state to the upstream and downstream
// request filter state.
TEST_F(RouterTest, PropagatesUpstreamFilterState) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  // This pattern helps ensure that we're actually invoking the callback.
  bool filter_state_verified = false;
  // Upstream access log: checks the propagated state is visible at log time.
  router_.config().upstream_logs_.push_back(
      std::make_shared<TestAccessLog>([&](const auto& stream_info) {
        filter_state_verified =
            stream_info.upstreamInfo()->upstreamFilterState()->hasDataWithName("upstream data");
      }));
  // Seed the upstream connection's filter state before the request runs.
  upstream_stream_info_.filterState()->setData(
      "upstream data", std::make_unique<StreamInfo::UInt32AccessorImpl>(123),
      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::Connection);
  expectResponseTimerCreate();
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  Http::TestRequestHeaderMapImpl headers{};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  // NOLINTNEXTLINE: Silence null pointer access warning
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
  EXPECT_TRUE(filter_state_verified);
  // The same data must also be reachable from the downstream stream info.
  EXPECT_TRUE(callbacks_.streamInfo().upstreamInfo()->upstreamFilterState()->hasDataWithName(
      "upstream data"));
}
// Verifies that the upstream connection's TLS session information is surfaced through the
// downstream StreamInfo's upstreamInfo(), while upstream connection id and interface name
// remain unset for this plain mock connection.
TEST_F(RouterTest, UpstreamSSLConnection) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;

  std::string session_id = "D62A523A65695219D46FE1FFE285A4C371425ACE421B110B5B8D11D3EB4D5F0B";
  auto connection_info = std::make_shared<NiceMock<Ssl::MockConnectionInfo>>();
  ON_CALL(*connection_info, sessionId()).WillByDefault(ReturnRef(session_id));
  // NOTE(review): the mocked TLS info is attached to the upstream stream's connection info
  // provider; the assertions below confirm the router copies it into upstreamSslConnection().
  upstream_stream_info_.downstream_connection_info_provider_->setSslConnection(connection_info);

  expectResponseTimerCreate();
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);

  Http::TestRequestHeaderMapImpl headers{};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));

  // The SSL info (and its session id) is visible downstream; connection id / interface name
  // were never populated, so they must stay empty.
  ASSERT_NE(nullptr, callbacks_.streamInfo().upstreamInfo()->upstreamSslConnection());
  EXPECT_EQ(session_id,
            callbacks_.streamInfo().upstreamInfo()->upstreamSslConnection()->sessionId());
  EXPECT_FALSE(callbacks_.streamInfo().upstreamInfo()->upstreamConnectionId().has_value());
  EXPECT_FALSE(callbacks_.streamInfo().upstreamInfo()->upstreamInterfaceName().has_value());
}
// Verify that upstream timing information is set into the StreamInfo after the upstream
// request completes.
TEST_F(RouterTest, UpstreamTimingSingleRequest) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  StreamInfo::StreamInfoImpl stream_info(test_time_.timeSystem(), nullptr);
  ON_CALL(callbacks_, streamInfo()).WillByDefault(ReturnRef(stream_info));
  // Before any upstream activity there is no upstream info at all.
  EXPECT_EQ(nullptr, stream_info.upstreamInfo());

  Http::TestRequestHeaderMapImpl headers{};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  // 32ms elapse between first and last request (tx) bytes.
  test_time_.advanceTimeWait(std::chrono::milliseconds(32));
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  response_decoder->decodeHeaders(std::move(response_headers), false);
  // 43ms elapse between first and last response (rx) bytes.
  test_time_.advanceTimeWait(std::chrono::milliseconds(43));

  // Upstream timing data is now available live.
  ASSERT_NE(nullptr, stream_info.upstreamInfo());
  auto& upstream_timing = stream_info.upstreamInfo()->upstreamTiming();
  EXPECT_TRUE(upstream_timing.first_upstream_tx_byte_sent_.has_value());
  EXPECT_TRUE(upstream_timing.last_upstream_tx_byte_sent_.has_value());
  EXPECT_TRUE(upstream_timing.first_upstream_rx_byte_received_.has_value());
  // The response has not ended yet, so last-rx is still unset.
  EXPECT_FALSE(upstream_timing.last_upstream_rx_byte_received_.has_value());
  response_decoder->decodeData(data, true);
  // Now all these should be set.
  EXPECT_TRUE(upstream_timing.first_upstream_tx_byte_sent_.has_value());
  EXPECT_TRUE(upstream_timing.last_upstream_tx_byte_sent_.has_value());
  EXPECT_TRUE(upstream_timing.first_upstream_rx_byte_received_.has_value());
  EXPECT_TRUE(upstream_timing.last_upstream_rx_byte_received_.has_value());
  // Timings should match our sleep() calls.
  EXPECT_EQ(upstream_timing.last_upstream_rx_byte_received_.value() -
                upstream_timing.first_upstream_rx_byte_received_.value(),
            std::chrono::milliseconds(43));
  EXPECT_EQ(upstream_timing.last_upstream_tx_byte_sent_.value() -
                upstream_timing.first_upstream_tx_byte_sent_.value(),
            std::chrono::milliseconds(32));
}
// Verify that upstream timing information is set into the StreamInfo when a
// retry occurs (and not before).
TEST_F(RouterTest, UpstreamTimingRetry) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  StreamInfo::StreamInfoImpl stream_info(test_time_, nullptr);
  ON_CALL(callbacks_, streamInfo()).WillByDefault(ReturnRef(stream_info));

  // Check that upstream timing is updated after the first request.
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  router_.retry_state_->expectHeadersRetry();
  test_time_.advanceTimeWait(std::chrono::milliseconds(32));
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  test_time_.advanceTimeWait(std::chrono::milliseconds(43));

  // The 503 below triggers a retry; a second upstream stream is created for it.
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  Http::ResponseHeaderMapPtr bad_response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  response_decoder->decodeHeaders(std::move(bad_response_headers), true);
  router_.retry_state_->callback_();
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  // Capture the simulated time at which the retry request was sent, for the tx checks below.
  MonotonicTime retry_time = test_time_.monotonicTime();
  Http::ResponseHeaderMapPtr good_response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(good_response_headers), false);
  // 153ms elapse between first and last response bytes of the (successful) retry.
  test_time_.advanceTimeWait(std::chrono::milliseconds(153));
  response_decoder->decodeData(data, true);

  auto& upstream_timing = stream_info.upstreamInfo()->upstreamTiming();
  EXPECT_TRUE(upstream_timing.first_upstream_tx_byte_sent_.has_value());
  EXPECT_TRUE(upstream_timing.last_upstream_tx_byte_sent_.has_value());
  EXPECT_TRUE(upstream_timing.first_upstream_rx_byte_received_.has_value());
  EXPECT_TRUE(upstream_timing.last_upstream_rx_byte_received_.has_value());
  EXPECT_EQ(upstream_timing.last_upstream_rx_byte_received_.value() -
                upstream_timing.first_upstream_rx_byte_received_.value(),
            std::chrono::milliseconds(153));
  // Time spent in upstream tx is 0 because we're using simulated time and
  // don't have a good way to insert a "sleep" there, but values being present
  // and equal to the time the retry was sent is good enough of a test.
  StreamInfo::TimingUtility timing(stream_info);
  EXPECT_EQ(timing.lastUpstreamTxByteSent().value() - timing.firstUpstreamTxByteSent().value(),
            std::chrono::milliseconds(0));
  EXPECT_EQ(timing.lastUpstreamTxByteSent().value() +
                stream_info.startTimeMonotonic().time_since_epoch(),
            retry_time.time_since_epoch());
  EXPECT_EQ(timing.firstUpstreamTxByteSent().value() +
                stream_info.startTimeMonotonic().time_since_epoch(),
            retry_time.time_since_epoch());
}
// Verify that upstream timing information is set into the StreamInfo when a
// global timeout occurs.
TEST_F(RouterTest, UpstreamTimingTimeout) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  StreamInfo::StreamInfoImpl stream_info(test_time_, nullptr);
  ON_CALL(callbacks_, streamInfo()).WillByDefault(ReturnRef(stream_info));
  expectResponseTimerCreate();

  // Advance 10ms before sending headers: firstUpstreamTxByteSent == 10ms from stream start.
  test_time_.advanceTimeWait(std::chrono::milliseconds(10));
  // Check that upstream timing is updated after the first request.
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "50"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  auto& upstream_timing = stream_info.upstreamInfo()->upstreamTiming();
  EXPECT_FALSE(upstream_timing.last_upstream_rx_byte_received_.has_value());

  // +13ms before the request body ends: lastUpstreamTxByteSent == 23ms.
  test_time_.advanceTimeWait(std::chrono::milliseconds(13));
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());

  // +33ms before response headers arrive: firstUpstreamRxByteReceived == 56ms.
  test_time_.advanceTimeWait(std::chrono::milliseconds(33));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  response_decoder->decodeHeaders(std::move(response_headers), false);
  test_time_.advanceTimeWait(std::chrono::milliseconds(99));
  // Fire the global response timeout before the response body completes.
  response_timeout_->invokeCallback();

  EXPECT_TRUE(upstream_timing.first_upstream_tx_byte_sent_.has_value());
  EXPECT_TRUE(upstream_timing.last_upstream_tx_byte_sent_.has_value());
  EXPECT_TRUE(upstream_timing.first_upstream_rx_byte_received_.has_value());
  // False because no end_stream was seen.
  EXPECT_FALSE(upstream_timing.last_upstream_rx_byte_received_.has_value());
  StreamInfo::TimingUtility timing(stream_info);
  EXPECT_EQ(timing.firstUpstreamTxByteSent().value(), std::chrono::milliseconds(10));
  EXPECT_EQ(timing.lastUpstreamTxByteSent().value(), std::chrono::milliseconds(23));
  EXPECT_EQ(timing.firstUpstreamRxByteReceived().value(), std::chrono::milliseconds(56));
}
// Exercises FilterUtility::finalHedgingParams() across every combination of the route-level
// hedge-on-per-try-timeout policy flag and the "x-envoy-hedge-on-per-try-timeout" request
// header: a valid header ("true"/"false") overrides the route; a missing or unparseable header
// leaves the route's setting in effect.
TEST(RouterFilterUtilityTest, FinalHedgingParamsHedgeOnPerTryTimeout) {
  Http::TestRequestHeaderMapImpl empty_headers;
  { // Route enables hedging, header absent -> hedging stays enabled.
    NiceMock<MockRouteEntry> route;
    route.hedge_policy_.hedge_on_per_try_timeout_ = true;
    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));
    const FilterUtility::HedgingParams params =
        FilterUtility::finalHedgingParams(route, empty_headers);
    EXPECT_TRUE(params.hedge_on_per_try_timeout_);
  }
  { // Route disables hedging, header absent -> hedging stays disabled.
    NiceMock<MockRouteEntry> route;
    route.hedge_policy_.hedge_on_per_try_timeout_ = false;
    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));
    const FilterUtility::HedgingParams params =
        FilterUtility::finalHedgingParams(route, empty_headers);
    EXPECT_FALSE(params.hedge_on_per_try_timeout_);
  }
  { // Route disables hedging, header says "true" -> header wins, enabled.
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-hedge-on-per-try-timeout", "true"}};
    NiceMock<MockRouteEntry> route;
    route.hedge_policy_.hedge_on_per_try_timeout_ = false;
    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));
    const FilterUtility::HedgingParams params = FilterUtility::finalHedgingParams(route, headers);
    EXPECT_TRUE(params.hedge_on_per_try_timeout_);
  }
  { // Route disables hedging, header says "false" -> disabled.
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-hedge-on-per-try-timeout", "false"}};
    NiceMock<MockRouteEntry> route;
    route.hedge_policy_.hedge_on_per_try_timeout_ = false;
    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));
    const FilterUtility::HedgingParams params = FilterUtility::finalHedgingParams(route, headers);
    EXPECT_FALSE(params.hedge_on_per_try_timeout_);
  }
  { // Route enables hedging, header says "false" -> header wins, disabled.
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-hedge-on-per-try-timeout", "false"}};
    NiceMock<MockRouteEntry> route;
    route.hedge_policy_.hedge_on_per_try_timeout_ = true;
    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));
    const FilterUtility::HedgingParams params = FilterUtility::finalHedgingParams(route, headers);
    EXPECT_FALSE(params.hedge_on_per_try_timeout_);
  }
  { // Route enables hedging, header says "true" -> enabled.
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-hedge-on-per-try-timeout", "true"}};
    NiceMock<MockRouteEntry> route;
    route.hedge_policy_.hedge_on_per_try_timeout_ = true;
    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));
    const FilterUtility::HedgingParams params = FilterUtility::finalHedgingParams(route, headers);
    EXPECT_TRUE(params.hedge_on_per_try_timeout_);
  }
  { // Route enables hedging, header unparseable -> header ignored, enabled.
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-hedge-on-per-try-timeout", "bad"}};
    NiceMock<MockRouteEntry> route;
    route.hedge_policy_.hedge_on_per_try_timeout_ = true;
    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));
    const FilterUtility::HedgingParams params = FilterUtility::finalHedgingParams(route, headers);
    EXPECT_TRUE(params.hedge_on_per_try_timeout_);
  }
  { // Route disables hedging, header unparseable -> header ignored, disabled.
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-hedge-on-per-try-timeout", "bad"}};
    NiceMock<MockRouteEntry> route;
    route.hedge_policy_.hedge_on_per_try_timeout_ = false;
    EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_));
    const FilterUtility::HedgingParams params = FilterUtility::finalHedgingParams(route, headers);
    EXPECT_FALSE(params.hedge_on_per_try_timeout_);
  }
}
// Exercises FilterUtility::finalTimeout() over route timeouts, retry-policy per-try timeouts,
// the x-envoy-upstream-rq-(per-try-)timeout-ms headers, and the gRPC "grpc-timeout" header.
// NOTE(review): the four boolean arguments appear to be (insert expected timeout header,
// is-gRPC request, per-try-timeout hedging enabled, respect x-envoy-expected-rq-timeout-ms) —
// inferred from the cases below; confirm against FilterUtility::finalTimeout's declaration.
TEST(RouterFilterUtilityTest, FinalTimeout) {
  // Route timeout only, no headers: global from the route, no per-try timeout.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers;
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, false, false, false);
    EXPECT_EQ(std::chrono::milliseconds(10), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
  }
  // Valid x-envoy-upstream-rq-timeout-ms overrides the route timeout; the header is consumed
  // and x-envoy-expected-rq-timeout-ms is written.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "15"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, false, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_EQ("15", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // Unparseable timeout header: route timeout used; header still removed.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "bad"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, false, false, false);
    EXPECT_EQ(std::chrono::milliseconds(10), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_EQ("10", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // Per-try header equal to the global timeout is dropped (per-try stays 0).
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "15"},
                                           {"x-envoy-upstream-rq-per-try-timeout-ms", "15"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, false, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("15", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // Per-try header smaller than global: per-try honored and becomes the expected timeout.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "15"},
                                           {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, false, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("5", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // Same as above, but with per-try-timeout hedging enabled the expected timeout reflects the
  // global timeout rather than the per-try one.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "15"},
                                           {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, false, true, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("15", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // gRPC request with hedging: grpc-timeout is (re)written from the final expected timeout.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "15"},
                                           {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, true, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("15", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_EQ("15m", headers.get_("grpc-timeout"));
  }
  // Retry policy supplies the per-try timeout when no per-try header is present.
  {
    NiceMock<MockRouteEntry> route;
    route.retry_policy_.per_try_timeout_ = std::chrono::milliseconds(7);
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "15"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, false, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(7), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("7", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // Global timeout of 0 (disabled): the retry-policy per-try timeout still applies.
  {
    NiceMock<MockRouteEntry> route;
    route.retry_policy_.per_try_timeout_ = std::chrono::milliseconds(10);
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(0)));
    Http::TestRequestHeaderMapImpl headers;
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, false, false, false);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(10), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("10", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // Per-try header overrides the retry policy's per-try timeout.
  {
    NiceMock<MockRouteEntry> route;
    route.retry_policy_.per_try_timeout_ = std::chrono::milliseconds(7);
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "15"},
                                           {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, false, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("5", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // gRPC request with no grpc-timeout header and a zero max: no timeout, header not added.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // gRPC request but no max-gRPC-timeout configured: the route timeout is used unchanged.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout()).WillRepeatedly(Return(absl::nullopt));
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(10), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // grpc-timeout header honored as the global timeout when the max is 0 (uncapped).
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "1000m"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(1000), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_EQ("1000m", headers.get_("grpc-timeout"));
  }
  // grpc-timeout is clamped to maxGrpcTimeout and the header is rewritten.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(999)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "1000m"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(999), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_EQ("999m", headers.get_("grpc-timeout"));
  }
  // A "0m" grpc-timeout is also replaced with the configured max.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(999)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "0m"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(999), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_EQ("999m", headers.get_("grpc-timeout"));
  }
  // grpcTimeoutOffset is subtracted from the grpc-timeout value: 100ms - 10ms = 90ms.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(999)));
    EXPECT_CALL(route, grpcTimeoutOffset())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(10)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "100m"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(90), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
  }
  // The offset is not applied when it would consume the entire timeout (1ms - 10ms).
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(999)));
    EXPECT_CALL(route, grpcTimeoutOffset())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(10)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "1m"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(1), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
  }
  // x-envoy-upstream-rq-timeout-ms takes precedence over grpc-timeout, which is rewritten.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "1000m"},
                                           {"x-envoy-upstream-rq-timeout-ms", "15"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_EQ("15", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_EQ("15m", headers.get_("grpc-timeout"));
  }
  // Unparseable x-envoy timeout header: fall back to the grpc-timeout value.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "1000m"},
                                           {"x-envoy-upstream-rq-timeout-ms", "bad"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(1000), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_EQ("1000", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_EQ("1000m", headers.get_("grpc-timeout"));
  }
  // gRPC request: per-try equal to global is dropped, as in the non-gRPC case.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "1000m"},
                                           {"x-envoy-upstream-rq-timeout-ms", "15"},
                                           {"x-envoy-upstream-rq-per-try-timeout-ms", "15"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("15", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_EQ("15m", headers.get_("grpc-timeout"));
  }
  // gRPC request: the per-try timeout drives both expected and grpc-timeout headers.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "1000m"},
                                           {"x-envoy-upstream-rq-timeout-ms", "15"},
                                           {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("5", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_EQ("5m", headers.get_("grpc-timeout"));
  }
  // gRPC request: retry-policy per-try timeout is reflected in both outgoing headers.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));
    route.retry_policy_.per_try_timeout_ = std::chrono::milliseconds(7);
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "1000m"},
                                           {"x-envoy-upstream-rq-timeout-ms", "15"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(7), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("7", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_EQ("7m", headers.get_("grpc-timeout"));
  }
  // gRPC request: per-try header overrides the retry policy's per-try timeout.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(0)));
    route.retry_policy_.per_try_timeout_ = std::chrono::milliseconds(7);
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "1000m"},
                                           {"x-envoy-upstream-rq-timeout-ms", "15"},
                                           {"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(5), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("5", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_EQ("5m", headers.get_("grpc-timeout"));
  }
  // An absurdly large grpc-timeout ("H" = hours) is clamped to the configured max.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, maxGrpcTimeout())
        .WillRepeatedly(Return(absl::optional<std::chrono::milliseconds>(10000)));
    Http::TestRequestHeaderMapImpl headers{{"content-type", "application/grpc"},
                                           {"grpc-timeout", "6666666666666H"}};
    FilterUtility::finalTimeout(route, headers, true, true, false, false);
    EXPECT_EQ("10000", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_EQ("10000m", headers.get_("grpc-timeout"));
  }
  // respect_expected_rq_timeout: an incoming x-envoy-expected-rq-timeout-ms becomes the
  // global timeout and is preserved.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-expected-rq-timeout-ms", "8"}};
    // Make ingress envoy respect `x-envoy-expected-rq-timeout-ms` header.
    bool respect_expected_rq_timeout = true;
    FilterUtility::TimeoutData timeout = FilterUtility::finalTimeout(
        route, headers, true, false, false, respect_expected_rq_timeout);
    EXPECT_EQ(std::chrono::milliseconds(8), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_EQ("8", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  // respect_expected_rq_timeout with a smaller per-try header: per-try wins and the expected
  // header is rewritten accordingly.
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-expected-rq-timeout-ms", "8"},
                                           {"x-envoy-upstream-rq-per-try-timeout-ms", "4"}};
    // Make ingress envoy respect `x-envoy-expected-rq-timeout-ms` header.
    bool respect_expected_rq_timeout = true;
    FilterUtility::TimeoutData timeout = FilterUtility::finalTimeout(
        route, headers, true, false, false, respect_expected_rq_timeout);
    EXPECT_EQ(std::chrono::milliseconds(8), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(4), timeout.per_try_timeout_);
    EXPECT_EQ("4", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "8"}};
    // Test that ingress envoy populates `x-envoy-expected-rq-timeout-ms` header if it has not been
    // set by egress envoy.
    bool respect_expected_rq_timeout = true;
    FilterUtility::TimeoutData timeout = FilterUtility::finalTimeout(
        route, headers, true, false, false, respect_expected_rq_timeout);
    EXPECT_EQ(std::chrono::milliseconds(8), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("8", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "8"}};
    // Make envoy override `x-envoy-expected-rq-timeout-ms` header.
    // Test that ingress envoy sets `x-envoy-expected-rq-timeout-ms` header.
    bool respect_expected_rq_timeout = false;
    FilterUtility::TimeoutData timeout = FilterUtility::finalTimeout(
        route, headers, true, false, false, respect_expected_rq_timeout);
    EXPECT_EQ(std::chrono::milliseconds(8), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-per-try-timeout-ms"));
    EXPECT_EQ("8", headers.get_("x-envoy-expected-rq-timeout-ms"));
    EXPECT_FALSE(headers.has("grpc-timeout"));
  }
}
// Verifies finalTimeout() with envoy-header emission suppressed (final `false`
// argument): the incoming x-envoy-upstream-rq-timeout-ms value is still used
// as the global timeout, and the consumed header is removed from the request.
// NOTE(review): "Supress" in the test name looks like a typo for "Suppress";
// left unchanged because renaming would break --gtest_filter references.
TEST(RouterFilterUtilityTest, FinalTimeoutSupressEnvoyHeaders) {
  {
    NiceMock<MockRouteEntry> route;
    EXPECT_CALL(route, timeout()).WillOnce(Return(std::chrono::milliseconds(10)));
    Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-rq-timeout-ms", "15"}};
    // The 15ms header value overrides the route's 10ms timeout.
    FilterUtility::TimeoutData timeout =
        FilterUtility::finalTimeout(route, headers, true, false, false, false);
    EXPECT_EQ(std::chrono::milliseconds(15), timeout.global_timeout_);
    EXPECT_EQ(std::chrono::milliseconds(0), timeout.per_try_timeout_);
    // The consumed timeout header must be stripped from the upstream request.
    EXPECT_FALSE(headers.has("x-envoy-upstream-rq-timeout-ms"));
  }
}
// Checks how FilterUtility::setUpstreamScheme() derives the upstream :scheme
// header from the existing :scheme header, x-forwarded-proto, and the
// downstream encryption level.
TEST(RouterFilterUtilityTest, SetUpstreamScheme) {
  TestScopedRuntime scoped_runtime;
  // Neither :scheme nor x-forwarded-proto present: scheme follows the
  // encryption level.
  {
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::setUpstreamScheme(request_headers, false);
    EXPECT_EQ("http", request_headers.get_(":scheme"));
  }
  {
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::setUpstreamScheme(request_headers, true);
    EXPECT_EQ("https", request_headers.get_(":scheme"));
  }
  // An unrecognized x-forwarded-proto value is ignored; the encryption level
  // still decides.
  {
    Http::TestRequestHeaderMapImpl request_headers;
    request_headers.setForwardedProto("foo");
    FilterUtility::setUpstreamScheme(request_headers, true);
    EXPECT_EQ("https", request_headers.get_(":scheme"));
  }
  // A recognized x-forwarded-proto value is honored.
  {
    Http::TestRequestHeaderMapImpl request_headers;
    request_headers.setForwardedProto(Http::Headers::get().SchemeValues.Http);
    FilterUtility::setUpstreamScheme(request_headers, true);
    EXPECT_EQ("http", request_headers.get_(":scheme"));
  }
  // An explicit :scheme header wins over x-forwarded-proto.
  {
    Http::TestRequestHeaderMapImpl request_headers;
    request_headers.setScheme(Http::Headers::get().SchemeValues.Https);
    request_headers.setForwardedProto(Http::Headers::get().SchemeValues.Http);
    FilterUtility::setUpstreamScheme(request_headers, false);
    EXPECT_EQ("https", request_headers.get_(":scheme"));
  }
}
// Covers FilterUtility::shouldShadow() for: no shadow cluster, a cluster with
// no runtime key, runtime-key gating in both directions, and a default
// fractional percent supplied instead of a runtime key lookup.
TEST(RouterFilterUtilityTest, ShouldShadow) {
  // No shadow cluster configured: never shadow and never consult runtime.
  {
    TestShadowPolicy shadow_policy;
    NiceMock<Runtime::MockLoader> loader;
    EXPECT_CALL(loader.snapshot_, featureEnabled(_, _, _, _)).Times(0);
    EXPECT_FALSE(FilterUtility::shouldShadow(shadow_policy, loader, 5));
  }
  // Cluster configured but no runtime key: always shadow, runtime untouched.
  {
    TestShadowPolicy shadow_policy("cluster");
    NiceMock<Runtime::MockLoader> loader;
    EXPECT_CALL(loader.snapshot_, featureEnabled(_, _, _, _)).Times(0);
    EXPECT_TRUE(FilterUtility::shouldShadow(shadow_policy, loader, 5));
  }
  // Runtime key present: shadowing follows the runtime feature check.
  {
    TestShadowPolicy shadow_policy("cluster", "foo");
    NiceMock<Runtime::MockLoader> loader;
    EXPECT_CALL(loader.snapshot_, featureEnabled("foo", 0, 5, 10000)).WillOnce(Return(false));
    EXPECT_FALSE(FilterUtility::shouldShadow(shadow_policy, loader, 5));
  }
  {
    TestShadowPolicy shadow_policy("cluster", "foo");
    NiceMock<Runtime::MockLoader> loader;
    EXPECT_CALL(loader.snapshot_, featureEnabled("foo", 0, 5, 10000)).WillOnce(Return(true));
    EXPECT_TRUE(FilterUtility::shouldShadow(shadow_policy, loader, 5));
  }
  // A default FractionalPercent is passed through to the runtime check
  // instead of the numeric default.
  {
    envoy::type::v3::FractionalPercent default_fraction;
    default_fraction.set_numerator(5);
    default_fraction.set_denominator(envoy::type::v3::FractionalPercent::TEN_THOUSAND);
    TestShadowPolicy shadow_policy("cluster", "foo", default_fraction);
    NiceMock<Runtime::MockLoader> loader;
    EXPECT_CALL(
        loader.snapshot_,
        featureEnabled("foo", testing::Matcher<const envoy::type::v3::FractionalPercent&>(_), 3))
        .WillOnce(Return(true));
    EXPECT_TRUE(FilterUtility::shouldShadow(shadow_policy, loader, 3));
  }
}
// Verifies that a 200 response served by a host whose canary() is true
// increments the cluster's "canary.upstream_rq_200" counter (compare with
// CanaryStatusFalse below, where the counter stays at zero).
TEST_F(RouterTest, CanaryStatusTrue) {
  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())
      .WillOnce(Return(std::chrono::milliseconds(0)));
  // Zero route timeout: no response timer may be created.
  EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-alt-stat-name", "alt_stat"},
                                         {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  const absl::optional<std::string> virtual_cluster_name =
      absl::optional<std::string>("fake_virtual_cluster");
  EXPECT_CALL(callbacks_.stream_info_, setVirtualClusterName(virtual_cluster_name));
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"},
                                          {"x-envoy-upstream-canary", "false"},
                                          {"x-envoy-virtual-cluster", "hello"}});
  // Host reports canary() == true even though the response header says
  // "false"; the assertion below shows the host flag drives the canary stat.
  ON_CALL(*cm_.thread_local_cluster_.conn_pool_.host_, canary()).WillByDefault(Return(true));
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
  EXPECT_EQ(1U,
            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("canary.upstream_rq_200")
                .value());
}
// Counterpart to CanaryStatusTrue: with the host's default (non-canary)
// status and a "x-envoy-upstream-canary: false" response header, the
// "canary.upstream_rq_200" counter must remain zero.
TEST_F(RouterTest, CanaryStatusFalse) {
  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())
      .WillOnce(Return(std::chrono::milliseconds(0)));
  // Zero route timeout: no response timer may be created.
  EXPECT_CALL(callbacks_.dispatcher_, createTimer_(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-alt-stat-name", "alt_stat"},
                                         {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  const absl::optional<std::string> virtual_cluster_name =
      absl::optional<std::string>("fake_virtual_cluster");
  EXPECT_CALL(callbacks_.stream_info_, setVirtualClusterName(virtual_cluster_name));
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"},
                                          {"x-envoy-upstream-canary", "false"},
                                          {"x-envoy-virtual-cluster", "hello"}});
  response_decoder->decodeHeaders(std::move(response_headers), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 0));
  // No canary involvement anywhere, so the canary counter stays at zero.
  EXPECT_EQ(0U,
            cm_.thread_local_cluster_.cluster_.info_->stats_store_.counter("canary.upstream_rq_200")
                .value());
}
// With autoHostRewrite() and appendXfh() enabled on the route, the upstream
// request's :authority is rewritten to the selected host's hostname and the
// original authority is preserved in x-forwarded-host.
TEST_F(RouterTest, AutoHostRewriteEnabled) {
  NiceMock<Http::MockRequestEncoder> encoder;
  std::string req_host{"foo.bar.com"};
  Http::TestRequestHeaderMapImpl incoming_headers;
  HttpTestUtility::addDefaultHeaders(incoming_headers);
  incoming_headers.setHost(req_host);
  cm_.thread_local_cluster_.conn_pool_.host_->hostname_ = "scooby.doo";
  // Build the header map we expect the router to send upstream: host swapped
  // for the upstream hostname, original host moved to x-forwarded-host.
  Http::TestRequestHeaderMapImpl outgoing_headers;
  HttpTestUtility::addDefaultHeaders(outgoing_headers);
  outgoing_headers.setHost(cm_.thread_local_cluster_.conn_pool_.host_->hostname_);
  outgoing_headers.setForwardedHost(req_host);
  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())
      .WillOnce(Return(std::chrono::milliseconds(0)));
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder_, Http::Protocol::Http10);
  // :authority header in the outgoing request should match the DNS name of
  // the selected upstream host
  EXPECT_CALL(encoder, encodeHeaders(HeaderMapEqualRef(&outgoing_headers), true))
      .WillOnce(Invoke([&](const Http::HeaderMap&, bool) -> Http::Status {
        // Reset the stream so the test ends without needing a response.
        encoder.stream_.resetStream(Http::StreamResetReason::RemoteReset);
        return Http::okStatus();
      }));
  EXPECT_CALL(callbacks_.route_->route_entry_, autoHostRewrite()).WillOnce(Return(true));
  EXPECT_CALL(callbacks_.route_->route_entry_, appendXfh()).WillOnce(Return(true));
  router_.decodeHeaders(incoming_headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// With autoHostRewrite() disabled, the upstream request keeps the downstream
// request's original :authority even though the upstream host has a hostname.
TEST_F(RouterTest, AutoHostRewriteDisabled) {
  NiceMock<Http::MockRequestEncoder> encoder;
  std::string req_host{"foo.bar.com"};
  Http::TestRequestHeaderMapImpl incoming_headers;
  HttpTestUtility::addDefaultHeaders(incoming_headers);
  incoming_headers.setHost(req_host);
  cm_.thread_local_cluster_.conn_pool_.host_->hostname_ = "scooby.doo";
  EXPECT_CALL(callbacks_.route_->route_entry_, timeout())
      .WillOnce(Return(std::chrono::milliseconds(0)));
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder_, Http::Protocol::Http10);
  // :authority header in the outgoing request should match the :authority header of
  // the incoming request
  EXPECT_CALL(encoder, encodeHeaders(HeaderMapEqualRef(&incoming_headers), true))
      .WillOnce(Invoke([&](const Http::HeaderMap&, bool) -> Http::Status {
        // Reset the stream so the test ends without needing a response.
        encoder.stream_.resetStream(Http::StreamResetReason::RemoteReset);
        return Http::okStatus();
      }));
  EXPECT_CALL(callbacks_.route_->route_entry_, autoHostRewrite()).WillOnce(Return(false));
  router_.decodeHeaders(incoming_headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// With no upstream socket options configured anywhere, the router returns a
// null options pointer rather than an empty list.
TEST_F(RouterTest, UpstreamSocketOptionsReturnedEmpty) {
  EXPECT_EQ(nullptr, router_.upstreamSocketOptions().get());
}
// Verifies that socket options supplied by the downstream callbacks (here the
// IP transparent options) are propagated verbatim to upstreamSocketOptions().
TEST_F(RouterTest, IpTransparentOptions) {
  Network::Socket::OptionsSharedPtr expected_options =
      Network::SocketOptionFactory::buildIpTransparentOptions();
  // WillOnce replaces the original Times(1)+WillRepeatedly: same single-call
  // expectation, idiomatic spelling.
  EXPECT_CALL(callbacks_, getUpstreamSocketOptions()).WillOnce(Return(expected_options));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  headers.setMethod("CONNECT");
  router_.decodeHeaders(headers, false);
  auto options = router_.upstreamSocketOptions();
  // ASSERT (not EXPECT) so the loop below cannot index past a short list.
  ASSERT_EQ(expected_options->size(), options->size());
  // Compare every option pair; the count comes from the factory instead of
  // the previously hard-coded literal 2.
  for (size_t i = 0; i < expected_options->size(); i++) {
    NiceMock<Network::MockConnectionSocket> dummy_socket;
    auto state = envoy::config::core::v3::SocketOption::STATE_PREBIND;
    auto expected_details = expected_options->at(i)->getOptionDetails(dummy_socket, state);
    auto returned_details = options->at(i)->getOptionDetails(dummy_socket, state);
    EXPECT_TRUE(expected_details == returned_details);
  }
  router_.onDestroy();
}
// Verifies that Win32 redirect records stored in the downstream connection's
// UpstreamSocketOptionsFilterState surface as a single upstream socket option
// equivalent to one built directly from the same records.
TEST_F(RouterTest, RedirectRecords) {
  auto redirect_records = std::make_shared<Network::Win32RedirectRecords>();
  memcpy(redirect_records->buf_, reinterpret_cast<void*>(redirect_records_data_.data()),
         redirect_records_data_.size());
  redirect_records->buf_size_ = redirect_records_data_.size();
  // Install the filter state object, then add the redirect-records option to it.
  router_.downstream_connection_.stream_info_.filterState()->setData(
      Network::UpstreamSocketOptionsFilterState::key(),
      std::make_unique<Network::UpstreamSocketOptionsFilterState>(),
      StreamInfo::FilterState::StateType::Mutable, StreamInfo::FilterState::LifeSpan::Connection);
  router_.downstream_connection_.stream_info_.filterState()
      ->getDataMutable<Network::UpstreamSocketOptionsFilterState>(
          Network::UpstreamSocketOptionsFilterState::key())
      ->addOption(Network::SocketOptionFactory::buildWFPRedirectRecordsOptions(*redirect_records));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  headers.setMethod("CONNECT");
  router_.decodeHeaders(headers, false);
  // Build a reference option from the same records to compare details against.
  Network::Socket::OptionsSharedPtr expected_options =
      Network::SocketOptionFactory::buildWFPRedirectRecordsOptions(*redirect_records);
  auto options = router_.upstreamSocketOptions();
  EXPECT_EQ(1, options->size());
  NiceMock<Network::MockConnectionSocket> dummy_socket;
  auto state = envoy::config::core::v3::SocketOption::STATE_PREBIND;
  auto expected_details = expected_options->at(0)->getOptionDetails(dummy_socket, state);
  auto returned_details = options->at(0)->getOptionDetails(dummy_socket, state);
  EXPECT_TRUE(expected_details == returned_details);
  router_.onDestroy();
}
// Verifies that ApplicationProtocols stored in filter state are exposed to
// the cluster's load balancer context as an application protocol list
// override on the upstream transport socket options.
TEST_F(RouterTest, ApplicationProtocols) {
  callbacks_.streamInfo().filterState()->setData(
      Network::ApplicationProtocols::key(),
      std::make_unique<Network::ApplicationProtocols>(std::vector<std::string>{"foo", "bar"}),
      StreamInfo::FilterState::StateType::ReadOnly, StreamInfo::FilterState::LifeSpan::FilterChain);
  EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _))
      .WillOnce(Invoke([&](Upstream::ResourcePriority, absl::optional<Http::Protocol>,
                           Upstream::LoadBalancerContext* context) {
        // The override must carry exactly the two protocols set above, in order.
        Network::TransportSocketOptionsConstSharedPtr transport_socket_options =
            context->upstreamTransportSocketOptions();
        EXPECT_NE(transport_socket_options, nullptr);
        EXPECT_FALSE(transport_socket_options->applicationProtocolListOverride().empty());
        EXPECT_EQ(transport_socket_options->applicationProtocolListOverride().size(), 2);
        EXPECT_EQ(transport_socket_options->applicationProtocolListOverride()[0], "foo");
        EXPECT_EQ(transport_socket_options->applicationProtocolListOverride()[1], "bar");
        return Upstream::HttpPoolData([]() {}, &cm_.thread_local_cluster_.conn_pool_);
      }));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(Return(&cancellable_));
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  EXPECT_CALL(span_, injectContext(_));
  router_.decodeHeaders(headers, true);
  // When the router filter gets reset we should cancel the pool request.
  EXPECT_CALL(cancellable_, cancel(_));
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
  EXPECT_EQ(0U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
}
// Verify that CONNECT payload is not sent upstream until :200 response headers
// are received.
TEST_F(RouterTest, ConnectPauseAndResume) {
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  EXPECT_CALL(encoder, encodeHeaders(_, false));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  // CONNECT requests have no :path.
  headers.setMethod("CONNECT");
  headers.removePath();
  router_.decodeHeaders(headers, false);
  // Make sure any early data does not go upstream.
  EXPECT_CALL(encoder, encodeData(_, _)).Times(0);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  // Now send the response headers, and ensure the deferred payload is proxied.
  EXPECT_CALL(encoder, encodeData(_, _));
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  response_decoder->decodeHeaders(std::move(response_headers), true);
}
// Invalid upstream will fail over to generic in opt mode, but crash in debug mode.
TEST_F(RouterTest, InvalidUpstream) {
  // Explicitly configure an HTTP upstream, to test factory creation.
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_ =
      absl::make_optional<envoy::config::core::v3::TypedExtensionConfig>();
  // Configure a TCP upstream rather than an HTTP upstream.
  envoy::extensions::upstreams::tcp::generic::v3::GenericConnectionPoolProto generic_config;
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value()
      .mutable_typed_config()
      ->PackFrom(generic_config);
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  // ON_CALL (not EXPECT_CALL): the stream may or may not be created depending
  // on whether the ENVOY_BUG path aborts first.
  ON_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillByDefault(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions&)
                     -> Http::ConnectionPool::Cancellable* {
            response_decoder = &decoder;
            callbacks.onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  headers.setMethod("CONNECT");
  // The mismatched (TCP-for-HTTP) config must trip the factory null check.
  EXPECT_ENVOY_BUG(router_.decodeHeaders(headers, false), "envoy bug failure: factory != nullptr.");
  router_.onDestroy();
}
// Verify that CONNECT payload is not sent upstream if non-200 response headers are received.
TEST_F(RouterTest, ConnectPauseNoResume) {
  // Explicitly configure an HTTP upstream, to test factory creation.
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_ =
      absl::make_optional<envoy::config::core::v3::TypedExtensionConfig>();
  envoy::extensions::upstreams::http::http::v3::HttpConnectionPoolProto http_config;
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value()
      .mutable_typed_config()
      ->PackFrom(http_config);
  NiceMock<Http::MockRequestEncoder> encoder;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  EXPECT_CALL(encoder, encodeHeaders(_, false));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  // CONNECT requests have no :path.
  headers.setMethod("CONNECT");
  headers.removePath();
  router_.decodeHeaders(headers, false);
  // Make sure any early data does not go upstream.
  EXPECT_CALL(encoder, encodeData(_, _)).Times(0);
  Buffer::OwnedImpl data;
  router_.decodeData(data, true);
  // Now send the response headers, and ensure the deferred payload is not proxied.
  // A 400 (non-200) means the tunnel was not established.
  EXPECT_CALL(encoder, encodeData(_, _)).Times(0);
  Http::ResponseHeaderMapPtr response_headers(
      new Http::TestResponseHeaderMapImpl{{":status", "400"}});
  response_decoder->decodeHeaders(std::move(response_headers), true);
}
// A CONNECT request routed to a cluster with an explicit TCP connection pool
// config must use the TCP pool (newConnection), not the HTTP pool (newStream).
TEST_F(RouterTest, ConnectExplicitTcpUpstream) {
  // Explicitly configure a TCP upstream, to test factory creation.
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_ =
      absl::make_optional<envoy::config::core::v3::TypedExtensionConfig>();
  envoy::extensions::upstreams::http::tcp::v3::TcpConnectionPoolProto tcp_config;
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value()
      .mutable_typed_config()
      ->PackFrom(tcp_config);
  callbacks_.route_->route_entry_.connect_config_ =
      absl::make_optional<RouteEntry::ConnectConfig>();
  // Make sure newConnection is called on the TCP pool, not newStream on the HTTP pool.
  EXPECT_CALL(cm_.thread_local_cluster_.tcp_conn_pool_, newConnection(_));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  // CONNECT requests have no :path.
  headers.setMethod("CONNECT");
  headers.removePath();
  router_.decodeHeaders(headers, false);
  router_.onDestroy();
}
// A POST request with connect_config.allow_post set must be treated like a
// tunnel and use the TCP connection pool.
TEST_F(RouterTest, PostExplicitTcpUpstream) {
  // Explicitly configure a generic upstream, to test factory creation.
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_ =
      absl::make_optional<envoy::config::core::v3::TypedExtensionConfig>();
  envoy::extensions::upstreams::http::generic::v3::GenericConnectionPoolProto generic_config;
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value()
      .mutable_typed_config()
      ->PackFrom(generic_config);
  callbacks_.route_->route_entry_.connect_config_ =
      absl::make_optional<RouteEntry::ConnectConfig>();
  // allow_post is what routes a POST to the TCP pool below.
  callbacks_.route_->route_entry_.connect_config_.value().set_allow_post(true);
  // Make sure newConnection is called on the TCP pool, not newStream on the HTTP pool.
  EXPECT_CALL(cm_.thread_local_cluster_.tcp_conn_pool_, newConnection(_));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  headers.setMethod("POST");
  router_.decodeHeaders(headers, false);
  router_.onDestroy();
}
// Counterpart to PostExplicitTcpUpstream: without allow_post, a POST stays on
// the HTTP connection pool.
TEST_F(RouterTest, PostHttpUpstream) {
  // Explicitly configure a generic upstream, to test factory creation.
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_ =
      absl::make_optional<envoy::config::core::v3::TypedExtensionConfig>();
  envoy::extensions::upstreams::http::generic::v3::GenericConnectionPoolProto generic_config;
  cm_.thread_local_cluster_.cluster_.info_->upstream_config_.value()
      .mutable_typed_config()
      ->PackFrom(generic_config);
  callbacks_.route_->route_entry_.connect_config_ =
      absl::make_optional<RouteEntry::ConnectConfig>();
  // Make sure POST request result in the HTTP pool.
  EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(_, _, _));
  Http::TestRequestHeaderMapImpl headers;
  HttpTestUtility::addDefaultHeaders(headers);
  headers.setMethod("POST");
  router_.decodeHeaders(headers, false);
  router_.onDestroy();
}
// The x-envoy-upstream-stream-duration-ms request header must create a max
// stream duration timer with the requested value.
TEST_F(RouterTest, SetDynamicMaxStreamDuration) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder_, Http::Protocol::Http10);
  expectMaxStreamDurationTimerCreate(std::chrono::milliseconds(500));
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-stream-duration-ms", "500"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  // Firing the timer ends the stream without an upstream response.
  max_stream_duration_timer_->invokeCallback();
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
}
// A zero x-envoy-upstream-stream-duration-ms header disables the max stream
// duration timer entirely.
TEST_F(RouterTest, NotSetDynamicMaxStreamDurationIfZero) {
  NiceMock<Http::MockRequestEncoder> encoder1;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder_, Http::Protocol::Http10);
  // The timer will not be created.
  EXPECT_CALL(callbacks_.dispatcher_, createTimer_).Times(0);
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-upstream-stream-duration-ms", "0"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, false);
  router_.onDestroy();
  EXPECT_TRUE(verifyHostUpstreamStats(0, 0));
}
// Test that request/response header/body sizes are properly recorded.
// Delegates to the fixture helper; `false` means no trailers are exercised.
TEST_F(RouterTest, RequestResponseSize) { testRequestResponseSize(false); }
// Test that request/response header/body sizes are properly recorded
// when there are trailers in both the request/response.
// Delegates to the fixture helper; `true` enables trailers on both sides.
TEST_F(RouterTest, RequestResponseSizeWithTrailers) { testRequestResponseSize(true); }
// On a retry, x-envoy-expected-rq-timeout-ms must be reduced by the time
// already consumed by the failed attempt (200ms budget - 50ms elapsed =
// 150ms), and x-envoy-attempt-count must be incremented.
TEST_F(RouterTest, ExpectedUpstreamTimeoutUpdatedDuringRetries) {
  auto retry_options_predicate = std::make_shared<MockRetryOptionsPredicate>();
  callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back(
      retry_options_predicate);
  setIncludeAttemptCountInRequest(true);
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"},
                                         {"x-envoy-internal", "true"},
                                         {"x-envoy-upstream-rq-timeout-ms", "200"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Burn 50ms of the 200ms budget before the first attempt fails.
  test_time_.advanceTimeWait(std::chrono::milliseconds(50));
  // Initial request has 1 attempt.
  EXPECT_EQ(1, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));
  EXPECT_EQ(200, atoi(std::string(headers.getEnvoyExpectedRequestTimeoutMsValue()).c_str()));
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Upstream::RetryOptionsPredicate::UpdateOptionsReturn update_options_return{
      std::make_shared<Network::Socket::Options>()};
  EXPECT_CALL(*retry_options_predicate, updateOptions(_)).WillOnce(Return(update_options_return));
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  response_decoder->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Verify retry options predicate return values have been updated.
  EXPECT_EQ(update_options_return.new_upstream_socket_options_.value(),
            router_.upstreamSocketOptions());
  // We expect the 5xx response to kick off a new request.
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // The retry should cause the header to increase to 2.
  EXPECT_EQ(2, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));
  // We already used 50ms of our 200ms timeout before the retry was triggered
  EXPECT_EQ(150, atoi(std::string(headers.getEnvoyExpectedRequestTimeoutMsValue()).c_str()));
  // Normal response.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->health_checker_, setUnhealthy(_))
      .Times(0);
  Http::ResponseHeaderMapPtr response_headers2(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers2), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
  EXPECT_EQ(2, callbacks_.stream_info_.attemptCount().value());
}
// Mirror of ExpectedUpstreamTimeoutUpdatedDuringRetries with the
// update_expected_rq_timeout_on_retry runtime guard disabled: the expected
// timeout header must stay at the original 200ms on retry instead of being
// reduced by elapsed time.
TEST_F(RouterTest, ExpectedUpstreamTimeoutNotUpdatedDuringRetriesWhenRuntimeGuardDisabled) {
  TestScopedRuntime scoped_runtime;
  scoped_runtime.mergeValues(
      {{"envoy.reloadable_features.update_expected_rq_timeout_on_retry", "false"}});
  auto retry_options_predicate = std::make_shared<MockRetryOptionsPredicate>();
  callbacks_.route_->route_entry_.retry_policy_.retry_options_predicates_.emplace_back(
      retry_options_predicate);
  setIncludeAttemptCountInRequest(true);
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder = nullptr;
  expectNewStreamWithImmediateEncoder(encoder1, &response_decoder, Http::Protocol::Http10);
  expectResponseTimerCreate();
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"},
                                         {"x-envoy-internal", "true"},
                                         {"x-envoy-upstream-rq-timeout-ms", "200"}};
  HttpTestUtility::addDefaultHeaders(headers);
  router_.decodeHeaders(headers, true);
  EXPECT_EQ(1U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // Burn 50ms of the 200ms budget before the first attempt fails.
  test_time_.advanceTimeWait(std::chrono::milliseconds(50));
  // Initial request has 1 attempt.
  EXPECT_EQ(1, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));
  EXPECT_EQ(200, atoi(std::string(headers.getEnvoyExpectedRequestTimeoutMsValue()).c_str()));
  // 5xx response.
  router_.retry_state_->expectHeadersRetry();
  Upstream::RetryOptionsPredicate::UpdateOptionsReturn update_options_return{
      std::make_shared<Network::Socket::Options>()};
  EXPECT_CALL(*retry_options_predicate, updateOptions(_)).WillOnce(Return(update_options_return));
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(503));
  // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
  response_decoder->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Verify retry options predicate return values have been updated.
  EXPECT_EQ(update_options_return.new_upstream_socket_options_.value(),
            router_.upstreamSocketOptions());
  // We expect the 5xx response to kick off a new request.
  EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0);
  NiceMock<Http::MockRequestEncoder> encoder2;
  expectNewStreamWithImmediateEncoder(encoder2, &response_decoder, Http::Protocol::Http10);
  router_.retry_state_->callback_();
  EXPECT_EQ(2U,
            callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
  // The retry should cause the header to increase to 2.
  EXPECT_EQ(2, atoi(std::string(headers.getEnvoyAttemptCountValue()).c_str()));
  // We already used 50ms of our 200ms timeout before the retry was triggered,
  // but with the guard disabled this should not change the header.
  EXPECT_EQ(200, atoi(std::string(headers.getEnvoyExpectedRequestTimeoutMsValue()).c_str()));
  // Normal response.
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->health_checker_, setUnhealthy(_))
      .Times(0);
  Http::ResponseHeaderMapPtr response_headers2(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
              putHttpResponseCode(200));
  response_decoder->decodeHeaders(std::move(response_headers2), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
  EXPECT_EQ(2, callbacks_.stream_info_.attemptCount().value());
}
// Exercises FilterUtility::setTimeoutHeaders() and checks the resulting
// x-envoy-expected-rq-timeout-ms header across combinations of elapsed time,
// global timeout, per-try timeout, and hedging.
TEST(RouterFilterUtilityTest, SetTimeoutHeaders) {
  // No per-try timeout and nothing elapsed: advertise the full global timeout.
  {
    NiceMock<MockRouteEntry> route_entry;
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::TimeoutData timeout_data;
    timeout_data.global_timeout_ = std::chrono::milliseconds(200);
    timeout_data.per_try_timeout_ = std::chrono::milliseconds(0);
    FilterUtility::setTimeoutHeaders(0, timeout_data, route_entry, request_headers, true, false,
                                     false);
    EXPECT_EQ("200", request_headers.get_("x-envoy-expected-rq-timeout-ms"));
  }
  // 150ms already elapsed: only the remaining 50ms of the global budget is advertised.
  {
    NiceMock<MockRouteEntry> route_entry;
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::TimeoutData timeout_data;
    timeout_data.global_timeout_ = std::chrono::milliseconds(200);
    timeout_data.per_try_timeout_ = std::chrono::milliseconds(0);
    FilterUtility::setTimeoutHeaders(150, timeout_data, route_entry, request_headers, true, false,
                                     false);
    EXPECT_EQ("50", request_headers.get_("x-envoy-expected-rq-timeout-ms"));
  }
  // A per-try timeout below the remaining global budget is what gets advertised.
  {
    NiceMock<MockRouteEntry> route_entry;
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::TimeoutData timeout_data;
    timeout_data.global_timeout_ = std::chrono::milliseconds(200);
    timeout_data.per_try_timeout_ = std::chrono::milliseconds(150);
    FilterUtility::setTimeoutHeaders(0, timeout_data, route_entry, request_headers, true, false,
                                     false);
    EXPECT_EQ("150", request_headers.get_("x-envoy-expected-rq-timeout-ms"));
  }
  // Per-try still wins when the elapsed time leaves it under the global budget.
  {
    NiceMock<MockRouteEntry> route_entry;
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::TimeoutData timeout_data;
    timeout_data.global_timeout_ = std::chrono::milliseconds(200);
    timeout_data.per_try_timeout_ = std::chrono::milliseconds(150);
    FilterUtility::setTimeoutHeaders(25, timeout_data, route_entry, request_headers, true, false,
                                     false);
    EXPECT_EQ("150", request_headers.get_("x-envoy-expected-rq-timeout-ms"));
  }
  // Remaining global budget (50ms) is smaller than the 150ms per-try, so it wins.
  {
    NiceMock<MockRouteEntry> route_entry;
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::TimeoutData timeout_data;
    timeout_data.global_timeout_ = std::chrono::milliseconds(200);
    timeout_data.per_try_timeout_ = std::chrono::milliseconds(150);
    FilterUtility::setTimeoutHeaders(150, timeout_data, route_entry, request_headers, true, false,
                                     false);
    EXPECT_EQ("50", request_headers.get_("x-envoy-expected-rq-timeout-ms"));
  }
  // Already past the global timeout: the advertised value is clamped to 1ms.
  {
    NiceMock<MockRouteEntry> route_entry;
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::TimeoutData timeout_data;
    timeout_data.global_timeout_ = std::chrono::milliseconds(200);
    timeout_data.per_try_timeout_ = std::chrono::milliseconds(0);
    FilterUtility::setTimeoutHeaders(300, timeout_data, route_entry, request_headers, true, false,
                                     false);
    EXPECT_EQ("1", request_headers.get_("x-envoy-expected-rq-timeout-ms"));
  }
  // Hedged request: the per-try timeout is ignored, full global timeout shown.
  {
    NiceMock<MockRouteEntry> route_entry;
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::TimeoutData timeout_data;
    timeout_data.global_timeout_ = std::chrono::milliseconds(200);
    timeout_data.per_try_timeout_ = std::chrono::milliseconds(150);
    FilterUtility::setTimeoutHeaders(0, timeout_data, route_entry, request_headers, true, false,
                                     true);
    EXPECT_EQ("200", request_headers.get_("x-envoy-expected-rq-timeout-ms"));
  }
  // Hedged with 25ms elapsed: remaining global budget (200 - 25).
  {
    NiceMock<MockRouteEntry> route_entry;
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::TimeoutData timeout_data;
    timeout_data.global_timeout_ = std::chrono::milliseconds(200);
    timeout_data.per_try_timeout_ = std::chrono::milliseconds(150);
    FilterUtility::setTimeoutHeaders(25, timeout_data, route_entry, request_headers, true, false,
                                     true);
    EXPECT_EQ("175", request_headers.get_("x-envoy-expected-rq-timeout-ms"));
  }
  // Hedged with 150ms elapsed: remaining global budget (200 - 150).
  {
    NiceMock<MockRouteEntry> route_entry;
    Http::TestRequestHeaderMapImpl request_headers;
    FilterUtility::TimeoutData timeout_data;
    timeout_data.global_timeout_ = std::chrono::milliseconds(200);
    timeout_data.per_try_timeout_ = std::chrono::milliseconds(150);
    FilterUtility::setTimeoutHeaders(150, timeout_data, route_entry, request_headers, true, false,
                                     true);
    EXPECT_EQ("50", request_headers.get_("x-envoy-expected-rq-timeout-ms"));
  }
}
// Verifies the 425 (Too Early) retry flow: a request initially sent with
// early data enabled receives a 425 from upstream, is retried with early data
// disabled (while HTTP/3 remains allowed), and the retried request succeeds.
TEST_F(RouterTest, HasEarlyDataAndRetryUpon425) {
  Http::TestRequestHeaderMapImpl headers;
  // This is a GET request.
  HttpTestUtility::addDefaultHeaders(headers);
  router_.retry_425_response_ = true;
  NiceMock<Http::MockRequestEncoder> encoder1;
  Http::ResponseDecoder* response_decoder1 = nullptr;
  // First attempt: the new upstream stream must be created with both HTTP/3
  // and early data allowed.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions& options)
                     -> Http::ConnectionPool::Cancellable* {
            EXPECT_TRUE(options.can_use_http3_);
            EXPECT_TRUE(options.can_send_early_data_);
            response_decoder1 = &decoder;
            callbacks.onPoolReady(encoder1, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  expectResponseTimerCreate();
  EXPECT_CALL(encoder1, encodeHeaders(_, _));
  router_.decodeHeaders(headers, true);
  // Upstream answers 425; capture the retry callback and have it request a
  // retry with early data disabled.
  Http::ResponseHeaderMapPtr response_headers1(
      new Http::TestResponseHeaderMapImpl{{":status", "425"}});
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, HeaderMapEqualRef(&headers), _))
      .WillOnce(Invoke([this](const Http::ResponseHeaderMap&, const Http::RequestHeaderMap&,
                              RetryState::DoRetryHeaderCallback callback) {
        router_.retry_state_->callback_ = [callback]() { callback(/*disable_early_data=*/true); };
        return RetryStatus::Yes;
      }));
  ASSERT(response_decoder1);
  response_decoder1->decodeHeaders(std::move(response_headers1), true);
  EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
  // Expect retry upon 425 response.
  NiceMock<Http::MockRequestEncoder> encoder2;
  Http::ResponseDecoder* response_decoder2 = nullptr;
  // Second attempt: early data must now be disabled while HTTP/3 stays usable.
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(
          Invoke([&](Http::ResponseDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks,
                     const Http::ConnectionPool::Instance::StreamOptions& options)
                     -> Http::ConnectionPool::Cancellable* {
            EXPECT_FALSE(options.can_send_early_data_);
            EXPECT_TRUE(options.can_use_http3_);
            response_decoder2 = &decoder;
            callbacks.onPoolReady(encoder2, cm_.thread_local_cluster_.conn_pool_.host_,
                                  upstream_stream_info_, Http::Protocol::Http10);
            return nullptr;
          }));
  EXPECT_CALL(encoder2, encodeHeaders(HeaderMapEqualRef(&headers), _));
  router_.retry_state_->callback_();
  // The retried request completes with a 200 and no further retries.
  Http::ResponseHeaderMapPtr response_headers2(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  EXPECT_CALL(callbacks_, encodeHeaders_(_, _));
  response_decoder2->decodeHeaders(std::move(response_headers2), true);
  EXPECT_TRUE(verifyHostUpstreamStats(1, 1));
}
// Verifies that an upstream override host supplied via the decoder filter
// callbacks is honored for the initial upstream attempt, but ignored when the
// request is retried after a 5xx response.
TEST_F(RouterTest, RequestWithUpstreamOverrideHost) {
  NiceMock<Http::MockRequestEncoder> first_request_encoder;
  Http::ResponseDecoder* upstream_decoder = nullptr;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(Invoke([&](Http::ResponseDecoder& decoder,
                           Http::ConnectionPool::Callbacks& pool_callbacks,
                           const Http::ConnectionPool::Instance::StreamOptions&)
                           -> Http::ConnectionPool::Cancellable* {
        upstream_decoder = &decoder;
        pool_callbacks.onPoolReady(first_request_encoder,
                                   cm_.thread_local_cluster_.conn_pool_.host_,
                                   upstream_stream_info_, Http::Protocol::Http10);
        return nullptr;
      }));
  expectResponseTimerCreate();
  // Simulate the load balancer invoking `overrideHostToSelect`: it consults
  // `upstreamOverrideHost` on the StreamDecoderFilterCallbacks to learn which
  // upstream host address should be preferred for the first attempt.
  EXPECT_CALL(callbacks_, upstreamOverrideHost())
      .WillOnce(Return(absl::make_optional<absl::string_view>("1.2.3.4")));
  auto preferred_host = router_.overrideHostToSelect();
  EXPECT_EQ("1.2.3.4", preferred_host.value());
  Http::TestRequestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}};
  HttpTestUtility::addDefaultHeaders(headers);
  // Send the initial request as normal.
  router_.decodeHeaders(headers, true);
  // Upstream responds with a 503, which triggers a retry.
  router_.retry_state_->expectHeadersRetry();
  Http::ResponseHeaderMapPtr response_headers_503(
      new Http::TestResponseHeaderMapImpl{{":status", "503"}});
  ASSERT(upstream_decoder != nullptr);
  // NOLINTNEXTLINE: Silence null pointer access warning
  upstream_decoder->decodeHeaders(std::move(response_headers_503), true);
  // The retry kicks off a second upstream request.
  NiceMock<Http::MockRequestEncoder> retry_request_encoder;
  EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _, _))
      .WillOnce(Invoke([&](Http::ResponseDecoder& decoder,
                           Http::ConnectionPool::Callbacks& pool_callbacks,
                           const Http::ConnectionPool::Instance::StreamOptions&)
                           -> Http::ConnectionPool::Cancellable* {
        upstream_decoder = &decoder;
        pool_callbacks.onPoolReady(retry_request_encoder,
                                   cm_.thread_local_cluster_.conn_pool_.host_,
                                   upstream_stream_info_, Http::Protocol::Http10);
        return nullptr;
      }));
  router_.retry_state_->callback_();
  // When the load balancer asks again during the retry, the upstream override
  // host must be ignored.
  EXPECT_CALL(callbacks_, upstreamOverrideHost()).Times(0);
  EXPECT_EQ(absl::nullopt, router_.overrideHostToSelect());
  // The retried request completes normally with a 200.
  Http::ResponseHeaderMapPtr response_headers_200(
      new Http::TestResponseHeaderMapImpl{{":status", "200"}});
  EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _, _)).WillOnce(Return(RetryStatus::No));
  ASSERT(upstream_decoder != nullptr);
  // NOLINTNEXTLINE: Silence null pointer access warning
  upstream_decoder->decodeHeaders(std::move(response_headers_200), true);
  EXPECT_EQ(2, callbacks_.stream_info_.attemptCount().value());
  router_.onDestroy();
}
} // namespace Router
} // namespace Envoy
|
#include "HResultException.h"
#include <sstream>
// Exception type carrying a Windows HRESULT so COM/Win32 failures can be
// propagated through C++ exception handling.
HResultException::HResultException(HRESULT hr)
    : std::runtime_error("HResultException"), hr(hr)
{
}
// Returns the HRESULT value this exception was constructed with.
HRESULT HResultException::GetHRESULT() const {
    return this->hr;
}
|
#include "software/ai/hl/stp/play/enemy_freekick_play.h"
#include "shared/constants.h"
#include "software/ai/hl/stp/evaluation/enemy_threat.h"
#include "software/ai/hl/stp/play/play_factory.h"
#include "software/ai/hl/stp/tactic/block_shot_path_tactic.h"
#include "software/ai/hl/stp/tactic/crease_defender_tactic.h"
#include "software/ai/hl/stp/tactic/goalie_tactic.h"
#include "software/ai/hl/stp/tactic/move_tactic.h"
#include "software/ai/hl/stp/tactic/shadow_enemy_tactic.h"
#include "software/ai/hl/stp/tactic/shadow_freekicker_tactic.h"
#include "software/ai/hl/stp/tactic/stop_tactic.h"
#include "software/ai/world/game_state.h"
#include "software/util/parameter/dynamic_parameters.h"
// Registry/display name for this play.
const std::string EnemyFreekickPlay::name = "Enemy Freekick Play";

// Returns the human-readable name of this play.
std::string EnemyFreekickPlay::getName() const
{
    return name;
}
// This play should be selected whenever the enemy team has been awarded a
// free kick.
bool EnemyFreekickPlay::isApplicable(const World &world) const
{
    return world.gameState().isTheirFreeKick();
}
// Keep running this play for as long as the enemy free kick is in progress.
bool EnemyFreekickPlay::invariantHolds(const World &world) const
{
    return world.gameState().isTheirFreeKick();
}
// Coroutine body for the enemy free kick play.
//
// Yields, in priority order: a goalie, two free-kick shadowers blocking the
// kicker, a crease defender, and then either shadowers for the two most
// threatening enemies or move tactics blocking the friendly net when there
// are not enough threats to shadow.
void EnemyFreekickPlay::getNextTactics(TacticCoroutine::push_type &yield)
{
    // Init our goalie tactic
    auto goalie_tactic = std::make_shared<GoalieTactic>(
        world.ball(), world.field(), world.friendlyTeam(), world.enemyTeam());

    // Init a Crease Defender Tactic
    auto crease_defender_tactic = std::make_shared<CreaseDefenderTactic>(
        world.field(), world.ball(), world.friendlyTeam(), world.enemyTeam(),
        CreaseDefenderTactic::LeftOrRight::RIGHT);

    // Init FreeKickShadower tactics (these robots will both block the enemy robot
    // taking the free kick; at most we will have 2)
    auto shadow_freekicker_1 = std::make_shared<ShadowFreekickerTactic>(
        ShadowFreekickerTactic::First, world.enemyTeam(), world.ball(), world.field(),
        true);
    auto shadow_freekicker_2 = std::make_shared<ShadowFreekickerTactic>(
        ShadowFreekickerTactic::Second, world.enemyTeam(), world.ball(), world.field(),
        true);

    // Init Shadow Enemy Tactics for extra robots
    auto shadow_tactic_main = std::make_shared<ShadowEnemyTactic>(
        world.field(), world.friendlyTeam(), world.enemyTeam(), true, world.ball(),
        Util::DynamicParameters::DefenseShadowEnemyTactic::ball_steal_speed.value(),
        true);
    auto shadow_tactic_secondary = std::make_shared<ShadowEnemyTactic>(
        world.field(), world.friendlyTeam(), world.enemyTeam(), true, world.ball(),
        Util::DynamicParameters::DefenseShadowEnemyTactic::ball_steal_speed.value(),
        true);

    // Init Move Tactics for extra robots (These will be used if there are no robots
    // to shadow)
    auto move_tactic_main      = std::make_shared<MoveTactic>(true);
    auto move_tactic_secondary = std::make_shared<MoveTactic>(true);

    do
    {
        // Create tactic vector (starting with Goalie)
        std::vector<std::shared_ptr<Tactic>> tactics_to_run = {goalie_tactic};

        // Get all enemy threats
        auto enemy_threats = Evaluation::getAllEnemyThreats(
            world.field(), world.friendlyTeam(), world.enemyTeam(), world.ball(), false);

        // Check if the enemy is passing-capable
        bool enemy_team_can_pass =
            Util::DynamicParameters::EnemyCapability::enemy_team_can_pass.value();

        // Update goalie tactic
        goalie_tactic->updateParams(world.ball(), world.field(), world.friendlyTeam(),
                                    world.enemyTeam());

        // Update free kick shadowers
        shadow_freekicker_1->updateParams(world.enemyTeam(), world.ball());
        shadow_freekicker_2->updateParams(world.enemyTeam(), world.ball());

        // Update crease defenders
        crease_defender_tactic->updateParams(world.ball(), world.field(),
                                             world.friendlyTeam(), world.enemyTeam());

        // Add Freekick shadower tactics
        tactics_to_run.emplace_back(shadow_freekicker_1);
        tactics_to_run.emplace_back(shadow_freekicker_2);
        // Add Crease defender tactic
        tactics_to_run.emplace_back(crease_defender_tactic);

        // Assign ShadowEnemy tactics until we have every enemy covered. If there are
        // not enough threats to shadow, move our robots to block the friendly net
        if (enemy_threats.size() == 0)
        {
            move_tactic_main->updateParams(
                world.field().friendlyGoal() + Point(0, 2 * ROBOT_MAX_RADIUS_METERS),
                (world.ball().position() - world.field().friendlyGoal()).orientation(),
                0);
            // BUGFIX: this second call previously updated move_tactic_main
            // again, so move_tactic_secondary ran with stale parameters.
            move_tactic_secondary->updateParams(
                world.field().friendlyGoal() + Point(0, -2 * ROBOT_MAX_RADIUS_METERS),
                (world.ball().position() - world.field().friendlyGoal()).orientation(),
                0);

            tactics_to_run.emplace_back(move_tactic_main);
            tactics_to_run.emplace_back(move_tactic_secondary);
        }
        if (enemy_threats.size() == 1)
        {
            // BUGFIX: enemy_threats.at(1) threw std::out_of_range when only a
            // single threat exists; the lone threat lives at index 0.
            shadow_tactic_main->updateParams(enemy_threats.at(0), world.field(),
                                             world.friendlyTeam(), world.enemyTeam(),
                                             ROBOT_MAX_RADIUS_METERS * 3,
                                             enemy_team_can_pass, world.ball());
            move_tactic_main->updateParams(
                world.field().friendlyGoal() + Point(0, 2 * ROBOT_MAX_RADIUS_METERS),
                (world.ball().position() - world.field().friendlyGoal()).orientation(),
                0);

            tactics_to_run.emplace_back(shadow_tactic_main);
            tactics_to_run.emplace_back(move_tactic_main);
        }
        if (enemy_threats.size() >= 2)
        {
            // BUGFIX: indices 1 and 2 skipped the first threat and threw
            // std::out_of_range when exactly two threats exist; use 0 and 1.
            shadow_tactic_main->updateParams(enemy_threats.at(0), world.field(),
                                             world.friendlyTeam(), world.enemyTeam(),
                                             ROBOT_MAX_RADIUS_METERS * 3,
                                             enemy_team_can_pass, world.ball());
            shadow_tactic_secondary->updateParams(enemy_threats.at(1), world.field(),
                                                  world.friendlyTeam(), world.enemyTeam(),
                                                  ROBOT_MAX_RADIUS_METERS * 3,
                                                  enemy_team_can_pass, world.ball());

            tactics_to_run.emplace_back(shadow_tactic_main);
            tactics_to_run.emplace_back(shadow_tactic_secondary);
        }

        // yield the Tactics this Play wants to run, in order of priority
        yield(tactics_to_run);
    } while (true);
}
// Register this play in the PlayFactory
static TPlayFactory<EnemyFreekickPlay> factory;
|
#include <iostream>
#include <vector>
#include <queue>
#include <algorithm>
using namespace std;
int main(void)
{
vector<pair<int, int> > jewels;
vector<int> bags;
priority_queue<int> pq;
int n, k;
long long sum = 0;
cin >> n >> k;
for (int i = 0, cost, weight; i < n; i++)
{
cin >> weight >> cost;
jewels.push_back(make_pair(weight, cost));
}
for (int i = 0, weight; i < k; i++)
{
cin >> weight;
bags.push_back(weight);
}
sort(jewels.begin(), jewels.end());
sort(bags.begin(), bags.end());
for (int i = 0, j = 0; i < k; i++)
{
while (j < n && jewels[j].first <= bags[i])
pq.push(jewels[j++].second);
if (!pq.empty())
{
sum += pq.top();
pq.pop();
}
}
cout << sum;
}
|
/*================================================================
* Copyright (C)2020 All rights reserved.
* FileName : LifeGameWindowOne.cc
* Author : elonkou
* Email : elonkou@ktime.cc
* Date : 2020年03月21日 星期六 09时43分54秒
================================================================*/
#include "LifeGameWindowOne.hh"
// Default-constructs the window; no resources are acquired here.
LifeGameWindowOne::LifeGameWindowOne() {
}
// Nothing to release; defined for symmetry with the constructor.
LifeGameWindowOne::~LifeGameWindowOne() {
}
// Intentionally empty: this window variant currently renders nothing.
void LifeGameWindowOne::show() {}
|
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
// Begin includes
#include "extern/beatsaber-hook/shared/utils/typedefs.h"
// Including type: RootMotion.FinalIK.RagdollUtility
#include "RootMotion/FinalIK/RagdollUtility.hpp"
// Including type: UnityEngine.Vector3
#include "UnityEngine/Vector3.hpp"
// Including type: UnityEngine.Quaternion
#include "UnityEngine/Quaternion.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp"
#include "extern/beatsaber-hook/shared/utils/utils.h"
// Completed includes
// Begin forward declares
// Forward declaring namespace: UnityEngine
namespace UnityEngine {
// Forward declaring type: Transform
class Transform;
}
// Completed forward declares
// Type namespace: RootMotion.FinalIK
namespace RootMotion::FinalIK {
// Size: 0x34
#pragma pack(push, 1)
// Autogenerated type: RootMotion.FinalIK.RagdollUtility/Child
// Autogenerated mirror of the managed type RootMotion.FinalIK.RagdollUtility/Child.
// NOTE(review): the field order and sizes must match the il2cpp runtime object
// exactly (this class sits inside #pragma pack(push, 1) with size/offset
// static_asserts) — do not reorder, add, or remove fields.
class RagdollUtility::Child : public ::Il2CppObject {
  public:
    // public UnityEngine.Transform t
    // Size: 0x8
    // Offset: 0x10
    UnityEngine::Transform* t;
    // Field size check
    static_assert(sizeof(UnityEngine::Transform*) == 0x8);
    // public UnityEngine.Vector3 localPosition
    // Size: 0xC
    // Offset: 0x18
    UnityEngine::Vector3 localPosition;
    // Field size check
    static_assert(sizeof(UnityEngine::Vector3) == 0xC);
    // public UnityEngine.Quaternion localRotation
    // Size: 0x10
    // Offset: 0x24
    UnityEngine::Quaternion localRotation;
    // Field size check
    static_assert(sizeof(UnityEngine::Quaternion) == 0x10);
    // Creating value type constructor for type: Child
    Child(UnityEngine::Transform* t_ = {}, UnityEngine::Vector3 localPosition_ = {}, UnityEngine::Quaternion localRotation_ = {}) noexcept : t{t_}, localPosition{localPosition_}, localRotation{localRotation_} {}
    // public System.Void .ctor(UnityEngine.Transform transform)
    // Offset: 0x1A891E0
    // Creates a managed instance via il2cpp, invoking the C# constructor.
    template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary>
    static RagdollUtility::Child* New_ctor(UnityEngine::Transform* transform) {
      static auto ___internal__logger = ::Logger::get().WithContext("RootMotion::FinalIK::RagdollUtility::Child::.ctor");
      return THROW_UNLESS((::il2cpp_utils::New<RagdollUtility::Child*, creationType>(transform)));
    }
    // public System.Void FixTransform(System.Single weight)
    // Offset: 0x1A89E28
    void FixTransform(float weight);
    // public System.Void StoreLocalState()
    // Offset: 0x1A89DD8
    void StoreLocalState();
}; // RootMotion.FinalIK.RagdollUtility/Child
#pragma pack(pop)
static check_size<sizeof(RagdollUtility::Child), 36 + sizeof(UnityEngine::Quaternion)> __RootMotion_FinalIK_RagdollUtility_ChildSizeCheck;
static_assert(sizeof(RagdollUtility::Child) == 0x34);
}
DEFINE_IL2CPP_ARG_TYPE(RootMotion::FinalIK::RagdollUtility::Child*, "RootMotion.FinalIK", "RagdollUtility/Child");
|
/************************************************************************
Copyright 2018 andrewpqc@mails.ccnu.edu.cn
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
************************************************************************/
#ifndef __VOGRO_UTILS_HPP__
#define __VOGRO_UTILS_HPP__
#include <algorithm>
#include <map>
#include <sstream>
#include <string>
#include <unistd.h>
#include <vector>
#include <locale>
#include <codecvt>
#define INITCOLOR(color) std::string("\033[1;") + std::string(color) + std::string("m")
#define RED_COLOR "31"
#define GREEN_COLOR "32"
#define YELLOW_COLOR "33"
#define BLUE_COLOR "34"
#define ZERO_COLOR "0"
// Strips leading space characters (' ') from the given string.
// Only plain spaces are removed (tabs/newlines are left untouched), matching
// the original behavior. The original erased one character per iteration
// (O(n^2)) and left an unused `length` local; this does a single erase.
inline static std::string ltrim(std::string str) {
    const auto first_non_space = str.find_first_not_of(' ');
    if (first_non_space == std::string::npos)
        return std::string();  // all spaces (or empty) -> empty result
    str.erase(0, first_non_space);
    return str;
}
// Strips trailing space characters (' ') from the given string.
// BUGFIX: the original indexed with an unsigned `i = length - 1` and looped
// on `i >= 0` (always true for unsigned); for an empty string `length - 1`
// wrapped around and `str[i]` read out of bounds.
inline static std::string rtrim(std::string str) {
    while (!str.empty() && str.back() == ' ')
        str.pop_back();
    return str;
}
// Removes both leading and trailing spaces by composing ltrim and rtrim.
inline static std::string trim(std::string str) {
    auto left_trimmed = ltrim(std::move(str));
    return rtrim(std::move(left_trimmed));
}
// Percent-encodes every byte of `value` that is not an RFC 3986 "unreserved"
// character (ALPHA / DIGIT / '-' / '.' / '_' / '~').
inline static std::string url_encode(const std::string &value) noexcept {
    static const char *hex_digits = "0123456789ABCDEF";
    std::string encoded;
    encoded.reserve(value.size());  // lower bound on the output size
    for (const char c : value) {
        const bool unreserved = (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
                                (c >= 'a' && c <= 'z') || c == '-' || c == '.' ||
                                c == '_' || c == '~';
        if (unreserved) {
            encoded += c;
        } else {
            const auto byte = static_cast<unsigned char>(c);
            encoded += '%';
            encoded += hex_digits[byte >> 4];
            encoded += hex_digits[byte & 15];
        }
    }
    return encoded;
}
// Decodes a percent-encoded string: "%XY" becomes the byte with hex value XY,
// '+' becomes a space, everything else is copied through. A '%' without two
// following characters is kept literally.
inline static std::string url_decode(const std::string &value) noexcept {
    std::string decoded;
    decoded.reserve(value.size() / 3 + (value.size() % 3));  // Minimum size of result
    std::size_t pos = 0;
    while (pos < value.size()) {
        const char c = value[pos];
        if (c == '%' && pos + 2 < value.size()) {
            const auto hex_pair = value.substr(pos + 1, 2);
            decoded += static_cast<char>(std::strtol(hex_pair.c_str(), nullptr, 16));
            pos += 3;
        } else if (c == '+') {
            decoded += ' ';
            ++pos;
        } else {
            decoded += c;
            ++pos;
        }
    }
    return decoded;
}
// Converts a wide string to its UTF-8 encoded narrow representation.
// NOTE(review): std::wstring_convert/std::codecvt_utf8 are deprecated since
// C++17 (still functional); a replacement would need a third-party library.
inline static std::string u8wstring_to_string(const std::wstring &wstr) {
    std::wstring_convert<std::codecvt_utf8<wchar_t>> conv;
    return conv.to_bytes(wstr);
}
// Converts a UTF-8 encoded narrow string to a wide string.
// NOTE(review): std::wstring_convert/std::codecvt_utf8 are deprecated since
// C++17 (still functional); a replacement would need a third-party library.
inline static std::wstring u8string_to_wstring(const std::string &str) {
    std::wstring_convert<std::codecvt_utf8<wchar_t> > conv;
    return conv.from_bytes(str);
}
// Matches a request URL against a handler URL pattern and extracts dynamic
// path parameters into storeMap.
//
// The pattern may contain brace segments of the form "{type:name}" (e.g.
// "/user/{int:id}/"); a matched segment's value is stored as storeMap[name].
// If no ':' appears inside the braces, the type defaults to "int" and the
// collected text is discarded (name stays empty).
//
// Returns pair<matched, isStatic>:
//   (false, true)  -- URL is under /static (serve as a static file)
//   (true,  false) -- pattern matched; storeMap holds the parameters
//   (false, false) -- no match
inline static std::pair<bool, bool> urlMatch(std::string requestUrl, std::string handlerUrl,
                                             std::map<std::string, std::string> &storeMap) {
    // Static assets are handled elsewhere; report "static" without matching.
    if (requestUrl.find("/static") == 0)
        return std::make_pair(false, true);
    // Normalize both URLs to end with '/' so the segment scans below can rely
    // on a trailing delimiter.
    if (handlerUrl.back() != '/')
        handlerUrl += '/';
    if (requestUrl.back() != '/')
        requestUrl += '/';
    std::string type, name, dynamicParam;
    auto handlerUrlLength = handlerUrl.length();
    auto requestUrlLength = requestUrl.length();
    auto max_length = (handlerUrlLength > requestUrlLength) ? handlerUrlLength
                                                            : requestUrlLength;
    // i walks the handler pattern, j walks the request URL.
    size_t i = 0, j = 0;
    for (; (i < max_length) && (j < max_length); ++i, ++j) {
        if (handlerUrl[i] == '{') {
            // Parse "{type:name}" — split the brace content on ':'.
            auto tempIndex = i + 1;
            bool flag = true;  // true means we are still reading the "type" part
            do {
                if (handlerUrl[tempIndex] == ':') {
                    flag = false;
                    ++tempIndex;
                }
                if (!flag)
                    name += handlerUrl[tempIndex];
                else
                    type += handlerUrl[tempIndex];
                ++tempIndex;
            } while (handlerUrl[tempIndex] != '}');
            i = tempIndex + 1;
            // No ':' seen -> treat the segment as having the default type.
            if (flag == true)
                type = "int";
            // Consume the corresponding request segment up to the next '/'.
            do {
                // if type is int, every char in dynamicParam should be in
                // [48,57]
                if ((type == "int") && (requestUrl[j] < 48 || requestUrl[j] > 57)) {
                    return std::make_pair(false, false);
                }
                dynamicParam += requestUrl[j];
                ++j;
            } while (requestUrl[j] != '/');
            // store the dynamic parameters to storeMap
            storeMap[name] = dynamicParam;
            // clear name, type, dynamicParam
            name.clear();
            type.clear();
            dynamicParam.clear();
        }
        // Literal characters must match exactly, and neither URL may end early.
        if ((i >= handlerUrlLength) || (j >= requestUrlLength) || (handlerUrl[i] != requestUrl[j])) {
            return std::make_pair(false, false);
        }
    }
    // Leftover request characters mean the pattern was shorter than the URL.
    if (j < requestUrlLength) return std::make_pair(false, false);
    return std::make_pair(true, false);
}
// Parses a URL query string ("k1=v1&k2=v2") into a key -> value map.
// A key with no '=' maps to the empty string. An empty input yields an
// empty map. Fixes the signed/unsigned comparison of the original
// (`int i <= str.length()`) by using std::size_t.
std::map<std::string, std::string> split_query_string(std::string str) {
    std::map<std::string, std::string> results;
    if (str.length() == 0)
        return results;
    std::string key, val;
    bool reading_key = true;
    // Iterate one position past the end so the final pair is flushed.
    for (std::size_t i = 0; i <= str.length(); i++) {
        if (i == str.length() || str[i] == '&') {
            reading_key = true;
            results[key] = val;
            key.clear();
            val.clear();
            continue;
        } else if (str[i] == '=') {
            reading_key = false;
            continue;
        }
        if (reading_key)
            key += str[i];
        else
            val += str[i];
    }
    return results;
}
// Splits an HTTP header line "Key: Value" at the first ':' and returns the
// (key, value) pair with surrounding spaces trimmed from both parts.
inline std::pair<std::string, std::string> parse_header(std::string &header) {
    const auto colon_pos = header.find(":");
    auto key = trim(header.substr(0, colon_pos));
    auto value = trim(header.substr(colon_pos + 1));
    return std::make_pair(key, value);
}
// Parses an HTTP request line ("METHOD /url HTTP/version") into its parts.
// Returns {method, {percent-decoded url, version}}, where version is the
// text after the first '/' of the third field (e.g. "1.1" for "HTTP/1.1").
// Fixes the signed/unsigned loop comparison of the original by using
// std::size_t for the index.
inline static std::pair<std::string, std::pair<std::string, std::string>> parse_request_line(
    std::string& request_line) {
    std::string method, url, version;
    url.reserve(50);  // pre alloc 50 byte
    // State machine over the three space-separated fields:
    // 'm' = method, 'u' = url, 'v' = version.
    char flag = 'm';
    bool versionNumberStart = false;
    for (std::size_t i = 0; i < request_line.length(); i++) {
        if (flag == 'm') {
            if (request_line[i] == ' ') {
                flag = 'u';
                continue;
            }
            method += request_line[i];
        }
        if (flag == 'u') {
            if (request_line[i] == ' ') {
                flag = 'v';
                continue;
            }
            url += request_line[i];
        }
        if (flag == 'v') {
            // Skip up to and including the '/' of "HTTP/"; everything after
            // it is the version number.
            if (request_line[i] == '/') {
                versionNumberStart = true;
                ++i;
            }
            if (versionNumberStart)
                version += request_line[i];
        }
    }
    // for return three value
    auto url_version_pair = std::make_pair(url_decode(url), version);
    return std::make_pair(method, url_version_pair);
}
// Returns the lower-cased file extension of `path` (the text after the final
// '.'), or an empty string when the path contains no '.' at all.
// BUGFIX: the original ignored the find_last_of == npos case, so
// substr(npos + 1) == substr(0) returned the whole path as the "extension".
std::string getFileExtension(std::string path) {
    auto pos = path.find_last_of('.');
    if (pos == std::string::npos)
        return "";
    std::string ext = path.substr(pos + 1);
    // ASCII-only lower-casing ('Z' - 'z' == -32, so this adds 32).
    std::transform(ext.begin(), ext.end(), ext.begin(), [](char in) -> char {
        if (in <= 'Z' && in >= 'A')
            return in - ('Z' - 'z');
        return in;
    });
    return ext;
}
// Returns 0 when `path` names an existing filesystem entry (POSIX access with
// F_OK), and -1 otherwise -- including when `path` is NULL.
int is_file_exist(const char *path) {
    if (path == NULL)
        return -1;
    return (access(path, F_OK) == 0) ? 0 : -1;
}
// Splits `s` on every occurrence of `delimiter` and returns the pieces in
// order. Adjacent delimiters yield empty pieces; an empty input yields an
// empty vector (std::getline semantics).
inline static std::vector<std::string> split(const std::string &s, char delimiter) {
    std::vector<std::string> pieces;
    std::istringstream stream(s);
    for (std::string piece; std::getline(stream, piece, delimiter);)
        pieces.push_back(piece);
    return pieces;
}
// The standard base64 alphabet, indexed by 6-bit value.
static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

// True when `c` may appear in base64 payload data: alphanumeric, '+' or '/'.
// The padding character '=' is intentionally excluded.
static inline bool is_base64(unsigned char c) {
    if (isalnum(c))
        return true;
    return c == '+' || c == '/';
}
// Base64-encodes `in_len` bytes starting at `bytes_to_encode` using the
// standard alphabet (base64_chars above); the output is padded with '=' up
// to a multiple of 4 characters.
std::string base64_encode(unsigned char const *bytes_to_encode, unsigned long in_len) {
    std::string ret;
    int i = 0;
    int j = 0;
    unsigned char char_array_3[3];  // current group of up to 3 input bytes
    unsigned char char_array_4[4];  // the corresponding 4 output sextets
    while (in_len--) {
        char_array_3[i++] = *(bytes_to_encode++);
        if (i == 3) {
            // Split 3 bytes (24 bits) into four 6-bit values.
            char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
            char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
            char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
            char_array_4[3] = char_array_3[2] & 0x3f;
            for (i = 0; (i < 4); i++)
                ret += base64_chars[char_array_4[i]];
            i = 0;
        }
    }
    // Handle the final partial group (1 or 2 leftover bytes).
    if (i) {
        for (j = i; j < 3; j++)
            char_array_3[j] = '\0';
        char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
        char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
        char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
        for (j = 0; (j < i + 1); j++)
            ret += base64_chars[char_array_4[j]];
        // Pad the output to a multiple of 4 characters.
        while ((i++ < 3))
            ret += '=';
    }
    return ret;
}
// Decodes a base64 string back into raw bytes. Decoding stops at the first
// '=' padding character or any character outside the base64 alphabet.
std::string base64_decode(std::string const &encoded_string) {
    int in_len = encoded_string.size();
    int i = 0;
    int j = 0;
    int in_ = 0;
    unsigned char char_array_4[4], char_array_3[3];  // 4 sextets -> 3 bytes
    std::string ret;
    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_];
        in_++;
        if (i == 4) {
            // Map alphabet characters back to their 6-bit values.
            for (i = 0; i < 4; i++)
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            // Reassemble four 6-bit values into 3 bytes.
            char_array_3[0] = (char_array_4[0] << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];
            for (i = 0; (i < 3); i++)
                ret += char_array_3[i];
            i = 0;
        }
    }
    // Handle the final partial group (input not a multiple of 4 characters).
    if (i) {
        for (j = 0; j < i; j++)
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        char_array_3[0] = (char_array_4[0] << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        for (j = 0; (j < i - 1); j++) ret += char_array_3[j];
    }
    return ret;
}
#endif
|
// Copyright (c) 2018-2020, The TurtleCoin Developers // Copyright (c) 2020, TRRXITTE inc.
//
// Please see the included LICENSE file for more information.
///////////////////////////////////////////
#include <zedwallet++/TransactionMonitor.h>
///////////////////////////////////////////
#include <iostream>
#include <utilities/ColouredMsg.h>
#include <zedwallet++/CommandImplementations.h>
#include <zedwallet++/GetInput.h>
// Main monitor loop: subscribes to the wallet's transaction events, pulls
// queued transactions one at a time, and prints each relevant incoming
// transfer to the console until stop() is called.
void TransactionMonitor::start()
{
    /* Grab new transactions and push them into a queue for processing */
    m_walletBackend->m_eventHandler->onTransaction.subscribe([this](const auto tx) { m_queuedTransactions.push(tx); });
    const std::string prompt = getPrompt(m_walletBackend);
    while (!m_shouldStop)
    {
        /* Presumably blocks until a transaction is queued or stop() wakes the
           queue — confirm against the queue implementation */
        const auto tx = m_queuedTransactions.peek();
        /* Make sure we're not printing a garbage tx */
        if (m_shouldStop)
        {
            break;
        }
        /* Don't print out fusion or outgoing transactions */
        if (!tx.isFusionTransaction() && tx.totalAmount() > 0)
        {
            /* Acquire the lock, so we're not interleaving our output when a
            command is being handled, for example, transferring */
            std::scoped_lock lock(*m_mutex);
            std::cout << InformationMsg("\nNew transaction found!\n\n");
            printIncomingTransfer(tx);
            /* Write out the prompt after every transfer. This prevents the
            wallet being in a 'ready' state, waiting for input, but looking
            like it's not. */
            std::cout << InformationMsg(prompt) << std::flush;
        }
        /* Remove the processed transaction from the queue */
        m_queuedTransactions.deleteFront();
    }
    /* Stop receiving transaction events once the loop exits */
    m_walletBackend->m_eventHandler->onTransaction.unsubscribe();
}
// Signals the monitor loop to exit and stops the transaction queue (which
// unblocks any pending queue operation in start()).
void TransactionMonitor::stop()
{
    m_shouldStop = true;
    m_queuedTransactions.stop();
}
// Exposes the console-output mutex so other components can serialize their
// output with the monitor's printing.
std::shared_ptr<std::mutex> TransactionMonitor::getMutex() const
{
    return m_mutex;
}
|
// (C) Copyright 2009-2011 Frederic Bron.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifdef TEST_STD
# include <type_traits>
#else
# include <boost/type_traits/has_right_shift.hpp>
#endif
#include "test.hpp"
#include "check_integral_constant.hpp"
#define BOOST_TT_TRAIT_NAME has_right_shift
#define BOOST_TT_TRAIT_OP >>
#include <istream>
#include <string>
#include "has_binary_operators.hpp"
// Exercises boost::has_right_shift<Lhs, Rhs, Ret> on specific operand
// combinations. Expected 0 = no usable operator>>, 1 = operator>> exists
// and (when a third argument is given) its result converts to Ret.
// Patterns visible in the checks below:
//   - any void operand or void requested return type yields 0
//   - built-in >> requires integral operands, so double and pointer
//     operands always yield 0
//   - std::istream >> x overloads yield 1 for the standard extraction types
void specific() {
   // void operands / void return requests are never valid
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, void, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool &, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const &, void, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void, int &, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void, double, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void, void* const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const, void, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, void, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, void, void >::value), 0);
   // bool as left operand
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, bool, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, bool &, int const >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, int const, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, int & >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, double &, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, double const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, int* const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, int* &, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, int* &, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, int* const &, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, int* const &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool, int* const &, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, bool const >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, bool const, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, int const, int >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, int &, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, double const, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, double &, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, double const &, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, double const &, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, void* const &, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, void* const &, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, int* >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const, int* const, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool &, bool, bool const & >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool &, int const, bool const & >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool &, int & >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool &, int const &, int const >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool &, void* &, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool &, void* &, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool &, int*, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool &, int* const &, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const &, bool, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const &, bool &, bool >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const &, double const, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const &, double const, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const &, void* const, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const &, void* &, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const &, void* &, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const &, void* const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< bool const &, void* const &, int & >::value), 0);
   // int as left operand
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, bool, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, bool const, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, bool const, int >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, bool &, bool >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, bool &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, bool &, int const & >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, double &, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, void* const, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, void* const &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, int* & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int, int* const &, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, bool &, int >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, bool &, int const & >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, bool const &, bool const >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, int, bool const >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, int const, bool const & >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, double const, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, double & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, double &, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, void* const, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, void* &, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, int*, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, int* &, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const, int* &, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int &, bool &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int &, bool const &, bool const & >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int &, bool const &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int &, int &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int &, double, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int &, double, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int &, double const, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int &, double const, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int &, void* &, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int &, int* &, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const &, int const, int const & >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const &, int &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const &, int const &, bool >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const &, double, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const &, double const, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const &, void* const, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const &, void* &, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const &, void* &, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const &, int* &, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int const &, int* const &, bool const >::value), 0);
   // double operands: no built-in >> for floating point
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double, bool const, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double, int, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double, int &, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double, double, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double, double const, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double, void* const, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double, void* const &, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const, bool &, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const, bool const &, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const, int, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const, int, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const, double, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const, void*, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const, void* &, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const, int* const &, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double &, bool, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double &, bool const &, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double &, double, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double &, double const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double &, double &, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double &, double &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double &, double const &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double &, void* const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double &, int*, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const &, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const &, int, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const &, int &, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const &, int &, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const &, void*, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const &, void* const, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const &, void* const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const &, int* const, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const &, int* &, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< double const &, int* &, bool const & >::value), 0);
   // pointer operands: no built-in >> for pointers
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void*, bool, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void*, bool const, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void*, int, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void*, int const, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void*, int const, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void*, int* const, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void*, int* const &, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const, int const, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const, double const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const, void* const, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const, void* const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, bool const &, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, int const, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, int const, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, double, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, double, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, double const, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, double const, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, void* const, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, int* const, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, int* const, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, int* const &, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* &, int* const &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, bool, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, bool const, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, bool const, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, int, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, int const, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, int &, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, int &, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, double const, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, void* &, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< void* const &, void* const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, int, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, int const, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, int const, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, int &, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, double const &, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, void*, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, int* const, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, int* &, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, int* &, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int*, int* const &, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const, bool, int const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const, double const, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const, double &, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const, double const &, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const, void* >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const, int* const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const, int* &, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* &, double &, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* &, void* const &, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* &, int*, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* &, int* const &, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, bool, bool const >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, bool, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, bool, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, int, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, int, int >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, int &, bool & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, int &, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, double const, int const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, void*, bool >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, void* const &, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, int*, bool const & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, int* const, void >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, int* &, int & >::value), 0);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< int* const &, int* const &, int const & >::value), 0);
   // std::istream extraction: stream >> x returns std::istream& for the
   // standard extraction targets, so all of these are 1
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, bool&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, short&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, unsigned short&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, int&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, unsigned int&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, long&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, unsigned long&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, float&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, double&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, void*&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, char&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, signed char&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, unsigned char&, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, char*, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, signed char*, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, unsigned char*, std::istream& >::value), 1);
   BOOST_CHECK_INTEGRAL_CONSTANT((::boost::BOOST_TT_TRAIT_NAME< std::istream, std::string&, std::istream& >::value), 1);
}
// Register the test: run the shared `common()` checks plus `specific()`
TT_TEST_BEGIN(BOOST_TT_TRAIT_NAME)
common();
specific();
TT_TEST_END
|
#include <stdio.h>
#include <testComponent272/lib1.h>
/* Test component entry point: emits a greeting and reports success (0). */
int testComponent272_6 () {
    static const char kGreeting[] = "Hello world!\n";
    printf("%s", kGreeting);
    return 0;
}
|
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "value_compare.h"
#include <vespa/eval/eval/tensor.h>
#include <vespa/eval/eval/tensor_engine.h>
#include <vespa/eval/eval/value_codec.h>
namespace vespalib::eval {

// Two values compare equal when their TensorSpec representations do.
bool operator==(const Value &lhs, const Value &rhs)
{
    const auto lhs_spec = TensorSpec::from_value(lhs);
    const auto rhs_spec = TensorSpec::from_value(rhs);
    return (lhs_spec == rhs_spec);
}

// Print a value by streaming its TensorSpec representation.
std::ostream &operator<<(std::ostream &out, const Value &value)
{
    out << TensorSpec::from_value(value);
    return out;
}

} // namespace vespalib::eval
|
// #55 Zip アルゴリズム
// Python の zip() を実装しろという問題だ。
#include <iostream>
#include <iterator>
#include <utility>
#include <vector>
// #55 Zip algorithm — range version.
// Pairs up elements from [begin1, end1) and [begin2, end2) until the
// shorter range is exhausted, writing each pair through `result`.
// Returns nothing; `result` must be a valid output iterator with enough
// room for min(len1, len2) pairs (or an insert iterator).
template <typename Input1, typename Input2, typename Output>
void zip(
    Input1 begin1, Input1 end1,
    Input2 begin2, Input2 end2,
    Output result)
{
    while (begin1 != end1 && begin2 != end2)
    {
        // Dereference before assigning: the original `result++ = ...` only
        // happens to work for insert iterators; `*result++ = ...` is the
        // output-iterator protocol and also works for raw pointers and
        // container iterators.
        *result++ = std::make_pair(*begin1++, *begin2++);
    }
}
// Container version: convenience overload that zips two vectors into a
// vector of pairs, truncated to the shorter input.
template <typename T, typename U>
std::vector<std::pair<T, U>> zip(
    std::vector<T> const & range1,
    std::vector<U> const & range2)
{
    std::vector<std::pair<T, U>> zipped;
    // Delegate to the range version, appending into the result vector
    zip(
        std::cbegin(range1), std::cend(range1),
        std::cbegin(range2), std::cend(range2),
        std::back_inserter(zipped));
    return zipped;
}
int main()
{
    std::vector<int> lhs{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
    std::vector<int> rhs{ 1, 1, 3, 5, 8, 13, 21 };
    // Ideally this call would live in the for statement's init clause
    const auto zipped = zip(lhs, rhs);
    for (auto const & element : zipped)
    {
        std::cout << '{' << element.first << ',' << element.second << '}' << std::endl;
    }
}
|
/***************************************************************************
* Copyright 1998-2020 by authors (see AUTHORS.txt) *
* *
* This file is part of LuxCoreRender. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.*
* See the License for the specific language governing permissions and *
* limitations under the License. *
***************************************************************************/
#include <math.h>
#include <boost/format.hpp>
#include <boost/filesystem.hpp>
#include "luxrays/utils/thread.h"
#include "luxrays/utils/strutils.h"
#include "slg/samplers/sobol.h"
#include "slg/utils/pathdepthinfo.h"
#include "slg/engines/caches/photongi/photongicache.h"
#include "slg/engines/caches/photongi/tracephotonsthread.h"
#include "slg/utils/pathinfo.h"
using namespace std;
using namespace luxrays;
using namespace slg;
//------------------------------------------------------------------------------
// PhotonGICache
//------------------------------------------------------------------------------
// Default-construct an empty, unbound cache.
// Initializes the same members as the (Scene, params) constructor: the
// original version left indirectPhotonTracedCount, causticPhotonTracedCount
// and causticPhotonPass uninitialized here, inconsistent with the other
// constructor.
PhotonGICache::PhotonGICache() :
		scene(nullptr),
		visibilityParticlesKdTree(nullptr),
		radiancePhotonsBVH(nullptr),
		indirectPhotonTracedCount(0),
		causticPhotonsBVH(nullptr),
		causticPhotonTracedCount(0),
		causticPhotonPass(0) {
}
// Construct a cache bound to scene `scn` with parameters `p`.
// Acceleration structures (kd-tree / BVHs) start null and are built later;
// photon counters and the caustic pass index start at zero.
PhotonGICache::PhotonGICache(const Scene *scn, const PhotonGICacheParams &p) :
		scene(scn), params(p),
		visibilityParticlesKdTree(nullptr),
		radiancePhotonsBVH(nullptr),
		indirectPhotonTracedCount(0),
		causticPhotonsBVH(nullptr),
		causticPhotonTracedCount(0),
		causticPhotonPass(0) {
}
// Release the owned acceleration structures. `delete nullptr` is a no-op,
// so this is safe whether or not the structures were ever built.
PhotonGICache::~PhotonGICache() {
	delete visibilityParticlesKdTree;
	delete causticPhotonsBVH;
	delete radiancePhotonsBVH;
}
// Decide whether the PhotonGI cache can be used on this BSDF: transmissive,
// specular and "nearly specular" glossy surfaces are excluded; otherwise
// defer to the material's own opt-in flag.
bool PhotonGICache::IsPhotonGIEnabled(const BSDF &bsdf) const {
	const BSDFEvent eventTypes = bsdf.GetEventTypes();

	if ((eventTypes & TRANSMIT) || (eventTypes & SPECULAR))
		return false;

	// Glossy surfaces below the glossiness threshold behave like specular
	if ((eventTypes & GLOSSY) && (bsdf.GetGlossiness() < params.glossinessUsageThreshold))
		return false;

	return bsdf.IsPhotonGIEnabled();
}
// Compute the path-length threshold after which the indirect cache may be
// used. "Nearly specular" glossy bounces disable the cache entirely
// (infinite threshold); glossy surfaces otherwise get a blend zone twice
// as large as diffuse ones. `u0` randomizes the threshold so the blend
// between brute force and cache is stochastic.
float PhotonGICache::GetIndirectUsageThreshold(const BSDFEvent lastBSDFEvent,
		const float lastGlossiness, const float u0) const {
	// Decide if the glossy surface is "nearly specular"
	const bool nearlySpecular = (lastBSDFEvent & GLOSSY) &&
			(lastGlossiness < params.glossinessUsageThreshold);
	if (nearlySpecular) {
		// Disable the cache, the surface is "nearly specular"
		return numeric_limits<float>::infinity();
	}

	// Use a larger blend zone for glossy surfaces
	const float scale = (lastBSDFEvent & GLOSSY) ? 2.f : 1.f;

	// Enable the cache for diffuse or glossy "nearly diffuse" but only after
	// the threshold (before it, brute force and cache blend between 0x and
	// 1x the threshold)
	return scale * u0 * params.indirect.usageThresholdScale * params.indirect.lookUpRadius;
}
// Decide whether a direct light hit should be rendered on this eye path.
bool PhotonGICache::IsDirectLightHitVisible(const EyePathInfo &pathInfo,
		const bool photonGICausticCacheUsed) const {
	// Specific firefly cut: a non-diffuse last bounce that follows at least
	// one diffuse vertex is hidden
	if (!(pathInfo.lastBSDFEvent & DIFFUSE) && (pathInfo.depth.diffuseDepth > 0))
		return false;

	// Without an active caustic cache the direct hit is always visible
	if (!params.caustic.enabled || !photonGICausticCacheUsed)
		return true;

	// With the caustic cache active, only non-caustic paths stay visible
	// (and only when no debug mode is rendering)
	return !pathInfo.IsCausticPath() && (params.debugType == PGIC_DEBUG_NONE);
}
// Trace one batch of `photonTracedCount` photon paths, one
// TracePhotonsThread per hardware thread, then merge every thread's
// results into the cache.
//
//   seedBase           - base RNG seed (NOTE(review): each thread
//                        presumably derives its seed from seedBase + index
//                        — confirm in TracePhotonsThread)
//   photonTracedCount  - number of photon paths this batch should trace
//   indirectCacheDone  - true when the indirect cache needs no more photons
//   causticCacheDone   - true when the caustic cache needs no more photons
//   global*            - counters shared by all tracing threads
void PhotonGICache::TracePhotons(const u_int seedBase, const u_int photonTracedCount,
		const bool indirectCacheDone, const bool causticCacheDone,
		boost::atomic<u_int> &globalIndirectPhotonsTraced, boost::atomic<u_int> &globalCausticPhotonsTraced,
		boost::atomic<u_int> &globalIndirectSize, boost::atomic<u_int> &globalCausticSize) {
	const size_t renderThreadCount = GetHardwareThreadCount();
	vector<TracePhotonsThread *> renderThreads(renderThreadCount, nullptr);
	boost::atomic<u_int> globalPhotonsCounter(0);
	// Create the photon tracing threads
	for (size_t i = 0; i < renderThreadCount; ++i) {
		renderThreads[i] = new TracePhotonsThread(*this, i,
				seedBase, photonTracedCount,
				indirectCacheDone, causticCacheDone,
				globalPhotonsCounter, globalIndirectPhotonsTraced,
				globalCausticPhotonsTraced, globalIndirectSize,
				globalCausticSize);
	}
	// Start photon tracing threads
	for (size_t i = 0; i < renderThreadCount; ++i)
		renderThreads[i]->Start();
	// Wait for the end of photon tracing threads
	u_int indirectPhotonStored = 0;
	u_int causticPhotonStored = 0;
	for (size_t i = 0; i < renderThreadCount; ++i) {
		renderThreads[i]->Join();
		// Copy all photons
		// Indirect photons are folded into their visibility particle's
		// accumulated alpha (keyed by light ID) rather than stored directly
		for (auto const &p : renderThreads[i]->indirectPhotons) {
			PGICVisibilityParticle &vp = visibilityParticles[p.visibilityParticelIndex];
			vp.alphaAccumulated.Add(p.lightID, p.alpha);
		}
		indirectPhotonStored += renderThreads[i]->indirectPhotons.size();
		// Caustic photons are kept individually (for the later BVH build)
		causticPhotons.insert(causticPhotons.end(), renderThreads[i]->causticPhotons.begin(),
				renderThreads[i]->causticPhotons.end());
		causticPhotonStored += renderThreads[i]->causticPhotons.size();
		delete renderThreads[i];
	}
	// Update the count only if I have traced this kind of photons
	if (!indirectCacheDone)
		indirectPhotonTracedCount = globalIndirectPhotonsTraced;
	// Update the count only if I have traced this kind of photons
	if (!causticCacheDone)
		causticPhotonTracedCount = globalCausticPhotonsTraced;
	SLG_LOG("PhotonGI additional indirect photon stored: " << indirectPhotonStored);
	SLG_LOG("PhotonGI additional caustic photon stored: " << causticPhotonStored);
	// The traced-photon total isn't exact but it is quite near
	SLG_LOG("PhotonGI total photon traced: " << Max(indirectPhotonTracedCount, causticPhotonTracedCount));
}
// Traces photon paths to fill the indirect and/or caustic photon maps.
//
// When the indirect cache is enabled with params.indirect.maxSize == 0,
// photons are traced in fixed-size steps and an automatic convergence test
// (max. per-visibility-particle radiance variation between two consecutive
// steps, after tone-mapping-like normalization) decides when to stop.
// Otherwise a single pass of params.photon.maxTracedCount photon paths is
// traced.
//
// Fix vs. previous revision: visibility particles are now accessed by const
// reference instead of being copied on every loop iteration.
void PhotonGICache::TracePhotons(const bool indirectEnabled, const bool causticEnabled) {
	const size_t renderThreadCount = GetHardwareThreadCount();

	boost::atomic<u_int> globalIndirectPhotonsTraced(0);
	boost::atomic<u_int> globalCausticPhotonsTraced(0);
	boost::atomic<u_int> globalIndirectSize(0);
	boost::atomic<u_int> globalCausticSize(0);

	// Update the count only if I have traced this kind of photons
	if (indirectEnabled)
		indirectPhotonTracedCount = 0;
	// Update the count only if I have traced this kind of photons
	if (causticEnabled)
		causticPhotonTracedCount = 0;

	if (indirectEnabled && (params.indirect.maxSize == 0)) {
		// Automatic indirect cache convergence test is required

		const u_int photonTracedStep = 2000000;
		u_int photonTracedCount = 0;

		vector<SpectrumGroup> lastAlpha(visibilityParticles.size());
		vector<SpectrumGroup> currentAlpha(visibilityParticles.size());
		while (photonTracedCount < params.photon.maxTracedCount) {
			//------------------------------------------------------------------
			// Trace additional photons
			//------------------------------------------------------------------

			TracePhotons(updateSeedBase, photonTracedStep, false, !causticEnabled,
					globalIndirectPhotonsTraced, globalCausticPhotonsTraced,
					globalIndirectSize, globalCausticSize);
			photonTracedCount += photonTracedStep;

			//------------------------------------------------------------------
			// Check the convergence if it is not the first step
			//------------------------------------------------------------------

			if (photonTracedCount > photonTracedStep) {
				// Compute current alpha
				for (u_int i = 0; i < visibilityParticles.size(); ++i) {
					// Const reference: avoid copying the particle on every iteration
					const PGICVisibilityParticle &vp = visibilityParticles[i];

					currentAlpha[i] = vp.ComputeRadiance(params.indirect.lookUpRadius2, indirectPhotonTracedCount);
				}

				// Filter outgoing radiance
				if (params.indirect.filterRadiusScale > 0.f) {
					vector<SpectrumGroup> filteredCurrentAlpha(visibilityParticles.size());
					FilterVisibilityParticlesRadiance(currentAlpha, filteredCurrentAlpha);

					currentAlpha = filteredCurrentAlpha;
				}

				// Compute the scale for an auto-linear-like tone mapping of values
				float Y = 0.f;
				for (u_int i = 0; i < visibilityParticles.size(); ++i)
					Y += currentAlpha[i].Sum().Y();
				Y /= visibilityParticles.size();
				const float alphaScale = (Y > 0.f) ? (1.25f / Y * powf(118.f / 255.f, 2.2f)) : 1.f;

				for (u_int i = 0; i < visibilityParticles.size(); ++i)
					currentAlpha[i] *= alphaScale;

				// Look for the max. error
				float maxError = 0.f;
				for (u_int i = 0; i < visibilityParticles.size(); ++i) {
					if (!currentAlpha[i].Black()) {
						SpectrumGroup alpha = currentAlpha[i];
						alpha -= lastAlpha[i];

						for (u_int j = 0; j < alpha.Size(); ++j) {
							const float currentError = alpha[j].Abs().Max();
							maxError = Max(maxError, currentError);
						}
					}

					// Update last alpha cache entries
					lastAlpha[i] = currentAlpha[i];
				}

				SLG_LOG(boost::format("PhotonGI estimated current indirect photon error: %.2f%%") % (100.f * maxError));

				// If the error is under the threshold, stop tracing photons for indirect cache
				if (maxError < params.indirect.haltThreshold) {
					// Finish the work for caustic cache too
					if (causticEnabled &&
							(causticPhotons.size() < params.caustic.maxSize) &&
							(photonTracedCount < params.photon.maxTracedCount)) {
						updateSeedBase += renderThreadCount;

						TracePhotons(updateSeedBase,
								params.photon.maxTracedCount - photonTracedCount, true, false,
								globalIndirectPhotonsTraced, globalCausticPhotonsTraced,
								globalIndirectSize, globalCausticSize);
					}
					break;
				}
			} else {
				// Update last alpha cache entries
				for (u_int i = 0; i < visibilityParticles.size(); ++i) {
					// Const reference: avoid copying the particle on every iteration
					const PGICVisibilityParticle &vp = visibilityParticles[i];

					lastAlpha[i] = vp.ComputeRadiance(params.indirect.lookUpRadius2, indirectPhotonTracedCount);
				}
			}

			updateSeedBase += renderThreadCount;
		}
	} else {
		// Just trace the asked amount of photon paths
		TracePhotons(updateSeedBase, params.photon.maxTracedCount, !indirectEnabled, !causticEnabled,
				globalIndirectPhotonsTraced, globalCausticPhotonsTraced,
				globalIndirectSize, globalCausticSize);
	}
	updateSeedBase += renderThreadCount;

	// Release any over-allocated caustic photon storage
	causticPhotons.shrink_to_fit();
}
// Box-filters the per-visibility-particle radiance estimates: each output
// entry is the average of radianceValues over all visibility particles
// within the (filterRadiusScale-scaled) look-up radius and within the
// look-up normal angle of the particle.
//
// radianceValues and filteredRadianceValues are indexed in lock-step with
// visibilityParticles; entries with no near particles are left untouched
// (i.e. keep their default-constructed value in filteredRadianceValues).
void PhotonGICache::FilterVisibilityParticlesRadiance(const vector<SpectrumGroup> &radianceValues,
		vector<SpectrumGroup> &filteredRadianceValues) const {
	const float lookUpRadius2 = Sqr(params.indirect.filterRadiusScale * params.indirect.lookUpRadius);
	const float lookUpCosNormalAngle = cosf(Radians(params.indirect.lookUpNormalAngle));

	#pragma omp parallel for
	for (
			// Visual C++ 2013 supports only OpenMP 2.5
#if _OPENMP >= 200805
			unsigned
#endif
			int index = 0; index < visibilityParticles.size(); ++index) {
		// Look for all near particles

		vector<u_int> nearParticleIndices;
		const PGICVisibilityParticle &vp = visibilityParticles[index];
		// I can use visibilityParticlesKdTree to get radiance photons indices
		// because there is a one on one correspondence
		visibilityParticlesKdTree->GetAllNearEntries(nearParticleIndices,
				vp.p, vp.n, vp.isVolume,
				lookUpRadius2, lookUpCosNormalAngle);

		if (nearParticleIndices.size() > 0) {
			SpectrumGroup &filtered = filteredRadianceValues[index];
			for (auto nearIndex : nearParticleIndices)
				filtered += radianceValues[nearIndex];

			// Average over the number of contributing particles
			// (size() > 0 is guaranteed by the check above)
			filtered /= nearParticleIndices.size();
		}
	}
}
void PhotonGICache::CreateRadiancePhotons() {
//--------------------------------------------------------------------------
// Compute the outgoing radiance for each visibility entry
//--------------------------------------------------------------------------
vector<SpectrumGroup> outgoingRadianceValues(visibilityParticles.size());
for (u_int index = 0 ; index < visibilityParticles.size(); ++index) {
const PGICVisibilityParticle &vp = visibilityParticles[index];
outgoingRadianceValues[index] = vp.ComputeRadiance(params.indirect.lookUpRadius2, indirectPhotonTracedCount);
assert (outgoingRadianceValues[index].IsValid());
}
//--------------------------------------------------------------------------
// Filter outgoing radiance
//--------------------------------------------------------------------------
if (params.indirect.filterRadiusScale > 0.f) {
SLG_LOG("PhotonGI filtering radiance photons");
vector<SpectrumGroup> filteredOutgoingRadianceValues(visibilityParticles.size());
FilterVisibilityParticlesRadiance(outgoingRadianceValues, filteredOutgoingRadianceValues);
outgoingRadianceValues = filteredOutgoingRadianceValues;
}
//--------------------------------------------------------------------------
// Create a radiance map entry for each visibility entry
//--------------------------------------------------------------------------
for (u_int index = 0 ; index < visibilityParticles.size(); ++index) {
if (!outgoingRadianceValues[index].Black()) {
const PGICVisibilityParticle &vp = visibilityParticles[index];
radiancePhotons.push_back(RadiancePhoton(vp.p,
vp.n, outgoingRadianceValues[index], vp.isVolume));
}
}
radiancePhotons.shrink_to_fit();
SLG_LOG("PhotonGI total radiance photon stored: " << radiancePhotons.size());
}
// One-time cache construction: loads a persistent cache if available,
// otherwise evaluates look-up radii, traces visibility particles, traces
// indirect/caustic photons, builds the radiance photon and caustic BVHs,
// frees data not required for later updates, prints memory statistics and
// optionally saves the persistent cache.
//
// threadCnt is the number of render threads; it sizes the update barrier.
//
// Bug fix: totalMemUsage accounted the radiance photons with sizeof(Photon)
// while the log line correctly uses sizeof(RadiancePhoton); the total now
// matches the per-cache figures.
void PhotonGICache::Preprocess(const u_int threadCnt) {
	threadCount = threadCnt;
	threadsSyncBarrier.reset(new boost::barrier(threadCount));
	lastUpdateSpp = 0;
	updateSeedBase = 1;

	if (params.persistent.fileName != "") {
		// Check if the file already exist
		if (boost::filesystem::exists(params.persistent.fileName)) {
			// Load the cache from the file
			LoadPersistentCache(params.persistent.fileName);

			return;
		}

		// The file doesn't exist so I have to go through normal pre-processing
	}

	//--------------------------------------------------------------------------
	// Evaluate best radius if required
	//--------------------------------------------------------------------------

	if (params.indirect.enabled && (params.indirect.lookUpRadius == 0.f)) {
		params.indirect.lookUpRadius = EvaluateBestRadius();
		SLG_LOG("PhotonGI best indirect cache radius: " << params.indirect.lookUpRadius);
	}

	//--------------------------------------------------------------------------
	// Initialize all parameters
	//--------------------------------------------------------------------------

	// A disabled cache is expressed as a zero max. size
	if (!params.indirect.enabled)
		params.indirect.maxSize = 0;
	if (!params.caustic.enabled)
		params.caustic.maxSize = 0;

	if (params.indirect.enabled) {
		// I must use indirect cache parameters for Visibility particles if the
		// cache is enabled
		params.visibility.lookUpRadius = params.indirect.lookUpRadius;
		params.visibility.lookUpNormalAngle = params.indirect.lookUpNormalAngle;
	} else {
		if (params.visibility.lookUpRadius == 0.f) {
			if (params.caustic.enabled) {
				// Caustic radius is too small for visibility check
				params.visibility.lookUpRadius = EvaluateBestRadius();
				params.visibility.lookUpNormalAngle = params.caustic.lookUpNormalAngle;
			} else
				throw runtime_error("Indirect and/or caustic cache must be enabled in PhotonGI");
		}
	}
	SLG_LOG("PhotonGI visibility lookup radius: " << params.visibility.lookUpRadius);

	// Pre-compute derived (cosine / squared) look-up parameters
	params.visibility.lookUpNormalCosAngle = cosf(Radians(params.visibility.lookUpNormalAngle));
	params.visibility.lookUpRadius2 = params.visibility.lookUpRadius * params.visibility.lookUpRadius;
	params.indirect.lookUpRadius2 = params.indirect.lookUpRadius * params.indirect.lookUpRadius;
	params.caustic.lookUpRadius2 = params.caustic.lookUpRadius * params.caustic.lookUpRadius;

	//--------------------------------------------------------------------------
	// Trace visibility particles
	//--------------------------------------------------------------------------

	TraceVisibilityParticles();
	if (visibilityParticles.size() == 0) {
		SLG_LOG("PhotonGI WARNING: nothing is visible and/or cache enabled.");
		return;
	}

	//--------------------------------------------------------------------------
	// Fill all photon vectors
	//--------------------------------------------------------------------------

	// I build indirect and caustic caches with 2 different steps in order to
	// have Metropolis work at best for the 2 different tasks
	if (params.indirect.enabled) {
		SLG_LOG("PhotonGI tracing indirect cache photons");
		TracePhotons(true, false);
	}
	if (params.caustic.enabled) {
		SLG_LOG("PhotonGI tracing caustic cache photons");
		TracePhotons(false, true);
	}

	//--------------------------------------------------------------------------
	// Radiance photon map
	//--------------------------------------------------------------------------

	if (params.indirect.enabled) {
		SLG_LOG("PhotonGI building radiance photon data");
		CreateRadiancePhotons();

		if (radiancePhotons.size() > 0) {
			SLG_LOG("PhotonGI building radiance photons BVH");
			radiancePhotonsBVH = new PGICRadiancePhotonBvh(&radiancePhotons,
					params.indirect.lookUpRadius, params.indirect.lookUpNormalAngle);
		}
	}

	//--------------------------------------------------------------------------
	// Caustic photon map
	//--------------------------------------------------------------------------

	if ((causticPhotons.size() > 0) && params.caustic.enabled) {
		SLG_LOG("PhotonGI building caustic photons BVH");
		causticPhotonsBVH = new PGICPhotonBvh(&causticPhotons, causticPhotonTracedCount,
				params.caustic.lookUpRadius, params.caustic.lookUpNormalAngle);
	}

	//--------------------------------------------------------------------------
	// Free visibility map (only if it is not required for a further update)
	//--------------------------------------------------------------------------

	if (!params.caustic.enabled || (params.caustic.updateSpp == 0)) {
		delete visibilityParticlesKdTree;
		visibilityParticlesKdTree = nullptr;

		visibilityParticles.clear();
		visibilityParticles.shrink_to_fit();
	}

	//--------------------------------------------------------------------------
	// Print some statistics about memory usage
	//--------------------------------------------------------------------------

	size_t totalMemUsage = 0;
	if (causticPhotonsBVH) {
		SLG_LOG("PhotonGI caustic cache photons memory usage: " << ToMemString(causticPhotons.size() * sizeof(Photon)));
		SLG_LOG("PhotonGI caustic cache BVH memory usage: " << ToMemString(causticPhotonsBVH->GetMemoryUsage()));

		totalMemUsage += causticPhotons.size() * sizeof(Photon) + causticPhotonsBVH->GetMemoryUsage();
	}

	if (radiancePhotonsBVH) {
		SLG_LOG("PhotonGI indirect cache photons memory usage: " << ToMemString(radiancePhotons.size() * sizeof(RadiancePhoton)));
		SLG_LOG("PhotonGI indirect cache BVH memory usage: " << ToMemString(radiancePhotonsBVH->GetMemoryUsage()));

		// Fixed: use sizeof(RadiancePhoton), not sizeof(Photon), to match the
		// log line above
		totalMemUsage += radiancePhotons.size() * sizeof(RadiancePhoton) + radiancePhotonsBVH->GetMemoryUsage();
	}

	SLG_LOG("PhotonGI total memory usage: " << ToMemString(totalMemUsage));

	//--------------------------------------------------------------------------
	// Check if I have to save the persistent cache
	//--------------------------------------------------------------------------

	if (params.persistent.fileName != "")
		SavePersistentCache(params.persistent.fileName);
}
// Returns a pointer to the outgoing radiance cached at the radiance photon
// nearest to the shading point of bsdf, or nullptr if the indirect cache is
// disabled/empty or no photon matches the point/normal/volume constraints.
// The returned pointer aliases storage owned by radiancePhotonsBVH.
const SpectrumGroup *PhotonGICache::GetIndirectRadiance(const BSDF &bsdf) const {
	assert (IsPhotonGIEnabled(bsdf));

	if (radiancePhotonsBVH) {
		// Flip the normal if required
		const Normal n = (bsdf.hitPoint.intoObject ? 1.f: -1.f) * bsdf.hitPoint.geometryN;
		const RadiancePhoton *radiancePhoton = radiancePhotonsBVH->GetNearestEntry(bsdf.hitPoint.p, n, bsdf.IsVolume());

		if (radiancePhoton) {
			const SpectrumGroup *result = &radiancePhoton->outgoingRadiance;
			assert (result->IsValid());
			// NOTE(review): this compares a squared distance against
			// GetEntryRadius() — looks like it should be the squared radius;
			// confirm GetEntryRadius() semantics (debug-only assert, no
			// release-build impact)
			assert (DistanceSquared(radiancePhoton->p, bsdf.hitPoint.p) < radiancePhotonsBVH->GetEntryRadius());
			assert (bsdf.IsVolume() == radiancePhoton->isVolume);
			assert (radiancePhoton->isVolume || (Dot(radiancePhoton->n, n) > radiancePhotonsBVH->GetEntryNormalCosAngle()));

			return result;
		}
	}

	return nullptr;
}
// Gathers the contribution of all near caustic photons at the shading point
// of bsdf. Returns a black (default-constructed) SpectrumGroup when the
// caustic cache is disabled or empty.
SpectrumGroup PhotonGICache::ConnectWithCausticPaths(const BSDF &bsdf) const {
	assert (IsPhotonGIEnabled(bsdf));

	// No caustic BVH: nothing to connect with
	if (!causticPhotonsBVH)
		return SpectrumGroup();

	SpectrumGroup radiance = causticPhotonsBVH->ConnectAllNearEntries(bsdf);
	assert (radiance.IsValid());

	return radiance;
}
// Parses the textual sampler type name into the corresponding enum value.
// Throws runtime_error on an unknown name.
PhotonGISamplerType PhotonGICache::String2SamplerType(const string &type) {
	if (type == "RANDOM")
		return PhotonGISamplerType::PGIC_SAMPLER_RANDOM;
	if (type == "METROPOLIS")
		return PhotonGISamplerType::PGIC_SAMPLER_METROPOLIS;

	throw runtime_error("Unknown PhotonGI cache sampler type: " + type);
}
// Converts a sampler type enum value back to its textual name (inverse of
// String2SamplerType()). Throws runtime_error on an unsupported value.
string PhotonGICache::SamplerType2String(const PhotonGISamplerType type) {
	if (type == PhotonGISamplerType::PGIC_SAMPLER_RANDOM)
		return "RANDOM";
	if (type == PhotonGISamplerType::PGIC_SAMPLER_METROPOLIS)
		return "METROPOLIS";

	throw runtime_error("Unsupported sampler type in PhotonGICache::SamplerType2String(): " + ToString(type));
}
// Parses the textual debug mode name into the corresponding enum value.
// Throws runtime_error on an unknown name.
PhotonGIDebugType PhotonGICache::String2DebugType(const string &type) {
	if (type == "none")
		return PhotonGIDebugType::PGIC_DEBUG_NONE;
	if (type == "showindirect")
		return PhotonGIDebugType::PGIC_DEBUG_SHOWINDIRECT;
	if (type == "showcaustic")
		return PhotonGIDebugType::PGIC_DEBUG_SHOWCAUSTIC;
	if (type == "showindirectpathmix")
		return PhotonGIDebugType::PGIC_DEBUG_SHOWINDIRECTPATHMIX;

	throw runtime_error("Unknown PhotonGI cache debug type: " + type);
}
// Converts a debug mode enum value back to its textual name (inverse of
// String2DebugType()). Throws runtime_error on an unsupported value.
//
// Fix: the error message previously said "Unsupported wrap type" (a
// copy-paste from a texture wrap-mode converter); it now correctly says
// "debug type".
string PhotonGICache::DebugType2String(const PhotonGIDebugType type) {
	switch (type) {
		case PhotonGIDebugType::PGIC_DEBUG_NONE:
			return "none";
		case PhotonGIDebugType::PGIC_DEBUG_SHOWINDIRECT:
			return "showindirect";
		case PhotonGIDebugType::PGIC_DEBUG_SHOWCAUSTIC:
			return "showcaustic";
		case PhotonGIDebugType::PGIC_DEBUG_SHOWINDIRECTPATHMIX:
			return "showindirectpathmix";
		default:
			throw runtime_error("Unsupported debug type in PhotonGICache::DebugType2String(): " + ToString(type));
	}
}
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#if defined(HAVE_CONFIG_H)
#include "config/bitcoin-config.h"
#endif
#include "chainparams.h"
#include "clientversion.h"
#include "compat.h"
#include "rpc/server.h"
#include "init.h"
#include "noui.h"
#include "scheduler.h"
#include "util.h"
#include "httpserver.h"
#include "httprpc.h"
#include "utilstrencodings.h"
#include <boost/algorithm/string/predicate.hpp>
#include <boost/filesystem.hpp>
#include <boost/thread.hpp>
#include <stdio.h>
/* Introduction text for doxygen: */
/*! \mainpage Developer documentation
*
* \section intro_sec Introduction
*
* This is the developer documentation of the reference client for an experimental new digital currency called Bitcoin (https://www.bitcoin.org/),
* which enables instant payments to anyone, anywhere in the world. Bitcoin uses peer-to-peer technology to operate
* with no central authority: managing transactions and issuing money are carried out collectively by the network.
*
* The software is a community-driven open source project, released under the MIT license.
*
* \section Navigation
* Use the buttons <code>Namespaces</code>, <code>Classes</code> or <code>Files</code> at the top of the page to start navigating the code.
*/
// Blocks until a shutdown has been requested, then interrupts and joins all
// worker threads in threadGroup (if any).
void WaitForShutdown(boost::thread_group* threadGroup)
{
    // Poll the shutdown flag every 200ms
    while (!ShutdownRequested())
        MilliSleep(200);

    // Tell the worker threads to stop and wait for them to finish
    if (threadGroup)
    {
        Interrupt(*threadGroup);
        threadGroup->join_all();
    }
}
//////////////////////////////////////////////////////////////////////////////
//
// Start
//
// Daemon start-up: parses parameters, handles -?/-version, reads the config
// file, selects the chain, performs the staged AppInit* initialization,
// optionally daemonizes and finally runs AppInitMain(). Returns true on a
// clean run, false on any initialization failure.
//
// Consistency fix: the early-exit paths previously mixed exit(1) and
// exit(EXIT_FAILURE); all now use EXIT_FAILURE.
bool AppInit(int argc, char* argv[])
{
    boost::thread_group threadGroup;
    CScheduler scheduler;

    bool fRet = false;

    //
    // Parameters
    //
    // If Qt is used, parameters/bitcoin.conf are parsed in qt/bitcoin.cpp's main()
    ParseParameters(argc, argv);

    // Process help and version before taking care about datadir
    if (IsArgSet("-?") || IsArgSet("-h") || IsArgSet("-help") || IsArgSet("-version"))
    {
        std::string strUsage = strprintf(_("%s Daemon"), _(PACKAGE_NAME)) + " " + _("version") + " " + FormatFullVersion() + "\n";

        if (IsArgSet("-version"))
        {
            strUsage += FormatParagraph(LicenseInfo());
        }
        else
        {
            strUsage += "\n" + _("Usage:") + "\n" +
                  "  quasard [options]                     " + strprintf(_("Start %s Daemon"), _(PACKAGE_NAME)) + "\n";

            strUsage += "\n" + HelpMessage(HMM_BITCOIND);
        }

        fprintf(stdout, "%s", strUsage.c_str());
        return true;
    }

    try
    {
        if (!boost::filesystem::is_directory(GetDataDir(false)))
        {
            fprintf(stderr, "Error: Specified data directory \"%s\" does not exist.\n", GetArg("-datadir", "").c_str());
            return false;
        }
        try
        {
            ReadConfigFile(GetArg("-conf", BITCOIN_CONF_FILENAME));
        } catch (const std::exception& e) {
            fprintf(stderr,"Error reading configuration file: %s\n", e.what());
            return false;
        }
        // Check for -testnet or -regtest parameter (Params() calls are only valid after this clause)
        try {
            SelectParams(ChainNameFromCommandLine());
        } catch (const std::exception& e) {
            fprintf(stderr, "Error: %s\n", e.what());
            return false;
        }

        // Command-line RPC has been removed: reject any non-switch argument
        bool fCommandLine = false;
        for (int i = 1; i < argc; i++)
            if (!IsSwitchChar(argv[i][0]) && !boost::algorithm::istarts_with(argv[i], "quasar:"))
                fCommandLine = true;

        if (fCommandLine)
        {
            fprintf(stderr, "Error: There is no RPC client functionality in quasard anymore. Use the quasar-cli utility instead.\n");
            exit(EXIT_FAILURE);
        }
        // -server defaults to true for bitcoind but not for the GUI so do this here
        SoftSetBoolArg("-server", true);
        // Set this early so that parameter interactions go to console
        InitLogging();
        InitParameterInteraction();
        if (!AppInitBasicSetup())
        {
            // InitError will have been called with detailed error, which ends up on console
            exit(EXIT_FAILURE);
        }
        if (!AppInitParameterInteraction())
        {
            // InitError will have been called with detailed error, which ends up on console
            exit(EXIT_FAILURE);
        }
        if (!AppInitSanityChecks())
        {
            // InitError will have been called with detailed error, which ends up on console
            exit(EXIT_FAILURE);
        }
        if (GetBoolArg("-daemon", false))
        {
#if HAVE_DECL_DAEMON
            fprintf(stdout, "Quasar server starting\n");

            // Daemonize
            if (daemon(1, 0)) { // don't chdir (1), do close FDs (0)
                fprintf(stderr, "Error: daemon() failed: %s\n", strerror(errno));
                return false;
            }
#else
            fprintf(stderr, "Error: -daemon is not supported on this operating system\n");
            return false;
#endif // HAVE_DECL_DAEMON
        }

        fRet = AppInitMain(threadGroup, scheduler);
    }
    catch (const std::exception& e) {
        PrintExceptionContinue(&e, "AppInit()");
    } catch (...) {
        PrintExceptionContinue(NULL, "AppInit()");
    }

    if (!fRet)
    {
        Interrupt(threadGroup);
        // threadGroup.join_all(); was left out intentionally here, because we didn't re-test all of
        // the startup-failure cases to make sure they don't result in a hang due to some
        // thread-blocking-waiting-for-another-thread-during-startup case
    } else {
        WaitForShutdown(&threadGroup);
    }
    Shutdown();

    return fRet;
}
// Process entry point: sets up the environment, connects the non-UI signal
// handlers and delegates all real work to AppInit().
int main(int argc, char* argv[])
{
    SetupEnvironment();

    // Connect bitcoind signal handlers
    noui_connect();

    const bool fSuccess = AppInit(argc, argv);
    return fSuccess ? EXIT_SUCCESS : EXIT_FAILURE;
}
|
#pragma once
#include "../../../JObject.hpp"
namespace android::hardware::lights
{
class Light;
}
namespace android::hardware::lights
{
class LightState;
}
namespace android::hardware::lights
{
class LightsRequest;
}
namespace android::hardware::lights
{
	// JNI wrapper (presumably auto-generated — do not hand-edit structure)
	// around the Java class android.hardware.lights.LightsRequest.Builder.
	// All methods forward through the JObject base to the underlying Java
	// object; each returns a new wrapper around the JNI call result.
	class LightsRequest_Builder : public JObject
	{
	public:
		// Fields

		// QJniObject forward
		template<typename ...Ts> explicit LightsRequest_Builder(const char *className, const char *sig, Ts...agv) : JObject(className, sig, std::forward<Ts>(agv)...) {}
		LightsRequest_Builder(QJniObject obj);

		// Constructors
		LightsRequest_Builder();

		// Methods
		// Adds (or replaces) the state for the given light in the request being built
		android::hardware::lights::LightsRequest_Builder addLight(android::hardware::lights::Light arg0, android::hardware::lights::LightState arg1) const;
		// Finalizes and returns the assembled LightsRequest
		android::hardware::lights::LightsRequest build() const;
		// Removes any previously added state for the given light
		android::hardware::lights::LightsRequest_Builder clearLight(android::hardware::lights::Light arg0) const;
	};
} // namespace android::hardware::lights
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE36_Absolute_Path_Traversal__wchar_t_connect_socket_w32CreateFile_53b.cpp
Label Definition File: CWE36_Absolute_Path_Traversal.label.xml
Template File: sources-sink-53b.tmpl.cpp
*/
/*
* @description
* CWE: 36 Absolute Path Traversal
* BadSource: connect_socket Read data using a connect socket (client side)
* GoodSource: Full path and file name
* Sink: w32CreateFile
* BadSink : Open the file named in data using CreateFile()
* Flow Variant: 53 Data flow: data passed as an argument from one function through two others to a fourth; all four functions are in different source files
*
* */
#include "std_testcase.h"
#ifndef _WIN32
#include <wchar.h>
#endif
#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <direct.h>
#pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */
#define CLOSE_SOCKET closesocket
#else /* NOT _WIN32 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#define INVALID_SOCKET -1
#define SOCKET_ERROR -1
#define CLOSE_SOCKET close
#define SOCKET int
#endif
#define TCP_PORT 27015
#define IP_ADDRESS "127.0.0.1"
namespace CWE36_Absolute_Path_Traversal__wchar_t_connect_socket_w32CreateFile_53
{
/* all the sinks are the same, we just want to know where the hit originated if a tool flags one */
#ifndef OMITBAD
/* bad function declaration */
void badSink_c(wchar_t * data);
/* Intermediate link in the deliberate bad data flow (CWE-36 test case):
 * forwards the tainted path string unchanged to badSink_c() in the next
 * source file. Must not be altered or the test case loses its meaning. */
void badSink_b(wchar_t * data)
{
    badSink_c(data);
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* goodG2B uses the GoodSource with the BadSink */
void goodG2BSink_c(wchar_t * data);
/* Intermediate link in the good-source/bad-sink data flow (CWE-36 test
 * case): forwards the (safe, hard-coded) path string unchanged to
 * goodG2BSink_c() in the next source file. */
void goodG2BSink_b(wchar_t * data)
{
    goodG2BSink_c(data);
}
#endif /* OMITGOOD */
} /* close namespace */
|
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2019 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Andrey Abramov
////////////////////////////////////////////////////////////////////////////////
#ifndef IRESEARCH_LIMITED_SAMPLE_COLLECTOR_H
#define IRESEARCH_LIMITED_SAMPLE_COLLECTOR_H
#include "shared.hpp"
#include "analysis/token_attributes.hpp"
#include "search/collectors.hpp"
#include "search/filter_visitor.hpp"
#include "search/multiterm_query.hpp"
#include "index/index_reader.hpp"
#include "index/iterators.hpp"
#include "utils/hash_utils.hpp"
#include "utils/string.hpp"
NS_ROOT
struct sub_reader;
struct index_reader;
// Marks in 'bs' the id of every document produced by 'it'. A no-op when the
// iterator exposes no 'document' attribute.
template<typename DocIterator>
void fill(bitset& bs, DocIterator& it) {
  // the 'document' attribute tracks the current doc id as the iterator advances
  auto* doc = irs::get<irs::document>(it);

  if (doc) {
    while (it.next())
      bs.set(doc->value);
  }
  // no 'document' attribute -> no doc ids to mark
}
// Marks in 'bs' the id of every document in the postings of 'term'.
// 'docs_count' is the segment document count; it is taken by value and
// offset by doc_limits::min() to account for the reserved low doc id range
// before being used to size the bitset.
inline void fill(bitset& bs, const term_iterator& term, size_t docs_count) {
  auto it = term.postings(irs::flags::empty_instance());

  if (!it) {
    return; // no doc_ids in iterator
  }

  // account for the reserved (invalid) doc id range below doc_limits::min()
  docs_count += (irs::doc_limits::min)();

  if (bs.size() < docs_count) {
    bs.reset(docs_count); // ensure we have enough space
  }

  fill(bs, *it);
}
//////////////////////////////////////////////////////////////////////////////
/// @class limited_sample_collector
/// @brief object to collect and track a limited number of scorers,
/// terms with longer postings are treated as more important
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
/// @class limited_sample_collector
/// @brief object to collect and track a limited number of scorers,
///        terms with longer postings are treated as more important
///
/// Keeps at most 'scored_terms_limit' scored term states in a min-heap keyed
/// by 'Key'; terms evicted from (or never admitted to) the heap have their
/// doc ids accumulated into the per-state 'unscored_docs' bitset instead.
/// Inherits irs::compact<0, Comparer> privately for empty-base-optimized
/// storage of the comparator.
//////////////////////////////////////////////////////////////////////////////
template<typename Key, typename Comparer = std::less<Key>>
class limited_sample_collector : private irs::compact<0, Comparer>,
                                 private util::noncopyable {
 public:
  using key_type = Key;
  using comparer_type = Comparer;

 private:
  using comparer_rep = irs::compact<0, comparer_type>;

 public:
  explicit limited_sample_collector(size_t scored_terms_limit,
                                    const comparer_type& comparer = {})
    : comparer_rep(comparer),
      scored_terms_limit_(scored_terms_limit) {
      // reserve up-front so collect() never reallocates mid-collection
      scored_states_.reserve(scored_terms_limit);
      scored_states_heap_.reserve(scored_terms_limit);
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief prepare scorer for terms collecting
  /// @param segment segment reader for the current term
  /// @param state state containing this scored term
  /// @param terms segment term-iterator positioned at the current term
  //////////////////////////////////////////////////////////////////////////////
  void prepare(const sub_reader& segment,
               const seek_term_iterator& terms,
               multiterm_state& scored_state) noexcept {
    state_.state = &scored_state;
    state_.segment = &segment;
    state_.terms = &terms;

    // get term metadata
    auto* meta = irs::get<term_meta>(terms);
    // fall back to a zero count when the iterator exposes no term_meta
    state_.docs_count = meta ? &meta->docs_count : &no_docs_;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief collect current term
  //////////////////////////////////////////////////////////////////////////////
  void collect(const Key& key) {
    assert(state_.segment && state_.terms && state_.state);

    if (!scored_terms_limit_) {
      // state will not be scored
      // add all doc_ids from the doc_iterator to the unscored_docs
      fill(state_.state->unscored_docs, *state_.terms, state_.segment->docs_count());
      return; // nothing to collect (optimization)
    }

    if (scored_states_.size() < scored_terms_limit_) {
      // have not reached the scored state limit yet
      scored_states_heap_.emplace_back(scored_states_.size());
      scored_states_.emplace_back(key, state_);

      push();
      return;
    }

    // the heap root is the current minimum key among retained states
    const size_t min_state_idx = scored_states_heap_.front();

    if (scored_states_[min_state_idx].key < key) {
      // the new term outranks the current minimum: evict the minimum
      pop();

      auto& min_state = scored_states_[min_state_idx];
      auto state_term_it = min_state.state->reader->iterator(); // FIXME cache iterator???

      assert(min_state.cookie);
      // re-seek the evicted term via its cached cookie to read its postings
      if (state_term_it->seek(bytes_ref::NIL, *min_state.cookie)) {
        // state will not be scored
        // add all doc_ids from the doc_iterator to the unscored_docs
        fill(min_state.state->unscored_docs, *state_term_it, min_state.segment->docs_count());
      }

      // update min state
      min_state.docs_count = *state_.docs_count;
      min_state.state = state_.state;
      min_state.cookie = state_.terms->cookie();
      min_state.term = state_.terms->value();
      min_state.segment = state_.segment;
      min_state.key = key;

      push();
    } else {
      // state will not be scored
      // add all doc_ids from the doc_iterator to the unscored_docs
      fill(state_.state->unscored_docs, *state_.terms, state_.segment->docs_count());
    }
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief finish collecting and evaluate stats
  //////////////////////////////////////////////////////////////////////////////
  void score(const index_reader& index,
             const order::prepared& order,
             std::vector<bstring>& stats) {
    if (!scored_terms_limit_) {
      return; // nothing to score (optimization)
    }

    // stats for a specific term
    std::unordered_map<hashed_bytes_ref, stats_state> term_stats;

    // iterate over all the states from which statistcis should be collected
    uint32_t stats_offset = 0;
    for (auto& scored_state : scored_states_) {
      assert(scored_state.cookie);
      auto& field = *scored_state.state->reader;
      auto term_itr = field.iterator(); // FIXME
      assert(term_itr);

      // find the stats for the current term
      // (try_emplace: stats_state is constructed only for a new term, which
      // also assigns it the next stats_offset)
      const auto res = term_stats.try_emplace(
        make_hashed_ref(bytes_ref(scored_state.term)),
        index, field, order, stats_offset);

      // find term attributes using cached state
      if (!term_itr->seek(bytes_ref::NIL, *(scored_state.cookie))) {
        continue; // some internal error that caused the term to disappear
      }

      auto& stats_entry = res.first->second;

      // collect statistics, 0 because only 1 term
      stats_entry.term_stats.collect(*scored_state.segment, field, 0, *term_itr);

      scored_state.state->scored_states.emplace_back(
        std::move(scored_state.cookie),
        stats_entry.stats_offset,
        static_cast<boost_t>(scored_state.key));

      // update estimation for scored state
      scored_state.state->scored_states_estimation += scored_state.docs_count;
    }

    // iterate over all stats and apply/store order stats
    stats.resize(stats_offset);
    for (auto& entry : term_stats) {
      auto& stats_entry = stats[entry.second.stats_offset];

      // copy field stats
      stats_entry.resize(order.stats_size());
      auto* stats_buf = const_cast<byte_type*>(stats_entry.data());

      entry.second.term_stats.finish(stats_buf, 0, entry.second.field_stats, index);
    }
  }

 private:
  // per-term statistics accumulators plus the term's slot in the output
  // stats vector
  struct stats_state {
    explicit stats_state(
        const irs::index_reader& index,
        const irs::term_reader& field,
        const irs::order::prepared& order,
        uint32_t& state_offset)
      : field_stats(order),
        term_stats(order, 1) { // 1 term per bstring because a range is treated as a disjunction

      // once per every 'state' collect field statistics over the entire index
      for (auto& segment: index) {
        // FIXME
        field_stats.collect(segment, field); // collect field statistics once per segment
      }

      stats_offset = state_offset++;
    }

    field_collectors field_stats;
    term_collectors term_stats;
    uint32_t stats_offset;
  };

  //////////////////////////////////////////////////////////////////////////////
  /// @brief a representation of state of the collector
  //////////////////////////////////////////////////////////////////////////////
  struct collector_state {
    const sub_reader* segment{};
    const seek_term_iterator* terms{};
    multiterm_state* state{};
    const uint32_t* docs_count{};
  };

  //////////////////////////////////////////////////////////////////////////////
  /// @brief a representation of a term cookie with its associated range_state
  //////////////////////////////////////////////////////////////////////////////
  struct scored_term_state {
    scored_term_state(const Key& key, const collector_state& state)
      : key(key),
        cookie(state.terms->cookie()),
        state(state.state),
        segment(state.segment),
        term(state.terms->value()),
        docs_count(*state.docs_count) {
      assert(this->cookie);
    }

    scored_term_state(scored_term_state&&) = default;
    scored_term_state& operator=(scored_term_state&&) = default;

    Key key;
    seek_term_iterator::cookie_ptr cookie; // term offset cache
    multiterm_state* state; // state containing this scored term
    const irs::sub_reader* segment; // segment reader for the current term
    bstring term; // actual term value this state is for
    uint32_t docs_count;
  };

  // min-heap maintenance over scored_states_heap_ (indices into
  // scored_states_); the comparator is reversed so the smallest key sits at
  // the heap front
  void push() noexcept {
    std::push_heap(
      scored_states_heap_.begin(),
      scored_states_heap_.end(),
      [this](const size_t lhs, const size_t rhs) noexcept {
        return comparer()(scored_states_[rhs].key, scored_states_[lhs].key);
    });
  }

  void pop() noexcept {
    std::pop_heap(
      scored_states_heap_.begin(),
      scored_states_heap_.end(),
      [this](const size_t lhs, const size_t rhs) noexcept {
        return comparer()(scored_states_[rhs].key, scored_states_[lhs].key);
    });
  }

  const comparer_type& comparer() const noexcept {
    return comparer_rep::get();
  }

  const decltype(term_meta::docs_count) no_docs_ = 0;
  collector_state state_;
  std::vector<scored_term_state> scored_states_;
  std::vector<size_t> scored_states_heap_; // use external heap as states are big
  size_t scored_terms_limit_;
}; // limited_sample_collector
// Collector key: ranks terms primarily by postings frequency, breaking ties
// by collection offset; convertible to a boost value for scoring.
struct term_frequency {
  uint32_t offset;
  uint32_t frequency;
  boost_t boost;

  explicit operator boost_t() const noexcept {
    return boost;
  }

  bool operator<(const term_frequency& rhs) const noexcept {
    // primary key: frequency; secondary key: offset
    if (frequency != rhs.frequency) {
      return frequency < rhs.frequency;
    }
    return offset < rhs.offset;
  }
};
//////////////////////////////////////////////////////////////////////////////
/// @class multiterm_visitor
/// @brief filter visitor for multiterm queries
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
/// @class multiterm_visitor
/// @brief filter visitor for multiterm queries
///
/// Bridges a term enumeration to a limited_sample_collector: prepare() binds
/// the collector to a segment/field, then visit() is called once per matched
/// term to submit it as a scoring candidate keyed by its postings frequency.
//////////////////////////////////////////////////////////////////////////////
template<typename States>
class multiterm_visitor {
 public:
  multiterm_visitor(
      limited_sample_collector<term_frequency>& collector,
      States& states)
    : collector_(collector), states_(states) {
  }

  // Binds the collector to the given segment/field and resets the per-field
  // term offset counter.
  void prepare(
      const sub_reader& segment,
      const term_reader& reader,
      const seek_term_iterator& terms) {
    // get term metadata
    auto* meta = irs::get<term_meta>(terms);

    // NOTE: we can't use reference to 'docs_count' here, like
    // 'const auto& docs_count = meta ? meta->docs_count : NO_DOCS;'
    // since not gcc4.9 nor msvc2015-2019 can handle this correctly
    // probably due to broken optimization
    docs_count_ = meta ? &meta->docs_count : &no_docs_;

    // get state for current segment
    auto& state = states_.insert(segment);
    state.reader = &reader;

    collector_.prepare(segment, terms, state);
    key_.offset = 0;
  }

  // FIXME can incorporate boost into collecting logic
  // Submits the current term (the one the prepared iterator is positioned
  // on) as a scoring candidate.
  void visit(boost_t boost) {
    // fill scoring candidates
    assert(docs_count_);
    key_.frequency = *docs_count_;
    key_.boost = boost;
    collector_.collect(key_);
    ++key_.offset;
  }

 private:
  const decltype(term_meta::docs_count) no_docs_ = 0;
  limited_sample_collector<term_frequency>& collector_;
  States& states_;
  term_frequency key_;
  const decltype(term_meta::docs_count)* docs_count_ = nullptr;
}; // multiterm_visitor
NS_END
#endif // IRESEARCH_LIMITED_SAMPLE_COLLECTOR_H
|
#include <cstdio>
// Reads an 8x8 grid of characters ('B' = black) from stdin and reports how
// many complete rows plus complete columns are entirely black; if the whole
// board is black, only 8 strokes are needed, so the count is capped at 8.
int main() {
    const int kSize = 8;
    bool black[kSize][kSize] = {0};
    char cell;

    for (int r = 0; r < kSize; r++) {
        for (int c = 0; c < kSize; c++) {
            scanf("%c", &cell);
            if (cell == 'B') {
                black[r][c] = 1;
            }
        }
        scanf("\n");  // consume the trailing newline of each row
    }

    // Start from the maximum (every row and column fully black) and subtract
    // one for each line that contains at least one non-black cell.
    int strokes = 2 * kSize;
    for (int r = 0; r < kSize; r++) {
        bool row_full = true;
        for (int c = 0; c < kSize; c++) {
            if (!black[r][c]) { row_full = false; break; }
        }
        if (!row_full) { --strokes; }
    }
    for (int c = 0; c < kSize; c++) {
        bool col_full = true;
        for (int r = 0; r < kSize; r++) {
            if (!black[r][c]) { col_full = false; break; }
        }
        if (!col_full) { --strokes; }
    }

    // Entirely black board: painting all 8 rows already covers everything.
    if (strokes == 2 * kSize) { strokes = kSize; }
    printf("%d\n", strokes);
    return 0;
}
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <fuchsia/sys/cpp/fidl.h>
#include <lib/sys/cpp/file_descriptor.h>
#include <lib/sys/cpp/testing/test_with_environment_fixture.h>
#include <lib/syslog/cpp/macros.h>
#include "src/lib/files/file.h"
#include "src/lib/files/scoped_temp_dir.h"
#include "src/lib/fxl/strings/string_printf.h"
namespace component {
namespace {
using fuchsia::sys::TerminationReason;
using sys::testing::EnclosingEnvironment;
constexpr char kRealm[] = "test";
// Fixture that launches components inside an enclosing environment and
// captures their stdout into a temp file for inspection.
class ComponentsBinaryTest : public gtest::TestWithEnvironmentFixture {
 protected:
  // Creates a fresh temp file and opens it for writing; launched components
  // have their stdout redirected to it (see CreateLaunchInfo).
  void OpenNewOutFile() {
    ASSERT_TRUE(tmp_dir_.NewTempFile(&out_file_));
    // NOTE(review): the FILE* from fopen() is never fclose()d -- only its fd
    // is kept. Acceptable for a short-lived test process, but worth
    // confirming if this fixture gains more lifecycle.
    outf_ = fileno(std::fopen(out_file_.c_str(), "w"));
  }

  // Reads back everything the component wrote to the captured stdout file;
  // returns "" (and logs an error) if the file cannot be read.
  std::string ReadOutFile() {
    std::string out;
    if (!files::ReadFileToString(out_file_, &out)) {
      FX_LOGS(ERROR) << "Could not read output file " << out_file_;
      return "";
    }
    return out;
  }

  // Builds LaunchInfo for |url|, wiring stdout to the capture file and stderr
  // to the test's own stderr.
  fuchsia::sys::LaunchInfo CreateLaunchInfo(const std::string& url,
                                            const std::vector<std::string>& args = {}) {
    fuchsia::sys::LaunchInfo launch_info;
    launch_info.url = url;
    launch_info.arguments = args;
    launch_info.out = sys::CloneFileDescriptor(outf_);
    launch_info.err = sys::CloneFileDescriptor(STDERR_FILENO);
    return launch_info;
  }

  // Maps a cmx file name to its full fuchsia-pkg component URL.
  static std::string UrlFromCmx(const std::string& cmx) {
    return fxl::StringPrintf("fuchsia-pkg://fuchsia.com/components_binary_tests#meta/%s",
                             cmx.c_str());
  }

  // Launches the component at |url| in the enclosing environment and blocks
  // until it terminates, optionally reporting exit code and reason.
  void RunComponent(const std::string& url, int64_t* return_code = nullptr,
                    TerminationReason* termination_reason = nullptr,
                    const std::vector<std::string>& args = {}) {
    fuchsia::sys::ComponentControllerPtr controller;
    // |args| is a const reference, so the previous std::move() was a no-op
    // that still copied (clang-tidy performance-move-const-arg); pass it
    // through directly.
    environment_->CreateComponent(CreateLaunchInfo(url, args), controller.NewRequest());
    bool terminated = false;
    controller.events().OnTerminated = [&terminated, &return_code, &termination_reason](
                                           int64_t code, TerminationReason reason) {
      if (return_code != nullptr) {
        *return_code = code;
      }
      if (termination_reason != nullptr) {
        *termination_reason = reason;
      }
      terminated = true;
    };
    RunLoopUntil([&terminated] { return terminated; });
  }

  ComponentsBinaryTest() {
    OpenNewOutFile();
    environment_ = CreateNewEnclosingEnvironment(kRealm, CreateServices());
  }

 private:
  std::unique_ptr<EnclosingEnvironment> environment_;
  files::ScopedTempDir tmp_dir_;
  std::string out_file_;
  int outf_;
};
// We therefore test that targeting a binary by a component manifest works, that
// argv0 properly propagates the binary path, and that the args field in the
// manifest is being properly passed through to the component.
// Launch echo1 with no arguments: it must exit 0 and print its own binary
// path as argv[0].
TEST_F(ComponentsBinaryTest, EchoNoArgs) {
  int64_t exit_code = -1;
  RunComponent(ComponentsBinaryTest::UrlFromCmx("echo1.cmx"), &exit_code);
  EXPECT_EQ(0, exit_code);
  const std::string captured = ReadOutFile();
  ASSERT_EQ(captured, "/pkg/bin/echo1\n");
}
// The manifest's args field must be passed through to the component.
TEST_F(ComponentsBinaryTest, EchoHelloWorld) {
  int64_t exit_code = -1;
  RunComponent(ComponentsBinaryTest::UrlFromCmx("echo2.cmx"), &exit_code);
  EXPECT_EQ(0, exit_code);
  const std::string captured = ReadOutFile();
  ASSERT_EQ(captured, "/pkg/bin/echo2 helloworld\n");
}
// Environment variables set for the component must be visible to it.
TEST_F(ComponentsBinaryTest, GetEnvMatched) {
  int64_t exit_code = -1;
  RunComponent(ComponentsBinaryTest::UrlFromCmx("getenv1.cmx"), &exit_code);
  EXPECT_EQ(0, exit_code);
  const std::string captured = ReadOutFile();
  ASSERT_EQ(captured, "FOO=bar BAR=baz\n");
}
// A variable not provided to the component must read back as NULL.
TEST_F(ComponentsBinaryTest, GetEnvMismatch) {
  int64_t exit_code = -1;
  RunComponent(ComponentsBinaryTest::UrlFromCmx("getenv2.cmx"), &exit_code);
  EXPECT_EQ(0, exit_code);
  const std::string captured = ReadOutFile();
  ASSERT_EQ(captured, "FOO=bar BAR=NULL\n");
}
// A component requesting deprecated-shell without being allowlisted must not
// reach a normal EXITED termination.
TEST_F(ComponentsBinaryTest, UnallowedDeprecatedShellFailsToLaunch) {
  int64_t exit_code = -1;
  TerminationReason reason;
  RunComponent(ComponentsBinaryTest::UrlFromCmx("echo_deprecated_shell.cmx"), &exit_code,
               &reason);
  EXPECT_NE(TerminationReason::EXITED, reason);
}
// Extra launch arguments supplied at RunComponent time must reach the
// component, which should then terminate normally with exit code 0.
TEST_F(ComponentsBinaryTest, EchoStdin) {
  int64_t exit_code = -1;
  TerminationReason reason;
  RunComponent(ComponentsBinaryTest::UrlFromCmx("echo_stdin.cmx"), &exit_code,
               &reason, {std::string("hello world")});
  EXPECT_EQ(0, exit_code);
  EXPECT_EQ(TerminationReason::EXITED, reason);
}
} // namespace
} // namespace component
|
// Fill out your copyright notice in the Description page of Project Settings.
#include "MarkerMeshActor.h"
#include "ConstructorHelpers.h"
#include "Components/StaticMeshComponent.h"
AMarkerMeshActor::AMarkerMeshActor()
{
    // Static marker: never needs per-frame updates.
    PrimaryActorTick.bCanEverTick = false;
    PrimaryActorTick.bStartWithTickEnabled = false;

    // Create the mesh component and make it the actor root.
    Mesh = CreateDefaultSubobject<UStaticMeshComponent>(TEXT("Mesh"));
    RootComponent = Mesh;
    RootComponent->Mobility = EComponentMobility::Static;

    // Load the marker mesh asset shipped with the plugin and assign it.
    ConstructorHelpers::FObjectFinderOptional<UStaticMesh> MarkerAssetFinder(TEXT("StaticMesh'/TrackViz/Marker.Marker'"));
    Mesh->SetStaticMesh(MarkerAssetFinder.Get());
}
|
// -*- C++ -*-
//
// Package: DTTFMasksOnlineProd
// Class: DTTFMasksOnlineProd
//
/**\class DTTFMasksOnlineProd DTTFMasksOnlineProd.h L1TriggerConfig/DTTrackFinder/src/DTTFMasksOnlineProd.cc
Description: <one line class summary>
Implementation:
<Notes on implementation>
*/
//
// Original Author: J. Troconiz - UAM Madrid
// Created: Fri Apr 3 00:26:52 CEST 2009
//
//
// system include files
#include <cstdlib>
#include <iostream>
#include <map>
#include <string>
#include <vector>
// user include files
#include "CondTools/L1Trigger/interface/L1ConfigOnlineProdBase.h"
#include "CondFormats/L1TObjects/interface/L1MuDTTFMasks.h"
#include "CondFormats/DataRecord/interface/L1MuDTTFMasksRcd.h"
//
// class declaration
//
// Produces the L1MuDTTFMasks payload (DT track finder channel masks) for the
// L1MuDTTFMasksRcd record, reading the configuration from the online (OMDS)
// database via the L1ConfigOnlineProdBase machinery.
class DTTFMasksOnlineProd :
public L1ConfigOnlineProdBase< L1MuDTTFMasksRcd, L1MuDTTFMasks > {
public:
DTTFMasksOnlineProd(const edm::ParameterSet&);
~DTTFMasksOnlineProd() override;
// Builds the payload for the given configuration key; returns an empty
// shared_ptr when the database query fails.
std::shared_ptr< L1MuDTTFMasks > newObject(
const std::string& objectKey ) override ;
private:
// ----------member data ---------------------------
};
//
// constructors and destructor
//
// Forwards the configuration to the base class, which registers this module
// with the framework as the producer of L1MuDTTFMasksRcd data.
DTTFMasksOnlineProd::DTTFMasksOnlineProd(
const edm::ParameterSet& iConfig)
: L1ConfigOnlineProdBase< L1MuDTTFMasksRcd, L1MuDTTFMasks >( iConfig )
{
// No additional initialization is needed beyond the base class.
}
DTTFMasksOnlineProd::~DTTFMasksOnlineProd()
{
// Nothing to release here: all resources are owned by the base class.
// (Kept for symmetry with the declared override.)
}
// Reads the DTTF crate mask words for `objectKey` from the online (OMDS)
// database, decodes them per sector (0-11) and wheel (N2..P2), and fills an
// L1MuDTTFMasks payload with the corresponding channel-disable flags.
// Returns an empty shared_ptr when the database query fails.
std::shared_ptr< L1MuDTTFMasks >
DTTFMasksOnlineProd::newObject( const std::string& objectKey )
{
using namespace edm::es;
auto pDTTFMasks = std::make_shared< L1MuDTTFMasks >() ;
pDTTFMasks->reset() ;
std::string dttfSchema = "CMS_DT_TF" ;
// Order of strings is used below -- don't change!
// Columns 0-5 are the per-crate mask words; columns 6-11 ("_E") are the
// corresponding ETTF ("krate") words read in the same loop below.
std::vector< std::string > crateMaskColumns ;
crateMaskColumns.push_back( "WEDGE_CRATE_1" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_2" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_3" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_4" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_5" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_6" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_1_E" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_2_E" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_3_E" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_4_E" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_5_E" ) ;
crateMaskColumns.push_back( "WEDGE_CRATE_6_E" ) ;
// Single-row query of DTTF_RUN_SETTINGS keyed by the object key.
l1t::OMDSReader::QueryResults crateMaskResults =
m_omdsReader.basicQuery( crateMaskColumns,
dttfSchema,
"DTTF_RUN_SETTINGS",
"DTTF_RUN_SETTINGS.ID",
m_omdsReader.singleAttribute( objectKey ) ) ;
if( crateMaskResults.queryFailed() ||
crateMaskResults.numberRows() != 1 ) // check query successful
{
edm::LogError( "L1-O2O" )
<< "Problem with L1MuDTTFMasks key " << objectKey ;
return std::shared_ptr< L1MuDTTFMasks >() ;
}
// Cache crate masks
// Each DB cell holds two whitespace-separated hex words: the left (L) and
// right (R) half of the crate. crateMask* = wedge words, krateMask* = "_E"
// (ETTF) words.
unsigned long crateMaskL[ 6 ] ;
unsigned long crateMaskR[ 6 ] ;
unsigned long krateMaskL[ 6 ] ;
unsigned long krateMaskR[ 6 ] ;
for( int icrate = 0 ; icrate < 6 ; ++icrate )
{
std::string crateMask ;
crateMaskResults.fillVariable( crateMaskColumns[ icrate ],
crateMask ) ;
char* pEnd;
// strtol parses the first hex word and leaves pEnd at the second one.
crateMaskL[ icrate ] = std::strtol( crateMask.c_str(), &pEnd, 16 ) ;
crateMaskR[ icrate ] = std::strtol( pEnd, (char **)nullptr, 16 ) ;
crateMaskResults.fillVariable( crateMaskColumns[ icrate+6 ],
crateMask ) ;
krateMaskL[ icrate ] = std::strtol( crateMask.c_str(), &pEnd, 16 ) ;
krateMaskR[ icrate ] = std::strtol( pEnd, (char **)nullptr, 16 ) ;
std::cout << "Crate " << icrate << " masks"
<< " L: " << std::hex << crateMaskL[ icrate ]
<< " " << std::hex << krateMaskL[ icrate ]
<< " R: " << std::hex << crateMaskR[ icrate ]
<< " " << std::hex << krateMaskR[ icrate ] << std::endl ;
}
// Map of sector (0-11) to name (L/R)
std::string sectorNames[ 12 ] = {
"R", "L", "R", "L", "L", "R", "L", "R", "R", "L", "R", "L" } ;
// Map of sector (0-11) to crate (0-5)
int crateNumbers[ 12 ] = { 3, 3, 4, 4, 5, 5, 2, 2, 1, 1, 0, 0 } ;
// Map of wheel array index to wheel number (+- 3, 2, 1).
int wheelNumbers[ 6 ] = { -3, -2, -1, 1, 2, 3 } ;
// Map of wheel array index to name ({N,P}{0,1,2}).
std::string wheelNames[ 6 ] = { "N2", "N1", "N0", "P0", "P1", "P2" } ;
// Map of sector+wheel name to bit number in crate mask
std::map< std::string, unsigned int > crateMaskBitmap ;
crateMaskBitmap.insert( std::make_pair( "N2", 24 ) ) ;
crateMaskBitmap.insert( std::make_pair( "N1", 20 ) ) ;
crateMaskBitmap.insert( std::make_pair( "N0", 16 ) ) ;
crateMaskBitmap.insert( std::make_pair( "P0", 8 ) ) ;
crateMaskBitmap.insert( std::make_pair( "P1", 4 ) ) ;
crateMaskBitmap.insert( std::make_pair( "P2", 0 ) ) ;
// ETTF bit map: deliberately no "N0" entry -- wheel N0 is skipped for the
// ETSOC masks below.
std::map< std::string, unsigned int > krateMaskBitmap ;
krateMaskBitmap.insert( std::make_pair( "N2", 16 ) ) ;
krateMaskBitmap.insert( std::make_pair( "N1", 12 ) ) ;
krateMaskBitmap.insert( std::make_pair( "P0", 8 ) ) ;
krateMaskBitmap.insert( std::make_pair( "P1", 4 ) ) ;
krateMaskBitmap.insert( std::make_pair( "P2", 0 ) ) ;
// Loop over sectors 0-11
for( int isc = 0 ; isc < 12 ; ++isc )
{
int crateNumber = crateNumbers[ isc ] ;
// Pick the left or right half of the crate word for this sector.
unsigned long crateMask = crateMaskL[ crateNumber ] ;
if ( sectorNames[ isc ] == "R" ) crateMask = crateMaskR[ crateNumber ] ;
unsigned long krateMask = krateMaskL[ crateNumber ] ;
if ( sectorNames[ isc ] == "R" ) krateMask = krateMaskR[ crateNumber ] ;
std::cout << "isc " << isc << " icr " << crateNumber << std::endl ;
// Loop over wheels 0-5
for( int iwh = 0 ; iwh < 6 ; ++iwh )
{
std::string sectorWheelName =
sectorNames[ isc ] + wheelNames[ iwh ] ;
// 30 is an out-of-range default shift: yields all-zero enable bits if
// the bitmap lookup ever fails.
unsigned int maskBit = 30 ;
std::map< std::string, unsigned int >::const_iterator itr =
crateMaskBitmap.find( wheelNames[ iwh ] ) ;
if( itr != crateMaskBitmap.end() ) maskBit = itr->second ;
// 4 enable bits per wheel; a 5th bit for the outermost wheels is fetched
// from higher in the word (shifts 24/25 differ -- presumably matching the
// hardware word layout; confirm against DTTF documentation).
unsigned long phtfEnabled = ( crateMask >> maskBit ) & 0xF ;
if ( wheelNames[ iwh ] == "P2" ) phtfEnabled += ( crateMask >> 24 ) & 0x10 ;
if ( wheelNames[ iwh ] == "N2" ) phtfEnabled += ( crateMask >> 25 ) & 0x10 ;
std::cout << "Bits " << std::dec << maskBit << " (" << sectorWheelName
<< ") of mask " << std::hex << crateMask << " is "
<< std::hex << phtfEnabled << std::endl ;
int nwh = wheelNumbers[ iwh ] ;
// The DB word carries "enabled" bits; 1-chmask converts each one into the
// "channel disabled" value stored in the payload.
unsigned long chmask = phtfEnabled & 0x1;
std::cout << " INREC_CHDIS_ST1 " << 1-chmask ;
pDTTFMasks->set_inrec_chdis_st1( nwh, isc, 1-chmask ) ;
chmask = ( phtfEnabled >> 1 ) & 0x1;
std::cout << " INREC_CHDIS_ST2 " << 1-chmask ;
pDTTFMasks->set_inrec_chdis_st2( nwh, isc, 1-chmask ) ;
chmask = ( phtfEnabled >> 2 ) & 0x1;
std::cout << " INREC_CHDIS_ST3 " << 1-chmask ;
pDTTFMasks->set_inrec_chdis_st3( nwh, isc, 1-chmask ) ;
chmask = ( phtfEnabled >> 3 ) & 0x1;
std::cout << " INREC_CHDIS_ST4 " << 1-chmask ;
pDTTFMasks->set_inrec_chdis_st4( nwh, isc, 1-chmask ) ;
chmask = ( phtfEnabled >> 4 ) & 0x1;
std::cout << " INREC_CHDIS_CSC " << 1-chmask << std::endl ;
pDTTFMasks->set_inrec_chdis_csc( nwh, isc, 1-chmask ) ;
// Wheel N0 has no ETSOC part: skip the ETTF masks.
if ( wheelNames[ iwh ] == "N0" ) continue ;
maskBit = 20 ;
itr = krateMaskBitmap.find( wheelNames[ iwh ] ) ;
if( itr != krateMaskBitmap.end() ) maskBit = itr->second ;
unsigned long ettfEnabled = ( krateMask >> maskBit ) & 0x7 ;
std::cout << "Bits " << std::dec << maskBit << " (" << sectorWheelName
<< ") of mask " << std::hex << krateMask << " is "
<< std::hex << ettfEnabled << std::endl ;
chmask = ettfEnabled & 0x1;
std::cout << " ETSOC_CHDIS_ST1 " << 1-chmask ;
pDTTFMasks->set_etsoc_chdis_st1( nwh, isc, 1-chmask ) ;
chmask = ( ettfEnabled >> 1 ) & 0x1;
std::cout << " ETSOC_CHDIS_ST2 " << 1-chmask ;
pDTTFMasks->set_etsoc_chdis_st2( nwh, isc, 1-chmask ) ;
chmask = ( ettfEnabled >> 2 ) & 0x1;
std::cout << " ETSOC_CHDIS_ST3 " << 1-chmask << std::endl ;
pDTTFMasks->set_etsoc_chdis_st3( nwh, isc, 1-chmask ) ;
}
}
return pDTTFMasks ;
}
// ------------ method called to produce the data ------------
//define this as a plug-in
DEFINE_FWK_EVENTSETUP_MODULE(DTTFMasksOnlineProd);
|
/*
* ADC Read
* Basic example of using ADC with Arduino
*/
#include <Arduino.h>
#define ADC_RESOLUTION 1024 // ADC is 10 bit resolution
#define SYSTEM_VOLTAGE 5.1 // in volts
#define ADC_PORT A0 // port on which our circuit is connected
int adc_value_reading = 0;
float adc_voltage = 0.0;
int loop_delay = 500; // set delay between next run to 0.5 second
void setup()
{
    // Built-in LED pin (LED_BUILTIN is defined by the Arduino core).
    pinMode(LED_BUILTIN, OUTPUT);
    // Use the board supply voltage as the ADC reference.
    analogReference(DEFAULT);
    // Serial console for reporting the readings.
    Serial.begin(115200);
}
void loop()
{
    // Record the start time so we can measure how long one reading plus the
    // serial reporting takes. micros() counts microseconds since boot.
    uint32_t time_start = micros();

    // Raw ADC value; default resolution is 10 bits (0-1023).
    adc_value_reading = analogRead(ADC_PORT);

    // Convert the raw reading to volts against the system reference voltage.
    adc_voltage = ((float)adc_value_reading / (float)ADC_RESOLUTION) * SYSTEM_VOLTAGE;

    Serial.print("Analog read: ");
    Serial.print(adc_value_reading);
    Serial.print("; Voltage: ");
    Serial.print(adc_voltage);
    Serial.println(" V");

    // BUG FIX: the elapsed time was previously stored in uint_fast8_t (8 bits
    // on AVR), truncating any duration above 255 us -- an ADC read plus
    // several Serial prints easily exceeds that. Use the full 32-bit width;
    // unsigned subtraction also stays correct across micros() wraparound.
    uint32_t time_diff = micros() - time_start;
    Serial.print("Operations time [microseconds]: ");
    Serial.println(time_diff);

    delay(loop_delay);
}
|
/* Unicamp - Universidade Estadual de Campinas
FT - Faculdade de Tecnologia
Limeira - SP
Prof. Dr. Andre F. de Angelis
Maio/2015
*/
#include <string>
#include "a02ex03_b.hpp"
#include "a02ex03_j.hpp"
using namespace std;
// Constructs a liquid food item.
// volume: volume in litres; valor: price, forwarded to the Food base class.
// Uses the member-initializer list instead of assignment in the body
// (idiomatic, avoids default-construct-then-assign); also drops the stray
// semicolon after the function body.
Liquid::Liquid(float volume, double valor) : Food(valor), volume(volume)
{
}
// Returns the item description: the stored volume in litres
// (to_string formats the float with six decimal places).
string Liquid::getDescricao()
{
    const string volumeText = to_string(volume);
    return " - " + volumeText + " Litro(s).";
}
/* fim de arquivo */
|
/** \file mri_core_kspace_filter.cpp
\brief Implementation kspace filter functionalities for 2D and 3D MRI parallel imaging
\author Hui Xue
*/
#include "mri_core_kspace_filter.h"
#include "hoNDArray_elemwise.h"
#include <boost/algorithm/string.hpp>
#ifdef M_PI
#undef M_PI
#endif // M_PI
#define M_PI 3.14159265358979323846
namespace Gadgetron
{
// Maps a (case-insensitive) filter name to its ISMRMRDKSPACEFILTER value;
// unknown names log an error and fall back to ISMRMRD_FILTER_NONE.
ISMRMRDKSPACEFILTER get_kspace_filter_type(const std::string& name)
{
    std::string key(name);
    boost::algorithm::to_lower(key);

    if (key == "gaussian")       return ISMRMRD_FILTER_GAUSSIAN;
    if (key == "hanning")        return ISMRMRD_FILTER_HANNING;
    if (key == "taperedhanning") return ISMRMRD_FILTER_TAPERED_HANNING;
    if (key == "none")           return ISMRMRD_FILTER_NONE;

    GERROR_STREAM("Unrecognized kspace filter name : " << name);
    return ISMRMRD_FILTER_NONE;
}
// Maps a filter type back to its display name; unknown values log an error
// and return "none".
std::string get_kspace_filter_name(ISMRMRDKSPACEFILTER v)
{
    if (v == ISMRMRD_FILTER_GAUSSIAN)        return "Gaussian";
    if (v == ISMRMRD_FILTER_HANNING)         return "Hanning";
    if (v == ISMRMRD_FILTER_TAPERED_HANNING) return "TaperedHanning";
    if (v == ISMRMRD_FILTER_NONE)            return "none";

    GERROR_STREAM("Unrecognized kspace filter type : " << v);
    return "none";
}
// Builds a length-`len` symmetric k-space filter of the requested type into
// `filter`, then scales it to "SNR-unit" power (sum of squared coefficients
// equals len). `sigma` is used by the Gaussian filter only; `width` is the
// taper width for the tapered-Hanning filter. Even-length filters zero the
// first sample so the remaining len-1 taps are symmetric about the center.
template<typename T>
void generate_symmetric_filter(size_t len, hoNDArray<T>& filter, ISMRMRDKSPACEFILTER filterType, double sigma, size_t width)
{
try
{
if (len == 0) return;
filter.create(len);
// clamp the taper width to a usable value
if (width == 0 || width >= len) width = 1;
size_t ii;
if (filterType == ISMRMRD_FILTER_GAUSSIAN)
{
// evaluate exp(-sigma^2/2 * x^2) on a symmetric grid x in [-1, 1]
double r = -1.0*sigma*sigma / 2;
if (len % 2 == 0)
{
// to make sure the zero points match and boundary of filters are symmetric
double stepSize = 2.0 / (len - 2);
std::vector<double> x(len - 1);
for (ii = 0; ii<len - 1; ii++)
{
x[ii] = -1 + ii*stepSize;
}
for (ii = 0; ii<len - 1; ii++)
{
filter(ii + 1) = T(std::exp(r*(x[ii] * x[ii])));
}
filter(0) = T(0);
}
else
{
double stepSize = 2.0 / (len - 1);
std::vector<double> x(len);
for (ii = 0; ii<len; ii++)
{
x[ii] = -1 + ii*stepSize;
}
for (ii = 0; ii<len; ii++)
{
filter(ii) = T(std::exp(r*(x[ii] * x[ii])));
}
}
}
else if (filterType == ISMRMRD_FILTER_TAPERED_HANNING)
{
// Hanning-shaped taper ramp applied to `width` samples at each edge
hoNDArray<T> w(width);
for (ii = 1; ii <= width; ii++)
{
w(ii - 1) = T((0.5 * (1 - std::cos(2.0*M_PI*ii / (2 * width + 1)))));
}
// make sure the center of the filter will end up being 1:
Gadgetron::fill(filter, T(1.0));
if (len % 2 == 0)
{
for (ii = 1; ii <= width; ii++)
{
filter(ii) = w(ii - 1);
filter(len - ii) = filter(ii);
}
filter(0) = T(0);
}
else
{
for (ii = 1; ii <= width; ii++)
{
filter(ii - 1) = w(ii - 1);
filter(len - ii) = filter(ii - 1);
}
}
}
else if (filterType == ISMRMRD_FILTER_HANNING)
{
// full Hanning window; second half mirrored from the first
if (len % 2 == 0)
{
size_t N = len - 1;
double halfLen = (double)((N + 1) / 2);
for (ii = 1; ii <= halfLen; ii++)
{
filter(ii) = T((0.5 * (1 - std::cos(2.0*M_PI*ii / (N + 1)))));
}
for (ii = (size_t)halfLen; ii<N; ii++)
{
filter(ii + 1) = filter(N - ii);
}
filter(0) = T(0);
}
else
{
double halfLen = (double)((len + 1) / 2);
for (ii = 1; ii <= (size_t)halfLen; ii++)
{
filter(ii - 1) = T((0.5 * (1 - std::cos(2.0*M_PI*ii / (len + 1)))));
}
for (ii = (size_t)halfLen; ii<len; ii++)
{
filter(ii) = filter(len - 1 - ii);
}
}
}
else if (filterType == ISMRMRD_FILTER_NONE)
{
Gadgetron::fill(filter, T(1.0));
}
else
{
GADGET_THROW("generate_symmetric_filter, unrecognized fiter type ... ");
}
// normalize so that the sum of squared coefficients equals len
T sos = 0.0f;
for (ii = 0; ii<len; ii++)
{
sos += filter(ii)*filter(ii);
}
T r = T(1.0 / std::sqrt(std::abs(sos) / (len)));
for (ii = 0; ii<len; ii++)
{
filter(ii) *= r;
}
}
catch (...)
{
GADGET_THROW("Errors in generate_symmetric_filter(...) ... ");
}
}
template EXPORTMRICORE void generate_symmetric_filter(size_t len, hoNDArray<float>& filter, ISMRMRDKSPACEFILTER filterType, double sigma, size_t width);
template EXPORTMRICORE void generate_symmetric_filter(size_t len, hoNDArray<double>& filter, ISMRMRDKSPACEFILTER filterType, double sigma, size_t width);
template EXPORTMRICORE void generate_symmetric_filter(size_t len, hoNDArray< std::complex<float> >& filter, ISMRMRDKSPACEFILTER filterType, double sigma, size_t width);
template EXPORTMRICORE void generate_symmetric_filter(size_t len, hoNDArray< std::complex<double> >& filter, ISMRMRDKSPACEFILTER filterType, double sigma, size_t width);
// ------------------------------------------------------------------------
// Builds a length-`len` filter that is 1 over the sampled range [start, end]
// and 0 elsewhere, with a tapered-Hanning (or none) roll-off of `width`
// samples at the range edges. With densityComp enabled, the part of the
// range that has no symmetric counterpart about len/2 is weighted 2.0 --
// presumably to compensate for the missing conjugate samples in partial
// Fourier acquisitions (confirm against the callers). The result is scaled
// to SNR-unit power over the sampled extent.
template<typename T>
void generate_asymmetric_filter(size_t len, size_t start, size_t end, hoNDArray<T>& filter, ISMRMRDKSPACEFILTER filterType, size_t width, bool densityComp)
{
try
{
if (len == 0) return;
// sanitize the sampled range; fall back to the full range when inconsistent
if (start > len - 1) start = 0;
if (end > len - 1) end = len - 1;
if (start > end)
{
start = 0;
end = len - 1;
}
filter.create(len);
Gadgetron::clear(filter);
size_t ii;
// pass-band: 1 inside [start, end], 0 outside
for (ii = start; ii <= end; ii++)
{
filter(ii) = T(1.0);
}
if (width == 0 || width >= len) width = 1;
// taper ramp w: rises from ~0 towards 1 over `width` samples
hoNDArray<T> w(width);
if (filterType == ISMRMRD_FILTER_TAPERED_HANNING)
{
for (ii = 1; ii <= width; ii++)
{
w(ii - 1) = T((0.5 * (1 - std::cos(2.0*M_PI*ii / (2 * width + 1)))));
}
}
else if (filterType == ISMRMRD_FILTER_NONE)
{
Gadgetron::fill(w, T(1.0));
}
else
{
GADGET_THROW("generate_symmetric_filter, unrecognized fiter type ... ");
}
if (densityComp)
{
// [startSym, endSym]: the sub-range symmetric about the center len/2
size_t startSym(0), endSym(len - 1);
find_symmetric_sampled_region(start, end, len / 2, startSym, endSym);
// fully sampled: plain taper at both outer edges
if (start == 0 && end == len - 1)
{
for (ii = 1; ii <= width; ii++)
{
filter(ii - 1) = w(ii - 1);
filter(len - ii) = filter(ii - 1);
}
}
// asymmetric at the start: double-weight the unmatched head, blend down
// to 1 across the taper, and taper the tail edge
if (start == 0 && end<len - 1)
{
for (ii = 0; ii<startSym; ii++)
{
filter(ii) = 2.0;
}
for (ii = 1; ii <= width; ii++)
{
filter(ii - 1 + startSym) = T(1.0) + w(width - ii);
filter(end - ii + 1) = w(ii - 1);
}
}
// asymmetric at the end: mirror image of the case above
if (start>0 && end == len - 1)
{
for (ii = endSym + 1; ii<len; ii++)
{
filter(ii) = 2.0;
}
for (ii = 1; ii <= width; ii++)
{
filter(endSym - ii + 1) = T(1.0) + w(width - ii);
filter(start + ii - 1) = w(ii - 1);
}
}
// interior range: handle whichever side extends beyond the symmetric part
if (start>0 && end<len - 1)
{
if (start == startSym && end == endSym)
{
// range already symmetric: taper both edges only
for (ii = 1; ii <= width; ii++)
{
filter(start + ii - 1) = w(ii - 1);
filter(end - ii + 1) = w(ii - 1);
}
}
else if (start == startSym && end>endSym)
{
for (ii = endSym + 1; ii <= end; ii++)
{
filter(ii) = 2.0;
}
for (ii = 1; ii <= width; ii++)
{
filter(end - ii + 1) = T(1.0) + w(ii - 1);
filter(endSym - ii + 1) = w(width - ii);
filter(start + ii - 1) = w(ii - 1);
}
}
else if (start<startSym && end == endSym)
{
for (ii = start; ii<startSym; ii++)
{
filter(ii) = 2.0;
}
for (ii = 1; ii <= width; ii++)
{
filter(ii - 1 + start) = T(1.0) + w(ii - 1);
filter(ii - 1 + startSym) = w(width - ii);
filter(end - ii + 1) = w(ii - 1);
}
}
else
{
for (ii = 1; ii <= width; ii++)
{
filter(start + ii - 1) = w(ii - 1);
filter(end - ii + 1) = w(ii - 1);
}
}
}
}
else
{
// no density compensation: just taper the inner edges of the pass-band
if (start == 0 && end == len - 1)
{
for (ii = 1; ii <= width; ii++)
{
filter(ii - 1) = w(ii - 1);
filter(len - ii) = filter(ii - 1);
}
}
if (start == 0 && end<len - 1)
{
for (ii = 1; ii <= width; ii++)
{
filter(end - ii + 1) = w(ii - 1);
}
}
if (start>0 && end == len - 1)
{
for (ii = 1; ii <= width; ii++)
{
filter(start + ii - 1) = w(ii - 1);
}
}
if (start>0 && end<len - 1)
{
for (ii = 1; ii <= width; ii++)
{
filter(start + ii - 1) = w(ii - 1);
filter(end - ii + 1) = w(ii - 1);
}
}
}
// normalize over the sampled extent only
T sos = 0.0f;
for (ii = 0; ii<len; ii++)
{
sos += filter(ii)*filter(ii);
}
T r = (T)(1.0 / std::sqrt(std::abs(sos) / (end - start + 1))); // SNR unit filter
for (ii = 0; ii<len; ii++)
{
filter(ii) *= r;
}
}
catch (...)
{
GADGET_THROW("Errors in generate_asymmetric_filter(...) ... ");
}
}
template EXPORTMRICORE void generate_asymmetric_filter(size_t len, size_t start, size_t end, hoNDArray<float>& filter, ISMRMRDKSPACEFILTER filterType, size_t width, bool densityComp);
template EXPORTMRICORE void generate_asymmetric_filter(size_t len, size_t start, size_t end, hoNDArray<double>& filter, ISMRMRDKSPACEFILTER filterType, size_t width, bool densityComp);
template EXPORTMRICORE void generate_asymmetric_filter(size_t len, size_t start, size_t end, hoNDArray< std::complex<float> >& filter, ISMRMRDKSPACEFILTER filterType, size_t width, bool densityComp);
template EXPORTMRICORE void generate_asymmetric_filter(size_t len, size_t start, size_t end, hoNDArray< std::complex<double> >& filter, ISMRMRDKSPACEFILTER filterType, size_t width, bool densityComp);
// ------------------------------------------------------------------------
// Builds a symmetric (Hanning) filter covering the sampled region [start, end]
// of a length-`len` dimension: a Hanning window that is symmetric about the
// center index len/2 is generated and copied into the covered part of
// `filter`; the remainder stays zero. Falls back to a plain full-length
// Hanning filter when the whole range is sampled. Throws on invalid inputs.
template<typename T>
void generate_symmetric_filter_ref(size_t len, size_t start, size_t end, hoNDArray<T>& filter)
{
    try
    {
        GADGET_CHECK_THROW(len >= 2);
        // NOTE: `start >= 0` removed from the check -- start is an unsigned
        // size_t, so that comparison was tautologically true.
        GADGET_CHECK_THROW(end <= len - 1 && start <= end);

        if (start == 0 && end == len - 1)
        {
            // fully sampled: ordinary symmetric Hanning filter
            generate_symmetric_filter(len, filter, ISMRMRD_FILTER_HANNING);
            return;
        }

        size_t centerInd = len / 2;

        size_t lenFilter(0); // make a symmetric filter with zero at the center

        // Candidate symmetric lengths anchored at the center index.
        // NOTE(review): assumes start <= centerInd <= end; otherwise these
        // unsigned subtractions wrap -- confirm callers guarantee the sampled
        // region covers the k-space center.
        size_t lenFilterEnd = 2 * (end - centerInd) + 1;
        size_t lenFilterStart = 2 * (centerInd - start) + 1;

        if (start == 0 && end < len - 1)
        {
            lenFilter = lenFilterEnd;
        }
        else if (start > 0 && end == len - 1)
        {
            lenFilter = lenFilterStart;
        }
        else if (start > 0 && end < len - 1)
        {
            // keep the shorter symmetric window fully inside [start, end]
            lenFilter = ((lenFilterStart < lenFilterEnd) ? lenFilterStart : lenFilterEnd);
        }
        else
        {
            GERROR_STREAM("generate_symmetric_filter_ref, invalid inputs : start - end - len ... " << start << " " << end << " " << len);
            GADGET_THROW("generate_symmetric_filter_ref, invalid inputs ... ");
        }

        GADGET_CHECK_THROW(lenFilter > 0);

        hoNDArray<T> filterSym(lenFilter);
        generate_symmetric_filter(lenFilter, filterSym, ISMRMRD_FILTER_HANNING);

        filter.create(len);
        Gadgetron::clear(&filter);

        if (start == 0 && end < len - 1)
        {
            // anchor the window so it ends at `end`
            memcpy(filter.begin() + end - lenFilter + 1, filterSym.begin(), filterSym.get_number_of_bytes());
            return;
        }
        else if (start > 0 && end == len - 1)
        {
            // anchor the window so it starts at `start`
            memcpy(filter.begin() + start, filterSym.begin(), filterSym.get_number_of_bytes());
            return;
        }
        else if (start > 0 && end < len - 1)
        {
            if (lenFilter == lenFilterStart)
            {
                memcpy(filter.begin() + start, filterSym.begin(), filterSym.get_number_of_bytes());
            }
            else
            {
                memcpy(filter.begin() + end - lenFilter + 1, filterSym.begin(), filterSym.get_number_of_bytes());
            }
            return;
        }
        else
        {
            GERROR_STREAM("Invalid inputs : start - end - len : " << start << " " << end << " " << len);
            GADGET_THROW("generate_symmetric_filter_ref, invalid inputs : start - end - len");
        }
    }
    catch (...)
    {
        GADGET_THROW("Errors in generate_symmetric_filter_ref(...) ... ");
    }
}
template EXPORTMRICORE void generate_symmetric_filter_ref(size_t len, size_t start, size_t end, hoNDArray<float>& filter);
template EXPORTMRICORE void generate_symmetric_filter_ref(size_t len, size_t start, size_t end, hoNDArray<double>& filter);
template EXPORTMRICORE void generate_symmetric_filter_ref(size_t len, size_t start, size_t end, hoNDArray< std::complex<float> >& filter);
template EXPORTMRICORE void generate_symmetric_filter_ref(size_t len, size_t start, size_t end, hoNDArray< std::complex<double> >& filter);
// ------------------------------------------------------------------------
// Forms the separable 2D filter fxy(x, y) = fx(x) * fy(y).
template <typename T>
void compute_2d_filter(const hoNDArray<T>& fx, const hoNDArray<T>& fy, hoNDArray<T>& fxy)
{
    try
    {
        const size_t RO = fx.get_size(0);
        const size_t E1 = fy.get_size(0);

        fxy.create(RO, E1);
        T* pFxy = fxy.begin();

        for (size_t y = 0; y < E1; y++)
        {
            T* row = pFxy + y * RO;
            for (size_t x = 0; x < RO; x++)
            {
                row[x] = fx(x) * fy(y);
            }
        }
    }
    catch (...)
    {
        GADGET_THROW("Errors in compute_2d_filter(...) ... ");
    }
}
template EXPORTMRICORE void compute_2d_filter(const hoNDArray<float>& fx, const hoNDArray<float>& fy, hoNDArray<float>& fxy);
template EXPORTMRICORE void compute_2d_filter(const hoNDArray<double>& fx, const hoNDArray<double>& fy, hoNDArray<double>& fxy);
template EXPORTMRICORE void compute_2d_filter(const hoNDArray< std::complex<float> >& fx, const hoNDArray< std::complex<float> >& fy, hoNDArray< std::complex<float> >& fxy);
template EXPORTMRICORE void compute_2d_filter(const hoNDArray< std::complex<double> >& fx, const hoNDArray< std::complex<double> >& fy, hoNDArray< std::complex<double> >& fxy);
// ------------------------------------------------------------------------
// Forms a complex<float> separable 2D filter from two real 1D filters:
// fxy(x, y) = complex(fx(x) * fy(y), 0).
void compute_2d_filter(const hoNDArray<float>& fx, const hoNDArray<float>& fy, hoNDArray< std::complex<float> >& fxy)
{
    try
    {
        const size_t RO = fx.get_size(0);
        const size_t E1 = fy.get_size(0);

        fxy.create(RO, E1);
        std::complex<float>* row = fxy.begin();

        for (size_t y = 0; y < E1; y++, row += RO)
        {
            for (size_t x = 0; x < RO; x++)
            {
                row[x] = std::complex<float>(fx(x) * fy(y));
            }
        }
    }
    catch (...)
    {
        GADGET_THROW("Errors in compute_2d_filter(float) ... ");
    }
}
// ------------------------------------------------------------------------
// Forms a complex<double> separable 2D filter from two real 1D filters:
// fxy(x, y) = complex(fx(x) * fy(y), 0).
void compute_2d_filter(const hoNDArray<double>& fx, const hoNDArray<double>& fy, hoNDArray< std::complex<double> >& fxy)
{
    try
    {
        const size_t RO = fx.get_size(0);
        const size_t E1 = fy.get_size(0);

        fxy.create(RO, E1);
        std::complex<double>* row = fxy.begin();

        for (size_t y = 0; y < E1; y++, row += RO)
        {
            for (size_t x = 0; x < RO; x++)
            {
                row[x] = std::complex<double>(fx(x) * fy(y));
            }
        }
    }
    catch (...)
    {
        GADGET_THROW("Errors in compute_2d_filter(double) ... ");
    }
}
// ------------------------------------------------------------------------
// Forms the separable 3D filter fxyz(x, y, z) = fx(x) * fy(y) * fz(z).
template <typename T>
void compute_3d_filter(const hoNDArray<T>& fx, const hoNDArray<T>& fy, const hoNDArray<T>& fz, hoNDArray<T>& fxyz)
{
    try
    {
        const size_t RO = fx.get_size(0);
        const size_t E1 = fy.get_size(0);
        const size_t E2 = fz.get_size(0);

        fxyz.create(RO, E1, E2);

        const T* px = fx.begin();
        const T* py = fy.begin();
        const T* pz = fz.begin();
        T* dst = fxyz.begin();

        // Output is written sequentially (x fastest). The product is
        // evaluated as (fx * fz) * fy to keep floating-point results
        // bit-identical across compilers.
        for (size_t z = 0; z < E2; z++)
        {
            for (size_t y = 0; y < E1; y++)
            {
                for (size_t x = 0; x < RO; x++)
                {
                    *dst++ = px[x] * pz[z] * py[y];
                }
            }
        }
    }
    catch (...)
    {
        GADGET_THROW("Errors in compute_3d_filter(...) ... ");
    }
}
template EXPORTMRICORE void compute_3d_filter(const hoNDArray<float>& fx, const hoNDArray<float>& fy, const hoNDArray<float>& fz, hoNDArray<float>& fxyz);
template EXPORTMRICORE void compute_3d_filter(const hoNDArray<double>& fx, const hoNDArray<double>& fy, const hoNDArray<double>& fz, hoNDArray<double>& fxyz);
template EXPORTMRICORE void compute_3d_filter(const hoNDArray< std::complex<float> >& fx, const hoNDArray< std::complex<float> >& fy, const hoNDArray< std::complex<float> >& fz, hoNDArray< std::complex<float> >& fxyz);
template EXPORTMRICORE void compute_3d_filter(const hoNDArray< std::complex<double> >& fx, const hoNDArray< std::complex<double> >& fy, const hoNDArray< std::complex<double> >& fz, hoNDArray< std::complex<double> >& fxyz);
// ------------------------------------------------------------------------
// Forms a complex<float> separable 3D filter from three real 1D filters:
// fxyz(x, y, z) = complex(fx(x) * fy(y) * fz(z), 0).
void compute_3d_filter(const hoNDArray<float>& fx, const hoNDArray<float>& fy, const hoNDArray<float>& fz, hoNDArray< std::complex<float> >& fxyz)
{
    try
    {
        const size_t RO = fx.get_size(0);
        const size_t E1 = fy.get_size(0);
        const size_t E2 = fz.get_size(0);

        fxyz.create(RO, E1, E2);
        std::complex<float>* dst = fxyz.begin();

        // Sequential write order matches the (z, y, x) index layout.
        for (size_t z = 0; z < E2; z++)
        {
            for (size_t y = 0; y < E1; y++)
            {
                for (size_t x = 0; x < RO; x++)
                {
                    *dst++ = std::complex<float>(fx(x) * fy(y) * fz(z));
                }
            }
        }
    }
    catch (...)
    {
        GADGET_THROW("Errors in compute_3d_filter(float) ... ");
    }
}
// ------------------------------------------------------------------------
// Forms a complex<double> separable 3D filter from three real 1D filters:
// fxyz(x, y, z) = complex(fx(x) * fy(y) * fz(z), 0).
void compute_3d_filter(const hoNDArray<double>& fx, const hoNDArray<double>& fy, const hoNDArray<double>& fz, hoNDArray< std::complex<double> >& fxyz)
{
    try
    {
        const size_t RO = fx.get_size(0);
        const size_t E1 = fy.get_size(0);
        const size_t E2 = fz.get_size(0);

        fxyz.create(RO, E1, E2);
        std::complex<double>* dst = fxyz.begin();

        // Sequential write order matches the (z, y, x) index layout.
        for (size_t z = 0; z < E2; z++)
        {
            for (size_t y = 0; y < E1; y++)
            {
                for (size_t x = 0; x < RO; x++)
                {
                    *dst++ = std::complex<double>(fx(x) * fy(y) * fz(z));
                }
            }
        }
    }
    catch (...)
    {
        GADGET_THROW("Errors in compute_3d_filter(double) ... ");
    }
}
// ------------------------------------------------------------------------
// Applies the 1D readout (dimension 0) filter fRO to `data` in place.
// fRO must have exactly data.get_size(0) elements.
// NOTE(review): relies on Gadgetron::multiply to apply the 1D filter across
// all higher dimensions of `data` -- confirm semantics in hoNDArray_elemwise.
template <typename T>
void apply_kspace_filter_RO(hoNDArray<T>& data, const hoNDArray<T>& fRO)
{
try
{
GADGET_CHECK_THROW(data.get_size(0) == fRO.get_number_of_elements());
Gadgetron::multiply(data, fRO, data);
}
catch (...)
{
GADGET_THROW("Errors in apply_kspace_filter_RO(...) ... ");
}
}
template EXPORTMRICORE void apply_kspace_filter_RO(hoNDArray<float>& data, const hoNDArray<float>& fRO);
template EXPORTMRICORE void apply_kspace_filter_RO(hoNDArray<double>& data, const hoNDArray<double>& fRO);
template EXPORTMRICORE void apply_kspace_filter_RO(hoNDArray< std::complex<float> >& data, const hoNDArray< std::complex<float> >& fRO);
template EXPORTMRICORE void apply_kspace_filter_RO(hoNDArray< std::complex<double> >& data, const hoNDArray< std::complex<double> >& fRO);
    // Out-of-place variant: multiplies data by the 1D readout (RO) filter into
    // dataFiltered, leaving data untouched.
    template <typename T>
    void apply_kspace_filter_RO(const hoNDArray<T>& data, const hoNDArray<T>& fRO, hoNDArray<T>& dataFiltered)
    {
        try
        {
            // Filter length must match the RO (first) dimension of the data.
            GADGET_CHECK_THROW(data.get_size(0) == fRO.get_number_of_elements());
            Gadgetron::multiply(data, fRO, dataFiltered);
        }
        catch (...)
        {
            GADGET_THROW("Errors in apply_kspace_filter_RO(...) ... ");
        }
    }
    template EXPORTMRICORE void apply_kspace_filter_RO(const hoNDArray<float>& data, const hoNDArray<float>& fRO, hoNDArray<float>& dataFiltered);
    template EXPORTMRICORE void apply_kspace_filter_RO(const hoNDArray<double>& data, const hoNDArray<double>& fRO, hoNDArray<double>& dataFiltered);
    template EXPORTMRICORE void apply_kspace_filter_RO(const hoNDArray< std::complex<float> >& data, const hoNDArray< std::complex<float> >& fRO, hoNDArray< std::complex<float> >& dataFiltered);
    template EXPORTMRICORE void apply_kspace_filter_RO(const hoNDArray< std::complex<double> >& data, const hoNDArray< std::complex<double> >& fRO, hoNDArray< std::complex<double> >& dataFiltered);
// ------------------------------------------------------------------------
template <typename T>
void apply_kspace_filter_E1(const hoNDArray<T>& data, const hoNDArray<T>& fE1, hoNDArray<T>& dataFiltered)
{
try
{
GADGET_CHECK_THROW(data.get_size(1) == fE1.get_number_of_elements());
hoNDArray<T> fRO(data.get_size(0));
fRO.fill(T(1.0));
hoNDArray<T> fxy;
compute_2d_filter(fRO, fE1, fxy);
Gadgetron::multiply(data, fxy, dataFiltered);
}
catch (...)
{
GADGET_THROW("Errors in apply_kspace_filter_E1(...) ... ");
}
}
template EXPORTMRICORE void apply_kspace_filter_E1(const hoNDArray<float>& data, const hoNDArray<float>& fE1, hoNDArray<float>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_E1(const hoNDArray<double>& data, const hoNDArray<double>& fE1, hoNDArray<double>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_E1(const hoNDArray< std::complex<float> >& data, const hoNDArray< std::complex<float> >& fE1, hoNDArray< std::complex<float> >& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_E1(const hoNDArray< std::complex<double> >& data, const hoNDArray< std::complex<double> >& fE1, hoNDArray< std::complex<double> >& dataFiltered);
// ------------------------------------------------------------------------
    // Applies a precomputed 2D RO x E1 filter to the data (out-of-place).
    template <typename T>
    void apply_kspace_filter_ROE1(const hoNDArray<T>& data, const hoNDArray<T>& fROE1, hoNDArray<T>& dataFiltered)
    {
        try
        {
            // The 2D filter must match the data in both RO and E1 dimensions.
            GADGET_CHECK_THROW(data.get_size(0) == fROE1.get_size(0));
            GADGET_CHECK_THROW(data.get_size(1) == fROE1.get_size(1));
            Gadgetron::multiply(data, fROE1, dataFiltered);
        }
        catch (...)
        {
            GADGET_THROW("Errors in apply_kspace_filter_ROE1(...) ... ");
        }
    }
    template EXPORTMRICORE void apply_kspace_filter_ROE1(const hoNDArray<float>& data, const hoNDArray<float>& fROE1, hoNDArray<float>& dataFiltered);
    template EXPORTMRICORE void apply_kspace_filter_ROE1(const hoNDArray<double>& data, const hoNDArray<double>& fROE1, hoNDArray<double>& dataFiltered);
    template EXPORTMRICORE void apply_kspace_filter_ROE1(const hoNDArray< std::complex<float> >& data, const hoNDArray< std::complex<float> >& fROE1, hoNDArray< std::complex<float> >& dataFiltered);
    template EXPORTMRICORE void apply_kspace_filter_ROE1(const hoNDArray< std::complex<double> >& data, const hoNDArray< std::complex<double> >& fROE1, hoNDArray< std::complex<double> >& dataFiltered);
// ------------------------------------------------------------------------
template <typename T>
void apply_kspace_filter_ROE1(const hoNDArray<T>& data, const hoNDArray<T>& fRO, const hoNDArray<T>& fE1, hoNDArray<T>& dataFiltered)
{
try
{
GADGET_CHECK_THROW(data.get_size(0) == fRO.get_size(0));
GADGET_CHECK_THROW(data.get_size(1) == fE1.get_size(0));
hoNDArray<T> fROE1;
compute_2d_filter(fRO, fE1, fROE1);
Gadgetron::multiply(data, fROE1, dataFiltered);
}
catch (...)
{
GADGET_THROW("Errors in apply_kspace_filter_ROE1(...) ... ");
}
}
template EXPORTMRICORE void apply_kspace_filter_ROE1(const hoNDArray<float>& data, const hoNDArray<float>& fRO, const hoNDArray<float>& fE1, hoNDArray<float>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_ROE1(const hoNDArray<double>& data, const hoNDArray<double>& fRO, const hoNDArray<double>& fE1, hoNDArray<double>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_ROE1(const hoNDArray< std::complex<float> >& data, const hoNDArray< std::complex<float> >& fRO, const hoNDArray< std::complex<float> >& fE1, hoNDArray< std::complex<float> >& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_ROE1(const hoNDArray< std::complex<double> >& data, const hoNDArray< std::complex<double> >& fRO, const hoNDArray< std::complex<double> >& fE1, hoNDArray< std::complex<double> >& dataFiltered);
// ------------------------------------------------------------------------
template <typename T>
void apply_kspace_filter_E2(const hoNDArray<T>& data, const hoNDArray<T>& fE2, hoNDArray<T>& dataFiltered)
{
try
{
GADGET_CHECK_THROW(data.get_size(2) == fE2.get_number_of_elements());
hoNDArray<T> fRO(data.get_size(0));
fRO.fill(T(1.0));
hoNDArray<T> fE1(data.get_size(1));
fE1.fill(T(1.0));
hoNDArray<T> fxyz;
compute_3d_filter(fRO, fE1, fE2, fxyz);
Gadgetron::multiply(data, fxyz, dataFiltered);
}
catch (...)
{
GADGET_THROW("Errors in apply_kspace_filter_E2(...) ... ");
}
}
template EXPORTMRICORE void apply_kspace_filter_E2(const hoNDArray<float>& data, const hoNDArray<float>& fE2, hoNDArray<float>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_E2(const hoNDArray<double>& data, const hoNDArray<double>& fE2, hoNDArray<double>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_E2(const hoNDArray< std::complex<float> >& data, const hoNDArray< std::complex<float> >& fE2, hoNDArray< std::complex<float> >& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_E2(const hoNDArray< std::complex<double> >& data, const hoNDArray< std::complex<double> >& fE2, hoNDArray< std::complex<double> >& dataFiltered);
// ------------------------------------------------------------------------
template <typename T>
void apply_kspace_filter_ROE2(const hoNDArray<T>& data, const hoNDArray<T>& fRO, const hoNDArray<T>& fE2, hoNDArray<T>& dataFiltered)
{
try
{
GADGET_CHECK_THROW(data.get_size(0) == fRO.get_number_of_elements());
GADGET_CHECK_THROW(data.get_size(2) == fE2.get_number_of_elements());
hoNDArray<T> fE1(data.get_size(1));
fE1.fill(T(1.0));
hoNDArray<T> fxyz;
compute_3d_filter(fRO, fE1, fE2, fxyz);
Gadgetron::multiply(data, fxyz, dataFiltered);
}
catch (...)
{
GADGET_THROW("Errors in apply_kspace_filter_ROE2(...) ... ");
}
}
template EXPORTMRICORE void apply_kspace_filter_ROE2(const hoNDArray<float>& data, const hoNDArray<float>& fRO, const hoNDArray<float>& fE2, hoNDArray<float>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_ROE2(const hoNDArray<double>& data, const hoNDArray<double>& fRO, const hoNDArray<double>& fE2, hoNDArray<double>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_ROE2(const hoNDArray< std::complex<float> >& data, const hoNDArray< std::complex<float> >& fRO, const hoNDArray< std::complex<float> >& fE2, hoNDArray< std::complex<float> >& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_ROE2(const hoNDArray< std::complex<double> >& data, const hoNDArray< std::complex<double> >& fRO, const hoNDArray< std::complex<double> >& fE2, hoNDArray< std::complex<double> >& dataFiltered);
// ------------------------------------------------------------------------
template <typename T>
void apply_kspace_filter_E1E2(const hoNDArray<T>& data, const hoNDArray<T>& fE1, const hoNDArray<T>& fE2, hoNDArray<T>& dataFiltered)
{
try
{
GADGET_CHECK_THROW(data.get_size(1) == fE1.get_number_of_elements());
GADGET_CHECK_THROW(data.get_size(2) == fE2.get_number_of_elements());
hoNDArray<T> fRO(data.get_size(0));
fRO.fill(T(1.0));
hoNDArray<T> fxyz;
compute_3d_filter(fRO, fE1, fE2, fxyz);
Gadgetron::multiply(data, fxyz, dataFiltered);
}
catch (...)
{
GADGET_THROW("Errors in apply_kspace_filter_E1E2(...) ... ");
}
}
template EXPORTMRICORE void apply_kspace_filter_E1E2(const hoNDArray<float>& data, const hoNDArray<float>& fE1, const hoNDArray<float>& fE2, hoNDArray<float>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_E1E2(const hoNDArray<double>& data, const hoNDArray<double>& fE1, const hoNDArray<double>& fE2, hoNDArray<double>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_E1E2(const hoNDArray< std::complex<float> >& data, const hoNDArray< std::complex<float> >& fE1, const hoNDArray< std::complex<float> >& fE2, hoNDArray< std::complex<float> >& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_E1E2(const hoNDArray< std::complex<double> >& data, const hoNDArray< std::complex<double> >& fE1, const hoNDArray< std::complex<double> >& fE2, hoNDArray< std::complex<double> >& dataFiltered);
// ------------------------------------------------------------------------
    // Applies a precomputed 3D RO x E1 x E2 filter to the data (out-of-place).
    template <typename T>
    void apply_kspace_filter_ROE1E2(const hoNDArray<T>& data, const hoNDArray<T>& fROE1E2, hoNDArray<T>& dataFiltered)
    {
        try
        {
            // The 3D filter must match the data in RO, E1 and E2.
            GADGET_CHECK_THROW(data.get_size(0) == fROE1E2.get_size(0));
            GADGET_CHECK_THROW(data.get_size(1) == fROE1E2.get_size(1));
            GADGET_CHECK_THROW(data.get_size(2) == fROE1E2.get_size(2));
            Gadgetron::multiply(data, fROE1E2, dataFiltered);
        }
        catch (...)
        {
            GADGET_THROW("Errors in apply_kspace_filter_ROE1E2(...) ... ");
        }
    }
    template EXPORTMRICORE void apply_kspace_filter_ROE1E2(const hoNDArray<float>& data, const hoNDArray<float>& fROE1E2, hoNDArray<float>& dataFiltered);
    template EXPORTMRICORE void apply_kspace_filter_ROE1E2(const hoNDArray<double>& data, const hoNDArray<double>& fROE1E2, hoNDArray<double>& dataFiltered);
    template EXPORTMRICORE void apply_kspace_filter_ROE1E2(const hoNDArray< std::complex<float> >& data, const hoNDArray< std::complex<float> >& fROE1E2, hoNDArray< std::complex<float> >& dataFiltered);
    template EXPORTMRICORE void apply_kspace_filter_ROE1E2(const hoNDArray< std::complex<double> >& data, const hoNDArray< std::complex<double> >& fROE1E2, hoNDArray< std::complex<double> >& dataFiltered);
// ------------------------------------------------------------------------
template <typename T>
void apply_kspace_filter_ROE1E2(const hoNDArray<T>& data, const hoNDArray<T>& fRO, const hoNDArray<T>& fE1, const hoNDArray<T>& fE2, hoNDArray<T>& dataFiltered)
{
try
{
GADGET_CHECK_THROW(data.get_size(0) == fRO.get_number_of_elements());
GADGET_CHECK_THROW(data.get_size(1) == fE1.get_number_of_elements());
GADGET_CHECK_THROW(data.get_size(2) == fE2.get_number_of_elements());
hoNDArray<T> fxyz;
compute_3d_filter(fRO, fE1, fE2, fxyz);
Gadgetron::multiply(data, fxyz, dataFiltered);
}
catch (...)
{
GADGET_THROW("Errors in apply_kspace_filter_ROE1E2(...) ... ");
}
}
template EXPORTMRICORE void apply_kspace_filter_ROE1E2(const hoNDArray<float>& data, const hoNDArray<float>& fRO, const hoNDArray<float>& fE1, const hoNDArray<float>& fE2, hoNDArray<float>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_ROE1E2(const hoNDArray<double>& data, const hoNDArray<double>& fRO, const hoNDArray<double>& fE1, const hoNDArray<double>& fE2, hoNDArray<double>& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_ROE1E2(const hoNDArray< std::complex<float> >& data, const hoNDArray< std::complex<float> >& fRO, const hoNDArray< std::complex<float> >& fE1, const hoNDArray< std::complex<float> >& fE2, hoNDArray< std::complex<float> >& dataFiltered);
template EXPORTMRICORE void apply_kspace_filter_ROE1E2(const hoNDArray< std::complex<double> >& data, const hoNDArray< std::complex<double> >& fRO, const hoNDArray< std::complex<double> >& fE1, const hoNDArray< std::complex<double> >& fE2, hoNDArray< std::complex<double> >& dataFiltered);
// ------------------------------------------------------------------------
void find_symmetric_sampled_region(size_t start, size_t end, size_t center, size_t& startSym, size_t& endSym)
{
GADGET_CHECK_THROW(end >= start);
GADGET_CHECK_THROW(center >= start);
GADGET_CHECK_THROW(end >= center);
size_t halfSizeStart = center - start;
size_t halfSizeEnd = end - center;
if (halfSizeStart > halfSizeEnd)
{
startSym = center - halfSizeEnd;
endSym = center + halfSizeEnd;
}
else
{
startSym = center - halfSizeStart;
endSym = center + halfSizeStart;
}
}
// ------------------------------------------------------------------------
template<typename T>
void compute_filter_SNR_unit_scale_factor(const hoNDArray<T>& filter, T& scalFactor)
{
size_t ii, len;
len = filter.get_number_of_elements();
if (len == 0)
{
scalFactor = T(1.0);
return;
}
T sos(0.0);
for (ii = 0; ii<len; ii++)
{
sos += filter(ii)*filter(ii);
}
scalFactor = (T)(1.0 / std::sqrt(std::abs(sos) / len));
}
template EXPORTMRICORE void compute_filter_SNR_unit_scale_factor(const hoNDArray<float>& filter, float& scalFactor);
template EXPORTMRICORE void compute_filter_SNR_unit_scale_factor(const hoNDArray<double>& filter, double& scalFactor);
template<> EXPORTMRICORE
void compute_filter_SNR_unit_scale_factor(const hoNDArray< std::complex<float> >& filter, std::complex<float> & scalFactor)
{
size_t ii, len;
len = filter.get_number_of_elements();
if (len == 0)
{
scalFactor = std::complex<float>(1.0);
return;
}
std::complex<float> sos(0.0);
for (ii = 0; ii<len; ii++)
{
sos += filter(ii)*std::conj(filter(ii));
}
scalFactor = (std::complex<float>)(1.0 / std::sqrt(std::abs(sos) / len));
}
template<> EXPORTMRICORE
void compute_filter_SNR_unit_scale_factor(const hoNDArray< std::complex<double> >& filter, std::complex<double> & scalFactor)
{
size_t ii, len;
len = filter.get_number_of_elements();
if (len == 0)
{
scalFactor = std::complex<double>(1.0);
return;
}
std::complex<double> sos(0.0);
for (ii = 0; ii<len; ii++)
{
sos += filter(ii)*std::conj(filter(ii));
}
scalFactor = (std::complex<double>)(1.0 / std::sqrt(std::abs(sos) / len));
}
// ------------------------------------------------------------------------
}
|
//
// This is example code from Chapter 17.3.1 "The sizeof operator" of
// "Programming -- Principles and Practice Using C++" by Bjarne Stroustrup
//
#include <iostream>
#include <vector>
using namespace std;
//------------------------------------------------------------------------------
// Demonstrates sizeof on fundamental types, pointers, and a vector object.
int main()
{
    cout << "the size of char is " << sizeof(char) << ' ' << sizeof('a') << '\n';
    cout << "the size of int is " << sizeof(int) << ' ' << sizeof(2 + 2) << '\n';

    // Use nullptr (type-safe null pointer constant) rather than the literal 0.
    int* p = nullptr;
    cout << "the size of int* is " << sizeof(int*) << ' ' << sizeof(p) << '\n';

    vector<int> v(1000);
    // sizeof reports the fixed size of the vector object itself (its internal
    // bookkeeping), not the heap storage for the 1000 elements.
    cout << "the size of vector<int>(1000) is "
         << sizeof(vector<int>) << ' ' << sizeof(v) << '\n';
}
//------------------------------------------------------------------------------
|
#include "execution/ast/type.h"
#include <unordered_map>
#include <utility>
#include "brain/operating_unit.h"
#include "execution/exec/execution_context.h"
#include "execution/sql/aggregation_hash_table.h"
#include "execution/sql/aggregators.h"
#include "execution/sql/filter_manager.h"
#include "execution/sql/hash_table_entry.h"
#include "execution/sql/index_iterator.h"
#include "execution/sql/join_hash_table.h"
#include "execution/sql/join_hash_table_vector_probe.h"
#include "execution/sql/sorter.h"
#include "execution/sql/table_vector_iterator.h"
#include "execution/sql/thread_state_container.h"
#include "execution/sql/value.h"
#include "execution/sql/vector_projection_iterator.h"
// #include "execution/util/csv_reader.h" Fix later.
namespace terrier::execution::ast {
// ---------------------------------------------------------
// Type
// ---------------------------------------------------------
// TODO(pmenon): Fix me
// A type is arithmetic if it is a primitive TPL numeric type or one of the
// SQL numeric builtins (Integer, Real, Decimal).
bool Type::IsArithmetic() const {
  if (IsIntegerType() || IsFloatType()) {
    return true;
  }
  return IsSpecificBuiltin(BuiltinType::Integer) || IsSpecificBuiltin(BuiltinType::Real) ||
         IsSpecificBuiltin(BuiltinType::Decimal);
}
// ---------------------------------------------------------
// Builtin Type
// ---------------------------------------------------------
// TPL-visible name of each builtin type, indexed by kind. Primitive types use
// their declared TPL name; all other kinds use the stringified kind identifier.
const char *BuiltinType::tpl_names[] = {
#define PRIM(BKind, CppType, Name, ...) Name,
#define OTHERS(BKind, ...) #BKind,
    BUILTIN_TYPE_LIST(PRIM, OTHERS, OTHERS)
// Undefine the helper macros that were actually defined above; the previous
// `#undef F` removed nothing and leaked PRIM/OTHERS into the rest of the TU.
#undef OTHERS
#undef PRIM
};
// Stringified C++ implementation type for each builtin kind.
const char *BuiltinType::cpp_names[] = {
#define F(BKind, CppType, ...) #CppType,
    BUILTIN_TYPE_LIST(F, F, F)
#undef F
};
// sizeof() of the C++ implementation type for each builtin kind.
const uint64_t BuiltinType::SIZES[] = {
#define F(BKind, CppType, ...) sizeof(CppType),
    BUILTIN_TYPE_LIST(F, F, F)
#undef F
};
// Alignment requirement of the C++ implementation type for each builtin kind.
const uint64_t BuiltinType::ALIGNMENTS[] = {
#define F(Kind, CppType, ...) std::alignment_of_v<CppType>,
    BUILTIN_TYPE_LIST(F, F, F)
#undef F
};
// Whether the C++ implementation type is a fundamental (primitive) type.
const bool BuiltinType::PRIMITIVE_FLAGS[] = {
#define F(Kind, CppType, ...) std::is_fundamental_v<CppType>,
    BUILTIN_TYPE_LIST(F, F, F)
#undef F
};
// Whether the C++ implementation type is a floating-point type.
const bool BuiltinType::FLOATING_POINT_FLAGS[] = {
#define F(Kind, CppType, ...) std::is_floating_point_v<CppType>,
    BUILTIN_TYPE_LIST(F, F, F)
#undef F
};
// Whether the C++ implementation type is a signed type.
const bool BuiltinType::SIGNED_FLAGS[] = {
#define F(Kind, CppType, ...) std::is_signed_v<CppType>,
    BUILTIN_TYPE_LIST(F, F, F)
#undef F
};
// ---------------------------------------------------------
// Function Type
// ---------------------------------------------------------
// A function value is represented at runtime as a code pointer, hence the size
// and alignment of void*. (The parameter name was restored from the garbled
// "&&¶ms" — an HTML-entity mangling of "&&params", as confirmed by the
// std::move(params) in the initializer list.)
FunctionType::FunctionType(util::RegionVector<Field> &&params, Type *ret)
    : Type(ret->GetContext(), sizeof(void *), alignof(void *), TypeId::FunctionType),
      params_(std::move(params)),
      ret_(ret) {}
// ---------------------------------------------------------
// Map Type
// ---------------------------------------------------------
// Size/alignment are modeled on std::unordered_map<int32_t, int32_t>; the
// map object's own footprint does not depend on the key/value element types.
MapType::MapType(Type *key_type, Type *val_type)
    : Type(key_type->GetContext(), sizeof(std::unordered_map<int32_t, int32_t>),
           alignof(std::unordered_map<int32_t, int32_t>), TypeId::MapType),
      key_type_(key_type),
      val_type_(val_type) {}
// ---------------------------------------------------------
// Struct Type
// ---------------------------------------------------------
// Size, alignment and per-field offsets are computed by the caller (layout is
// decided elsewhere); this constructor only stores them.
StructType::StructType(Context *ctx, uint32_t size, uint32_t alignment, util::RegionVector<Field> &&fields,
                       util::RegionVector<uint32_t> &&field_offsets)
    : Type(ctx, size, alignment, TypeId::StructType),
      fields_(std::move(fields)),
      field_offsets_(std::move(field_offsets)) {}
} // namespace terrier::execution::ast
|
// Copyright (c) 2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin developers
// Copyright (c) 2014-2015 The Dash developers
// Copyright (c) 2015-2017 The PIVX developers
// Copyright (c) 2018 The PYMT developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "chainparams.h"
#include "random.h"
#include "util.h"
#include "utilstrencodings.h"
#include <assert.h>
#include <boost/assign/list_of.hpp>
using namespace std;
using namespace boost::assign;
// Fixed-seed entry as emitted in chainparamsseeds.h.
struct SeedSpec6 {
    uint8_t addr[16]; // raw 16-byte address (IPv6, or IPv4-mapped)
    uint16_t port;    // TCP port
};
#include "chainparamsseeds.h"
/**
* Main network
*/
//! Convert the pnSeeds6 array into usable address objects.
static void convertSeed6(std::vector<CAddress>& vSeedsOut, const SeedSpec6* data, unsigned int count)
{
// It'll only connect to one or two seed nodes because once it connects,
// it'll get a pile of addresses with newer timestamps.
// Seed nodes are given a random 'last seen time' of between one and two
// weeks ago.
const int64_t nOneWeek = 7 * 24 * 60 * 60;
for (unsigned int i = 0; i < count; i++) {
struct in6_addr ip;
memcpy(&ip, data[i].addr, sizeof(ip));
CAddress addr(CService(ip, data[i].port));
addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek;
vSeedsOut.push_back(addr);
}
}
// What makes a good checkpoint block?
// + Is surrounded by blocks with reasonable timestamps
// (no blocks before with a timestamp after, none after with
// timestamp before)
// + Contains no strange transactions
// Known-good (height -> block hash) pairs for mainnet; used to reject forks
// that rewrite history below these heights.
static Checkpoints::MapCheckpoints mapCheckpoints =
    boost::assign::map_list_of
    (0, uint256("0x00000104b06b1cd499cced83d878ea92809cd84fc8d2b5523e19d2d16c99f549"))
    (150, uint256("0x00000929b295ce073f4dd9a146893b3e33dafaae7ed36d5bb2ad3159e5ce15df"))
    (200, uint256("0x00000087186aee20b63303710171cf299adbef3d373581b0510873e2ab709b0b"));
static const Checkpoints::CCheckpointData data = {
    &mapCheckpoints,
    1539647004, // * UNIX timestamp of last checkpoint block
    202,        // * total number of transactions between genesis and last checkpoint
                //   (the tx=... number in the SetBestChain debug.log lines)
    2000        // * estimated number of transactions per day after checkpoint
};
// Testnet checkpoints: genesis block only.
static Checkpoints::MapCheckpoints mapCheckpointsTestnet =
    boost::assign::map_list_of(0, uint256("0x00000715ca10a76a9ed27f745ee5133934b8b30bb07d47948827b9fa16b0eb87"));
static const Checkpoints::CCheckpointData dataTestnet = {
    &mapCheckpointsTestnet,
    1538275048,
    0,
    250};
// Regtest checkpoints: genesis block only.
static Checkpoints::MapCheckpoints mapCheckpointsRegtest =
    boost::assign::map_list_of(0, uint256("0x300e9ef306fa2262674d6251dd4c0177d24dbadb9662846a76164172beec45ff"));
static const Checkpoints::CCheckpointData dataRegtest = {
    &mapCheckpointsRegtest,
    1538265600,
    0,
    100};
// Consensus and network parameters for the PYMT main network.
class CMainParams : public CChainParams
{
public:
    CMainParams()
    {
        networkID = CBaseChainParams::MAIN;
        strNetworkID = "main";
        // Network magic: first four bytes of every p2p message on mainnet.
        pchMessageStart[0] = 0xf4;
        pchMessageStart[1] = 0xcb;
        pchMessageStart[2] = 0xbd;
        pchMessageStart[3] = 0xe2;
        vAlertPubKey = ParseHex("04a36a426b812a83776c6edead336d19af5d2ca3a548697acb3c6f201ba51b44758619b281f1096b65e643b4d996b90afe4a9e08dd6e54600d5c81897a1ec3a453");
        nDefaultPort = 37006;
        bnProofOfWorkLimit = ~uint256(0) >> 20; // PYMT starting PoW limit (NOTE: inherited comment claimed 1/2^12, but the shift here is 20 bits)
        nSubsidyHalvingInterval = 864000;
        nMaxReorganizationDepth = 100;
        nEnforceBlockUpgradeMajority = 750;
        nRejectBlockOutdatedMajority = 950;
        nToCheckBlockUpgradeMajority = 1000;
        nMinerThreads = 0;
        nTargetTimespan = 1 * 60; // 60 seconds (inherited comment said "1 day"; the value is one minute)
        nTargetSpacing = 1 * 60; // PYMT: 1 minute
        nLastPOWBlock = 300;
        nMaturity = 90;
        nMasternodeCountDrift = 10;
        nMasternodeCollateralLimit = 1000;
        nModifierUpdateBlock = 1;
        nMaxMoneyOut = 20000000 * COIN;
        // Genesis block: the timestamp headline proves the chain could not have
        // been started before the quoted news story was published.
        const char* pszTimestamp = "The Times - How to be a sex worker: Brighton University advice for freshers";
        CMutableTransaction txNew;
        txNew.vin.resize(1);
        txNew.vout.resize(1);
        txNew.vin[0].scriptSig = CScript() << 4194596 << CScriptNum(4) << vector<unsigned char>((const unsigned char*)pszTimestamp, (const unsigned char*)pszTimestamp + strlen(pszTimestamp));
        txNew.vout[0].nValue = 10 * COIN;
        txNew.vout[0].scriptPubKey = CScript() << ParseHex("04792b5475e8199e0798d70f18bcff945c42e0abff135467fe61dc7491579a2c7e1fc8754def07bc43fe15138ec01c8124f10b4829397ca3a3c9cfba06ab394f7d") << OP_CHECKSIG;
        genesis.vtx.push_back(txNew);
        genesis.hashPrevBlock = 0;
        genesis.hashMerkleRoot = genesis.BuildMerkleTree();
        genesis.nVersion = 1;
        genesis.nTime = 1538265600;
        genesis.nBits = 0x1e0fffff;
        genesis.nNonce = 961023;
        hashGenesisBlock = genesis.GetHash();
        // Sanity-check the hard-coded genesis against the mined hash/merkle root.
        assert(hashGenesisBlock == uint256("0x00000104b06b1cd499cced83d878ea92809cd84fc8d2b5523e19d2d16c99f549"));
        assert(genesis.hashMerkleRoot == uint256("0x23b5bae265343a0fe0ce6fe1c1358fb89c9fcb0a885f3eda3e8a2c79e7f486b1"));
        vSeeds.push_back(CDNSSeedData("54.37.205.229", "54.37.205.229"));
        vSeeds.push_back(CDNSSeedData("144.217.161.46", "144.217.161.46"));
        // Base58 version bytes for addresses and keys.
        base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1, 45);
        base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1, 31);
        base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1, 60);
        base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x88)(0xB2)(0x1E).convert_to_container<std::vector<unsigned char> >();
        base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x88)(0xAD)(0xE4).convert_to_container<std::vector<unsigned char> >();
        base58Prefixes[EXT_COIN_TYPE] = boost::assign::list_of(0x80)(0x00)(0x00)(0x77).convert_to_container<std::vector<unsigned char> >();
        convertSeed6(vFixedSeeds, pnSeed6_main, ARRAYLEN(pnSeed6_main));
        fRequireRPCPassword = true;
        fMiningRequiresPeers = true;
        fAllowMinDifficultyBlocks = false;
        fDefaultConsistencyChecks = false;
        fRequireStandard = true;
        fMineBlocksOnDemand = false;
        fSkipProofOfWorkCheck = false;
        fTestnetToBeDeprecatedFieldRPC = false;
        fHeadersFirstSyncingActive = false;
        nPoolMaxTransactions = 3;
        strSporkKey = "04050adbaaab7ab704cf78367374d0661cb0e82050838075c13e6e53deb324d5466243a1cd9c54bd2abc43605aeaa48032fa6520bb7c8d6a20b9f88394134dc847";
        strObfuscationPoolDummyAddress = "KDjbvBPN5US5tZeFVeGWicahobjTZG9yLy";
        nStartMasternodePayments = 1539648000;
    }
    const Checkpoints::CCheckpointData& Checkpoints() const
    {
        return data;
    }
};
static CMainParams mainParams;
/**
* Testnet (v3)
*/
// Testnet parameters; inherits mainnet values and overrides what differs.
class CTestNetParams : public CMainParams
{
public:
    CTestNetParams()
    {
        networkID = CBaseChainParams::TESTNET;
        strNetworkID = "test";
        pchMessageStart[0] = 0xb1;
        pchMessageStart[1] = 0xe2;
        pchMessageStart[2] = 0xf4;
        pchMessageStart[3] = 0xc3;
        vAlertPubKey = ParseHex("04792b5475e8199e0798d70f18bcff945c42e0abff135467fe61dc7491579a2c7e1fc8754def07bc43fe15138ec01c8124f10b4829397ca3a3c9cfba06ab394f7d");
        nDefaultPort = 37005;
        nEnforceBlockUpgradeMajority = 51;
        nRejectBlockOutdatedMajority = 75;
        nToCheckBlockUpgradeMajority = 100;
        nMinerThreads = 0;
        nTargetTimespan = 1 * 60; // 60 seconds (inherited comment said "1 day"; the value is one minute)
        nTargetSpacing = 1 * 60; // PYMT: 1 minute
        nLastPOWBlock = 150;
        nMaturity = 15;
        nMasternodeCountDrift = 4;
        nMasternodeCollateralLimit = 1000;
        nModifierUpdateBlock = 1; // PYMT uses version 2 blocks
        nMaxMoneyOut = 200000000 * COIN;
        //! Modify the testnet genesis block so the timestamp is valid for a later start.
        genesis.nTime = 1538275048;
        genesis.nNonce = 918077;
        hashGenesisBlock = genesis.GetHash();
        assert(hashGenesisBlock == uint256("0x00000715ca10a76a9ed27f745ee5133934b8b30bb07d47948827b9fa16b0eb87"));
        assert(genesis.hashMerkleRoot == uint256("0x23b5bae265343a0fe0ce6fe1c1358fb89c9fcb0a885f3eda3e8a2c79e7f486b1"));
        vFixedSeeds.clear();
        vSeeds.clear();
        base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1, 108);
        base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1, 90);
        base58Prefixes[SECRET_KEY] = std::vector<unsigned char>(1, 239);
        base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x35)(0x87)(0xCF).convert_to_container<std::vector<unsigned char> >();
        base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x35)(0x83)(0x94).convert_to_container<std::vector<unsigned char> >();
        base58Prefixes[EXT_COIN_TYPE] = boost::assign::list_of(0x80)(0x00)(0x00)(0x01).convert_to_container<std::vector<unsigned char> >();
        convertSeed6(vFixedSeeds, pnSeed6_test, ARRAYLEN(pnSeed6_test));
        fRequireRPCPassword = true;
        fMiningRequiresPeers = false;
        fAllowMinDifficultyBlocks = true;
        fDefaultConsistencyChecks = false;
        fRequireStandard = false;
        fMineBlocksOnDemand = false;
        fSkipProofOfWorkCheck = true;
        fTestnetToBeDeprecatedFieldRPC = true;
        nPoolMaxTransactions = 2;
        strSporkKey = "04050adbaaab7ab704cf78367374d0661cb0e82050838075c13e6e53deb324d5466243a1cd9c54bd2abc43605aeaa48032fa6520bb7c8d6a20b9f88394134dc847";
        strObfuscationPoolDummyAddress = "k7t1mjRDgu5DDRKrTMf5iFBr2wZzu8os9w";
        nStartMasternodePayments = 1539648000;
    }
    const Checkpoints::CCheckpointData& Checkpoints() const
    {
        return dataTestnet;
    }
};
static CTestNetParams testNetParams;
/**
* Regression test
*/
// Regression-test parameters: trivially low difficulty, no seeds, on-demand mining.
class CRegTestParams : public CTestNetParams
{
public:
    CRegTestParams()
    {
        networkID = CBaseChainParams::REGTEST;
        strNetworkID = "regtest";
        pchMessageStart[0] = 0xa3;
        pchMessageStart[1] = 0xf1;
        pchMessageStart[2] = 0xd4;
        pchMessageStart[3] = 0xf3;
        nSubsidyHalvingInterval = 150;
        nEnforceBlockUpgradeMajority = 750;
        nRejectBlockOutdatedMajority = 950;
        nToCheckBlockUpgradeMajority = 1000;
        nMinerThreads = 1;
        nTargetTimespan = 24 * 60 * 60; // PYMT: 1 day
        nTargetSpacing = 1 * 60; // PYMT: 1 minute
        // Lowest possible difficulty so blocks can be mined instantly in tests.
        bnProofOfWorkLimit = ~uint256(0) >> 1;
        nDefaultPort = 38006;
        genesis.nTime = 1538265600;
        genesis.nBits = 0x207fffff;
        genesis.nNonce = 2;
        hashGenesisBlock = genesis.GetHash();
        assert(hashGenesisBlock == uint256("0x300e9ef306fa2262674d6251dd4c0177d24dbadb9662846a76164172beec45ff"));
        assert(genesis.hashMerkleRoot == uint256("0x23b5bae265343a0fe0ce6fe1c1358fb89c9fcb0a885f3eda3e8a2c79e7f486b1"));
        vFixedSeeds.clear(); //! regtest mode doesn't have any fixed seeds.
        vSeeds.clear();      //! regtest mode doesn't have any DNS seeds.
        fRequireRPCPassword = false;
        fMiningRequiresPeers = false;
        fAllowMinDifficultyBlocks = true;
        fDefaultConsistencyChecks = true;
        fRequireStandard = false;
        fMineBlocksOnDemand = true;
        fTestnetToBeDeprecatedFieldRPC = false;
    }
    const Checkpoints::CCheckpointData& Checkpoints() const
    {
        return dataRegtest;
    }
};
static CRegTestParams regTestParams;
/**
* Unit test
*/
// Unit-test parameters: mainnet defaults plus runtime-mutable knobs exposed
// through the CModifiableParams interface.
class CUnitTestParams : public CMainParams, public CModifiableParams
{
public:
    CUnitTestParams()
    {
        networkID = CBaseChainParams::UNITTEST;
        strNetworkID = "unittest";
        nDefaultPort = 38005;
        vFixedSeeds.clear(); //! Unit test mode doesn't have any fixed seeds.
        vSeeds.clear();      //! Unit test mode doesn't have any DNS seeds.
        fRequireRPCPassword = false;
        fMiningRequiresPeers = false;
        fDefaultConsistencyChecks = true;
        fAllowMinDifficultyBlocks = false;
        fMineBlocksOnDemand = true;
    }
    const Checkpoints::CCheckpointData& Checkpoints() const
    {
        // UnitTest share the same checkpoints as MAIN
        return data;
    }
    //! Published setters to allow changing values in unit test cases
    virtual void setSubsidyHalvingInterval(int anSubsidyHalvingInterval) { nSubsidyHalvingInterval = anSubsidyHalvingInterval; }
    virtual void setEnforceBlockUpgradeMajority(int anEnforceBlockUpgradeMajority) { nEnforceBlockUpgradeMajority = anEnforceBlockUpgradeMajority; }
    virtual void setRejectBlockOutdatedMajority(int anRejectBlockOutdatedMajority) { nRejectBlockOutdatedMajority = anRejectBlockOutdatedMajority; }
    virtual void setToCheckBlockUpgradeMajority(int anToCheckBlockUpgradeMajority) { nToCheckBlockUpgradeMajority = anToCheckBlockUpgradeMajority; }
    virtual void setDefaultConsistencyChecks(bool afDefaultConsistencyChecks) { fDefaultConsistencyChecks = afDefaultConsistencyChecks; }
    virtual void setAllowMinDifficultyBlocks(bool afAllowMinDifficultyBlocks) { fAllowMinDifficultyBlocks = afAllowMinDifficultyBlocks; }
    virtual void setSkipProofOfWorkCheck(bool afSkipProofOfWorkCheck) { fSkipProofOfWorkCheck = afSkipProofOfWorkCheck; }
};
static CUnitTestParams unitTestParams;
static CChainParams* pCurrentParams = 0;
// Returns the mutable-parameter interface; only valid when the unit-test
// network is selected (asserted below).
CModifiableParams* ModifiableParams()
{
    assert(pCurrentParams);
    assert(pCurrentParams == &unitTestParams);
    // CUnitTestParams derives from CModifiableParams, so the implicit
    // derived-to-base conversion suffices; the previous C-style cast was
    // unnecessary (and would silently hide a future hierarchy change).
    return &unitTestParams;
}
// Returns the active chain parameters; SelectParams() must have been called.
const CChainParams& Params()
{
    assert(pCurrentParams);
    return *pCurrentParams;
}
// Maps a network identifier to its (static) parameter object.
CChainParams& Params(CBaseChainParams::Network network)
{
    switch (network) {
    case CBaseChainParams::MAIN:
        return mainParams;
    case CBaseChainParams::TESTNET:
        return testNetParams;
    case CBaseChainParams::REGTEST:
        return regTestParams;
    case CBaseChainParams::UNITTEST:
        return unitTestParams;
    default:
        // Unreachable for valid enum values; keeps the compiler satisfied.
        assert(false && "Unimplemented network");
        return mainParams;
    }
}
// Selects both the base (RPC/port) parameters and the chain parameters for
// the given network.
void SelectParams(CBaseChainParams::Network network)
{
    SelectBaseParams(network);
    pCurrentParams = &Params(network);
}
// Selects chain parameters based on command-line flags.
// Returns false if the flags name no valid network.
bool SelectParamsFromCommandLine()
{
    CBaseChainParams::Network network = NetworkIdFromCommandLine();
    if (network == CBaseChainParams::MAX_NETWORK_TYPES)
        return false;

    SelectParams(network);
    return true;
}
|
/*
* Action.hpp
* Copyright (c) 2014 Eran Pe'er.
*
* This program is made available under the terms of the MIT License.
*
* Created on Jun 5, 2014
*/
#pragma once
#include <functional>
#include <atomic>
#include <tuple>
#include "mockutils/DefaultValue.hpp"
#include "mockutils/Destructible.hpp"
#include "fakeit/FakeitExceptions.hpp"
namespace fakeit {
// Base interface for stubbed-method actions: an action is invoked with
// the call's arguments and reports via isDone() when it is exhausted and
// should be discarded.
template<typename R, typename ... arglist>
struct Action : public Destructible {
    virtual ~Action() = default;

    virtual R invoke(arglist &... args) = 0;

    virtual bool isDone() = 0;
};
// Action that forwards a fixed number of invocations to a functor.
template<typename R, typename ... arglist>
struct Repeat : public Action<R, arglist...> {
    virtual ~Repeat() = default;

    // Single-shot convenience form: equivalent to Repeat(f, 1).
    Repeat(std::function<R(arglist &...)> f) :
            Repeat(f, 1) {
    }

    Repeat(std::function<R(arglist &...)> f, long times) :
            f(f), times(times) {
    }

    // Consumes one invocation from the budget, then delegates.
    virtual R invoke(arglist &... args) override {
        --times;
        return f(args...);
    }

    // Done exactly when the invocation budget reaches zero.
    virtual bool isDone() override {
        return 0 == times;
    }

private:
    std::function<R(arglist &...)> f;
    long times;
};
// Action that forwards every invocation to a functor, without limit.
template<typename R, typename ... arglist>
struct RepeatForever : public Action<R, arglist...> {
    virtual ~RepeatForever() = default;

    RepeatForever(std::function<R(arglist &...)> method) :
            f(method) {
    }

    virtual R invoke(arglist &... args) override {
        return f(args...);
    }

    // Never exhausted: this action repeats indefinitely.
    virtual bool isDone() override {
        return false;
    }

private:
    std::function<R(arglist &...)> f;
};
// Action that ignores its arguments and produces the default value
// registered for R (see mockutils/DefaultValue.hpp); never exhausted.
template<typename R, typename ... arglist>
struct ReturnDefaultValue : public Action<R, arglist...> {
    virtual ~ReturnDefaultValue() = default;

    virtual R invoke(arglist &...) override {
        return DefaultValue<R>::value();
    }

    virtual bool isDone() override {
        return false;
    }
};
// Action that delegates every invocation to a user-supplied functor;
// never exhausted.
template<typename R, typename ... arglist>
struct ReturnDelegateValue : public Action<R, arglist...> {

    virtual ~ReturnDelegateValue() = default;

    ReturnDelegateValue(std::function<R(arglist &...)> d) :
            _delegate(d) {
    }

    virtual R invoke(arglist &... args) override {
        return _delegate(args...);
    }

    virtual bool isDone() override {
        return false;
    }

private:
    std::function<R(arglist &...)> _delegate;
};
}
|
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/converter/parser/tflite/tflite_while_parser.h"
#include <vector>
#include <memory>
#include "tools/converter/ops/while.h"
#include "nnacl/op_base.h"
namespace mindspore {
namespace lite {
// Parses a TFLite WHILE operator into a mindspore While primitive.
// Reads the condition/body subgraph indices from the operator's builtin
// options. Returns a raw pointer whose ownership passes to the caller,
// or nullptr on allocation failure or missing attributes.
ops::PrimitiveC *TfliteWhileParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
                                          const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph,
                                          const std::unique_ptr<tflite::ModelT> &tflite_model) {
  auto prim = std::make_unique<While>();
  MS_CHECK_TRUE_RET(prim != nullptr, nullptr);
  const auto &tflite_attr = tflite_op->builtin_options.AsWhileOptions();
  if (tflite_attr == nullptr) {
    // The flatbuffer union did not carry WhileOptions for this op.
    MS_LOG(ERROR) << "get While attr failed";
    return nullptr;
  }
  prim->set_cond_subgraph_index(tflite_attr->cond_subgraph_index);
  prim->set_body_subgraph_index(tflite_attr->body_subgraph_index);
  // Release ownership to the caller, per the parser registry contract.
  return prim.release();
}
TfliteNodeRegister g_tfliteWhileParser(tflite::BuiltinOperator_WHILE, new TfliteWhileParser());
} // namespace lite
} // namespace mindspore
|
//-----------------------------------------------------------------------------
// Project : SDK Core
//
// Category : SDK Core Interfaces
// Filename : pluginterfaces/base/funknown.cpp
// Created by : Steinberg, 01/2004
// Description : Basic Interface
//
//-----------------------------------------------------------------------------
// LICENSE
// (c) 2016, Steinberg Media Technologies GmbH, All Rights Reserved
//-----------------------------------------------------------------------------
// This Software Development Kit may not be distributed in parts or its entirety
// without prior written agreement by Steinberg Media Technologies GmbH.
// This SDK must not be used to re-engineer or manipulate any technology used
// in any Steinberg or Third-party application or software module,
// unless permitted by law.
// Neither the name of the Steinberg Media Technologies nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SDK IS PROVIDED BY STEINBERG MEDIA TECHNOLOGIES GMBH "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL STEINBERG MEDIA TECHNOLOGIES GMBH BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//------------------------------------------------------------------------------
#ifndef __funknown__
#include "funknown.h"
#endif
#include "fstrdefs.h"
#include <stdio.h>
#if WINDOWS
#include <objbase.h>
#endif
#if MAC
#include <CoreFoundation/CoreFoundation.h>
#include <libkern/OSAtomic.h>
#if defined (__GNUC__) && (__GNUC__ >= 4) && !__LP64__
// on 32 bit Mac OS X we can safely ignore the format warnings as sizeof(int) == sizeof(long)
#pragma GCC diagnostic ignored "-Wformat"
#endif
#endif
namespace Steinberg {
//------------------------------------------------------------------------
#if COM_COMPATIBLE
#if WINDOWS
#define GuidStruct GUID
#else
struct GuidStruct
{
uint32 Data1;
uint16 Data2;
uint16 Data3;
uint8 Data4[8];
};
#endif
#endif
static void toString8 (char8* string, const char* data, int32 i1, int32 i2);
static void fromString8 (const char8* string, char* data, int32 i1, int32 i2);
static uint32 makeLong (uint8 b1, uint8 b2, uint8 b3, uint8 b4);
//------------------------------------------------------------------------
// FUnknownPrivate
//------------------------------------------------------------------------
namespace FUnknownPrivate {
//------------------------------------------------------------------------
// Adds d to var atomically and returns the new value.
// NOTE(review): the generic fallback branch is NOT atomic — it appears
// to assume platforms other than Windows/Mac are single threaded or
// supply their own implementation; confirm before relying on it.
int32 PLUGIN_API atomicAdd (int32& var, int32 d)
{
#if WINDOWS
	return InterlockedExchangeAdd (&var, d) + d;
#elif MAC
	return OSAtomicAdd32Barrier (d, (int32_t*)&var);
#else
	var += d;
	return var;
#endif
}
} // FUnknownPrivate
//------------------------------------------------------------------------
// FUID implementation
//------------------------------------------------------------------------
// Default constructor: all 16 bytes zero, i.e. an invalid FUID.
FUID::FUID ()
{
	memset (data, 0, sizeof (TUID));
}

//------------------------------------------------------------------------
// Builds the 16-byte ID from four 32-bit words; the byte layout depends
// on COM_COMPATIBLE (see from4Int).
FUID::FUID (uint32 l1, uint32 l2, uint32 l3, uint32 l4)
{
	from4Int (l1, l2, l3, l4);
}

//------------------------------------------------------------------------
// Copy constructor: raw 16-byte copy.
FUID::FUID (const FUID& f)
{
	memcpy (data, f.data, sizeof (TUID));
}
//------------------------------------------------------------------------
// Fills this FUID with a freshly generated GUID/UUID.
// Returns false when generation fails or is unimplemented on the
// current platform.
bool FUID::generate ()
{
#if WINDOWS
	GUID guid;
	HRESULT hr = CoCreateGuid (&guid);
	switch (hr)
	{
		case RPC_S_OK:
			memcpy (data, (char*)&guid, sizeof (TUID));
			return true;

		// RPC_S_UUID_LOCAL_ONLY (id unique to this machine only) is
		// treated as a failure, like every other non-OK status.
		case RPC_S_UUID_LOCAL_ONLY:
		default:
			return false;
	}

#elif MAC
	CFUUIDRef uuid = CFUUIDCreate (kCFAllocatorDefault);
	if (uuid)
	{
		CFUUIDBytes bytes = CFUUIDGetUUIDBytes (uuid);
		memcpy (data, (char*)&bytes, sizeof (TUID));
		CFRelease (uuid);
		return true;
	}
	return false;

#else
	// implement me!
	return false;
#endif
}
//------------------------------------------------------------------------
bool FUID::isValid () const
{
TUID nulluid = {0};
return memcmp (data, nulluid, sizeof (TUID)) != 0;
}
//------------------------------------------------------------------------
// Assignment from another FUID: raw 16-byte copy.
FUID& FUID::operator = (const FUID& f)
{
	memcpy (data, f.data, sizeof (TUID));
	return *this;
}

//------------------------------------------------------------------------
// Assignment from an ID string; uid must point at (at least)
// sizeof (TUID) == 16 readable bytes.
FUID& FUID::operator = (FIDString uid)
{
	memcpy (data, uid, sizeof (TUID));
	return *this;
}

//------------------------------------------------------------------------
// Assignment from a raw TUID byte array.
FUID& FUID::operator = (TUID uid)
{
	memcpy (data, uid, sizeof (TUID));
	return *this;
}
//------------------------------------------------------------------------
// Stores four 32-bit words into the 16 data bytes.
// COM_COMPATIBLE: bytes 0-7 follow the Windows GUID memory layout
// (l1 and the two 16-bit halves of l2 are stored little-endian), while
// bytes 8-15 stay big-endian. Otherwise all four words are big-endian.
void FUID::from4Int (uint32 l1, uint32 l2, uint32 l3, uint32 l4)
{
#if COM_COMPATIBLE
	data [0] = (char)((l1 & 0x000000FF)      );
	data [1] = (char)((l1 & 0x0000FF00) >>  8);
	data [2] = (char)((l1 & 0x00FF0000) >> 16);
	data [3] = (char)((l1 & 0xFF000000) >> 24);
	data [4] = (char)((l2 & 0x00FF0000) >> 16);
	data [5] = (char)((l2 & 0xFF000000) >> 24);
	data [6] = (char)((l2 & 0x000000FF)      );
	data [7] = (char)((l2 & 0x0000FF00) >>  8);
	data [8] = (char)((l3 & 0xFF000000) >> 24);
	data [9] = (char)((l3 & 0x00FF0000) >> 16);
	data [10] = (char)((l3 & 0x0000FF00) >>  8);
	data [11] = (char)((l3 & 0x000000FF)      );
	data [12] = (char)((l4 & 0xFF000000) >> 24);
	data [13] = (char)((l4 & 0x00FF0000) >> 16);
	data [14] = (char)((l4 & 0x0000FF00) >>  8);
	data [15] = (char)((l4 & 0x000000FF)      );
#else
	data [0] = (char)((l1 & 0xFF000000) >> 24);
	data [1] = (char)((l1 & 0x00FF0000) >> 16);
	data [2] = (char)((l1 & 0x0000FF00) >>  8);
	data [3] = (char)((l1 & 0x000000FF)      );
	data [4] = (char)((l2 & 0xFF000000) >> 24);
	data [5] = (char)((l2 & 0x00FF0000) >> 16);
	data [6] = (char)((l2 & 0x0000FF00) >>  8);
	data [7] = (char)((l2 & 0x000000FF)      );
	data [8] = (char)((l3 & 0xFF000000) >> 24);
	data [9] = (char)((l3 & 0x00FF0000) >> 16);
	data [10] = (char)((l3 & 0x0000FF00) >>  8);
	data [11] = (char)((l3 & 0x000000FF)      );
	data [12] = (char)((l4 & 0xFF000000) >> 24);
	data [13] = (char)((l4 & 0x00FF0000) >> 16);
	data [14] = (char)((l4 & 0x0000FF00) >>  8);
	data [15] = (char)((l4 & 0x000000FF)      );
#endif
}
//------------------------------------------------------------------------
// Unpacks the 16 data bytes back into four 32-bit words (the inverse of
// from4Int for the active layout).
void FUID::to4Int (uint32& d1, uint32& d2, uint32& d3, uint32& d4) const
{
	d1 = getLong1 ();
	d2 = getLong2 ();
	d3 = getLong3 ();
	d4 = getLong4 ();
}
//------------------------------------------------------------------------
// getLong1..getLong4 reassemble the four words stored by from4Int,
// undoing each word's layout. Words 3 and 4 are stored big-endian in
// both layouts, hence the identical branches there.
uint32 FUID::getLong1 () const
{
#if COM_COMPATIBLE
	return makeLong (data[3], data[2], data [1], data [0]);
#else
	return makeLong (data[0], data[1], data [2], data [3]);
#endif
}

//------------------------------------------------------------------------
uint32 FUID::getLong2 () const
{
#if COM_COMPATIBLE
	return makeLong (data[5], data[4], data [7], data [6]);
#else
	return makeLong (data[4], data[5], data [6], data [7]);
#endif
}

//------------------------------------------------------------------------
uint32 FUID::getLong3 () const
{
#if COM_COMPATIBLE
	return makeLong (data[8], data[9], data [10], data [11]);
#else
	return makeLong (data[8], data[9], data [10], data [11]);
#endif
}

//------------------------------------------------------------------------
uint32 FUID::getLong4 () const
{
#if COM_COMPATIBLE
	return makeLong (data[12], data[13], data [14], data [15]);
#else
	return makeLong (data[12], data[13], data [14], data [15]);
#endif
}
//------------------------------------------------------------------------
// Writes the ID as 32 upper-case hex characters (no separators).
// The caller's buffer must hold at least 33 chars. No-op on null.
void FUID::toString (char8* string) const
{
	if (!string)
		return;

#if COM_COMPATIBLE
	// The first 8 bytes are printed via the GUID fields so the text
	// matches the canonical COM form; the last 8 bytes follow as hex.
	GuidStruct* g = (GuidStruct*)data;

	char8 s[17];
	Steinberg::toString8 (s, data, 8, 16);

	sprintf (string, "%08X%04X%04X%s", g->Data1, g->Data2, g->Data3, s);
#else
	Steinberg::toString8 (string, data, 0, 16);
#endif
}
//------------------------------------------------------------------------
// Parses a 32-character hex string (as produced by toString) into this
// ID. Returns false when string is null/empty or not exactly 32 chars.
// NOTE(review): the characters themselves are not validated as hex.
bool FUID::fromString (const char8* string)
{
	if (!string || !*string)
		return false;

	if (strlen (string) != 32)
		return false;

#if COM_COMPATIBLE
	// Recompose the GUID fields from their textual (COM) layout, then
	// copy them over the first 8 data bytes; the tail is plain hex.
	GuidStruct g;

	char s[33];
	strcpy (s, string);
	s[8] = 0;
	sscanf (s, "%x", &g.Data1);
	strcpy (s, string + 8);
	s[4] = 0;
	sscanf (s, "%hx", &g.Data2);
	strcpy (s, string + 12);
	s[4] = 0;
	sscanf (s, "%hx", &g.Data3);

	memcpy (data, &g, 8);
	Steinberg::fromString8 (string + 16, data, 8, 16);
#else
	Steinberg::fromString8 (string, data, 0, 16);
#endif
	return true;
}
//------------------------------------------------------------------------
// Parses a registry-style GUID of exactly 38 chars including braces and
// dashes, e.g. {c200e360-38c5-11ce-ae62-08002b2b79ef}. Returns false on
// null/empty input or wrong length; hex digits are not validated.
bool FUID::fromRegistryString (const char8* string)
{
	if (!string || !*string)
		return false;
	if (strlen (string) != 38)
		return false;

// e.g. {c200e360-38c5-11ce-ae62-08002b2b79ef}

#if COM_COMPATIBLE
	GuidStruct g;
	char8 s[10];

	strncpy (s, string + 1, 8);
	s[8] = 0;
	sscanf (s, "%x", &g.Data1);
	strncpy (s, string + 10, 4);
	s[4] = 0;
	sscanf (s, "%hx", &g.Data2);
	strncpy (s, string + 15, 4);
	s[4] = 0;
	sscanf (s, "%hx", &g.Data3);
	memcpy (data, &g, 8);

	Steinberg::fromString8 (string + 20, data, 8, 10);
	Steinberg::fromString8 (string + 25, data, 10, 16);
#else
	Steinberg::fromString8 (string + 1, data, 0, 4);
	Steinberg::fromString8 (string + 10, data, 4, 6);
	Steinberg::fromString8 (string + 15, data, 6, 8);
	Steinberg::fromString8 (string + 20, data, 8, 10);
	Steinberg::fromString8 (string + 25, data, 10, 16);
#endif
	return true;
}
//------------------------------------------------------------------------
// Formats the ID as a registry-style GUID with braces and dashes; the
// caller's buffer must hold at least 39 chars.
void FUID::toRegistryString (char8* string) const
{
// e.g. {c200e360-38c5-11ce-ae62-08002b2b79ef}

#if COM_COMPATIBLE
	GuidStruct* g = (GuidStruct*)data;

	char8 s1[5];
	Steinberg::toString8 (s1, data, 8, 10);
	char8 s2[13];
	Steinberg::toString8 (s2, data, 10, 16);

	sprintf (string, "{%08X-%04X-%04X-%s-%s}", g->Data1, g->Data2, g->Data3, s1, s2);
#else
	char8 s1[9];
	Steinberg::toString8 (s1, data, 0, 4);
	char8 s2[5];
	Steinberg::toString8 (s2, data, 4, 6);
	char8 s3[5];
	Steinberg::toString8 (s3, data, 6, 8);
	char8 s4[5];
	Steinberg::toString8 (s4, data, 8, 10);
	char8 s5[13];
	Steinberg::toString8 (s5, data, 10, 16);

	sprintf (string, "{%s-%s-%s-%s-%s}", s1, s2, s3, s4, s5);
#endif
}
//------------------------------------------------------------------------
// Formats the ID as C++ source text in one of the *_UID macro styles.
// With a null string the text goes to the debugger output (Windows) or
// stdout; otherwise the caller's buffer must hold at least 128 chars.
void FUID::print (char8* string, int32 style) const
{
	if (!string) // no string: debug output
	{
		char8 str [128];
		print (str, style);

#if WINDOWS
		OutputDebugStringA (str);
		OutputDebugStringA ("\n");
#else
		fprintf (stdout, "%s\n", str);
#endif
		return;
	}

	uint32 l1, l2, l3, l4;
	to4Int (l1, l2, l3, l4);

	switch (style)
	{
		case kINLINE_UID:
			sprintf (string, "INLINE_UID (0x%08X, 0x%08X, 0x%08X, 0x%08X)", l1, l2, l3, l4);
			break;

		case kDECLARE_UID:
			sprintf (string, "DECLARE_UID (0x%08X, 0x%08X, 0x%08X, 0x%08X)", l1, l2, l3, l4);
			break;

		case kFUID:
			sprintf (string, "FUID (0x%08X, 0x%08X, 0x%08X, 0x%08X)", l1, l2, l3, l4);
			break;

		// kCLASS_UID and any unknown style fall through to the
		// DECLARE_CLASS_IID form.
		case kCLASS_UID:
		default:
			sprintf (string, "DECLARE_CLASS_IID (Interface, 0x%08X, 0x%08X, 0x%08X, 0x%08X)", l1, l2, l3, l4);
			break;
	}
}
//------------------------------------------------------------------------
// Copies the 16 ID bytes into a caller-supplied TUID array.
void FUID::toTUID (TUID result) const
{
	memcpy (result, data, sizeof (TUID));
}
//------------------------------------------------------------------------
// helpers
//------------------------------------------------------------------------
// Combines four bytes into one 32-bit word, b1 being the most
// significant byte.
static uint32 makeLong (uint8 b1, uint8 b2, uint8 b3, uint8 b4)
{
	uint32 result = b1;
	result = (result << 8) | b2;
	result = (result << 8) | b3;
	result = (result << 8) | b4;
	return result;
}
//------------------------------------------------------------------------
// Renders data[i1..i2) as upper-case hex pairs into string, null
// terminated; the caller provides at least 2*(i2-i1)+1 chars.
static void toString8 (char8* string, const char* data, int32 i1, int32 i2)
{
	char8* out = string;
	for (int32 i = i1; i < i2; i++)
	{
		sprintf (out, "%02X", (uint8)data[i]);
		out += 2;
	}
	*out = 0; // also terminates the empty range
}
//------------------------------------------------------------------------
static void fromString8 (const char8* string, char* data, int32 i1, int32 i2)
{
for (int32 i = i1; i < i2; i++)
{
char8 s[3];
s[0] = *string++;
s[1] = *string++;
s[2] = 0;
int32 d = 0;
sscanf (s, "%2x", &d);
data[i] = (char)d;
}
}
} // Steinberg
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <pybind11/pybind11.h>

#include <cstring>
#include <stdexcept>
#include <string>
#include <type_traits>

#include "libraries/criterion/cpu/ForceAlignmentCriterion.h"
#include "libraries/criterion/cpu/FullConnectionCriterion.h"
#include "libraries/criterion/cpu/ViterbiPath.h"

#ifdef W2L_LIBRARIES_USE_CUDA
#include "libraries/criterion/cuda/ForceAlignmentCriterion.cuh"
#include "libraries/criterion/cuda/FullConnectionCriterion.cuh"
#include "libraries/criterion/cuda/ViterbiPath.cuh"
#endif // W2L_LIBRARIES_USE_CUDA
namespace py = pybind11;
using namespace w2l;
// Reinterprets the contents of a py::bytes object as a value of type T.
//
// The bytes object must contain exactly sizeof(T) bytes, otherwise a
// std::runtime_error is thrown. T must be standard layout; it is used
// here to pass raw pointers/handles across the Python boundary.
template <class T>
static T castBytes(const py::bytes& b) {
  static_assert(
      std::is_standard_layout<T>::value,
      "types represented as bytes must be standard layout");
  std::string s = b;
  if (s.size() != sizeof(T)) {
    throw std::runtime_error("wrong py::bytes size to represent object");
  }
  // memcpy instead of dereferencing a reinterpret_cast'ed pointer: the
  // string's buffer is not guaranteed to be suitably aligned for T, so
  // the cast-and-dereference was undefined behavior (misaligned access /
  // strict aliasing). The copy is well defined and optimizes to a load.
  T result;
  std::memcpy(&result, s.data(), sizeof(T));
  return result;
}
using CpuFAC = cpu::ForceAlignmentCriterion<float>;
using CpuFCC = cpu::FullConnectionCriterion<float>;
using CpuViterbi = cpu::ViterbiPath<float>;

// The wrappers below are thin binding shims: each py::bytes argument
// carries a serialized raw pointer produced on the Python side, which
// castBytes<> unpacks before forwarding to the criterion entry point.
// B/T/N/L presumably mean batch / time steps / classes / target length —
// confirm against the criterion headers.

// Force-alignment criterion, forward pass (writes into loss).
static void CpuFAC_forward(
    int B,
    int T,
    int N,
    int L,
    CriterionScaleMode scaleMode,
    py::bytes input,
    py::bytes target,
    py::bytes targetSize,
    py::bytes trans,
    py::bytes loss,
    py::bytes workspace) {
  CpuFAC::forward(
      B,
      T,
      N,
      L,
      scaleMode,
      castBytes<const float*>(input),
      castBytes<const int*>(target),
      castBytes<const int*>(targetSize),
      castBytes<const float*>(trans),
      castBytes<float*>(loss),
      castBytes<void*>(workspace));
}

// Force-alignment criterion, backward pass (writes input/trans grads).
static void CpuFAC_backward(
    int B,
    int T,
    int N,
    int L,
    py::bytes target,
    py::bytes targetSize,
    py::bytes grad,
    py::bytes inputGrad,
    py::bytes transGrad,
    py::bytes workspace) {
  CpuFAC::backward(
      B,
      T,
      N,
      L,
      castBytes<const int*>(target),
      castBytes<const int*>(targetSize),
      castBytes<const float*>(grad),
      castBytes<float*>(inputGrad),
      castBytes<float*>(transGrad),
      castBytes<void*>(workspace));
}

// Full-connection criterion, forward pass.
static void CpuFCC_forward(
    int B,
    int T,
    int N,
    CriterionScaleMode scaleMode,
    py::bytes input,
    py::bytes targetSize,
    py::bytes trans,
    py::bytes loss,
    py::bytes workspace) {
  CpuFCC::forward(
      B,
      T,
      N,
      scaleMode,
      castBytes<const float*>(input),
      castBytes<const int*>(targetSize),
      castBytes<const float*>(trans),
      castBytes<float*>(loss),
      castBytes<void*>(workspace));
}

// Full-connection criterion, backward pass.
static void CpuFCC_backward(
    int B,
    int T,
    int N,
    py::bytes trans,
    py::bytes grad,
    py::bytes inputGrad,
    py::bytes transGrad,
    py::bytes workspace) {
  CpuFCC::backward(
      B,
      T,
      N,
      castBytes<const float*>(trans),
      castBytes<const float*>(grad),
      castBytes<float*>(inputGrad),
      castBytes<float*>(transGrad),
      castBytes<void*>(workspace));
}

// Viterbi best-path decoding (writes into path).
static void CpuViterbi_compute(
    int B,
    int T,
    int N,
    py::bytes input,
    py::bytes trans,
    py::bytes path,
    py::bytes workspace) {
  CpuViterbi::compute(
      B,
      T,
      N,
      castBytes<const float*>(input),
      castBytes<const float*>(trans),
      castBytes<int*>(path),
      castBytes<void*>(workspace));
}
#ifdef W2L_LIBRARIES_USE_CUDA
using CudaFAC = cuda::ForceAlignmentCriterion<float>;
using CudaFCC = cuda::FullConnectionCriterion<float>;
using CudaViterbi = cuda::ViterbiPath<float>;

// CUDA variants of the shims above: same argument handling, plus one
// trailing py::bytes carrying a serialized cudaStream_t on which the
// work is enqueued.

// Force-alignment criterion, forward pass.
static void CudaFAC_forward(
    int B,
    int T,
    int N,
    int L,
    CriterionScaleMode scaleMode,
    py::bytes input,
    py::bytes target,
    py::bytes targetSize,
    py::bytes trans,
    py::bytes loss,
    py::bytes workspace,
    py::bytes stream) {
  CudaFAC::forward(
      B,
      T,
      N,
      L,
      scaleMode,
      castBytes<const float*>(input),
      castBytes<const int*>(target),
      castBytes<const int*>(targetSize),
      castBytes<const float*>(trans),
      castBytes<float*>(loss),
      castBytes<void*>(workspace),
      castBytes<cudaStream_t>(stream));
}

// Force-alignment criterion, backward pass.
static void CudaFAC_backward(
    int B,
    int T,
    int N,
    int L,
    py::bytes target,
    py::bytes targetSize,
    py::bytes grad,
    py::bytes inputGrad,
    py::bytes transGrad,
    py::bytes workspace,
    py::bytes stream) {
  CudaFAC::backward(
      B,
      T,
      N,
      L,
      castBytes<const int*>(target),
      castBytes<const int*>(targetSize),
      castBytes<const float*>(grad),
      castBytes<float*>(inputGrad),
      castBytes<float*>(transGrad),
      castBytes<void*>(workspace),
      castBytes<cudaStream_t>(stream));
}

// Full-connection criterion, forward pass.
static void CudaFCC_forward(
    int B,
    int T,
    int N,
    CriterionScaleMode scaleMode,
    py::bytes input,
    py::bytes targetSize,
    py::bytes trans,
    py::bytes loss,
    py::bytes workspace,
    py::bytes stream) {
  CudaFCC::forward(
      B,
      T,
      N,
      scaleMode,
      castBytes<const float*>(input),
      castBytes<const int*>(targetSize),
      castBytes<const float*>(trans),
      castBytes<float*>(loss),
      castBytes<void*>(workspace),
      castBytes<cudaStream_t>(stream));
}

// Full-connection criterion, backward pass.
static void CudaFCC_backward(
    int B,
    int T,
    int N,
    py::bytes trans,
    py::bytes grad,
    py::bytes inputGrad,
    py::bytes transGrad,
    py::bytes workspace,
    py::bytes stream) {
  CudaFCC::backward(
      B,
      T,
      N,
      castBytes<const float*>(trans),
      castBytes<const float*>(grad),
      castBytes<float*>(inputGrad),
      castBytes<float*>(transGrad),
      castBytes<void*>(workspace),
      castBytes<cudaStream_t>(stream));
}

// Viterbi best-path decoding.
static void CudaViterbi_compute(
    int B,
    int T,
    int N,
    py::bytes input,
    py::bytes trans,
    py::bytes path,
    py::bytes workspace,
    py::bytes stream) {
  CudaViterbi::compute(
      B,
      T,
      N,
      castBytes<const float*>(input),
      castBytes<const float*>(trans),
      castBytes<int*>(path),
      castBytes<void*>(workspace),
      castBytes<cudaStream_t>(stream));
}
#endif // W2L_LIBRARIES_USE_CUDA
// Python module definition: exposes the scale-mode enum and one class
// per criterion. The classes are bound without constructors (no
// py::init); callers use the function-style methods above together with
// get_workspace_size to size the caller-allocated workspace buffers.
PYBIND11_MODULE(_criterion, m) {
  py::enum_<CriterionScaleMode>(m, "CriterionScaleMode")
      .value("NONE", CriterionScaleMode::NONE)
      .value("INPUT_SZ", CriterionScaleMode::INPUT_SZ)
      .value("INPUT_SZ_SQRT", CriterionScaleMode::INPUT_SZ_SQRT)
      .value("TARGET_SZ", CriterionScaleMode::TARGET_SZ)
      .value("TARGET_SZ_SQRT", CriterionScaleMode::TARGET_SZ_SQRT);

  py::class_<CpuFAC>(m, "CpuForceAlignmentCriterion")
      .def("get_workspace_size", &CpuFAC::getWorkspaceSize)
      .def("forward", &CpuFAC_forward)
      .def("backward", &CpuFAC_backward);
  py::class_<CpuFCC>(m, "CpuFullConnectionCriterion")
      .def("get_workspace_size", &CpuFCC::getWorkspaceSize)
      .def("forward", &CpuFCC_forward)
      .def("backward", &CpuFCC_backward);
  py::class_<CpuViterbi>(m, "CpuViterbiPath")
      .def("get_workspace_size", &CpuViterbi::getWorkspaceSize)
      .def("compute", &CpuViterbi_compute);

#ifdef W2L_LIBRARIES_USE_CUDA
  // Lets Python size the bytes blob used to smuggle a cudaStream_t.
  m.attr("sizeof_cuda_stream") = py::int_(sizeof(cudaStream_t));

  py::class_<CudaFAC>(m, "CudaForceAlignmentCriterion")
      .def("get_workspace_size", &CudaFAC::getWorkspaceSize)
      .def("forward", &CudaFAC_forward)
      .def("backward", &CudaFAC_backward);
  py::class_<CudaFCC>(m, "CudaFullConnectionCriterion")
      .def("get_workspace_size", &CudaFCC::getWorkspaceSize)
      .def("forward", &CudaFCC_forward)
      .def("backward", &CudaFCC_backward);
  py::class_<CudaViterbi>(m, "CudaViterbiPath")
      .def("get_workspace_size", &CudaViterbi::getWorkspaceSize)
      .def("compute", &CudaViterbi_compute);
#endif // W2L_LIBRARIES_USE_CUDA
}
|
/**
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
*/
#include <bits/stdc++.h>
using namespace std;
#define eb emplace_back
#define mp make_pair
#define mt make_tuple
#define fi first
#define se second
#define sz(v) ((int) (v).size())
#define all(v) (v).begin(), (v).end()
#define ms(x,b) memset(x,b,sizeof(x))
#define sqr(x) ((x)*(x))
typedef string str;
typedef long double ld;
typedef long long ll;
typedef vector<int> vi;
typedef vector<ll> vll;
typedef vector<vi> vvi;
typedef vector<double> vd;
typedef vector<vd> vvd;
typedef vector<string> vs;
typedef pair<int,int> pii;
typedef vector<pii> vpi;
// UTILS
// ckmin/ckmax: assign b to a when it improves (smaller/larger); return
// whether an update happened.
template<class T> bool ckmin(T& a, const T& b) { return b < a ? a = b, 1 : 0; }
template<class T> bool ckmax(T& a, const T& b) { return a < b ? a = b, 1 : 0; }
// pct: popcount (note: uses the 32-bit builtin regardless of T's width).
template<class T> int pct(T x) { return __builtin_popcount(x); }
// bits: bit-width of x, i.e. highest set bit + 1 (undefined for x == 0;
// also uses the 32-bit builtin).
template<class T> int bits(T x) { return T(sizeof(T))*8-__builtin_clz(x); }
// divup: ceiling division, intended for positive operands.
template<class T> T divup(T a, T b) { return a/b+!(a<0||a%b == T(0)); }
// First integer in [lo, hi] for which f is true; assumes f is monotone
// (false...false true...true). Returns hi+1 when f is never true.
int fstTrue(function<bool(int)> f, int lo, int hi) {
  ++hi;
  assert(lo <= hi);
  while (lo < hi) { // invariant: the answer lies in [lo, hi]
    const int md = lo + (hi - lo) / 2;
    if (f(md))
      hi = md;
    else
      lo = md + 1;
  }
  return lo;
}
// First integer in [lo, hi] for which f is false; assumes f is monotone
// (true...true false...false). Returns hi+1 when f is never false.
int fstFalse(function<bool(int)> f, int lo, int hi) {
  ++hi;
  assert(lo <= hi);
  while (lo < hi) { // invariant: the answer lies in [lo, hi]
    const int md = lo + (hi - lo) / 2;
    if (f(md))
      lo = md + 1;
    else
      hi = md;
  }
  return lo;
}
// INPUT
// re(...): variadic cin-based readers for scalars, pairs, complex,
// vectors and arrays (element-wise, recursively).
template<class A> void re(complex<A>& c);
template<class A, class B> void re(pair<A,B>& p);
template<class A> void re(vector<A>& v);
template<class A, size_t SZ> void re(array<A,SZ>& a);

template<class T> void re(T& x) { cin >> x; }
// Floating point is read via string + stod/stold — presumably to accept
// any textual form uniformly; confirm before changing.
void re(double& d) { str t; re(t); d = stod(t); }
void re(ld& d) { str t; re(t); d = stold(t); }
template<class H, class... T> void re(H& h, T&... t) { re(h); re(t...); }

template<class A> void re(complex<A>& c) { A a,b; re(a,b); c = {a,b}; }
template<class A, class B> void re(pair<A,B>& p) { re(p.fi,p.se); }
template<class A> void re(vector<A>& x) { for (auto &u:x) re(u); }
template<class A, size_t SZ> void re(array<A,SZ>& x) { for (auto &u:x) re(u); }
// TO_STRING
// The ts macro renames every overload below to to_string, so ts(x)
// stringifies any supported type for the print/debug helpers.
#define ts to_string
str ts(char c) { return str(1,c); }
str ts(bool b) { return b ? "true" : "false"; }
str ts(const char* s) { return (str)s; }
str ts(str s) { return s; }
// complex: rely on operator<< formatting.
template<class A> str ts(complex<A> c) { stringstream ss; ss << c; return ss.str(); }
// vector<bool> needs its own overload (proxy references); prints "{0101}".
str ts(vector<bool> v) {
  str res = "{";
  for (int i=0;i<sz(v);i++) res += char('0'+v[i]);
  res += "}";
  return res;
}
template<size_t SZ> str ts(bitset<SZ> b) {
  str res = ""; for (int i=0;i<SZ;i++) res += char('0'+b[i]);
  return res; }
template<class A, class B> str ts(pair<A,B> p);
// Generic container overload: "{a, b, c}".
template<class T> str ts(T v) {
  bool f = 1; str r = "{";
  for (const auto& x: v) { if (!f) r += ", "; f = 0; r += ts(x); }
  r += "}"; return r; }
template<class A, class B> str ts(pair<A,B> p) {
  return "("+ts(p.fi)+", "+ts(p.se)+")"; }
// OUTPUT
// pr(...): print without separators; ps(...): space-separated, newline
// terminated; pv(v): container elements space-separated plus newline.
void pr(double x) { cout << fixed << setprecision(10) << x; }
void pr(ld x) { cout << fixed << setprecision(10) << x; }
template<class A> void pr(A x) { cout << ts(x); }
template<class H, class... T> void pr(const H& h, const T&... t) {
  pr(h); pr(t...); }

void ps() { pr("\n"); }
template<class H, class... T> void ps(const H& h, const T&... t) {
  pr(h); if (sizeof...(t)) pr(" "); ps(t...); }
template<class A> void pv(A v) { int n = sz(v); for (int j=0;j<n;j++) pr(v[j]," \n"[j==n-1]); }
// DEBUG
// dbg(...) prints "LINE(n) -> [names]: [values]" to stderr, but only
// when LOCAL is defined; otherwise it compiles to the no-op literal 0.
void DBG() { cerr << "]" << endl; }
template<class H, class... T> void DBG(H h, T... t) {
  cerr << ts(h); if (sizeof...(t)) cerr << ", ";
  DBG(t...); }
#ifdef LOCAL
#define dbg(...) cerr << "LINE(" << __LINE__ << ") -> [" << #__VA_ARGS__ << "]: [", DBG(__VA_ARGS__)
#else
#define dbg(...) 0
#endif
// FILE I/O
// setIO("name") redirects stdin/stdout to name.in / name.out; with no
// argument it only unsyncs C++ streams for fast console I/O.
void setIn(string s) { freopen(s.c_str(),"r",stdin); }
void setOut(string s) { freopen(s.c_str(),"w",stdout); }
void unsyncIO() { ios_base::sync_with_stdio(0); cin.tie(0); }
void setIO(string s = "") {
  unsyncIO();
  // cin.exceptions(cin.failbit);
  // throws exception when do smth illegal
  // ex. try to read letter into int
  if (sz(s)) { setIn(s+".in"), setOut(s+".out"); }
}
// Random engine seeded from the clock, plus common sentinel constants.
mt19937 rng((uint32_t)chrono::steady_clock::now().time_since_epoch().count());
const ld PI = 2*acos(0);
const int INF = 1E9+7; // large sentinel that still fits in int
const ll LINF = 1LL<<62;
// Reads n and the costs a, b, c of items that contribute 1, 2 and 3
// (mod 4) respectively; prints the minimum total cost that makes n
// divisible by 4. Buying 4 of one item is never useful (it changes the
// residue by 0), so brute force over 0..3 of each item suffices.
//
// Fix: `main()` had no return type — implicit int is ill-formed C++.
int main() {
  //setbuf(stdout, NULL);
  setIO();
  ll n, a, b, c; re(n,a,b,c);
  ll ret = LINF;
  for (int A = 0; A < 4; A++) {
    for (int B = 0; B < 4; B++) {
      for (int C = 0; C < 4; C++) {
        // A items change the residue by A, B items by 2B, C items by 3C.
        if ((A+2*B+3*C+n)%4==0) {
          ckmin(ret,A*a+B*b+C*c);
        }
      }
    }
  }
  ps(ret);
  return 0;
}
|
//===- FuzzerShmemWindows.cpp - Posix shared memory -------------*- C++ -* ===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// SharedMemoryRegion
//===----------------------------------------------------------------------===//
#include "FuzzerDefs.h"
#if LIBFUZZER_WINDOWS
#include "FuzzerIO.h"
#include "FuzzerShmem.h"
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
namespace fuzzer {
// Returns the full path of the named shared-memory file inside the
// temporary directory.
std::string SharedMemoryRegion::Path(const char *Name) {
  return DirPlusFile(TmpDir(), Name);
}
// Builds the per-semaphore name by appending the digit Idx ('0' + Idx)
// to the region name.
std::string SharedMemoryRegion::SemName(const char *Name, int Idx) {
  std::string Result(Name);
  Result.push_back(static_cast<char>('0' + Idx));
  return Result;
}
// The operations below are not implemented for Windows yet: each aborts
// in debug builds; the bool variants report failure.
bool SharedMemoryRegion::Map(int fd) {
  assert(0 && "UNIMPLEMENTED");
  return false;
}

bool SharedMemoryRegion::Create(const char *Name) {
  assert(0 && "UNIMPLEMENTED");
  return false;
}

bool SharedMemoryRegion::Open(const char *Name) {
  assert(0 && "UNIMPLEMENTED");
  return false;
}

bool SharedMemoryRegion::Destroy(const char *Name) {
  assert(0 && "UNIMPLEMENTED");
  return false;
}

void SharedMemoryRegion::Post(int Idx) {
  assert(0 && "UNIMPLEMENTED");
}
// Not implemented on Windows; aborts in debug builds like the other
// stubs. Fix: removed a stray `Semaphore[1] = nullptr;` that ignored
// Idx and clobbered state even in NDEBUG builds (where assert is a
// no-op) — no sibling stub mutates anything.
void SharedMemoryRegion::Wait(int Idx) {
  assert(0 && "UNIMPLEMENTED");
}
} // namespace fuzzer
#endif // LIBFUZZER_WINDOWS
|
/***
Copyright 2012 Devsim LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
***/
#include "ProcessModelOrder.hh"
#include "EquationObject.hh"
#include <set>
// Resolves each requested model name into an evaluation-ordered list:
// dependencies of a model (models referenced by its equation) are
// emitted before the model itself via processModelVector. Unknown or
// undefined names are recorded in errorString_ but still appended so
// the caller sees every requested name. The returned reference stays
// valid until the next call.
const ModelNameVector_t &ProcessModelOrder::GetModelVector(const ModelMap_t &model_list, const ModelNameVector_t &input_list)
{
  processedList_.clear();
  processedList_.reserve(input_list.size());
  processStatusMap_.clear();

  for (ModelNameVector_t::const_iterator it = input_list.begin(); it != input_list.end(); ++it)
  {
    const std::string &model_name = *it;
    // operator[] default-inserts for first-seen names (relies on the
    // default-constructed status being UNTOUCHED — consistent with the
    // branch below; confirm against the enum definition).
    const ProcessStatus_t status = processStatusMap_[model_name];
    ModelMap_t::const_iterator mit = model_list.find(model_name);
    if (status == DONE)
    {
      // Already emitted as a dependency of an earlier entry; note that
      // it is still appended again below.
    }
    else if (mit == model_list.end())
    {
      errorString_ += model_name + " was not declared or defined as a model\n";
    }
    else if (!(mit->second))
    {
      errorString_ += model_name + " was not defined as a model\n";
    }
    //// impossible for this model to be in processing
    else if (status == UNTOUCHED)
    {
      processModelVector(model_list, mit->second);
    }
    processStatusMap_[model_name] = DONE;
    processedList_.push_back(model_name);
  }
  return processedList_;
}
/*
  Recursively appends, in dependency order, every model referenced by eq
  to processedList_. The PROCESSING status marks models currently on the
  recursion stack, so self-referential definitions are reported in
  errorString_ instead of recursing forever.
  Called recursively. Refactor, if possible, using iteration.
*/
void ProcessModelOrder::processModelVector(const ModelMap_t &model_list, Eqo::EqObjPtr eq)
{
  std::set<std::string> mset;

  const Eqo::EqObjType equation_type = eq->getType();

  if (equation_type == Eqo::MODEL_OBJ)
  {
    const std::string &mname = eq->stringValue();

    const ProcessStatus_t status = processStatusMap_[mname];
    if (status == DONE)
    {
      return;
    }
    else if (status == PROCESSING)
    {
      // Cycle detected: this model is (transitively) defined in terms
      // of itself. Mark it DONE so it is not reported repeatedly.
      errorString_ += mname + " is being processed in terms of itself\n";
      processStatusMap_[mname] = DONE;
      return;
    }
    else if (status == UNTOUCHED)
    {
      processStatusMap_[mname] = PROCESSING;
    }

    ModelMap_t::const_iterator mit = model_list.find(mname);
    if (mit != model_list.end())
    {
      if (mit->second)
      {
        mset = (mit->second)->getReferencedType(Eqo::MODEL_OBJ);
      }
    }
  }
  else
  {
    //// Get all models referenced by this object
    mset = eq->getReferencedType(Eqo::MODEL_OBJ);
  }

  std::set<std::string>::iterator it =mset.begin();
  std::set<std::string>::iterator end=mset.end();
  for ( ; it != end; ++it)
  {
    const std::string &sname = *it;

    // Model was processed, we are done
    if (processStatusMap_[sname] == DONE)
    {
      continue;
    }

    ModelMap_t::const_iterator mit = model_list.find(sname);
    if (mit != model_list.end())
    {
      //// recurse so this dependency's own dependencies come first
      Eqo::EqObjPtr x = mit->second;
      if (x)
      {
        processModelVector(model_list, x);
      }
      processedList_.push_back(sname);
      processStatusMap_[sname] = DONE;
    }
  }

  //// If we are a model, record ourselves after our dependencies
  if (equation_type == Eqo::MODEL_OBJ)
  {
    const std::string &mname = eq->stringValue();
    processedList_.push_back(mname);
    processStatusMap_[mname] = DONE;
  }
}
|
// MusicXML Class Library
// Copyright (c) by Matthew James Briggs
// Distributed under the MIT License
#include "../../core/elements/OrnamentsChoice.h"
#include "../../core/FromXElement.h"
#include "../../core/elements/DelayedInvertedTurn.h"
#include "../../core/elements/DelayedTurn.h"
#include "../../core/elements/InvertedMordent.h"
#include "../../core/elements/InvertedTurn.h"
#include "../../core/elements/Mordent.h"
#include "../../core/elements/OtherOrnament.h"
#include "../../core/elements/Schleifer.h"
#include "../../core/elements/Shake.h"
#include "../../core/elements/Tremolo.h"
#include "../../core/elements/TrillMark.h"
#include "../../core/elements/Turn.h"
#include "../../core/elements/VerticalTurn.h"
#include "../../core/elements/WavyLine.h"
#include <iostream>
namespace mx
{
namespace core
{
        // Default-constructs every ornament alternative so the accessors can
        // never return null; the active alternative starts as trillMark.
        OrnamentsChoice::OrnamentsChoice()
        :myChoice( Choice::trillMark )
        ,myTrillMark( makeTrillMark() )
        ,myTurn( makeTurn() )
        ,myDelayedTurn( makeDelayedTurn() )
        ,myInvertedTurn( makeInvertedTurn() )
        ,myDelayedInvertedTurn( makeDelayedInvertedTurn() )
        ,myVerticalTurn( makeVerticalTurn() )
        ,myShake( makeShake() )
        ,myWavyLine( makeWavyLine() )
        ,myMordent( makeMordent() )
        ,myInvertedMordent( makeInvertedMordent() )
        ,mySchleifer( makeSchleifer() )
        ,myTremolo( makeTremolo() )
        ,myOtherOrnament( makeOtherOrnament() )
        {}
        // This choice element carries no XML attributes and no element name of
        // its own; it streams only the selected child's content.
        bool OrnamentsChoice::hasAttributes() const
        {
            return false;
        }
        std::ostream& OrnamentsChoice::streamAttributes( std::ostream& os ) const
        {
            return os;
        }
        std::ostream& OrnamentsChoice::streamName( std::ostream& os ) const
        {
            return os;
        }
        bool OrnamentsChoice::hasContents() const
        {
            return true;
        }
std::ostream& OrnamentsChoice::streamContents( std::ostream& os, const int indentLevel, bool& isOneLineOnly ) const
{
MX_UNUSED( isOneLineOnly );
switch ( myChoice )
{
case Choice::trillMark:
{
myTrillMark->toStream( os, indentLevel );
}
break;
case Choice::turn:
{
myTurn->toStream( os, indentLevel );
}
break;
case Choice::delayedTurn:
{
myDelayedTurn->toStream( os, indentLevel );
}
break;
case Choice::invertedTurn:
{
myInvertedTurn->toStream( os, indentLevel );
}
break;
case Choice::delayedInvertedTurn:
{
myDelayedInvertedTurn->toStream( os, indentLevel );
}
break;
case Choice::verticalTurn:
{
myVerticalTurn->toStream( os, indentLevel );
}
break;
case Choice::shake:
{
myShake->toStream( os, indentLevel );
}
break;
case Choice::wavyLine:
{
myWavyLine->toStream( os, indentLevel );
}
break;
case Choice::mordent:
{
myMordent->toStream( os, indentLevel );
}
break;
case Choice::invertedMordent:
{
myInvertedMordent->toStream( os, indentLevel );
}
break;
case Choice::schleifer:
{
mySchleifer->toStream( os, indentLevel );
}
break;
case Choice::tremolo:
{
myTremolo->toStream( os, indentLevel );
}
break;
case Choice::otherOrnament:
{
myOtherOrnament->toStream( os, indentLevel );
}
break;
default:
break;
}
return os;
}
        // Selects which of the thirteen ornament children is the active one.
        OrnamentsChoice::Choice OrnamentsChoice::getChoice() const
        {
            return myChoice;
        }
        void OrnamentsChoice::setChoice( const OrnamentsChoice::Choice value )
        {
            myChoice = value;
        }
        // Accessors for each ornament alternative. Setters ignore null input
        // so the never-null invariant established by the constructor holds.
        TrillMarkPtr OrnamentsChoice::getTrillMark() const
        {
            return myTrillMark;
        }
        void OrnamentsChoice::setTrillMark( const TrillMarkPtr& value )
        {
            if( value )
            {
                myTrillMark = value;
            }
        }
        TurnPtr OrnamentsChoice::getTurn() const
        {
            return myTurn;
        }
        void OrnamentsChoice::setTurn( const TurnPtr& value )
        {
            if( value )
            {
                myTurn = value;
            }
        }
        DelayedTurnPtr OrnamentsChoice::getDelayedTurn() const
        {
            return myDelayedTurn;
        }
        void OrnamentsChoice::setDelayedTurn( const DelayedTurnPtr& value )
        {
            if( value )
            {
                myDelayedTurn = value;
            }
        }
        InvertedTurnPtr OrnamentsChoice::getInvertedTurn() const
        {
            return myInvertedTurn;
        }
        void OrnamentsChoice::setInvertedTurn( const InvertedTurnPtr& value )
        {
            if( value )
            {
                myInvertedTurn = value;
            }
        }
        DelayedInvertedTurnPtr OrnamentsChoice::getDelayedInvertedTurn() const
        {
            return myDelayedInvertedTurn;
        }
        void OrnamentsChoice::setDelayedInvertedTurn( const DelayedInvertedTurnPtr& value )
        {
            if( value )
            {
                myDelayedInvertedTurn = value;
            }
        }
        VerticalTurnPtr OrnamentsChoice::getVerticalTurn() const
        {
            return myVerticalTurn;
        }
        void OrnamentsChoice::setVerticalTurn( const VerticalTurnPtr& value )
        {
            if( value )
            {
                myVerticalTurn = value;
            }
        }
        ShakePtr OrnamentsChoice::getShake() const
        {
            return myShake;
        }
        void OrnamentsChoice::setShake( const ShakePtr& value )
        {
            if( value )
            {
                myShake = value;
            }
        }
        WavyLinePtr OrnamentsChoice::getWavyLine() const
        {
            return myWavyLine;
        }
        void OrnamentsChoice::setWavyLine( const WavyLinePtr& value )
        {
            if( value )
            {
                myWavyLine = value;
            }
        }
        MordentPtr OrnamentsChoice::getMordent() const
        {
            return myMordent;
        }
        void OrnamentsChoice::setMordent( const MordentPtr& value )
        {
            if( value )
            {
                myMordent = value;
            }
        }
        InvertedMordentPtr OrnamentsChoice::getInvertedMordent() const
        {
            return myInvertedMordent;
        }
        void OrnamentsChoice::setInvertedMordent( const InvertedMordentPtr& value )
        {
            if( value )
            {
                myInvertedMordent = value;
            }
        }
        SchleiferPtr OrnamentsChoice::getSchleifer() const
        {
            return mySchleifer;
        }
        void OrnamentsChoice::setSchleifer( const SchleiferPtr& value )
        {
            if( value )
            {
                mySchleifer = value;
            }
        }
        TremoloPtr OrnamentsChoice::getTremolo() const
        {
            return myTremolo;
        }
        void OrnamentsChoice::setTremolo( const TremoloPtr& value )
        {
            if( value )
            {
                myTremolo = value;
            }
        }
        OtherOrnamentPtr OrnamentsChoice::getOtherOrnament() const
        {
            return myOtherOrnament;
        }
        void OrnamentsChoice::setOtherOrnament( const OtherOrnamentPtr& value )
        {
            if( value )
            {
                myOtherOrnament = value;
            }
        }
        // Parses one ornament element: sets myChoice from the element name and
        // delegates the actual parsing to the matching child. Unrecognized
        // element names append a diagnostic to |message| and report failure.
        bool OrnamentsChoice::fromXElement( std::ostream& message, xml::XElement& xelement )
        {
            bool isSuccess = true;
            const std::string elementName = xelement.getName();
            if( elementName == "trill-mark" )
            {
                myChoice = Choice::trillMark;
                isSuccess &= getTrillMark()->fromXElement( message, xelement );
            }
            else if ( elementName == "turn" )
            {
                myChoice = Choice::turn;
                isSuccess &= getTurn()->fromXElement( message, xelement );
            }
            else if ( elementName == "delayed-turn" )
            {
                myChoice = Choice::delayedTurn;
                isSuccess &= getDelayedTurn()->fromXElement( message, xelement );
            }
            else if ( elementName == "inverted-turn" )
            {
                myChoice = Choice::invertedTurn;
                isSuccess &= getInvertedTurn()->fromXElement( message, xelement );
            }
            else if ( elementName == "delayed-inverted-turn" )
            {
                myChoice = Choice::delayedInvertedTurn;
                isSuccess &= getDelayedInvertedTurn()->fromXElement( message, xelement );
            }
            else if ( elementName == "vertical-turn" )
            {
                myChoice = Choice::verticalTurn;
                isSuccess &= getVerticalTurn()->fromXElement( message, xelement );
            }
            else if ( elementName == "shake" )
            {
                myChoice = Choice::shake;
                isSuccess &= getShake()->fromXElement( message, xelement );
            }
            else if ( elementName == "wavy-line" )
            {
                myChoice = Choice::wavyLine;
                isSuccess &= getWavyLine()->fromXElement( message, xelement );
            }
            else if ( elementName == "mordent" )
            {
                myChoice = Choice::mordent;
                isSuccess &= getMordent()->fromXElement( message, xelement );
            }
            else if ( elementName == "inverted-mordent" )
            {
                myChoice = Choice::invertedMordent;
                isSuccess &= getInvertedMordent()->fromXElement( message, xelement );
            }
            else if ( elementName == "schleifer" )
            {
                myChoice = Choice::schleifer;
                isSuccess &= getSchleifer()->fromXElement( message, xelement );
            }
            else if ( elementName == "tremolo" )
            {
                myChoice = Choice::tremolo;
                isSuccess &= getTremolo()->fromXElement( message, xelement );
            }
            else if ( elementName == "other-ornament" )
            {
                myChoice = Choice::otherOrnament;
                isSuccess &= getOtherOrnament()->fromXElement( message, xelement );
            }
            else
            {
                message << "OrnamentsChoice::fromXElement unrecognized element '" << elementName << "'" << std::endl;
                isSuccess = false;
            }
            MX_RETURN_IS_SUCCESS;
        }
}
}
|
// RUN: %clang_cc1 -no-opaque-pointers -triple i386-unknown-unknown -std=c++11 -fvisibility-inlines-hidden -emit-llvm -o - %s -O2 -disable-llvm-passes | FileCheck %s
// The trickery with optimization in the run line is to get IR
// generation to emit available_externally function bodies, but not
// actually inline them (and thus remove the emitted bodies).
// X0: with -fvisibility-inlines-hidden, implicit-inline members should be
// emitted hidden unless a member is explicitly given default visibility (f1).
struct X0 {
  void __attribute__((visibility("default"))) f1() { }
  void f2() { }
  void f3();
  static void f5() { }
  virtual void f6() { }
};
inline void X0::f3() { }
// X1: the same expectations for a class template, an out-of-line inline
// member (f3), and an explicit member specialization (f4 for int).
template<typename T>
struct X1 {
  void __attribute__((visibility("default"))) f1() { }
  void f2() { }
  void f3();
  void f4();
  static void f5() { }
  virtual void f6() { }
};
template<typename T>
inline void X1<T>::f3() { }
template<>
inline void X1<int>::f4() { }
// X2: default visibility on the class does not exempt its inline members
// from -fvisibility-inlines-hidden.
struct __attribute__((visibility("default"))) X2 {
  void f2() { }
};
extern template struct X1<float>;
// Calling each member forces a definition to be emitted; the FileCheck
// directives pin the expected linkage and visibility for each one. The
// extern template instantiation of X1<float> is expected to come out
// available_externally (body visible for inlining, not emitted here).
void use(X0 *x0, X1<int> *x1, X2 *x2, X1<float> *x3) {
  // CHECK-LABEL: define linkonce_odr void @_ZN2X02f1Ev
  x0->f1();
  // CHECK-LABEL: define linkonce_odr hidden void @_ZN2X02f2Ev
  x0->f2();
  // CHECK-LABEL: define linkonce_odr hidden void @_ZN2X02f3Ev
  x0->f3();
  // CHECK-LABEL: define linkonce_odr hidden void @_ZN2X02f5Ev
  X0::f5();
  // CHECK-LABEL: define linkonce_odr hidden void @_ZN2X02f6Ev
  x0->X0::f6();
  // CHECK-LABEL: define linkonce_odr void @_ZN2X1IiE2f1Ev
  x1->f1();
  // CHECK-LABEL: define linkonce_odr hidden void @_ZN2X1IiE2f2Ev
  x1->f2();
  // CHECK-LABEL: define linkonce_odr hidden void @_ZN2X1IiE2f3Ev
  x1->f3();
  // CHECK-LABEL: define linkonce_odr hidden void @_ZN2X1IiE2f4Ev
  x1->f4();
  // CHECK-LABEL: define linkonce_odr hidden void @_ZN2X1IiE2f5Ev
  X1<int>::f5();
  // CHECK-LABEL: define linkonce_odr hidden void @_ZN2X1IiE2f6Ev
  x1->X1::f6();
  // CHECK-LABEL: define linkonce_odr hidden void @_ZN2X22f2Ev
  x2->f2();
  // CHECK-LABEL: define available_externally void @_ZN2X1IfE2f2Ev
  x3->f2();
}
// rdar://problem/8614470
// test1: members of a default-visibility class that are declared (not
// defined) here should be plain declarations, not hidden definitions.
namespace test1 {
  struct __attribute__((visibility("default"))) A {
    inline void foo();
    ~A();
  };
  void test() {
    A a;
    a.foo();
  }
  // CHECK: declare void @_ZN5test11A3fooEv
  // CHECK: declare {{.*}} @_ZN5test11AD1Ev
}
// PR8713
// test2: an extern template inline function inside a default-visibility
// namespace should be referenced as available_externally.
namespace test2 {
  struct A {};
  template <class T> class B {};
  typedef B<A> arg;
  namespace ns __attribute__((visibility("default"))) {
    template <class T> inline void foo() {}
    extern template void foo<arg>();
  }
  void test() {
    ns::foo<arg>();
  }
  // CHECK-LABEL: define available_externally void @_ZN5test22ns3fooINS_1BINS_1AEEEEEvv()
}
// PR11642: explicit instantiation after extern template yields weak_odr.
namespace PR11642 {
  template <typename T>
  class Foo {
  public:
    T foo(T x) { return x; }
  };
  extern template class Foo<int>;
  template class Foo<int>;
  // CHECK-LABEL: define weak_odr noundef i32 @_ZN7PR116423FooIiE3fooEi
}
// Test that clang implements the new gcc behaviour for inline functions.
// GCC PR30066.
namespace test3 {
inline void foo(void) {
}
template<typename T>
inline void zed() {
}
template void zed<float>();
void bar(void) {
  foo();
  zed<int>();
}
// CHECK-LABEL: define weak_odr void @_ZN5test33zedIfEEvv
// CHECK-LABEL: define linkonce_odr hidden void @_ZN5test33fooEv
// CHECK-LABEL: define linkonce_odr hidden void @_ZN5test33zedIiEEvv
}
// test4: gnu_inline extern inline keeps the GNU89 semantics - the body is
// usable for inlining but no strong definition is emitted here.
namespace test4 {
extern inline __attribute__ ((__gnu_inline__))
void foo() {}
void bar() {
  foo();
}
// CHECK-LABEL: define available_externally void @_ZN5test43fooE
}
namespace test5 {
  // just don't crash.
  template <int> inline void Op();
  class UnaryInstruction {
    UnaryInstruction() {
      Op<0>();
    }
  };
  template <int Idx_nocapture> void Op() {
  }
}
namespace test6 {
  // just don't crash.
  template <typename T>
  void f(T x) {
  }
  struct C {
    static void g() {
      f([](){});
    }
  };
  void g() {
    C::g();
  }
}
// PR34811: taking the address of a template instantiated with a local
// lambda type from an inline function must not break linkage computation.
namespace PR34811 {
  template <typename T> void tf() {}
  // CHECK-LABEL: define linkonce_odr hidden noundef i8* @_ZN7PR348111fEv(
  inline void *f() {
    auto l = []() {};
    // CHECK-LABEL: define linkonce_odr hidden void @_ZN7PR348112tfIZNS_1fEvEUlvE_EEvv(
    return (void *)&tf<decltype(l)>;
  }
  void *p = (void *)f;
}
|
#include <iostream>
using namespace std;
// Reads a temperature value and a unit letter ('K', 'C' or 'F') from
// standard input and prints the equivalent Fahrenheit temperature with two
// decimal places. Any other unit letter produces no output.
int main()
{
    double temperature;
    char unit;
    std::cin >> temperature >> unit;

    if (unit == 'K' || unit == 'C' || unit == 'F') {
        if (unit == 'K') {
            temperature -= 273.15;                 // Kelvin -> Celsius
        }
        if (unit != 'F') {
            temperature = temperature * 1.8 + 32;  // Celsius -> Fahrenheit
        }
        std::cout.precision(2);
        std::cout << std::fixed << temperature << std::endl;
    }
    return 0;
}
|
// Copyright (c) 2017-2022, Mudita Sp. z.o.o. All rights reserved.
// For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
#pragma once
#include "ApplicationBellSettings.hpp"
#include "presenter/alarm_settings/AlarmSettingsPresenter.hpp"
#include <apps-common/windows/AppWindow.hpp>
namespace gui
{
class SideListView;
    // Alarm-settings window (MVP View side): the presenter owns the data and
    // business logic, this class owns the SideListView and input handling.
    class BellSettingsAlarmSettingsWindow : public AppWindow,
                                            public app::bell_settings::AlarmSettingsWindowContract::View
    {
      public:
        static constexpr auto name = "BellSettingsAlarmSettingsWindow";

        explicit BellSettingsAlarmSettingsWindow(
            app::ApplicationCommon *app,
            std::unique_ptr<app::bell_settings::AlarmSettingsWindowContract::Presenter> presenter);

        void buildInterface() override;
        void onBeforeShow(gui::ShowMode mode, gui::SwitchData *data) override;
        void onClose(CloseReason reason) override;
        bool onInput(const InputEvent &inputEvent) override;
        void rebuild() override;
        void exit() override;

      private:
        // Owned by the window hierarchy (raw pointer by gui convention).
        SideListView *sidelistview{};
        std::unique_ptr<app::bell_settings::AlarmSettingsWindowContract::Presenter> presenter;
        // Set when the user changed a value; presumably drives saving on
        // close -- confirm against the .cpp implementation.
        bool isSaveNeeded{false};
    };
} /* namespace gui */
|
// Copyright (c) 2015-2016 Hypha
#include "hyphahandlers/alarm/alarm.h"
#include <chrono>
#include <sstream>
#include <string>
#include <thread>
#include <boost/property_tree/json_parser.hpp>
#include <boost/property_tree/ptree.hpp>
#include <Poco/ClassLibrary.h>
#include <hypha/core/database/database.h>
#include <hypha/core/database/userdatabase.h>
#include <hypha/plugin/hyphaplugin.h>
#include <hypha/plugin/pluginloader.h>
using namespace hypha::handler;
using namespace hypha::handler::alarm;
using namespace hypha::plugin;
using namespace hypha::settings;
using namespace hypha::database;
// Construction/destruction need no work; all state lives in the base class
// and the `alarm` map starts empty.
Alarm::Alarm() {}
Alarm::~Alarm() {}
// Worker-thread body: this handler is purely reactive (receiveMessage), so
// the work loop just idles.
void Alarm::doWork() { std::this_thread::sleep_for(std::chrono::seconds(1)); }
// No line-protocol parsing and no configuration for this handler.
void Alarm::parse(std::string UNUSED(message)) {}
void Alarm::loadConfig(std::string UNUSED(config)) {}
std::string Alarm::getConfig() { return "{}"; }
// Factory used by the plugin loader: returns a fresh instance tagged with
// |id|. Ownership transfers to the caller (plugin-framework convention).
HyphaHandler *Alarm::getInstance(std::string id) {
  Alarm *instance = new Alarm();
  instance->setId(id);
  return instance;
}
// Handles an incoming JSON message. Tracks per-sensor alarm state in the
// `alarm` map keyed by the message's "id"; when a sensor's alarm flag
// changes, emits a JSON message whose "mail" field describes the event.
// NOTE(review): the "movement" branch fills |sendobject| but sendMessage()
// is only called in the alarm-change branch, so a movement-only message is
// never sent -- confirm whether that is intentional.
void Alarm::receiveMessage(std::string message) {
  boost::property_tree::ptree ptjson;
  std::stringstream ssjson(message);
  boost::property_tree::read_json(ssjson, ptjson);
  boost::property_tree::ptree sendobject;
  if (ptjson.get_optional<bool>("movement")) {
    std::string mail = "movement";
    sendobject.put("mail", mail);
  }
  if (ptjson.get_optional<bool>("alarm")) {
    std::string id = ptjson.get<std::string>("id");
    bool isAlarm = ptjson.get<bool>("alarm");
    if (alarm.find(id) == alarm.end()) {
      // First time we see this sensor: record state, don't notify.
      alarm[id] = isAlarm;
    } else if (alarm[id] != isAlarm) {
      alarm[id] = isAlarm;
      // Render "value" as text; try bool first, then number, else raw
      // string (get_optional fails when the stored text can't convert).
      std::string value;
      if (ptjson.get_optional<bool>("value")) {
        value = ptjson.get<bool>("value") ? "true" : "false";
      } else if (ptjson.get_optional<double>("value")) {
        std::stringstream tostring;
        tostring << ptjson.get<double>("value");
        value = tostring.str();
      } else {
        value = ptjson.get<std::string>("value");
      }
      sendobject.put("mail", ptjson.get<std::string>("id") + "(" +
                                 ptjson.get<std::string>("type") + ") " +
                                 value);
      std::stringstream ssso;
      boost::property_tree::write_json(ssso, sendobject);
      sendMessage(ssso.str());
    }
  }
}
// This handler exposes no synchronous query interface.
std::string Alarm::communicate(std::string UNUSED(message)) { return ""; }
// POCO manifest: exports Alarm as a loadable HyphaHandler plugin class.
POCO_BEGIN_MANIFEST(HyphaHandler)
POCO_EXPORT_CLASS(Alarm)
POCO_END_MANIFEST
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose:
** Tests that wcsncmp case-sensitively compares wide strings, making sure that
** the count argument is handled correctly.
**
**
**==========================================================================*/
#include <palsuite.h>
/*
* Notes: uses wcslen.
*/
/*
 * Verifies that wcsncmp compares case-sensitively and honors the count
 * argument: comparing only the common prefix reports equality, while
 * comparing past it reports the proper ordering. Uses wcslen.
 * Note: wcslen returns size_t, so its results are cast to int to match the
 * %d conversion in the printf-style Fail() messages.
 */
PALTEST(c_runtime_wcsncmp_test1_paltest_wcsncmp_test1, "c_runtime/wcsncmp/test1/paltest_wcsncmp_test1")
{
    WCHAR str1[] = {'f','o','o',0};
    WCHAR str2[] = {'f','o','o','x',0};
    WCHAR str3[] = {'f','O','o',0};
    char cstr1[] = "foo";
    char cstr2[] = "foox";
    char cstr3[] = "fOo";

    if (PAL_Initialize(argc, argv))
    {
        return FAIL;
    }

    /* "foo" sorts before "foox" over the longer length. */
    if (wcsncmp(str1, str2, wcslen(str2)) >= 0)
    {
        Fail("ERROR: wcsncmp(\"%s\", \"%s\", %d) returned >= 0\n", cstr1,
             cstr2, (int)wcslen(str2));
    }
    if (wcsncmp(str2, str1, wcslen(str2)) <= 0)
    {
        Fail("ERROR: wcsncmp(\"%s\", \"%s\", %d) returned <= 0\n", cstr2,
             cstr1, (int)wcslen(str2));
    }
    /* Equal when the comparison stops at the common prefix. */
    if (wcsncmp(str1, str2, wcslen(str1)) != 0)
    {
        Fail("ERROR: wcsncmp(\"%s\", \"%s\", %d) returned != 0\n", cstr1,
             cstr2, (int)wcslen(str1));
    }
    /* Case matters: 'o' > 'O', so "foo" > "fOo". */
    if (wcsncmp(str1, str3, wcslen(str1)) <= 0)
    {
        /* Fixed: this message previously said ">= 0" though the failing
           condition is "<= 0". */
        Fail("ERROR: wcsncmp(\"%s\", \"%s\", %d) returned <= 0\n", cstr1,
             cstr3, (int)wcslen(str1));
    }
    if (wcsncmp(str3, str1, wcslen(str1)) >= 0)
    {
        Fail("ERROR: wcsncmp(\"%s\", \"%s\", %d) returned >= 0\n", cstr3,
             cstr1, (int)wcslen(str1));
    }

    PAL_Terminate();
    return PASS;
}
|
/**
* Definition for singly-linked list.
* struct ListNode {
* int val;
* ListNode *next;
* ListNode(int x) : val(x), next(NULL) {}
* };
*/
class Solution {
public:
    // Rearranges the list so all odd-position nodes (1-indexed) come first,
    // followed by the even-position nodes; relative order inside each group
    // is preserved. Runs in O(n) time with O(1) extra space.
    ListNode* oddEvenList(ListNode* head) {
        if (head == nullptr || head->next == nullptr) {
            return head;
        }
        ListNode* oddTail = head;         // last node of the odd chain
        ListNode* evenHead = head->next;  // spliced back in at the end
        ListNode* evenTail = evenHead;    // last node of the even chain
        while (evenTail != nullptr && evenTail->next != nullptr) {
            oddTail->next = evenTail->next;  // take the next odd node
            oddTail = oddTail->next;
            evenTail->next = oddTail->next;  // take the next even node (may be null)
            evenTail = evenTail->next;
        }
        oddTail->next = evenHead;  // append the even chain after the odd chain
        return head;
    }
};
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/common/gpu/client/gl_helper_scaling.h"
#include <deque>
#include <string>
#include <vector>
#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
#include "third_party/WebKit/public/platform/WebCString.h"
#include "third_party/skia/include/core/SkRegion.h"
#include "ui/gfx/rect.h"
#include "ui/gfx/size.h"
#include "ui/gl/gl_bindings.h"
using blink::WebGLId;
using blink::WebGraphicsContext3D;
namespace content {
// |context| and |helper| must outlive this object. The vertex attribute
// buffer holding the shared full-screen quad is created and filled up front
// (see InitBuffer / kVertexAttributes).
GLHelperScaling::GLHelperScaling(blink::WebGraphicsContext3D* context,
                                 GLHelper* helper)
  : context_(context),
    helper_(helper),
    vertex_attributes_buffer_(context_, context_->createBuffer()) {
  InitBuffer();
}

GLHelperScaling::~GLHelperScaling() {
}
// Used to keep track of a generated shader program. The program
// is passed in as text through Setup and is used by calling
// UseProgram() with the right parameters. Note that |context_|
// and |helper_| are assumed to live longer than this program.
// Instances are ref-counted and cached in
// GLHelperScaling::shader_programs_ (see GetShaderProgram).
class ShaderProgram : public base::RefCounted<ShaderProgram> {
 public:
  ShaderProgram(WebGraphicsContext3D* context,
                GLHelper* helper)
      : context_(context),
        helper_(helper),
        program_(context, context->createProgram()) {
  }
  // Compile shader program, return true if successful.
  bool Setup(const blink::WGC3Dchar* vertex_shader_text,
             const blink::WGC3Dchar* fragment_shader_text);
  // UseProgram must be called with GL_TEXTURE_2D bound to the
  // source texture and GL_ARRAY_BUFFER bound to a vertex
  // attribute buffer.
  void UseProgram(const gfx::Size& src_size,
                  const gfx::Rect& src_subrect,
                  const gfx::Size& dst_size,
                  bool scale_x,
                  bool flip_y,
                  GLfloat color_weights[4]);
 private:
  friend class base::RefCounted<ShaderProgram>;
  ~ShaderProgram() {}
  WebGraphicsContext3D* context_;
  GLHelper* helper_;
  // A program for copying a source texture into a destination texture.
  ScopedProgram program_;
  // The location of the position in the program.
  blink::WGC3Dint position_location_;
  // The location of the texture coordinate in the program.
  blink::WGC3Dint texcoord_location_;
  // The location of the source texture in the program.
  blink::WGC3Dint texture_location_;
  // The location of the texture coordinate of
  // the sub-rectangle in the program.
  blink::WGC3Dint src_subrect_location_;
  // Location of size of source image in pixels.
  blink::WGC3Dint src_pixelsize_location_;
  // Location of size of destination image in pixels.
  blink::WGC3Dint dst_pixelsize_location_;
  // Location of vector for scaling direction.
  blink::WGC3Dint scaling_vector_location_;
  // Location of color weights.
  blink::WGC3Dint color_weights_location_;
  DISALLOW_COPY_AND_ASSIGN(ShaderProgram);
};
// Implementation of a single stage in a scaler pipeline. If the pipeline has
// multiple stages, it calls Scale() on the subscaler, then further scales the
// output. Caches textures and framebuffers to avoid allocating/deleting
// them once per frame, which can be expensive on some drivers.
class ScalerImpl :
    public GLHelper::ScalerInterface,
    public GLHelperScaling::ShaderInterface {
 public:
  // |context| and |copy_impl| are expected to live longer than this object.
  // |src_size| is the size of the input texture in pixels.
  // |dst_size| is the size of the output texture in pixels.
  // |src_subrect| is the portion of the src to copy to the output texture.
  // If |scale_x| is true, we are scaling along the X axis, otherwise Y.
  // If we are scaling in both X and Y, |scale_x| is ignored.
  // If |vertically_flip_texture| is true, output will be upside-down.
  // If |swizzle| is true, RGBA will be transformed into BGRA.
  // |color_weights| are only used together with SHADER_PLANAR to specify
  // how to convert RGB colors into a single value.
  ScalerImpl(WebGraphicsContext3D* context,
             GLHelperScaling* scaler_helper,
             const GLHelperScaling::ScalerStage &scaler_stage,
             ScalerImpl* subscaler,
             const float* color_weights) :
      context_(context),
      scaler_helper_(scaler_helper),
      spec_(scaler_stage),
      intermediate_texture_(0),
      dst_framebuffer_(context, context_->createFramebuffer()),
      subscaler_(subscaler) {
    if (color_weights) {
      color_weights_[0] = color_weights[0];
      color_weights_[1] = color_weights[1];
      color_weights_[2] = color_weights[2];
      color_weights_[3] = color_weights[3];
    } else {
      color_weights_[0] = 0.0;
      color_weights_[1] = 0.0;
      color_weights_[2] = 0.0;
      color_weights_[3] = 0.0;
    }
    shader_program_ = scaler_helper_->GetShaderProgram(spec_.shader,
                                                       spec_.swizzle);

    if (subscaler_) {
      // Allocate the texture that receives the subscaler's output and
      // serves as this stage's input.
      intermediate_texture_ = context_->createTexture();
      ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(
          context_,
          intermediate_texture_);
      context_->texImage2D(GL_TEXTURE_2D,
                           0,
                           GL_RGBA,
                           spec_.src_size.width(),
                           spec_.src_size.height(),
                           0,
                           GL_RGBA,
                           GL_UNSIGNED_BYTE,
                           NULL);
    }
  }

  virtual ~ScalerImpl() {
    if (intermediate_texture_) {
      context_->deleteTexture(intermediate_texture_);
    }
  }

  // GLHelperShader::ShaderInterface implementation.
  // Renders the scaled result into |dest_textures|; more than one entry
  // uses multiple render targets (drawBuffersEXT).
  virtual void Execute(
      blink::WebGLId source_texture,
      const std::vector<blink::WebGLId>& dest_textures) OVERRIDE {
    if (subscaler_) {
      // Run the earlier pipeline stage first, then consume its output.
      subscaler_->Scale(source_texture, intermediate_texture_);
      source_texture = intermediate_texture_;
    }

    ScopedFramebufferBinder<GL_FRAMEBUFFER> framebuffer_binder(
        context_,
        dst_framebuffer_);
    DCHECK_GT(dest_textures.size(), 0U);
    scoped_ptr<blink::WGC3Denum[]> buffers(
        new blink::WGC3Denum[dest_textures.size()]);
    for (size_t t = 0; t < dest_textures.size(); t++) {
      ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(context_,
                                                        dest_textures[t]);
      context_->framebufferTexture2D(GL_FRAMEBUFFER,
                                     GL_COLOR_ATTACHMENT0 + t,
                                     GL_TEXTURE_2D,
                                     dest_textures[t],
                                     0);
      buffers[t] = GL_COLOR_ATTACHMENT0 + t;
    }
    ScopedTextureBinder<GL_TEXTURE_2D> texture_binder(context_,
                                                      source_texture);

    context_->texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
                            GL_LINEAR);
    context_->texParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
                            GL_LINEAR);
    context_->texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
                            GL_CLAMP_TO_EDGE);
    context_->texParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
                            GL_CLAMP_TO_EDGE);

    ScopedBufferBinder<GL_ARRAY_BUFFER> buffer_binder(
        context_,
        scaler_helper_->vertex_attributes_buffer_);
    shader_program_->UseProgram(spec_.src_size,
                                spec_.src_subrect,
                                spec_.dst_size,
                                spec_.scale_x,
                                spec_.vertically_flip_texture,
                                color_weights_);
    context_->viewport(0, 0, spec_.dst_size.width(), spec_.dst_size.height());

    if (dest_textures.size() > 1) {
      DCHECK_LE(static_cast<int>(dest_textures.size()),
                scaler_helper_->helper_->MaxDrawBuffers());
      context_->drawBuffersEXT(dest_textures.size(), buffers.get());
    }
    // Conduct texture mapping by drawing a quad composed of two triangles.
    context_->drawArrays(GL_TRIANGLE_STRIP, 0, 4);
    if (dest_textures.size() > 1) {
      // Set the draw buffers back to not confuse others.
      context_->drawBuffersEXT(1, &buffers[0]);
    }
  }

  // GLHelper::ScalerInterface implementation.
  virtual void Scale(blink::WebGLId source_texture,
                     blink::WebGLId dest_texture) OVERRIDE {
    std::vector<blink::WebGLId> tmp(1);
    tmp[0] = dest_texture;
    Execute(source_texture, tmp);
  }

  // Src accessors report the FIRST stage's geometry so callers see the
  // pipeline's overall input; DstSize is this (final) stage's output.
  virtual const gfx::Size& SrcSize() OVERRIDE {
    if (subscaler_) {
      return subscaler_->SrcSize();
    }
    return spec_.src_size;
  }
  virtual const gfx::Rect& SrcSubrect() OVERRIDE {
    if (subscaler_) {
      return subscaler_->SrcSubrect();
    }
    return spec_.src_subrect;
  }
  virtual const gfx::Size& DstSize() OVERRIDE {
    return spec_.dst_size;
  }

 private:
  WebGraphicsContext3D* context_;
  GLHelperScaling* scaler_helper_;
  GLHelperScaling::ScalerStage spec_;
  GLfloat color_weights_[4];
  blink::WebGLId intermediate_texture_;
  scoped_refptr<ShaderProgram> shader_program_;
  ScopedFramebuffer dst_framebuffer_;
  scoped_ptr<ScalerImpl> subscaler_;
};
// Value-carrying constructor: records the shader and geometry describing
// one scaling pass; no work happens here.
GLHelperScaling::ScalerStage::ScalerStage(
    ShaderType shader_,
    gfx::Size src_size_,
    gfx::Rect src_subrect_,
    gfx::Size dst_size_,
    bool scale_x_,
    bool vertically_flip_texture_,
    bool swizzle_)
  : shader(shader_),
    src_size(src_size_),
    src_subrect(src_subrect_),
    dst_size(dst_size_),
    scale_x(scale_x_),
    vertically_flip_texture(vertically_flip_texture_),
    swizzle(swizzle_) {
}
// The important inputs for this function are |x_ops| and
// |y_ops|. They represent scaling operations to be done
// on an image of size |src_size|. If |quality| is SCALER_QUALITY_BEST,
// then we will interpret these scale operations literally and we'll
// create one scaler stage for each ScaleOp. However, if |quality|
// is SCALER_QUALITY_GOOD, then we can do a whole bunch of optimizations
// by combining two or more ScaleOps into a single scaler stage.
// Normally we process ScaleOps from |y_ops| first and |x_ops| after
// all |y_ops| are processed, but sometimes we can combine one or more
// operations from both queues essentially for free. This is the reason
// why |x_ops| and |y_ops| aren't just one single queue.
void GLHelperScaling::ConvertScalerOpsToScalerStages(
    GLHelper::ScalerQuality quality,
    gfx::Size src_size,
    gfx::Rect src_subrect,
    const gfx::Size& dst_size,
    bool vertically_flip_texture,
    bool swizzle,
    std::deque<GLHelperScaling::ScaleOp>* x_ops,
    std::deque<GLHelperScaling::ScaleOp>* y_ops,
    std::vector<ScalerStage> *scaler_stages) {
  while (!x_ops->empty() || !y_ops->empty()) {
    gfx::Size intermediate_size = src_subrect.size();
    std::deque<ScaleOp>* current_queue = NULL;

    // Y ops are consumed before X ops.
    if (!y_ops->empty()) {
      current_queue = y_ops;
    } else {
      current_queue = x_ops;
    }

    // Pick the shader for this op's scale factor (0 = upscale).
    ShaderType current_shader = SHADER_BILINEAR;
    switch (current_queue->front().scale_factor) {
      case 0:
        if (quality == GLHelper::SCALER_QUALITY_BEST) {
          current_shader = SHADER_BICUBIC_UPSCALE;
        }
        break;
      case 2:
        if (quality == GLHelper::SCALER_QUALITY_BEST) {
          current_shader = SHADER_BICUBIC_HALF_1D;
        }
        break;
      case 3:
        DCHECK(quality != GLHelper::SCALER_QUALITY_BEST);
        current_shader = SHADER_BILINEAR3;
        break;
      default:
        NOTREACHED();
    }
    bool scale_x = current_queue->front().scale_x;
    current_queue->front().UpdateSize(&intermediate_size);
    current_queue->pop_front();

    // Optimization: Sometimes we can combine 2-4 scaling operations into
    // one operation.
    if (quality == GLHelper::SCALER_QUALITY_GOOD) {
      if (!current_queue->empty() && current_shader == SHADER_BILINEAR) {
        // Combine two steps in the same dimension.
        current_queue->front().UpdateSize(&intermediate_size);
        current_queue->pop_front();
        current_shader = SHADER_BILINEAR2;
        if (!current_queue->empty()) {
          // Combine three steps in the same dimension.
          current_queue->front().UpdateSize(&intermediate_size);
          current_queue->pop_front();
          current_shader = SHADER_BILINEAR4;
        }
      }
      // Check if we can combine some steps in the other dimension as well.
      // Since all shaders currently use GL_LINEAR, we can easily scale up
      // or scale down by exactly 2x at the same time as we do another
      // operation. Currently, the following mergers are supported:
      // * 1 bilinear Y-pass with 1 bilinear X-pass (up or down)
      // * 2 bilinear Y-passes with 2 bilinear X-passes
      // * 1 bilinear Y-pass with N bilinear X-pass
      // * N bilinear Y-passes with 1 bilinear X-pass (down only)
      // Measurements indicate that generalizing this for 3x3 and 4x4
      // makes it slower on some platforms, such as the Pixel.
      if (!scale_x && x_ops->size() > 0 &&
          x_ops->front().scale_factor <= 2) {
        int x_passes = 0;
        if (current_shader == SHADER_BILINEAR2 && x_ops->size() >= 2) {
          // 2y + 2x passes
          x_passes = 2;
          current_shader = SHADER_BILINEAR2X2;
        } else if (current_shader == SHADER_BILINEAR) {
          // 1y + Nx passes
          scale_x = true;
          switch (x_ops->size()) {
            case 0:
              NOTREACHED();
              // Fall through (unreachable).
            case 1:
              if (x_ops->front().scale_factor == 3) {
                current_shader = SHADER_BILINEAR3;
              }
              x_passes = 1;
              break;
            case 2:
              x_passes = 2;
              current_shader = SHADER_BILINEAR2;
              break;
            default:
              x_passes = 3;
              current_shader = SHADER_BILINEAR4;
              break;
          }
        } else if (x_ops->front().scale_factor == 2) {
          // Ny + 1x-downscale
          x_passes = 1;
        }

        for (int i = 0; i < x_passes; i++) {
          x_ops->front().UpdateSize(&intermediate_size);
          x_ops->pop_front();
        }
      }
    }

    scaler_stages->push_back(ScalerStage(current_shader,
                                         src_size,
                                         src_subrect,
                                         intermediate_size,
                                         scale_x,
                                         vertically_flip_texture,
                                         swizzle));
    // The next stage consumes this stage's full output; flip and swizzle
    // only happen once, in the first stage.
    src_size = intermediate_size;
    src_subrect = gfx::Rect(intermediate_size);
    vertically_flip_texture = false;
    swizzle = false;
  }
}
// Plans the stage list for a scale from |src_subrect| of |src_size| down/up
// to |dst_size|. FAST quality (or a no-op resize) is a single bilinear pass;
// otherwise the request is decomposed into per-axis ScaleOps and merged into
// stages by ConvertScalerOpsToScalerStages.
void GLHelperScaling::ComputeScalerStages(
    GLHelper::ScalerQuality quality,
    const gfx::Size& src_size,
    const gfx::Rect& src_subrect,
    const gfx::Size& dst_size,
    bool vertically_flip_texture,
    bool swizzle,
    std::vector<ScalerStage> *scaler_stages) {
  if (quality == GLHelper::SCALER_QUALITY_FAST ||
      src_subrect.size() == dst_size) {
    scaler_stages->push_back(ScalerStage(SHADER_BILINEAR,
                                         src_size,
                                         src_subrect,
                                         dst_size,
                                         false,
                                         vertically_flip_texture,
                                         swizzle));
    return;
  }

  std::deque<GLHelperScaling::ScaleOp> x_ops, y_ops;
  GLHelperScaling::ScaleOp::AddOps(src_subrect.width(),
                                   dst_size.width(),
                                   true,
                                   quality == GLHelper::SCALER_QUALITY_GOOD,
                                   &x_ops);
  GLHelperScaling::ScaleOp::AddOps(src_subrect.height(),
                                   dst_size.height(),
                                   false,
                                   quality == GLHelper::SCALER_QUALITY_GOOD,
                                   &y_ops);

  ConvertScalerOpsToScalerStages(
      quality,
      src_size,
      src_subrect,
      dst_size,
      vertically_flip_texture,
      swizzle,
      &x_ops,
      &y_ops,
      scaler_stages);
}
// Builds the scaler pipeline: each ScalerImpl takes the previously built
// one as its subscaler (and owns it), so the returned object is the final
// stage. Returns NULL when no stages are needed; caller owns the result.
GLHelper::ScalerInterface*
GLHelperScaling::CreateScaler(GLHelper::ScalerQuality quality,
                              gfx::Size src_size,
                              gfx::Rect src_subrect,
                              const gfx::Size& dst_size,
                              bool vertically_flip_texture,
                              bool swizzle) {
  std::vector<ScalerStage> scaler_stages;
  ComputeScalerStages(quality,
                      src_size,
                      src_subrect,
                      dst_size,
                      vertically_flip_texture,
                      swizzle,
                      &scaler_stages);

  ScalerImpl* ret = NULL;
  for (unsigned int i = 0; i < scaler_stages.size(); i++) {
    ret = new ScalerImpl(context_, this, scaler_stages[i], ret, NULL);
  }
  return ret;
}
// Single-stage SHADER_PLANAR scaler; |color_weights| select how RGB is
// folded into one plane value. Caller owns the returned scaler.
GLHelper::ScalerInterface*
GLHelperScaling::CreatePlanarScaler(
    const gfx::Size& src_size,
    const gfx::Rect& src_subrect,
    const gfx::Size& dst_size,
    bool vertically_flip_texture,
    const float color_weights[4]) {
  ScalerStage stage(SHADER_PLANAR,
                    src_size,
                    src_subrect,
                    dst_size,
                    true,
                    vertically_flip_texture,
                    false);
  return new ScalerImpl(context_, this, stage, NULL, color_weights);
}
// Single-stage YUV multiple-render-target shader (pass 1 or pass 2 only).
// Caller owns the returned object.
GLHelperScaling::ShaderInterface*
GLHelperScaling::CreateYuvMrtShader(
    const gfx::Size& src_size,
    const gfx::Rect& src_subrect,
    const gfx::Size& dst_size,
    bool vertically_flip_texture,
    ShaderType shader) {
  DCHECK(shader == SHADER_YUV_MRT_PASS1 || shader == SHADER_YUV_MRT_PASS2);
  ScalerStage stage(shader,
                    src_size,
                    src_subrect,
                    dst_size,
                    true,
                    vertically_flip_texture,
                    false);
  return new ScalerImpl(context_, this, stage, NULL, NULL);
}
// Full-screen quad as a 4-vertex triangle strip; each row is
// x, y (clip space) followed by u, v (texture coordinates).
const blink::WGC3Dfloat GLHelperScaling::kVertexAttributes[] = {
  -1.0f, -1.0f, 0.0f, 0.0f,
  1.0f, -1.0f, 1.0f, 0.0f,
  -1.0f, 1.0f, 0.0f, 1.0f,
  1.0f, 1.0f, 1.0f, 1.0f,
};

// Uploads the quad once; every stage reuses vertex_attributes_buffer_.
void GLHelperScaling::InitBuffer() {
  ScopedBufferBinder<GL_ARRAY_BUFFER> buffer_binder(
      context_, vertex_attributes_buffer_);
  context_->bufferData(GL_ARRAY_BUFFER,
                       sizeof(kVertexAttributes),
                       kVertexAttributes,
                       GL_STATIC_DRAW);
}
// Returns the shader program for |type|, building and caching it on first
// use (keyed on (type, swizzle)).  The GLSL source is assembled from a
// shared prologue plus per-shader-type fragments; when |swizzle| is true
// the fragment shader ends by swapping the red and blue output channels.
scoped_refptr<ShaderProgram>
GLHelperScaling::GetShaderProgram(ShaderType type,
                                  bool swizzle) {
  ShaderProgramKeyType key(type, swizzle);
  scoped_refptr<ShaderProgram>& cache_entry(shader_programs_[key]);
  if (!cache_entry.get()) {
    cache_entry = new ShaderProgram(context_, helper_);
    std::basic_string<blink::WGC3Dchar> vertex_program;
    std::basic_string<blink::WGC3Dchar> fragment_program;
    std::basic_string<blink::WGC3Dchar> vertex_header;
    std::basic_string<blink::WGC3Dchar> fragment_directives;
    std::basic_string<blink::WGC3Dchar> fragment_header;
    std::basic_string<blink::WGC3Dchar> shared_variables;
    // Common vertex inputs: the quad position/texcoord attributes plus the
    // source sub-rectangle (in texture coordinates) set by UseProgram().
    vertex_header.append(
        "precision highp float;\n"
        "attribute vec2 a_position;\n"
        "attribute vec2 a_texcoord;\n"
        "uniform vec4 src_subrect;\n");
    fragment_header.append(
        "precision mediump float;\n"
        "uniform sampler2D s_texture;\n");
    vertex_program.append(
        " gl_Position = vec4(a_position, 0.0, 1.0);\n"
        " vec2 texcoord = src_subrect.xy + a_texcoord * src_subrect.zw;\n");
    switch (type) {
      case SHADER_BILINEAR:
        shared_variables.append("varying vec2 v_texcoord;\n");
        vertex_program.append(" v_texcoord = texcoord;\n");
        fragment_program.append(
            " gl_FragColor = texture2D(s_texture, v_texcoord);\n");
        break;
      case SHADER_BILINEAR2:
        // This is equivalent to two passes of the BILINEAR shader above.
        // It can be used to scale an image down 1.0x-2.0x in either dimension,
        // or exactly 4x.
        shared_variables.append(
            "varying vec4 v_texcoords;\n");  // 2 texcoords packed in one quad
        vertex_header.append(
            "uniform vec2 scaling_vector;\n"
            "uniform vec2 dst_pixelsize;\n");
        vertex_program.append(
            " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
            " step /= 4.0;\n"
            " v_texcoords.xy = texcoord + step;\n"
            " v_texcoords.zw = texcoord - step;\n");
        fragment_program.append(
            " gl_FragColor = (texture2D(s_texture, v_texcoords.xy) +\n"
            " texture2D(s_texture, v_texcoords.zw)) / 2.0;\n");
        break;
      case SHADER_BILINEAR3:
        // This is kind of like doing 1.5 passes of the BILINEAR shader.
        // It can be used to scale an image down 1.5x-3.0x, or exactly 6x.
        shared_variables.append(
            "varying vec4 v_texcoords1;\n"  // 2 texcoords packed in one quad
            "varying vec2 v_texcoords2;\n");
        vertex_header.append(
            "uniform vec2 scaling_vector;\n"
            "uniform vec2 dst_pixelsize;\n");
        vertex_program.append(
            " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
            " step /= 3.0;\n"
            " v_texcoords1.xy = texcoord + step;\n"
            " v_texcoords1.zw = texcoord;\n"
            " v_texcoords2 = texcoord - step;\n");
        fragment_program.append(
            " gl_FragColor = (texture2D(s_texture, v_texcoords1.xy) +\n"
            " texture2D(s_texture, v_texcoords1.zw) +\n"
            " texture2D(s_texture, v_texcoords2)) / 3.0;\n");
        break;
      case SHADER_BILINEAR4:
        // This is equivalent to three passes of the BILINEAR shader above,
        // It can be used to scale an image down 2.0x-4.0x or exactly 8x.
        shared_variables.append(
            "varying vec4 v_texcoords[2];\n");
        vertex_header.append(
            "uniform vec2 scaling_vector;\n"
            "uniform vec2 dst_pixelsize;\n");
        vertex_program.append(
            " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
            " step /= 8.0;\n"
            " v_texcoords[0].xy = texcoord - step * 3.0;\n"
            " v_texcoords[0].zw = texcoord - step;\n"
            " v_texcoords[1].xy = texcoord + step;\n"
            " v_texcoords[1].zw = texcoord + step * 3.0;\n");
        fragment_program.append(
            " gl_FragColor = (\n"
            " texture2D(s_texture, v_texcoords[0].xy) +\n"
            " texture2D(s_texture, v_texcoords[0].zw) +\n"
            " texture2D(s_texture, v_texcoords[1].xy) +\n"
            " texture2D(s_texture, v_texcoords[1].zw)) / 4.0;\n");
        break;
      case SHADER_BILINEAR2X2:
        // This is equivalent to four passes of the BILINEAR shader above.
        // Two in each dimension. It can be used to scale an image down
        // 1.0x-2.0x in both X and Y directions. Or, it could be used to
        // scale an image down by exactly 4x in both dimensions.
        shared_variables.append(
            "varying vec4 v_texcoords[2];\n");
        vertex_header.append(
            "uniform vec2 dst_pixelsize;\n");
        vertex_program.append(
            " vec2 step = src_subrect.zw / 4.0 / dst_pixelsize;\n"
            " v_texcoords[0].xy = texcoord + vec2(step.x, step.y);\n"
            " v_texcoords[0].zw = texcoord + vec2(step.x, -step.y);\n"
            " v_texcoords[1].xy = texcoord + vec2(-step.x, step.y);\n"
            " v_texcoords[1].zw = texcoord + vec2(-step.x, -step.y);\n");
        fragment_program.append(
            " gl_FragColor = (\n"
            " texture2D(s_texture, v_texcoords[0].xy) +\n"
            " texture2D(s_texture, v_texcoords[0].zw) +\n"
            " texture2D(s_texture, v_texcoords[1].xy) +\n"
            " texture2D(s_texture, v_texcoords[1].zw)) / 4.0;\n");
        break;
      case SHADER_BICUBIC_HALF_1D:
        // This scales the texture down by exactly half in one dimension in
        // a single pass. We use bilinear lookups to reduce the number of
        // texture reads from 8 to 4.
        shared_variables.append(
            "const float CenterDist = 99.0 / 140.0;\n"
            "const float LobeDist = 11.0 / 4.0;\n"
            "const float CenterWeight = 35.0 / 64.0;\n"
            "const float LobeWeight = -3.0 / 64.0;\n"
            "varying vec4 v_texcoords[2];\n");
        vertex_header.append(
            "uniform vec2 scaling_vector;\n"
            "uniform vec2 src_pixelsize;\n");
        vertex_program.append(
            " vec2 step = src_subrect.zw * scaling_vector / src_pixelsize;\n"
            " v_texcoords[0].xy = texcoord - LobeDist * step;\n"
            " v_texcoords[0].zw = texcoord - CenterDist * step;\n"
            " v_texcoords[1].xy = texcoord + CenterDist * step;\n"
            " v_texcoords[1].zw = texcoord + LobeDist * step;\n");
        fragment_program.append(
            " gl_FragColor = \n"
            // Lobe pixels
            " (texture2D(s_texture, v_texcoords[0].xy) +\n"
            " texture2D(s_texture, v_texcoords[1].zw)) *\n"
            " LobeWeight +\n"
            // Center pixels
            " (texture2D(s_texture, v_texcoords[0].zw) +\n"
            " texture2D(s_texture, v_texcoords[1].xy)) *\n"
            " CenterWeight;\n");
        break;
      case SHADER_BICUBIC_UPSCALE:
        // When scaling up, we need 4 texture reads, but we can
        // save some instructions because we know in which range of
        // the bicubic function each call will be.
        // Also, when sampling the bicubic function like this, the sum
        // is always exactly one, so we can skip normalization as well.
        shared_variables.append(
            "varying vec2 v_texcoord;\n");
        vertex_program.append(
            " v_texcoord = texcoord;\n");
        fragment_header.append(
            "uniform vec2 src_pixelsize;\n"
            "uniform vec2 scaling_vector;\n"
            "const float a = -0.5;\n"
            // This function is equivalent to calling the bicubic
            // function with x-1, x, 1-x and 2-x
            // (assuming 0 <= x < 1)
            "vec4 filt4(float x) {\n"
            " return vec4(x * x * x, x * x, x, 1) *\n"
            " mat4( a, -2.0 * a, a, 0.0,\n"
            " a + 2.0, -a - 3.0, 0.0, 1.0,\n"
            " -a - 2.0, 3.0 + 2.0 * a, -a, 0.0,\n"
            " -a, a, 0.0, 0.0);\n"
            "}\n"
            "mat4 pixels_x(vec2 pos, vec2 step) {\n"
            " return mat4(\n"
            " texture2D(s_texture, pos - step),\n"
            " texture2D(s_texture, pos),\n"
            " texture2D(s_texture, pos + step),\n"
            " texture2D(s_texture, pos + step * 2.0));\n"
            "}\n");
        fragment_program.append(
            " vec2 pixel_pos = v_texcoord * src_pixelsize - \n"
            " scaling_vector / 2.0;\n"
            " float frac = fract(dot(pixel_pos, scaling_vector));\n"
            " vec2 base = (floor(pixel_pos) + vec2(0.5)) / src_pixelsize;\n"
            " vec2 step = scaling_vector / src_pixelsize;\n"
            " gl_FragColor = pixels_x(base, step) * filt4(frac);\n");
        break;
      case SHADER_PLANAR:
        // Converts four RGBA pixels into one pixel. Each RGBA
        // pixel will be dot-multiplied with the color weights and
        // then placed into a component of the output. This is used to
        // convert RGBA textures into Y, U and V textures. We do this
        // because single-component textures are not renderable on all
        // architectures.
        shared_variables.append(
            "varying vec4 v_texcoords[2];\n");
        vertex_header.append(
            "uniform vec2 scaling_vector;\n"
            "uniform vec2 dst_pixelsize;\n");
        vertex_program.append(
            " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
            " step /= 4.0;\n"
            " v_texcoords[0].xy = texcoord - step * 1.5;\n"
            " v_texcoords[0].zw = texcoord - step * 0.5;\n"
            " v_texcoords[1].xy = texcoord + step * 0.5;\n"
            " v_texcoords[1].zw = texcoord + step * 1.5;\n");
        fragment_header.append(
            "uniform vec4 color_weights;\n");
        fragment_program.append(
            " gl_FragColor = color_weights * mat4(\n"
            " vec4(texture2D(s_texture, v_texcoords[0].xy).rgb, 1.0),\n"
            " vec4(texture2D(s_texture, v_texcoords[0].zw).rgb, 1.0),\n"
            " vec4(texture2D(s_texture, v_texcoords[1].xy).rgb, 1.0),\n"
            " vec4(texture2D(s_texture, v_texcoords[1].zw).rgb, 1.0));\n");
        // Swizzle makes no sense for this shader.
        DCHECK(!swizzle);
        break;
      case SHADER_YUV_MRT_PASS1:
        // RGB24 to YV12 in two passes; writing two 8888 targets each pass.
        //
        // YV12 is full-resolution luma and half-resolution blue/red chroma.
        //
        //                  (original)
        //    XRGB XRGB XRGB XRGB XRGB XRGB XRGB XRGB
        //    XRGB XRGB XRGB XRGB XRGB XRGB XRGB XRGB
        //    XRGB XRGB XRGB XRGB XRGB XRGB XRGB XRGB
        //    XRGB XRGB XRGB XRGB XRGB XRGB XRGB XRGB
        //    XRGB XRGB XRGB XRGB XRGB XRGB XRGB XRGB
        //    XRGB XRGB XRGB XRGB XRGB XRGB XRGB XRGB
        //      |
        //      |      (y plane)    (temporary)
        //      |      YYYY YYYY     UUVV UUVV
        //      +--> { YYYY YYYY  +  UUVV UUVV }
        //             YYYY YYYY     UUVV UUVV
        //   First     YYYY YYYY     UUVV UUVV
        //    pass     YYYY YYYY     UUVV UUVV
        //             YYYY YYYY     UUVV UUVV
        //                              |
        //                              |  (u plane) (v plane)
        //   Second                     |      UUUU   VVVV
        //     pass                     +--> { UUUU + VVVV }
        //                                     UUUU   VVVV
        //
        shared_variables.append(
            "varying vec4 v_texcoords[2];\n");
        vertex_header.append(
            "uniform vec2 scaling_vector;\n"
            "uniform vec2 dst_pixelsize;\n");
        vertex_program.append(
            " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
            " step /= 4.0;\n"
            " v_texcoords[0].xy = texcoord - step * 1.5;\n"
            " v_texcoords[0].zw = texcoord - step * 0.5;\n"
            " v_texcoords[1].xy = texcoord + step * 0.5;\n"
            " v_texcoords[1].zw = texcoord + step * 1.5;\n");
        // gl_FragData[] (multiple render targets) needs this extension.
        fragment_directives.append(
            "#extension GL_EXT_draw_buffers : enable\n");
        fragment_header.append(
            "const vec3 kRGBtoY = vec3(0.257, 0.504, 0.098);\n"
            "const float kYBias = 0.0625;\n"
            // Divide U and V by two to compensate for averaging below.
            "const vec3 kRGBtoU = vec3(-0.148, -0.291, 0.439) / 2.0;\n"
            "const vec3 kRGBtoV = vec3(0.439, -0.368, -0.071) / 2.0;\n"
            "const float kUVBias = 0.5;\n");
        fragment_program.append(
            " vec3 pixel1 = texture2D(s_texture, v_texcoords[0].xy).rgb;\n"
            " vec3 pixel2 = texture2D(s_texture, v_texcoords[0].zw).rgb;\n"
            " vec3 pixel3 = texture2D(s_texture, v_texcoords[1].xy).rgb;\n"
            " vec3 pixel4 = texture2D(s_texture, v_texcoords[1].zw).rgb;\n"
            " vec3 pixel12 = pixel1 + pixel2;\n"
            " vec3 pixel34 = pixel3 + pixel4;\n"
            " gl_FragData[0] = vec4(dot(pixel1, kRGBtoY),\n"
            " dot(pixel2, kRGBtoY),\n"
            " dot(pixel3, kRGBtoY),\n"
            " dot(pixel4, kRGBtoY)) + kYBias;\n"
            " gl_FragData[1] = vec4(dot(pixel12, kRGBtoU),\n"
            " dot(pixel34, kRGBtoU),\n"
            " dot(pixel12, kRGBtoV),\n"
            " dot(pixel34, kRGBtoV)) + kUVBias;\n");
        // Swizzle makes no sense for this shader.
        DCHECK(!swizzle);
        break;
      case SHADER_YUV_MRT_PASS2:
        // We're just sampling two pixels and unswizzling them. There's
        // no need to do vertical scaling with math, since bilinear
        // interpolation in the sampler takes care of that.
        shared_variables.append(
            "varying vec4 v_texcoords;\n");
        vertex_header.append(
            "uniform vec2 scaling_vector;\n"
            "uniform vec2 dst_pixelsize;\n");
        vertex_program.append(
            " vec2 step = scaling_vector * src_subrect.zw / dst_pixelsize;\n"
            " step /= 2.0;\n"
            " v_texcoords.xy = texcoord - step * 0.5;\n"
            " v_texcoords.zw = texcoord + step * 0.5;\n");
        fragment_directives.append(
            "#extension GL_EXT_draw_buffers : enable\n");
        fragment_program.append(
            " vec4 lo_uuvv = texture2D(s_texture, v_texcoords.xy);\n"
            " vec4 hi_uuvv = texture2D(s_texture, v_texcoords.zw);\n"
            " gl_FragData[0] = vec4(lo_uuvv.rg, hi_uuvv.rg);\n"
            " gl_FragData[1] = vec4(lo_uuvv.ba, hi_uuvv.ba);\n");
        // Swizzle makes no sense for this shader.
        DCHECK(!swizzle);
        break;
    }
    if (swizzle) {
      fragment_program.append(" gl_FragColor = gl_FragColor.bgra;\n");
    }
    // Wrap the accumulated statements in main() with their headers.
    vertex_program =
        vertex_header +
        shared_variables +
        "void main() {\n" +
        vertex_program +
        "}\n";
    fragment_program =
        fragment_directives +
        fragment_header +
        shared_variables +
        "void main() {\n" +
        fragment_program +
        "}\n";
    bool result = cache_entry->Setup(vertex_program.c_str(),
                                     fragment_program.c_str());
    // Setup() may legitimately fail if the GL context was lost.
    DCHECK(result || context_->isContextLost())
        << "vertex_program =\n" << vertex_program
        << "fragment_program =\n" << fragment_program;
  }
  return cache_entry;
}
// Compiles and links the given vertex/fragment shader sources into
// |program_| and caches all attribute/uniform locations used by the
// scaler shaders.  Returns false if compilation or linking fails.
// Locations of uniforms a particular shader does not declare are simply
// -1 and are ignored by the GL uniform calls in UseProgram().
bool ShaderProgram::Setup(const blink::WGC3Dchar* vertex_shader_text,
                          const blink::WGC3Dchar* fragment_shader_text) {
  // Shaders to map the source texture to |dst_texture_|.
  ScopedShader vertex_shader(context_, helper_->CompileShaderFromSource(
      vertex_shader_text, GL_VERTEX_SHADER));
  if (vertex_shader.id() == 0) {
    return false;
  }
  context_->attachShader(program_, vertex_shader);
  ScopedShader fragment_shader(context_, helper_->CompileShaderFromSource(
      fragment_shader_text, GL_FRAGMENT_SHADER));
  if (fragment_shader.id() == 0) {
    return false;
  }
  context_->attachShader(program_, fragment_shader);
  context_->linkProgram(program_);
  blink::WGC3Dint link_status = 0;
  context_->getProgramiv(program_, GL_LINK_STATUS, &link_status);
  if (!link_status) {
    LOG(ERROR) << std::string(context_->getProgramInfoLog(program_).utf8());
    return false;
  }
  // Cache the locations; not every shader uses every uniform.
  position_location_ = context_->getAttribLocation(program_, "a_position");
  texcoord_location_ = context_->getAttribLocation(program_, "a_texcoord");
  texture_location_ = context_->getUniformLocation(program_, "s_texture");
  src_subrect_location_ = context_->getUniformLocation(program_, "src_subrect");
  src_pixelsize_location_ = context_->getUniformLocation(program_,
                                                         "src_pixelsize");
  dst_pixelsize_location_ = context_->getUniformLocation(program_,
                                                         "dst_pixelsize");
  scaling_vector_location_ = context_->getUniformLocation(program_,
                                                          "scaling_vector");
  color_weights_location_ = context_->getUniformLocation(program_,
                                                         "color_weights");
  return true;
}
// Activates |program_| and sets all of its per-draw state: the
// interleaved (x, y, u, v) vertex attributes, the source sub-rectangle
// in texture coordinates, the pixel sizes, and the axis-selection
// vector.  |flip_y| flips the copy vertically by giving the subrect a
// negative height in texture space.
void ShaderProgram::UseProgram(
    const gfx::Size& src_size,
    const gfx::Rect& src_subrect,
    const gfx::Size& dst_size,
    bool scale_x,
    bool flip_y,
    GLfloat color_weights[4]) {
  context_->useProgram(program_);
  // Interleaved layout matching kVertexAttributes: 4 floats per vertex,
  // position at offset 0 and texcoord at offset 2.
  blink::WGC3Dintptr offset = 0;
  context_->vertexAttribPointer(position_location_,
                                2,
                                GL_FLOAT,
                                GL_FALSE,
                                4 * sizeof(blink::WGC3Dfloat),
                                offset);
  context_->enableVertexAttribArray(position_location_);
  offset += 2 * sizeof(blink::WGC3Dfloat);
  context_->vertexAttribPointer(texcoord_location_,
                                2,
                                GL_FLOAT,
                                GL_FALSE,
                                4 * sizeof(blink::WGC3Dfloat),
                                offset);
  context_->enableVertexAttribArray(texcoord_location_);
  context_->uniform1i(texture_location_, 0);
  // Convert |src_subrect| to texture coordinates.
  GLfloat src_subrect_texcoord[] = {
      static_cast<float>(src_subrect.x()) / src_size.width(),
      static_cast<float>(src_subrect.y()) / src_size.height(),
      static_cast<float>(src_subrect.width()) / src_size.width(),
      static_cast<float>(src_subrect.height()) / src_size.height(),
  };
  if (flip_y) {
    // Start at the bottom edge and sample upwards (negative height).
    src_subrect_texcoord[1] += src_subrect_texcoord[3];
    src_subrect_texcoord[3] *= -1.0;
  }
  context_->uniform4fv(src_subrect_location_, 1, src_subrect_texcoord);
  context_->uniform2f(src_pixelsize_location_,
                      src_size.width(),
                      src_size.height());
  context_->uniform2f(dst_pixelsize_location_,
                      static_cast<float>(dst_size.width()),
                      static_cast<float>(dst_size.height()));
  // (1, 0) scales along X, (0, 1) along Y; the shaders multiply their
  // sampling step by this vector.
  context_->uniform2f(scaling_vector_location_,
                      scale_x ? 1.0 : 0.0,
                      scale_x ? 0.0 : 1.0);
  context_->uniform4fv(color_weights_location_, 1, color_weights);
}
} // namespace content
|
#pragma once
#include <ice/math/types.hxx>
namespace ice::math
{

    // Pi truncated to f32 precision.  The 'f' suffix makes the
    // float conversion explicit (the stored value is unchanged), and
    // 'inline constexpr' gives this header-defined constant a single
    // definition across translation units instead of one internal-linkage
    // copy per TU.
    inline constexpr f32 const_pi = 3.14159265358979323846f;

} // namespace ice::math
|
////////////////////////////////////////////////////////////
//
// SFML - Simple and Fast Multimedia Library
// Copyright (C) 2007-2015 Laurent Gomila (laurent@sfml-dev.org)
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors be held liable for any damages arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it freely,
// subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented;
// you must not claim that you wrote the original software.
// If you use this software in a product, an acknowledgment
// in the product documentation would be appreciated but is not required.
//
// 2. Altered source versions must be plainly marked as such,
// and must not be misrepresented as being the original software.
//
// 3. This notice may not be removed or altered from any source distribution.
//
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Headers
////////////////////////////////////////////////////////////
#include <SFML/Graphics/Shader.hpp>
#include <SFML/Graphics/Texture.hpp>
#include <SFML/Graphics/GLCheck.hpp>
#include <SFML/Window/Context.hpp>
#include <SFML/System/InputStream.hpp>
#include <SFML/System/Mutex.hpp>
#include <SFML/System/Lock.hpp>
#include <SFML/System/Err.hpp>
#include <fstream>
#include <vector>
#ifndef SFML_OPENGL_ES
#if defined(SFML_SYSTEM_MACOS) || defined(SFML_SYSTEM_IOS)
#define castToGlHandle(x) reinterpret_cast<GLEXT_GLhandle>(static_cast<ptrdiff_t>(x))
#define castFromGlHandle(x) static_cast<unsigned int>(reinterpret_cast<ptrdiff_t>(x))
#else
#define castToGlHandle(x) (x)
#define castFromGlHandle(x) (x)
#endif
namespace
{
sf::Mutex mutex;
// Queries the GL driver for the number of texture image units usable
// across all shader stages combined.
GLint checkMaxTextureUnits()
{
    GLint maxUnits = 0;
    glCheck(glGetIntegerv(GLEXT_GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &maxUnits));

    return maxUnits;
}
// Retrieve the maximum number of texture units available
// The GL query runs only once (function-local static); the mutex guards
// against concurrent first calls on pre-C++11 compilers where static
// initialization is not thread-safe.
GLint getMaxTextureUnits()
{
    // TODO: Remove this lock when it becomes unnecessary in C++11
    sf::Lock lock(mutex);

    static GLint maxUnits = checkMaxTextureUnits();

    return maxUnits;
}
// Read the contents of a file into an array of char
// Returns false if the file cannot be opened or fully read.  On success
// |buffer| holds the file contents followed by a terminating '\0' so it
// can be handed to APIs expecting a C string.
bool getFileContents(const std::string& filename, std::vector<char>& buffer)
{
    std::ifstream file(filename.c_str(), std::ios_base::binary);
    if (!file)
        return false;

    // Determine the file size by seeking to the end
    file.seekg(0, std::ios_base::end);
    std::streamsize size = file.tellg();
    if (size > 0)
    {
        file.seekg(0, std::ios_base::beg);
        buffer.resize(static_cast<std::size_t>(size));
        file.read(&buffer[0], size);

        // Previously a failed/short read was silently ignored and the
        // caller received a partially filled buffer; treat it as an error.
        if (!file)
            return false;
    }

    buffer.push_back('\0');
    return true;
}
// Read the contents of a stream into an array of char
// Returns false if fewer bytes than reported by getSize() could be read.
// A '\0' is always appended so the buffer can be used as a C string.
bool getStreamContents(sf::InputStream& stream, std::vector<char>& buffer)
{
    bool success = true;
    sf::Int64 size = stream.getSize();
    if (size > 0)
    {
        buffer.resize(static_cast<std::size_t>(size));
        // Rewind first: the stream may have been read before.
        stream.seek(0);
        sf::Int64 read = stream.read(&buffer[0], size);
        success = (read == size);
    }
    buffer.push_back('\0');
    return success;
}
// One-time probe for GLSL support: requires the multitexture, shading
// language, shader/program object and vertex/fragment shader extensions.
bool checkShadersAvailable()
{
    // Create a temporary context in case the user checks
    // before a GlResource is created, thus initializing
    // the shared context
    sf::Context context;

    // Make sure that extensions are initialized
    sf::priv::ensureExtensionsInit();

    bool available = GLEXT_multitexture &&
                     GLEXT_shading_language_100 &&
                     GLEXT_shader_objects &&
                     GLEXT_vertex_shader &&
                     GLEXT_fragment_shader;

    return available;
}
}
namespace sf
{
////////////////////////////////////////////////////////////
Shader::CurrentTextureType Shader::CurrentTexture;
////////////////////////////////////////////////////////////
// Default constructor: no GL program yet (-1 marks "no current-texture
// uniform"); resources are created lazily by the load* functions.
Shader::Shader() :
m_shaderProgram (0),
m_currentTexture(-1),
m_textures      (),
m_params        ()
{
}
////////////////////////////////////////////////////////////
// Destructor: releases the GL program object, if one was created.
Shader::~Shader()
{
    // A valid GL context must be active to issue GL calls
    ensureGlContext();

    // Destroy effect program
    if (m_shaderProgram)
        glCheck(GLEXT_glDeleteObject(castToGlHandle(m_shaderProgram)));
}
////////////////////////////////////////////////////////////
// Loads a single vertex OR fragment shader from a file and compiles it.
// Returns false if the file cannot be read or compilation fails.
bool Shader::loadFromFile(const std::string& filename, Type type)
{
    // Pull the whole file into memory
    std::vector<char> shader;
    if (!getFileContents(filename, shader))
    {
        err() << "Failed to open shader file \"" << filename << "\"" << std::endl;
        return false;
    }

    // Hand the source to the compiler as the requested shader type
    const char* code = &shader[0];
    return (type == Vertex) ? compile(code, NULL) : compile(NULL, code);
}
////////////////////////////////////////////////////////////
// Loads and compiles a vertex + fragment shader pair from two files.
// Returns false if either file cannot be read or compilation/link fails.
bool Shader::loadFromFile(const std::string& vertexShaderFilename, const std::string& fragmentShaderFilename)
{
    // Read the vertex shader file
    std::vector<char> vertexShader;
    if (!getFileContents(vertexShaderFilename, vertexShader))
    {
        err() << "Failed to open vertex shader file \"" << vertexShaderFilename << "\"" << std::endl;
        return false;
    }

    // Read the fragment shader file
    std::vector<char> fragmentShader;
    if (!getFileContents(fragmentShaderFilename, fragmentShader))
    {
        err() << "Failed to open fragment shader file \"" << fragmentShaderFilename << "\"" << std::endl;
        return false;
    }

    // Compile the shader program
    return compile(&vertexShader[0], &fragmentShader[0]);
}
////////////////////////////////////////////////////////////
// Compiles an in-memory source string as a vertex OR fragment shader.
bool Shader::loadFromMemory(const std::string& shader, Type type)
{
    // The source is already in memory; compile it as the requested type
    const char* code = shader.c_str();
    return (type == Vertex) ? compile(code, NULL) : compile(NULL, code);
}
////////////////////////////////////////////////////////////
// Compiles a vertex + fragment shader pair from in-memory source strings.
bool Shader::loadFromMemory(const std::string& vertexShader, const std::string& fragmentShader)
{
    // Compile the shader program
    return compile(vertexShader.c_str(), fragmentShader.c_str());
}
////////////////////////////////////////////////////////////
// Loads a single vertex OR fragment shader from a custom input stream.
// Returns false if the stream cannot be fully read or compilation fails.
bool Shader::loadFromStream(InputStream& stream, Type type)
{
    // Read the shader code from the stream
    std::vector<char> shader;
    if (!getStreamContents(stream, shader))
    {
        err() << "Failed to read shader from stream" << std::endl;
        return false;
    }

    // Compile the shader program
    if (type == Vertex)
        return compile(&shader[0], NULL);
    else
        return compile(NULL, &shader[0]);
}
////////////////////////////////////////////////////////////
// Loads and compiles a vertex + fragment shader pair from two streams.
bool Shader::loadFromStream(InputStream& vertexShaderStream, InputStream& fragmentShaderStream)
{
    // Read the vertex shader code from the stream
    std::vector<char> vertexShader;
    if (!getStreamContents(vertexShaderStream, vertexShader))
    {
        err() << "Failed to read vertex shader from stream" << std::endl;
        return false;
    }

    // Read the fragment shader code from the stream
    std::vector<char> fragmentShader;
    if (!getStreamContents(fragmentShaderStream, fragmentShader))
    {
        err() << "Failed to read fragment shader from stream" << std::endl;
        return false;
    }

    // Compile the shader program
    return compile(&vertexShader[0], &fragmentShader[0]);
}
////////////////////////////////////////////////////////////
// Sets a float uniform.  The currently bound program is saved and
// restored so the caller's GL state is left untouched; unknown names are
// silently ignored (a warning is printed once by getParamLocation()).
void Shader::setParameter(const std::string& name, float x)
{
    if (m_shaderProgram)
    {
        ensureGlContext();

        // Enable program
        GLEXT_GLhandle program;
        glCheck(program = GLEXT_glGetHandle(GLEXT_GL_PROGRAM_OBJECT));
        glCheck(GLEXT_glUseProgramObject(castToGlHandle(m_shaderProgram)));

        // Get parameter location and assign it new values
        GLint location = getParamLocation(name);
        if (location != -1)
        {
            glCheck(GLEXT_glUniform1f(location, x));
        }

        // Disable program
        glCheck(GLEXT_glUseProgramObject(program));
    }
}
////////////////////////////////////////////////////////////
// Sets a vec2 uniform; same save/restore protocol as the float overload.
void Shader::setParameter(const std::string& name, float x, float y)
{
    if (m_shaderProgram)
    {
        ensureGlContext();

        // Enable program
        GLEXT_GLhandle program;
        glCheck(program = GLEXT_glGetHandle(GLEXT_GL_PROGRAM_OBJECT));
        glCheck(GLEXT_glUseProgramObject(castToGlHandle(m_shaderProgram)));

        // Get parameter location and assign it new values
        GLint location = getParamLocation(name);
        if (location != -1)
        {
            glCheck(GLEXT_glUniform2f(location, x, y));
        }

        // Disable program
        glCheck(GLEXT_glUseProgramObject(program));
    }
}
////////////////////////////////////////////////////////////
// Sets a vec3 uniform; same save/restore protocol as the float overload.
void Shader::setParameter(const std::string& name, float x, float y, float z)
{
    if (m_shaderProgram)
    {
        ensureGlContext();

        // Enable program
        GLEXT_GLhandle program;
        glCheck(program = GLEXT_glGetHandle(GLEXT_GL_PROGRAM_OBJECT));
        glCheck(GLEXT_glUseProgramObject(castToGlHandle(m_shaderProgram)));

        // Get parameter location and assign it new values
        GLint location = getParamLocation(name);
        if (location != -1)
        {
            glCheck(GLEXT_glUniform3f(location, x, y, z));
        }

        // Disable program
        glCheck(GLEXT_glUseProgramObject(program));
    }
}
////////////////////////////////////////////////////////////
// Sets a vec4 uniform; same save/restore protocol as the float overload.
void Shader::setParameter(const std::string& name, float x, float y, float z, float w)
{
    if (m_shaderProgram)
    {
        ensureGlContext();

        // Enable program
        GLEXT_GLhandle program;
        glCheck(program = GLEXT_glGetHandle(GLEXT_GL_PROGRAM_OBJECT));
        glCheck(GLEXT_glUseProgramObject(castToGlHandle(m_shaderProgram)));

        // Get parameter location and assign it new values
        GLint location = getParamLocation(name);
        if (location != -1)
        {
            glCheck(GLEXT_glUniform4f(location, x, y, z, w));
        }

        // Disable program
        glCheck(GLEXT_glUseProgramObject(program));
    }
}
////////////////////////////////////////////////////////////
// Convenience overload: forwards a Vector2f to the (float, float) setter.
void Shader::setParameter(const std::string& name, const Vector2f& v)
{
    setParameter(name, v.x, v.y);
}
////////////////////////////////////////////////////////////
// Convenience overload: forwards a Vector3f to the (float, float, float)
// setter.
void Shader::setParameter(const std::string& name, const Vector3f& v)
{
    setParameter(name, v.x, v.y, v.z);
}
////////////////////////////////////////////////////////////
// Convenience overload: uploads a Color as a normalized vec4
// (each 0-255 component divided by 255).
void Shader::setParameter(const std::string& name, const Color& color)
{
    setParameter(name, color.r / 255.f, color.g / 255.f, color.b / 255.f, color.a / 255.f);
}
////////////////////////////////////////////////////////////
// Sets a mat4 uniform from a Transform; same save/restore protocol as
// the float overload.
void Shader::setParameter(const std::string& name, const Transform& transform)
{
    if (m_shaderProgram)
    {
        ensureGlContext();

        // Enable program
        GLEXT_GLhandle program;
        glCheck(program = GLEXT_glGetHandle(GLEXT_GL_PROGRAM_OBJECT));
        glCheck(GLEXT_glUseProgramObject(castToGlHandle(m_shaderProgram)));

        // Get parameter location and assign it new values
        GLint location = getParamLocation(name);
        if (location != -1)
        {
            // GL_FALSE: the matrix is passed without transposition
            glCheck(GLEXT_glUniformMatrix4fv(location, 1, GL_FALSE, transform.getMatrix()));
        }

        // Disable program
        glCheck(GLEXT_glUseProgramObject(program));
    }
}
////////////////////////////////////////////////////////////
// Associates a texture with a sampler uniform.  Only the mapping is
// recorded here; the actual GL binding happens in bindTextures() when
// the shader is used.  NOTE: only the address of |texture| is stored,
// so it must stay alive until the shader is drawn.
void Shader::setParameter(const std::string& name, const Texture& texture)
{
    if (m_shaderProgram)
    {
        ensureGlContext();

        // Find the location of the variable in the shader
        int location = getParamLocation(name);
        if (location != -1)
        {
            // Store the location -> texture mapping
            TextureTable::iterator it = m_textures.find(location);
            if (it == m_textures.end())
            {
                // New entry, make sure there are enough texture units
                // (+1 because unit 0 stays reserved for the current
                // texture; see bindTextures()/bind())
                GLint maxUnits = getMaxTextureUnits();
                if (m_textures.size() + 1 >= static_cast<std::size_t>(maxUnits))
                {
                    err() << "Impossible to use texture \"" << name << "\" for shader: all available texture units are used" << std::endl;
                    return;
                }

                m_textures[location] = &texture;
            }
            else
            {
                // Location already used, just replace the texture
                it->second = &texture;
            }
        }
    }
}
////////////////////////////////////////////////////////////
// Marks the sampler uniform |name| as bound to the "current" texture:
// its location is remembered and pointed at texture unit 0 in bind().
void Shader::setParameter(const std::string& name, CurrentTextureType)
{
    if (m_shaderProgram)
    {
        ensureGlContext();

        // Find the location of the variable in the shader
        m_currentTexture = getParamLocation(name);
    }
}
////////////////////////////////////////////////////////////
// Returns the underlying GL program object handle (0 if not loaded).
unsigned int Shader::getNativeHandle() const
{
    return m_shaderProgram;
}
////////////////////////////////////////////////////////////
// Activates |shader| for subsequent draws (binds its program, its
// textures, and points the current-texture sampler at unit 0), or
// unbinds any shader when |shader| is NULL or not loaded.
void Shader::bind(const Shader* shader)
{
    ensureGlContext();

    // Make sure that we can use shaders
    if (!isAvailable())
    {
        err() << "Failed to bind or unbind shader: your system doesn't support shaders "
              << "(you should test Shader::isAvailable() before trying to use the Shader class)" << std::endl;
        return;
    }

    if (shader && shader->m_shaderProgram)
    {
        // Enable the program
        glCheck(GLEXT_glUseProgramObject(castToGlHandle(shader->m_shaderProgram)));

        // Bind the textures
        shader->bindTextures();

        // Bind the current texture
        if (shader->m_currentTexture != -1)
            glCheck(GLEXT_glUniform1i(shader->m_currentTexture, 0));
    }
    else
    {
        // Bind no shader
        glCheck(GLEXT_glUseProgramObject(0));
    }
}
////////////////////////////////////////////////////////////
// Reports whether the system supports GLSL shaders; the (potentially
// expensive) extension probe runs only once, guarded by the mutex for
// pre-C++11 static-initialization safety.
bool Shader::isAvailable()
{
    // TODO: Remove this lock when it becomes unnecessary in C++11
    Lock lock(mutex);

    static bool available = checkShadersAvailable();

    return available;
}
////////////////////////////////////////////////////////////
// Compiles and links the given GLSL sources (either may be NULL to skip
// that stage) into a fresh program object, replacing any program this
// Shader previously held.  Returns false and leaves m_shaderProgram at 0
// on any compile/link error; diagnostics go to sf::err().
bool Shader::compile(const char* vertexShaderCode, const char* fragmentShaderCode)
{
    ensureGlContext();

    // First make sure that we can use shaders
    if (!isAvailable())
    {
        err() << "Failed to create a shader: your system doesn't support shaders "
              << "(you should test Shader::isAvailable() before trying to use the Shader class)" << std::endl;
        return false;
    }

    // Destroy the shader if it was already created
    if (m_shaderProgram)
    {
        glCheck(GLEXT_glDeleteObject(castToGlHandle(m_shaderProgram)));
        m_shaderProgram = 0;
    }

    // Reset the internal state
    m_currentTexture = -1;
    m_textures.clear();
    m_params.clear();

    // Create the program
    GLEXT_GLhandle shaderProgram;
    glCheck(shaderProgram = GLEXT_glCreateProgramObject());

    // Create the vertex shader if needed
    if (vertexShaderCode)
    {
        // Create and compile the shader
        GLEXT_GLhandle vertexShader;
        glCheck(vertexShader = GLEXT_glCreateShaderObject(GLEXT_GL_VERTEX_SHADER));
        glCheck(GLEXT_glShaderSource(vertexShader, 1, &vertexShaderCode, NULL));
        glCheck(GLEXT_glCompileShader(vertexShader));

        // Check the compile log
        GLint success;
        glCheck(GLEXT_glGetObjectParameteriv(vertexShader, GLEXT_GL_OBJECT_COMPILE_STATUS, &success));
        if (success == GL_FALSE)
        {
            char log[1024];
            glCheck(GLEXT_glGetInfoLog(vertexShader, sizeof(log), 0, log));
            err() << "Failed to compile vertex shader:" << std::endl
                  << log << std::endl;
            glCheck(GLEXT_glDeleteObject(vertexShader));
            glCheck(GLEXT_glDeleteObject(shaderProgram));
            return false;
        }

        // Attach the shader to the program, and delete it (not needed anymore)
        glCheck(GLEXT_glAttachObject(shaderProgram, vertexShader));
        glCheck(GLEXT_glDeleteObject(vertexShader));
    }

    // Create the fragment shader if needed
    if (fragmentShaderCode)
    {
        // Create and compile the shader
        GLEXT_GLhandle fragmentShader;
        glCheck(fragmentShader = GLEXT_glCreateShaderObject(GLEXT_GL_FRAGMENT_SHADER));
        glCheck(GLEXT_glShaderSource(fragmentShader, 1, &fragmentShaderCode, NULL));
        glCheck(GLEXT_glCompileShader(fragmentShader));

        // Check the compile log
        GLint success;
        glCheck(GLEXT_glGetObjectParameteriv(fragmentShader, GLEXT_GL_OBJECT_COMPILE_STATUS, &success));
        if (success == GL_FALSE)
        {
            char log[1024];
            glCheck(GLEXT_glGetInfoLog(fragmentShader, sizeof(log), 0, log));
            err() << "Failed to compile fragment shader:" << std::endl
                  << log << std::endl;
            glCheck(GLEXT_glDeleteObject(fragmentShader));
            glCheck(GLEXT_glDeleteObject(shaderProgram));
            return false;
        }

        // Attach the shader to the program, and delete it (not needed anymore)
        glCheck(GLEXT_glAttachObject(shaderProgram, fragmentShader));
        glCheck(GLEXT_glDeleteObject(fragmentShader));
    }

    // Link the program
    glCheck(GLEXT_glLinkProgram(shaderProgram));

    // Check the link log
    GLint success;
    glCheck(GLEXT_glGetObjectParameteriv(shaderProgram, GLEXT_GL_OBJECT_LINK_STATUS, &success));
    if (success == GL_FALSE)
    {
        char log[1024];
        glCheck(GLEXT_glGetInfoLog(shaderProgram, sizeof(log), 0, log));
        err() << "Failed to link shader:" << std::endl
              << log << std::endl;
        glCheck(GLEXT_glDeleteObject(shaderProgram));
        return false;
    }

    m_shaderProgram = castFromGlHandle(shaderProgram);

    // Force an OpenGL flush, so that the shader will appear updated
    // in all contexts immediately (solves problems in multi-threaded apps)
    glCheck(glFlush());

    return true;
}
////////////////////////////////////////////////////////////
// Binds every texture registered via setParameter(name, texture) to its
// own texture unit, starting at unit 1 (unit 0 stays reserved for the
// "current" texture set up by bind()), and points the matching sampler
// uniform at that unit.  Leaves unit 0 active on return.
void Shader::bindTextures() const
{
    TextureTable::const_iterator it = m_textures.begin();
    for (std::size_t i = 0; i < m_textures.size(); ++i)
    {
        // Fixed: the cast previously targeted GLsizei although the
        // variable (and the glUniform1i parameter) is a GLint.
        GLint index = static_cast<GLint>(i + 1);
        glCheck(GLEXT_glUniform1i(it->first, index));
        glCheck(GLEXT_glActiveTexture(GLEXT_GL_TEXTURE0 + index));
        Texture::bind(it->second);
        ++it;
    }

    // Make sure that the texture unit which is left active is the number 0
    glCheck(GLEXT_glActiveTexture(GLEXT_GL_TEXTURE0));
}
////////////////////////////////////////////////////////////
// Returns the uniform location for |name|, caching the result (including
// -1 for unknown names, so the "not found" warning is printed only once
// per name).
int Shader::getParamLocation(const std::string& name)
{
    // Check the cache
    ParamTable::const_iterator it = m_params.find(name);
    if (it != m_params.end())
    {
        // Already in cache, return it
        return it->second;
    }
    else
    {
        // Not in cache, request the location from OpenGL
        int location = GLEXT_glGetUniformLocation(castToGlHandle(m_shaderProgram), name.c_str());
        m_params.insert(std::make_pair(name, location));

        if (location == -1)
            err() << "Parameter \"" << name << "\" not found in shader" << std::endl;

        return location;
    }
}
} // namespace sf
#else // SFML_OPENGL_ES
// OpenGL ES 1 doesn't support GLSL shaders at all, we have to provide an empty implementation
// Stub fallback compiled when SFML targets OpenGL ES 1, which has no
// programmable pipeline: every loader returns false, every setter is a
// no-op, and isAvailable() reports false, so user code can keep calling
// the Shader API unconditionally.
namespace sf
{
////////////////////////////////////////////////////////////
// Definition of the marker object used to bind the currently-drawn texture.
Shader::CurrentTextureType Shader::CurrentTexture;
////////////////////////////////////////////////////////////
Shader::Shader() :
m_shaderProgram (0),
m_currentTexture(-1)
{
}
////////////////////////////////////////////////////////////
Shader::~Shader()
{
}
////////////////////////////////////////////////////////////
// All load* overloads fail unconditionally: shaders cannot exist here.
bool Shader::loadFromFile(const std::string& filename, Type type)
{
return false;
}
////////////////////////////////////////////////////////////
bool Shader::loadFromFile(const std::string& vertexShaderFilename, const std::string& fragmentShaderFilename)
{
return false;
}
////////////////////////////////////////////////////////////
bool Shader::loadFromMemory(const std::string& shader, Type type)
{
return false;
}
////////////////////////////////////////////////////////////
bool Shader::loadFromMemory(const std::string& vertexShader, const std::string& fragmentShader)
{
return false;
}
////////////////////////////////////////////////////////////
bool Shader::loadFromStream(InputStream& stream, Type type)
{
return false;
}
////////////////////////////////////////////////////////////
bool Shader::loadFromStream(InputStream& vertexShaderStream, InputStream& fragmentShaderStream)
{
return false;
}
////////////////////////////////////////////////////////////
// All setParameter overloads silently ignore their arguments.
void Shader::setParameter(const std::string& name, float x)
{
}
////////////////////////////////////////////////////////////
void Shader::setParameter(const std::string& name, float x, float y)
{
}
////////////////////////////////////////////////////////////
void Shader::setParameter(const std::string& name, float x, float y, float z)
{
}
////////////////////////////////////////////////////////////
void Shader::setParameter(const std::string& name, float x, float y, float z, float w)
{
}
////////////////////////////////////////////////////////////
void Shader::setParameter(const std::string& name, const Vector2f& v)
{
}
////////////////////////////////////////////////////////////
void Shader::setParameter(const std::string& name, const Vector3f& v)
{
}
////////////////////////////////////////////////////////////
void Shader::setParameter(const std::string& name, const Color& color)
{
}
////////////////////////////////////////////////////////////
void Shader::setParameter(const std::string& name, const Transform& transform)
{
}
////////////////////////////////////////////////////////////
void Shader::setParameter(const std::string& name, const Texture& texture)
{
}
////////////////////////////////////////////////////////////
void Shader::setParameter(const std::string& name, CurrentTextureType)
{
}
////////////////////////////////////////////////////////////
// No underlying GL object exists, so the native handle is always 0.
unsigned int Shader::getNativeHandle() const
{
return 0;
}
////////////////////////////////////////////////////////////
void Shader::bind(const Shader* shader)
{
}
////////////////////////////////////////////////////////////
// Shaders are never available on OpenGL ES 1.
bool Shader::isAvailable()
{
return false;
}
////////////////////////////////////////////////////////////
bool Shader::compile(const char* vertexShaderCode, const char* fragmentShaderCode)
{
return false;
}
////////////////////////////////////////////////////////////
void Shader::bindTextures() const
{
}
} // namespace sf
#endif // SFML_OPENGL_ES
|
/*
* The MIT License (MIT)
*
* Copyright (c) 2018 Filip Wasil and Fillwave community members
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <flw/flc/buffers/mVertexBufferBasic.h>
#include <flw/Log.h>
FLOGINIT_DEFAULT()
namespace flw {
namespace flc {
// Builds a (chunkDensity+1) x (chunkDensity+1) terrain-style grid of vertices
// centered on the origin in the XZ plane, spaced 'gapSize' apart. The height
// (y) of each vertex comes from 'constructor', called with the vertex's
// normalized UV coordinates. Per-vertex normals and tangents are then
// accumulated from the triangles described by 'indices'.
VertexBufferBasic::VertexBufferBasic(
    std::function<float(float x, float y)> constructor
    , GLint chunkDensity
    , GLfloat gapSize
    , const std::vector<GLuint> &indices
    , GLuint dataStoreModification)
    : TVertexBuffer<VertexBasic>(dataStoreModification) {
  flc::VertexBasic vertex;
  for (float z = 0; z <= chunkDensity; ++z) {
    for (float x = 0; x <= chunkDensity; ++x) {
      vertex.mColor[0] = 0.0f;
      vertex.mColor[1] = 0.0f;
      vertex.mColor[2] = 0.0f;
      vertex.mColor[3] = 1.0f;
      vertex.mPosition[0] = gapSize * (x - chunkDensity / 2);
      vertex.mPosition[2] = gapSize * (z - chunkDensity / 2);
      vertex.mPosition[3] = 1.0;
      vertex.mTextureUV[0] = x / chunkDensity;
      vertex.mTextureUV[1] = z / chunkDensity;
      vertex.mPosition[1] = constructor(vertex.mTextureUV[0], vertex.mTextureUV[1]);
      mDataVertices.push_back(vertex);
    }
  }

  // One accumulator per vertex; triangle contributions are summed below.
  std::vector<glm::vec3> normals(mDataVertices.size(), glm::vec3(0.0));
  std::vector<glm::vec3> tangents(mDataVertices.size(), glm::vec3(0.0));

  for (size_t i = 0; i < indices.size(); i += 3) {
    /* Normals */
    // fix: j and z were file-scope-style `int`s mixed with size_t 'i'
    // (signed/unsigned); keep them unsigned and scoped to the loop.
    const size_t j = i + 1;
    const size_t z = i + 2;
    const GLuint iIdx = indices[i];
    const GLuint jIdx = indices[j];
    const GLuint zIdx = indices[z];
    glm::vec3 v0(mDataVertices[iIdx].mPosition[0], mDataVertices[iIdx].mPosition[1], mDataVertices[iIdx].mPosition[2]);
    glm::vec3 v1(mDataVertices[jIdx].mPosition[0], mDataVertices[jIdx].mPosition[1], mDataVertices[jIdx].mPosition[2]);
    glm::vec3 v2(mDataVertices[zIdx].mPosition[0], mDataVertices[zIdx].mPosition[1], mDataVertices[zIdx].mPosition[2]);
    glm::vec3 normal = glm::normalize(glm::cross(v1 - v0, v2 - v0));
    normals[iIdx] += normal;
    normals[jIdx] += normal;
    normals[zIdx] += normal;

    /* Tangents */
    // Degenerate edge guard: if the first edge is zero-length, use the other.
    glm::vec3 deltaPosition;
    if (v0 == v1) {
      deltaPosition = v2 - v0;
    } else {
      deltaPosition = v1 - v0;
    }
    glm::vec2 deltaUV1(mDataVertices[jIdx].mTextureUV[0] - mDataVertices[iIdx].mTextureUV[0],
                       mDataVertices[jIdx].mTextureUV[1] - mDataVertices[iIdx].mTextureUV[1]);
    // Avoid division by zero when the UV delta along s is degenerate.
    glm::vec3 tangent = deltaPosition / ( deltaUV1.s != 0.0f ? deltaUV1.s : 1.0f );
    // Gram-Schmidt: make the tangent orthogonal to the face normal.
    tangent = glm::normalize(tangent - glm::dot(normal, tangent) * normal);
    tangents[iIdx] += tangent;
    tangents[jIdx] += tangent;
    tangents[zIdx] += tangent;  // fix: was indices[z], inconsistent with the cached zIdx used above
  }

  // Normalize the accumulated sums and write them back into the vertex data.
  // (Vertices referenced by several triangles are re-normalized redundantly,
  // but normalize() is idempotent so the result is unchanged.)
  for (size_t i = 0; i < indices.size(); ++i) {
    glm::vec3 vector3_n = glm::normalize(normals[indices[i]]);
    glm::vec3 vector3_t = glm::normalize(tangents[indices[i]]);
    mDataVertices[indices[i]].mNormal[0] = vector3_n.x;
    mDataVertices[indices[i]].mNormal[1] = vector3_n.y;
    mDataVertices[indices[i]].mNormal[2] = vector3_n.z;
    mDataVertices[indices[i]].mNormalTangentMap[0] = vector3_t.x;
    mDataVertices[indices[i]].mNormalTangentMap[1] = vector3_t.y;
    mDataVertices[indices[i]].mNormalTangentMap[2] = vector3_t.z;
  }

  mTotalElements = mDataVertices.size();
  mData = mDataVertices.data();
  mSize = mTotalElements * sizeof(VertexBasic);
}
// Constructs the buffer directly from ready-made vertex data; all storage
// and upload bookkeeping is delegated to the TVertexBuffer base class.
VertexBufferBasic::VertexBufferBasic(const std::vector<flc::VertexBasic> &vertices, GLuint dataStoreModification)
: TVertexBuffer<VertexBasic>(vertices, dataStoreModification) {
// nothing
}
// Returns half the size of a cube-shaped occlusion box: the axis-aligned
// bounding box of all vertex positions is computed, and its largest extent
// is used for every axis. Positions are assumed to lie within +/-10000 on
// each axis (the sentinel starting values below).
glm::vec3 VertexBufferBasic::getOcclusionBoxSize() {
  glm::vec3 maximum(-10000.0, -10000.0, -10000.0);
  glm::vec3 minimum(10000.0, 10000.0, 10000.0);
  for (GLuint i = 0; i < mTotalElements; ++i) {
    // Track per-axis minima and maxima of the vertex positions.
    // (Replaces six copy-pasted if-blocks with one loop over the axes.)
    for (int axis = 0; axis < 3; ++axis) {
      if (mDataVertices[i].mPosition[axis] > maximum[axis]) {
        maximum[axis] = mDataVertices[i].mPosition[axis];
      }
      if (mDataVertices[i].mPosition[axis] < minimum[axis]) {
        minimum[axis] = mDataVertices[i].mPosition[axis];
      }
    }
  }
  glm::vec3 extents = maximum - minimum;
  // The original nested ternaries computed exactly the overall maximum
  // extent; state that intent directly.
  float largest = extents.x;
  if (extents.y > largest) {
    largest = extents.y;
  }
  if (extents.z > largest) {
    largest = extents.z;
  }
  return glm::vec3(largest) * 0.5f;
}
// Dumps UV, normal and position of every vertex at info log level.
void VertexBufferBasic::log() const {
  for (size_t idx = 0; idx < mDataVertices.size(); ++idx) {
    const auto& v = mDataVertices[idx];
    fLogI("Vertex UV: ", v.mTextureUV[0], " ", v.mTextureUV[1]);
    fLogI("Vertex normal: ",v.mNormal[0], " ", v.mNormal[1], " ", v.mNormal[2]);
    fLogI("Vertex position: ",v.mPosition[0], " ", v.mPosition[1], " ", v.mPosition[2]);
  }
}
} /* flc */
} /* flw */
|
// Copyright 2012 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <boost/utility.hpp>
#include <gtest/gtest.h>
#include <math.h>
#include "common/init.h"
#include "util/rle-encoding.h"
#include "util/bit-stream-utils.inline.h"
#include "common/names.h"
namespace impala {
// Largest bit width exercised by the bit-array / RLE tests below.
const int MAX_WIDTH = 32;
// Exercises single-bit writes and reads through BitWriter/BitReader and
// checks the exact packed-byte layout produced.
TEST(BitArray, TestBool) {
const int len = 8;
uint8_t buffer[len];
BitWriter writer(buffer, len);
// Write alternating 0's and 1's
for (int i = 0; i < 8; ++i) {
bool result = writer.PutValue(i % 2, 1);
EXPECT_TRUE(result);
}
writer.Flush();
// First written bit lands in the least-significant bit of the byte
// (BOOST_BINARY lists bits most-significant first).
EXPECT_EQ((int)buffer[0], BOOST_BINARY(1 0 1 0 1 0 1 0));
// Write 00110011
for (int i = 0; i < 8; ++i) {
bool result = false;
switch (i) {
case 0:
case 1:
case 4:
case 5:
result = writer.PutValue(false, 1);
break;
default:
result = writer.PutValue(true, 1);
break;
}
EXPECT_TRUE(result);
}
writer.Flush();
// Validate the exact bit value
EXPECT_EQ((int)buffer[0], BOOST_BINARY(1 0 1 0 1 0 1 0));
EXPECT_EQ((int)buffer[1], BOOST_BINARY(1 1 0 0 1 1 0 0));
// Use the reader and validate
BitReader reader(buffer, len);
for (int i = 0; i < 8; ++i) {
bool val = false;
bool result = reader.GetValue(1, &val);
EXPECT_TRUE(result);
EXPECT_EQ(val, i % 2);
}
// Second group must read back exactly the pattern written above.
for (int i = 0; i < 8; ++i) {
bool val = false;
bool result = reader.GetValue(1, &val);
EXPECT_TRUE(result);
switch (i) {
case 0:
case 1:
case 4:
case 5:
EXPECT_EQ(val, false);
break;
default:
EXPECT_EQ(val, true);
break;
}
}
}
// Writes 'num_vals' values with width 'bit_width' and reads them back.
void TestBitArrayValues(int bit_width, int num_vals) {
// Number of bytes needed to hold num_vals values of bit_width bits.
const int len = BitUtil::Ceil(bit_width * num_vals, 8);
// Values are written as i % mod so each fits in bit_width bits.
// NOTE(review): the bit_width == 64 branch yields mod == 1 (all zeros);
// unreachable here since MAX_WIDTH is 32 — confirm before reusing wider.
const uint64_t mod = bit_width == 64? 1 : 1LL << bit_width;
uint8_t buffer[len];
BitWriter writer(buffer, len);
for (int i = 0; i < num_vals; ++i) {
bool result = writer.PutValue(i % mod, bit_width);
EXPECT_TRUE(result);
}
writer.Flush();
// Writer must have consumed exactly the computed number of bytes.
EXPECT_EQ(writer.bytes_written(), len);
BitReader reader(buffer, len);
for (int i = 0; i < num_vals; ++i) {
int64_t val;
bool result = reader.GetValue(bit_width, &val);
EXPECT_TRUE(result);
EXPECT_EQ(val, i % mod);
}
// Every byte of the buffer must have been consumed on read-back.
EXPECT_EQ(reader.bytes_left(), 0);
}
// Round-trips values across every supported bit width and several counts.
TEST(BitArray, TestValues) {
  for (int w = 0; w <= MAX_WIDTH; ++w) {
    TestBitArrayValues(w, 1);
    TestBitArrayValues(w, 2);
    // Don't write too many values
    const int bulk_count = (w < 12) ? (1 << w) : 4096;
    TestBitArrayValues(w, bulk_count);
    TestBitArrayValues(w, 1024);
  }
}
// Test some mixed values
// Interleaves 1-bit flags and 10-bit integers in the same stream and checks
// that both read back correctly in order.
TEST(BitArray, TestMixed) {
const int len = 1024;
uint8_t buffer[len];
bool parity = true;
BitWriter writer(buffer, len);
// Even iterations write a 1-bit parity flag, odd iterations a 10-bit int.
for (int i = 0; i < len; ++i) {
bool result;
if (i % 2 == 0) {
result = writer.PutValue(parity, 1);
parity = !parity;
} else {
result = writer.PutValue(i, 10);
}
EXPECT_TRUE(result);
}
writer.Flush();
// Replay the same alternation pattern on the reader side.
parity = true;
BitReader reader(buffer, len);
for (int i = 0; i < len; ++i) {
bool result;
if (i % 2 == 0) {
bool val;
result = reader.GetValue(1, &val);
EXPECT_EQ(val, parity);
parity = !parity;
} else {
int val;
result = reader.GetValue(10, &val);
EXPECT_EQ(val, i);
}
EXPECT_TRUE(result);
}
}
// Validates encoding of values by encoding and decoding them. If
// expected_encoding != NULL, also validates that the encoded buffer is
// exactly 'expected_encoding'.
// if expected_len is not -1, it will validate the encoded size is correct.
void ValidateRle(const vector<int>& values, int bit_width,
    uint8_t* expected_encoding, int expected_len) {
  const int len = 64 * 1024;
  uint8_t buffer[len];
  EXPECT_LE(expected_len, len);

  RleEncoder encoder(buffer, len, bit_width);
  // fix: loop indices were `int` compared against vector::size()
  // (signed/unsigned mismatch); use size_t throughout.
  for (size_t i = 0; i < values.size(); ++i) {
    bool result = encoder.Put(values[i]);
    EXPECT_TRUE(result);
  }
  int encoded_len = encoder.Flush();

  if (expected_len != -1) {
    EXPECT_EQ(encoded_len, expected_len);
  }
  if (expected_encoding != NULL) {
    EXPECT_TRUE(memcmp(buffer, expected_encoding, expected_len) == 0);
  }

  // Verify read
  RleDecoder decoder(buffer, len, bit_width);
  for (size_t i = 0; i < values.size(); ++i) {
    uint64_t val;
    bool result = decoder.Get(&val);
    EXPECT_TRUE(result);
    EXPECT_EQ(values[i], val);
  }
}
// Checks the exact encoded bytes for two hand-constructed sequences:
// a long repeated run pair, and fully alternating bits (literal groups).
TEST(Rle, SpecificSequences) {
const int len = 1024;
uint8_t expected_buffer[len];
vector<int> values;
// Test 50 0' followed by 50 1's
values.resize(100);
for (int i = 0; i < 50; ++i) {
values[i] = 0;
}
for (int i = 50; i < 100; ++i) {
values[i] = 1;
}
// expected_buffer valid for bit width <= 1 byte
// Encoding is two repeated runs: (run-length << 1) header then the value.
expected_buffer[0] = (50 << 1);
expected_buffer[1] = 0;
expected_buffer[2] = (50 << 1);
expected_buffer[3] = 1;
for (int width = 1; width <= 8; ++width) {
ValidateRle(values, width, expected_buffer, 4);
}
// Wider values need Ceil(width, 8) bytes per run value.
for (int width = 9; width <= MAX_WIDTH; ++width) {
ValidateRle(values, width, NULL, 2 * (1 + BitUtil::Ceil(width, 8)));
}
// Test 100 0's and 1's alternating
for (int i = 0; i < 100; ++i) {
values[i] = i % 2;
}
// Alternating bits can't be run-length encoded: one literal indicator
// byte followed by the bit-packed groups.
int num_groups = BitUtil::Ceil(100, 8);
expected_buffer[0] = (num_groups << 1) | 1;
for (int i = 1; i <= 100/8; ++i) {
expected_buffer[i] = BOOST_BINARY(1 0 1 0 1 0 1 0);
}
// Values for the last 4 0 and 1's. The upper 4 bits should be padded to 0.
expected_buffer[100/8 + 1] = BOOST_BINARY(0 0 0 0 1 0 1 0);
// num_groups and expected_buffer only valid for bit width = 1
ValidateRle(values, 1, expected_buffer, 1 + num_groups);
for (int width = 2; width <= MAX_WIDTH; ++width) {
int num_values = BitUtil::Ceil(100, 8) * 8;
ValidateRle(values, width, NULL, 1 + BitUtil::Ceil(width * num_values, 8));
}
}
// ValidateRle on 'num_vals' values with width 'bit_width'. If 'value' != -1, that value
// is used, otherwise alternating values are used.
void TestRleValues(int bit_width, int num_vals, int value = -1) {
  const uint64_t mod = (bit_width == 64) ? 1 : 1LL << bit_width;
  vector<int> values;
  values.reserve(num_vals);
  for (int idx = 0; idx < num_vals; ++idx) {
    const int next = (value != -1) ? value : (idx % mod);
    values.push_back(next);
  }
  ValidateRle(values, bit_width, NULL, -1);
}
// Round-trips incrementing and constant sequences at every bit width.
TEST(Rle, TestValues) {
  for (int w = 1; w <= MAX_WIDTH; ++w) {
    TestRleValues(w, 1);
    TestRleValues(w, 1024);
    TestRleValues(w, 1024, 0);
    TestRleValues(w, 1024, 1);
  }
}
// Decodes a hand-built repeated run with bit width 0: the run header is the
// only byte, and every decoded value must be 0.
TEST(Rle, BitWidthZeroRepeated) {
uint8_t buffer[1];
const int num_values = 15;
buffer[0] = num_values << 1; // repeated indicator byte
RleDecoder decoder(buffer, sizeof(buffer), 0);
uint8_t val;
for (int i = 0; i < num_values; ++i) {
bool result = decoder.Get(&val);
EXPECT_TRUE(result);
EXPECT_EQ(val, 0); // can only encode 0s with bit width 0
}
// Reading past the declared run must fail.
EXPECT_FALSE(decoder.Get(&val));
}
// Decodes a hand-built literal group with bit width 0: each group holds
// eight zero-width values, all of which must decode to 0.
TEST(Rle, BitWidthZeroLiteral) {
  uint8_t buffer[1];
  const int num_groups = 4;
  buffer[0] = num_groups << 1 | 1; // literal indicator byte
  // Direct initialization instead of the original copy-init spelling.
  RleDecoder decoder(buffer, sizeof(buffer), 0);
  const int num_values = num_groups * 8;
  uint8_t val;
  int remaining = num_values;
  while (remaining-- > 0) {
    bool got_one = decoder.Get(&val);
    EXPECT_TRUE(got_one);
    EXPECT_EQ(val, 0); // can only encode 0s with bit width 0
  }
  EXPECT_FALSE(decoder.Get(&val));
}
// Test that writes out a repeated group and then a literal
// group but flush before finishing.
TEST(BitRle, Flush) {
vector<int> values;
// 16 ones form a repeated run; the trailing 0 then singles force the
// encoder into a partially-filled literal group at Flush() time.
for (int i = 0; i < 16; ++i) values.push_back(1);
values.push_back(0);
ValidateRle(values, 1, NULL, -1);
values.push_back(1);
ValidateRle(values, 1, NULL, -1);
values.push_back(1);
ValidateRle(values, 1, NULL, -1);
values.push_back(1);
ValidateRle(values, 1, NULL, -1);
}
// Test some random sequences.
// Generates seeded pseudo-random runs of alternating parity (run lengths
// 1..16, with 17..20 collapsed to 1) and round-trips them at varying widths.
TEST(BitRle, Random) {
  int iters = 0;
  while (iters < 1000) {
    srand(iters++);
    if (iters % 10000 == 0) LOG(ERROR) << "Seed: " << iters;
    vector<int> values;
    bool parity = false;  // fix: was `bool parity = 0;` (int literal for a bool)
    for (int i = 0; i < 1000; ++i) {
      int group_size = rand() % 20 + 1;
      if (group_size > 16) {
        group_size = 1;
      }
      // fix: the inner loop previously shadowed the outer variable 'i'
      for (int j = 0; j < group_size; ++j) {
        values.push_back(parity);
      }
      parity = !parity;
    }
    ValidateRle(values, (iters % MAX_WIDTH) + 1, NULL, -1);
  }
}
// Test a sequence of 1 0's, 2 1's, 3 0's. etc
// e.g. 011000111100000
TEST(BitRle, RepeatedPattern) {
vector<int> values;
const int min_run = 1;
const int max_run = 32;
// Ascending run lengths: i copies of (i % 2).
for (int i = min_run; i <= max_run; ++i) {
int v = i % 2;
for (int j = 0; j < i; ++j) {
values.push_back(v);
}
}
// And go back down again
for (int i = max_run; i >= min_run; --i) {
int v = i % 2;
for (int j = 0; j < i; ++j) {
values.push_back(v);
}
}
ValidateRle(values, 1, NULL, -1);
}
// Fills a minimum-size encoder buffer until Put() reports overflow, then
// verifies everything accepted before the overflow decodes back correctly.
TEST(BitRle, Overflow) {
for (int bit_width = 1; bit_width < 32; bit_width += 3) {
const int len = RleEncoder::MinBufferSize(bit_width);
uint8_t buffer[len];
int num_added = 0;
bool parity = true;
RleEncoder encoder(buffer, len, bit_width);
// Insert alternating true/false until there is no space left
while (true) {
bool result = encoder.Put(parity);
parity = !parity;
if (!result) break;
++num_added;
}
int bytes_written = encoder.Flush();
// Flush must never exceed the buffer, and at least one value must fit.
EXPECT_LE(bytes_written, len);
EXPECT_GT(num_added, 0);
RleDecoder decoder(buffer, bytes_written, bit_width);
parity = true;
uint32_t v;
for (int i = 0; i < num_added; ++i) {
bool result = decoder.Get(&v);
EXPECT_TRUE(result);
EXPECT_EQ(v, parity);
parity = !parity;
}
// Make sure we get false when reading past end a couple times.
EXPECT_FALSE(decoder.Get(&v));
EXPECT_FALSE(decoder.Get(&v));
}
}
}
// Test entry point: initializes gtest and the Impala runtime, then runs
// every test registered above.
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
impala::InitCommonRuntime(argc, argv, true);
return RUN_ALL_TESTS();
}
|
#include <vector>
// Disjoint-set (union-find) over n elements with union-by-size and
// path halving. The original did not compile: `union` (a reserved C++
// keyword) was used as a method name, `resive` was a typo for `resize`,
// and `parent`/`size` were used where the members are `parent_`/`size_`.
class UnionFind {
public:
  // Constructs n singleton sets {0}, {1}, ..., {n-1}.
  explicit UnionFind(int n) {
    parent_.resize(n);
    // fix: was `size_.resive(n)` (typo) which also left every size at 0;
    // each singleton set has size 1.
    size_.assign(n, 1);
    for (int i = 0; i < n; ++i) {
      parent_[i] = i;  // fix: was `parent[i]`
    }
    count_ = n;
  }

  /* Merge the sets containing p and q. */
  // Renamed from `union`, which is a reserved keyword and cannot name a
  // function; no caller in this file used the old name.
  void unite(int p, int q) {
    auto pRoot = find(p);
    auto qRoot = find(q);
    if (pRoot == qRoot) {
      return;
    }
    // Balance: attach the smaller tree under the larger root.
    if (size_[pRoot] >= size_[qRoot]) {
      parent_[qRoot] = pRoot;
      size_[pRoot] += size_[qRoot];  // fix: accumulated into the wrong (child) root
    } else {
      parent_[pRoot] = qRoot;
      size_[qRoot] += size_[pRoot];  // fix: same as above, mirrored
    }
    --count_;  // fix: the number of disjoint sets was never decremented
  }

  /* Return whether p and q belong to the same set. */
  bool isConnected(int p, int q) {
    auto pRoot = find(p);
    auto qRoot = find(q);
    return pRoot == qRoot;
  }

  // Number of disjoint sets currently remaining.
  int count() const { return count_; }

private:
  /* Return the root of x, halving the path along the way. */
  int find(int x) {
    // Path halving: point every visited node at its grandparent, which
    // keeps trees shallow without a second pass.
    while (parent_[x] != x) {
      parent_[x] = parent_[parent_[x]];
      x = parent_[x];
    }
    return x;
  }

private:
  std::vector<int> parent_;  // parent_[i] == i iff i is a root
  std::vector<int> size_;    // size of the set rooted at i (valid for roots)
  int count_;                // number of disjoint sets
};
int main() {
  // fix: `UnionFind uf;` did not compile — the class has no default
  // constructor; it must be given the number of elements.
  UnionFind uf(8);
  return 0;
}
|
/*
Copyright 2012-2019 Ronald Römer
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cassert>
#include <iostream>
#include <fstream>
#include <json/json.h>
#include <json/reader.h>
#include "inja.hpp"
#include "nlohmann/json.hpp"
#include "Decomposer.h"
#include "Tools.h"
// Out-of-class definition of Point's static counter, zero-initialized.
// NOTE(review): presumably declared in one of the included project headers
// (Decomposer.h / Tools.h) — confirm against those headers.
int Point::_tag = 0;
// Builds a polygon from a JSON array of delta-encoded 2D points: the first
// point is absolute and every subsequent point is stored as an offset from
// its predecessor, which the second loop converts to absolute coordinates.
void ToPoly (const Json::Value& pts, PolyType &poly) {
    int i = 0;
    for (const Json::Value& pt : pts) {
        poly.push_back(Point(pt[0].asDouble(), pt[1].asDouble(), i++));
    }
    // fix: the index was `int` compared against poly.size()
    // (signed/unsigned mismatch); use std::size_t.
    for (std::size_t j = 1; j < poly.size(); j++) {
        poly[j].pt[0] += poly[j-1].pt[0];
        poly[j].pt[1] += poly[j-1].pt[1];
    }
}
// Reads a polygon index from argv[1], loads the matching polygon from
// special.json, decomposes it, and renders the result as an SVG via inja.
int main (int argc, char *argv[]) {
    // fix: argv[1] was dereferenced unconditionally — running without an
    // argument was undefined behavior.
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " <poly-index>" << std::endl;
        return 1;
    }
    std::istringstream stream(argv[1]);
    int t;
    stream >> t;

    Json::Value doc;
    Json::CharReaderBuilder reader;
    std::ifstream jn("../../vp/dev/special.json");
    std::string err;
    if (Json::parseFromStream(reader, jn, &doc, &err)) {
        const Json::Value polys = doc["polys"];
        inja::Environment env;
        env.set_element_notation(inja::ElementNotation::Dot);
        int i = 0;
        for (const Json::Value& p : polys) {
            // Only the polygon selected on the command line is processed.
            if (i == t) {
                PolyType poly;
                ToPoly(p, poly);
                assert(TestCW(poly));
                nlohmann::json data;
                Ext ext;
                GetExt(poly, ext);
                // Template variables: bounding box and origin offset.
                data["width"] = std::abs(ext.maxX-ext.minX);
                data["height"] = std::abs(ext.maxY-ext.minY);
                data["x"] = -ext.minX;
                data["y"] = -ext.minY;
                data["poly"] = GetAbsolutePath(poly);
                Decomposer d(poly, 1.);
                DecResType decs;
                d.GetDecomposed(decs);
                // Each decomposed piece becomes one path in the SVG; every
                // piece must keep the clockwise orientation.
                for (auto& dec : decs) {
                    PolyType piece;
                    for (int id : dec) {
                        piece.push_back(poly[id]);
                    }
                    data["data"].push_back({{ "path", GetAbsolutePath(piece) }});
                    assert(TestCW(piece));
                }
                std::stringstream name;
                name << "../dev/res/special_" << i << ".svg";
                env.write("../dev/template.svg", data, name.str());
            }
            i++;
        }
    } else {
        // fix: a parse failure was silently ignored; report it instead.
        std::cerr << "Failed to parse special.json: " << err << std::endl;
        return 1;
    }
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.